blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d2ba21d4c8e49cd960399f28fa52009def4433a7
|
b0b54c7b80ff22fbb7d3910c7f4eaa0fa5e970c1
|
/make_master.R
|
bb91b08e81a48af1ecf6923b6a1b12b46a5823d8
|
[] |
no_license
|
yumifoo/finemap
|
7880e572fcf333fb1b458eeb65110aeb81dcc5ef
|
9d3132c3683c2409ab8d5249e062bd8ceac387fc
|
refs/heads/master
| 2020-09-24T18:33:45.759851
| 2019-12-11T12:08:11
| 2019-12-11T12:08:11
| 225,817,684
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,787
|
r
|
make_master.R
|
# make input file master file for ld store of each region
# i.e. the master file will be a two lines file, one header, one content
#!/usr/bin/env Rscript
# NOTE(review): the shebang above is not on line 1 of the file, so it has
# no effect when the script is executed directly; kept as-is.
args = commandArgs(trailingOnly=TRUE)
# Single command-line argument: the trait/phenotype name, used to locate
# the BOLT-LMM result files and to name every output produced below.
trait <- as.character(args[1])
# Minor-allele-frequency threshold: only variants with MAF > 0.01 are kept.
maf <- 0.01
library(data.table)
# Provides BOLT2z(), used below to write per-region z files — presumably a
# converter from BOLT-LMM summary statistics to the LDstore/FINEMAP .z
# format (definition not visible here; confirm in the sourced file).
source("/fs/projects/ukbb/yu/src/BOLT2z.R")
#########################
### autosomes ###
#########################
chromosomes <- 1:22
##### load GWAS results ######
res <- fread(sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/BOLT_sex_combined_s343695_WhiteBritish.%s.bgen.stats.gz", trait))
# Fold the allele-1 frequency into a minor-allele frequency, then filter.
res$MAF <- ifelse(res$A1FREQ < 0.5, res$A1FREQ, 1-res$A1FREQ)
res <- res[res$MAF > maf, ]
####### make z files for each region ############
# For each autosome with a merged-region file: for every 1 Mb region write
# (1) a .z file of summary statistics, (2) a semicolon-separated
# LDstore/FINEMAP master file, (3) one-line shell scripts that invoke
# LDstore and FINEMAP, and (4) list files enumerating those scripts.
for (ii in seq_along(chromosomes)){
chr <- chromosomes[ii]
region.file <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/regions/merged_region_1mb.txt",trait, chr)
if(file.exists(region.file)){
# Count regions. On unix, system2() pastes the command and its args into
# one shell command line, so the " | awk" suffix really runs as a pipe:
# wc -l <file> | awk '{print $1}'  ->  number of lines in the region file.
n.region <- as.integer(system2("wc",
args = c("-l",
region.file,
" | awk '{print $1}'"),
stdout = TRUE))
if(n.region > 0){
# Two unnamed columns: region start (V1) and end (V2) positions in bp.
regions <- read.table(region.file, header = FALSE)
auto.res <- subset(res, CHR == chr)
filenames <- c()
infile.ld <- c()
infile.finemap <- c()
for (jj in 1:nrow(regions)){
# Summary statistics strictly inside this region (exclusive bounds).
subres <- subset(auto.res, BP > regions[jj,1] & BP < regions[jj,2])
output <- sprintf("BOLT_sex_combined_s343695_WhiteBritish_MAF_0.01_%s_chr%d_%d_%d",
trait, chr,regions[jj,1],regions[jj,2])
# Write the per-region .z file of summary statistics.
BOLT2z(subres, sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/z_files/%s",
trait,chr,output))
filenames <- c(filenames,output)
print(paste("The number of SNPs in", chr, regions[jj,1], regions[jj,2], ":", nrow(subres)))
####### make master files for each region ########
# Each variable below becomes one column of the single-row master file.
z <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/z_files/%s.z",
trait,chr,output)
#ld <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/ld/%s.ld",
# trait,chr,output)
bcor <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/ld/%s.bcor",
trait,chr,output)
bdose <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/ld/%s.bdose",
trait,chr,output)
bgen <- sprintf("/fs/projects/ukbb/geno_v3/ukb_imp_chr%d_v3.bgen",chr)
bgi <- sprintf("/fs/projects/ukbb/geno_v3/ukb_imp_chr%d_v3.bgen.bgi",chr)
sample <- "/fs/projects/ukbb/geno_v3/ukb22627_imp_chr1-22_v3_s487395.sample"
incl <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/sex_combined_BOLT_%s.incl",trait)
# Sample count = line count of the .incl file (same shell-pipe trick as
# above). This is loop-invariant; recomputed each iteration as written.
n_samples <- as.integer(system2("wc",
args = c("-l",
incl,
" | awk '{print $1}'"),
stdout = TRUE))
snp <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/finemap/%s.snp",
trait,chr,output)
config <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/finemap/%s.config",
trait,chr,output)
cred <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/finemap/%s.cred",
trait,chr,output)
# NOTE(review): `log` shadows base::log inside this script; harmless here
# since base::log is never called afterwards.
log <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/finemap/%s.log",
trait,chr,output)
# One-row matrix; write.table with sep=";" yields the two-line
# (header + content) master file that LDstore/FINEMAP expect.
master <- cbind(z,bcor, bdose,bgen,bgi,sample,incl,n_samples,snp,config,cred,log)
write.table(master, file = sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/master/master_chr%d_%d_%d",
trait, chr, chr, regions[jj,1], regions[jj,2]),
quote = FALSE, row.names = FALSE, col.names = TRUE, sep = ";")
#infile[jj] <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/master/master_chr%d_%d_%d",
# trait, chr, chr, regions[jj,1], regions[jj,2])
#write.table(infile, file = sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/master_list.txt",trait,chr),
# quote = FALSE, row.names = FALSE, col.names = FALSE)
########### make ld commands ##############
command_ld <- sprintf("/fs/projects/ukbb/christian/binaries/150419/ldstore_v2.0b_x86_64 --in-files /fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/master/master_chr%d_%d_%d --write-bcor --write-bdose --n-threads 50 --cpu-mem 100",
trait, chr, chr,regions[jj,1], regions[jj,2])
# col.names = "#!/bin/bash" writes the shebang as the "header" line,
# turning the output into a runnable one-command shell script.
write.table(command_ld,sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/ld/ldstore_chr%d_%d_%d.sh",
trait, chr, chr, regions[jj,1], regions[jj,2]),
quote = FALSE, row.names = FALSE, col.names = "#!/bin/bash")
infile.ld[jj] <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/ld/ldstore_chr%d_%d_%d.sh",
trait, chr, chr, regions[jj,1], regions[jj,2])
# Rewritten every iteration; only the final write (full vector) matters.
write.table(infile.ld, file = sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/ld/ldstore_list.txt",trait,chr),
quote = FALSE, row.names = FALSE, col.names = FALSE)
########### make finemap commands ##############
command_finemap <- sprintf("/fs/projects/ukbb/christian/binaries/180419/finemap_v1.4_x86_64 --cond --in-files /fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/master/master_chr%d_%d_%d --log --n-causal-snps 30",
trait, chr, chr,regions[jj,1], regions[jj,2])
write.table(command_finemap, sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/finemap/finemap_cond_chr%d_%d_%d.sh",
trait, chr, chr, regions[jj,1], regions[jj,2]),
quote = FALSE, row.names = FALSE, col.names = "#!/bin/bash")
infile.finemap[jj] <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/finemap/finemap_cond_chr%d_%d_%d.sh",
trait, chr, chr, regions[jj,1], regions[jj,2])
write.table(infile.finemap, file = sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/finemap/finemap_cond_list.txt",trait,chr),
quote = FALSE, row.names = FALSE, col.names = FALSE)
}
}
}
}
###########################
### chromosome X ###
###########################
# Same pipeline as the autosome loop above, restricted to the non-PAR
# (non-pseudo-autosomal) part of chromosome X, with X-specific bgen/
# sample files.
chr <- 23
# Positions of non-PAR chromosome-X variants, used to filter `res` below.
non.PAR.list <- fread("/fs/projects/ukbb/yu/ukb_imp_chrX_snplist.txt")
region.file <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/regions/merged_region_1mb.txt",trait, chr)
if(file.exists(region.file)){
# Shell-pipe line count via system2, as in the autosome loop.
n.region <- as.integer(system2("wc",
args = c("-l",
region.file,
" | awk '{print $1}'"),
stdout = TRUE))
if(n.region > 0){
regions <- read.table(region.file, header = FALSE)
res.x <- subset(res, CHR == chr)
# Keep only variants whose position appears in the non-PAR SNP list.
auto.res <- subset(res.x, BP %in% non.PAR.list$position)
filenames <- c()
infile.ld <- c()
infile.finemap <- c()
for (jj in 1:nrow(regions)){
# Summary statistics strictly inside this region (exclusive bounds).
subres <- subset(auto.res, BP > regions[jj,1] & BP < regions[jj,2])
output <- sprintf("BOLT_sex_combined_s343695_WhiteBritish_MAF_0.01_%s_chr%d_%d_%d",
trait, chr,regions[jj,1],regions[jj,2])
BOLT2z(subres, sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/z_files/%s",
trait,chr,output))
filenames <- c(filenames,output)
print(paste("The number of SNPs in", chr, regions[jj,1], regions[jj,2], ":", nrow(subres)))
####### make master files for each region ########
z <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/z_files/%s.z",
trait,chr,output)
#ld <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/ld/%s.ld",
#trait,chr,output)
bcor <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/ld/%s.bcor",
trait,chr,output)
bdose <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/ld/%s.bdose",
trait,chr,output)
# X-specific genotype and sample files (fixed paths, not per-chromosome).
bgen <- "/fs/projects/ukbb/geno_v3/ukb_imp_chrX_v3.bgen"
bgi <- "/fs/projects/ukbb/geno_v3/ukb_imp_chrX_v3.bgen.bgi"
sample <- "/fs/projects/ukbb/geno_v3/ukb22627_imp_chrX_v3_s486743.sample"
incl <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/sex_combined_BOLT_%s.incl",trait)
n_samples <- as.integer(system2("wc",
args = c("-l",
incl,
" | awk '{print $1}'"),
stdout = TRUE))
snp <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/finemap/%s.snp",
trait,chr,output)
config <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/finemap/%s.config",
trait,chr,output)
cred <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/finemap/%s.cred",
trait,chr,output)
# NOTE(review): `log` shadows base::log here; harmless in this script.
log <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/finemap/%s.log",
trait,chr,output)
# One-row, semicolon-separated master file (header + content lines).
master <- cbind(z,bcor, bdose,bgen,bgi,sample,incl,n_samples,snp,config,cred,log)
write.table(master, file = sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/master/master_chr%d_%d_%d",
trait, chr, chr, regions[jj,1], regions[jj,2]),
quote = FALSE, row.names = FALSE, col.names = TRUE, sep = ";")
#infile[jj] <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/master/master_chr%d_%d_%d",
# trait, chr, chr, regions[jj,1], regions[jj,2])
#write.table(infile, file = sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/master_list.txt",trait,chr),
# quote = FALSE, row.names = FALSE, col.names = FALSE)
########### make ld commands ##############
command_ld <- sprintf("/fs/projects/ukbb/christian/binaries/150419/ldstore_v2.0b_x86_64 --in-files /fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/master/master_chr%d_%d_%d --write-bcor --write-bdose --n-threads 50 --cpu-mem 100",
trait, chr, chr,regions[jj,1], regions[jj,2])
# col.names = "#!/bin/bash" writes the shebang as the header line.
write.table(command_ld,sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/ld/ldstore_chr%d_%d_%d.sh",
trait, chr, chr, regions[jj,1], regions[jj,2]),
quote = FALSE, row.names = FALSE, col.names = "#!/bin/bash")
infile.ld[jj] <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/ld/ldstore_chr%d_%d_%d.sh",
trait, chr, chr, regions[jj,1], regions[jj,2])
# Rewritten every iteration; the final write holds the full list.
write.table(infile.ld, file = sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/ld/ldstore_list.txt",trait,chr),
quote = FALSE, row.names = FALSE, col.names = FALSE)
########### make finemap commands ##############
command_finemap <- sprintf("/fs/projects/ukbb/christian/binaries/180419/finemap_v1.4_x86_64 --cond --in-files /fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/master/master_chr%d_%d_%d --log --n-causal-snps 30",
trait, chr, chr,regions[jj,1], regions[jj,2])
write.table(command_finemap, sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/finemap/finemap_cond_chr%d_%d_%d.sh",
trait, chr, chr, regions[jj,1], regions[jj,2]),
quote = FALSE, row.names = FALSE, col.names = "#!/bin/bash")
infile.finemap[jj] <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/finemap/finemap_cond_chr%d_%d_%d.sh",
trait, chr, chr, regions[jj,1], regions[jj,2])
write.table(infile.finemap, file = sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/finemap/finemap_cond_list.txt",trait,chr),
quote = FALSE, row.names = FALSE, col.names = FALSE)
}
}
}
###########################
### PAR ###
###########################
#chr <- "PAR"
#PAR.list <- fread("/fs/projects/ukbb/yu/ukb_imp_chrXY_snplist.txt")
#
#regions <- read.table(sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/regions/merged_region_1mb.txt",
# trait, chr), header = FALSE)
#auto.res <- subset(res.x, BP %in% PAR.list$position)
#filenames <- c()
#infile.ld <- c()
#infile.finemap <- c()
#
#for (jj in 1:nrow(regions)){
# subres <- subset(auto.res, BP > regions[jj,1] & BP < regions[jj,2])
# output <- sprintf("BOLT_sex_combined_s343695_WhiteBritish_MAF_0.01_%s_%s_%d_%d",
# trait, chr,regions[jj,1],regions[jj,2])
# BOLT2z(subres, sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/z_files/%s",
# trait,chr,output))
# filenames <- c(filenames,output)
# print(paste("The number of SNPs in", chr, regions[jj,1], regions[jj,2], ":", nrow(subres)))
####### make master files for each region ########
# z <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/z_files/%s.z",
# trait,chr,output)
#ld <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/ld/%s.ld",
#trait,chr,output)
# bcor <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/ld/%s.bcor",
# trait,chr,output)
# bdose <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/ld/%s.bdose",
# trait,chr,output)
# bgen <- "/fs/projects/ukbb/geno_v3/ukb_imp_chrXY_v3.bgen"
# bgi <- "/fs/projects/ukbb/geno_v3/ukb_imp_chrXY_v3.bgen.bgi"
# sample <- "/fs/projects/ukbb/geno_v3/ukb22627_imp_chrXY_v3_s486429.sample"
# incl <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/sex_combined_BOLT_%s.incl",trait)
# n_samples <- as.integer(system2("wc",
# args = c("-l",
# incl,
# " | awk '{print $1}'"),
# stdout = TRUE))
# snp <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/finemap/%s.snp",
# trait,chr,output)
# config <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/finemap/%s.config",
# trait,chr,output)
# cred <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/finemap/%s.cred",
# trait,chr,output)
# log <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/finemap/%s.log",
# trait,chr,output)
# master <- cbind(z,bcor, bdose,bgen,bgi,sample,incl,n_samples,snp,config,cred,log)
# write.table(master, file = sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/master/master_%s_%d_%d",
# trait, chr, chr, regions[jj,1], regions[jj,2]),
# quote = FALSE, row.names = FALSE, col.names = TRUE, sep = ";")
#infile[jj] <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/master/master_chr%d_%d_%d",
# trait, chr, chr, regions[jj,1], regions[jj,2])
#write.table(infile, file = sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/master_list.txt",trait,chr),
# quote = FALSE, row.names = FALSE, col.names = FALSE)
########### make ld commands ##############
# command_ld <- sprintf("/fs/projects/ukbb/christian/binaries/150419/ldstore_v2.0b_x86_64 --in-files /fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/master/master_%s_%d_%d --write-bcor --write-bdose --n-threads 50 --cpu-mem 100",
# trait, chr, chr,regions[jj,1], regions[jj,2])
# write.table(command_ld,sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/ld/ldstore_%s_%d_%d.sh",
# trait, chr, chr, regions[jj,1], regions[jj,2]),
# quote = FALSE, row.names = FALSE, col.names = "#!/bin/bash")
# infile.ld[jj] <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/ld/ldstore_%s_%d_%d.sh",
# trait, chr, chr, regions[jj,1], regions[jj,2])
# write.table(infile.ld, file = sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/ld/ldstore_list.txt",trait,chr),
# quote = FALSE, row.names = FALSE, col.names = FALSE)
########### make finemap commands ##############
# command_finemap <- sprintf("/fs/projects/ukbb/christian/binaries/180419/finemap_v1.4_x86_64 --cond --in-files /fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/master/master_%s_%d_%d --log --n-causal-snps 30",
# trait, chr, chr,regions[jj,1], regions[jj,2])
# write.table(command_finemap, sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/finemap/finemap_cond_%s_%d_%d.sh",
# trait, chr, chr, regions[jj,1], regions[jj,2]),
# quote = FALSE, row.names = FALSE, col.names = "#!/bin/bash")
# infile.finemap[jj] <- sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/finemap/finemap_cond_%s_%d_%d.sh",
# trait, chr, chr, regions[jj,1], regions[jj,2])
# write.table(infile.finemap, file = sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/%s/finemap/finemap_cond_list.txt",trait,chr),
# quote = FALSE, row.names = FALSE, col.names = FALSE)
#}
|
396164188aa3e57a70775fcd7462965fbb555fbb
|
49ff0bc7c07087584b907d08e68d398e7293d910
|
/mbg/mbg_core_code/mbg_central/LBDCore/R/get_max_runtime_by_queue.R
|
0c6f69e9db1cafa6236d3fede652e608b6d8db11
|
[] |
no_license
|
The-Oxford-GBD-group/typhi_paratyphi_modelling_code
|
db7963836c9ce9cec3ca8da3a4645c4203bf1352
|
4219ee6b1fb122c9706078e03dd1831f24bdaa04
|
refs/heads/master
| 2023-07-30T07:05:28.802523
| 2021-09-27T12:11:17
| 2021-09-27T12:11:17
| 297,317,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 471
|
r
|
get_max_runtime_by_queue.R
|
#' @title Get max runtime of queue
#'
#' @description Check the input string of queue and the maximum runtime available for that queue
#'
#' @param queue String. User supplied queue
#'
#' @return a string of run-time in format \code{DD:HH:MM:SS}
#'
#' @export
get_max_runtime_by_queue <- function(queue) {
  # Use base grepl() instead of data.table's `%like%` so this helper has no
  # package dependency; `%like%` is a thin wrapper around grepl(pattern, x),
  # and both patterns here are plain substrings (fixed matching is safe).
  if (grepl("all", queue, fixed = TRUE)) {
    return("03:00:00:00")
  } else if (grepl("geospatial", queue, fixed = TRUE)) {
    return("25:00:00:00")
  } else {
    # Default cap for any other queue name.
    return("16:00:00:00")
  }
}
|
e24adca993d3bfff1421491481b681eac72c6c8f
|
d9b893e267d456d44d7188994a89a078b2902224
|
/Homeworks/Homework_3/jachimek_sebastian.R
|
3d62fc12253425c15e0b66489bd02ad62c3ab124
|
[
"MIT"
] |
permissive
|
wojciechwojnar/PiADwR
|
d9cfd90de98b7d2d6855e3b6cc7bd919a67881f2
|
80f61be0e383d21c2f11f7c9abb0cbcae2071fee
|
refs/heads/main
| 2023-05-31T01:56:41.687433
| 2021-02-01T23:57:54
| 2021-02-01T23:57:54
| 308,119,264
| 0
| 1
|
MIT
| 2020-10-28T19:22:49
| 2020-10-28T19:22:48
| null |
WINDOWS-1250
|
R
| false
| false
| 2,677
|
r
|
jachimek_sebastian.R
|
library(data.table)
library(dplyr)
library(lubridate)
library(stringr)
# Read every file in ./data and stack the tables into one data.table.
gas_file <- list.files("./data", full.names = TRUE)
gas_dfs_dt <- lapply(gas_file, fread)
gas_dt <- rbindlist(gas_dfs_dt)
# Keep only the columns needed for the exercises, then rename them.
gas_dt <- gas_dt[, c("State Name", "County Name", "City Name",
"Local Site Name", "Date Local", "Parameter Name",
"Sample Duration", "Arithmetic Mean")]
names_of_cols <- c("State", "County", "City", "Site", "Date", "Pollutant", "SampleDuration", "MeasuredValue")
setnames(gas_dt, colnames(gas_dt), names_of_cols)
head(gas_dt)
# Number of distinct pollutants measured per site and day.
gas_dt[, NumPollutants := uniqueN(Pollutant), by = c("State", "County", "City", "Site", "Date")]
# TASK 1: turn the plain dates into date-times with a random time of day.
dates <- gas_dt[, Date]
datetimes <- paste(as.character(dates), paste(sample(0:23, length(dates), replace = TRUE),
sample(0:59, length(dates), replace = TRUE), sample(0:59, length(dates), replace = TRUE), sep = ":"))
datetimes2 <- ymd_hms(datetimes)
gas_dt[, Date := datetimes2]
head(gas_dt)
# TASK 2: summary statistics of the measured values by month and year.
rounded_month <- floor_date(gas_dt[, Date], unit = "month")
rounded_year <- floor_date(gas_dt[, Date], unit = "year")
gas_stats <- gas_dt[, .(mean(MeasuredValue), median(MeasuredValue), min(MeasuredValue), max(MeasuredValue)),
by = list(month(rounded_month), year(rounded_year))]
gas_stats_names <- c("Month", "Year", "MeanValue", "Median", "MinValue", "MaxValue")
setnames(gas_stats, gas_stats_names)
head(gas_stats)
# TASK 3: split the date-time into separate component columns.
## a) treating the date as a date (lubridate accessors)
get_year <- year(gas_dt[, Date])
get_month <- month(gas_dt[, Date])
get_day <- day(gas_dt[, Date])
get_hour <- hour(gas_dt[, Date])
get_minute <- minute(gas_dt[, Date])
get_second <- second(gas_dt[, Date])
gas_dt_copy1 <- gas_dt[, -5] # make a copy (without the Date column) to which the component columns are added (treating the date as a date)
gas_dt_copy1[, c("Year", "Month", "Day", "Hour", "Minute", "Second") :=
list(get_year, get_month, get_day, get_hour, get_minute, get_second)]
head(gas_dt_copy1)
## b) treating the date as a string (stringr splitting)
gas_dt_copy2 <- gas_dt[, -5] # make a copy (without the Date column) to which the component columns are added (treating the date as a string)
dtms1 <- str_split(datetimes, "-", simplify = TRUE)
dtms1_prime <- dtms1[, 1:2] # matrix of years and months
dtms2 <- str_split(dtms1[, 3], " ", simplify = TRUE)
dtms2_prime <- dtms2[, 1] # matrix of days
dtms3 <- str_split(dtms2[, 2], ":", simplify = TRUE) # matrix of hours, minutes and seconds
dtms <- cbind(dtms1_prime, dtms2_prime, dtms3)
gas_dt_copy2[, c("Year", "Month", "Day", "Hour", "Minute", "Second") :=
list(dtms[, 1], dtms[, 2], dtms[, 3], dtms[, 4], dtms[, 5], dtms[, 6])]
head(gas_dt_copy2)
|
b1e6f1f534b3765d6537e4fc300d5c828017b5c8
|
02da0605981b46d021d0ff9cd321e811c28edf5d
|
/alluvial_plots.R
|
9cdc2b08b5dbbdcef27363e15dbdcc18b0dfdf6c
|
[] |
no_license
|
turkjr19/alluvial
|
fb41ffb154848e6e1307d6a28a495fb9b82e8229
|
6bb5f819f70fda7c20ede237a6fb01cfe589c225
|
refs/heads/main
| 2023-02-27T17:38:51.278320
| 2021-02-01T21:37:35
| 2021-02-01T21:37:35
| 335,090,406
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,276
|
r
|
alluvial_plots.R
|
library(tidyverse)
library(ggalluvial)
library(RColorBrewer)
# Load in previously cleaned data from OHL scrape
my_data <- read_csv("~/Documents/hockey/OHL_OIR/my_data.csv")
# create data frame based on team and goal scorer:
# one row per unique (scorer, assist1, assist2) combination on Ty
# Dellandrea goals for FLNT, with a count of occurrences.
who_contributed <- my_data %>%
  filter(scoring_team == "FLNT",
         goal.scorer == "Ty Dellandrea") %>%
  select(goal.scorer, assist1, assist2) %>%
  group_by(goal.scorer, assist1, assist2) %>%
  summarise(count = n()) %>%
  arrange(goal.scorer, assist1, assist2)
# set text size
geom.text.size = 3
theme.size = (14/5) * geom.text.size
# plot: three-axis alluvial (goal scorer -> primary assist -> secondary assist)
ggplot(who_contributed, aes(y = count,
                            axis1 = goal.scorer,
                            axis2 = assist1,
                            axis3 = assist2)) +
  geom_alluvium(aes(fill = assist1),
                width = 0) +
  # guides(fill = FALSE) is deprecated since ggplot2 3.3.4; "none" is the
  # supported equivalent.
  guides(fill = "none") +
  theme_minimal() +
  # NOTE(review): this sets the *colour* scale, but only `fill` is mapped
  # above, so the palette has no visible effect; kept once to preserve the
  # original plot. The original repeated this identical call a second time,
  # which only emitted a "scale already present" message and replaced the
  # first — the duplicate has been removed.
  scale_color_brewer(type = "qual", palette = "BuPu") +
  geom_stratum(width = 5/8, reverse = FALSE) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum)),
            reverse = FALSE, size = geom.text.size) +
  scale_x_continuous(breaks = 1:3, labels = c("Goal Scorer", "Primary Assist",
                                              "Secondary Assist")) +
  labs(y = "")
|
c86ccef641f693f21043fe794ddf5d093c2da7e5
|
dfe7e6cf00d94cf8f010abc68305f57dc362fcc2
|
/utils/recipes/step_recipe.R
|
eeb9309b9c8c0dd1f850e6c204966104188cbbae
|
[] |
no_license
|
pedrorio/statistical_learning
|
067564aa53967d23169d29ff1df8815d6f4d36ff
|
5e630f5d02ca59a57233705b381ef1c42b1e5810
|
refs/heads/master
| 2022-12-17T21:01:21.819776
| 2020-09-10T21:02:38
| 2020-09-10T21:02:38
| 293,506,305
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,249
|
r
|
step_recipe.R
|
# Low-level constructor for the custom "winsorise" recipe step.
# Bundles the step's state into a recipes `step` object; called when the
# step is first added (trained = FALSE) and again from prep() once the
# column selections have been resolved (trained = TRUE).
step_winsorise_new <- function(terms = NULL,
                               role = NA,
                               skip = FALSE,
                               trained = FALSE,
                               predictor_column_names = NULL,
                               outcome_column_name = NULL,
                               id = NULL) {
  # Collect the step's fields once, then hand them to recipes::step().
  step_fields <- list(
    subclass = "winsorise",
    terms = terms,
    role = role,
    skip = skip,
    trained = trained,
    predictor_column_names = predictor_column_names,
    outcome_column_name = outcome_column_name,
    id = id
  )
  do.call(recipes::step, step_fields)
}
# User-facing constructor for the "winsorise" step: captures the tidyselect
# column selectors from `...` and appends the (untrained) step to `recipe`.
step_winsorise <- function(recipe,
                           ...,
                           role = NA,
                           skip = FALSE,
                           trained = FALSE,
                           predictor_column_names = NULL,
                           outcome_column_name = NULL,
                           id = recipes::rand_id("winsorise")) {
  # Build the untrained step object first, then attach it to the recipe.
  new_step <- step_winsorise_new(
    terms = recipes::ellipse_check(...),
    role = role,
    skip = skip,
    trained = trained,
    predictor_column_names = predictor_column_names,
    outcome_column_name = outcome_column_name,
    id = id
  )
  recipes::add_step(recipe, new_step)
}
# prep method for the winsorise step: resolves the tidyselect `terms` into
# concrete column names against the training data's variable info, then
# re-creates the step as trained.
prep.step_winsorise <- function(x, training, info = NULL, ...) {
# NOTE(review): recipes::terms_select() is an older selection API — confirm
# it is still exported by the installed recipes version.
predictor_col_names <- recipes::terms_select(info = info, x$terms)
outcome_col_names <- recipes::terms_select(info = info, recipes::all_outcomes())
# Only the first declared outcome is used for the per-class grouping.
outcome_col_name <- outcome_col_names[1]
# Rebuild the step with trained = TRUE and the resolved column names.
step_winsorise_new(
terms = x$terms,
role = x$role,
skip = x$skip,
trained = TRUE,
predictor_column_names = predictor_col_names,
outcome_column_name = outcome_col_name,
id = x$id
)
}
# bake method for the winsorise step: clips extreme values of the non-factor
# columns, separately within each outcome class, and returns the data as a
# tibble (the contract for recipes bake methods).
bake.step_winsorise <- function(object, new_data, info = NULL, ...) {
  # Winsorise within each outcome class so values are clipped relative to
  # their own class distribution.
  outcome_classes <- sort(unique(new_data[[object$outcome_column_name]]))
  # Only non-factor columns are winsorised; factor columns are untouched.
  column_names <- colnames(new_data %>% dplyr::select(!where(is.factor)))
  # `old_data` keeps the pre-clipping values so every class is winsorised
  # against the original data, not partially-modified rows.
  new_data <- old_data <- as.data.frame(new_data)
  for (i in outcome_classes) {
    selection <- new_data[[object$outcome_column_name]] == i
    new_data[selection, column_names] <-
      robustHD::winsorize(
        old_data[selection, column_names],
        standardized = FALSE,
        centerFun = median,
        prob = 0.999
      )
  }
  # Bug fix: the original `new_data %>% tibble()` relies on tibble()'s
  # long-deprecated data-frame splicing; under current tibble versions it
  # would wrap the whole data frame as a single column. as_tibble() returns
  # the intended tibble of the original columns in every version.
  tibble::as_tibble(new_data)
}
# print method for the winsorise step (used when a recipe is printed):
# shows the selectors before training and the resolved columns after.
print.step_winsorise <- function(x, width = max(20, options()$width - 30), ...) {
cat("Winsorising ", sep = "")
# NOTE(review): `printer` is an internal recipes helper called unqualified
# here — this only resolves where recipes internals are in scope (e.g.
# inside the package); confirm availability if sourcing this file directly.
printer(untr_obj = x$terms, tr_obj = x$predictor_column_names, trained = x$trained, width = width)
# Return the step invisibly, as print methods conventionally do.
invisible(x)
}
|
ede5c81e9a0eb3e6ce4930dad3d197a0d4fa1d63
|
8765a5aca6a11e92ca8cd69b7f363db50d1ab9cf
|
/tamu/basic_stat/R/boxcox,yonkers(1).R
|
21aa290f71de373a3f2a98f931ecf3e5b6aa11cd
|
[] |
no_license
|
sadepu1915/data-science
|
4f6f041db6d9e5530b7470e656a181b568c5f4d7
|
64f847d50d1cfddb365f3fe9e2e4ebf62c27d626
|
refs/heads/master
| 2020-05-25T09:58:03.448166
| 2019-05-08T04:45:03
| 2019-05-08T04:59:31
| 42,747,175
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,309
|
r
|
boxcox,yonkers(1).R
|
# Box-Cox analysis of the Yonkers ozone data: profile the log-likelihood
# over the transformation parameter theta, plot it with a 95% CI, then
# compare normal probability plots for several candidate powers.
x = scan("u:/meth1/Rfiles/ozone2.DAT")
n = length(x)
y = abs(x)
ly = log(y)
# s = sum of log|x|, the Jacobian term of the Box-Cox log-likelihood.
s = sum(ly)
# Log-likelihood at theta = 0 (the log transform), handled separately
# because (x^t - 1)/t is undefined at t = 0.
yt0 = log(x)
varyt0 = var(yt0)
Lt0 = -1*s - .5*n*(log(2*pi*varyt0)+1)
# Profile the likelihood over theta in (-2, 2] with step .001.
# th and Lt grow inside the loop; fine for ~4000 iterations.
th = 0
Lt = 0
t = -2.01
i = 0
while(t < 2)
{t = t+.001
i = i+1
th[i] = t
yt = (x^t -1)/t
varyt = var(yt)
Lt[i] = (t-1)*s - .5*n*(log(2*pi*varyt)+1)
# Substitute the exact theta = 0 value where t is numerically zero.
if(abs(th[i])<1.0e-10)Lt[i]<-Lt0
if(abs(th[i])<1.0e-10)th[i]<-0
}
# The following outputs the values of the likelihood and theta and yields
# the value of theta where likelihood is a maximum
out = cbind(th,Lt)
Ltmax= max(Lt)
imax= which(Lt==max(Lt))
thmax= th[imax]
#postscript("u:/meth1/psfiles/boxcox_yonkers.ps",horizontal=FALSE)
plot(th,Lt,main="Box-Cox Transformations, Yonkers Ozone",
xlab=expression(theta),type="l",lab=c(30,40,10),
ylab=expression(Lt(theta)))
#the following plots a 95\% c.i. for theta
# CI cutoff: Ltmax - 0.5 * chi-square(.95, df = 1); thetas whose profile
# likelihood lies within `del` of the cutoff bound the interval.
cic = Ltmax-.5*qchisq(.95,1)
del= .01
iLtci = which(abs(Lt-cic)<=del)
iLtciL= min(iLtci)
iLtciU= max(iLtci)
thLci= th[iLtciL]
thUci= th[iLtciU]
abline(h=cic)
abline(v=thLci)
abline(v=thUci)
abline(v=thmax)
#postscript("u:/meth1/psfiles/trans_yonkers1.ps",horizontal=FALSE)
# QQ plot of the untransformed data (Shapiro-Wilk result hard-coded in the
# legend from a previous run).
qqnorm(x,main="Normal Prob Plots of Yonkers Ozone Data",
lab=c(7,9,7),xlab="normal quantiles",ylab="ozone concentration",cex=.65)
qqline(x)
legend(-2,120,"SW=.9525\
p-value=.00006")
# Candidate power transforms: .318 (the profile optimum), .5, and .23.
y2= x^.318
y3= x^.5
y5= x^.23
s = shapiro.test(x)
s2 = shapiro.test(y2)
s3 = shapiro.test(y3)
s5 = shapiro.test(y5)
#postscript("u:/meth1/psfiles/trans_yonkers2.ps",horizontal=FALSE)
qqnorm(y2,main="Normal Prob Plots of Yonkers Ozone Data with (Ozone)^.318",
lab=c(7,9,7),xlab="normal quantiles",ylab=expression(Ozone^.318),cex=.65)
qqline(y2)
legend(-2,4.5,"SW=.9882\
p-value=.243")
#postscript("u:/meth1/psfiles/trans_yonkers3.ps",horizontal=FALSE)
qqnorm(y3,main="Normal Prob Plots of Yonkers Ozone Data with SQRT(Ozone)",
lab=c(7,9,7),xlab="normal quantiles",ylab=expression(Ozone^.5),cex=.65)
qqline(y3)
legend(-2,11,"SW=.9852\
p-value=.1151")
#postscript("u:/meth1/psfiles/trans_yonkers4.ps",horizontal=FALSE)
qqnorm(y5,main="Normal Prob Plots of Yonkers Ozone Data with (Ozone)^.23",
lab=c(7,9,7), xlab="normal quantiles",ylab=expression(Ozone^.23),cex=.65)
qqline(y5)
legend(-2,2.9,"SW=.9875\
p-value=.2064")
#graphics.off()
|
6bdea15b38a7637a68e517032b1a3fc45aea0043
|
57222f96e553dd2802316928f2f2c7825ef05197
|
/054-nvd3-line-chart-output/server.R
|
54db32f734caf196df93ee43f39e97cdb9af72ea
|
[
"MIT"
] |
permissive
|
rstudio/shiny-examples
|
6815bb4d8198e4b90765926a4865fdef1d1dc935
|
c7bf00db4a8a68e579e39ed07d516a33661a853e
|
refs/heads/main
| 2023-08-17T16:35:03.411795
| 2023-08-03T19:51:30
| 2023-08-03T19:51:30
| 13,722,949
| 2,046
| 4,576
|
NOASSERTION
| 2023-08-03T19:51:31
| 2013-10-20T17:05:23
|
JavaScript
|
UTF-8
|
R
| false
| false
| 337
|
r
|
server.R
|
# Shiny server function: renders the nvd3 line chart, where each column of
# the returned data frame becomes one series.
function(input, output, session) {
  output$mychart <- renderLineChart({
    # Shared x-grid for all three series; the phase slider is in degrees.
    xs <- 1:100/10
    phase <- input$sinePhase * pi/180
    # Return a data frame. Each column will be a series in the line chart.
    data.frame(
      Sine = sin(xs + phase) * input$sineAmplitude,
      Cosine = 0.5 * cos(xs),
      "Sine 2" = sin(xs) * 0.25 + 0.5
    )
  })
}
|
85972dd76acc4a69ddc59d6d05de6675d5e93fb5
|
ee6313c363be9f5ae48a4ddd08fdf68194e408f3
|
/plot3.R
|
4839bbe6318fddab4f9dcc68673b4e19664df08e
|
[] |
no_license
|
RechalC/ExploratoryDataAnalysisWeek4
|
b2ea13a6c3f54c8528902be9625e57f844bab439
|
8f65bd6895257ad05994fc95ba5b5a6333c7c817
|
refs/heads/master
| 2020-05-23T17:04:32.424037
| 2019-05-15T16:55:37
| 2019-05-15T16:55:37
| 186,860,896
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 510
|
r
|
plot3.R
|
# Call Packages dplyr, bindrcpp & ggplot2
library(dplyr)
library(bindrcpp)
library(ggplot2)
# `baltdata` (the Baltimore subset of the emissions data) is never created
# in this script — it must be loaded by an earlier step. Fail fast with a
# clear message instead of erroring mid-plot.
if (!exists("baltdata")) {
  stop("Object 'baltdata' not found: load the Baltimore emissions subset first.",
       call. = FALSE)
}
# Baltimore yearly emmisisons data, totalled by source type
baltYrTypEmm <- aggregate(Emissions ~ year + type, baltdata, sum)
# Build the plot object before opening the graphics device, so a failure
# here cannot leave an unclosed PNG device behind (the original opened the
# device first).
chart <- ggplot(baltYrTypEmm, aes(year, Emissions, color = type)) +
  geom_line() +
  xlab("year") +
  ylab(expression('Total Emissions')) +
  ggtitle('Total Baltimore Emissions [2.5]* From 1999 to 2008')
# plot3.png
png("plot3.png", width=480, height=480)
print(chart)
dev.off()
|
90e45f6d8ae79a55e2f6504f5366c98335b3c369
|
263d8a552164c80e9171e5855aac5d0a82841810
|
/MarcoScutari_BayesianNetworksInR/Chapter1: Discrete Case/ex1.4.R
|
9ff54916621eed72b479b4cb62595e1bfcef8a8d
|
[] |
no_license
|
statisticallyfit/RBayesianStatistics
|
9d66714fb06410b112337611dfbaa75055c66c8b
|
959f2a13719f3f870faccc4d33b474f04e1d8cad
|
refs/heads/master
| 2021-01-23T01:17:05.967248
| 2018-11-23T07:45:13
| 2018-11-23T07:45:13
| 92,862,115
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,051
|
r
|
ex1.4.R
|
# 1. In bn.mle compare distribution of O|A with corresponding one from querygrain
# (`bn.mle` is the discrete Bayesian network fitted earlier in the chapter;
# the gRain junction tree gives exact conditional probabilities.)
junction.mle <- compile(as.grain(bn.mle))
jage.young <- setEvidence(junction.mle, "A", states="young")
jage.old <- setEvidence(junction.mle, "A", states="old")
# P(O)
querygrain(junction.mle, nodes = "O")$O
# P(O | A = young)
querygrain(jage.young, nodes="O")$O
# P(O | A = old)
querygrain(jage.old, nodes="O")$O
# 2. How many random observations are needed for cpquery to make estimates of
# parameters of these two distributions with precision +- 0.01?
set.seed(123)
cpquery(bn.mle, event = (O == "emp"),
evidence = list(A = "young"), method = "lw",
n = 10^3) # enough for likelihood weighting
cpquery(bn.mle, event = (O == "emp"),
evidence = (A == "young"), method = "ls",
n = 10^4) # enough for logic sampling
# 3. Extract dag from bn.mle
dag.ex3 <- bn.net(bn.mle)
graphviz.plot(dag.ex3)
# 4. Which nodes d-separate Age and Occupation?
# Bug fix: the original referenced an undefined object `dag`; the DAG
# extracted in step 3 above is named `dag.ex3`.
sapply(nodes(dag.ex3), function(z) dsep(dag.ex3, "A", "O", z))
# ... dsep(dag.ex3, "A", "O", "T")
|
2dc5468e5d0041d3344e2a2101945a4c639e8bcb
|
6fb04083c9d4ee38349fc04f499a4bf83f6b32c9
|
/tests/testthat/test_survfit.R
|
78b2a7f1fd63188340fc074a1073cc051229daae
|
[] |
no_license
|
phani-srikar/AdapteR
|
39c6995853198f01d17a85ac60f319de47637f89
|
81c481df487f3cbb3d5d8b3787441ba1f8a96580
|
refs/heads/master
| 2020-08-09T10:33:28.096123
| 2017-09-07T09:39:25
| 2017-09-07T09:39:25
| 214,069,176
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,967
|
r
|
test_survfit.R
|
# Set up paired environments: Renv holds reference results computed with the
# survival package in R; FLenv mirrors it with in-database (FL) objects so
# the tests below can compare the two implementations.
Renv = new.env(parent = globalenv())
FLenv = as.FL(Renv)
# Reference data pulled straight from the database table.
Renv$data <- sqlQuery(connection,paste0("SELECT DataSetID,Gender,TIME_VAL,STATUS ",
" FROM vwWHAS100 ORDER by 1,2"))
# Reference fit: one Kaplan-Meier survfit per (DataSetID, Gender) group.
Renv$fit <- dlply(Renv$data,c("DataSetID","Gender"),
function(x)
survival::survfit.formula(Surv(TIME_VAL,STATUS)~1,
data=x,
conf.type="plain"))
# FL fit: the same model run in-database, grouped by Gender.
FLenv$data <- FLTableMD("vwWHAS100","DataSetID","ObsID")
FLenv$fit <- survfit(Surv(TIME_VAL,STATUS)~1,
data=FLenv$data,
GroupBy="Gender")
## Testing with R example
# Second fixture: the aml dataset from the survival package, ungrouped.
dat <- survival::aml
colnames(dat) <- c("ftime","fstatus","x")
Renv$data2 <- dat
# dropFLTestTable()
FLenv$data2 <- as.FLTable(Renv$data2)
FLenv$fit2 <- survfit(Surv(ftime, fstatus) ~ 1,
data = FLenv$data2)
Renv$fit2 <- survival::survfit.formula(Surv(ftime, fstatus) ~ 1,
data = Renv$data2,
conf.type="plain")
for(i in 1:2){
## Fetch Results
test_that("Kaplan-Meier with groupBy and dlply result equality: Fetching result",{
result = eval_expect_equal({
if(class(fit)!="list")
fit <- list(fit)
res1 <- lapply(fit,function(x){
x<-fFetchFLSurvfit(x)
x$call<-NULL
x$std.err <- NULL
x$strata <- NULL
x$PetoEstimate <- NULL
x
})
# if(length(res1)==1)
# res1 <- res1[[1]]
},Renv,FLenv,
noexpectation="res1")
})
##
## NAN in R <=> 0 in FL!
test_that("Kaplan-Meier with groupBy and dlply result equality: upper and lower",{
vtemp <- lapply(1:length(Renv$res1),
function(x){
Rupper <- Renv$res1[[x]]$upper
FLupper <- FLenv$res1[[x]]$upper
Rlower <- Renv$res1[[x]]$lower
FLlower <- FLenv$res1[[x]]$lower
expect_equal(Rupper[!is.na(Rupper)],FLupper[!is.na(Rupper)])
expect_equal(Rlower[!is.na(Rlower)],FLlower[!is.na(Rlower)])
})
})
##
test_that("Kaplan-Meier with groupBy and dlply result equality: Without upper and lower",{
result = eval_expect_equal({
# if(class(res1)!="list")
# res1 <- list(res1)
res1 <- lapply(res1,function(x){
x$upper <- NULL
x$lower <- NULL
x
})
if(length(res1)==1)
res1 <- res1[[1]]
},Renv,FLenv,
expectation="res1")
})
Renv$fit <- Renv$fit2
FLenv$fit <- FLenv$fit2
}
|
9cd44ef80531aadc08f7fda4f8a6a4f48252a4ec
|
a585e4c8eacc2fb59aa5b566a74a7dc7eb28174b
|
/packrat/lib/x86_64-pc-linux-gnu/3.2.5/rlang/tests/testthat/test-lang-expr.R
|
05607e917bdbb2526729d054b0c530886ebc2e8e
|
[
"Apache-2.0"
] |
permissive
|
sol-eng/connect-roadshow
|
c86d179621539a7b03e3d6aea5b3b1df590585d0
|
8a1890b8cac9a03bfc07eb5a0a04683ffe063ddd
|
refs/heads/main
| 2021-11-30T00:34:54.811453
| 2019-03-15T19:40:43
| 2019-03-15T19:40:43
| 161,424,505
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 789
|
r
|
test-lang-expr.R
|
context("lang-expr")
# expr_text() --------------------------------------------------------
test_that("always returns single string", {
out <- expr_text(quote({
a + b
}))
expect_length(out, 1)
})
test_that("can truncate lines", {
out <- expr_text(quote({
a + b
}), nlines = 2)
expect_equal(out, "{\n...")
})
# expr_label() -------------------------------------------------------
test_that("quotes strings", {
expect_equal(expr_label("a"), '"a"')
expect_equal(expr_label("\n"), '"\\n"')
})
test_that("backquotes names", {
expect_equal(expr_label(quote(x)), "`x`")
})
test_that("converts atomics to strings", {
expect_equal(expr_label(0.5), "0.5")
})
test_that("truncates long calls", {
expect_equal(expr_label(quote({ a + b })), "`{\n ...\n}`")
})
|
adaf3a3a05645750a6c6c4793ebcf925d6681451
|
4a2272aa2e585ddf752fc7284dacc72ebcfb3b08
|
/plot4.R
|
ecd4b3ed729fa7857d99eec6b68945fb551317db
|
[] |
no_license
|
nimsathi/EDAProj2
|
a263256d217174b249e2b1e2437715ffd4457068
|
229bdd50a93439fdf6b5fd22b93fd24625d3dad1
|
refs/heads/master
| 2022-09-10T09:32:21.335594
| 2020-06-03T14:49:14
| 2020-06-03T14:49:14
| 269,039,229
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 728
|
r
|
plot4.R
|
library(dplyr)
library(stringr)
library(ggplot2)
SCC <- readRDS("Source_Classification_Code.rds")
NEI <- readRDS("summarySCC_PM25.rds")
# identify all coal sources
coalSources <- SCC %>%
filter(str_detect(EI.Sector, 'Coal'))
# total emissions from coal sources grouped by year
coalEmissions <- NEI %>%
filter(SCC %in% coalSources$SCC) %>%
group_by(year) %>%
summarise(totalEmissions = sum(Emissions)/1000)
coalEmissions$year <- as.factor(coalEmissions$year)
png('plot4.png', width=480, height=480)
g <- ggplot(data=coalEmissions, aes(x=year, y=totalEmissions)) +
geom_bar(stat="identity", fill="blue") +
labs(title="PM25 Emissions from coal sources", x="Year", y="PM2.5 Emissions (kiloton)")
print(g)
dev.off()
|
91c31ede61a47ecfa39e3edc2addd6979ba04a5f
|
4bcb78635e3d5ff3a47b5dcd0e5771f6eb360313
|
/cachematrix.R
|
c28c6d151c42e901e861ec5773b2a0b0f26ab0bd
|
[] |
no_license
|
zohairh/ProgrammingAssignment2
|
1a0a3857ff22f9249aa3928ad1926c376144b9b0
|
03079848051301d487e0bd3f3ea63058f3905966
|
refs/heads/master
| 2021-01-17T05:49:58.765997
| 2014-06-21T00:51:50
| 2014-06-21T00:51:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 942
|
r
|
cachematrix.R
|
## We will make a special matrix object called CacheMatrix, which will cache the inverse of the matrix within itself
## and contain a list of functions to get and set the matrix and the inverse
## This function creates a CacheMatrix object and optionally initializes the matrix
makeCacheMatrix <- function(x = matrix()) {
invm <- NULL
set <- function(y) {
x <<- y
invm <<- NULL
}
get <- function() x
getinv <- function() invm
setinv <- function(i) {
invm <<- i
}
list(get=get, set=set, getinv=getinv, setinv=setinv)
}
## this function calculates the inverse of the CacheMatrix unless the value is already chached,
## in which case it simply returns the cached value.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinv()
if (is.null(i)) {
m <- x$get()
i<-solve(m, ...)
x$setinv(i)
} else {
message("getting cached inverse")
}
i
}
|
3b4c78cc8abd360aeaf2ea0fe4ebffca9da282ce
|
461ea396babcd50cf55f83ba7e9b4e50e2588807
|
/myFunctions/R/miscellaneous.R
|
7bd3430f4b66445803a8f16f2bbf5d57e6e01da1
|
[] |
no_license
|
vikram-g/R-Functions
|
e9f7dca39b41fa3b2c5552dd0013b6f72fa480e9
|
aabb1ac6616b75ac23d4985af99036c8fb3c9a27
|
refs/heads/master
| 2020-03-27T00:33:00.735484
| 2018-08-22T03:27:38
| 2018-08-22T03:27:38
| 145,633,010
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,801
|
r
|
miscellaneous.R
|
#Get a percentage string from a decimal
#' Decimal to percent
#'
#' Function takes a decimal value and returns it formatted as percentage.
#'
#' @param x integer or list of interger that need to be formated as percentage
#' @param digits Number of digits after decimal point to retain. Defaults to 2
#' @return The list of decimal values formatted as percentage string.
#' @export
#'
decimal_to_percent <- function(x, digits = 2) {
paste0(formatC(x*100, format = "f", digits = digits),"%")
}
# Current time stamp - generally to append to files
#' Current time stamp
#
#' This function when called returns the current time stamp. This can be generally used to append time stamp to file names when writing them multiple times without over writing.
#' @param format Format of the timestamp to be returned
#' @return timestamp
#' @export
#'
timestamp <- function(format = "%y%m%d%H%M%S")
{
strftime(Sys.time(), format)
}
#' @export
# Not in function
'%ni%' <- function(x,y)!('%in%'(x,y))
#' Match strings that vary slightly
#'
#' Often the same bureau variables have different names depending on the data source. This function matches these subtly different names using restricted Damerau-Levenshtein string distnace and returns top 5 matches for each name. \cr
#' Note: Always ensure that generally longer version of string names are in nm1. Match rate turns bad if this condition is not met.
#' @param nm1 List of names that needs to be matched with nm2. Ensure that nm1 is generally longer in length than nm2.
#' @param nm2 List of names that needs to be matched with nm1
#' @return Data frame with one row for each names in first parameter nm1 and 5 columns with the best 5 matches for each of them.
#' @export
#'
match_names <- function(nm1, nm2){
# Removing any "_" or "." in the names
nm1_fixed <- unname(sapply(nm1, function(x) gsub("_|\\.","",x)))
nm2_fixed <- unname(sapply(nm2, function(x) gsub("_|\\.","",x)))
res <- data.frame(var = character(), match1 = character(),match2 = character(), match3 = character(), match4 = character(), match5 = character(),stringsAsFactors = F)
for (i in 1:length(nm2)){
distances <- stringdist::stringdist(unname(sapply(nm2_fixed[i], replace_nums_to_char))[1], unname(sapply(nm1_fixed, replace_nums_to_char)), method = "osa")
res[i,] <- c(nm2[i], nm1[order(distances)[1:5]])
}
return(res)
}
replace_nums_to_char <- function(str_to_replace){
nums_in_str <- unlist(strsplit(unlist(str_to_replace), "[^0-9]+")) # Picking the numbers in the string
nums_in_str <- nums_in_str[order(nums_in_str,decreasing = T)] # Sorting by number of digits so numbers with more digits are replaced before smaller ones
nums_in_str <- nums_in_str[nchar(nums_in_str) > 0] # Removing empty string
for (x in nums_in_str){ str_to_replace <- gsub(x,numbers2words(as.numeric(x)),str_to_replace)}
return (str_to_replace)
}
# Function to convert numbers to string equivalent from here https://gist.github.com/psychemedia/150cb9901529da58124a
numbers2words <- function(x){
helper <- function(x){
digits <- rev(strsplit(as.character(x), "")[[1]])
nDigits <- length(digits)
if (nDigits == 1) as.vector(ones[digits])
else if (nDigits == 2)
if (x <= 19) as.vector(teens[digits[1]])
else trim(paste(tens[digits[2]],
Recall(as.numeric(digits[1]))))
else if (nDigits == 3) trim(paste(ones[digits[3]], "hundred and",
Recall(makeNumber(digits[2:1]))))
else {
nSuffix <- ((nDigits + 2) %/% 3) - 1
if (nSuffix > length(suffixes)) stop(paste(x, "is too large!"))
trim(paste(Recall(makeNumber(digits[
nDigits:(3*nSuffix + 1)])),
suffixes[nSuffix],"," ,
Recall(makeNumber(digits[(3*nSuffix):1]))))
}
}
trim <- function(text){
#Tidy leading/trailing whitespace, space before comma
text=gsub("^\ ", "", gsub("\ *$", "", gsub("\ ,",",",text)))
#Clear any trailing " and"
text=gsub(" and$","",text)
#Clear any trailing comma
gsub("\ *,$","",text)
}
makeNumber <- function(...) as.numeric(paste(..., collapse=""))
#Disable scientific notation
opts <- options(scipen=100)
on.exit(options(opts))
ones <- c("", "one", "two", "three", "four", "five", "six", "seven",
"eight", "nine")
names(ones) <- 0:9
teens <- c("ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
"sixteen", " seventeen", "eighteen", "nineteen")
names(teens) <- 0:9
tens <- c("twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty",
"ninety")
names(tens) <- 2:9
x <- round(x)
suffixes <- c("thousand", "million", "billion", "trillion")
if (length(x) > 1) return(trim(sapply(x, helper)))
helper(x)
}
|
85a2d77857ec6c05a11963e2e1e53533746e7b1e
|
3f7d08494725e4a588519503c842fc5dd3c4e0b3
|
/man/attachment.Rd
|
9d377e73c4407a3f5c9cffab81563d9ee9efa576
|
[] |
no_license
|
kazuya030/gmailr
|
bcafd1a0e22517cba54a962da7a5adc04420e306
|
d8ac8429b5eb5b5aa4a25f955de10ca7f13fd3e1
|
refs/heads/master
| 2021-01-24T15:53:30.629433
| 2016-01-02T09:58:38
| 2016-01-02T09:58:38
| 48,846,830
| 0
| 1
| null | 2016-01-01T15:34:24
| 2015-12-31T11:06:49
|
R
|
UTF-8
|
R
| false
| true
| 820
|
rd
|
attachment.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/message.R
\name{attachment}
\alias{attachment}
\title{Retrieve an attachment to a message}
\usage{
attachment(id, message_id, user_id = "me")
}
\arguments{
\item{id}{id of the attachment}
\item{message_id}{id of the parent message}
\item{user_id}{gmail user_id to access, special value of 'me' indicates the authenticated user.}
}
\description{
Function to retrieve an attachment to a message by id of the attachment
and message. To save the attachment use \code{\link{save_attachment}}.
}
\examples{
\dontrun{
my_attachment = attachment('a32e324b', '12345')
save attachment to a file
save_attachment(my_attachment, 'photo.jpg')
}
}
\references{
\url{https://developers.google.com/gmail/api/v1/reference/users/messages/attachments/get}
}
|
f1951a15d1c5e06042c8124ec07308d8f8294497
|
4201e9b754760dc35fc0aeef9df5a8b9d801c47f
|
/bin/R-3.5.1/src/library/base/R/dynload.R
|
f13f1c5edd39c2d76cbfab37025c8a549d8ab9c5
|
[
"LGPL-2.1-only",
"GPL-2.0-only",
"GPL-2.0-or-later",
"LGPL-3.0-only",
"GPL-3.0-only",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lifebit-ai/exomedepth
|
cbe59cb7fcf2f9183d187f8d466c6620fb1a0c2e
|
5a775ae5e2a247aeadc5208a34e8717c7855d080
|
refs/heads/master
| 2020-03-27T12:55:56.400581
| 2018-10-11T10:00:07
| 2018-10-11T10:00:07
| 146,578,924
| 0
| 0
|
MIT
| 2018-08-29T09:43:52
| 2018-08-29T09:43:51
| null |
UTF-8
|
R
| false
| false
| 6,552
|
r
|
dynload.R
|
# File src/library/base/R/dynload.R
# Part of the R package, https://www.R-project.org
#
# Copyright (C) 1995-2018 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# https://www.R-project.org/Licenses/
dyn.load <-
if(.Platform$OS.type == "windows") {
function(x, local = TRUE, now = TRUE, ...) {
inDL <- function(x, local, now, ..., DLLpath = "")
.Internal(dyn.load(x, local, now, DLLpath))
inDL(x, as.logical(local), as.logical(now), ...)
}
} else {
function(x, local = TRUE, now = TRUE, ...)
.Internal(dyn.load(x, as.logical(local), as.logical(now), ""))
}
dyn.unload <- function(x)
.Internal(dyn.unload(x))
is.loaded <- function(symbol, PACKAGE = "", type = "")
.Internal(is.loaded(symbol, PACKAGE, type))
getNativeSymbolInfo <- function(name, PACKAGE, unlist = TRUE,
withRegistrationInfo = FALSE)
{
if(missing(PACKAGE)) PACKAGE <- ""
if(is.character(PACKAGE))
pkgName <- PACKAGE
else if(inherits(PACKAGE, "DLLInfo")) {
pkgName <- PACKAGE[["path"]]
PACKAGE <- PACKAGE[["info"]]
} else if(inherits(PACKAGE, "DLLInfoReference")) {
pkgName <- character()
} else
stop(gettextf("must pass a package name, %s or %s object",
dQuote("DLLInfo"),
dQuote("DllInfoReference")),
domain = NA)
syms <- lapply(name, function(id) {
v <- .Internal(getSymbolInfo(as.character(id), PACKAGE,
as.logical(withRegistrationInfo)))
if(is.null(v)) {
msg <- paste("no such symbol", id)
if(length(pkgName) && nzchar(pkgName))
msg <- paste(msg, "in package", pkgName)
stop(msg, domain = NA)
}
names(v) <- c("name", "address", "package", "numParameters")[seq_along(v)]
v
})
if(length(name) == 1L && unlist)
syms <- syms[[1L]]
else
names(syms) <- name
syms
}
getLoadedDLLs <- function() .Internal(getLoadedDLLs())
getDLLRegisteredRoutines <- function(dll, addNames = TRUE)
UseMethod("getDLLRegisteredRoutines")
getDLLRegisteredRoutines.character <- function(dll, addNames = TRUE)
{
dlls <- getLoadedDLLs()
w <- vapply(dlls, function(x) x[["name"]] == dll || x[["path"]] == dll, NA)
if(!any(w))
stop(gettextf("No DLL currently loaded with name or path %s", sQuote(dll)),
domain = NA)
dll <- which.max(w)
if(sum(w) > 1L)
warning(gettextf("multiple DLLs match '%s'. Using '%s'",
names(dll), dlls[[dll]][["path"]]),
domain = NA)
getDLLRegisteredRoutines(dlls[[dll]], addNames)
}
getDLLRegisteredRoutines.DLLInfo <- function(dll, addNames = TRUE)
{
## Provide methods for the different types.
if(!inherits(dll, "DLLInfo"))
stop(gettextf("must specify DLL via a %s object. See getLoadedDLLs()",
dQuote("DLLInfo")),
domain = NA)
info <- dll[["info"]]
els <- .Internal(getRegisteredRoutines(info))
## Put names on the elements by getting the names from each element.
if(addNames) {
els <- lapply(els, function(x) {
if(length(x))
names(x) <- vapply(x, function(z) z$name, "")
x
})
}
class(els) <- "DLLRegisteredRoutines"
els
}
print.NativeRoutineList <-
function(x, ...)
{
m <- data.frame(numParameters = sapply(x, function(x) x$numParameters),
row.names = sapply(x, function(x) x$name))
print(m, ...)
invisible(x)
}
### This is arranged as a ragged data frame. It may be confusing
### if one reads it row-wise as the columns are related in pairs
### but not across pairs. We might leave it as a list of lists
### but that spans a great deal of vertical space and involves
### a lot of scrolling for the user.
print.DLLRegisteredRoutines <-
function(x, ...)
{
## Create a data frame with as many rows as the maximum number
## of routines in any category. Then fill the column with ""
## and then the actual entries.
n <- lengths(x)
x <- x[n > 0]
n <- max(n)
d <- list()
sapply(names(x),
function(id) {
d[[id]] <<- rep.int("", n)
names <- vapply(x[[id]], function(x) x$name, "")
if(length(names)) d[[id]][seq_along(names)] <<- names
d[[paste(id, "numParameters")]] <<- rep.int("", n)
names <- sapply(x[[id]], function(x) x$numParameters)
if(length(names))
d[[paste(id, "numParameters")]][seq_along(names)] <<- names
})
print(as.data.frame(d), ...)
invisible(x)
}
getCallingDLLe <- function(e)
{
if (is.null(env <- e$".__NAMESPACE__.")) env <- baseenv()
if(!is.null(Ds <- get0("DLLs", envir = env)) && length(Ds))
Ds[[1L]] ## else NULL
}
getCallingDLL <-
function(f = sys.function(-1), doStop = FALSE)
{
e <- environment(f)
if(!isNamespace(e)) {
if(doStop)
stop("function is not in a namespace, so cannot locate associated DLL")
else
return(NULL)
}
if(is.null(r <- getCallingDLLe(e)) && doStop)
stop("looking for DLL for native routine call, but no DLLs in namespace of call")
## else
r
}
print.DLLInfo <- function(x, ...)
{
tmp <- as.data.frame.list(x[c("name", "path", "dynamicLookup")])
names(tmp) <- c("DLL name", "Filename", "Dynamic lookup")
write.dcf(tmp, ...)
invisible(x)
}
print.DLLInfoList <- function(x, ...)
{
if(length(x)) {
m <- data.frame(Filename = sapply(x, function(x) x[["path"]]),
"Dynamic Lookup" =
sapply(x, function(x) x[["dynamicLookup"]]))
print(m, ...)
}
invisible(x)
}
`[.DLLInfoList` <- function(x, ...) structure(NextMethod("["), class = class(x))
`$.DLLInfo` <- function(x, name)
getNativeSymbolInfo(as.character(name), PACKAGE = x)
|
cf07e8f4cf18378d52c32f4c8596869c1b231384
|
991c97b4f8697f7c4635b76f4449d83871dcefe2
|
/R/ornament.R
|
c33aafaec32ca4102f5f53b7f6883dd94e4ee544
|
[
"MIT"
] |
permissive
|
r-lib/pillar
|
803706f4fa4e7f039005f5a8ad27945869fe7d02
|
92095fcc0fffa6d2b2d42c3ec6017e7501c0c99b
|
refs/heads/main
| 2023-06-08T05:12:41.813170
| 2023-03-26T01:56:15
| 2023-03-26T01:56:15
| 91,374,446
| 146
| 40
|
NOASSERTION
| 2023-04-05T01:30:09
| 2017-05-15T19:04:32
|
R
|
UTF-8
|
R
| false
| false
| 1,337
|
r
|
ornament.R
|
#' Helper to define the contents of a pillar
#'
#' This function is useful if your data renders differently depending on the
#' available width. In this case, implement the [pillar_shaft()] method for your
#' class to return a subclass of "pillar_shaft" and have the [format()] method
#' for this subclass call `new_ornament()`. See the implementation of
#' `pillar_shaft.numeric()` and `format.pillar_shaft_decimal()` for an example.
#'
#' @param x A character vector with formatting,
#' can use ANYI styles e.g provided by the \pkg{cli} package.
#'
#' @param width An optional width of the resulting pillar, computed from `x` if
#' missing
#' @param align Alignment, one of `"left"` or `"right"`
#'
#' @export
#' @examples
#' new_ornament(c("abc", "de"), align = "right")
new_ornament <- function(x, width = NULL, align = NULL) {
if (is.null(width)) {
width <- get_max_extent(x)
}
ret <- structure(
x,
align = align,
class = "pillar_ornament"
)
ret <- set_width(ret, width)
ret
}
#' @export
print.pillar_ornament <- function(x, ...) {
writeLines(style_bold("<pillar_ornament>"))
writeLines(format(x, ...))
invisible(x)
}
#' @export
format.pillar_ornament <- function(x, width = NULL, ...) {
align <- attr(x, "align", exact = TRUE)
align(x, width = width %||% get_width(x), align = align)
}
|
bd0de9145f52546cd0455e7407344b65cc967d47
|
2a1a9c67e1605068227626c167d3e8d4511ec619
|
/cachematrix.R
|
da8abd794f73dc5742c5dd726625b211f81cfe08
|
[] |
no_license
|
Emsobaa/ProgrammingAssignment2
|
8c7349175a28e7b543b3788fade6e839377900ab
|
4f7dc9218ab2b6167bb403a37099ac4a16eab93e
|
refs/heads/master
| 2020-12-26T01:37:50.135503
| 2016-04-02T16:28:01
| 2016-04-02T16:28:01
| 55,235,704
| 0
| 0
| null | 2016-04-01T13:48:23
| 2016-04-01T13:48:22
| null |
UTF-8
|
R
| false
| false
| 1,769
|
r
|
cachematrix.R
|
## These functions cache a matrix and its inverse, to avoid recalculating its inverse if it's already known.
## makeCacheMatrix() creates a special matrix object that can cache the matrix itself,
## and can cache its inverse. If "M" is an invertible matrix, and "M-1" its inverse,
## makeCacheMatrix(M) will cache M, and makeCacheMatrix()$setinverse("M-1") will cache
## its inverse.
makeCacheMatrix <- function(x = matrix()) {
m<<-NULL
set <- function(y) { ## "set" function is to cache the Matrix ("m" gets NULL).
x <<- y
m <<- NULL
}
get <- function() x ## "get" returns the Matrix currently cached.
setinverse <- function(inverse) m <<- inverse ## "setinverse" function is to cache its inverse (manually)
getinverse <- function() m ## "getinverse" returns its inverse
list(set=set,get=get,setinverse=setinverse,getinverse=getinverse)
}
## cacheSolve() checks if the inverse of the matrix hasn't been cached yet.
## If it has been cached, then "getinverse()" is not NULL and cacheSolve
## returns the inverted matrix without any operation.
## If not, it gets the matrix cached previously in makeCacheMatrix() ( x$get() )
## to calculate (and cache) its inverse.
cacheSolve <- function(x, ...) {
m <- x$getinverse() ## if the inverse has been cached then m is not NULL
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get() ## Otherwise, the function gets the matrix cached previously
m <- solve(data, ...)
x$setinverse(m) ## and cache the inverse.
m
}
|
b3ad9fbcbb559a4c82511adb34951cc12a5be7b1
|
0172e34e195b6f2ce39eee04c3282bcd88836c87
|
/UdaCityLesson3/diamonds.R
|
55cb5594fcddfb69cc287ad26e99eea6dea6041b
|
[] |
no_license
|
o001705/RFolder
|
f9f1b47ffce2777f2017ebe40cab7d757b5877a4
|
a2bafca2fa036c6e8b02aa5257567d70aeab241c
|
refs/heads/master
| 2021-01-25T09:38:06.679537
| 2017-06-15T04:07:06
| 2017-06-15T04:07:06
| 93,474,176
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 412
|
r
|
diamonds.R
|
if (require("ggplot2") == FALSE) {
install.packages("ggplot2")
require("ggplot2")
}
data(diamonds)
summary(diamonds)
df[sapply(df, is.factor)]
print("Number of Observations: ")
nrow(diamonds)
print("Number of Variables: ")
ncol(diamonds)
i <- 0;
print("Number of ordered variables")
for (name in sapply(diamonds, class)) {
if (name[1] == "ordered" & name[2] == "factor"){
i <- i +1
}
}
print(i)
|
9e62f0f5dcb75d70a30bea357f553874b5c2246c
|
f885f99d0090261317b8528128a1a72958760610
|
/man/activity_instance_id.Rd
|
1b493b2539dabcd5dea0f3909d981b76f00f941a
|
[] |
no_license
|
BijsT/bupaR
|
7a78d0d15655866264bab2bb7882602804303272
|
19f5e63c7393be690addf3c3977f1d00d0cdbfaf
|
refs/heads/master
| 2021-08-26T06:40:32.388974
| 2017-11-21T23:12:47
| 2017-11-21T23:12:47
| 111,611,796
| 0
| 0
| null | 2017-11-21T23:11:10
| 2017-11-21T23:11:09
| null |
UTF-8
|
R
| false
| true
| 524
|
rd
|
activity_instance_id.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/activity_instance_id.R
\name{activity_instance_id}
\alias{activity_instance_id}
\title{Activity instance classifier}
\usage{
activity_instance_id(eventlog)
}
\arguments{
\item{eventlog}{An object of class \code{eventlog}.}
}
\description{
Get the activity instance classifier of an object of class \code{eventlog}.
}
\seealso{
\code{\link{eventlog}}, \code{\link{activity_id}},
\code{\link{timestamp}},
\code{\link{case_id}}
}
|
50e72abc07d89345b81ede3a7eee31fa4d646ceb
|
c734ebad500a91214f13c0d8cb802cb4b3226564
|
/deploy_MultiLabLeaderboard.R
|
3379fc560bceb82058508876154e32d7d49d7edd
|
[
"BSD-3-Clause"
] |
permissive
|
christacaggiano/MultiLabLeaderboard
|
5efd35cfa65852c58c4155c248def329d9d64313
|
893747a59792eb4152fa6575a9fbeda1ef69fea4
|
refs/heads/master
| 2021-05-24T15:38:31.285151
| 2020-04-06T22:58:30
| 2020-04-06T22:58:30
| 253,634,744
| 0
| 0
|
BSD-3-Clause
| 2020-04-06T22:59:49
| 2020-04-06T22:59:48
| null |
UTF-8
|
R
| false
| false
| 95
|
r
|
deploy_MultiLabLeaderboard.R
|
#deploy_MultiLabLeaderboard.R
#install.packages('rsconnect')
library(rsconnect)
deployApp()
|
ac4b70f8f0d26ea0d65a9af8bad90a331fa9b641
|
0072f4019005b613c4f0bb322f4ab67167520afb
|
/man/nse_BB.Rd
|
547b5d4919da4eddce4a84411db8d086a200bf87
|
[
"MIT"
] |
permissive
|
sudhi1989/nse1minR
|
629df41fd3c9bb85b708d4c58f63e01b71efd6f4
|
b68eb6c48f2c8856c503a8f343eebcfcf7983602
|
refs/heads/master
| 2020-07-01T17:08:29.135586
| 2016-11-29T15:57:25
| 2016-11-29T15:57:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 835
|
rd
|
nse_BB.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{nse_BB}
\alias{nse_BB}
\title{Individual stocks 1 Minute Chart data}
\format{A data frame with five variables: \code{id}, \code{date},
\code{time}, \code{open}, \code{high}, \code{low} , \code{close} and \code{vol}}
\usage{
nse_BB
}
\description{
Choosing individual stocks from other companies can have
advantages over fund allocation for the investors.
Predicting an individual stock price is not a easy task,
while it depends on many external sources such as company performance,
government policy, public expectation and media focus, etc.
Stock price are considered to be
very dynamic and some dependency with technical indicators.
These datasets are aggregated with their alphabetical order (groups)
}
\keyword{datasets}
|
a14e5575fa003e06507a407d14c0292fbb1052c7
|
c89f5c0195c40aafcb4757be733f7d16dd9116bf
|
/data/dominated.R
|
287a48850bf618320487b13cfe858e1f123f9e4d
|
[] |
no_license
|
maxconway/PartIII-diss
|
44d4bc384f16fe9f5b00e9bccca61dc480263d02
|
09fa04a5dd471bb76c2803062518a57735585636
|
refs/heads/master
| 2021-01-01T05:39:43.330409
| 2013-05-01T12:04:43
| 2013-05-01T12:04:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 747
|
r
|
dominated.R
|
stop('this function should be obtained via purl')
dominates <- function(a,b){
# does a dominate b?
any(a>b)&!any(a<b)
}
#helper function
singledom <- function(p,front){
stopifnot(is.data.frame(p) & nrow(p)==1) # this also covers NULL
for(i in 1:nrow(front)){
a=front[i,]
if(dominates(a,p)){
return(TRUE)
}
}
return(FALSE)
}
dominated <- function(p,front){
# if p is a point, points in front that dominate p
# if p is a set of points, is each point dominated?
if(nargs()==1){
return(dominated(p,p))
}
if(!nrow(p)>1){
return(apply(front,1,function(f){
dominates(f,p)
}))
}
if(is.data.frame(p) & nrow(p)>1){
return(apply(p,1,function(a){
singledom(a,front)
}))
}
}
|
28c55f69e8ef0ac1e5bcd93744ba3f681ceade16
|
c389484130a8245d4b634c56ca911daf54e8eee9
|
/home.R
|
857f2a15a72dcf6ba52db56c07bcefe5ce1e5889
|
[] |
no_license
|
SARussell-CO/prox
|
5d246375078e34e8012b32263c437e5ae23b9a7b
|
cab50cedafce7875a85c0b8899d89376b8a8c001
|
refs/heads/master
| 2021-08-23T00:35:15.617034
| 2017-12-01T23:03:08
| 2017-12-01T23:03:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,169
|
r
|
home.R
|
# This will be the home screen for doing several different functins using R
# Sync r and github 12/1
# Clean up prox data and assign clean data set to the desktop for import
source("proxClean.R")
proxClean()
# Most demos in a specific month for top n canvassers----
source("mostDemos.R")
# Load the data into an object using the read.csv function
prx <- read.csv(file = "")
# The default behavior is to look up the top ten canvassers for the current month
# in the current year but only to count Demo No Sales and Sales
mDemos(data = prx)
# These are some changes that I can implement later on:----
# Pull data on multiple months
# Pull data on multiple months for specific canvassers
# Plot production data for several canvassers over different time periods
# (Last one may be done best using a new function)
# Pull data for multiple canvassers over multiple months ----
# The first step is to create a list of canvasser names that we are interested in
# and then identify the months that we are interested in. We can modify the start and
# end periods for the data range but the default for both will be the current month
# of the year.
|
7028b7886df63569a93b170b58827487dec40ffc
|
89bfd2ed18de2845834ff4754f3be23bf45352cf
|
/Fig.5_TME_analysis/bioR17.pheatmap.R
|
8a6b3fad33bc2c42c130eb0c86c12f1a4e379caa
|
[] |
no_license
|
luyuitng/ICB_projects
|
060162beeed03d5aeecf286f53f015483bb6076f
|
2966dee54358759f3692a57df0dabb4a842cbc9e
|
refs/heads/main
| 2023-04-22T06:53:22.576300
| 2021-05-08T15:47:34
| 2021-05-08T15:47:34
| 365,549,511
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 661
|
r
|
bioR17.pheatmap.R
|
# bioR17.pheatmap.R -- draw a tumour-microenvironment heatmap with pheatmap,
# annotated with a per-sample clinical grouping.
#install.packages("pheatmap")
library(pheatmap)
inputFile="input2.txt"   # tab-separated data matrix (rows = features, cols = samples)
groupFile="group2.txt"   # tab-separated clinical grouping, one row per sample
outFile="heatmap.pdf"    # NOTE(review): defined but never used below -- see dev.off() note
# NOTE(review): setwd() with an absolute local path makes the script
# non-portable -- confirm/parameterize the directory before running.
setwd("E:\\Lenvatinib\\TIME")
rt=read.table(inputFile,header=T,sep="\t",row.names=1,check.names=F)    # read the data matrix
Type=read.table(groupFile, header=T, sep="\t", row.names=1, check.names=F)     # read the clinical grouping file
pheatmap(rt,
annotation=Type,
cluster_cols = F,          # keep the given column (sample) order
cluster_rows = F,          # keep the given row (feature) order
color = colorRampPalette(c("blue", "white", "red"))(50),
show_colnames = F,
#border_color ="NA",
fontsize = 8,
fontsize_row=6,
fontsize_col=6)
# NOTE(review): no pdf()/png() device is opened above, so dev.off() closes the
# interactive device (or errors if none is open). Presumably pdf(outFile) was
# intended before the pheatmap() call -- confirm.
dev.off()
|
8b240ed52f7a11f0e932080b03dc7b4ef409afaf
|
205bad3b5b7aeb85300e947a8db358b9536cb7b1
|
/ensg/miashs/fda/tps/tp1/saison.ufo.R
|
9f17c1e11a3e1c5ad82480fed3cf68fc8a8347db
|
[] |
no_license
|
philippe-preux/philippe-preux.github.io
|
744e93a59076a74caf5aeec02fedb383257592f6
|
1e34ad659c214b215134177d8e01f3c4052bfef1
|
refs/heads/master
| 2023-08-31T03:37:21.383038
| 2023-08-18T13:00:36
| 2023-08-18T13:00:36
| 150,579,919
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,352
|
r
|
saison.ufo.R
|
# saison.ufo.R -- seasonality of UFO sightings in the US: tabulate sightings
# by month, weekday and day-of-month, and plot counts over time.
# (Plot labels are intentionally in French -- they are runtime strings.)
ufo <- read.csv ("ufo.us.csv")
ufo$DateOccurred <- as.character (ufo$DateOccurred)
ufo$DateOccurred <- as.Date (ufo$DateOccurred)
ufo$month <- months.Date (ufo$DateOccurred)
month.occurence <- table (ufo$month)
# Reorder the months into calendar order (table() sorts them alphabetically).
# as.table() is needed so the object keeps class "table" after subsetting.
# NOTE(review): the index vector assumes French month names in alphabetical
# order (avril, aout, ... ) -- locale-dependent; confirm before reuse.
month.occurence <- as.table (month.occurence [c (5, 4, 9, 2, 8, 7, 6, 1, 12, 11, 10, 3)])
# Same reordering for the days of the week (locale-dependent index as well).
ufo$weekday <- weekdays (ufo$DateOccurred)
weekday.occurence <- table (ufo$weekday)
weekday.occurence <- as.table (weekday.occurence [c (3, 4, 5, 2, 7, 6, 1)])
# Same for the day of the month (1..31; numeric, so no reordering needed).
ufo$mday <- unclass (as.POSIXlt (ufo$DateOccurred))$mday
mday.occurence <- table (ufo$mday)
pdf ("weekday.occurence.pdf")
plot (weekday.occurence, xlab = "Jour de la semaine", ylab = "Nombre d'observations", main = "Répartition par jour de la semaine des observations d'OVNI aux États-Unis")
dev.off()
pdf ("month.occurence.pdf")
plot (month.occurence, xlab = "Mois", ylab = "Nombre d'observations", main = "Répartition par mois des observations d'OVNI aux États-Unis")
dev.off()
pdf ("mday.occurence.pdf")
plot (mday.occurence, xlab = "Jour du mois", ylab = "Nombre d'observations", main = "Répartition par jour dans le mois des observations d'OVNI aux États-Unis")
dev.off()
# Correction for unequal day availability over the (assumed) 4-year sample:
# 45/48 months contain a 29th,
# 44/48 contain a 30th,
# 28/48 contain a 31st,
# so scale those counts up accordingly.
mday.occurence.corrige <- mday.occurence
mday.occurence.corrige [29] <- mday.occurence.corrige [29] * 48 / 45
mday.occurence.corrige [30] <- mday.occurence.corrige [30] * 48 / 44
mday.occurence.corrige [31] <- mday.occurence.corrige [31] * 48 / 28
pdf("mday.occurence.corrige.pdf")
plot (mday.occurence.corrige, col = "red", xlab = "Jour du mois",
ylab = "Nombre d'observations",
main = "Répartition par jour dans le mois des observations d'OVNI aux États-Unis, avec correction (en rouge)")
points (mday.occurence)
dev.off()
# Day index since the first observation (julian day, shifted to start at 0).
ufo$jj <- julian(ufo$DateOccurred)
ufo$jj <- ufo$jj - min (ufo$jj)
jj.occurence <- table (ufo$jj)
# Exploratory plots on the interactive device: daily counts, then the
# cumulative count curve with various renderings.
plot (jj.occurence)
plot (cumsum (jj.occurence))
plot (cumsum (jj.occurence), type = "l")
plot (cumsum (jj.occurence), pch = ".")
# Colour the daily counts by month (1..12).
ufo$month.num <- unclass (as.POSIXlt (ufo$DateOccurred))$mon + 1
plot (jj.occurence, col = ufo$month.num)
plot (jj.occurence, col = rainbow(12) [ufo$month.num])
|
d8e91333ba9732754195dbbac54658021256df6e
|
02301ac0beb647d501c19f5f34c7689a98e8dee4
|
/man/order_cpp.Rd
|
911e81d21e10a9965cab3a4d376e909f064f96e1
|
[] |
no_license
|
brenthueth/twosamples
|
53fc387eb3796294ac31c0710039f10134b53821
|
6e35086c405459d3aafc276d7dc625f72b54a24c
|
refs/heads/master
| 2020-11-26T18:52:48.921782
| 2018-12-03T16:14:39
| 2018-12-03T16:14:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 412
|
rd
|
order_cpp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{order_cpp}
\alias{order_cpp}
\title{Order in C++}
\usage{
order_cpp(x)
}
\arguments{
\item{x}{numeric vector}
}
\value{
same length vector of integers representing order of input vector
}
\description{
Simply finds the order of a vector in C++. Mostly for internal use.
}
\examples{
vec = c(1,4,3,2)
order_cpp(vec)
}
|
fc28df85e6287c56e1079cb1497f44dc4312ffc6
|
820c0a5f34c4e9899db608c6ccbdc3e2e853f2d8
|
/man/aki_stages.Rd
|
c4a929c7d5795689c83c4cea0871d253e223d5df
|
[
"MIT"
] |
permissive
|
alwinw/epocakir
|
887c0fd0b66251c67d922613e1420effff003611
|
a1bcd4567fb2d91cde53453dff5991af967c4860
|
refs/heads/master
| 2023-05-23T06:05:55.385634
| 2023-01-06T12:52:15
| 2023-01-06T12:52:15
| 296,596,576
| 4
| 1
|
NOASSERTION
| 2022-12-16T10:25:30
| 2020-09-18T11:05:01
|
R
|
UTF-8
|
R
| false
| true
| 451
|
rd
|
aki_stages.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aki.R
\docType{data}
\name{aki_stages}
\alias{aki_stages}
\title{AKI Stages}
\format{
An object of class \code{ordered} (inherits from \code{factor}) of length 4.
}
\usage{
aki_stages
}
\description{
Ordered factor of AKI stages
}
\details{
See \url{https://kdigo.org/guidelines/ckd-evaluation-and-management/} for more details
}
\examples{
aki_stages
}
\keyword{datasets}
|
cf165247d0945e3b2c8eff66007053c3955c74fc
|
eaa13fef2e1d8e6afe55513435a444224fa0f98c
|
/man/input_rich.Rd
|
b5de285d2907e36346e041db306895165337681f
|
[] |
no_license
|
lshep/LorisWorld
|
02b77b58486398eaba13fd2ac7552fa0ab7b6d5e
|
4dfc6fefb5e77d25b9c58476f2ccfce135286339
|
refs/heads/master
| 2021-07-04T14:42:11.130027
| 2020-10-14T14:29:46
| 2020-10-14T14:29:46
| 70,102,376
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 680
|
rd
|
input_rich.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rich.R
\name{input_rich}
\alias{input_rich}
\title{Rich data input}
\usage{
input_rich(pdata_file, exprs_file)
}
\arguments{
\item{pdata_file}{character(1) The path to the phenotype data file}
\item{exprs_file}{character(1) The path to the expression data file}
}
\value{
A SummarizedExperiment() containing samples as colData(), and the
expression values as assay()
}
\description{
Capture the semantic meaning of the data we represent. For instance,
separately recognizing the 'phenotypic' data (describing the sample) and the
'expression' values (derived from, e.g., microarray or RNA-seq data)
}
|
caa0f78e32c8af41de87699e630b2714b594adb2
|
5a83db1fc1af0b72ba8f8ad70293d14e10d14658
|
/ggmap.R
|
60c409818951d5a7bfcdf39b8bb0dd4d989a2eb3
|
[] |
no_license
|
dpebert7/Rsentiment
|
e3ca9cb3f3025e2bb6f519cbdbdc9085d5218ca6
|
a78a0f88ddd48104286c816c86f40252fb380d4b
|
refs/heads/master
| 2020-04-12T09:01:48.488311
| 2016-08-22T22:52:03
| 2016-08-22T22:52:03
| 51,659,917
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,190
|
r
|
ggmap.R
|
# Ebert/Rider
# Created 16 April 2016
# Purpose: Create sentiment score map of LA2014 data.

# Load a small sample of tweets (object x1) from storage.
# NOTE(review): `storage.directory` must be defined in the environment before
# sourcing this script -- confirm.
load(file = paste(storage.directory, "x1.RData", sep = ""))

# Drop high-volume accounts that swamp the sample (newspapers, check-in bots).
x1 <- x1[x1$username != "GoldstarPgh", ]  # Pittsburgh newspaper, >3000 tweets
x1 <- x1[x1$username != "GoldstarSA", ]   # San Antonio newspaper, >3000 tweets
x1 <- x1[x1$username != "LoansHomes", ]
x1 <- x1[x1$username != "AAAcali11", ]
x1 <- x1[x1$username != "Stompy_0487", ]
x1 <- x1[x1$username != "Tony_Lien", ]    # Tony Lien's check-ins
x1 <- x1[x1$username != "CreativeGali", ]

which.max(table(x1$username))
dim(x1[x1$username == "ThatBoyJoy", ])

# 2865 tweets; no real sentiment, but well travelled, apparently:
# NOTE(review): Tony_Lien rows were filtered out of x1 above, so this subset
# is empty -- subset before filtering (or drop that filter) if these maps are
# still wanted.
Tony_Lien <- x1[x1$username == "Tony_Lien", ]
save(Tony_Lien, file = paste(storage.directory, "Tony_Lien.RData", sep = ""))
Tony_Lien <- Tony_Lien[, c("lat", "long", "AFINN.polarity")]

# 2821 tweets; more what I was expecting, finally:
ThatBoyJoy <- x1[x1$username == "ThatBoyJoy", ]
save(ThatBoyJoy, file = paste(storage.directory, "ThatBoyJoy.RData", sep = ""))
ThatBoyJoy <- ThatBoyJoy[, c("lat", "long", "AFINN.polarity")]

library(ggmap)

# The Tony_Lien travel map.
# Bug fix: the original called ggmap(my_map), but no object named my_map was
# ever created; each ggmap() call now uses the map fetched just above it.
Tony_Lien_map <- get_map(location = 'West Covina', zoom = 9)
ggmap(Tony_Lien_map)
ggmap(Tony_Lien_map) +
  geom_point(aes(x = long, y = lat), data = Tony_Lien,
             alpha = .5, color = "darkred", size = 2)

# The ThatBoyJoy travel map.
ThatBoyJoy_map <- get_map(location = 'Bellflower', zoom = 12)
ggmap(ThatBoyJoy_map)
ggmap(ThatBoyJoy_map) +
  geom_point(aes(x = long, y = lat), data = ThatBoyJoy,
             alpha = .5, color = "darkred", size = 3)

# The ThatBoyJoy tile map (the original comment said "Tony_Lien" -- fixed).
ThatBoyJoy_map <- get_map(location = 'Bellflower', zoom = 12)
ggmap(ThatBoyJoy_map)
ggmap(ThatBoyJoy_map) +
  geom_tile(aes(x = long, y = lat), data = ThatBoyJoy,
            alpha = .5, color = "darkred", size = 3)

# Attempt at a heat map, using a Chicago crime map as template.
# NOTE(review): ThatBoyJoy has no Frequency column (only lat/long/
# AFINN.polarity), so aes(alpha = Frequency) will fail -- compute a count
# column first, or map alpha to an existing variable.
ggmap(ThatBoyJoy_map) +
  geom_tile(data = ThatBoyJoy, aes(x = long, y = lat, alpha = Frequency),
            fill = 'red') +
  theme(axis.title.y = element_blank(), axis.title.x = element_blank())
|
771267e044d2f1b1f8b13d696b0ba1c32d3e6ed4
|
f5147eb64730136d2a7d75a0f80325742a0d64b8
|
/man/compute_log_lik.Rd
|
ce032525864e5114e3eeed14624172b1f0222072
|
[] |
no_license
|
JohnNay/sa
|
04c3a8f84df5af9582660fed443982a3ac1f0fce
|
ca846dab26341ec271e7f1d9ab382744532cd73e
|
refs/heads/master
| 2021-01-10T16:36:46.461493
| 2016-04-18T18:16:06
| 2016-04-18T18:16:06
| 55,555,996
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 691
|
rd
|
compute_log_lik.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loss_functions.R
\name{compute_log_lik}
\alias{compute_log_lik}
\title{Compute Negative Log Likelihood}
\usage{
compute_log_lik(prediction, actual)
}
\arguments{
\item{prediction}{Numeric vector same length as actual}
\item{actual}{Numeric vector same length as prediction}
}
\value{
Numeric vector length one
}
\description{
Bernoulli likelihood: x = 0, 1; f(x,theta) = (theta^x)*(1-theta)^(1-x)
Bernoulli log-likelihood: $$ ln(L) = sum_{i=1}^I sum_{t=1}^T D_i^C(t) * ln(P_{i}^{C} (t)) + (1 - D_i^C(t)) * ln(1 - P_{i}^{C} (t)) $$
}
\examples{
compute_log_lik(runif(10), sample(c(1,0), 10, replace=TRUE))
}
|
45eb74ca40f256535953fc7be01dc8f6b4eb4f86
|
7560c46d48abf3267c412531fe12857f0a5c8100
|
/analise_china.R
|
90a6dd9138b2e296d555189d07300fca43f83ae5
|
[
"MIT"
] |
permissive
|
marcusfreire0504/desafio-datathon
|
a00595fdda9255f71fe56afb7692f5beeab9adeb
|
f23511bf0f643e53ad5d8c551d69b01d3e9b0009
|
refs/heads/master
| 2022-12-03T02:04:01.720628
| 2020-08-24T22:17:47
| 2020-08-24T22:17:47
| null | 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,199
|
r
|
analise_china.R
|
# analise_china.R -- compare returns, correlations and volatility of the
# Shanghai and Hong Kong indices against a crypto price series.
library(xts)
# NOTE(review): setwd() with an absolute local path makes the script
# non-portable -- confirm/parameterize the directory before running.
setwd("C:/Users/JOAO VICTOR/Desktop/env/desafio-datathon")
shangai <- read.csv(file = "dado_shangai.csv", fill = FALSE)
hong_kong <- read.csv(file = "dado_hongkong.csv", fill = FALSE)
crypto <- read.csv(file = "dados_crypto.csv", fill = FALSE)
# Convert a data frame (date in column 1, prices in the remaining columns)
# into an xts time series indexed by date.
trans_ts <- function(data_prices){
# build the xts object from the price columns, indexed on the date column
stock_data <- xts(data_prices[,-1], order.by=as.Date(data_prices[,1], "%Y-%m-%d"))
return(stock_data)
}
# Total compounded return over the whole sample, in percent.
get_return <- function(data) {
data <- trans_ts(data)
# daily percent change; the leading NA (first observation) is filled with 0
pct_change_data <- (data/lag(data)) - 1
pct_change_data <- na.fill(pct_change_data, fill = 0)
# compound the daily changes into a cumulative return, expressed in %
return_data <- prod(pct_change_data + 1)
return_data <- (return_data - 1) * 100
return(return_data)
}
return_shangai <- get_return(shangai)
return_hongkong <- get_return(hong_kong)
return_crypto <- get_return(crypto)
dado_shangai <- trans_ts(shangai)
dado_hongkong <- trans_ts(hong_kong)
dado_crypto <- trans_ts(crypto)
# Daily percent changes of an xts series.
# NOTE(review): the leading NA is filled with 1 here (and in risk_sd below),
# while get_return fills it with 0. A fill of 1 means "+100% on day one" and
# will distort the correlations/volatility -- confirm whether 0 was intended.
pct_change <- function(dados){
pct_changes <- (dados/lag(dados)) - 1
pct_changes <- na.fill(pct_changes, fill = 1)
return(pct_changes)
}
pct_change_crypto <- pct_change(dado_crypto)
pct_change_shangai <-pct_change(dado_shangai)
pct_change_hongkong <- pct_change(dado_hongkong)
# Align each index with the crypto series on common dates (inner join).
merg_shangai_crypto <- merge(pct_change_shangai, pct_change_crypto, join="inner")
merg_hongkong_crypto <- merge(pct_change_hongkong, pct_change_crypto, join="inner")
cor_hongkong_geral <- cor(merg_hongkong_crypto)
cor_shangai_geral <- cor(merg_shangai_crypto)
# Volatility proxy: standard deviation of daily percent changes, in %.
# NOTE(review): same fill = 1 concern as pct_change() above.
risk_sd <- function(dados){
pct_changes <- (dados/lag(dados)) - 1
pct_changes <- na.fill(pct_changes, fill = 1)
sd_dado <- sd(pct_changes)*100
return(sd_dado)
}
# Convert to a monthly series (taking the last trading day of each month).
transf_mes <- function(dados){
dados[endpoints(dados,on='months')]
}
dados_Shangai <- transf_mes(merg_shangai_crypto)
dados_Hongkong <- transf_mes(merg_hongkong_crypto)
cor_shangai_mes <- cor(dados_Shangai)
cor_hongkong_mes <- cor(dados_Hongkong)
risk_hongkong <- risk_sd(dado_hongkong)
risk_shangai<- risk_sd(dado_shangai)
risk_crypto <- risk_sd(dado_crypto)
|
2220f1c5de9b08d5fc0b71ac701131784595933e
|
e77f2769ef4601cda5fcea35336f1dd1c6157b59
|
/plot1.R
|
5f8b7a1dca1fc4375214dd5440e2ecd121af1633
|
[] |
no_license
|
Elsabell/ExData_Plotting1
|
dd2d9dea8f1cdcb8badb3a3a25fd138146c4a85d
|
5c4265c0611c2787576082f3c1a946e4f7414496
|
refs/heads/master
| 2022-12-05T19:04:37.722536
| 2020-09-01T13:59:05
| 2020-09-01T13:59:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 762
|
r
|
plot1.R
|
## plot1.R -- histogram of Global Active Power for 2007-02-01/02 from the
## "household_power_consumption.txt" export, written to plot1.png.
library(dplyr)

# Read the full semicolon-separated export.
power_raw <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")
# dim(power_raw); names(power_raw); head(power_raw); str(power_raw)
# length(grep("^(1/2/2007|2/2/2007)$", power_raw$Date))

# Keep only the two target days and attach a parsed date-time column.
two_days <- power_raw[grepl("^(1/2/2007|2/2/2007)$", power_raw$Date), ]
two_days$DateTime <- strptime(paste(two_days$Date, two_days$Time),
                              "%d/%m/%Y %H:%M:%S")

# Plot 1
plot_data <- two_days
plot_data$Global_active_power <- as.numeric(plot_data$Global_active_power)
plot_data <- plot_data[!is.na(plot_data$Global_active_power), ]
png(filename = "plot1.png", width = 480, height = 480)
hist(plot_data$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency", col = "red")
dev.off()
dev.cur()
|
b1c3ca09e91bc052f507cde6399fb799a44786d0
|
695b88a36f548e410d8a4181ed7c6f433c7515a1
|
/man/lsmip.Rd
|
952961afcc15bff0bba82b0339c7396b274865b2
|
[] |
no_license
|
jonathon-love/lsmeans
|
16054e0a830df482fd6aa41b5a461535afb8d4bb
|
c6e91712705647bbd9aa2fa37e65929907fecca9
|
refs/heads/master
| 2021-01-12T00:31:38.801483
| 2017-08-02T11:31:32
| 2017-08-02T11:31:32
| 78,736,500
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,336
|
rd
|
lsmip.Rd
|
% Copyright (c) 2012-2016 Russell V. Lenth %
\name{lsmip}
\alias{lsmip}
\alias{lsmip.default}
\alias{pmmip}
\title{
Least-squares (predicted marginal) means interaction plot
}
\description{
This function creates an interaction plot of the least-squares means based on a fitted model and a simple formula specification.
}
\usage{
\method{lsmip}{default}(object, formula, type,
pch = c(1,2,6,7,9,10,15:20),
lty = 1, col = NULL, plotit = TRUE, ...)
pmmip(...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
An object of class \code{lsmobj}, or a fitted model of a class supported by \code{\link{lsmeans}}.
}
\item{formula}{
Formula of the form \code{trace.factors ~ x.factors | by.factors}. The least-squares means are plotted against \code{x.factor} for each level of \code{trace.factors}. \code{by.factors} is optional, but if present, it determines separate panels. Each element of this formula may be a single factor in the model, or a combination of factors using the \code{*} operator.
}
\item{type}{
As in \code{\link[=predict.ref.grid]{predict}}, this determines whether we want to inverse-transform the predictions (\samp{type="response"}) or not (any other choice). The default is \code{"link"}, unless the \code{"predict.type"} option is in force; see \code{\link{lsm.options}}.
}
\item{pch}{
The plotting characters to use for each group (i.e., levels of \code{trace.factors}). They are recycled as needed.
}
\item{lty}{
The line types to use for each group. Recycled as needed.
}
\item{col}{
The colors to use for each group, recycled as needed. If not specified,
the default trellis colors are used.
}
\item{plotit}{
If \code{TRUE}, the plot is displayed. Otherwise, one may use the \code{"lattice"} attribute of the returned object and print it, perhaps after additional manipulation.
}
\item{\dots}{
Additional arguments passed to \code{\link{lsmeans}} or to \code{\link[lattice]{xyplot}}.
}
}
\details{
If \code{object} is a fitted model, \code{\link{lsmeans}} is called with an appropriate specification to obtain least-squares means for each combination of the factors present in \code{formula} (in addition, any arguments in \code{\dots} that match \code{at}, \code{trend}, \code{cov.reduce}, or \code{fac.reduce} are passed to \code{lsmeans}).
Otherwise, if \code{object} is an \code{lsmobj} object, its first element is used, and it must contain one \code{lsmean} value for each combination of the factors present in \code{formula}.
The wrapper \code{pmmip} is provided for those who prefer the term \sQuote{predicted marginal means}.
}
\value{
(Invisibly), a \code{\link{data.frame}} with the table of least-squares means that were plotted, with an additional \code{"lattice"} attribute containing the \code{trellis} object for the plot.
}
\author{
Russell V. Lenth
}
\note{
This function uses the \code{\link[lattice]{xyplot}} function in the \code{lattice} package (an error is returned if \code{lattice} is not installed). Conceptually, it is equivalent to \code{\link{interaction.plot}} where the summarization function is the least-squares means.
}
\seealso{
\code{\link{interaction.plot}}
}
\examples{
require(lsmeans)
require(lattice)
#--- Two-factor example
warp.lm <- lm(breaks ~ wool * tension, data = warpbreaks)
# Following plot is the same as the usual interaction plot of the data
lsmip(warp.lm, wool ~ tension)
#--- Three-factor example
noise.lm = lm(noise ~ size * type * side, data = auto.noise)
# Separate interaction plots of size by type, for each side
lsmip(noise.lm, type ~ size | side)
# One interaction plot, using combinations of size and side as the x factor
lsmip(noise.lm, type ~ side * size)
# One interaction plot using combinations of type and side as the trace factor
# customize the colors, line types, and symbols to suggest these combinations
lsmip(noise.lm, type * side ~ size, lty=1:2, col=1:2, pch=c(1,1,2,2))
# 3-way interaction is significant, but doesn't make a lot of visual difference...
noise.lm2 = update(noise.lm, . ~ . - size:type:side)
lsmip(noise.lm2, type * side ~ size, lty=1:2, col=1:2, pch=c(1,1,2,2))
}
\keyword{ models }
\keyword{ regression }
|
82a20b9cc9cc4a8f161c5377a12ec0efce411365
|
af177c8bb0c9e496574f930e78584fe5c0f2dfe8
|
/man/set.Rd
|
7a2e2792666ec2608811cb07337c3149c72075ca
|
[] |
no_license
|
nteetor/roasted
|
03055011938b90ef290b9c8b29001862479a900d
|
ca2b17de3e31655642a7d3c4a10c3d52197cbb2d
|
refs/heads/master
| 2021-01-10T05:49:28.262776
| 2016-10-26T21:19:31
| 2016-10-26T21:19:31
| 51,217,371
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 166
|
rd
|
set.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set.R
\name{set}
\alias{set}
\title{Set}
\usage{
set()
}
\description{
A set object.
}
|
474178be1f01db9cc07ee5bcfb2ae32d87e326ff
|
5ac10be9eb74619aa005ba1a7594c15a7b3d8bb8
|
/NYC_Housing_Complaints.R
|
8f1973807a200a8c712d834a48fed5e30f70f86e
|
[] |
no_license
|
Gabrielepa/Capstone_Housing_complaints_R
|
be895d62dca1d049707fe7b52add9c486ffeb6f3
|
0d418161fedc6c31af3cf6dea732714bb884ae60
|
refs/heads/master
| 2021-05-17T16:22:55.106166
| 2019-09-25T20:28:40
| 2019-09-25T20:28:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,939
|
r
|
NYC_Housing_Complaints.R
|
## Library
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(leaflet)) install.packages("leaflet")
if(!require(randomForest)) install.packages("randomForest")
library(tidyverse)
library(ggplot2)
library(caret)
library(lubridate)
library(leaflet)
library(randomForest)
## **\textcolor{red}{Problem 1: Which type of complaint should the Department of Housing
## Preservation and Development of New York City focus on first?}**

# Download the HPD complaint extract from NYC Open Data.
# It may take several minutes (fileSize ~ 2.57GB, nrows ~ 5,800,000).
dl <- tempfile()
# Bug fix: the original URL string literal was broken across source lines,
# which embedded newlines/whitespace into the URL and made the request fail.
# Build it as a single string instead.
url <- paste0(
  "https://data.cityofnewyork.us/resource/fhrw-4uyv.csv?$limit=100000000&",
  "Agency=HPD&$select=created_date,unique_key,complaint_type,incident_zip,",
  "incident_address,street_name,address_type,city,resolution_description,",
  "borough,latitude,longitude,closed_date,location_type,status"
)
download.file(url, dl)
df_NYC <- read.csv(dl)
# Get an idea of the data frame's number of rows and columns.
dim(df_NYC)
rm(dl)
## Basic Exploratory Analysis and Summary Statistics
# First rows of the dataset, with header
head(df_NYC)
# Column names
colnames(df_NYC)
# Data type of each column
sapply(df_NYC, class)
# Basic summary statistics
summary(df_NYC)
## Number of Housing Complaints, by complaint type, most frequent first
df_NYC%>%
group_by(complaint_type) %>%
summarize(count = n()) %>%
arrange(desc(count))
## Visualize the complaint types and their counts in a bar plot
df_NYC %>% ggplot(aes(complaint_type))+
geom_bar()+
theme(axis.text.x = element_text(angle = 90, hjust = 1))
## In the NYC data file, "HEAT/HOT WATER" was at some point folded into
## "HEATING" (and "PAINT - PLASTER" into "PAINT/PLASTER"), so merge the two
## labels of each pair together before counting.
df_NYC$complaint_type[df_NYC$complaint_type %in% "HEAT/HOT WATER"] <- "HEATING"
df_NYC$complaint_type[df_NYC$complaint_type %in% "PAINT - PLASTER"] <- "PAINT/PLASTER"
df_NYC %>% ggplot(aes(complaint_type))+
geom_bar()+
theme(axis.text.x = element_text(angle = 90, hjust = 1))
## Temporal Evolution of Complaint Types
# Convert the `created_date` timestamp to `POSIXct`, then derive year/month
dt <- as.POSIXct(df_NYC$created_date)
df_NYC <- df_NYC %>% mutate(year = format(dt, "%Y"), month = format(dt, "%m"))
rm(dt)
# Count complaints per (year, complaint_type), dropping incomplete 2019 data
complaint_year <- df_NYC %>%
na.omit() %>% # omit missing values
#select(year, complaint_type) %>% # select columns we are interested in
mutate(year = as.factor(year)) %>% # turn year into a factor
mutate(year = as.numeric(levels(year))[year]) %>% # then back to numeric
filter(year < 2019) %>%
group_by(year, complaint_type) %>% # group data by year and complaint_type
summarise(number = n()) # count
# Plot the yearly trend of the four most frequent complaint types
complaint_year %>%
filter(complaint_type %in% c("HEATING", "PLUMBING", "GENERAL CONSTRUCTION", "PAINT/PLASTER")) %>%
ggplot(aes(x = year, y = number)) +
geom_line(aes(color=complaint_type)) +
scale_fill_brewer(palette = "Paired")
## Problem 2: Should the Department of Housing Preservation and Development of New York City
## focus on any particular set of boroughs, ZIP codes, or streets (where the complaints are
## severe) for the specific type of complaints identified in response to Question 1?
# Number of complaints for each borough
df_NYC %>%
na.omit() %>% # omit missing values
#select(year, complaint_type) %>% # select columns we are interested in
group_by(borough) %>% # group data by borough
summarise(number = n()) # count
# Bar plot of complaints per borough
df_NYC %>% ggplot(aes(borough))+
geom_bar() +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
## Interactive map of the NYC housing-complaint area.
# NOTE(review): df_geo below is 1000 *uniformly random* points drawn inside
# the bounding box of the observed coordinates -- NOT actual complaint
# locations. The resulting clusters illustrate the leaflet clustering UI,
# not real complaint density; use (latitude, longitude) pairs from df_NYC
# directly if a real density map is wanted.
lat <- df_NYC$latitude %>% na.omit()
lng <- df_NYC$longitude %>% na.omit()
df_geo <- data.frame(lat = runif(1000, min = min(lat), max = max(lat)),
lng = runif(1000, min = min(lng), max = max(lng)))
# The interactive map shows clusters of the (synthetic) points.
df_geo %>% leaflet() %>%
addTiles() %>%
addMarkers(clusterOptions = markerClusterOptions())
# THE ZIP FILE CAN BE DOWNLOADED FROM THE FOLLOWING LINK: "https://www1.nyc.gov/assets/planning/download/zip/data-maps/open-data/nyc_pluto_18v1.zip"
## download file. it may take couple of minutes (fileSize = 46MB).
#dl <- tempfile()
#zip.file.location <- "https://www1.nyc.gov/assets/planning/download/zip/data-maps/open-data/nyc_pluto_18v1.zip"
#download.file(zip.file.location, dl)
#BK_18v1 <- read.csv(unzip(dl,PLUTO_for_WEB/BK_18v1.csv))
#rm(dl)
# I am using my local directory to access the PLUTO files
BK_18v1 <- read.csv('./PLUTO_for_WEB/BK_18v1.csv')
BX_18v1 <- read.csv('./PLUTO_for_WEB/BX_18v1.csv')
MN_18v1 <- read.csv('./PLUTO_for_WEB/MN_18v1.csv')
QN_18v1 <- read.csv('./PLUTO_for_WEB/QN_18v1.csv')
# dimension of data frame
dim(BK_18v1)
# The recommended fields are Address, BldgArea, BldgDepth, BuiltFAR, CommFAR, FacilFAR, Lot, LotArea, LotDepth, NumBldgs, NumFloors, OfficeArea, ResArea, ResidFAR, RetailArea, YearBuilt, YearAlter1, ZipCode, YCoord, and XCoord.
df_BK <- BK_18v1 %>% select('Address', 'BldgArea', 'BldgDepth', 'BuiltFAR', 'CommFAR', 'FacilFAR', 'Lot', 'LotArea', 'LotDepth', 'NumBldgs', 'NumFloors', 'OfficeArea', 'ResArea', 'ResidFAR', 'RetailArea', 'YearBuilt', 'YearAlter1', 'ZipCode', 'YCoord', 'XCoord')
df_BX <- BX_18v1 %>% select('Address', 'BldgArea', 'BldgDepth', 'BuiltFAR', 'CommFAR', 'FacilFAR', 'Lot', 'LotArea', 'LotDepth', 'NumBldgs', 'NumFloors', 'OfficeArea', 'ResArea', 'ResidFAR', 'RetailArea', 'YearBuilt', 'YearAlter1', 'ZipCode', 'YCoord', 'XCoord')
df_MN <- MN_18v1 %>% select('Address', 'BldgArea', 'BldgDepth', 'BuiltFAR', 'CommFAR', 'FacilFAR', 'Lot', 'LotArea', 'LotDepth', 'NumBldgs', 'NumFloors', 'OfficeArea', 'ResArea', 'ResidFAR', 'RetailArea', 'YearBuilt', 'YearAlter1', 'ZipCode', 'YCoord', 'XCoord')
df_QN <- QN_18v1 %>% select('Address', 'BldgArea', 'BldgDepth', 'BuiltFAR', 'CommFAR', 'FacilFAR', 'Lot', 'LotArea', 'LotDepth', 'NumBldgs', 'NumFloors', 'OfficeArea', 'ResArea', 'ResidFAR', 'RetailArea', 'YearBuilt', 'YearAlter1', 'ZipCode', 'YCoord', 'XCoord')
# new data frame with smaller features
dim(df_BK)
# Merge all data frames by rows
df_pluto = rbind(df_BK, df_BX, df_MN, df_QN)
identical(nrow(df_pluto), nrow(df_BK)+nrow(df_BX)+nrow(df_MN)+nrow(df_QN))
### Exploratory Analysis
# Print the column names of both datasets
print(colnames(df_NYC))
print(colnames(df_pluto))

# Merge complaint types which were renamed, e.g. "HEAT/HOT WATER" to "HEATING"
# and "PAINT - PLASTER" to "PAINT/PLASTER" (idempotent if already done above).
df_NYC$complaint_type[df_NYC$complaint_type %in% "HEAT/HOT WATER"] <- "HEATING"
df_NYC$complaint_type[df_NYC$complaint_type %in% "PAINT - PLASTER"] <- "PAINT/PLASTER"

# Remove rows with NA entries.
# Bug fix: the original piped into na.omit() without assigning the result,
# so nothing was actually removed.
df_NYC <- df_NYC %>% na.omit()

### Target definition: the PLUTO dataset has building-level information for
### the loaded boroughs. Buildings whose address appears in the complaint
### data get target = 1; their features should help predict future complaints.
df_target <- as.numeric(df_pluto$Address %in% df_NYC$incident_address)
df_pluto['target'] <- df_target
colnames(df_pluto)

# Remove the Address column (first column) -- it is an identifier, not a feature.
df_pluto <- df_pluto[-1]
colnames(df_pluto)

# Make sure every remaining column is numeric/integer.
sapply(df_pluto, class)
# (a stray Rmd code fence "```" was removed here -- it is a syntax error in R)
### Pearson correlation matrix heatmap
# Correlation matrix of all (numeric) PLUTO features, rounded to 2 decimals
cormat <- round(cor(df_pluto),2)
head(cormat)
# Helpers below blank out the redundant half of the symmetric matrix.
# Get lower triangle of the correlation matrix
# Keep the diagonal and lower triangle of a (correlation) matrix, blanking
# the upper triangle with NA so redundant entries can be dropped.
get_lower_tri <- function(cormat) {
  out <- cormat
  out[upper.tri(out)] <- NA
  out
}
# Get upper triangle of the correlation matrix
# Keep the diagonal and upper triangle of a (correlation) matrix, blanking
# the lower triangle with NA so redundant entries can be dropped.
get_upper_tri <- function(cormat) {
  out <- cormat
  out[lower.tri(out)] <- NA
  out
}
# Use the helper above to blank out the redundant (lower) half of the
# symmetric correlation matrix.
upper_tri <- get_upper_tri(cormat)
upper_tri

# Melt the correlation matrix into long form for ggplot.
library(reshape2)
melted_cormat <- melt(upper_tri, na.rm = TRUE)

# Heatmap of the Pearson correlations.
library(ggplot2)
ggplot(data = melted_cormat, aes(Var2, Var1, fill = value)) +
  geom_tile(color = "white") +
  scale_fill_gradient2(low = "blue", high = "red", mid = "white",
                       midpoint = 0, limit = c(-1, 1), space = "Lab",
                       name = "Pearson\nCorrelation") +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 45, vjust = 1,
                                   size = 12, hjust = 1)) +
  coord_fixed()

# Select features correlated with the target; threshold chosen as 0.12.
cor_target <- abs(cormat[, "target"])
cor_target <- cor_target[!is.na(cor_target)]
relevant_features <- cor_target[cor_target > 0.12]
print(relevant_features)

# Check whether the selected features are highly correlated among themselves;
# if so, only one of each correlated pair needs to be kept.
df_corr_2features <- df_pluto %>% select("ResidFAR", "FacilFAR")
round(cor(df_corr_2features), 2)
# ResidFAR and FacilFAR are highly correlated, so FacilFAR can be dropped and
# ResidFAR kept. The important features are then 'BldgDepth', 'ResidFAR',
# 'NumFloors', 'ZipCode'. This exploratory pass gives a rough feel for the
# feature set and helps decide which algorithm may suit the problem.
# (a stray Rmd code fence "```" was removed here -- it is a syntax error in R)
### Random forest method for feature selection
# Drop the coordinate columns and train on the rest.
# (The original comment claimed "target" was dropped too, but it is kept --
# it is the response variable.)
drops <- c("XCoord", "YCoord")
df_PLUTO <- df_pluto[, !(names(df_pluto) %in% drops)]
# (a duplicated, identical assignment of df_PLUTO was removed here)

# Validation set will be 30% of the PLUTO data.
test_index <- createDataPartition(y = df_pluto$target, times = 1, p = 0.3, list = FALSE)
train_set <- df_PLUTO[-test_index, ]
test_set <- df_PLUTO[test_index, ]
# Keep a copy including the target (for a confusion matrix later), then drop
# the target from the feature-only test set.
test_set_CM <- test_set
test_set <- test_set %>% select(-target)

# With only 2 classes one could use logistic regression, but random forest is
# used here for its general applicability.
# The response must be a factor for classification.
train_set$target <- as.character(train_set$target)
train_set$target <- as.factor(train_set$target)
set.seed(1)
# Bug fix: tree_count was used below without ever being defined, which made
# the script error out; define it explicitly (500 is the randomForest default).
tree_count <- 500
model <- randomForest(target ~ ., train_set, ntree = tree_count,
                      importance = TRUE, na.action = na.omit)
# Variable-importance scores across all trees, as a matrix.
imp_score_matrix <- importance(model)
imp_score_matrix
### The random forest reports 'MeanDecreaseAccuracy' and 'MeanDecreaseGini';
### larger values mean greater feature importance. A cursory look at the table
### suggests 'Lot', 'BuiltFAR', 'BldgArea', 'ResArea', 'NumFloors' are the
### most important features.
## Problem 4: Can a predictive model be built for a future prediction of the possibility of
## complaints of the type identified in response to question 1?
## So far we have pointed out the important features in the PLUTO dataset. Predicting the
## future, however, calls for time-dependent feature sets.
## Our analysis shows the PLUTO features are static; to predict HEATING complaints we would
## need additional temporal data from external sources (e.g. weather over the years).
## With the given dataset, a time-series analysis of the monthly complaint counts can at
## least give a rough future estimate.
### **Time Series Analysis**
colnames(df_NYC)
# Derive a "YYYY-MM" year_month key from the complaint creation timestamp
dt <- as.POSIXct(df_NYC$created_date)
df_NYC <- df_NYC %>% mutate(year_month = format(dt, "%Y-%m"))
rm(dt)
df_TS <- df_NYC %>%
select('year_month', 'unique_key')%>%
filter(year_month<2019)
# NOTE(review): the author flags the chunk below as unverified, and it has
# several problems: (1) the sort() call is not how a data frame is ordered
# (use arrange()) and its result is discarded anyway; (2) as.numeric() on
# "YYYY-MM" factor levels yields NA; (3) group_by(year, ...) references a
# column that does not exist in df_TS (only year_month does); (4) tsData is
# never created before decompose()/stl(). Confirm and repair before use.
# below chunk of code has not been verified, so it was not put in the Pdf file
df_TS %>% sort(df_TS$year_month, decreasing = FALSE)
TS_complaint <- df_TS %>%
na.omit() %>% # omit missing values
#select(year, complaint_type) %>% # select columns we are interested in
mutate(year_month = as.factor(year_month)) %>% # turn year_month into a factor
mutate(year_month = as.numeric(levels(year_month))[year_month]) %>%
group_by(year, unique_key) %>% # NOTE(review): 'year' is not a column of df_TS
summarise(number = n()) # count
TS_complaint %>%
ggplot(aes(x = year_month, y = number)) +
geom_line()
# Trend, seasonality and error decomposition of the (intended) monthly series
decomposedRes <- decompose(tsData, type="mult") # use type = "additive" for additive components
plot (decomposedRes) # see plot below
stlRes <- stl(tsData, s.window = "periodic")
|
fa6da881d7219c075536a62d328e9998d8197500
|
cae6eb7fb6d208173c4fb6ea95cdf3b1c38713f1
|
/plot3.R
|
04a61a1a2a7c3c44773d82a26c4fce4c56ddbe2c
|
[] |
no_license
|
MMohey/ExData_Plotting1
|
4a2223865acde3eb9ab71b851014cee6c5d07ac4
|
8e94191fa28a140f6b193c6f5191dcf0c9e18aac
|
refs/heads/master
| 2020-12-24T06:59:08.731863
| 2016-11-11T21:21:06
| 2016-11-11T21:21:06
| 73,385,183
| 0
| 0
| null | 2016-11-10T13:35:30
| 2016-11-10T13:35:30
| null |
UTF-8
|
R
| false
| false
| 1,186
|
r
|
plot3.R
|
## plot3.R plots "Sub_metering_1", "Sub_metering_2" and "Sub_metering_3" from
## the "House Power Consumption" dataset for the specified dates.

# Import the dataset: semicolon-separated text with "?" marking missing values.
original_data <-
  read.table(
    "household_power_consumption.txt",
    header = TRUE,
    sep = ';',
    na.strings = "?",
    colClasses = NA,
    stringsAsFactors = FALSE
  )

# Convert the Date column from character to 'Date' class
original_data$Date <-
  as.Date((original_data$Date), format = "%d/%m/%Y")

# Keep only the two target days (2007-02-01 and 2007-02-02)
selected_data <-
  subset(original_data,
         subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))

# Merge Date and Time into a new column of 'POSIXct' class for the x-axis
selected_data$Date_Time <-
  as.POSIXct(paste(as.Date(selected_data$Date), selected_data$Time))

png("plot3.png",
    width = 480,
    height = 480,
    units = "px")

with(selected_data, {
  # Fix: the y axis shows energy sub-metering, not global active power —
  # the original ylab was copied from the plot2 script.
  plot(
    Sub_metering_1 ~ Date_Time,
    typ = "l",
    ylab = "Energy sub metering",
    xlab = ""
  )
  lines(Sub_metering_2 ~ Date_Time, col = 'Red')
  lines(Sub_metering_3 ~ Date_Time, col = 'Blue')
})

legend(
  "topright",
  col = c("black", "red", "blue"),
  lty = 1,
  lwd = 2,
  legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
)

dev.off()
|
ed81b29687df8669e8b3c44bce300408cb2fc677
|
fe5166da0b2085b9ebd741acc9c23694ffc09387
|
/man/shinyExplorer.Rd
|
c20fef4265e4a454eadfe2a0a9247ddd372b88b7
|
[] |
no_license
|
DrRoad/shinyExplorer
|
a7badb864a5e08c62020945197f8f4b33ad01422
|
17be823037dbf4bf9157a81b388640e627db398e
|
refs/heads/master
| 2021-10-21T14:05:47.553903
| 2019-03-04T12:17:34
| 2019-03-04T12:17:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 387
|
rd
|
shinyExplorer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shinyExplorer.R
\name{shinyExplorer}
\alias{shinyExplorer}
\title{Run shinyExplorer}
\usage{
shinyExplorer(...)
}
\arguments{
\item{...}{[\code{any}]\cr
Additional arguments passed to shiny's
\code{runApp()} function.}
}
\description{
Run a local instance of shinyExplorer.
}
\examples{
\dontrun{
shinyExplorer()
}
}
|
11c9df7faaa33f2d90bcd1a91a35adcfdbc1e4be
|
3b361820e93c9cbaa7e740b6edbf13c03a1cfcce
|
/R/stackloss.R
|
e3890005916210b5393cbe466c64c6413c1df164
|
[] |
no_license
|
msalibian/RobStatTM
|
f8dabc88197be2460f1ba4c95b595e95ff53c1e9
|
d542c29816d50889f25649817e3ae5de08946141
|
refs/heads/master
| 2023-05-14T08:42:27.747789
| 2023-05-09T17:08:37
| 2023-05-09T17:08:37
| 83,067,068
| 14
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 719
|
r
|
stackloss.R
|
#' Stackloss data
#'
#' Observations from 21 days of operation of a plant for the oxidation
#' of ammonia as a stage in the production of nitric acid.
#'
#' The columns are:
#' 1. air flow
#' 2. cooling water inlet temperature (C)
#' 3. acid concentration (%)
#' 4. stack loss, defined as the percentage of ingoing ammonia
#'    that escapes unabsorbed (the response variable)
#'
#' @docType data
#'
#' @usage data(stackloss)
#'
#' @format An object of class \code{"data.frame"} with 21 cases and 4
#' continuous variables.
#'
#' @source Brownlee, K.A. (1965), Statistical Theory and Methodology in Science and
#' Engineering, 2nd Edition, New York: John Wiley & Sons, Inc.
#'
#' @examples
#' data(stackloss)
"stackloss"
|
49d7646ac3f32fdca864d90f06e0c695ffa76951
|
ffb7b79a8b47c09c1fc937b5361acf2cf813b804
|
/man/abiotics_rescaling.Rd
|
a96e438389605ff73ba6b731fce38f5ce29fe0c7
|
[] |
no_license
|
GregoireButruille/FishNmix2
|
f72be9a9600c56ca570d271ef0f3830c75ce7a63
|
ddf5e448336e93c6bc5792e4e89fee98b2cfb66c
|
refs/heads/main
| 2023-07-02T16:24:41.651324
| 2021-08-12T15:04:17
| 2021-08-12T15:04:17
| 377,401,335
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 891
|
rd
|
abiotics_rescaling.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/abiotics_rescaling.R
\name{abiotics_rescaling}
\alias{abiotics_rescaling}
\title{Rescale abiotic data to 10 or 30 arcminutes}
\usage{
abiotics_rescaling(
flo1k_data,
worldclim_data,
earthenv_data,
minlat,
maxlat,
minlong,
maxlong,
resolution,
geosphere = FALSE
)
}
\arguments{
\item{flo1k_data}{raster stack with data from FLO1K}
\item{worldclim_data}{raster stack with data from WorldClim}
\item{earthenv_data}{raster stack with data from EarthEnv}
\item{minlat}{Minimum latitude}
\item{maxlat}{Maximum latitude}
\item{minlong}{Minimum longitude}
\item{maxlong}{Maximum longitude}
\item{resolution}{Resolution}
\item{geosphere}{If TRUE, import data from geosphere, which takes a long time}
}
\value{
}
\description{
Rescale abiotic data to 10 or 30 arcminutes
}
\examples{
}
|
3c6e9d3a4339d65002f963ee4bc871cd787ce634
|
9176b0f0ce12a42d1ac2a868e5f41a93d3446405
|
/ML/R-script/chap23_randomForest.R
|
8aea173a6455bbfe7a25a693321dddfbc2fb008a
|
[] |
no_license
|
jk-jang/R
|
5448f0d53b9c2012f0bc513cdfaf2349d1770aa2
|
c70e80484a1980b9330f032f1f20cdc4fa4e61c2
|
refs/heads/master
| 2020-12-24T06:58:28.970344
| 2017-05-05T09:10:47
| 2017-05-05T09:10:47
| 58,716,839
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,614
|
r
|
chap23_randomForest.R
|
##################################################
# randomForest
##################################################
# A model derived from decision trees (DT).
# Random forest is an ensemble-learning model.
# Ensemble learning: train several trees on different training sets, then
# combine their results to predict new data (see slides).
# Compared with a single DT it improves performance and reduces overfitting.

# Classification flow: train many trees -> models -> combine (voting) -> predict

# DT vs RF
# DT: builds the model from a single training set (one tree)
# RF: builds the model from several training sets (many trees)

# Two ways to randomise a random forest:
# 1. Grow each tree on a bootstrap sample (sampling part of the data
#    with replacement) -> the forest uses only part of the data per tree
# 2. Consider only a random subset of variables at each node split
#    -> the forest uses only part of the variables per split
# [Note] The two kinds of randomness above are combined when growing trees.

# Predicting new data:
# - the predictions of the individual decision trees are combined by
#   majority vote (voting)

install.packages('randomForest')
library(randomForest) # provides randomForest()
data(iris)

# 1. Build a random forest model
# Usage: randomForest(y ~ x, data, ntree, mtry)
# mtry: how many variables are considered when splitting a node
model = randomForest(Species~., data=iris)
# The printed model already reports the OOB error rate, accuracy and
# confusion matrix, so no separate model-evaluation step is needed
# (unlike earlier chapters that computed accuracy from table() by hand).

# 2. Tune parameters: build a model with 300 trees and 4 split variables
model = randomForest(Species~., data=iris,
ntree=300, mtry=4, na.action=na.omit )
model

# 3. Find the best (ntree, mtry) parameters
# - grid search for the best classification model
ntree <- c(400, 500, 600)
mtry <- c(2:4)
# combinations tried:
# 400-2, 400-3, 400-4
# 500-2, 500-3, 500-4
# 600-2, 600-3, 600-4

# build a data frame from the two candidate vectors
param <- data.frame(n=ntree, m=mtry)
param

for(n in param$n){# loop over 400-600
cat('ntree=',n)
for(m in param$m){# loop over 2-4
cat(', mtry=',m,'\n')
model=randomForest(Species~.,data = iris,
ntree=n,mtry=m,na.action = na.omit)
print(model)}
}

# Model for assessing variable importance
# (importance = TRUE is required for the measures printed below)
model2 = randomForest(Species~., data=iris, importance = T)
model2

# helper provided by the randomForest package
importance(model2)
# MeanDecreaseAccuracy identifies the important variables
# (here Petal.Length is the most important one)
# MeanDecreaseGini: how much a variable contributes to reducing node impurity
varImpPlot(model2)

#####################################################
# randomForest: parallel processing (data-partition style)
#####################################################
install.packages('foreach')
library(foreach)

m <- matrix(c(1:9), 3, 3)
m

# Iterate once per column, returning each column's mean
# Usage: foreach(iterator) %do% expression
foreach(i=1:ncol(m)) %do% mean(m[,i]) # returns a list
foreach(i=1:ncol(m),.combine=c) %do% mean(m[,i]) # returns a vector
foreach(i=1:ncol(m),.combine=rbind) %do% mean(m[,i]) # one row per result

# 600 trees -> grown as 3 batches of 200 trees each
model_iris=foreach(n=rep(200,3))%do%
randomForest(Species~., data=iris, ntree=n)
model_iris

# .combine option
model_iris=foreach(n=rep(200,3), .combine = combine)%do% # merge the 3 forests into one
randomForest(Species~., data=iris, ntree=n)
model_iris

#####################################################
# randomForest: parallel processing (multi-core style)
#####################################################
library(randomForest)
library(foreach)
install.packages('doParallel')
library(doParallel)

# Multi-core (4 CPU) parallel processing
registerDoParallel(cores=4) # register 4 cores
getDoParWorkers() # show the number of cores currently available
system.time(
rf_iris <- foreach(ntree=rep(250, 8), .combine=combine, .multicombine=TRUE, .packages = 'randomForest') %dopar%
randomForest(Species~., data=iris, ntree=ntree, na.action=na.omit )
)
|
092d0f84bbeecc7a325affeccd0025144186396e
|
38d52a7e16b96555f277cb879a69d3f1ba086dad
|
/R/get_chain_statistics.R
|
fa4703d5962607d884d0cbdb1d37d59fb8cb6a46
|
[
"MIT"
] |
permissive
|
next-game-solutions/tronr
|
c7ec41a0785536670942c653f0f1500f09e7e692
|
e7eb8b1d07e1c0415881ca3259358f707d78b181
|
refs/heads/main
| 2023-06-19T03:06:34.302241
| 2021-07-12T22:01:05
| 2021-07-12T22:01:05
| 305,829,963
| 7
| 0
|
NOASSERTION
| 2021-07-12T22:01:06
| 2020-10-20T20:48:08
|
JavaScript
|
UTF-8
|
R
| false
| false
| 4,165
|
r
|
get_chain_statistics.R
|
#' Get historical metrics of the TRON blockchain
#'
#' Retrieves historical values of the blockchain statistics
#'
#' @param days (double): number of days to look back (defaults to 14 days).
#' @param include_current_date (boolean): whether the current date's statistics
#'     (as of the query time) should be returned. Defaults to `FALSE`.
#' @eval function_params("max_attempts")
#'
#' @return A tibble with the following columns:
#' * `date` (Date): date in the `YYYY-MM-DD` format;
#' * `avg_block_size` (double): average block size (in bytes);
#' * `total_blocks` (integer): cumulative number of blocks on the chain;
#' * `new_blocks` (integer): number of newly generated blocks;
#' * `chain_size` (double): cumulative size of the chain (in bytes);
#' * `total_addresses` (integer): cumulative number of addresses on the chain;
#' * `active_addresses` (integer): number of addresses that were active on `date`;
#' * `new_addresses` (integer): number of newly created addresses;
#' * `addresses_with_trx` (integer): number of Tronix (TRX)-holding addresses;
#' * `total_trc10_tokens` (integer): cumulative number of TRC-10 assets on the
#'     chain;
#' * `total_trc20_tokens` (integer): cumulative number of TRC-20 assets on the
#'     chain;
#' * `new_trc10_tokens`(integer): number of newly created TRC-10 assets;
#' * `new_trc20_tokens` (integer): number of newly created TRC-20 assets;
#' * `new_tx` (integer): number of new transactions on the chain;
#' * `trx_transfer_tx` (integer): number of TRX transfer transactions;
#' * `trc10_transfer_tx` (integer): number of TRC-10 transfer transactions;
#' * `freeze_tx` (integer): number of TRX freezing transactions;
#' * `vote_tx` (integer): number of vote transactions;
#' * `other_tx` (integer): number of other transactions;
#' * `contract_triggers` (integer): cumulative number of smart contract triggers;
#' * `energy_usage` (double): amount of energy consumed;
#' * `net_usage` (double): amount of bandwidth consumed.
#'
#' @importFrom rlang .data
#' @importFrom magrittr %>%
#'
#' @export
#'
#' @examples
#' r <- get_chain_statistics(days = 7)
#' print(r)
get_chain_statistics <- function(days = 14,
                                 include_current_date = FALSE,
                                 max_attempts = 3L) {
  if (length(days) > 1L) {
    rlang::abort("Only one `days` value is allowed")
  }
  # Scalar condition: use short-circuiting `||` (the original used the
  # vectorised `|`, which always evaluates both sides). Checking
  # is.numeric() first means is.na() only runs on numeric input.
  if (!is.numeric(days) || is.na(days)) {
    rlang::abort("`days` must be a numeric value")
  }
  if (!is.logical(include_current_date)) {
    rlang::abort("`include_current_date` must be a boolean value")
  }
  validate_arguments(arg_max_attempts = max_attempts)
  # The API counts the current (partial) day, so request one extra day and
  # drop the latest date below when the caller does not want it.
  if (!include_current_date) {
    days <- days + 1L
  }
  url <- build_get_request(
    base_url = "https://apilist.tronscan.org/",
    path = c("api", "stats", "overview"),
    query_parameters = list(days = days)
  )
  r <- api_request(url = url, max_attempts = max_attempts)
  if (length(r$data) == 0L) {
    message("No data found")
    return(NULL)
  }
  # One tibble row per returned day; API field names are normalised to
  # snake_case before mapping onto the documented column names.
  result <- lapply(r$data, function(x) {
    names(x) <- snakecase::to_snake_case(names(x))
    tibble::tibble(
      date = as.Date(from_unix_timestamp(x$date)),
      avg_block_size = as.numeric(x$avg_block_size),
      total_blocks = x$total_block_count,
      new_blocks = x$new_block_seen,
      chain_size = x$blockchain_size,
      total_addresses = x$total_address,
      active_addresses = x$active_account_number,
      new_addresses = x$new_address_seen,
      addresses_with_trx = x$account_with_trx,
      total_trc10_tokens = x$total_trc_10,
      total_trc20_tokens = x$total_trc_20,
      new_trc10_tokens = x$new_trc_10,
      new_trc20_tokens = x$new_trc_20,
      new_tx = x$new_transaction_seen,
      trx_transfer_tx = x$trx_transfer,
      trc10_transfer_tx = x$trc_10_transfer,
      freeze_tx = x$freeze_transaction,
      vote_tx = x$vote_transaction,
      other_tx = x$other_transaction,
      contract_triggers = x$triggers,
      energy_usage = x$energy_usage,
      net_usage = x$net_usage
    )
  }) %>%
    dplyr::bind_rows()
  if (!include_current_date) {
    result <- dplyr::filter(result, .data$date != max(.data$date))
  }
  return(result)
}
|
a61d04846b8e0d16be935d520cce49588469b6ec
|
4fd7dd0ea9f1e73f9d14031d750f4ba02dce3b1b
|
/man/optimize_refTemp.Rd
|
e66dc855992ab4fce7e53aab77d3682c58392336
|
[] |
no_license
|
jlpesoto/prueba
|
8bd4be6551058a064f2e355aa34051336784a454
|
7bcbe619ef35d9e9b60abc539ae11dc5b0966e49
|
refs/heads/master
| 2020-06-30T17:30:29.359435
| 2019-08-06T17:46:48
| 2019-08-06T17:46:48
| 200,897,474
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,570
|
rd
|
optimize_refTemp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimize_ref_temp.R
\name{optimize_refTemp}
\alias{optimize_refTemp}
\title{Optimization of the Reference Temperature}
\usage{
optimize_refTemp(temp_ref0, lower, upper, inactivation_model, parms,
temp_profile, parms_fix, n_times = 100)
}
\arguments{
\item{temp_ref0}{Initial value of the reference temperature to use for
the optimization.}
\item{lower}{Lower bound for the reference temperature.}
\item{upper}{Upper bound for the reference temperature.}
\item{inactivation_model}{Character identifying the inactivation model
to use for the calculation.}
\item{parms}{Numeric vector with the nominal values of the model parameters.}
\item{temp_profile}{Data frame describing the environmental conditions.}
\item{parms_fix}{Nominal value of the parameters not considered for the
sensitivity.}
\item{n_times}{Numeric value specifying the number of time points at which
the sensitivity functions will be calculated. 100 by default.}
}
\value{
The object returned by \code{\link{optim}}.
}
\description{
Finds the optimum value of the reference temperature which minimizes
the correlation between sensitivity functions of the model parameters.
}
\details{
The optimization is made using the \code{\link{optim}} function. The
target for the optimization is the maximization of the determinant
of the correlation matrix between parameter sensitivities. The
Brent method is used, as it is the recommended one for unidimensional
optimization.
The parameters z and D/delta cannot be fixed.
}
|
e29a2ca2cec367ddfde8d747eb4dca0b8be2c641
|
9831fdd8ce4d827b271913b536e484555e6196ab
|
/plot2.R
|
9c5df3a4e9394d7cdf95e71031dc2d1bba09dfce
|
[] |
no_license
|
jlmarrero/ExData_Plotting1
|
9fb11411aa66ca5f9af508af06f3019380cb3d94
|
cfe14ab029e7d30622f0d72eedd635ab2afe1c0a
|
refs/heads/master
| 2021-01-18T01:08:21.017507
| 2016-01-11T02:16:04
| 2016-01-11T02:16:04
| 49,390,162
| 0
| 0
| null | 2016-01-10T23:18:58
| 2016-01-10T23:18:57
| null |
UTF-8
|
R
| false
| false
| 736
|
r
|
plot2.R
|
# plot2.R: plots Global Active Power for 2007-02-01 and 2007-02-02 from the
# "Individual household electric power consumption" dataset, assumed to
# reside in the working directory.
#
# Fixes vs. the original:
# - read_csv2() belongs to readr, which was never loaded; read_csv2 also
#   treats "," as the decimal mark, which mangles this dataset's
#   "."-decimal numeric columns. Base read.table() with sep = ";" is used
#   instead, and "?" is declared as the NA marker.
# - n_max = 100000 silently truncated the file; read it all.

# Load data
df <- read.table("household_power_consumption.txt",
                 header = TRUE, sep = ";", na.strings = "?",
                 stringsAsFactors = FALSE)

# Subset the data to the two target days
df$Date <- as.Date(df$Date, format = "%d/%m/%Y")
new_df <- subset(df, df$Date == "2007/02/01" | df$Date == "2007/02/02")

# Open graphics device and set layout param
png(filename = "plot2.png", width = 480, height = 480)

# Plot data
plot(as.numeric(new_df$Global_active_power), type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "", xaxt = "n")

# Label the x-axis at the start, middle and end of the two-day window
v1 <- c(0, length(new_df$Date) / 2, length(new_df$Date))
axis(1, at = v1, labels = c("Thur", "Fri", "Sat"))

# Close device
dev.off()
|
cd9b8003981478ca3846db342f98160e6275fece
|
f0f76daa6900a2330aa432ea78517c721afae8e6
|
/code/Step1_forecast_aq_potential.R
|
684f2182d960e23a068091e87a104864fdb3d4f5
|
[] |
no_license
|
cfree14/aquacast
|
cea44f83fdaec3efbe8d94ed26f3c52907d0a7b5
|
04d3936b6e9115ea593cb86d91fed562f7385701
|
refs/heads/master
| 2022-04-29T01:28:59.820766
| 2022-04-27T18:59:07
| 2022-04-27T18:59:07
| 206,387,817
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,509
|
r
|
Step1_forecast_aq_potential.R
|
# Clear workspace
# NOTE(review): rm(list = ls()) wipes the calling session's objects; fine for
# a stand-alone script, but avoid sourcing this from an interactive session.
rm(list = ls())

# Setup
################################################################################

# Packages
library(raster)
library(ggplot2)
library(tidyverse)

# Directories (output paths point at a shared Google Drive volume)
codedir <- "code"
sppdir <- "data/species/data"
outputdir <- "/Volumes/GoogleDrive/Shared drives/emlab/projects/current-projects/blue-paper-2/data/output/raw"
plotdir <- "/Volumes/GoogleDrive/Shared drives/emlab/projects/current-projects/blue-paper-2/data/output/raw_plots"

# Read aquacast function (defines aquacast() used below)
source(file.path(codedir, "aquacast_v4.R"))
source(file.path(codedir, "calc_costs.R"))

# Read species data (loads the `data` object formatted below)
load(file.path(sppdir, "aquaculture_species_key.Rdata"))

# Setup data
################################################################################

# Format data: map taxonomic class onto a coarse farm type
data <- data %>%
mutate(type=recode(class,
"Bivalvia"="Bivalve",
"Actinopterygii"="Finfish"))

# Subset finfish/bivalves (currently all species are run)
data_do <- data
# data_do <- filter(data, class=="Bivalvia")
# data_do <- filter(data, class=="Actinopterygii")

# Check which species runs didn't finish for a given scenario by comparing
# the expected output filenames against what exists in outputdir
rcp2check <- "RCP26"
files_should <- paste0(rcp2check, "_", gsub(" ", "_", data_do$species), ".Rds")
files_all <- list.files(outputdir)
files_done <- files_all[grepl(rcp2check, files_all)]
files_missing <- files_should[!files_should%in%files_done]
length(files_missing)
# data_do <- data_do %>%
# mutate(file=paste0(rcp2check, "_", gsub(" ", "_", species), ".Rds")) %>%
# filter(file %in% files_missing)
# Run forecast
################################################################################

# Setup parallel (disabled)
# library(doParallel)
# ncores <- detectCores()
# registerDoParallel(cores=ncores)

# Loop through RCP scenarios and species, forecasting aquaculture potential
# for each combination. The original four per-scenario loops were identical
# apart from the rcp string and the plot flag, so they are collapsed into one
# nested loop. Diagnostic plots are produced only for the first scenario
# (rcp26), matching the original behaviour.
# periods <- c("2021-2022", "2051-2052", "2091-2092")
periods <- c("2021-2030", "2051-2060", "2091-2100")
rcps <- c("rcp26", "rcp45", "rcp60", "rcp85")
for(rcp in rcps){
  for(i in seq_len(nrow(data_do))){

    # Parameters
    species <- data_do[i,]

    # Forecast aquaculture potential
    # For testing: rcp="rcp26"; outdir=outputdir
    output <- aquacast(species=species, periods=periods, rcp=rcp,
                       outdir=outputdir, plot=(rcp=="rcp26"))

  }
}
|
76dc885c409db6e6d224b1d66cb0c3a4d8e19d3e
|
0f43b7df4006ca85de76f5209b85aa39c649150f
|
/man/load_extra_iots.Rd
|
7aa796485c87429114da6d8b5b72ce9e9da789bb
|
[] |
no_license
|
sybrendeuzeman/WIOD_package
|
29c66e8b17415236421c534c309c1f712cf860ac
|
edcb5485bd4d49b9a6310d7dfc172f1d8864366c
|
refs/heads/master
| 2020-07-02T04:44:56.131660
| 2020-01-23T18:22:31
| 2020-01-23T18:22:31
| 201,419,749
| 1
| 0
| null | 2020-01-23T18:22:32
| 2019-08-09T07:54:13
|
R
|
UTF-8
|
R
| false
| true
| 1,192
|
rd
|
load_extra_iots.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_extra_iots.R
\name{load_extra_iots}
\alias{load_extra_iots}
\title{Load extra data to multiple IOTs.}
\usage{
load_extra_iots(iots, extra, directory = get("dir_data", envir = paramEnv))
}
\arguments{
\item{iots:}{List of Input-Output Tables (load via [load_iots()])}
\item{extra:}{Name of the extra data that is required.}
\item{directory:}{directory to where the input-output tables are stored.}
}
\description{
Function to load extra data into multiple international Input-Output Tables (IOTs).
}
\details{
Directory only needs to direct to the main storage directory, right version and year will be taken care of.
Default is loading via the internet. Users who use IOTs more often are advised to make a local copy.
Local copy of online data can be made via download_iots()
}
\examples{
\dontrun{Using online data:}
iots <- load_iots("WIOD2013")
iots <- load_extra_iots(iots, "SEA")
iots <- load_iots("WIOD2013", 2000:2001)
iots <- load_extra_iots(iots, "SEA")
\dontrun{Using local data:
iots <- load_iots("WIOD2013", directory = "D:/Data")
iots <- load_extra_iots(iots, "SEA", directory = "D:/Data")
}
}
|
3f1b0213277bdca4ee72a954ba0b0568a2b1797f
|
72002076d389156ac665caef39f8f193ab636236
|
/identity.r
|
8fe51121e0ff9b3bbedf29441eac7e10a8bf0f98
|
[] |
no_license
|
Betawolf/panning-scripts
|
b1050a1616c971a017e093e0826f0184351e01fe
|
2db9dcb06429470993d71b199190b0e83f334250
|
refs/heads/master
| 2021-01-13T13:05:31.342112
| 2017-01-11T21:20:17
| 2017-01-11T21:20:17
| 78,681,277
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,199
|
r
|
identity.r
|
#The ROCR package for various plots and performance assessments
library(ROCR)
library(PRROC)

#The data from the attribute comparisons (one row per profile pair).
ident_all <- read.csv('identity.csv')

# Display the data structure. The origin and target variables identify the
# two profiles involved in the comparison. The block variable identifies the
# block which produced this comparison -- within each block there will be at
# least one true comparison between profiles and a number of negatives which
# were formed by selecting candidate profiles using a SNSs's name-based search.
# All comparisons should have an origin network of Google+, as this is how most
# connections are found (allowing comparisons across other networks results in
# an explosion of negative examples).
#
# The outcome variable indicates whether the profiles should or should not be
# matched. The remaining variables form the
# comparison vector -- the similarity measures between the various attributes of
# the profile. These are:
# - exactnames : the number of name components which match exactly between two profiles.
# - bestname : the (inverted) levenshtein editdistance between the two best (most likely to be real) name strings.
# - timecomparison : the proportion of time activity peaks and troughs which occur in the same window (1/6th of a day)
# - avatars : pixel similarity between the two profiles' avatars.
# - friends : the proportion of friends with the same names (editdistance measure at threshold 0.8).
# - linkactivity : the proportion of identical web links found in profile content
# - stylometry : euclidean distance between proportions of function words
# - geography : proportion of locations which are 'near' each other (within 10k or substring of text).
str(ident_all)

# You can see that only 1% of the data is matches.
# This reflects the challenging base rates -- searches for a person's name
# often return many matching profiles.
table(ident_all$outcome)

# As we are building a classifier, we need to test a number of thresholds on these numbers.
# Typically in record linkage we would have 'matched' 'not-matched' and 'possible match',
# the third being for human review, but the final class needs to be omitted here, so we're
# just building a binary classifier (i.e. one threshold).
# The below function reports precision, recall and accuracy for a threshold.
# Report precision, recall and F1 for a given score threshold.
#
# Args:
#   level:    numeric score threshold; scores strictly above it are
#             predicted as matches.
#   scores:   numeric vector of predicted match probabilities. Defaults to
#             the global `lmpred` so existing calls like
#             sapply(levels, accuracy) keep working unchanged.
#   outcomes: 0/1 vector of true labels, same length as `scores`. Defaults
#             to the global `ident_test$outcome` for the same reason.
#
# Returns: numeric vector c(level, precision, recall, f1).
accuracy <- function(level, scores = lmpred, outcomes = ident_test$outcome){
  predict <- ifelse(scores > level, 1, 0)
  true <- sum(predict & outcomes == 1)
  pre <- true / sum(predict == 1)
  rec <- true / sum(outcomes == 1)
  f1 <- 2 * ((pre * rec)/(pre + rec))
  return(c(level, pre, rec, f1))
}
# For modelling purposes, we need only the comparison vector and outcome.
ident <- ident_all[,c('exactnames','bestname','timeactivity','avatars','friends','linkactivity','stylometry','geography','outcome')]

# Split positives and negatives so each 10-fold slice keeps the class ratio.
trues <- ident[ident$outcome == 1,]
falses <- ident[ident$outcome == 0,]
max_f <- length(falses$outcome)
tenth_f <- floor(max_f/10)
max_t <- length(trues$outcome)
tenth_t <- floor(max_t/10)

# Accumulators filled across folds: AUCs, per-fold threshold reports, and
# pooled predictions/labels for the overall ROC curve.
auc_results <- c()
etc_results <- c()
cols = c('orangered','orchid','palegreen','paleturquoise', 'palevioletred', 'royalblue', 'seagreen','sienna','tan')
fired = 0
preds <- c()
truth <- c()
png('roc.png')

#Split into training and test data: 90:10.
# NOTE(review): i runs 0:9 but cols has only 9 entries, so cols[0] is empty
# on the first fold and the 10th colour is never used -- verify intended.
# NOTE(review): the train indices c(1:lower_t, upper_t:max_t) overlap the
# test slice at both boundaries (off-by-one) -- confirm this is acceptable.
for (i in 0:9) {
lower_f <- i*tenth_f+1
upper_f <- i*tenth_f+tenth_f
lower_t <- i*tenth_t+1
upper_t <- i*tenth_t+tenth_t
ident_test <- rbind( trues[lower_t:upper_t,], falses[lower_f:upper_f,])
ident_train <- rbind( trues[c(1:lower_t,upper_t:max_t),], falses[c(1:lower_f,upper_f:max_f),])

# Build a model, including interactions for all of the features (as we expect them to support each other).
ident_model <- glm(outcome ~ .^2, data=ident_train, family='binomial')

# Make numeric predictions based on the linear model.
lmpred <- predict(ident_model, ident_test, type='response')

# Pool this fold's scores and labels for the overall ROC curve drawn below.
preds <- c(preds, lmpred)
truth <- c(truth, ident_test$outcome)

#A summary of the predictions shows the range of values
print(summary(lmpred))
rcr_pred <- prediction(lmpred, ident_test$outcome)

#Likely levels based on the range reported in the summary, steps of 0.1
levels <- seq(round(min(lmpred),1), round(max(lmpred),1), 0.1)
# levels <- seq(0.0, 0.3, 0.01)

#Gather the report for each level into a data frame.
res <- data.frame(t(sapply(levels,accuracy)))
names(res) <- c('level','precision','recall','f1score')
print(res)

# Record the precision/recall/F1 at the fixed working threshold bl.
bl = 0.3
print("Best threshold:")
print(na.omit(res[res$level+0.01 > bl & res$level-0.01 < bl,]))
etc_results <- cbind(etc_results, as.list(head(res[res$level+0.01 > bl & res$level-0.01 < bl,],1)))

# Draw this fold's ROC curve; the first fold creates the plot, later
# folds are overlaid with add=T.
rcr_perf <- performance(rcr_pred, 'tpr','fpr')
if (fired == 0){
plot(rcr_perf, col=cols[i], lwd=2)
fired = 1
}
else {
plot(rcr_perf, col=cols[i], add=T, lwd=2)
}

# Calculate the AUC by comparing score values (Monte Carlo estimate of
# P(score of random positive > score of random negative)).
pos.scores <- lmpred[which(ident_test$outcome == 1)]
neg.scores <- lmpred[which(ident_test$outcome == 0)]
auc_approx <- mean(sample(pos.scores,10000,replace=T) > sample(neg.scores,10000,replace=T))
auc_results <- c(auc_results, auc_approx)
print(paste("AUC :",auc_approx))
}

# Overlay the ROC curve of the pooled predictions from all folds.
overall_pred <- prediction(preds, truth)
overall_perf <- performance(overall_pred, 'tpr','fpr')
plot(overall_perf, col='black', add=T, lwd=3, lty=2)
dev.off()
# Second pass over the same folds, this time drawing precision-recall
# curves and collecting AUPRC values.
auprcs <- c()
png('pr.png')

#Split into training and test data: 90:10.
for (i in 0:9) {
lower_f <- i*tenth_f+1
upper_f <- i*tenth_f+tenth_f
lower_t <- i*tenth_t+1
upper_t <- i*tenth_t+tenth_t
ident_test <- rbind( trues[lower_t:upper_t,], falses[lower_f:upper_f,])
ident_train <- rbind( trues[c(1:lower_t,upper_t:max_t),], falses[c(1:lower_f,upper_f:max_f),])

# Build a model, including interactions for all of the features (as we expect them to support each other).
ident_model <- glm(outcome ~ .^2, data=ident_train, family='binomial')
print(summary(ident_model))

# Make numeric predictions based on the linear model.
lmpred <- predict(ident_model, ident_test, type='response')

#A summary of the predictions shows the range of values
# print(summary(lmpred))
rcr_pred <- prediction(lmpred, ident_test$outcome)

#Build PRROC performance object from predict output.
# NOTE(review): this reassigns `truth`, clobbering the pooled label vector
# built by the ROC loop above -- confirm the overall PR curve below still
# uses the pooled `overall_pred` as intended.
truth <- rcr_pred@labels[[1]]
posvals <- rcr_pred@predictions[[1]][truth == 1]
negvals <- rcr_pred@predictions[[1]][truth == 0]
perf_obj <- pr.curve(scores.class0=posvals, scores.class1=negvals, curve=T)

# First fold creates the plot; later folds are overlaid.
if (i == 0){
plot(perf_obj, legend=F, color=i, lty=1, auc.main=F, main='')
}
else {
plot(perf_obj, legend=F, color=i, add=T, lty=1, auc.main=F)
}

# Calculate the AUPRC
print(paste("AUPRC :",perf_obj$auc.integral))
auprcs <- c(auprcs, perf_obj$auc.integral)
}

# Overlay the PR curve of the pooled predictions, then report averages
# across folds at the working threshold.
overall_perf <- performance(overall_pred, 'prec','rec')
plot(overall_perf, color=1, add=T, lwd=3, lty=2)
dev.off()

etc_results <- data.frame(t(etc_results))
print(paste('precision:',mean(unlist(etc_results$precision))))
print(paste('recall:',mean(unlist(etc_results$recall))))
print(paste('f1score:',mean(unlist(etc_results$f1score))))
print(paste("Average AUROC :",mean(auc_results)))
print(paste("Average AUPRC :",mean(auprcs)))
|
5be6353cfccb430d9da98c27a8e3f2f03e961fae
|
43eae47269ee5a073218dcda8eed82e3e18c6312
|
/man/priors_analyze.Rd
|
e3997dc11523d5dc39c5f494b4509eb3445a4eb2
|
[] |
no_license
|
wlandau/fbseqStudies
|
4e494d25165130f95a983ee64d751ba3fb24bd3d
|
0169ac5a00d457a261401f926b37e08587eace64
|
refs/heads/main
| 2021-01-19T04:39:24.890203
| 2017-10-21T02:36:07
| 2017-10-21T02:36:07
| 45,758,088
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 389
|
rd
|
priors_analyze.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/study-priors_analyze.R
\name{priors_analyze}
\alias{priors_analyze}
\title{Function \code{priors_analyze}}
\usage{
priors_analyze(from, to)
}
\arguments{
\item{from}{to directory to save simulations and results}
\item{to}{output directory}
}
\description{
Plot and summarize results from simulation studies
}
|
8ee1841c7d1a776f0675f0a5bbe38d037ef661ba
|
1532a1c2779863ab1cf6e3a5d3d7f93c520fd881
|
/Forecast_Combo_Flow.R
|
1ec08a91d20229f2d13299b1c42a515757d7e6a1
|
[] |
no_license
|
gsukisubramaniam/Trend_Forecasting
|
5dbc714517f07cf1c8ba1d31e2a8665b892801e1
|
bfa6d512ac57ee560e3efc2ce97bd8f75172a1f8
|
refs/heads/master
| 2020-12-30T10:37:34.532971
| 2017-07-31T06:10:23
| 2017-07-31T06:10:23
| 98,853,088
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 426
|
r
|
Forecast_Combo_Flow.R
|
## Execute this code after loading the data into the table "dataset" with
## values in the column named "Value".
library(tseries)

# Monthly time series starting January 2014
actual <- ts(dataset$Value, frequency = 12, start = c(2014, 1))

# Classical decomposition into seasonal / trend / random components
Decompose_Actual <- decompose(actual)
seasonal <- Decompose_Actual$seasonal
trend <- Decompose_Actual$trend

# Deseasonalise, then fit a linear trend against a time index.
# The index previously hard-coded 39 observations; derive it from the
# data so the script works for any series length.
deseason <- actual - seasonal
detrend <- cbind(t <- seq_along(deseason), deseason)
fit <- lm(deseason ~ t, data = detrend)
summary(fit)
seasonal
|
d8df04ac3b8ea21bb8f008cf5b8b75a891329556
|
1a439dc569ec3025b76cb76ff9dd71fe6614ef88
|
/createNetCDF/CreatePhaseMap.R
|
c75c682bf9a906a80827d6209a049ec028dae24e
|
[] |
no_license
|
seanyx/TimeSeries
|
f2ca38e1fcea182b3f43404e2b151df5d1c6127b
|
c671823c74fe14fd632b67890c13be39c54ba975
|
refs/heads/master
| 2016-09-05T13:59:12.509273
| 2014-08-12T17:55:55
| 2014-08-12T17:55:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,563
|
r
|
CreatePhaseMap.R
|
library(RNetCDF)
source('~/Dropbox/netcdf_test/CreateNetCDF.R', chdir = TRUE)
source('~/Documents/TimeSeries/createNetCDF/phaseshiftest.R', chdir = TRUE)

# Read the SST fields from the NetCDF file, restoring the working
# directory afterwards.
olddir=getwd()
setwd("~/Dropbox/netcdf_test/")
data=open.nc("SST_Xiao.nc")
lat=var.get.nc(data,'lat')
lon=var.get.nc(data,'lon')
sst=var.get.nc(data,'sst',collapse=F)
anom=var.get.nc(data,'anom',collapse=F)
anomfil=var.get.nc(data,'anomfil',collapse=F)
close.nc(data)
setwd(olddir)

# Output file settings
path="."
filename="PhaseMatrix.nc"
time=1
height=1
lat=lat
lon=lon

##### create phase map for the following location
##### only need to change the following 4 lines
da=anomfil
var.names=c('phaseanomfil','ampanomfil')
## calculate the phase map and amplitude map relative to this reference point
inilon=80
inilat=0

variables=list()
null_value=-999
refs='Extended Reconstructed Sea Surface Temperature (ERSST.v3b) http://www.ncdc.noaa.gov/ersst/'
inipoint=c(which(lon==inilon),which(lat==inilat))

# Allocate output grids from the actual coordinate lengths instead of the
# original's hard-coded c(180, 89, ...), so other grids work too.
phasemap=array(NA,dim=c(length(lon),length(lat),1,1))
ampmap=phasemap
for (i in seq_along(lon)) {
for (j in seq_along(lat)) {
# Skip land / missing cells.
if(is.na(da[i,j,1,][1])) {next}
# Fix: the original computed test=list(0,1) at the reference point but then
# skipped the assignment via `next`, leaving the reference cell NA. Record
# phase 0 and amplitude 1 there explicitly instead.
if(i==inipoint[1] & j==inipoint[2]) {phasemap[i,j,1,1]=0; ampmap[i,j,1,1]=1; next}
test=fitPhase(da[inipoint[1],inipoint[2],1,],da[i,j,1,],N=18)
phasemap[i,j,1,1]=test[[1]]
ampmap[i,j,1,1]=test[[2]]
}
}
variables[[1]]=phasemap
variables[[2]]=ampmap
null_value=rep(-999,length(var.names))
filename=paste('lon',inilon,'_lat',inilat,'_',filename,sep='')
createNetCDF2d(path,filename,time,height,lat,lon,var.names,variables,null_value=null_value,refs=refs)
|
7895f250666c36e1c2f4dcdeb350bdc330142252
|
eb9bbef62511e003f9e9d675b428f83c6f77ac8d
|
/man/davies_bouldin.Rd
|
30748366b7d54b49abb441593a40fe43c26c910e
|
[] |
no_license
|
baptlano24/clustyanaly
|
b6495910b6db2bbcef082aeb11335fda304d2b42
|
91176e0f9bb4fc423aca2d3a05be6a6b57f6be17
|
refs/heads/main
| 2023-01-23T01:00:21.809246
| 2020-12-08T22:34:40
| 2020-12-08T22:34:40
| 319,457,468
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 787
|
rd
|
davies_bouldin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/davies_bouldin.R
\name{davies_bouldin}
\alias{davies_bouldin}
\title{davies_bouldin function}
\usage{
davies_bouldin(X, clust, distance = "euclidean")
}
\arguments{
\item{X}{Data frame or matrix of features; all columns must be numeric}
\item{clust}{vector of result of the cluster, numeric or string}
\item{distance}{str of type of distance see {\link[stats]{dist}} \url{https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/dist}}
}
\value{
davies_bouldin index
}
\description{
Computes the Davies-Bouldin cluster-validity index, letting the caller choose the distance metric used.
}
\references{
\url{https://en.wikipedia.org/wiki/Davies-Bouldin_index}
}
\author{
Baptiste Lanoue
}
|
26cbd4daa57d4f9c3837de08cc372c0c8ce560c4
|
56a409f07b25747e195282126664d11ed9a4d37b
|
/Preparation.R
|
07921a9a1f38e50e5caeb8fef1ababfd1e43a046
|
[] |
no_license
|
tanas57/mushroom-machine-learning
|
684f43c17794298961349d468bb4f9f12eb0e884
|
e3b3acb685db1ccd6dd50de19a0f46b7d0d36798
|
refs/heads/master
| 2020-06-16T15:30:33.878660
| 2019-05-05T15:17:54
| 2019-05-05T15:17:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,802
|
r
|
Preparation.R
|
#install.packages("Amelia")
#library(Amelia)
# Column names for the UCI agaricus-lepiota (mushroom) dataset: the target
# ("class") followed by 22 categorical morphology/habitat features.
columnNames <- c(
  "class", "cap_shape", "cap_surface",
  "cap_color", "bruises", "odor",
  "gill_attachement", "gill_spacing", "gill_size",
  "gill_color", "stalk_shape", "stalk_root",
  "stalk_surface_above_ring", "stalk_surface_below_ring", "stalk_color_above_ring",
  "stalk_color_below_ring", "veil_type", "veil_color",
  "ring_number", "ring_type", "spore_print_color",
  "population", "habitat")
#include dataset from disk
# "?" marks missing values in this dataset (notably in stalk_root).
mushroom <- read.table("agaricus-lepiota.data",
                       sep = ",",
                       na.strings = "?",
                       colClasses = NA,
                       header = FALSE,
                       col.names= columnNames
) # there are missing value so it gets warning
#unique(mushroom$veil_type)
#missmap(mushroom, main = "Missing values vs observed")
#replace NA values to columns mode
#replace NA values to columns mode
#' Most frequent value of a vector.
#'
#' @param x vector (factor, character, or numeric) to summarise.
#' @param na.rm if TRUE (the default), missing values are dropped before
#'   counting.
#' @return the modal value as a character string (a name from the frequency
#'   table), or NA when x has no non-missing values. Ties are broken by
#'   returning the first mode in table order, so the result is always a
#'   single usable value. (The previous implementation ignored na.rm and
#'   returned the sentinel string ">1 mode" on ties, which would have been
#'   written into the data by the imputation loop that calls this function.)
Mode <- function (x, na.rm = TRUE) {
  if (na.rm) {
    x <- x[!is.na(x)]
  }
  if (length(x) == 0) {
    return(NA)
  }
  xtab <- table(x)
  xmode <- names(which(xtab == max(xtab)))
  # Deterministic tie-break: first mode in table order.
  return(xmode[1])
}
# ---- Missing-value imputation -------------------------------------------
# Replace every NA with the modal (most frequent) value of its column.
for (var in seq_len(ncol(mushroom))) {
  mushroom[is.na(mushroom[[var]]), var] <- Mode(mushroom[[var]], na.rm = TRUE)
}
#missmap(mushroom, main = "After data preparation of missing values")

# ---- Drop uninformative columns -----------------------------------------
# veil_type has a single unique value, so it carries no information.
drops <- c("veil_type")
mushroom <- mushroom[, !(names(mushroom) %in% drops)]

# ---- Encode and standardise the predictors ------------------------------
# Every feature except the target ("class") is converted from its
# categorical coding to numeric and scaled to zero mean / unit variance.
for (var in setdiff(names(mushroom), "class")) {
  mushroom[[var]] <- scale(as.numeric(mushroom[[var]]), center = TRUE, scale = TRUE)
}

# Dataset after imputation, encoding, and normalisation.
head(mushroom)

# remove helper objects that are no longer needed
rm(columnNames, drops, var, Mode)
|
97b5ba02308e0dfa72f53d05a61bcb4be512665a
|
19303ed7628348fdd90abf34431f188ec486237a
|
/MacheLeafletWidget.R
|
7b296548a306ba4c131e0f8f5be9f1bc0ee916c3
|
[] |
no_license
|
Hen-son/WebtechUrbanism
|
4a8c88016933efe2942010f7548d00399c5778bc
|
e7cf63fe567172e643b301c22a2c56f792454949
|
refs/heads/master
| 2022-04-26T20:16:18.003006
| 2020-04-28T17:36:42
| 2020-04-28T17:36:42
| 256,495,268
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 549
|
r
|
MacheLeafletWidget.R
|
# Build an interactive Leaflet map of Berlin web-technology companies and
# export it as a self-contained HTML page.
library(leaflet)
library(dplyr)
library(htmlwidgets)

webtech_berlin <- read.csv("data/webtech_located.csv", stringsAsFactors=FALSE)

# Popup text: company name and URL on separate lines.
label <- paste(webtech_berlin$Firma, webtech_berlin$URL, sep = "<br/>")

WebtechKarte <- webtech_berlin %>%
  leaflet() %>%
  addTiles() %>% # default OpenStreetMap tiles
  addMarkers(
    lng = ~Lon,
    lat = ~Lat,
    popup = ~as.character(label),
    label = ~as.character(Firma)
  )

# Save a stand-alone, interactive map as an HTML file.
saveWidget(widget = WebtechKarte, file = 'WebtechBerlin.html', selfcontained = TRUE)
|
bc6f2e8de257f1e16e1b41bdaf7e478cdf107d4f
|
114663abec8e5796de6f5ae859547dd31962185f
|
/indeed_data.R
|
bde07daff0bbd8043b5bbed3db8b6d4c5336e82a
|
[] |
no_license
|
pauljchoi2/DataFest
|
4ff637add26200cba58a044acd50e56dedffbc2a
|
967f41293cacb39e5992fbeddb9bf33fb5301762
|
refs/heads/master
| 2020-03-14T20:11:16.072407
| 2018-05-02T00:07:02
| 2018-05-02T00:07:02
| 131,772,666
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 539
|
r
|
indeed_data.R
|
# Load the DataFest 2018 Indeed job-postings extract; empty cells and the
# literal string "NA" are both read as missing values.
indeed_data <- read.csv("raw-data-datafest2018.csv", header = TRUE, na.strings = c("", "NA"))

# Recode the education requirement to an ordinal string code:
#   "None" -> "0", "High school" -> "1", "Higher education" -> "2"
# and store it back as a factor. local() keeps the helper variables out of
# the global environment.
indeed_data$educationRequirement <- local({
  codes <- c("None" = "0", "High school" = "1", "Higher education" = "2")
  edu <- as.character(indeed_data$educationRequirement)
  known <- edu %in% names(codes)
  edu[known] <- codes[edu[known]]
  as.factor(edu)
})
|
6181c25964cf6e94c9b06ba3335544b69a9ec304
|
d74e2c19e6e3901ba6c49ae811ae22869f164f8f
|
/ensemble.R
|
bcbe2caf23a6a0c8d1fefb67b9589352d6fb9c28
|
[] |
no_license
|
noahhhhhh/Melbourne_Datathon_Kaggle
|
c09604beca03ef334c819a765b8c79aa9b10f5dc
|
1039b7f333526c3ff85480e9502586bd1050d8bb
|
refs/heads/master
| 2021-01-17T05:08:22.808019
| 2015-12-11T04:40:43
| 2015-12-11T04:40:43
| 45,720,233
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,338
|
r
|
ensemble.R
|
# Blend the base-model predictions saved by upstream training scripts into a
# single submission, giving brand-new (unseen) accounts a default-scaled score.
# NOTE(review): rm(list = ls()) wipes the whole global environment -- fine for
# a standalone batch script, dangerous if sourced interactively.
rm(list = ls())
gc()
# Each .RData file is assumed to define the data.tables used below
# (dt.train, dt.valid1, dt.valid2, dt.test, dt.test.ensemble, dtSampleSubmit)
# -- TODO confirm exact contents; they are not visible in this file.
load("trainEnsemble.RData")
load("valid1Ensemble.RData")
load("valid2Ensemble.RData")
load("testEnsemble.RData")
# Flip the random-forest score so all model columns point the same direction.
dt.test.ensemble[, rf:= 1 - rf]
# try simple blending with rf[, 2]
# Simple unweighted blend: sum the per-model scores row-wise.
pred.bagging <- rowSums(dt.test.ensemble)
dt.submit <- data.table(Account_ID = dt.test$ACCOUNT_ID, Prediction = pred.bagging)
dim(dt.submit)
# [1] 12935 2
# Keep only the accounts required by the sample-submission template.
dt.submit <- merge(dtSampleSubmit, dt.submit, by = "Account_ID", all.x = T, sort = F)
# [1] 7374 2
# set the scores of new accts to .437 * 2 * original prediction
# Accounts seen in any of train/valid1/valid2 are "existing"; the rest are new.
trainAcct <- unique(dt.train$ACCOUNT_ID)
valid1Acct <- unique(dt.valid1$ACCOUNT_ID)
valid2Acct <- unique(dt.valid2$ACCOUNT_ID)
existingAcct <- c(trainAcct, valid1Acct, valid2Acct)
existingAcct <- unique(existingAcct)
length(existingAcct)
# [1] 20841
newAcct <- setdiff(dt.submit$Account_ID, existingAcct)
# Prediction_New <- rnorm(1001, mean = .43, sd = .1)
# Base rate used for new accounts (presumably the training-set positive rate
# -- TODO confirm where .437 comes from).
Prediction_New <- .437
dt.newAcct <- data.table(Account_ID = newAcct, Prediction_New = Prediction_New)
# bestSub[, Prediction := Prediction/14]
# For new accounts, rescale the blended score; existing accounts keep theirs
# (Prediction_New is NA for them after the left join).
newSub <- merge(dt.submit, dt.newAcct, by = "Account_ID", all.x = T)
newSub[, Prediction_New := Prediction * 2 * Prediction_New]
newSub[, Prediction := ifelse(is.na(newSub$Prediction_New), newSub$Prediction, newSub$Prediction_New)]
newSub[, Prediction_New := NULL]
# NOTE(review): `with = T` without a j expression looks like a leftover; this
# appears intended to inspect rows whose score equals the default -- confirm.
newSub[Prediction == .437, with = T]
|
d2ff39d8c5018df2a41f4b08f25e5332822541f7
|
6464efbccd76256c3fb97fa4e50efb5d480b7c8c
|
/paws/man/sesv2_get_deliverability_test_report.Rd
|
182348f6b3145378f657429737435478a0e3ff20
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
johnnytommy/paws
|
019b410ad8d4218199eb7349eb1844864bd45119
|
a371a5f2207b534cf60735e693c809bd33ce3ccf
|
refs/heads/master
| 2020-09-14T23:09:23.848860
| 2020-04-06T21:49:17
| 2020-04-06T21:49:17
| 223,286,996
| 1
| 0
|
NOASSERTION
| 2019-11-22T00:29:10
| 2019-11-21T23:56:19
| null |
UTF-8
|
R
| false
| true
| 619
|
rd
|
sesv2_get_deliverability_test_report.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sesv2_operations.R
\name{sesv2_get_deliverability_test_report}
\alias{sesv2_get_deliverability_test_report}
\title{Retrieve the results of a predictive inbox placement test}
\usage{
sesv2_get_deliverability_test_report(ReportId)
}
\arguments{
\item{ReportId}{[required] A unique string that identifies the predictive inbox placement test.}
}
\description{
Retrieve the results of a predictive inbox placement test.
}
\section{Request syntax}{
\preformatted{svc$get_deliverability_test_report(
ReportId = "string"
)
}
}
\keyword{internal}
|
e98f91deb3dbf16e8287b589720a5d9b684b0f1e
|
3faa635909c553b5ec850eb5fba14f57826838b5
|
/Demonstrator/MDL Workshop/Acceptance Testing/MDL-Workshop/TestScript_UC51.R
|
cb98c499b464996dee791677cdae381c8f99e14c
|
[] |
no_license
|
DDMoReFoundation/testingresources
|
6bdfa08288bed1c9e2d9167c2bd1889ac914e59e
|
c1af2a7610214e059d9bc570200e08fa43416e70
|
refs/heads/master
| 2021-01-23T02:16:25.321962
| 2016-07-29T11:26:35
| 2016-07-29T11:26:35
| 85,980,813
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,735
|
r
|
TestScript_UC51.R
|
#' Executing Nonmem against supported models
#' =========================================================================================
#' =========================================================================================
#' Initialisation
# Acceptance test for Use Case 5.1: load the MDL objects for the model and run
# a NONMEM estimation via the DDMoRe.TEL API. Requires an MDL IDE workspace
# path to be set in the calling environment.
if(!exists(".MDLIDE_WORKSPACE_PATH") || is.null(.MDLIDE_WORKSPACE_PATH)) {
  stop(".MDLIDE_WORKSPACE_PATH variable should be set to the path of the MDL IDE workspace")
}
# Provides printMessage(), .resultDir(), verifyEstimate() -- not visible here.
source(file.path(.MDLIDE_WORKSPACE_PATH,"Test-Utils/utils/utils.R"));
projectPath="MDL-Workshop"
modelsDir="models/"
# NOTE(review): setwd() mutates global state and is not restored; acceptable
# in this acceptance-test harness, which each script re-initialises.
setwd(.MDLIDE_WORKSPACE_PATH)
setwd(projectPath)
# Re-resolve projectPath to an absolute path.
projectPath=getwd()
mdlFileLocation=file.path(projectPath, modelsDir, "UseCase5_1")
setwd(mdlFileLocation)
mdlfile="UseCase5_1.mdl"
printMessage(paste("Working with", mdlfile))
printMessage("List objects in the environment")
objects("package:DDMoRe.TEL")
# Load each MDL object type from the model file; [[1]] takes the first (only)
# object of each kind. Bare object names below rely on R auto-printing.
printMessage("Loading in data object(s)")
myDataObj <- getDataObjects(mdlfile)[[1]]
printMessage("Data object:")
myDataObj
printMessage("Loading parameter object(s)")
myParObj <- getParameterObjects(mdlfile)[[1]]
printMessage("Parameter object:")
myParObj
printMessage("Loading model object(s)")
myModObj <- getModelObjects(mdlfile)[[1]]
printMessage("Model object:")
myModObj
printMessage("Loading Task Properties object(s)")
myTaskObj <- getTaskPropertiesObjects(mdlfile)[[1]]
printMessage("Task Properties object:")
myTaskObj
#' Exploratory Data Analysis
#' =========================
printMessage("Estimating with Nonmem")
resultDir = .resultDir("NONMEM");
# Run the estimation; returns a standard-output (SO) object.
soNonmem <- estimate(mdlfile, target="NONMEM", subfolder= resultDir);
outputDirectory = file.path(mdlFileLocation,resultDir)
verifyEstimate(soNonmem,outputDirectory)
# Keep a use-case-specific alias -- presumably consumed by later scripts in
# this test suite; do not rename.
soNonmemUC51 <- soNonmem;
printMessage("DONE")
|
de121d19350003c7fab53262d729ac9784466b97
|
0e754612e667b5f44de6194d73a3139bc624f048
|
/plotScripts/permutationTestMotifColocalization.R
|
4ab585d32df5b5df829873cac388e57efc5ebd72
|
[
"MIT"
] |
permissive
|
emsanford/combined_responses_paper
|
068d0a26a79533dbdb20ff83f67a764d669dd75f
|
6d8d1e6f9e3e660f8acd4e748b5a80b3bb6c6d7c
|
refs/heads/master
| 2021-08-08T01:55:47.880023
| 2021-01-03T23:20:22
| 2021-01-03T23:20:22
| 238,577,979
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,579
|
r
|
permutationTestMotifColocalization.R
|
# Permutation tests for motif co-localization in "superadditive" ATAC peaks
# (peaks whose combined RA+TGFb response exceeds the additive prediction).
library(tidyverse)
library(here)
# Peak/gene annotation tables produced by upstream extraction scripts.
joinedTib <- read_tsv(here('extractedData', 'joinedTablePeaksNearGenes.annotated.tsv'))
# NOTE(review): geneTib does not appear to be used in the analyses below.
geneTib <- read_tsv(here('extractedData', 'DeSeqOutputAllConds.annotated.tsv'))
peakTib <- read_tsv(here('extractedData', 'differentialAtacPeaks.annotated.tsv'))
# Number of random draws used to build each null distribution.
n.permutations.to.make.null.distributions <- 12000
# 1. Analysis on peaks near upregulated genes.
# Observed statistic: fraction of superadditive peaks carrying BOTH a TGFb
# motif match and an RA motif match; significance is assessed by comparing it
# to random same-sized draws from all upregulated peaks near these genes.
# Genes differentially expressed at any dose and upregulated at the high dose
# for BOTH signals.
genesUp <- joinedTib %>%
  filter(`RA-low_isDeGene` | `RA-med_isDeGene` | `RA-high_isDeGene`, `RA-high_log2fc` > 0) %>%
  filter(`TGFb-low_isDeGene` | `TGFb-med_isDeGene` | `TGFb-high_isDeGene`, `TGFb-high_log2fc` > 0)
# now get the superadditive peaks, unique set (avoid duplicates near genes that are close together)
genesUpPeaksUp <- genesUp %>%
  mutate(`isSuperAdditivePeak-med` = `peakAdditivePredFcResidual-med` > 1.5) %>%
  mutate(`isUpRegulatedPeak-med` = `TGFb-and-RA-med-avgFoldchange` > 1) %>%
  filter(`TGFb-med-avgFoldchange` > 1) %>%
  filter(`RA-med-avgFoldchange` > 1) %>%
  filter(`isUpRegulatedPeak-med`) %>%
  dplyr::select(matches(".*otif.*"), `isSuperAdditivePeak-med`, peak_startLoc, peak_chrom, `isUpRegulatedPeak-med`, ensg) %>%
  unique()
# filter(`peakAdditivePredFcResidual-med` > 1.5)
#idea: what kinds of motif matches enrich in the superadditive peaks near these genes? 334 superadditive peaks total...
# null distribution could be the rest of the peaks nearby that aren't superadditive (N = 4,313)
# to avoid double counting, could extract peak features only and unique-ify them
# can test specific motif frequencies, motif pair frequencies...
# first, test frequency of double motif match (all RA, all TGFb)
genesUpPeaksUp.nonSuperAdditive <- filter(genesUpPeaksUp, ! `isSuperAdditivePeak-med`)
genesUpPeaksUp.superAdditive <- filter(genesUpPeaksUp, `isSuperAdditivePeak-med`)
# Logical vectors: TRUE when a peak has BOTH a TGFb and an RA motif match.
dualMotifMatchesNotsuperadditive <- (genesUpPeaksUp.nonSuperAdditive$`group-allTGFB_maxMotifMatchScore` > 1) & (genesUpPeaksUp.nonSuperAdditive$`group-allRA_maxMotifMatchScore` > 1)
dualMotifMatchesSuperadditive <- (genesUpPeaksUp.superAdditive$`group-allTGFB_maxMotifMatchScore` > 1) & (genesUpPeaksUp.superAdditive$`group-allRA_maxMotifMatchScore` > 1)
dualMotifMatchesAll <- (genesUpPeaksUp$`group-allTGFB_maxMotifMatchScore` > 1) & (genesUpPeaksUp$`group-allRA_maxMotifMatchScore` > 1)
nonsuperadditive.average <- sum(dualMotifMatchesNotsuperadditive) / length(dualMotifMatchesNotsuperadditive)
superadditive.average <- sum(dualMotifMatchesSuperadditive) / length(dualMotifMatchesSuperadditive)
print(sprintf("fraction dual match, non-superadditive: %0.3f", nonsuperadditive.average))
print(sprintf("fraction dual match, superadditive: %0.3f", superadditive.average))
# permutation test for significance
samplesize <- length(dualMotifMatchesSuperadditive)
set.seed(0)  # reproducible permutations
# Preallocate the null distribution; growing a vector with c() inside a
# 12000-iteration loop is quadratic.
resultsvec <- numeric(n.permutations.to.make.null.distributions)
for (ii in seq_len(n.permutations.to.make.null.distributions)) {
  this.sample <- sample(dualMotifMatchesAll, samplesize)
  resultsvec[ii] <- sum(this.sample) / samplesize
}
qplot(resultsvec, bins = 100) +
  geom_vline(xintercept = superadditive.average, color = "red") +
  theme_minimal(base_size = 16) +
  ggtitle("Randomly sampled groups of peaks near upregulated genes,\nfraction that have dual motif matches") +
  xlab("Fraction of peaks with a TGFb motif match and an RA motif match")
# Where the observed superadditive fraction falls in the null distribution.
quantile(resultsvec, c(0, .001, 0.01, 0.025, .25, .50, .75, .975, 0.99, .999, 1))
# 2. Analysis on all upregulated peaks
# Same dual-motif (TGFb AND RA) permutation test as section 1, but over ALL
# peaks upregulated by both signals, not only those near upregulated genes.
#get the superadditive peaks, unique set (avoid duplicates near genes that are close together)
peaksUp <- peakTib %>%
  mutate(`isSuperAdditivePeak-med` = `peakAdditivePredFcResidual-med` > 1.5) %>%
  mutate(`isUpRegulatedPeak-med` = `TGFb-and-RA-med-avgFoldchange` > 1) %>%
  filter(`TGFb-med-avgFoldchange` > 1) %>%
  filter(`RA-med-avgFoldchange` > 1) %>%
  filter(`isUpRegulatedPeak-med`) %>%
  dplyr::select(matches(".*otif.*"), `isSuperAdditivePeak-med`, startLocs, chrom, `isUpRegulatedPeak-med`) %>%
  unique()
# filter(`peakAdditivePredFcResidual-med` > 1.5)
peaksUp.nonSuperAdditive <- filter(peaksUp, ! `isSuperAdditivePeak-med`)
peaksUp.superAdditive <- filter(peaksUp, `isSuperAdditivePeak-med`)
# Logical vectors: TRUE when a peak has BOTH a TGFb and an RA motif match.
dualMotifMatchesNotsuperadditive <- (peaksUp.nonSuperAdditive$`group-allTGFB_maxMotifMatchScore` > 1) & (peaksUp.nonSuperAdditive$`group-allRA_maxMotifMatchScore` > 1)
dualMotifMatchesSuperadditive <- (peaksUp.superAdditive$`group-allTGFB_maxMotifMatchScore` > 1) & (peaksUp.superAdditive$`group-allRA_maxMotifMatchScore` > 1)
dualMotifMatchesAll <- (peaksUp$`group-allTGFB_maxMotifMatchScore` > 1) & (peaksUp$`group-allRA_maxMotifMatchScore` > 1)
nonsuperadditive.average <- sum(dualMotifMatchesNotsuperadditive) / length(dualMotifMatchesNotsuperadditive)
superadditive.average <- sum(dualMotifMatchesSuperadditive) / length(dualMotifMatchesSuperadditive)
print(sprintf("fraction dual match, non-superadditive: %0.3f", nonsuperadditive.average))
print(sprintf("fraction dual match, superadditive: %0.3f", superadditive.average))
# permutation test for significance
samplesize <- length(dualMotifMatchesSuperadditive)
# samplesize <- 350
set.seed(0)
# Preallocate the null distribution instead of growing it with c().
resultsvec <- numeric(n.permutations.to.make.null.distributions)
for (ii in seq_len(n.permutations.to.make.null.distributions)) {
  this.sample <- sample(dualMotifMatchesAll, samplesize)
  resultsvec[ii] <- sum(this.sample) / samplesize
}
qplot(resultsvec, bins = 500) +
  geom_vline(xintercept = superadditive.average, color = "red") +
  theme_minimal(base_size = 16) +
  ggtitle("Randomly sampled groups of peaks upregulated by both signals,\nfraction that have dual motif matches") +
  xlab("Fraction of peaks with a TGFb motif match and an RA motif match")
quantile(resultsvec, c(0, .001, 0.01, 0.025, .25, .50, .75, .975, 0.99, .999, 1))
# Summary ratios using the section-1 objects: superadditive peaks per gene
# among genes that have at least one, and among all upregulated genes.
nGenesRepSuperAdd <- genesUpPeaksUp.superAdditive$ensg %>% unique() %>% length()
nSuperAddPeaksPerGene.nearUpregulatedWithSuperadditive <- nrow(genesUpPeaksUp.superAdditive) / nGenesRepSuperAdd
nGenesRep.nearUpregulated <- genesUpPeaksUp$ensg %>% unique() %>% length()
nSuperAddPeaksPerGene.nearAllUpregulated <- nrow(filter(genesUpPeaksUp, `isSuperAdditivePeak-med`)) / nGenesRep.nearUpregulated
# 3. Analysis on all upregulated peaks -- FOX motif test at superadditive peaks; hypothesis that FOX will be enriched
#get the superadditive peaks, unique set (avoid duplicates near genes that are close together)
# (pipeline identical to section 2; recomputed so the section runs standalone)
peaksUp <- peakTib %>%
  mutate(`isSuperAdditivePeak-med` = `peakAdditivePredFcResidual-med` > 1.5) %>%
  mutate(`isUpRegulatedPeak-med` = `TGFb-and-RA-med-avgFoldchange` > 1) %>%
  filter(`TGFb-med-avgFoldchange` > 1) %>%
  filter(`RA-med-avgFoldchange` > 1) %>%
  filter(`isUpRegulatedPeak-med`) %>%
  dplyr::select(matches(".*otif.*"), `isSuperAdditivePeak-med`, startLocs, chrom, `isUpRegulatedPeak-med`) %>%
  unique()
# filter(`peakAdditivePredFcResidual-med` > 1.5)
peaksUp.notSuperadditive <- filter(peaksUp, ! `isSuperAdditivePeak-med`)
peaksUp.superAdditive <- filter(peaksUp, `isSuperAdditivePeak-med`)
# TRUE when a peak has at least one FOX motif match.
foxMotifMatchNotsuperadditive <- (peaksUp.notSuperadditive$`group-FOX_maxMotifMatchScore` > 1)
foxMotifMatchSuperadditive <- (peaksUp.superAdditive$`group-FOX_maxMotifMatchScore` > 1)
foxMotifMatchAll <- (peaksUp$`group-FOX_maxMotifMatchScore` > 1)
nonsuperadditive.average <- sum(foxMotifMatchNotsuperadditive) / length(foxMotifMatchNotsuperadditive)
superadditive.average <- sum(foxMotifMatchSuperadditive) / length(foxMotifMatchSuperadditive)
print(sprintf("fraction FOX match, non-superadditive: %0.3f", nonsuperadditive.average))
print(sprintf("fraction FOX match, superadditive: %0.3f", superadditive.average))
# permutation test for significance
samplesize <- length(foxMotifMatchSuperadditive)
# samplesize <- 350
set.seed(0)
# Preallocate the null distribution instead of growing it with c().
resultsvec <- numeric(n.permutations.to.make.null.distributions)
for (ii in seq_len(n.permutations.to.make.null.distributions)) {
  this.sample <- sample(foxMotifMatchAll, samplesize)
  resultsvec[ii] <- sum(this.sample) / samplesize
}
qplot(resultsvec, bins = 500) +
  geom_vline(xintercept = superadditive.average, color = "red") +
  theme_minimal(base_size = 16) +
  ggtitle("Randomly sampled groups of peaks upregulated by both signals,\nfraction that have at least one FOX motif match") +
  xlab("Fraction of peaks with a FOX motif match")
quantile(resultsvec, c(0, .001, 0.01, 0.025, .25, .50, .75, .975, 0.99, .999, 1))
# 4. Analysis on all upregulated peaks -- AP1 motif test at superadditive peaks
#get the superadditive peaks, unique set (avoid duplicates near genes that are close together)
# (pipeline identical to section 2; recomputed so the section runs standalone)
peaksUp <- peakTib %>%
  mutate(`isSuperAdditivePeak-med` = `peakAdditivePredFcResidual-med` > 1.5) %>%
  mutate(`isUpRegulatedPeak-med` = `TGFb-and-RA-med-avgFoldchange` > 1) %>%
  filter(`TGFb-med-avgFoldchange` > 1) %>%
  filter(`RA-med-avgFoldchange` > 1) %>%
  filter(`isUpRegulatedPeak-med`) %>%
  dplyr::select(matches(".*otif.*"), `isSuperAdditivePeak-med`, startLocs, chrom, `isUpRegulatedPeak-med`) %>%
  unique()
# filter(`peakAdditivePredFcResidual-med` > 1.5)
peaksUp.notSuperadditive <- filter(peaksUp, ! `isSuperAdditivePeak-med`)
peaksUp.superAdditive <- filter(peaksUp, `isSuperAdditivePeak-med`)
# TRUE when a peak has at least one AP1 motif match.
ap1MotifMatchNotsuperadditive <- (peaksUp.notSuperadditive$`group-AP1_maxMotifMatchScore` > 1)
ap1MotifMatchSuperadditive <- (peaksUp.superAdditive$`group-AP1_maxMotifMatchScore` > 1)
ap1MotifMatchAll <- (peaksUp$`group-AP1_maxMotifMatchScore` > 1)
nonsuperadditive.average <- sum(ap1MotifMatchNotsuperadditive) / length(ap1MotifMatchNotsuperadditive)
superadditive.average <- sum(ap1MotifMatchSuperadditive) / length(ap1MotifMatchSuperadditive)
print(sprintf("fraction ap1 match, non-superadditive: %0.3f", nonsuperadditive.average))
print(sprintf("fraction ap1 match, superadditive: %0.3f", superadditive.average))
# permutation test for significance
samplesize <- length(ap1MotifMatchSuperadditive)
# samplesize <- 350
set.seed(0)
# Preallocate the null distribution instead of growing it with c().
resultsvec <- numeric(n.permutations.to.make.null.distributions)
for (ii in seq_len(n.permutations.to.make.null.distributions)) {
  this.sample <- sample(ap1MotifMatchAll, samplesize)
  resultsvec[ii] <- sum(this.sample) / samplesize
}
qplot(resultsvec, bins = 500) +
  geom_vline(xintercept = superadditive.average, color = "red") +
  theme_minimal(base_size = 16) +
  ggtitle("Randomly sampled groups of peaks upregulated by both signals,\nfraction that have at least one AP1 motif match") +
  xlab("Fraction of peaks with a AP1 motif match")
quantile(resultsvec, c(0, .001, 0.01, 0.025, .25, .50, .75, .975, 0.99, .999, 1))
# 5. Analysis on all upregulated peaks -- SMAD
# (pipeline identical to section 2; recomputed so the section runs standalone)
peaksUp <- peakTib %>%
  mutate(`isSuperAdditivePeak-med` = `peakAdditivePredFcResidual-med` > 1.5) %>%
  mutate(`isUpRegulatedPeak-med` = `TGFb-and-RA-med-avgFoldchange` > 1) %>%
  filter(`TGFb-med-avgFoldchange` > 1) %>%
  filter(`RA-med-avgFoldchange` > 1) %>%
  filter(`isUpRegulatedPeak-med`) %>%
  dplyr::select(matches(".*otif.*"), `isSuperAdditivePeak-med`, startLocs, chrom, `isUpRegulatedPeak-med`) %>%
  unique()
# filter(`peakAdditivePredFcResidual-med` > 1.5)
peaksUp.notSuperadditive <- filter(peaksUp, ! `isSuperAdditivePeak-med`)
peaksUp.superAdditive <- filter(peaksUp, `isSuperAdditivePeak-med`)
# TRUE when a peak has at least one SMAD motif match.
smadMotifMatchNotsuperadditive <- (peaksUp.notSuperadditive$`group-SMAD_maxMotifMatchScore` > 1)
smadMotifMatchSuperadditive <- (peaksUp.superAdditive$`group-SMAD_maxMotifMatchScore` > 1)
smadMotifMatchAll <- (peaksUp$`group-SMAD_maxMotifMatchScore` > 1)
nonsuperadditive.average <- sum(smadMotifMatchNotsuperadditive) / length(smadMotifMatchNotsuperadditive)
superadditive.average <- sum(smadMotifMatchSuperadditive) / length(smadMotifMatchSuperadditive)
print(sprintf("fraction smad match, non-superadditive: %0.3f", nonsuperadditive.average))
print(sprintf("fraction smad match, superadditive: %0.3f", superadditive.average))
# permutation test for significance
samplesize <- length(smadMotifMatchSuperadditive)
# samplesize <- 350
set.seed(0)
# Preallocate the null distribution instead of growing it with c().
resultsvec <- numeric(n.permutations.to.make.null.distributions)
for (ii in seq_len(n.permutations.to.make.null.distributions)) {
  this.sample <- sample(smadMotifMatchAll, samplesize)
  resultsvec[ii] <- sum(this.sample) / samplesize
}
qplot(resultsvec, bins = 500) +
  geom_vline(xintercept = superadditive.average, color = "red") +
  theme_minimal(base_size = 16) +
  ggtitle("Randomly sampled groups of peaks upregulated by both signals,\nfraction that have at least one smad motif match") +
  xlab("Fraction of peaks with a smad motif match")
quantile(resultsvec, c(0, .001, 0.01, 0.025, .25, .50, .75, .975, 0.99, .999, 1))
# 6. Analysis on all upregulated peaks -- RAR
# (pipeline identical to section 2; recomputed so the section runs standalone)
peaksUp <- peakTib %>%
  mutate(`isSuperAdditivePeak-med` = `peakAdditivePredFcResidual-med` > 1.5) %>%
  mutate(`isUpRegulatedPeak-med` = `TGFb-and-RA-med-avgFoldchange` > 1) %>%
  filter(`TGFb-med-avgFoldchange` > 1) %>%
  filter(`RA-med-avgFoldchange` > 1) %>%
  filter(`isUpRegulatedPeak-med`) %>%
  dplyr::select(matches(".*otif.*"), `isSuperAdditivePeak-med`, startLocs, chrom, `isUpRegulatedPeak-med`) %>%
  unique()
# filter(`peakAdditivePredFcResidual-med` > 1.5)
peaksUp.notSuperadditive <- filter(peaksUp, ! `isSuperAdditivePeak-med`)
peaksUp.superAdditive <- filter(peaksUp, `isSuperAdditivePeak-med`)
# TRUE when a peak has at least one RAR motif match.
rarMotifMatchNotsuperadditive <- (peaksUp.notSuperadditive$`group-RAR_maxMotifMatchScore` > 1)
rarMotifMatchSuperadditive <- (peaksUp.superAdditive$`group-RAR_maxMotifMatchScore` > 1)
rarMotifMatchAll <- (peaksUp$`group-RAR_maxMotifMatchScore` > 1)
nonsuperadditive.average <- sum(rarMotifMatchNotsuperadditive) / length(rarMotifMatchNotsuperadditive)
superadditive.average <- sum(rarMotifMatchSuperadditive) / length(rarMotifMatchSuperadditive)
print(sprintf("fraction rar match, non-superadditive: %0.3f", nonsuperadditive.average))
print(sprintf("fraction rar match, superadditive: %0.3f", superadditive.average))
# permutation test for significance
samplesize <- length(rarMotifMatchSuperadditive)
# samplesize <- 350
set.seed(0)
# Preallocate the null distribution instead of growing it with c().
resultsvec <- numeric(n.permutations.to.make.null.distributions)
for (ii in seq_len(n.permutations.to.make.null.distributions)) {
  this.sample <- sample(rarMotifMatchAll, samplesize)
  resultsvec[ii] <- sum(this.sample) / samplesize
}
qplot(resultsvec, bins = 500) +
  geom_vline(xintercept = superadditive.average, color = "red") +
  theme_minimal(base_size = 16) +
  ggtitle("Randomly sampled groups of peaks upregulated by both signals,\nfraction that have at least one RAR motif match") +
  xlab("Fraction of peaks with a RAR motif match")
quantile(resultsvec, c(0, .001, 0.01, 0.025, .25, .50, .75, .975, 0.99, .999, 1))
# 7. Analysis on all upregulated peaks -- AP1 + FOX motif test at superadditive peaks; hypothesis that both will be enriched
#get the superadditive peaks, unique set (avoid duplicates near genes that are close together)
# (pipeline identical to section 2; recomputed so the section runs standalone)
peaksUp <- peakTib %>%
  mutate(`isSuperAdditivePeak-med` = `peakAdditivePredFcResidual-med` > 1.5) %>%
  mutate(`isUpRegulatedPeak-med` = `TGFb-and-RA-med-avgFoldchange` > 1) %>%
  filter(`TGFb-med-avgFoldchange` > 1) %>%
  filter(`RA-med-avgFoldchange` > 1) %>%
  filter(`isUpRegulatedPeak-med`) %>%
  dplyr::select(matches(".*otif.*"), `isSuperAdditivePeak-med`, startLocs, chrom, `isUpRegulatedPeak-med`) %>%
  unique()
# filter(`peakAdditivePredFcResidual-med` > 1.5)
peaksUp.notSuperadditive <- filter(peaksUp, ! `isSuperAdditivePeak-med`)
peaksUp.superAdditive <- filter(peaksUp, `isSuperAdditivePeak-med`)
# TRUE when a peak has BOTH an AP1 motif match and a FOX motif match.
ap1AndFoxMotifMatchNotsuperadditive <- (peaksUp.notSuperadditive$`group-AP1_maxMotifMatchScore` > 1) & (peaksUp.notSuperadditive$`group-FOX_maxMotifMatchScore` > 1)
ap1AndFoxMotifMatchSuperadditive <- (peaksUp.superAdditive$`group-AP1_maxMotifMatchScore` > 1) & (peaksUp.superAdditive$`group-FOX_maxMotifMatchScore` > 1)
ap1AndFoxMotifMatchAll <- (peaksUp$`group-AP1_maxMotifMatchScore` > 1) & (peaksUp$`group-FOX_maxMotifMatchScore` > 1)
nonsuperadditive.average <- sum(ap1AndFoxMotifMatchNotsuperadditive) / length(ap1AndFoxMotifMatchNotsuperadditive)
superadditive.average <- sum(ap1AndFoxMotifMatchSuperadditive) / length(ap1AndFoxMotifMatchSuperadditive)
print(sprintf("fraction ap1AndFox match, non-superadditive: %0.3f", nonsuperadditive.average))
print(sprintf("fraction ap1AndFox match, superadditive: %0.3f", superadditive.average))
# permutation test for significance
samplesize <- length(ap1AndFoxMotifMatchSuperadditive)
# samplesize <- 350
set.seed(0)
# Preallocate the null distribution instead of growing it with c().
resultsvec <- numeric(n.permutations.to.make.null.distributions)
for (ii in seq_len(n.permutations.to.make.null.distributions)) {
  this.sample <- sample(ap1AndFoxMotifMatchAll, samplesize)
  resultsvec[ii] <- sum(this.sample) / samplesize
}
qplot(resultsvec, bins = 500) +
  geom_vline(xintercept = superadditive.average, color = "red") +
  theme_minimal(base_size = 16) +
  ggtitle("Randomly sampled groups of peaks upregulated by both signals,\nfraction that have at least one ap1AndFox motif match") +
  xlab("Fraction of peaks with a ap1AndFox motif match")
quantile(resultsvec, c(0, .001, 0.01, 0.025, .25, .50, .75, .975, 0.99, .999, 1))
# 8. Analysis on all upregulated peaks -- SMAD + FOX motif test at superadditive peaks; hypothesis that both will be enriched
#get the superadditive peaks, unique set (avoid duplicates near genes that are close together)
# (pipeline identical to section 2; recomputed so the section runs standalone)
peaksUp <- peakTib %>%
  mutate(`isSuperAdditivePeak-med` = `peakAdditivePredFcResidual-med` > 1.5) %>%
  mutate(`isUpRegulatedPeak-med` = `TGFb-and-RA-med-avgFoldchange` > 1) %>%
  filter(`TGFb-med-avgFoldchange` > 1) %>%
  filter(`RA-med-avgFoldchange` > 1) %>%
  filter(`isUpRegulatedPeak-med`) %>%
  dplyr::select(matches(".*otif.*"), `isSuperAdditivePeak-med`, startLocs, chrom, `isUpRegulatedPeak-med`) %>%
  unique()
# filter(`peakAdditivePredFcResidual-med` > 1.5)
peaksUp.notSuperadditive <- filter(peaksUp, ! `isSuperAdditivePeak-med`)
peaksUp.superAdditive <- filter(peaksUp, `isSuperAdditivePeak-med`)
# TRUE when a peak has BOTH a SMAD motif match and a FOX motif match.
smadAndFoxMotifMatchNotsuperadditive <- (peaksUp.notSuperadditive$`group-SMAD_maxMotifMatchScore` > 1) & (peaksUp.notSuperadditive$`group-FOX_maxMotifMatchScore` > 1)
smadAndFoxMotifMatchSuperadditive <- (peaksUp.superAdditive$`group-SMAD_maxMotifMatchScore` > 1) & (peaksUp.superAdditive$`group-FOX_maxMotifMatchScore` > 1)
smadAndFoxMotifMatchAll <- (peaksUp$`group-SMAD_maxMotifMatchScore` > 1) & (peaksUp$`group-FOX_maxMotifMatchScore` > 1)
nonsuperadditive.average <- sum(smadAndFoxMotifMatchNotsuperadditive) / length(smadAndFoxMotifMatchNotsuperadditive)
superadditive.average <- sum(smadAndFoxMotifMatchSuperadditive) / length(smadAndFoxMotifMatchSuperadditive)
print(sprintf("fraction smadAndFox match, non-superadditive: %0.3f", nonsuperadditive.average))
print(sprintf("fraction smadAndFox match, superadditive: %0.3f", superadditive.average))
# permutation test for significance
# Fixed copy-paste slip: the original read length(smadMotifMatchSuperadditive)
# (section 5's vector). The length is the same (both span the superadditive
# peak set), so behavior is unchanged, but this section now stands alone.
samplesize <- length(smadAndFoxMotifMatchSuperadditive)
# samplesize <- 350
set.seed(0)
# Preallocate the null distribution instead of growing it with c().
resultsvec <- numeric(n.permutations.to.make.null.distributions)
for (ii in seq_len(n.permutations.to.make.null.distributions)) {
  this.sample <- sample(smadAndFoxMotifMatchAll, samplesize)
  resultsvec[ii] <- sum(this.sample) / samplesize
}
qplot(resultsvec, bins = 500) +
geom_vline(xintercept = superadditive.average, color = "red") +
theme_minimal(base_size = 16) +
ggtitle("Randomly sampled groups of peaks upregulated by both signals,\nfraction that have at least one smadAndFox motif match") +
xlab("Fraction of peaks with a smadAndFox motif match")
quantile(resultsvec, c(0, .001, 0.01, 0.025, .25, .50, .75, .975, 0.99, .999, 1))
|
10d2c27e807438639ac779a4894de5fc35c2eef7
|
0c90b72ff5b57481b7edb74620333a7c3e3c603f
|
/R_finance/R_Quant_Portfolio/dynamic_assetallocation_strategy.R
|
7825af35608770e2e4044b5b01837dcb05890ee7
|
[] |
no_license
|
diligejy/R
|
41abd1cc6ef3731366f3196d3d428fd5541215e5
|
1b54334094c54e041b81d45f87c5c07336d62ff9
|
refs/heads/master
| 2022-09-11T18:13:10.614301
| 2022-08-21T14:57:21
| 2022-08-21T14:57:21
| 210,329,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,726
|
r
|
dynamic_assetallocation_strategy.R
|
library(quantmod)
library(PerformanceAnalytics)
library(RiskPortfolios)
library(tidyr)
library(dplyr)
library(ggplot2)
# Global Dynamic Asset Allocation (GDAA) backtest:
# each month, select the 5 assets with the best trailing 12-month return and
# weight them by minimum-volatility optimization with 10%-30% bounds.
symbols = c('SPY', # US equities
            'IEV', # European equities
            'EWJ', # Japanese equities
            'EEM', # emerging-market equities
            'TLT', # US long-term Treasuries
            'IEF', # US intermediate-term Treasuries
            'IYR', # US REITs
            'RWX', # global REITs
            'GLD', # gold
            'DBC' # commodities
)
getSymbols(symbols, src = 'yahoo')
# adjusted close prices, one column per ticker
prices = do.call(cbind,
                 lapply(symbols, function(x) Ad(get(x)))) %>%
  setNames(symbols)
rets = Return.calculate(prices) %>% na.omit()
# month-end indices used as rebalancing points
ep = endpoints(rets, on = 'months')
wts = list()
lookback = 12 # momentum lookback, in months
wt_zero = rep(0, 10) %>% setNames(colnames(rets))
# rebalance at each month-end once a full lookback window is available
for (i in (lookback + 1) : length(ep)){
  sub_ret = rets[ep[i-lookback] : ep[i], ]
  cum = Return.cumulative(sub_ret)
  K = rank(-cum) <= 5 # top-5 momentum assets
  covmat = cov(sub_ret[, K])
  wt = wt_zero
  # min-volatility weights with user bounds 10%-30% per selected asset
  wt[K] = optimalPortfolio(covmat,
                           control = list(type = 'minvol',
                                          constraint = 'user',
                                          LB = rep(0.10, 5),
                                          UB = rep(0.30, 5)))
  wts[[i]] = xts(t(wt), order.by = index(rets[ep[i]]))
}
wts = do.call(rbind, wts)
GDAA = Return.portfolio(rets, wts, verbose = TRUE)
charts.PerformanceSummary(GDAA$returns, main = '동적자산배분')
# plot the weight history as a stacked area chart
wts %>% fortify.zoo() %>%
  gather(key, value, -Index) %>%
  mutate(Index = as.Date(Index)) %>%
  mutate(Key = factor(key, levels = unique(key))) %>%
  ggplot(aes(x = Index, y = value)) +
  geom_area(aes(color = key, fill = key),
            position = 'stack') +
  xlab(NULL) + ylab(NULL) + theme_bw() +
  scale_x_date(date_breaks ="years", date_labels = "%Y",
               expand = c(0, 0)) +
  scale_y_continuous(expand = c(0, 0)) +
  theme(plot.title = element_text(hjust = 0.5,
                                  size = 12),
        legend.position = 'bottom',
        legend.title = element_blank(),
        axis.text.x = element_text(angle = 45,
                                   hjust = 1, size = 8),
        panel.grid.minor.x = element_blank()) +
  guides(color = guide_legend(byrow = TRUE))
# turnover: sum of absolute weight changes at each rebalance
GDAA$turnover = xts(
  rowSums(abs(GDAA$BOP.Weight -
                timeSeries::lag(GDAA$EOP.Weight)),
          na.rm = TRUE),
  order.by = index(GDAA$BOP.Weight))
chart.TimeSeries(GDAA$turnover)
fee = 0.0030 # per-unit-turnover transaction cost assumption (30 bp)
GDAA$net = GDAA$returns - GDAA$turnover*fee
# compare gross vs net-of-fee performance
cbind(GDAA$returns, GDAA$net) %>%
  setNames(c('No Fee', 'After Fee')) %>%
  charts.PerformanceSummary(main = 'GDAA')
|
e706b8e3ce794f17ac02bb29409e093bc4698d1c
|
8925be5c960950efecf706f2a1b07472aced61ec
|
/src/R/run.edgeR.on.simulation.R
|
d23813cfa6ba7eb89ee4dc136c13681db2405c89
|
[] |
no_license
|
heejungshim/multiscale_analysis
|
fca34a072a2ed80fa1e28b1cb6f464bcc7577409
|
b22c566894ec44e5bbb8570fd335697613f626c9
|
refs/heads/master
| 2021-01-18T23:40:15.643632
| 2016-05-14T19:02:13
| 2016-05-14T19:02:13
| 20,192,741
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,786
|
r
|
run.edgeR.on.simulation.R
|
## `run.edgeR.on.simulation.R' contains scrits to run edgeR on simulated data.
##
##
## Example Usage (see command in /mnt/lustre/home/shim/multiscale_analysis/analysis/simulation/sample_size/simulation_manyQTLfinal_v1/sum/edgeR/com/): R CMD BATCH --no-save --no-restore "--args wd.path='/mnt/lustre/home/shim/multiscale_analysis/analysis/simulation/sample_size/simulation_manyQTLfinal_v1/' numSam=10 read.depth.ratio=1 num.simu=578 siteSize=1024 filter.cut=0 combineNullandAlt=TRUE separateNullandAlt=TRUE" /mnt/lustre/home/shim/multiscale_analysis/src/R/run.edgeR.on.simulation.R
##
##
## wd.path : working directory path
## numSam : number of sample
## read.depth.ratio : library read depth (either x0.5, x1, x2, x4)
## num.simu : number of simulations
## siteSize : site size in simulation
## filter.cut : analysis includes window with read count > filter.cut
## null.path : path to directory where null data have been saved.
## combineNullandAlt : if it is true, the script runs software after combining null and alternative data sets
## separateNullandAlt : if it is true, the script runs software after combining null and alternative data sets
##
##
## Copyright (C) 2014 Heejung Shim
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
args = (commandArgs(TRUE))
eval(parse(text=args))
if(!exists("null.path")){
null.path = NULL
}
if(!exists("combineNullandAlt")){
combineNullandAlt = TRUE
}
if(!exists("separateNullandAlt")){
separateNullandAlt = TRUE
}
##wd.path='/mnt/lustre/home/shim/multiscale_analysis/analysis/simulation/sample_size/simulation_manyQTLfinal_v2/'
##numSam=10
##read.depth.ratio=1
##num.simu=578
##siteSize=1024
##filter.cut=0
##null.path='/mnt/lustre/home/shim/multiscale_analysis/analysis/simulation/sample_size/simulation_manyQTLfinal_v1/'
##combineNullandAlt = TRUE
##separateNullandAlt = TRUE
library("edgeR")
window.size.list = c(100, 300, 1024)
## set up working directory
setwd(wd.path)
## set up directory name
if(read.depth.ratio==0.5){
dir.name=paste0("halfread.", numSam, "ind")
}
if(read.depth.ratio==1){
dir.name=paste0("fullread.", numSam, "ind")
}
if(read.depth.ratio==2){
dir.name=paste0("fullread2.", numSam, "ind")
}
if(read.depth.ratio==4){
dir.name=paste0("fullread4.", numSam, "ind")
}
## set up gp to run edgeR
gp = factor(c(rep(0,numSam/2), rep(1,numSam/2)) ,labels=c("A","B"))
## -------------------------------------------------------------------------
## Helper: run the standard edgeR exact-test pipeline on one count matrix.
## (Extracted because the identical pipeline was previously duplicated three
## times inside the loop below.)
##
## edgeR.data : data frame of window-level read counts (rows = windows)
## gp         : factor of group labels, one per sample/column
## filter.cut : windows with total read count <= filter.cut are excluded
## numC       : number of windows per site (used to reshape the p-values so
##              a per-site minimum can be taken)
##
## Returns a list with the edgeR fit (`res`), the filter mask (`use`), and
## the per-site minimum p-value (`min.pval`; NA when a site has no usable
## windows).
run.edgeR.once = function(edgeR.data, gp, filter.cut, numC){
  ## filter data
  rsum = rowSums(edgeR.data)
  use = ((rsum > filter.cut) & (!is.na(rsum)))
  countData.filtered = edgeR.data[ use, ]
  ## Perform edgeR Analysis
  res = DGEList(counts=countData.filtered, group=gp)
  res = calcNormFactors(res, method="RLE")
  res = estimateCommonDisp(res)
  res = estimateTagwiseDisp(res)
  res = exactTest(res, dispersion="auto")
  ## get p-value (NA for windows removed by the filter)
  pval.vec = rep(NA, length(use))
  pval.vec[use==TRUE] = res$table$PValue
  pval.filtered = matrix(pval.vec, ncol=numC, byrow=TRUE)
  ## get minimum p-value for each site
  min.pval = apply(pval.filtered, 1, min, na.rm=TRUE)
  min.pval[is.infinite(min.pval)] = NA
  return(list(res=res, use=use, min.pval=min.pval))
}

## run the analysis for each window size
for(ww in 1:3){
  window.size = window.size.list[ww]
  numC = siteSize%/%window.size
  ## read null data (null sets may live under a separate directory tree)
  if(is.null(null.path)){
    input.path = paste0(wd.path, "null/DESeq/", dir.name, ".", window.size, ".run/")
  }else{
    input.path = paste0(null.path, "null/DESeq/", dir.name, ".", window.size, ".run/")
  }
  edgeR.data.null = read.table(paste0(input.path, "data.txt"))
  ## read alt data
  input.path = paste0(wd.path, "alt/DESeq/", dir.name, ".", window.size, ".run/")
  edgeR.data.alt = read.table(paste0(input.path, "data.txt"))
  if(combineNullandAlt){
    ## analyze null and alternative data sets together
    fit = run.edgeR.once(rbind(edgeR.data.null, edgeR.data.alt), gp, filter.cut, numC)
    res = fit$res
    use = fit$use
    min.pval = fit$min.pval
    ## try to save output
    output.path = paste0(wd.path, "sum/edgeR/", dir.name, ".", window.size)
    ## output min.pval
    write.table(min.pval, file = paste0(output.path, ".min.pval.txt"), quote= FALSE, row.names = FALSE, col.names = FALSE)
    ## output object just in case we want to take a close look!
    save("res", "use", file =paste0(output.path, ".Robj"))
  }
  if(separateNullandAlt){
    ## analyze null and alternative data sets separately
    fit.null = run.edgeR.once(edgeR.data.null, gp, filter.cut, numC)
    fit.alt = run.edgeR.once(edgeR.data.alt, gp, filter.cut, numC)
    res.null = fit.null$res
    use.null = fit.null$use
    res.alt = fit.alt$res
    use.alt = fit.alt$use
    ## concatenate per-site minima and filter masks: null first, then alt
    min.pval = c(fit.null$min.pval, fit.alt$min.pval)
    use = c(use.null, use.alt)
    ## try to save output
    output.path = paste0(wd.path, "sum/edgeR/sep.", dir.name, ".", window.size)
    ## output min.pval
    write.table(min.pval, file = paste0(output.path, ".min.pval.txt"), quote= FALSE, row.names = FALSE, col.names = FALSE)
    ## output object just in case we want to take a close look!
    save("res.alt", "res.null", "use", "use.alt", "use.null", file =paste0(output.path, ".Robj"))
  }
}
|
b9857ab541c3afa1b25c768f7df06aeca8497447
|
4b02f42b79947ee0014fb3478fa255d1ee363314
|
/scripts/annotation.r
|
ab19af987e62039a70931dbfb4ef027d78ef1b32
|
[] |
no_license
|
mavishou/project
|
0893b82135322570abd3e5b2373e570f717b0318
|
4b8943a40924ca61b90f1f8ca66b7ed3c18975ef
|
refs/heads/master
| 2021-01-22T05:33:34.865770
| 2014-11-14T14:35:24
| 2014-11-14T14:35:24
| 24,197,639
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 606
|
r
|
annotation.r
|
# Summarize transcript annotation: tabulate biotypes and sources, recode
# missing ('-') biotypes to 'lincRNA', and write the cleaned table.
anno <- read.table("V4_transcript_annotation_0331.txt", sep = '\t', header=T, stringsAsFactors=F)
# frequency tables, most common first
bioType <- sort(table(anno[['gene_biotype']]), decreasing=T)
src <- sort(table(anno[['source']]), decreasing=T)
write.table(bioType, file = 'bio_type.txt', col.names=F, quote=F, sep = '\t')
write.table(src, file = 'source.txt', col.names=F, quote=F, sep = '\t')
# inspect rows whose biotype (column 3) is missing before recoding them
anno[anno[, 3] == '-', c(1, 3, 4, 5)]
anno[anno[, 3] == '-', 3] <- 'lincRNA'
# write the final annotation keeping columns 1,2,3,6,7
write.table(anno[, c(1,2,3,6,7)], file = 'V4_final_transcript_annotation_0401.txt',
            sep = '\t', quote = F, row.names = F)
final <- anno[, c(1,2,3,6,7)]
|
afbdd9ead82bf91d2e28cd12e57345e841f3ae41
|
50bf542824fc7d964a25e3bf30b2c2a9218acd8f
|
/R/simulate_SIR.R
|
632af616f6afd4fd2cffb5cab4fcee19ed10d2d5
|
[] |
no_license
|
fintzij/BDAepimodel
|
9a00a9796bbd302bace101e85bed05fdc17bb9fb
|
0a59d785050401942bad2138cb13af58dc0dc62f
|
refs/heads/master
| 2021-01-21T01:58:15.844456
| 2020-09-15T14:57:40
| 2020-09-15T14:57:40
| 36,148,207
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,460
|
r
|
simulate_SIR.R
|
#' Simulate an epidemic with SIR dynamics and draw binomial samples.
#'
#' @param obstimes vector of observation times
#' @param params vector of parameters, must be in the following order: beta, mu,
#'   rho, S0, I0, R0
#' @param popsize population size
#' @param init_config compartment counts at t0. If \code{NULL}, an initial
#'   configuration is drawn from a multinomial with probabilities
#'   \code{params[4:6]}, redrawn until at least one infective is present.
#' @param trim logical for whether to trim the observations after the epidemic
#'   has died off
#'
#' @return list with a population bookkeeping matrix and an observation matrix
#' @export
#'
#' @examples obstimes <- 0:10
#' params <- c(0.04, 1, 0.5, 0.9, 0.03, 0.07)
#' popsize <- 100
#'
#' epidemic <- simulate_SIR(obstimes, params, popsize)
simulate_SIR <- function(obstimes, params, popsize, init_config = NULL, trim = TRUE) {
    if(is.null(init_config)) {
        # generate an initial configuration; resample until the epidemic can
        # actually start (at least one initial infective)
        init_config <- rmultinom(1, popsize, params[4:6])
        while(init_config[2] == 0) {
            init_config <- rmultinom(1, popsize, params[4:6])
        }
    }
    # simulate the event history; params[1:2] = (beta, mu)
    pop_mat <- simulateSIR(obstimes = obstimes, params = params[1:2], init_config = init_config)
    # get rid of the empty (time == 0) buffer rows.
    # BUGFIX: guard against which() returning integer(0) -- the previous
    # `pop_mat[-which(...), ]` would have dropped EVERY row in that case.
    empty_rows <- which(pop_mat[, 1] == 0)
    if(length(empty_rows) > 0) {
        pop_mat <- pop_mat[-empty_rows, , drop = FALSE]
    }
    # add the observation times as placeholder rows (ID = 0, no event)
    pop_mat <- rbind(pop_mat, cbind(obstimes, matrix(0, nrow = length(obstimes), ncol = 5)))
    # reorder the matrix by time
    row_ord <- order(pop_mat[,1])
    pop_mat <- reorderMat(pop_mat, row_ord)
    # attach column names
    colnames(pop_mat) <- c("time", "ID", "Event", "S", "I", "R")
    # set the compartment counts at observation times by carrying counts
    # forward from the row immediately preceding each observation row
    obs_time_inds <- which(pop_mat[,"ID"] == 0)
    pop_mat[1, c("S", "I", "R")] <- init_config
    for(j in 2:length(obs_time_inds)) {
        pop_mat[obs_time_inds[j], c("S", "I", "R")] <- pop_mat[obs_time_inds[j] - 1, c("S", "I", "R")]
    }
    # initialize the observation matrix
    obs_mat <- matrix(0, nrow = length(obstimes), ncol = 3)
    colnames(obs_mat) <- c("time", "I_observed", "I_augmented")
    obs_mat[,"time"] <- obstimes
    obs_mat[,"I_augmented"] <- pop_mat[obs_time_inds, "I"]
    # binomial sampling of infecteds with detection probability rho = params[3]
    obs_mat[,"I_observed"] <- rbinom(length(obstimes), obs_mat[,"I_augmented"], params[3])
    # if trim is TRUE, then trim off the excess 0 measurements in the case
    # when the epidemic died off before the final observation time.
    if(isTRUE(trim) && any(pop_mat[obs_time_inds, "I"] == 0)) {
        # find the index of the first observation time after the epidemic ends
        end_ind <- which(obstimes > pop_mat[which(pop_mat[,"I"] == 0)[1], "time"])[1]
        # trim the observation times, observation matrix, and data matrix
        # (drop = FALSE keeps obs_mat a matrix even if only one row remains)
        obstimes <- obstimes[1:end_ind]
        obs_mat <- obs_mat[1:end_ind, , drop = FALSE]
        # reset the final observation time in the configuration matrix
        pop_mat <- pop_mat[pop_mat[,"time"] <= max(obstimes), , drop = FALSE]
    }
    return(list(pop_mat = pop_mat, obs_mat = obs_mat))
}
|
153e87a1a621e96c3c4c2eb86eb358994b2f2e65
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/klaR/examples/TopoS.Rd.R
|
c1f9a007664a22ab68a9506a56596b8d873549b2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 355
|
r
|
TopoS.Rd.R
|
library(klaR)
### Name: TopoS
### Title: Computation of criterion S of a visualization
### Aliases: TopoS
### Keywords: internal

### ** Examples

# Compute S for the MDS visualization of the german business data.
# Distances in the original 13-dimensional space vs. in the 2-d MDS
# projection; compute each distance object once and reuse it.
data(B3)
d.orig <- dist(B3[, 2:14])
mds.proj <- cmdscale(d.orig)
plot(mds.proj, col = rainbow(4)[B3[, 1]], pch = 16)
TopoS(d.orig, dist(mds.proj))
|
e9369f481a5fb265854229c7ece8e5de28c166a1
|
694c5b6e59fd4e0ee09515834bc8b800e2b596cc
|
/glmerDiagnostics.R
|
f56c9eed919be61d7cff7a8f7754873464e0830c
|
[] |
no_license
|
CYGUBICKO/wash
|
b59985002deeed11dc45859cb7b10734ad3f7a4c
|
06335f646cb73893a8f4d6c02e3dbda8409d51dc
|
refs/heads/master
| 2021-07-09T03:32:46.114245
| 2020-08-28T01:41:09
| 2020-08-28T01:41:09
| 187,945,140
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 992
|
r
|
glmerDiagnostics.R
|
#### ---- Project: APHRC Wash Data ----
#### ---- Task: Simulation ----
#### ---- Complex Diagnostics ----
#### ---- By: Steve and Jonathan ----
#### ---- Date: 2019 May 06 (Mon) ----
library(dplyr)
library(tidyr)
library(tibble)
library(ggplot2)
library(gridExtra)
library(lme4)
library(lattice)
library(dotwhisker)
theme_set(theme_bw() + theme(panel.spacing=grid::unit(0,"lines")))
# Load fitted models and simulation inputs produced by the upstream script
load("complexGlmer.rda")
# Incoming objects:
# * complexglmer_list - glmer fits per simulation
# * complexcoef_df - fixed effect coef per simulation
# * betas_df & betas - initial beta values for simulations
# * predictors
# Diagnose the first simulated fit only
lme4_model <- complexglmer_list[[1]]
long_df <- complexdf_list[[1]]
# Parameter estimates (dot-and-whisker plot of fixed effects)
dwplot(lme4_model, effects = "fixed", by_2sd = FALSE)
# Residual plots
plot(lme4_model, service ~ resid(.))
plot(lme4_model)
# Compute profile confidence intervals for comparison (slow; disabled)
#lme4_CI_prof <- confint(lme4_model)
save(file = "glmerDiagnostics.rda"
, lme4_model
# , lme4_CI_prof
)
|
eb05eb43c1c85be99b021562823f8f316990bf4c
|
f132901efd10d3d2bcf6624bfb0e622cf759c5cc
|
/run_analysis.R
|
0d97352bf7ee49e2921721cf4171fe08705c4cae
|
[] |
no_license
|
tdwis/course3_project
|
2abb8afb8bf297457163a755cee2265eb69602ea
|
47cfab1bd3463ba370bee2d036a873374b1cb4a1
|
refs/heads/master
| 2021-05-04T06:03:20.647235
| 2016-10-16T10:44:45
| 2016-10-16T10:44:45
| 71,021,309
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,661
|
r
|
run_analysis.R
|
# Build a tidy summary of the UCI HAR (smartphone activity) dataset:
# merge train/test sets, keep mean()/std() features, label activities,
# and average each variable by subject and activity.

# read raw data
x1 <- read.table(".\\UCI HAR Dataset\\train\\X_train.txt")
y1 <- read.table(".\\UCI HAR Dataset\\train\\y_train.txt")
s1 <- read.table(".\\UCI HAR Dataset\\train\\subject_train.txt")
x2 <- read.table(".\\UCI HAR Dataset\\test\\X_test.txt")
y2 <- read.table(".\\UCI HAR Dataset\\test\\y_test.txt")
s2 <- read.table(".\\UCI HAR Dataset\\test\\subject_test.txt")
# renamed from `c` to avoid shadowing base::c
feat <- read.table(".\\UCI HAR Dataset\\features.txt")
a <- read.table(".\\UCI HAR Dataset\\activity_labels.txt")
# keep only the mean() and std() feature columns (fixed = TRUE so the
# parentheses are matched literally, excluding meanFreq etc.)
m <- subset(feat, grepl("mean()", feat$V2, fixed=TRUE)|grepl("std()", feat$V2, fixed=TRUE))
x1<-x1[,m$V1]
x2<-x2[,m$V1]
# set column names
colnames(x1)<-m$V2
colnames(x2)<-m$V2
colnames(y1)<-"ActivityCode"
colnames(y2)<-"ActivityCode"
colnames(s1)<-"Subject"
colnames(s2)<-"Subject"
colnames(a)<-c("ActivityCode","ActivityLabel")
# merge raw data file (subject, activity, features)
t1<-cbind(s1,y1,x1)
t2<-cbind(s2,y2,x2)
# combine training and test data
df<-rbind(t1,t2)
# decode ActivityCode into its descriptive label.
# BUGFIX: the argument was previously spelled `All=True`, which R silently
# ignored (argument matching is case-sensitive and `True` was never forced);
# `all = TRUE` keeps unmatched rows from both sides as intended.
mdf = merge(df, a, by.x="ActivityCode", by.y="ActivityCode", all = TRUE)
# make column names descriptive
names(mdf) <- gsub("^t", "Time", names(mdf))
names(mdf) <- gsub("^f", "Frequency", names(mdf))
names(mdf) <- gsub("-mean\\(\\)", "Mean", names(mdf))
names(mdf) <- gsub("-std\\(\\)", "StdDev", names(mdf))
names(mdf) <- gsub("-", "", names(mdf))
names(mdf) <- gsub("BodyBody", "Body", names(mdf))
# first Data Set
mdf
# aggregate / average data by subject and activity
aggdata <-aggregate(mdf[ ,names(mdf) != "ActivityLabel"] , by=list(mdf$Subject, mdf$ActivityCode, mdf$ActivityLabel), FUN=mean, na.rm=TRUE)
# Tidy / Second Data Set
aggdata
write.table(aggdata, "aggdata.txt", row.names = FALSE)
|
b7c5ea6d48b67c328d949c743be23e049bebbc94
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Introduction_To_Probability_And_Statistics_by_William_Mendenhall,_Robert_J_Beaver,_And_Barbara_M_Beaver/CH9/EX9.5/Ex9_5.R
|
6bc299f993ce36c71c9989d6ebd453285a49cc11
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 593
|
r
|
Ex9_5.R
|
# One-sample z-test for the mean (sigma known):
# H0: mu = 880 vs Ha: mu != 880, two-tailed at the 5% level.
sample_mean <- 871; # sample mean
hypothesized_value <- 880; # hypothesized value
sigma <- 21; # standard deviation
sample_size <- 50; # sample size

# test statistic: z = (xbar - mu0) / (sigma / sqrt(n))
std_error <- sigma / sqrt(sample_size)
z <- round((sample_mean - hypothesized_value) / std_error, 2)
z # test statistic

# two-tailed critical value at significance level alpha
alpha <- .05
z.alpha <- round(qnorm(1 - alpha/2), 2)
z.alpha # critical value

cat("The value of z is",z)
cat("critical value is",z.alpha)
cat("null hypothesis can be rejected")
cat("she is reasonably confident that the decision is correct")
|
3785dc31d295828658aa0d8c24fdea9d05e05221
|
21cd3dc891f02c973a581c5bce9cde72491e9dbb
|
/R/SimCMk.R
|
b4b51f597f4c45e6344884215a0c2799150bc5f4
|
[] |
no_license
|
aggounekatia/simCHMk
|
ff47d35774050663857e4b0640f7de2cc79dfe89
|
e7631e447e6cb4e0e767fe38f89b2a7fa54cccd7
|
refs/heads/main
| 2023-06-04T13:00:32.618294
| 2021-06-30T10:24:26
| 2021-06-30T10:24:26
| 381,660,971
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 401
|
r
|
SimCMk.R
|
#' Simulate a Markov chain on a finite state space
#' @export
#' @param P matrix of transition probabilities
#' @param mu numeric vector giving the initial distribution
#' @param n numeric number of steps to simulate
SimCMk <- function(P, mu, n) {
  n.states <- length(mu)
  states <- seq(1, n.states, 1)
  path <- rep(0, n + 1)
  steps <- 0:n
  # draw the initial state from mu, then repeatedly step using the row of P
  # corresponding to the current state (rdist draws from a discrete dist.)
  path[1] <- rdist(states, mu)
  for (step in 1:n) {
    path[step + 1] <- rdist(states, P[path[step], ])
  }
  # plot the simulated trajectory over time
  plot(steps, path, pch = 8, xlim = c(0, n), ylim = c(1, n.states + 1), col = 3)
}
|
a955b95d786f0f4cefcebcc7d9fa11442aded96e
|
064b30bd608a223d8afeb077d32feda26e3c70af
|
/scripts/randomForestModel.R
|
7dd28147dd68a3355838a6566bae639dc0ecbbbd
|
[] |
no_license
|
dipeshnainani/Bank-Marketing-System-Analysis
|
6485cea877499a50dba7c709bc39626d568fbd11
|
9277bd8448b05c07aae703822df373e54fccfc05
|
refs/heads/master
| 2020-03-10T02:13:25.018558
| 2018-04-11T17:37:44
| 2018-04-11T17:37:44
| 129,131,004
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 948
|
r
|
randomForestModel.R
|
# Prediction using random forest model
# NOTE(review): assumes `testingDataSet` and `trainingDataSet.clean.equal`
# already exist in the session (created by an earlier script).
rf.test<-testingDataSet
# Creating vector of target variable
rf.label <- as.factor(trainingDataSet.clean.equal$y)
# Changing yes and no to 1 and 0 for making label compatible to Random forest model
rf.label <- ifelse(rf.label=="yes", 1, 0)
str(rf.label)
table(rf.label)
rf.label<-factor(rf.label)
# training predictors: everything except the target column
rf.train <- trainingDataSet.clean.equal
rf.train$y<-NULL
set.seed(1234)
rf <- randomForest(x = rf.train, y = rf.label, importance = TRUE, ntree = 1000)
rf
varImpPlot(rf)
# hold out the known outcomes, then predict on the test predictors only
Testing.knownOutput<-rf.test$y
rf.test$y<-NULL
#rf.test<- kNN(rf.test,k=10)
rf.y <- predict(rf, rf.test)
str(rf.y)
table(rf.y)
table(testingDataSet$y)
# recode actual outcomes to 0/1 to match the model's label encoding
rf.actual <- ifelse(testingDataSet$y=="yes", 1, 0)
rf.actual<-factor(rf.actual)
# confusion matrix (rows = actual, columns = predicted)
cm = as.matrix(table(Actual = rf.actual, Predicted = rf.y))
cm
table(rf.actual)
# ROC curve / AUC for the predictions
auc <- roc(as.numeric(rf.actual),as.numeric( rf.y))
print(auc)
plot(auc,legacy.axes = TRUE)
|
098e9c283be61dbc7d7476a39c904c6d0cdccef9
|
b19c97da15b61a3f9ec831e454ab38889d5bf555
|
/Functions_plotting/dot_plots_per_class_ext.R
|
297baffc4e3efd863204b5fec74854078c80ff3f
|
[] |
no_license
|
smvanliempd/IAVmetabolism
|
c014b263bc0bc7d5f5aa30585f29cc3bb57eab21
|
49d2a0d16f5d236fa90d519e94f1daca28a99c4f
|
refs/heads/master
| 2022-12-08T17:37:39.648629
| 2020-09-04T10:23:58
| 2020-09-04T10:23:58
| 260,969,500
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,256
|
r
|
dot_plots_per_class_ext.R
|
# For each metabolite class in `set`, draw and save two dot plots of fold
# changes with 50/90% posterior intervals: one for the infected strains and
# one for the strain-difference contrast.
#   dat_plot   : list with data.tables `dat_deltas` (posterior summaries) and
#                `features` (metabolite metadata, defines plotting order)
#   set        : suffix selecting the class column (`Class_<set>`)
#   p_effect   : posterior-probability cutoff for a reliable effect direction
#   sel_effect : if TRUE, keep only metabolites flagged as unreliable ("x")
#   dvc        : graphics device/extension passed to ggsave2 (e.g. "png")
dot.plots.per.class.ext <- function(dat_plot, set, p_effect = 0.8, sel_effect = F, dvc = "png") {
  require(egg)
  require(grid)
  require(cowplot)
  # define variables for heatmaps (fold-change contrasts to plot)
  vars.fc <- c("del_pl_bl","del_d_3i_0i","del_c_3i_0i","del_del_str_inf") #,"del_d3i_c3i"
  # define observations for heatmaps
  obs.fc <- c("Final_ID","variable","strain","baseline","time",paste0("Class_",set),"P_pos","q5","q25","q50","q75","q95")
  # extract data
  dat <- dat_plot$dat_deltas[ variable %in% vars.fc, .SD, .SDcols = obs.fc] # data for fold change
  dat.ftr <- dat_plot$features
  # change name of Class column for faceting later
  setnames(dat,paste0("Class_",set),"Class")
  # delete rows for non-included mets
  dat <- dat[!is.na(Class)]
  # get classes in set
  classes <- na.exclude(unique(dat$Class) )
  # plot per class
  pp <- sapply(classes , function(cls) {
    # get data per class
    dat.cls <- dat[Class == cls]
    # clean up and set levels (map contrast names to facet labels)
    dat.cls[ , Facet_vert := ifelse(variable == "del_pl_bl", "IS Healthy",ifelse(variable =="del_del_str_inf" , "IS Delta 3 dpi" ,"3 dpi"))]
    dat.cls[ , Facet_vert := factor(Facet_vert, levels = c("IS Healthy", "3 dpi","IS Delta 3 dpi"))]
    # set ID level according to P_pos
    # met.levels <- dat.cls[variable == vars.fc[1] , .(P_pos = P_pos), by = Final_ID]
    # met.levels[ , met_levels := order(P_pos)]
    # met.levels <- met.levels[ , Final_ID[met_levels]]
    # dat.cls[ ,Final_ID := factor(Final_ID, met.levels)]
    # set ID level to manual order (taken from the features table)
    met.levels <- dat.ftr[,Final_ID]
    dat.cls[ ,Final_ID := factor(Final_ID, met.levels)]
    # highlight effects with reliable effect directions excluding "del_pl_bl"
    # ("o" = posterior probability inconclusive, "x" = reliable direction)
    dat.cls[ , flag_direction := ifelse((P_pos <= p_effect & P_pos >= 1 - p_effect), "o", "x")]
    fts <- droplevels(dat.cls[flag_direction == "x", Final_ID])
    # prepare plot data and parameters
    dw <- .5
    # function for changing log2(fc) into percentage delta change
    lfc2perc <- function(a) { 100*( ( 2^a ) - 1 ) }
    if (sel_effect == T) dat.cls <- dat.cls[Final_ID %in% fts ]
    # get number of metabolites to plot (drives panel width below)
    n_mets <- length(unique(dat.cls$Final_ID))
    # plot: infected strains vs healthy baseline
    p1 <- ggplot(dat.cls[variable != "del_del_str_inf"], aes(x= Final_ID, y = q50)) +
      geom_hline(yintercept = 0, lty = 2, col = "red") +
      geom_linerange(aes(ymin = q25, ymax = q75, col = strain),
                     position = position_dodge(width = dw), size = 1 )+
      geom_linerange(aes(ymin = q5, ymax = q95, col = strain),
                     position = position_dodge(width = dw) , alpha = 0.7)+
      geom_point(aes( col = strain, shape = flag_direction),
                 position = position_dodge(width =dw), fill = "white")+
      scale_color_manual(name = NULL,values = c("blue","#ff8c00","black") ,
                         labels = c("C57 Infected","DBA Infected","IS Healthy"))+
      scale_shape_manual(values = c(21,19), guide = F)+
      labs( y = expression(paste(Delta,"% (50/90 PI)") ) ) +
      scale_y_continuous(breaks = c(-log2(c(2,5,10)),log2(c(1,2,5,11))),labels = lfc2perc )+
      facet_grid(Facet_vert~Class,scales = "free", space = "free") +
      theme_bw() +
      theme(axis.text.x = element_blank(), axis.ticks.length.x = unit(0,"in"),
            axis.title.x = element_blank(), panel.grid.minor = element_blank())
    # set fixed panel sizes (width scales with the number of metabolites)
    p1 <- set_panel_size(p1, width = unit(.2 * n_mets, "in"), height = unit(1.5, "in") )
    # save plot (sanitize the class name for use in the file name)
    cls.name <- str_replace(make.names(cls), "\\.", "_")
    ggsave2(paste0(getwd(),"/Plots/Dot_plots/Extended/dots_per_class_ext_",set,"_",cls.name,"_infected.", dvc), p1,
            width = .2 * n_mets + 5,
            height = 8,
            units = "in", dpi = 600,device = dvc)
    # plot: strain-difference contrast at 3 dpi
    p2 <- ggplot(dat.cls[variable == "del_del_str_inf"], aes(x= Final_ID, y = q50)) +
      geom_hline(yintercept = 0, lty = 2, col = "red") +
      geom_linerange(aes(ymin = q25, ymax = q75, col = strain),
                     position = position_dodge(width = dw), size = 1 )+
      geom_linerange(aes(ymin = q5, ymax = q95, col = strain),
                     position = position_dodge(width = dw) , alpha = 0.7)+
      geom_point(aes( col = strain, shape = flag_direction),
                 position = position_dodge(width =dw), fill = "white")+
      scale_color_manual(name = NULL,values = c("red") ,
                         labels = c("IS Infected"))+
      scale_shape_manual(values = c(21,19), guide = F)+
      labs( y = expression(paste(Delta,Delta,"% (50/90 PI)") ) ) +
      facet_grid(Facet_vert~.,scales = "free", space = "free") +
      theme_bw() +
      theme(axis.text.x = element_text(angle = 90, vjust = .3, hjust = 1),
            axis.title.x = element_blank(), panel.grid.minor = element_blank() )
    p2 <- set_panel_size(p2, width = unit(.2 * n_mets, "in"), height = unit(1.5, "in") )
    ggsave2(paste0(getwd(),"/Plots/Dot_plots/Extended/dots_per_class_ext_",set,"_",cls.name,"_infdiff.", dvc), p2,
            width = .2 * n_mets + 5,
            height = 8,
            units = "in", dpi = 600,device = dvc)
  },simplify = F, USE.NAMES = T)
}
|
ced93b58f13d0de81fa72354e742656b294ba399
|
d226838e64a1d55fdaf797893f7468651b725183
|
/inst/testScripts/complete/hg19/21.FastaReferenceFile,Bowtie2.R
|
237dea7236633a92d86f6f662b5185346e67f52e
|
[] |
no_license
|
HenrikBengtsson/aroma.seq
|
5fd673cc449d9c3b89daf1125e8cc95556d0641d
|
6464f1e5e929c423978cf7dcb11ac7018d179a6d
|
refs/heads/master
| 2021-06-21T13:53:21.618898
| 2021-02-10T02:57:15
| 2021-02-10T02:57:15
| 20,848,327
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,017
|
r
|
21.FastaReferenceFile,Bowtie2.R
|
############################################################################
#
############################################################################
library("aroma.seq");
organism <- "Homo_sapiens";
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Indexing a reference genome
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Locate the 1000 Genomes (g1k v37) FASTA reference under annotationData/
path <- file.path("annotationData", "organisms", organism);
filename <- "human_g1k_v37.fasta";
fa <- FastaReferenceFile(filename, path=path);
print(fa);
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Build BWA index set
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# NOTE(review): despite the header above (left over from the BWA script this
# was derived from), this builds a Bowtie2 index set.
is <- buildBowtie2IndexSet(fa, verbose=-10);
print(is);
############################################################################
# HISTORY:
# 2013-06-27
# o Created (from ditto for BWA).
############################################################################
|
0087af9ea01fb3c726a990a1792966ca86240cf6
|
1d34ffca722c3de81ab449ad92d621ea2020c306
|
/man/group_birds_by_location.Rd
|
bedb8123171929a5f048ce7b66f41079e9949c48
|
[] |
no_license
|
eriqande/gaiah
|
9672b4ac51dbf48516592a24e11817be9c80b897
|
58ed0fe099448876602a065d2661f3cc8d5477ea
|
refs/heads/master
| 2023-05-10T20:35:20.629230
| 2023-04-27T21:10:13
| 2023-04-27T21:10:13
| 56,340,908
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,792
|
rd
|
group_birds_by_location.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isotope-maneuvers.R
\name{group_birds_by_location}
\alias{group_birds_by_location}
\title{Group bird isotope data by locations}
\usage{
group_birds_by_location(
D,
feather_isotope_col,
location_col,
iso_pred_col = "iso_pred",
iso_sd_col = "iso_sd"
)
}
\arguments{
\item{D}{the data frame of feather isotope data with the isoscape predictions
extracted for each location, as well, and a column giving general grouping
locations for the birds.}
\item{feather_isotope_col}{the string name of the column holding the feather isotope
data.}
\item{location_col}{the string name of the column holding the locations to be
used for grouping.}
\item{iso_pred_col}{name of the column holding the predicted values from the isoscape. Default
is \code{iso_pred}.}
\item{iso_sd_col}{name of the column holding the standard deviations of the predicted values
from the isoscape. Default is \code{iso_sd_col}.}
}
\description{
This takes as input a data frame of feather isotope data that also has the
isoscape predictions attached to it, just like the data frame returned by
\code{\link{extract_isopredictions}}. The data frame must have a column
that gives the general location by which you will group birds for the
rescaling function. The isoscape predictions by default should be in columns named
\code{iso_pred} for the actual prediction, and \code{iso_sd} for the standard deviation,
as produced by \code{\link{extract_isopredictions}}, but those are user configurable,
as well.
}
\details{
This function returns a data frame with columns for the mean and SD of feather/bird values,
(\code{meanH} and \code{sdH}) and the mean predicted isotope value and the mean sd of the predicted
isotope values (\code{meaniso} and \code{sdiso}) for all the samples within each location. It
also returns the Location column itself and a column \code{cnt} that gives the number of bird/tissue
samples from each location.
This function
throws an error if any of the locations has only 1 sample. If that is the case, you may consider
merging that sample with another location (or dropping it?).
}
\examples{
# first run the example for extract_isopredictions to get the variable "x"
example("extract_isopredictions")
# If this were run it gives an error because there is only 1 bird at the
# location "Charlevoix"
\dontrun{
group_birds_by_location(x, feather_isotope_col = "Isotope.Value", location_col = "Location")
}
# remove that one bird at Charlevoix and re-run
y <- x \%>\%
dplyr::filter(Location != "Charlevoix")
# then group birds by location
gbl <- group_birds_by_location(D = y,
feather_isotope_col = "Isotope.Value",
location_col = "Location")
}
|
68db751e59f09f91418aa41b47ada1ce21b8c008
|
a6d33b17790d652032c0abd8eb55522282302a0b
|
/quiz.R
|
946b1bdeca35d894c279d2281317c94f4a89bccd
|
[] |
no_license
|
up1/sck-r-programming-101
|
4555d55524a8cb34ff43d2000f57dc8974701f82
|
dc8fba0268a3624cb7b597b98df947fe079b15d3
|
refs/heads/master
| 2022-07-31T03:35:07.218643
| 2020-05-20T18:08:18
| 2020-05-20T18:08:18
| 265,646,632
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 102
|
r
|
quiz.R
|
f <- function(x) {
2*x^2 + 1
}
x <- c(0:10)
plot(x,f(x), type="l", col="red")
abline(70, 0)
grid()
|
985de799f823a5c52424eeef415578079df7af06
|
cc8f3577c8d36b0bbe33a7c5a00ac52274ba58d2
|
/man/subsetExons.Rd
|
2b7b59d157dd2ed785803556fac6dddd4372fe9b
|
[] |
no_license
|
mumichae/DASSIE
|
89188f3105d1cd7184438b865a44971d4119552c
|
b0b7ff1528f319e2c8c5368d78487297330f6d96
|
refs/heads/master
| 2020-12-01T12:37:02.372801
| 2020-01-09T16:19:24
| 2020-01-09T16:19:24
| 230,627,777
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 476
|
rd
|
subsetExons.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/features.R
\name{subsetExons}
\alias{subsetExons}
\title{Extract first/last exons depending on strand
takes the first hit after sorting}
\usage{
subsetExons(exon_dt, by, which = "first", strand = "both")
}
\arguments{
\item{by}{sorting criterium (transcript or gene)}
\item{dt}{containing exon info}
}
\description{
Extract first/last exons depending on strand
takes the first hit after sorting
}
|
9d084f19ae6b825eba6b018ccea149fb95f1f36e
|
b90060ec71c5428cbbc3a1514f0d1beef60cac1e
|
/final_script_BDAP03.R
|
5496ba52d59d6050c3ecbcfd24f3bb34f8fdab56
|
[] |
no_license
|
Rathi-Nikita/Credit-Risk-Modelling
|
a4866ea6259b1783df8ca470a48c786934e438e1
|
ce08cab4af801c99998e799df8c263452f684cd6
|
refs/heads/master
| 2021-01-11T17:13:34.388243
| 2017-01-22T20:28:43
| 2017-01-22T20:28:43
| 79,741,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,781
|
r
|
final_script_BDAP03.R
|
credge_data <- read.csv(file.choose(),header = T,na.strings=c(""," ","- ","_","NA"))
credge_data <- read.csv(file.choose(),header = T)
View(credge_data)
names(credge_data)
attach(credge_data)
str(credge_data)
#keeping copy
data <-credge_data
attach(data)
library(gplots)
library(ROCR)
library(Rcpp)
library(ggplot2)
library(pROC)
str(data)
summary(data)
attach(data)
##deleting unwanted columns
data[,c(1,2,3,5,7,13)] <- data.frame(apply(data[c(1,2,3,5,7,13)],2,as.null))
#converting sales and profit to int
#data$Sales<- (data$Sales*1000)/10000000
#profit in crores
#data$Net.Profit<-(data$Net.Profit*1000)/10000000
##Years in business
data$yearsRating <- 1
summary(data$yearsRating)
attach(data)
names(data)
colnames(data)[5] <- "yrs_in_bus"
data$yearsRating[is.na(data$yrs_in_bus)] <- 0
data$yearsRating[data$yrs_in_bus == "0 - 1"] <- 2
data$yearsRating[data$yrs_in_bus == "2 - 3"] <- 3
data$yearsRating[data$yrs_in_bus == "4 - 6"] <- 6
data$yearsRating[data$yrs_in_bus == "13 - 20"] <- 9
data$yearsRating[data$yrs_in_bus == "21 - Above"] <- 10
###business_exp
names(data)
summary(data$business_exp)
levels(data$business_exp)
data$expRating <- 1
colnames(data)[11]<-"business_exp"
attach(data)
data$expRating[is.na(data$business_exp)] <- 0
data$expRating[data$business_exp == "0-1"] <- 1
data$expRating[data$business_exp == "0 - 1"] <- 1
data$expRating[data$business_exp == "2 - 3"] <- 2
data$expRating[data$business_exp == "4 - 6"] <- 4
data$expRating[data$business_exp == "7 - 10"] <- 7
data$expRating[data$business_exp == "15-20"] <- 9
data$expRating[data$business_exp == "21-25"] <- 9
data$expRating[data$business_exp == "21-50"] <- 9
data$expRating[data$business_exp == "Owned"] <- 9
data$expRating[data$business_exp == " 26-Above"] <- 10
data$expRating = as.factor(data$expRating)
levels(data$expRating)
summary(data$expRating)
summary(data$business_exp)
###score ownofc
names(data)
colnames(data)[4]<-"type_of_ownership"
data$ofcRating<- 1
levels(data$type_of_ownership)
data$ofcRating[is.na(data$type_of_ownership)] <- 0
data$ofcRating[data$type_of_ownership == "13 - 20"] <- 0
data$ofcRating[data$type_of_ownership == "Rented"] <- 4
data$ofcRating[data$type_of_ownership == "rented"] <- 4
data$ofcRating[data$type_of_ownership == "Occupied"] <- 4
data$ofcRating[data$type_of_ownership == "Leased"] <- 9
data$ofcRating[data$type_of_ownership == "Owned"] <- 10
data$ofcRating = as.factor(data$ofcRating)
levels(data$type_of_ownership)
levels(data$ofcRating)
head(data$ofcRating)
###score number of employees
names(data)
colnames(data)[6]<-"employees"
summary(data$employees)
levels(data$employees)
data$empRating <- 1
data$empRating[is.na(data$employees)] <- 0
data$empRating[data$employees == "1-20"] <- 2
data$empRating[data$employees == "Inadequate"] <- 2
data$empRating[data$employees == "21-50"] <- 4
data$empRating[data$employees == "51-70"] <- 6
data$empRating[data$employees == "71-100"] <- 8
data$empRating[data$employees == "101-150"] <- 10
data$empRating[data$employees == "100-150"] <- 10
data$empRating[data$employees == "151 and above"] <-12
data$empRating[data$employees == "Adequate"] <- 8
data$empRating[data$employees == "Not Available"] <- 0
data$empRating<-as.factor(data$empRating)
levels(data$empRating)
levels(data$employees)
##score suppliers base
names(data)
data$supplierRating <- 1
colnames(data)[7]<-"supplier"
data$supplierRating[is.na(data$supplier)] <- 0
data$supplierRating[data$supplier == "<1.5L"]<- 0
data$supplierRating[data$supplier == "1.5L-3L"] <- 0
data$supplierRating[data$supplier == "Concentrated Unknown"] <- 4
data$supplierRating[data$supplier == "Concentrated Reputed"] <- 6
data$supplierRating[data$supplier == "Dispersed Unknown"] <- 8
data$supplierRating[data$supplier == "Dispersed Reputed"] <- 10
data$supplierRating = as.factor(data$supplierRating)
levels(data$supplierRating)
##score buyers base
names(data)
colnames(data)[8]<-"buyers"
summary(data$buyers)
levels(data$buyers)
data$buyerRating <- 1
data$buyerRating [is.na(data$buyers)] <- 0
data$buyerRating[data$buyers == "Stiff Credit Terms"] <- 0
data$buyerRating[data$buyers == "Yes"] <- 0
data$buyerRating[data$buyers == "No"] <- 0
data$buyerRating[data$buyers == "Concentrated with bad reputation"] <-2
data$buyerRating[data$buyers == "Concentrated Unknown"]<- 4
data$buyerRating[data$buyers == "Concentrated Reputed"]<- 6
data$buyerRating[data$buyers == "Dispersed Unknown"] <-8
data$buyerRating[data$buyers == "Dispersed Reputed"] <-10
data$buyerRating= as.factor(data$buyerRating)
summary(data$buyers)
summary(data$buyerRating)
##score qualification
names(data)
colnames(data)[10]<-"qualification"
summary(data$qualification)
levels(data$qualification)
data$qualRating <- 1
data$qualRating[data$qualification == "NULL"] <- 0
data$qualRating[data$qualification == "Under Graduate"] <- 4
data$qualRating[data$qualification == "Professional"] <- 6
data$qualRating[data$qualification == "Graduate/ No Qualification Available"] <- 6
data$qualRating[data$qualification == "Graduate"] <- 6
data$qualRating[data$qualification == "Post Graduate"] <- 8
data$qualRating[data$qualification == "Diploma"] <- 8
data$qualRating[data$qualification == "PG Diploma"] <- 8
data$qualRating[data$qualification == "Engineering and Management"] <-10
data$qualRating[data$qualification == "Other Profession higher of above"] <- 10
data$qualRating= as.factor(data$qualRating)
summary(data$qualRating)
summary(data$qualification)
##score generation
names(data)
colnames(data)[12]<-"generation"
summary(data$generation)
levels(data$generation)
data$genRating <- 1
data$genRating[is.na(data$generation)] <- 0
data$genRating[data$generation == "First Generations"] <- 4
data$genRating[data$generation == "Second Generations"] <- 8
data$genRating[data$generation == "Third Generations"] <- 2
data$genRating[data$generation == "First & Second generations together"] <- 10
data$genRating[data$generation == "Second & Third Generation together"] <- 6
data$genRating= as.factor(data$genRating)
summary(data$genRating)
summary(data$generation)
summary(data$supplierRating)
summary(data$supplier)
###financial scores
# score sales trend
names(data)
colnames(data)[15]<-"trendsales"
summary(data$trendsales)
levels(data$trendsales)
data$saletrn_Rating <- 1
data$saletrn_Rating[data$trendsales == "NULL"] <- 0
data$saletrn_Rating[data$trendsales == "No previous trend"] <- 0
data$saletrn_Rating[data$trendsales == "Negative"] <- 2
data$saletrn_Rating[data$trendsales == "Positive"] <- 10
data$saletrn_Rating[data$trendsales == "Stagnant over last three years"] <- 4
data$saletrn_Rating[data$trendsales == "increase earlier but decreased last year"] <- 5
data$saletrn_Rating[data$trendsales == "Decrease earlier but increased last year"] <- 7
data$saletrn_Rating= as.factor(data$saletrn_Rating)
levels(data$saletrn_Rating)
summary(data$saletrn_Rating)
summary(data$trendsales)
#score profit trend
names(data)
colnames(data)[17]<-"trendprofit"
summary(data$trendprofit)
levels(data$trendprofit)
data$profittrn_Rating <- 1
data$profittrn_Rating[data$trendprofit == "NULL"] <- 0
data$profittrn_Rating[data$trendprofit == "No previous trend"] <- 0
data$profittrn_Rating[data$trendprofit == "0"] <- 2
data$profittrn_Rating[data$trendprofit == "Negative"] <- 2
data$profittrn_Rating[data$trendprofit == "Positive"] <- 10
data$profittrn_Rating[data$trendprofit == "Stagnant over last three years"] <- 4
data$profittrn_Rating[data$trendprofit == "increase earlier but decreased last year"] <- 5
data$profittrn_Rating[data$trendprofit == "Decrease earlier but increased last year"] <- 7
data$profittrn_Rating= as.factor(data$profittrn_Rating)
summary(data$profittrn_Rating)
summary(data$trendprofit)
#score icr
names(data)
colnames(data)[18]<-"icr"
str(data$icr)
class(data$icr)
#data$icr<-as.numeric(data$icr)
data$icr<-as.double(as.character(data$icr))
data$icrRating[data$icr < 1] <- 0
data$icrRating[data$icr >= 1 & data$icr <= 1.28] <- 1
data$icrRating[data$icr >= 1.29 & data$icr <= 1.56] <- 2
data$icrRating[data$icr >= 1.57 & data$icr <= 1.84] <- 3
data$icrRating[data$icr >= 1.85 & data$icr <= 2.12] <- 4
data$icrRating[data$icr >= 2.13 & data$icr <= 2.42] <- 5
data$icrRating[data$icr >= 2.43 & data$icr <= 2.72] <- 6
data$icrRating[data$icr >= 2.73 & data$icr <= 3.02] <- 7
data$icrRating[data$icr >= 3.03 & data$icr <= 3.32] <- 8
data$icrRating[data$icr >= 3.33 & data$icr <= 3.62] <- 9
data$icrRating[data$icr >= 3.63] <- 10
data$icrRating<-as.factor(data$icrRating)
levels(data$icrRating)
summary(data$icrRating)
#score sales to inventory
names(data)
colnames(data)[19]<-"Invturnover"
summary(data$Invturnover)
data$invRating <- NA
data$Invturnover<-as.double(as.character(data$Invturnover))
data$invRating[data$Invturnover < 3] <- 0
data$invRating[data$Invturnover >= 3 & data$Invturnover <= 4.15] <- 1
data$invRating[data$Invturnover >= 4.16 & data$Invturnover <= 5.28] <- 2
data$invRating[data$Invturnover >= 5.29 & data$Invturnover <= 6.41] <- 3
data$invRating[data$Invturnover >= 6.42 & data$Invturnover <= 7.54] <- 4
data$invRating[data$Invturnover >= 7.55 & data$Invturnover <= 8.79] <- 5
data$invRating[data$Invturnover >= 8.8 & data$Invturnover <= 10.03] <- 6
data$invRating[data$Invturnover >= 10.04 & data$Invturnover <= 11.29] <- 7
data$invRating[data$Invturnover >= 11.30 & data$Invturnover <= 12.54] <- 8
data$invRating[data$Invturnover >= 12.55 & data$Invturnover <= 13.79] <- 9
data$invRating[data$Invturnover >= 13.8] <- 10
data$invRating= as.factor(data$invRating)
summary(data$invRating)
summary(data$Invturnover)
#score collection collection_days
names(data)
colnames(data)[20]<-"collection_days"
summary(data$collection_days)
str(data$collection_days)
data$collection_daysRating <- NA
class(data$collection_days)
data$collection_days<-as.double(as.character(data$collection_days))
data$collection_daysRating[data$collection_days > 180] <- 0
data$collection_daysRating[data$collection_days >= 127 & data$collection_days <= 180] <- 1
data$collection_daysRating[data$collection_days >= 106 & data$collection_days <= 126.99] <- 2
data$collection_daysRating[data$collection_days >= 87 & data$collection_days <= 105.99] <- 3
data$collection_daysRating[data$collection_days >= 67 & data$collection_days <= 86.99] <- 4
data$collection_daysRating[data$collection_days >= 61 & data$collection_days <= 66.99] <- 5
data$collection_daysRating[data$collection_days >= 56 & data$collection_days <= 60.99] <- 6
data$collection_daysRating[data$collection_days >= 51 & data$collection_days <= 55.99] <- 7
data$collection_daysRating[data$collection_days >= 46 & data$collection_days <= 50.99] <- 8
data$collection_daysRating[data$collection_days >= 40 & data$collection_days <= 45.99] <- 9
data$collection_daysRating[data$collection_days < 40] <- 10
data$collection_daysRating = as.factor(data$collection_daysRating)
summary(data$collection_daysRating)
summary(data$collection_days)
##score ROA summary(data$roa)
names(data)
colnames(data)[22]<-"roa"
summary(data$roa)
data$roa<-as.double(as.character(data$roa))
data$roaRating <- NA
data$roaRating[data$roa <= 0 ] <- 0
data$roaRating[data$roa >= 0.01 & data$roa <= 0.82] <- 1
data$roaRating[data$roa >= 0.83 & data$roa <= 1.66] <- 2
data$roaRating[data$roa >= 1.67 & data$roa <= 2.50] <- 3
data$roaRating[data$roa >= 2.51 & data$roa <= 3.34] <- 4
data$roaRating[data$roa >= 3.35 & data$roa <= 4.19] <- 5
data$roaRating[data$roa >= 4.20 & data$roa <= 5.19] <- 6
data$roaRating[data$roa >= 5.20 & data$roa <= 6.19] <- 7
data$roaRating[data$roa >= 6.20 & data$roa <= 7.19] <- 8
data$roaRating[data$roa >= 7.20 & data$roa <= 8.19] <- 9
data$roaRating[data$roa >= 8.20] <- 10
data$roaRating = as.factor(data$roaRating)
summary(data$roaRating)
summary(data$roa)
##score NetMargin
names(data)
colnames(data)[23]<-"netmargin"
summary(data$netmargin)
data$nmRating <- NA
data$netmargin<-as.double(as.character(data$netmargin))
data$nmRating[data$netmargin < 0 ] <- 0
data$nmRating[data$netmargin >= 0.00 & data$netmargin <= 0.069] <- 1
data$nmRating[data$netmargin >= 0.07 & data$netmargin <= 1.39] <- 2
data$nmRating[data$netmargin >= 1.40 & data$netmargin <= 2.09] <- 3
data$nmRating[data$netmargin >= 2.10 & data$netmargin <= 2.79] <- 4
data$nmRating[data$netmargin >= 2.80 & data$netmargin <= 3.35] <- 5
data$nmRating[data$netmargin >= 3.36 & data$netmargin <= 3.91] <- 6
data$nmRating[data$netmargin >= 3.92 & data$netmargin <= 4.47] <- 7
data$nmRating[data$netmargin >= 4.48 & data$netmargin <= 5.03] <- 8
data$nmRating[data$netmargin >= 5.04 & data$netmargin <= 5.59] <- 9
data$nmRating[data$netmargin >= 5.60] <- 10
data$nmRating = as.factor(data$nmRating)
summary(data$nmRating)
summary(data$netmargin)
#score DTE
names(data)
colnames(data)[21]<-"dte"
summary(data$dte)
data$dte<-as.double(as.character(data$dte))
data$dteRating <- NA
data$dteRating[data$dte >= 2.74] <- 0
data$dteRating[data$dte >= 2.47 & data$dte <= 2.73] <- 1
data$dteRating[data$dte >= 2.19 & data$dte <= 2.46] <- 2
data$dteRating[data$dte >= 1.91 & data$dte <= 2.18] <- 3
data$dteRating[data$dte >= 1.63 & data$dte <= 1.90] <- 4
data$dteRating[data$dte >= 1.35 & data$dte <= 1.62] <- 5
data$dteRating[data$dte >= 1.09 & data$dte <= 1.34] <- 6
data$dteRating[data$dte >= .82 & data$dte <= 1.08] <- 7
data$dteRating[data$dte >= .55 & data$dte <= .81] <- 8
data$dteRating[data$dte >= .28 & data$dte <= .54] <- 9
data$dteRating[data$dte <= .27] <- 10
data$dteRating = as.factor(data$dteRating)
summary(data$dteRating)
###score Sales summary(data$Sales)
names(data)
data$SalesRating <- NA
data$Sales<-as.numeric(as.character(data$Sales))
#Sales in lakhs
data$Sales <-round(((data$Sales/100000)),digits = 2)
##Sales Rating
data$SalesRating[data$Sales < 0.5 ] <- 0
data$SalesRating[data$Sales >= 0.5 & data$Sales <= 2.39 ] <- 1
data$SalesRating[data$Sales >= 2.4 & data$Sales <= 5.69] <- 2
data$SalesRating[data$Sales >= 5.7 & data$Sales <= 8.99 ] <- 3
data$SalesRating[data$Sales >= 9 & data$Sales <= 12.29 ] <- 4
data$SalesRating[data$Sales >= 12.3 & data$Sales <= 15.59 ] <- 5
data$SalesRating[data$Sales >= 15.1 & data$Sales <= 18.89 ] <- 6
data$SalesRating[data$Sales >= 18.9 & data$Sales <= 22.19 ] <- 7
data$SalesRating[data$Sales >= 22.2 & data$Sales <= 25.49 ] <- 8
data$SalesRating[data$Sales >= 25.5 & data$Sales <= 28.79 ] <- 9
data$SalesRating[data$Sales >= 28.8 ] <- 10
data$SalesRating = as.factor(data$SalesRating)
summary(data$SalesRating)
summary(data$Sales)
#profit in lakhs
names(data)
colnames(data)[16]<-"netprofit"
class(data$netprofit)
data$netprofit<-as.numeric(as.character(data$netprofit))
data$netprofit <-round(((data$netprofit/100000)),digits = 2)
#rating net profit
data$profitRating <- NA
data$profitRating[data$netprofit < -10 ] <- -4
data$profitRating[data$netprofit >= -9.99 & data$netprofit <= 0] <- -2
data$profitRating[data$netprofit >= 0 & data$netprofit <= 0.05] <- 2
data$profitRating[data$netprofit >= 0.06 & data$netprofit <= 0.1 ] <- 3
data$profitRating[data$netprofit >= 0.11 & data$netprofit <= 0.15] <- 4
data$profitRating[data$netprofit >= 0.16 & data$netprofit <= 0.20 ] <- 5
data$profitRating[data$netprofit >= 0.21 & data$netprofit <= 0.25 ] <- 6
data$profitRating[data$netprofit >= 0.26 & data$netprofit <= 0.30 ] <- 7
data$profitRating[data$netprofit >= 0.31 & data$netprofit <= 0.35 ] <- 8
data$profitRating[data$netprofit >= 0.36 ] <- 10
data$profitRating = as.factor(data$profitRating)
summary(data$profitRating)
summary(data$netprofit)
levels(data$profitRating)
####
Performance_Capability<-c()
attach(data)
Performance_Capability<-ifelse((Rating.NSIC=="SE 1A")| (Rating.NSIC=="SE 1B")|(Rating.NSIC=="SE 1C"),append(Performance_Capability,"Highest"),ifelse((Rating.NSIC=="SE 2A")|(Rating.NSIC=="SE 2B")|(Rating.NSIC=="SE 2C"),append(Performance_Capability,"High"),ifelse((Rating.NSIC=="SE 3A")|(Rating.NSIC=="SE 3B")|(Rating.NSIC=="SE 3C" ),append(Performance_Capability,"moderate"),ifelse((Rating.NSIC=="SE 4A")|(Rating.NSIC=="SE 4B")|(Rating.NSIC=="SE 4C"),append(Performance_Capability,"weak"),ifelse((Rating.NSIC=="SE 5A")|(Rating.NSIC=="SE 5B")|(Rating.NSIC=="SE 5C"),append(Performance_Capability,"Poor"),append(Performance_Capability,"NULL"))))))
Performance_Capability
Financial_Strength<-c()
Financial_Strength<-ifelse((Rating.NSIC=="SE 1A")| (Rating.NSIC=="SE 2A")|(Rating.NSIC=="SE 3A")|(Rating.NSIC=="SE 4A")|(Rating.NSIC=="SE 5A"),append(Financial_Strength,"High"),ifelse((Rating.NSIC=="SE 1B")| (Rating.NSIC=="SE 2B")|(Rating.NSIC=="SE 3B")|(Rating.NSIC=="SE 4B")|(Rating.NSIC=="SE 5B"),append(Financial_Strength,"Moderate"),ifelse((Rating.NSIC=="SE 1C")|(Rating.NSIC=="SE 2C")|(Rating.NSIC=="SE 3C")|(Rating.NSIC=="SE 4C")|(Rating.NSIC=="SE 5C"),append(Financial_Strength,"Low"),append(Financial_Strength,"Null"))))
data<-data.frame(data,Financial_Strength,Performance_Capability)
data$Perf_cap<-NA
data$Perf_cap[data$Performance_Capability=="Highest"]<-10
data$Perf_cap[data$Performance_Capability=="High"]<-8
data$Perf_cap[data$Performance_Capability=="moderate"]<-6
data$Perf_cap[data$Performance_Capability=="weak"]<-4
data$Perf_cap[data$Performance_Capability=="poor"]<-2
data$Perf_cap<-as.factor(data$Perf_cap)
levels(data$Performance_Capability)
levels(data$Perf_cap)
summary(data$Perf_cap)
summary(data$Performance_Capability)
data$fs_rating<-NA
data$fs_rating[data$Financial_Strength=="High"]<-10
data$fs_rating[data$Financial_Strength=="Moderate"]<-6
data$fs_rating[data$Financial_Strength=="Low"]<-2
data$fs_rating<-as.factor(data$fs_rating)
levels(data$fs_rating)
summary(data$Financial_Strength)
summary(data$fs_rating)
#creating a new dataset with rating
names(data)
sub_data = data[,c(24:42,45,46)]
summary(sub_data)
sub_data = na.omit(sub_data)
##chi squared chi_data = data4[,-21]
chi_data = sub_data[,-1]
Users <- as.list(colnames(chi_data))
chi_square <- matrix(data = NA , nrow = ncol(chi_data), ncol = ncol(chi_data), dimnames = list(c(Users),c(Users)))
for(i in 1:length(chi_data)){ for(j in 1:length(chi_data)){
a = chisq.test(table(chi_data[,i],chi_data[,j])) chi_square[i,j] = a$p.value
}
}
#feature selection with Boruta library(ranger) library(Boruta)
View(sub_data)
dim(sub_data)
attach(sub_data)
install.packages("ranger")
library("ranger")
install.packages("Boruta")
library(Boruta)
set.seed(123)
?Boruta
sub_data$Default<-as.factor(sub_data$Default)
boruta.train <- Boruta(Default~., doTrace=2,data=sub_data)
print(boruta.train)
final.boruta <- TentativeRoughFix(boruta.train)
print(final.boruta)
a = getSelectedAttributes(final.boruta, withTentative = F)
boruta.df <- attStats(final.boruta)
class(boruta.df)
boruta.df
###factor analysis techniques
##new data frame
sub_data2 = sub_data[,c(a)]
names(sub_data2)
##dummy creation
library(lattice)
library(caret)
dummy1 <- dummyVars("~.", data = sub_data2,fullRank = T)
df_pred = data.frame(predict(dummy1,newdata = sub_data2))
##adding default
sub_data2$Default<-sub_data$Default
summary(sub_data2$Default)
#training and testing dataset
set.seed(1)
ind <- sample(2,nrow(sub_data2),replace = T,prob = c(0.75,0.25))
dim(sub_data2)
training <- sub_data2[ind == 1,1:15]
test <- sub_data2[ind==2,1:15]
trainlabels <- sub_data2[ind==1,15]
testlabels <- sub_data2[ind==2,15]
training$Default = trainlabels
test$Default = testlabels
test1<-data.matrix(test[,-15])
actual<-test[,15]
sub_data3=data.matrix(training[,-15])
k=training$Default
#logistic regression
model1 = glm(training$Default~.,data = training, family = "binomial")
summary(model1)
library("leaps")
model1 = regsubsets(training$Default~.,data = training, nvmax=10,method="backward")
summary(model1)
model1 = glm(training$Default~icrRating+empRating,data = training, family = "binomial")
summary(model1)
pred0 = predict.glm(model1,newdata = test,type = "response")
predicted=rep("N",1815)
predicted[pred0 > 0.75]="Y"
t0 <- table(actual,predicted)
t0
##Accuracy
accuracy_log<-sum(diag(t0)/sum(t0))
### 0.9707989
##Miscalssification rate
Error<-(t0[1,2]+t0[2,1])/sum(t0)
Error
#### 0.0292011
##Sensitivity(TP/TP+FN)
ST0 <- t0[2,2]/(t0[2,2]+t0[1,2])
ST0
## 0.6842105
##Specificity(TN/TN+FP)
SP0 <- t0[1,1]/(t0[1,1]+t0[2,1])
SP0
### 0.9769274
###Prevalence(Actualyes/Total)
"how often does yes condition appears in our sample"
PV0<- t0[2,2]/sum(t0)
PV0
#### 0.01432507
roc.model1=roc(predicted,as.numeric(actual))
plot(roc.model1,col = "blue")
roc.model1
####lasso
set.seed(1)
library(Matrix)
library(foreach)
library(glmnet)
library(ISLR)
####lasso regression
model.lasso = glmnet(sub_data3,y=k,alpha=1,family="binomial")
coef(model.lasso)
plot(model.lasso,xvar = "dev")
summary(model.lasso)
model.lasso
grid()
#####cv.lambda
model = cv.glmnet(sub_data3,y=k, family = "binomial",alpha=1)
plot(model)
model$lambda.min
#predict on test set
predicted = predict(model, s='lambda.min', newx=test1, type="class")
t1 <- table(actual,predicted)
t1
##Miscalssification rate
ER1 = (t1[1,2]+t1[2,1])/sum(t1)
ER1
##Accuracy[(TP+TN)/TP+TN+FP+FN ]
A1 <- sum(diag(t1))/sum(t1)
A1
##Sensitivity(TP/TP+FN)
ST1 <- t1[2,2]/(t1[2,2]+t1[2,1])
ST1
##Specificity(TN/TN+FP)
SP1 <- t1[1,1]/(t1[1,1]+t1[1,2])
SP1
###Prevalence(Actualyes/Total)
"how often does yes condition appears in our sample"
PV1<- t1[2,2]/sum(t1)
PV1
#####ROC_lasso
library(pROC)
roc.lasso=roc(predicted,as.numeric(actual))
plot(roc.lasso,col = "blue")
##ridge regression
model.ridge = glmnet(x=sub_data3,y=k,alpha=0,family='binomial')
coef(model.ridge)
plot(model.ridge,xvar = "dev")
summary(model.ridge)
model.ridge
grid()
predicted=predict(model.ridge, newx = test1, type = "class",s=0.05)
table(predicted,actual)
######cvridge
cv.model.ridge <- cv.glmnet(sub_data3,y=k,family = "binomial", type.measure = "class",alpha =0)
plot(cv.model.ridge)
cv.model.ridge$lambda.min
coef(cv.model.ridge, s = "lambda.min")
predicted = predict(cv.model.ridge, newx = test1, s = "lambda.min",type = "class")
t2 <- table(actual,predicted)
t2
##Miscalssification rate
ER2= (t2[1,2]+t2[2,1])/sum(t2)
ER2
##Accuracy[(TP+TN)/TP+TN+FP+FN ]
A2 <- sum(diag(t2))/sum(t2)
##Sensitivity(TP/TP+FN)
ST2 <- t2[2,2]/(t2[2,2]+t2[2,1])
ST2
##Specificity(TN/TN+FP)
SP2 <- t2[1,1]/(t2[1,1]+t2[1,2])
SP2
###Prevalence(Actualyes/Total)
"how often does yes condition appears in our sample"
PV2<- t2[2,2]/sum(t2)
PV2
###roc_ridge
library(pROC)
roc.ridge=roc(predicted,as.numeric(actual))
plot(roc.ridge,col = "blue")
##decision trees
library(rpart)
library(rpart.plot)
library(RColorBrewer)
install.packages("grid")
library(partykit)
library(rattle)
form <- as.formula(training$Default ~ .)
tree.1 <- rpart(form,data=training,control=rpart.control(cp=0.001))
plot(tree.1)
text(tree.1)
prp(tree.1,varlen=3)
##Interatively prune the tree
#new.tree.1 <- prp(tree.1,snip=TRUE)$obj
#display the new tree
prp(new.tree.1)
# A more reasonable tree
tree.2 <- rpart(form,training,parms = list(loss =matrix(c(0,10,1,0),ncol=2)),control=rpart.control(cp=0.001))
prp(tree.2) # A fast plot
fancyRpartPlot(tree.2) # A fancy plot from rattle
pred.tree = predict(tree.2,test)
pred.tree
predicted =rep("N",1815)
predicted[pred.tree[,2] > 0.80]="Y"
t3=table(predicted,actual)
t3
###Miscalssification rate
ER3= (t3[1,2]+t3[2,1])/sum(t3)
ER3
"zero miss classification"
##Accuracy[(TP+TN)/TP+TN+FP+FN ]
A3 <-sum(diag(t3))/sum(t3)
A3
##Sensitivity(TP/TP+FN)
ST3 <- t3[1,1]/(t3[1,1]+t3[1,2])
ST3
##Specificity(TN/TN+FP)
SP3 <- t3[2,2]/(t3[2,2]+t3[2,1])
SP3
#roc_tree
roc.tree=roc(predicted,as.numeric(actual))
plot(roc.tree,col = "blue")
# ##without rating variable
ind <- sample(2,nrow(data),replace = T,prob = c(0.75,0.25))
test_tree <- data[ind==2,4:24]
training_tree <- data[ind == 1,4:24]
actual_tree<-test_tree[,21]
training1 = training_tree[,c(1:21)]
form1 <- as.formula(training1$Default ~ .)
tree.3 <- rpart(form1,data=training1,control=rpart.control(minsplit=20,cp=0))
plot(tree.3)
text(tree.3)
#prp(tree.3,varlen=3)
tree.4 <- rpart(form1,training1) # A more reasonable tree
#prp(tree.4) # A fast plot
#fancyRpartPlot(tree.4) # A fancy plot from rattle
pred.tree.2 = predict(tree.4,test_tree)
predicted =rep("N",2139)
predicted[pred.tree.2[,2] > 0.917]="Y"
##confusion matrix
t4 <- table(actual_tree,predicted)
t4
##misclassification rate
ER4 = (t4[1,2]+t4[2,1])/sum(t4)
##Accuracy[(TP+TN)/TP+TN+FP+FN ]
A4 <- sum(diag(t4))/sum(t4)
A4
##Sensitivity(TP/TP+FN)
ST4 <- t4[2,2]/(t4[2,2]+t4[1,2])
ST4
##Specificity(TN/TN+FP)
SP4 <- t4[1,1]/(t4[1,1]+t4[2,1])
SP4
##Prevalence
PV4<-t4[2,2]/sum(t4)
PV4
#roc_tree
roc.tree=roc(predicted,as.numeric(actual_tree))
plot(roc.tree,col = "blue")
###########################################
#SVM
library(e1071)
svm.model = svm(Default~., data = training, cost =100,gamma =.01)
summary(svm.model)
pred.svm=predict(svm.model,test)
t5 = table(actual,pred.svm)
t5
##Misclassification error[(FP+FN)/TP+TN+FP+FN]
ER5 = (t5[1,2]+t5[2,1])/sum(t5)
ER5
##Accuracy[(TP+TN)/TP+TN+FP+FN ]
A5 <- sum(diag(t5))/sum(t5)
A5
##Sensitivity(TP/TP+FN)
SP5 <- t5[1,1]/(t5[1,1]+t5[1,2])
SP5
##Specificity(TN/TN+FP)
ST5 <- t5[2,2]/(t5[2,2]+t5[2,1])
ST5
##Prevalence
PV5<-t5[2,2]/sum(t5)
###ROC_svm
roc.svm=roc(pred.svm,as.numeric(actual))
plot(roc.svm,col = "blue")
## Random Forest
set.seed(1)
library(randomForest)
# Fitting model
fit <- randomForest(Default ~ .,training,ntree=400)
summary(fit)
#Predict Output
predicted= predict(fit,test)
t6 <- table(predicted,actual)
t6
##Misclassification error[(FP+FN)/TP+TN+FP+FN]
ER6 = (t6[1,2]+t6[2,1])/sum(t6)
ER6
##Accuracy[(TP+TN)/TP+TN+FP+FN ]
A6 <-sum(diag(t6))/sum(t6)
A6
##Sensitivity(TP/TP+FN)
ST6 <- t6[2,2]/(t6[2,2]+t6[2,1])
ST6
##Specificity(TN/TN+FP)
SP6 <- t6[1,1]/(t6[1,1]+t6[1,2])
SP6
#Prevalence
PV6<-t6[2,2]/sum(t6)
#############################
##roc_randomforest
library(pROC)
roc.random_forest=roc(predicted,as.numeric(actual))
plot(roc.random_forest,col = "blue")
lines.roc(roc.svm,col = "cyan")
lines.roc(roc.tree,col = "green")
lines.roc(roc.model1,col = "red")
lines.roc(roc.ridge,col = "black")
lines.roc(roc.lasso,col = "purple")
|
b2718c8f1ef3625347140605b5dcfaa6256ac97d
|
e8133e93f13612f5b0f400e2004a6f71e4f4dc72
|
/Unidad 1/Clases/C1-7/C1-7 Regresión lineal.R
|
7a12739549b88c1e1a71dabc07860af7d8675d60
|
[] |
no_license
|
CristopherA98/Curso_R-UCE
|
760049ce8e72c471534d49e76c45a00f2db2c137
|
6d4ed39fad456c05a21cecfcfe0c27e7359d4e75
|
refs/heads/main
| 2023-01-19T07:31:10.378749
| 2020-11-24T14:43:51
| 2020-11-24T14:43:51
| 303,800,487
| 18
| 15
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,220
|
r
|
C1-7 Regresión lineal.R
|
####################################
### Curso R - UCE ###
### Clase 07 ###
####################################
# 1. Análisis de regresión -------------------------------------------------
# Prerrequisitos
paquetes <- c('tidyverse','GGally','openxlsx','MASS','hexbin','modelr','lmtest',
'car','boot','leaps')
install.packages(paquetes)
library(easypackages)
libraries(paquetes)
# Lectura de la base de datos
data_venta_publicidad <- read.xlsx("Datasets/Venta_vs_Publicidad.xlsx", detectDates = TRUE)
str(data_venta_publicidad)
data_venta_publicidad <- tibble::as.tibble( data_venta_publicidad)
# Dispersión de los datos
data_venta_publicidad %>%
ggplot(., aes(PUBLICIDAD_TOTAL, VENTA)) +
geom_point()
# Correlaciones
ggpairs(data_venta_publicidad, lower = list(continuous = "smooth"),
diag = list(continuous = "barDiag"), axisLabels = "none")
# Estimación
mod_1 <- lm(VENTA ~ PUBLICIDAD_TOTAL, data = data_venta_publicidad)
summary(mod_1)
## Crea una tabla de datos base para predecir
grid <- data_venta_publicidad %>%
data_grid(PUBLICIDAD_TOTAL)
## Agrega predicciones
grid <- grid %>%
add_predictions(mod_1, var = 'PREDIC')
## Graficar
data_venta_publicidad %>%
ggplot(., aes(x= PUBLICIDAD_TOTAL)) +
geom_point(aes(y= VENTA)) +
geom_line(aes(y= PREDIC), data = grid, colour = "red", size = 1) +
ggtitle("Datos + predicción")
## Agregar residuales a datos
data_venta_publicidad <- data_venta_publicidad %>%
add_residuals(mod_1, var = 'RESIDUALES')
## Explorar los residuales
ggplot(data_venta_publicidad, aes(RESIDUALES)) +
geom_freqpoly()
## Gráfico qq
mod_1 %>%
ggplot(., aes(qqnorm(.stdresid)[[1]], .stdresid)) +
geom_point(na.rm = TRUE) +
geom_abline() +
xlab("Theoretical Quantiles") +
ylab("Standardized Residuals") +
ggtitle("Normal Q-Q")
# Analisis de los residuos
# H0 : Los residuos se ajustan a una distribución normal vs
# H1: Los residuos NO se ajustan a una distribución normal
# Si tuviésemos pocos datos
shapiro.test(data_venta_publicidad$RESIDUALES)
# Test KS
ks.test(data_venta_publicidad$RESIDUALES, "pnorm")
# Podemos utilizar los residuos studentizados
data_venta_publicidad$STUDRESIDUAL <- studres(mod_1)
# Test KS
ks.test(data_venta_publicidad$STUDRESIDUAL, "pnorm")
## Explorar la varianza
ggplot(data_venta_publicidad, aes(PUBLICIDAD_TOTAL, RESIDUALES)) +
geom_ref_line(h = 0) +
geom_point() +
ggtitle("Residuos")
# H0 : La varianza es constante vs
# H1: La varianza no es constante
## Prueba de homocedasticidad
bptest(mod_1)
# Correlacion
## Grafico ACF
acf( residuals( mod_1))
# H0: La autocorrelación de los residuos es 0 vs
# H1: La autocorrelación de los residuos es diferente de 0
dwtest(mod_1,alternative = "two.side")
## transformando el predictor
mod_1a <- lm(VENTA ~ log(PUBLICIDAD_TOTAL), data = data_venta_publicidad)
summary(mod_1a)
## Crea una tabla de datos base para predecir
grid <- data_venta_publicidad %>%
data_grid(PUBLICIDAD_TOTAL)
## Agrega predicciones
grid <- grid %>%
add_predictions(mod_1a, var = 'PREDIC')
## Graficar
data_venta_publicidad %>%
ggplot(., aes(x= PUBLICIDAD_TOTAL)) +
geom_point(aes(y= VENTA)) +
geom_line(aes(y= PREDIC), data = grid, colour = "red", size = 1) +
ggtitle("Datos + predicción")
bptest(mod_1a)
# Prediccion --------------------------------------------------------------
## Predecir con el primer modelo
predict(mod_1, newdata = data.frame(PUBLICIDAD_TOTAL= 2000))
grid %>% filter(PUBLICIDAD_TOTAL== 2000)
## Predecir con el modelo transformado
predict(mod_1a, newdata = data.frame(PUBLICIDAD_TOTAL= 2000))
# 2. Regresión lineal multiple -------------------------------------------
# Prerrequisitos
paquetes <- c("tidyverse",'corrplot','GGally','MASS','leaps','gridExtra',
'lmtest','car')
library(easypackages)
libraries(paquetes)
datos <- as.data.frame(state.x77)
datos <- datos %>% rename(habitantes = Population, analfabetismo = Illiteracy,
ingresos = Income, esp_vida = `Life Exp`, asesinatos = Murder,
universitarios = `HS Grad`, heladas = Frost, area = Area) %>%
mutate(densidad_pob = habitantes * 1000 / area)
## 2.1. Relación entre las variables
round(cor(x = datos, method = "pearson"), 3)
corrplot::corrplot(cor(x = datos, method = "pearson")) # corrplot de las variables
ggpairs(datos, lower = list(continuous = "smooth"),
diag = list(continuous = "barDiag"), axisLabels = "none")
# 2.2. Selección de los mejores predictores
# 2.2.1. Modelo con todos los predictores
full.model <- lm(esp_vida ~ ., data=datos)
summary(full.model)
# backward
model1 <- stepAIC(full.model, trace=TRUE, direction="backward")
model1$anova
summary(model1)
# Para obtener los intervalos de confianza
?confint
confint(model1,level = 0.95)
# 2.2.2. Método forward
names(datos)
model2 <- lm(esp_vida ~ 1, data=datos)
completo <- formula(y ~ habitantes + ingresos + analfabetismo + asesinatos
+ universitarios + heladas + area + densidad_pob , datos)
model2_final <- stepAIC(model2, trace=FALSE, direction="forward", scope=completo)
model2_final$anova
summary(model2_final)
# 2.2.3. Método both
model3 <- stepAIC(model2, trace=FALSE, direction="both", scope=completo)
model3$anova
# Obtener R2 ajustado
summary(model1)$adj.r.squared
summary(model2_final)$adj.r.squared
# 2.2.4. regsubsets
model4 <- regsubsets(esp_vida~.,datos, nbest = 2, nvmax = 13)
summary(model4)
summary(model4)$which
model4_summary <- summary(model4)
data.frame(model4_summary$outmat)
a <- data.frame(cbind(model4_summary$which,
model4_summary$bic,
model4_summary$adjr2))
# Graficamente
par(mfrow=c(1, 2))
plot(model4, scale="adjr2", main=expression(R[Adj]^2))
plot(model4, scale="bic", main="BIC")
# valores excatos de R2 y BIC
summary(model4)$adjr2
summary(model4)$bic
# Ver cual es el mejor modelo
which(model4_summary$bic == min(summary(model4)$bic))
model4_summary$which[7, ]
which(model4_summary$adjr2 == max(summary(model4)$adjr2))
model4_summary$which[9, ]
# Otra forma de obtener el mejor modelo
# which.max(model4_summary$adjr2)
# model4_summary$which[which.max(model4_summary$adjr2), ]
## 2.3. SUpuestos del modelo
summary(model1)
plot1 <- ggplot(data = datos, aes(habitantes, model1$residuals)) +
geom_point() + geom_smooth(color = "firebrick") + geom_hline(yintercept = 0)
plot2 <- ggplot(data = datos, aes(asesinatos, model1$residuals)) +
geom_point() + geom_smooth(color = "firebrick") + geom_hline(yintercept = 0)
plot3 <- ggplot(data = datos, aes(universitarios, model1$residuals)) +
geom_point() + geom_smooth(color = "firebrick") + geom_hline(yintercept = 0)
plot4 <- ggplot(data = datos, aes(heladas, model1$residuals)) +
geom_point() + geom_smooth(color = "firebrick") + geom_hline(yintercept = 0)
# Juntar los gráficos
grid.arrange(plot1, plot2, plot3, plot4)
# 2.3.1. Distribución normal de los residuos
# par(mfrow = c(1,1))
qqnorm(model1$residuals)
qqline(model1$residuals)
ks.test(model1$residuals,"pnorm")
# media errores
mean(model1$residuals)
# 2.3.2. Varianzas constantes
ggplot(data = datos, aes(model1$fitted.values, model1$residuals)) +
geom_point() +
geom_smooth(color = "firebrick", se = FALSE) +
geom_hline(yintercept = 0)
bptest(model1)
# 2.3.3. No autocorrelacion en los erroes
dwt(model1, alternative = "two.sided")
bgtest(model1)
# 2.3.3. Multicolinealidad
corrplot(cor(dplyr::select(datos, habitantes, asesinatos,universitarios,heladas))
, type="upper", order="hclust", tl.col="black", tl.srt=45)
corrplot(cor(dplyr::select(datos, habitantes, asesinatos,universitarios,heladas)),
method = "number", type= "upper",tl.col = "black")
vif(model1)
## 2.4. Predicción
data2 <- data.frame(habitantes= 15000,
asesinatos= 65,
universitarios= 30.5,
heladas = 35)
predict(object=model1, newdata=data2)
# FUENTES ADICIONALES -----------------------------------------------------
browseURL(url="https://fhernanb.github.io/libro_regresion/",
browser = getOption("browser"))
|
5d3c68eb3ccb38bbc9122f393fa8e69fb663762a
|
285aff879570bad28b16013b63de9670bdc33082
|
/server.R
|
749732b4a9fb9e9c2953cf616a6f6521c9f236eb
|
[] |
no_license
|
wcm95/Developing-Data-Products
|
57aab64d1929e1381dc363863d8cfb19e3acc50f
|
d066b7f49613a4333b8767fe6516d1956ca05665
|
refs/heads/master
| 2021-06-10T09:20:18.464649
| 2017-02-15T08:15:05
| 2017-02-15T08:21:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 981
|
r
|
server.R
|
library(shiny)
library(ggplot2)
library(dplyr)
# Shiny server for a diamond-browsing app: filters ggplot2's built-in
# `diamonds` data set by the cut/color/clarity/price UI inputs and renders
# a table, a price summary, and two average-value captions.
shinyServer(function(input,output) {
# Keep only the first 7 columns (carat..price); the x/y/z dimension
# columns are not used by this app.
diamonds<-diamonds[,1:7]
# Reactive subset of diamonds matching the current UI selections;
# re-evaluates whenever any referenced input changes.
dat<-reactive({
data<-subset(diamonds,cut==input$cut & color==input$color & clarity==input$clarity & price %in% seq(input$price[1],input$price[2]))
# `asc` checkbox controls the sort direction by price.
if(input$asc){
arrange(data,price)
}
else{
arrange(data,desc(price))
}
})
# Filtered rows rendered as an HTML table.
output$table<-renderTable({
data.frame(dat())
})
# Five-number summary (+ mean) of the selected prices.
output$summary <- renderPrint({
summary(dat()$price)
})
# Caption: average price of the selection, rounded to 2 decimals.
output$ave<-renderText({
paste("The average price of the selected diamonds is $",round(mean(dat()$price),2),".",sep="")
})
# Caption: average carat weight of the selection.
output$carat<-renderText({
paste("The average weight of the selected diamonds is",round(mean(dat()$carat),2))
})
})
|
dfb586bb9adbd394c0cb0546a0da592424e2e072
|
464439d8ceb29146335ef2da40b6475c1aa3f9fc
|
/R/rateModel-class.R
|
95951d0c1606feda932571891a577325bc0770f0
|
[] |
no_license
|
ndukler/epiAlleleGLM
|
f5e951b08e3640c94fb5425386a81069d980ae30
|
c2cd5bfd86857b0b04ec1e3a33b3f5f32416188a
|
refs/heads/master
| 2020-04-09T20:54:04.745927
| 2018-12-20T22:45:51
| 2018-12-20T22:45:51
| 160,587,360
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,655
|
r
|
rateModel-class.R
|
#' Class rateModel
#'
#' Class \code{rateModel} holds a rate model for a set of alleles
#'
#' @slot alleleData an allele data object in a locked environment
#' @slot edgeGroups a data.table with four columns: parent, child, edgeGroup. All parent-child combos must be valid for alleleData object.
#' @slot rateFormula A formula that uses the variables in the allele data object to compute turnover rate
#' @slot piFormula A formula that uses the variables in the allele data object to compute pi
#' @slot rateDM the design matrix for the rate
#' @slot piDM the design matrix for pi
#' @slot params a vector of parameter values
#' @slot rateIndex a parameter index for the rate coefficients
#' @slot piIndex a parameter index for the pi coefficients
#' @slot fixed a logical vector indicating which variables are fixed
#'
#' @name rateModel-class
#' @rdname rateModel-class
#' @include rateModelValidityCheck.R
#' @importClassesFrom data.table data.table
#' @importClassesFrom Matrix Matrix
#' @exportClass rateModel
methods::setClass("rateModel", slots=c(alleleData = "environment",edgeGroups="data.table",
rateFormula="formula",piFormula="formula",rateDM="Matrix",
piDM="Matrix",params="numeric",rateIndex="ANY",
piIndex="ANY",fixed="logical"),
validity = rateModelValidityCheck)
#' rateModel
#'
#' Contructs an object that holds a rate model for a set of alleles
#' @param data An alleleData object
#' @param rateFormula A formula that uses the variables in the allele data object to compute turnover rate
#' @param piFormula A formula that uses the variables in the allele data object to compute pi (if NULL uses the same as rateFormula)
#' @param lineageTable A table with columns parent,child,edgeID, and rateGroup, where rateGroup is used to specify how the rates are tied between the branches
#' @name rateModel
#' @return an rateModel object
#' @examples
#'
#' @export
rateModel <- function(data, rateFormula, piFormula = NULL, lineageTable = NULL) {
  ## ** Validity checks and default setting ** ##
  ## inherits() is preferred over class(x) != "...": it stays correct when an
  ## object carries more than one class attribute.
  if (!inherits(data, "alleleData")) {
    stop("data must be of class alleleData")
  }
  ## Check that a rate formula is specified as a formula
  if (!inherits(rateFormula, "formula")) {
    stop("rateFormula must be a formula")
  }
  ## If piFormula is NULL, default it to rateFormula.
  ## BUG FIX: the original called write(), whose default `file` argument is the
  ## literal filename "data", so this status note was silently written to a
  ## file named "data" in the working directory. message() is the correct way
  ## to emit a status note.
  if (is.null(piFormula)) {
    message("piFormula is not specified, using same formula as rateFormula...")
    piFormula = rateFormula
  }
  ## Check that all covariates specified in rateFormula are contained in siteInfo
  if (!all(all.vars(rateFormula) %in% colnames(getSiteInfo(data)))) {
    stop("Some of the covariates in the rateFormula are not present in the siteInfo of the alleleData object.")
  }
  ## Check that all covariates specified in piFormula are contained in siteInfo
  if (!all(all.vars(piFormula) %in% colnames(getSiteInfo(data)))) {
    stop("Some of the covariates in the piFormula are not present in the siteInfo of the alleleData object.")
  }
  ## Checks for lineage table validity
  if (!is.null(lineageTable)) {
    if (!is.data.frame(lineageTable)) {
      stop("Lineage table must be a data.frame or data.table")
    } else if (!all(c("parent", "child", "edgeGroup") %in% colnames(lineageTable))) {
      stop("Lineage table must contain the columns \'parent\', \'child\', and \'edgeGroup\'")
    } else if (!setequal(with(getEdgeTable(data), paste0(parent, "-", child)), with(lineageTable, paste0(parent, "-", child)))) {
      stop("Edges in the alleleData object and the supplied lineageTable do not match. Run getEdgeTable(data) to view alleleData edges.")
    } else if (any(table(with(lineageTable, paste0(parent, "-", child))) > 1)) {
      stop("Duplicated edges in lineageTable")
    }
    ## Ensure edge groups are integer-labeled from 0 to number_of_groups - 1
    lineageTable[, edgeGroup := as.numeric(as.factor(edgeGroup)) - 1]
  } else {
    ## Create default lineage table: every edge in a single group (0)
    lineageTable = getEdgeTable(data)
    lineageTable[, edgeGroup := 0]
  }
  ## ** Intermediate reformatting and computation ** ##
  ## Create environment to hold the alleleData reference
  adEnviron = new.env()
  adEnviron$alleleData = data
  ## Create the design matrix for the rate variable
  rateDM = Matrix::Matrix(model.matrix(rateFormula, getSiteInfo(data)))
  ## Create pi design matrix; reuse rateDM when the two formulas are identical
  ## to avoid building the same matrix twice
  if (isTRUE(all.equal.formula(rateFormula, piFormula))) {
    piDM = rateDM
  } else {
    piDM = Matrix::Matrix(model.matrix(piFormula, getSiteInfo(data)))
  }
  ## Standardize column order and key for the lineage table
  data.table::setcolorder(lineageTable, c("parent", "child", "edgeGroup"))
  data.table::setkeyv(x = lineageTable, cols = c("child"))
  ## Create parameter index; note `1:n - 1` parses as (1:n) - 1, i.e. 0-based
  ## column indices
  rateP = expand.grid(group = unique(lineageTable$edgeGroup), column = 1:ncol(rateDM) - 1,
                      stringsAsFactors = FALSE)
  rateIndex = new(epiAlleleGLM:::paramIndex, rateP$group, rateP$column, colnames(rateDM)[rateP$column + 1], 0)
  piP = expand.grid(group = 2:data@nAlleles - 2, column = 1:ncol(piDM) - 1, stringsAsFactors = FALSE)
  ## `piP$column` spelled out in full (the original relied on partial matching
  ## of `piP$col`, which is fragile)
  piIndex = new(epiAlleleGLM:::paramIndex, piP$group, piP$column, colnames(piDM)[piP$column + 1], nrow(rateP))
  ## Build the parameter vector (all parameters initialized to 1)
  params = rep(1, nrow(rateP) + nrow(piP))
  ## Fixed vector defaults to FALSE (no parameter held fixed)
  fixed = logical(length(params))
  ## ** Object construction ** ##
  methods::new("rateModel", alleleData = adEnviron, edgeGroups = lineageTable, rateFormula = rateFormula,
               piFormula = piFormula, rateDM = rateDM, piDM = piDM, params = params, rateIndex = rateIndex,
               piIndex = piIndex, fixed = fixed)
}
|
e1314bfe93bec6ca693ef5c05721fb6be9cf0262
|
7d6ddced6b34b607718441e402656c41543a8a9c
|
/postgreSQL_R.R
|
ed6157d1b168a8039149901a662a57e98cd5f9c3
|
[] |
no_license
|
prashantbhuyan/prashantbhuyan-install
|
19b5cc2f27fd47ff21962dda8e2ce1afd4c41d11
|
e136e277f01d2dcf564ab14001c0bd9a88cb6b3a
|
refs/heads/master
| 2020-06-05T00:29:41.157861
| 2015-01-15T18:30:00
| 2015-01-15T18:30:00
| 23,474,586
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,012
|
r
|
postgreSQL_R.R
|
# Prashant Bhuyan
# R, RStudio and PostregSQL have been properly installed and that the DMwR
# package has been properly loaded and installed.
# 1) What Version of R & RStudio do you have installed?
#
# (Answer) I have installed Version 3.0.3 (2014-03-06) of R. I have also installed
# Version 0.98.501 – © 2009-2013 RStudio, Inc of RStudio for Mac OSX 64 Bit.
#
# 2) What Version of PostgreSQL do you have installed?
#
# (Answer) I have installed Version 9.3.5 of PostgreSQL for Mac OSX.
#
# 3a) Install and load the R Package DMwR.
#
# (Answer) > library("DMwR", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
# Loading required package: lattice
# Loading required package: grid
# KernSmooth 2.23 loaded
# Copyright M. P. Wand 1997-2009
#
# 3b) Load the data set sales and determine the number of observations in the data set.
#
# (Answer) There are 401,146 observations of 5 variables in the data set sales.
# > df <- data.frame(sales)
# > nrow(df)
# [1] 401146
#
#
|
80fe4336acd27715fd02bfed7962c2e9132b000b
|
b6f92a891ba3b05bc556a04e2116a00daf4181c3
|
/cleaning.R
|
e21d76acfa629c7fd45ad23369650bef2f723bf7
|
[] |
no_license
|
chanbin-oh/R-programing
|
45c4d3a5eac9ec52d65e198a5f582f57bff6e80a
|
12d181183e7935cdc4df4dc6320414ab7ef83a77
|
refs/heads/master
| 2020-05-01T14:49:34.797428
| 2019-05-26T09:25:04
| 2019-05-26T09:25:04
| 177,530,503
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 3,095
|
r
|
cleaning.R
|
### 극단치와 이상치 제거하기
## 07-1 빠진 데이터를 찾아라! - 결측치 정제하기
# 결측치 찾기
df <- data.frame(sex = c("M", "F", NA, "M", "F"),
score = c(5, 4, 3, 4, NA))
df
# 유의점
# - 문자로 구성된 변수는 NA가 <>에 감싸진 형태로 출력됨.
# - NA 앞뒤에는 겹따옴표가 없다. 따옴표가 있으면 결측치가 아니라 영문자 "NA"를 의미
is.na(df) # 결측치 확인
table(is.na(df)) # 결측치 빈도 출력
table(is.na(df$sex)) # sex 결측치 빈도 출력
table(is.na(df$score)) # score 결측치 빈도 출력
mean(df$score)
sum(df$score)
# 결측치 제거하기
library(dplyr) # dplyr 패키지 로드
df %>% filter(is.na(score)) # score가 NA인지 데이터만 출력
df %>% filter(!is.na(score)) # score 결측치 제거
df_nomiss <- df %>% filter(!is.na(score)) # score 결측치 제거를 변수에 할당
mean(df_nomiss$score) # score 평균 산출
sum(df_nomiss$score) # score 합계 산출
df_nomiss <- df %>% filter(!is.na(score) & !is.na(sex)) # 여러 변수 동시에 결측치 없는 데이터 추출하기
df_nomiss
# 결측치가 하나라도 있으면 제거하는 방법
# 앞에서 filter()에는 일일이 변수를 지정해서 삭제하는 것이다. 즉, 결측치가 어디에 있는지 일일이 확인해야함.
# na.omit()을 이용하면 변수를 지정하지 않고 결측치가 있는 행을 한번에 제거할 수 있다.
df_nomiss2 <- na.omit(df) # 모든 변수에 결측치 없는 데이터 추
df_nomiss2
# 유의점
# - na.omit()은 결측치가 하나라도 있으면 모두 제거하기 떄문에 간편한 측면이 있지만, 분석에 필요한 행까지 손실되는 장점.
# - 그러므로 filter()를 활용해서 분석에 필요한 자료만 골라서 없애는 것을 추천.
# 함수의 결측치 제외 기능 이용하기
# na.rm 파라미터 사용해보기.
mean(df$score, na.rm = T) # 결측치 제외하고 평균 산
sum(df$score, na.rm = T) # 결측치 제외하고 합계 산
# summarise()를 이용해서 요약통계량을 산출할 때도 na.rm을 적용할 수 있다.
df_exam[c(2,4,6), "math"] <- NA
df_exam
df_exam %>% summarise(mean_math = mean(math)) # math 평균산출
# math 결측치 제외하고 평균 산출
df_exam %>% summarise(mean_math = mean(math,na.rm = T))
## mean()함수뿐만 아니라 다른 수치연산 함수도 na.rm을 지원 -> 요약통계량 함수도 가능
## 결측치 대체하기
# 평균값으로 대체하
mean(df_exam$math, na.rm = T) # 결측치 제외하고 math 평균 산출
# ifelse()함수로 결측치 제외하기
df_exam$math <- ifelse(is.na(df_exam$math), 55 ,df_exam$math)
table(is.na(df_exam$math))
df_exam
|
51d872f381edbfbd4d6d63560d35de7c5d3bf87f
|
4551a2ec73889616de3196856c43df6934a2ef72
|
/v04 Logistic Regression + Other Files/v04 Logistic Regression + Other Files.R
|
e47b5a8b4713e9f4cbd0f95a2f2b2b80c128a970
|
[] |
no_license
|
gjanesch/Kaggle-Credit-Risk-Competition-Solutions
|
ee4fb0b4f6b46f5e256b084b734ca961f1014399
|
594a17987344f9af7937b4a911e3ba3c8cbb277d
|
refs/heads/master
| 2020-03-28T09:36:24.058560
| 2018-09-09T18:49:56
| 2018-09-09T18:49:56
| 148,045,876
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,997
|
r
|
v04 Logistic Regression + Other Files.R
|
# Solution attempt 4: Logistic Regression + Other Files
# Final submission: 2018-06-14
# Submission score: 0.712
# In addition to new tweaks to the data, information from other files will be added in to try to
# improve predictions.
library(ROCR)
library(feather)
library(plyr)
library(dplyr)
# TRUE if x contains at least one missing value.
# anyNA() is the idiomatic, short-circuiting equivalent of sum(is.na(x)) > 0:
# it stops scanning at the first NA instead of counting them all.
has_NAs <- function(x) {
  anyNA(x)
}
# Recode x by mapping each of its (sorted, unique) values to the
# corresponding entry of new_levels, via factor level codes.
turn_into_onehot_column <- function(x, new_levels) {
  codes <- as.factor(x)   # integer codes follow the sorted unique values of x
  new_levels[codes]
}
# Write the text of summary(model) to `file`.
# BUG FIX: the original called sink(file) ... sink() with no guard, so if
# summary()/print() errored the sink was never released and all subsequent
# console output was silently swallowed. on.exit() guarantees the sink is
# closed on both normal return and error.
print_summary_to_file <- function(model, file) {
  sink(file)
  on.exit(sink(), add = TRUE)
  print(summary(model))
}
# Should be used as a flag in order to control whether I'm just testing things or making a
# submission
TESTING <- TRUE
train <- readRDS("./Data Files/application_train.rds")
test <- readRDS("./Data Files/application_test.rds")
#################################################
# DATA CLEANING
#################################################
# Drop columns that are unhelpful for modelling:
#   * building-information columns (suffixes _AVG / _MODE / _MEDI),
#   * columns dominated by NAs (EXT_SOURCE_1/3, OWN_CAR_AGE),
#   * columns with large p-values in earlier fits.
# Applied identically to the train and test frames; returns the reduced
# data.frame. `drop = FALSE` keeps the result a data.frame even if only
# one column survives.
train_cleaning <- function(df) {
  building_info_columns <- grep("_AVG$|_MODE$|_MEDI$", names(df), value = TRUE)
  other_cols_to_remove <- c(
    "EXT_SOURCE_1", "EXT_SOURCE_3", "OWN_CAR_AGE",    # mostly NA
    "NAME_INCOME_TYPE", "NAME_HOUSING_TYPE"           # large p-values
  )
  df[, !(names(df) %in% c(building_info_columns, other_cols_to_remove)), drop = FALSE]
}
test_ID <- test$SK_ID_CURR
train <- train_cleaning(train)
test <- train_cleaning(test)
# For this first try, just train on rows with full data on remaining columns
train <- train[!apply(train, FUN = has_NAs, MARGIN = 1),]
#################################################
# DATA TRANSFORMATION
#################################################
# Feature transforms shared by train and test:
#   * log10 the income to tame its right skew (the matching AMT_CREDIT
#     transform was disabled in the original and stays disabled here),
#   * bucket child counts into the factor levels "0", "1", "2+".
train_transform <- function(df) {
  df$AMT_INCOME_TOTAL <- log10(df$AMT_INCOME_TOTAL)
  child_label <- ifelse(df$CNT_CHILDREN <= 1,
                        as.character(df$CNT_CHILDREN),
                        "2+")
  df$CNT_CHILDREN <- as.factor(child_label)
  df
}
train <- train_transform(train)
test <- train_transform(test)
train[train$CODE_GENDER == "XNA","CODE_GENDER"] <- "F"
train$CODE_GENDER <- droplevels(train$CODE_GENDER)
# There are still some missing values in test data; we'll just impute the median, though, since they
# are either only a couple in each column or 0/1 with one value predominant
test_cols_with_NAs <- names(test)[sapply(test, has_NAs)]
for(tc in test_cols_with_NAs){
test[is.na(test[,tc]),tc] <- median(test[,tc], na.rm = TRUE)
}
#################################################
# ADDING NEW FILES
#################################################
bureau <- read_feather("./Data Files/bureau.feather")
bureau_sub <- bureau %>% group_by(SK_ID_CURR) %>%
summarize(CREDIT_COUNT = n(), ANY_OVERDUE = (max(CREDIT_DAY_OVERDUE == 0)))
# Merge per-applicant credit-bureau aggregates (CREDIT_COUNT, ANY_OVERDUE)
# onto df by SK_ID_CURR. Applicants with no bureau record get 0 for both.
add_bureau_data <- function(df, bureau_df){
df <- left_join(df, bureau_df, by = "SK_ID_CURR")
# left_join leaves NA where no bureau row matched; treat as "no credits".
df[is.na(df$CREDIT_COUNT), "CREDIT_COUNT"] <- 0
df[is.na(df$ANY_OVERDUE), "ANY_OVERDUE"] <- 0
return(df)
}
train <- add_bureau_data(train, bureau_sub)
test <- add_bureau_data(test, bureau_sub)
rm(bureau, bureau_sub)
previous_application <- read_feather("./Data Files/previous_application.feather")
prev_app_sub <- previous_application %>% group_by(SK_ID_CURR) %>%
summarize(NUMBER_APPLICATIONS = n(), NUMBER_REFUSED = sum(NAME_CONTRACT_STATUS == "Refused"),
NUMBER_APPROVED = sum(NAME_CONTRACT_STATUS == "Approved"))
# Merge previous-application counts (applications / refused / approved)
# onto df by SK_ID_CURR; applicants with no prior applications get 0
# in all three columns.
add_prev_app_data <- function(df, prev_app_df){
df <- left_join(df, prev_app_df, by = "SK_ID_CURR")
# NA in NUMBER_APPLICATIONS marks rows with no match in prev_app_df.
no_previous_applications <- is.na(df$NUMBER_APPLICATIONS)
for(clm in c("NUMBER_APPLICATIONS", "NUMBER_REFUSED","NUMBER_APPROVED")){
df[no_previous_applications,clm] <- 0
}
return(df)
}
train <- add_prev_app_data(train, prev_app_sub)
test <- add_prev_app_data(test, prev_app_sub)
rm(previous_application, prev_app_sub)
cc_balance <- read_feather("./Data Files/credit_card_balance.feather")
cc_balance_sub <- cc_balance %>% group_by(SK_ID_CURR) %>%
summarize(NUM_LATE_CC_PAYMENTS = sum(SK_DPD_DEF != 0), MAX_CREDIT_LIMIT = max(AMT_CREDIT_LIMIT_ACTUAL),
NUM_PREV_CC_LOANS = length(unique(SK_ID_PREV)))
# Merge credit-card balance aggregates (late payments, max credit limit,
# number of previous CC loans) onto df by SK_ID_CURR; applicants with no
# credit-card history get 0 in all three columns.
add_cc_balance_data <- function(df, cc_df){
df <- left_join(df, cc_df, by = "SK_ID_CURR")
# NA in NUM_LATE_CC_PAYMENTS marks rows with no match in cc_df.
no_cc_balance_data <- is.na(df$NUM_LATE_CC_PAYMENTS)
for(clm in c("NUM_LATE_CC_PAYMENTS","MAX_CREDIT_LIMIT", "NUM_PREV_CC_LOANS")){
df[no_cc_balance_data, clm] <- 0
}
return(df)
}
train <- add_cc_balance_data(train, cc_balance_sub)
test <- add_cc_balance_data(test, cc_balance_sub)
rm(cc_balance, cc_balance_sub)
install_paym <- read_feather("./Data Files/installments_payments.feather")
missing_payment_info <- is.na(install_paym$AMT_PAYMENT)
install_paym[missing_payment_info,"AMT_PAYMENT"] <- install_paym[missing_payment_info,"AMT_INSTALMENT"]
install_paym[missing_payment_info,"DAYS_ENTRY_PAYMENT"] <- install_paym[missing_payment_info,"DAYS_INSTALMENT"]
install_paym_sub <- install_paym %>% group_by(SK_ID_CURR) %>%
summarize(NUM_PAYMENTS_UNDER = sum(AMT_PAYMENT < AMT_INSTALMENT),
NUM_PAYMENTS_LATE = sum(DAYS_ENTRY_PAYMENT > DAYS_INSTALMENT))
# Merge installment-payment aggregates (underpaid / late payment counts)
# onto df by SK_ID_CURR; applicants with no installment history get 0.
add_install_paym <- function(df, ip_df){
df <- left_join(df, ip_df, by = "SK_ID_CURR")
# Unmatched rows come back NA from left_join; backfill with 0.
df[is.na(df$NUM_PAYMENTS_UNDER), "NUM_PAYMENTS_UNDER"] <- 0
df[is.na(df$NUM_PAYMENTS_LATE), "NUM_PAYMENTS_LATE"] <- 0
return(df)
}
train <- add_install_paym(train, install_paym_sub)
test <- add_install_paym(test, install_paym_sub)
rm(install_paym, install_paym_sub, missing_payment_info)
POS_cash <- read_feather("./Data Files/POS_CASH_balance.feather")
pc_sub <- POS_cash %>% group_by(SK_ID_CURR) %>% summarize(NUM_LATE_POS_PAYMENTS = sum(SK_DPD_DEF == 1))
# Merge the POS/cash late-payment count onto df by SK_ID_CURR;
# applicants with no POS history get 0.
add_POS <- function(df, pos_df){
df <- left_join(df, pos_df, by = "SK_ID_CURR")
# Unmatched rows come back NA from left_join; backfill with 0.
df[is.na(df$NUM_LATE_POS_PAYMENTS), "NUM_LATE_POS_PAYMENTS"] <- 0
return(df)
}
train <- add_POS(train, pc_sub)
test <- add_POS(test, pc_sub)
rm(POS_cash, pc_sub)
#################################################
# MAKING THE MODEL
#################################################
train$SK_ID_CURR <- NULL
test$SK_ID_CURR <- NULL
if(TESTING == TRUE){
set.seed(555)
train_indices <- sample(seq_len(nrow(train)), size = floor(0.7*nrow(train)))
train_train <- train[train_indices,]
train_test <- train[-train_indices,]
model <- glm(TARGET ~ ., family = binomial(link = "logit"), data = train_train)
predictions <- predict(model, train_test, type = "response")
prediction_object <- prediction(predictions, train_test$TARGET)
auc <- performance(prediction_object, measure = "auc")
auc <- auc <- auc@y.values[[1]]
print(paste("AUC: ",auc))
}else{
model <- glm(TARGET ~ ., family = binomial(link = "logit"), data = train)
predictions <- predict(model, test, type = "response")
write.csv(data.frame(SK_ID_CURR = test_ID, TARGET = round(predictions,3)),
"v04_predictions.csv", row.names = FALSE)
}
print_summary_to_file(model, "v04_model_summary.txt")
|
d7ae3827b4aa3c09b271be55cba915872dee86c0
|
5f240923dc5b64d11e75dba8f99a305f42b5ef86
|
/man/Anthropometry-package.Rd
|
00daf0d60cf625312b6565c43bde8418c92ded91
|
[] |
no_license
|
cran/Anthropometry
|
47cdfbc1f58265fc6284d378c23e339a3bb69f2f
|
d7cdec4461048d16c08b4c05563acf591764de48
|
refs/heads/master
| 2023-03-05T22:02:54.232885
| 2023-02-22T09:20:02
| 2023-02-22T09:20:02
| 17,677,730
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,710
|
rd
|
Anthropometry-package.Rd
|
\name{Anthropometry-package}
\alias{Anthropometry-package}
\docType{package}
\title{
Statistical Methods for Anthropometric Data
}
\description{
Statistical methodologies especially developed to analyze anthropometric data. These methods are aimed at providing effective solutions to some commons problems related to Ergonomics and Anthropometry. They are based on clustering, the statistical concept of data depth, statistical shape analysis and archetypal analysis. Please see Vinue (2017) <doi:10.18637/jss.v077.i06>.
}
\details{
\tabular{ll}{
Package: Anthropometry\cr
Type: Package\cr
Version: 1.19\cr
Date: 2023-02-22\cr
License: GPL (>=2)\cr
LazyLoad: yes\cr
LazyData: yes\cr
}
anthrCases: Helper generic function for obtaining the anthropometric cases.\cr
Anthropometry-internalArchetypoids: Several internal functions to compute and represent archetypes and archetypoids.\cr
Anthropometry-internalHipamAnthropom: Several internal functions used by both $HIPAM_{MO}$ and $HIPAM_{IMO}$ algorithms.\cr
Anthropometry-internalPlotTree: Several internal functions used to build the HIPAM plot tree.\cr
Anthropometry-internalTDDclust: Several internal functions to clustering based on the L1 data depth. \cr
archetypesBoundary: Archetypal analysis in multivariate accommodation problem.\cr
archetypoids: Finding archetypoids.\cr
array3Dlandm: Helper function for the 3D landmarks.\cr
bustSizesStandard: Helper function for defining the bust sizes.\cr
CCbiclustAnthropo: Cheng and Church biclustering algorithm applied to anthropometric data.\cr
cdfDissWomenPrototypes: CDF for the dissimilarities between women and computed medoids and standard prototypes.\cr
checkBranchLocalIMO: Evaluation of the candidate clustering partition in $HIPAM_{IMO}$.\cr
checkBranchLocalMO: Evaluation of the candidate clustering partition in $HIPAM_{MO}$.\cr
computSizesTrimowa: Computation of the trimowa elements for a given number of sizes defined by the EN.\cr
computSizesHipamAnthropom: Computation of the hipamAnthropom elements for a given number of sizes defined by the EN.\cr
cube8landm: Cube of 8 landmarks.\cr
cube34landm: Cube of 34 landmarks.\cr
descrDissTrunks: Description of the dissimilarities between women's trunks.\cr
figures8landm: Figures of 8 landmarks with labelled landmarks.\cr
getBestPamsamIMO: Generation of the candidate clustering partition in $HIPAM_{IMO}$. \cr
getBestPamsamMO: Generation of the candidate clustering partition in $HIPAM_{MO}$.\cr
getDistMatrix: Dissimilarity matrix between individuals and prototypes.\cr
HartiganShapes: Hartigan-Wong k-means for 3D shapes.\cr
hipamAnthropom: HIPAM algorithm for anthropometric data.\cr
landmarksSampleSpaSurv: Landmarks of the sampled women of the Spanish Survey.\cr
LloydShapes: Lloyd k-means for 3D shapes.\cr
nearestToArchetypes: Nearest individuals to archetypes.\cr
optraShapes: Auxiliary optra subroutine of the Hartigan-Wong k-means for 3D shapes.\cr
overlapBiclustersByRows: Overlapped biclusters by rows.\cr
parallelep8landm: Parallelepiped of 8 landmarks.\cr
parallelep34landm: Parallelepiped of 34 landmarks.\cr
percentilsArchetypoid: Helper function for computing percentiles of a certain archetypoid.\cr
plotPrototypes: Prototypes representation.\cr
plotTreeHipamAnthropom: HIPAM dendogram.\cr
plotTrimmOutl: Trimmed or outlier observations representation.\cr
preprocessing: Data preprocessing before computing archetypal observations.\cr
projShapes: Helper function for plotting the shapes.\cr
qtranShapes: Auxiliary qtran subroutine of the Hartigan-Wong k-means for 3D shapes.\cr
sampleSpanishSurvey: Sample database of the Spanish anthropometric survey.\cr
screeArchetypal: Screeplot of archetypal individuals.\cr
shapes3dShapes: 3D shapes plot.\cr
skeletonsArchetypal: Skeleton plot of archetypal individuals.\cr
stepArchetypesRawData: Archetype algorithm to raw data.\cr
stepArchetypoids: Run the archetypoid algorithm several times.\cr
TDDclust: Trimmed clustering based on L1 data depth.\cr
trimmedLloydShapes: Trimmed Lloyd k-means for 3D shapes.\cr
trimmedoid: Trimmed k-medoids algorithm.\cr
trimmOutl: Helper generic function for obtaining the trimmed and outlier observations.\cr
trimowa: Trimmed PAM with OWA operators.\cr
USAFSurvey: USAF 1967 survey.\cr
weightsMixtureUB: Calculation of the weights for the OWA operators.\cr
xyplotPCArchetypes: PC scores for archetypes.\cr
}
\references{
Vinue, G., (2017). Anthropometry: An R Package for Analysis of Anthropometric Data, \emph{Journal of Statistical Software} \bold{77(6)}, 1--39, \doi{10.18637/jss.v077.i06}.
}
\author{
Guillermo Vinue <Guillermo.Vinue@uv.es>, Irene Epifanio, Amelia Simo, M. Victoria Ibanez, Juan Domingo, Guillermo Ayala
}
\keyword{ANTHROP}
|
1a99cc73ceaf7a1b2a78788a3e8b6e30d4fff072
|
775b0afd0c99ba3b57eed716463e93bb29bb44d0
|
/src/aggregate_frequencies.R
|
e69274b46ad3f26f87b6a2ce19246bfaa54e267e
|
[] |
no_license
|
rgorman/test-1
|
85ae1618e9e76808d6e410be69ff198c052dd404
|
66b40000cd69481fc0b188856c3d29016b744d48
|
refs/heads/master
| 2021-05-12T15:54:09.923902
| 2018-02-26T20:27:04
| 2018-02-26T20:27:04
| 116,996,548
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,353
|
r
|
aggregate_frequencies.R
|
## A script to calculate the corpus-wide frequencies of combined variables with values > 0.
## These frequencies will be used to select a subset of combined variables for further processing.
## NOTE(review): this is an exploratory/interactive script; several later
## sections are scratch work rather than a linear pipeline.
require(XML)        # XML parsing of the annotated corpus documents
require(tidyverse)
require(stringr)
input.dir <- "./output_2"                        # directory of per-document XML files
files.v <- dir(path=input.dir, pattern=".*xml")  # documents to process
y <- readRDS(file = "variable_names.R") #pre-generated names of variables in XML input.
group_1.list <- readRDS(file = "combined_var_names.R")  # lists of column-name combinations to evaluate
############## user-defined functions
## Build the value of one combined variable for every token (row) of the
## global data frame `z`.
##
## `x` is a (possibly listed) character vector of column names in `z`.
## Multi-column combinations are concatenated row-wise with "-"; a single
## column is returned unchanged.
##
## BUG FIX: the original commented out `a <- unlist(x)`, leaving `a`
## unbound inside the function and ignoring the argument entirely; the
## corrected redefinition later in this script restores it, so it is
## restored here too.
generate_vars <- function(x) {
  a <- unlist(x)
  b <- z[, a]                 # relies on `z` in the calling environment
  if (length(a) > 1) {
    c <- apply(b, 1, paste0, collapse = "-")
  } else {
    c <- b
  }
  return(c)
}
## Build the display name of one combined variable: the component column
## names joined with "_&_".
##
## BUG FIX: as with generate_vars, the original commented out
## `a <- unlist(x)` and therefore ignored its argument; also simplified
## away the redundant `c <- NULL; c <- append(c, b)` idiom, which is
## equivalent to returning `b` directly.
generate_var_names <- function(x) {
  a <- unlist(x)
  paste0(a, collapse = "_&_")
}
######################
## Accumulator: a one-column tibble of combined-variable names; counts from
## each document are appended here and aggregated after the loop.
output.m <- matrix(ncol = 1)
colnames(output.m) <- c("variable_name")
output.tib <- as_tibble(output.m)
## NOTE(review): the file index range 18:35 is hard coded -- presumably a
## manual restart after an interruption (cf. "variable_count_interrupted_2"
## below); confirm before re-running.
for (i in 18:35) {
  # read xml structure from file to .R object
  doc.object <- xmlTreeParse(file.path(input.dir, files.v[i]), useInternalNodes=TRUE)
  # extract all <word> elements and children into XmlNodeList object
  word.nodes <- getNodeSet(doc.object, "//word")
  word.list <- xmlApply(word.nodes, xmlToList)
  ## Build data frame z: one row per <word>, one column per attribute in y.
  for (j in seq_along(y)) {
    if ( j == 1) {
      z <- word.list %>% map_chr(y[j]) %>%
        data.frame(check.names = FALSE, stringsAsFactors = FALSE)
    } else {
      z <- word.list %>% map_chr(y[j]) %>%
        cbind(z, ., stringsAsFactors = FALSE)
    }
  }
  colnames(z) <- y # add column names to z
  ## Attach each group of combined variables as extra columns (timed).
  new_vars.m <- z[,1:2]
  for (k in 1:3) {
    start_time <- Sys.time()
    a <- sapply(group_1.list[[k]], generate_vars)
    nomina <- sapply(group_1.list[[k]], generate_var_names)
    colnames(a) <- nomina
    new_vars.m <- cbind(new_vars.m, a) %>%
      as.matrix()
    end_time <- Sys.time()
    print(paste(files.v[i], "loop", k))
    print(end_time - start_time)
  }
  ## Convert textual "NA"/"NULL" markers to real NA.
  ## NOTE(review): str_detect matches substrings, so any legitimate value
  ## containing "NA" or "NULL" is clobbered too -- verify this is intended.
  start_time <- Sys.time()
  new_vars.m[str_detect(new_vars.m, "NA")] <- NA # add logical NA to cells
  end_time <- Sys.time()
  print(files.v[i])
  print(end_time - start_time)
  start_time <- Sys.time()
  new_vars.m[str_detect(new_vars.m, "NULL")] <- NA # add logical NA to cells
  end_time <- Sys.time()
  print(files.v[i])
  print(end_time - start_time)
  ## Count non-NA occurrences of each combined variable in this document
  ## and append to the accumulator.
  vars.tib <- as_tibble(new_vars.m[, -c(1, 2)])
  vars_long.tib <- gather(vars.tib, variable_name, variable_value, na.rm = TRUE) %>%
    group_by(variable_name) %>%
    summarize(n())
  output.tib <- bind_rows(output.tib, vars_long.tib)
  print(paste("end of loop", i))
} # end of main loop
## Aggregate the per-document counts into corpus-wide totals (the first,
## all-NA accumulator row is dropped).
result <- xtabs(`n()` ~ variable_name, data = output.tib[ -1, ])
final_result.tib <- as_tibble(result, n = "count")
######################## create a set of the most commonly occurring variable types
most_frequent.tib <- final_result.tib %>%
  filter(count >=
    final_result.tib[, 2] %>%
    map(`[`) %>%
    unlist() %>%
    quantile(.99)) # argument of quantile() should be frequency percentile representing lower limit of variables selected
## Interactive probe: show how one stored name splits back into columns.
final_result.tib[2, 1] %>%
  as.character() %>%
  strsplit("_&_")
## Split the retained combined-variable names into their component column
## names for re-extraction below.
most_frequent.list <- most_frequent.tib[, 1] %>%
  map(`[`) %>%
  unlist() %>%
  strsplit("_&_")
## Assemble one combined variable from the columns of the global data
## frame `z` named in `x`: values of multi-column combinations are joined
## row-wise with "-"; a single column is returned as-is.
generate_vars <- function(x) {
  cols <- unlist(x)
  vals <- z[, cols]
  if (length(cols) > 1) {
    return(apply(vals, 1, paste0, collapse = "-"))
  }
  vals
}
## ---- Exploratory scratch: probes and two attempts at re-extracting ----
## ---- the most frequent combined variables from `z`.                ----
a <- most_frequent.list[[2]]
e <- z[, d]   # NOTE(review): `d` is never defined in this script -- this line errors as written
apply(e, 1, paste0, collapse = "-")
seq_along(most_frequent.list)
## First attempt: reuse generate_vars/generate_var_names per combination.
new_vars.m <- z[,1:2]
for (k in seq_along(most_frequent.list)) {
  start_time <- Sys.time()
  a <- sapply(most_frequent.list[[k]], generate_vars)
  nomina <- sapply(most_frequent.list[[k]], generate_var_names)
  colnames(a) <- nomina
  new_vars.m <- cbind(new_vars.m, a) %>%
    as.matrix()
  end_time <- Sys.time()
  print(paste(files.v[i], "loop", k))
  print(end_time - start_time)
}
#############################
## Second attempt: build the combined-variable matrix directly (values
## joined with "/", names joined with "_&_").
new_vars.m <- matrix(nrow = nrow(z), ncol = 1)
nomina <- NULL
for (k in seq_along(most_frequent.list)) {
  x <- most_frequent.list[[k]]
  if (length(x) > 1) {
    a <- z[, x] %>%
      apply(., 1, paste0, collapse = "/")
  } else {
    a <- z[, x]
  }
  new_vars.m <- cbind(new_vars.m, a)
  nomina <- append(nomina, paste0(x, collapse = "_&_"))
}
new_vars.m <- new_vars.m[, -1]   # drop the all-NA seed column
colnames(new_vars.m) <- nomina
vars.tib <-as_tibble(new_vars.m)
## Token metadata: ids built from the file name of the last processed
## document (files.v[35]) plus the row number.
file_name <- files.v[35] %>%
  gsub(".xml","", .)
token_id <- seq_len(nrow(z)) %>%
  paste0(file_name, "_token_", .)
meta_data <- cbind(token_id, z[, "cite"], z[, "form"])
colnames(meta_data) <- c("token_id", "cite", "form")
vars.tib <- cbind(meta_data, vars.tib)
## Long format: one row per (token, variable) pair, NAs dropped (timed).
start_time <- Sys.time()
vars.long.tib <- gather(vars.tib, variable_name, variable_value, -token_id, na.rm = TRUE)
end_time <- Sys.time()
print(end_time - start_time)
vars.long.tib %>%
  group_by(variable_name) %>%
  summarize(n())
## Collapse name and value into a single "name=value" string per row.
start_time <- Sys.time()
var_value.tib <- vars.long.tib %>%
  transmute(combined = paste(variable_name, variable_value, sep = "=") )
end_time <- Sys.time()
end_time - start_time
var_value.tib %>% # this works
  group_by(combined) %>%
  summarize(n()) %>%
  arrange(desc(`n()`)) %>%
  filter(`n()` >= 850) %>%
  View()
######################################
## ---- Remaining lines are interactive leftovers: inspection calls,   ----
## ---- duplicated assignments, and partially typed commands preserved ----
## ---- from the original session.                                     ----
names(new_vars.m) <- nomina
dim(new_vars.m)
names(new_vars.m[1])
list_of_working_tibbles <- vector(mode = "list", 35)
list_of_working_tibbles[[1]] <- vars.tib
v <- list_of_working_tibbles[[1]]
1193494 / 8551
colnames(new_vars.m[3:187])
new_vars.m <- new_vars.m[, -c(1, 2)]
names(new_vars.m) <- nomina
new_vars.m %>%
  class()
## NOTE(review): the next two lines are an abandoned, incomplete
## assignment and do not parse ("test.tib <- <- nomina"); remove or
## finish them before sourcing this file.
test.tib <-
<- nomina
nomina
ncol(new_vars.m)
x <- most_frequent.list[[2]]
length(x)
z[, x] %>%
  apply(., 1, paste0, collapse = "-")
sapply(most_frequent.list[[2]], generate_vars)
sapply(most_frequent.list[[2]], generate_var_names)
class(x)
colnames(new_vars.m)
output.tib
## Checkpoints saved for later sessions.
saveRDS(output.tib, file = "variable_count_interrupted_2.R")
saveRDS(most_frequent.list, file = "most_freq_list.R")
arrange(final_result.tib, desc(count))
sum(final_result.tib[, 2])
final_result.tib[, 2] %>%
  summary()
count.v <- final_result.tib[, 2]
count.v <- as.numeric(sapply(final_result.tib[, 2], paste0))
quantile(count.v, seq(0.9, 1, 0.005))
which(count.v >= 102473)
final_result.tib[which(final_result.tib[,2] >= 269126), ] %>%
  arrange(desc(count)) %>%
  View()
final_result.tib %>%
  filter(count >= 269126 ) %>%
  arrange(desc(count))
final_result.tib[2, 1] %>%
  as.character() %>%
  strsplit("_&_")
output.tib_2
result[1:5]
longer.tib
vars_long.tib
output.tib
longer.tib <- gather(test.tib, variable_name, variable_value, na.rm = TRUE) %>%
  group_by(variable_name) %>%
  summarize(n()) %>%
  bind_rows(vars_long.tib, .)
View(head(new_vars.m))
View(head(test.m))
View(head(test.m))
dim(new_vars.m)
test.m <- new_vars.m[, -1]
test.tib <- as_tibble(test.m)
result_2.tib <- gather(test.tib, variable_name, variable_value, na.rm = TRUE) %>%
  group_by(variable_name) %>%
  summarize(n())
result_3.tib <- bind_rows(result_1.tib, result_2.tib)
as_tibble(result_3.tib, n = "count")
result_1.tib
files.v <- files.v[1:2]
|
2a4b515f783584714380bfa62742c2a00724cc06
|
d03924f56c9f09371d9e381421a2c3ce002eb92c
|
/R/UnivarMixingDistribution.R
|
7809637f7f56c3fffa7e427e1a228fac4d88874d
|
[] |
no_license
|
cran/distr
|
0b0396bbd5661eb117ca54026afc801afaf25251
|
c6565f7fef060f0e7e7a46320a8fef415d35910f
|
refs/heads/master
| 2023-05-25T00:55:19.097550
| 2023-05-08T07:10:06
| 2023-05-08T07:10:06
| 17,695,561
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,767
|
r
|
UnivarMixingDistribution.R
|
## Construct a "UnivarMixingDistribution": a finite mixture of the
## univariate component distributions given via ... and/or `Dlist`,
## weighted by `mixCoeff` (equal weights by default).  With
## `withSimplify = TRUE` the result is passed through simplifyD().
UnivarMixingDistribution <- function(..., Dlist, mixCoeff,
               withSimplify = getdistrOption("simplifyD"))
{
  ldots <- list(...)
  ## Components may also arrive bundled in `Dlist`; they are appended to
  ## the ... components only if they form a valid UnivarDistrList.
  if(!missing(Dlist)){
     Dlist.L <- as(Dlist, "list")
     if(!is(try(do.call(UnivarDistrList,args=Dlist.L),silent=TRUE),"try-error"))
        ldots <- c(ldots, Dlist.L)
  }
  l <- length(ldots)
  if(l==0) stop ("No components given")
  ## A one-component "mixture" is just that component.
  if(l==1) return(ldots[[1]])
  mixDistr <- do.call(UnivarDistrList,args=ldots)
  ep <- .Machine$double.eps
  ## Default: equal weights; otherwise validate length and that the
  ## weights look like probabilities (non-negative, sum not above 1).
  if(missing(mixCoeff))
     mixCoeff <- rep(1,l)/l
  else{ if (l!=length(mixCoeff))
          stop("argument 'mixCoeff' and the mixing distributions must have the same length")
        if(any(mixCoeff < -ep) || sum(mixCoeff)>1+ep)
          stop("mixing coefficients are no probabilities")
  }
  ## Random-number and c.d.f. slots are built from the components.
  rnew <- .rmixfun(mixDistr = mixDistr, mixCoeff = mixCoeff)
  pnew <- .pmixfun(mixDistr = mixDistr, mixCoeff = mixCoeff)
  ## Bookkeeping flags: arithmetic/simulation taint is inherited if any
  ## component carries it; exactness only if all components are exact.
  .withArith <- any(as.logical(lapply(mixDistr, function(x) x@".withArith")))
  .withSim <- any(as.logical(lapply(mixDistr, function(x) x@".withSim")))
  .lowerExact<- all(as.logical(lapply(mixDistr, function(x) x@".lowerExact")))
  ## NOTE(review): `dnew` is computed when the components are all
  ## absolutely continuous or all discrete, but the object below is
  ## created with d = NULL, so `dnew` is never used -- verify upstream.
  if (all( as.logical(lapply(mixDistr, function(x) is(x,"AbscontDistribution")))) ||
      all( as.logical(lapply(mixDistr, function(x) is(x,"DiscreteDistribution")))))
      dnew <- .dmixfun(mixDistr = mixDistr, mixCoeff = mixCoeff)
  ## Merge the support gaps of all components (components without a
  ## gaps() method are silently skipped via try()).
  gaps <- NULL
  for(i in 1:l){
     if(is.null(gaps)){
        try(gaps <- gaps(mixDistr[[i]]), silent=TRUE)
     }else{
        if(!is(try(gaps0 <- gaps(mixDistr[[i]]), silent=TRUE),"try-error"))
           if(!is.null(gaps0)) gaps <- .mergegaps2(gaps,gaps0)
     }
  }
  ## Sorted union of the (discrete) supports of all components.
  support <- numeric(0)
  for(i in 1:l){
     if(!is(try(support0 <- support(mixDistr[[i]]), silent=TRUE),"try-error"))
        support <- unique(sort(c(support,support0)))
  }
  gaps <- .mergegaps(gaps,support)
  ## Quantile slot, aware of the merged gaps.
  qnew <- .qmixfun(mixDistr = mixDistr, mixCoeff = mixCoeff,
                   Cont = TRUE, pnew = pnew, gaps = gaps)
  obj <- new("UnivarMixingDistribution", p = pnew, r = rnew, d = NULL, q = qnew,
             mixCoeff = mixCoeff, mixDistr = mixDistr, .withSim = .withSim,
             .withArith = .withArith,.lowerExact =.lowerExact, gaps = gaps,
             support = support)
  ## The mixture inherits spherical symmetry only if every component is
  ## spherically symmetric about the same centre.
  if (all( as.logical(lapply(mixDistr, function(x) is(x@Symmetry,"SphericalSymmetry"))))){
      sc <- SymmCenter(mixDistr[[1]]@Symmetry)
      if (all( as.logical(lapply(mixDistr, function(x) .isEqual(SymmCenter(x@Symmetry),sc)))))
          obj@Symmetry <- SphericalSymmetry(sc)
  }
  if (withSimplify)
      obj <- simplifyD(obj)
  return(obj)
}
## Accessor and replacement methods for the slots of a
## "UnivarMixingDistribution" object.
setMethod("mixCoeff", "UnivarMixingDistribution",
          function(object) object@mixCoeff)
setReplaceMethod("mixCoeff", "UnivarMixingDistribution",
                 function(object, value) {
                   object@mixCoeff <- value
                   object
                 })
setMethod("mixDistr", "UnivarMixingDistribution",
          function(object) object@mixDistr)
setReplaceMethod("mixDistr", "UnivarMixingDistribution",
                 function(object, value) {
                   object@mixDistr <- value
                   object
                 })
setMethod("support", "UnivarMixingDistribution",
          function(object) object@support)
setMethod("gaps", "UnivarMixingDistribution",
          function(object) object@gaps)
#------------------------------------------------------------------------
# new p.l, q.r methods
#------------------------------------------------------------------------
## Left variant of the c.d.f., rebuilt from the components with the same
## mixing weights but leftright = "left".
setMethod("p.l", signature(object = "UnivarMixingDistribution"),
          function(object) .pmixfun(mixDistr = mixDistr(object),
                                    mixCoeff = mixCoeff(object),
                                    leftright = "left"))
## Right variant of the quantile function: where the support has gaps the
## left quantile function is modified accordingly; otherwise q.l is reused
## unchanged.
setMethod("q.r", signature(object = "UnivarMixingDistribution"),
          function(object){
            if(!is.null(gaps(object))&&length(gaps(object)))
              .modifyqgaps(pfun = p(object), qfun = q.l(object),
                           gaps = gaps(object), leftright = "right")
            else
              q.l(object)
          })
#------------------------------------------------------------------------
# new accessor methods
#------------------------------------------------------------------------
## The three accessors below share one pattern: objects serialized under an
## older class definition may lack the slot, making slot() fail.  In that
## case the object is converted via conv2NewVersion(), the caller's binding
## is updated in place through eval.parent(substitute(object <- object0)),
## a warning explains the conversion, and the converted object's slot is
## returned.
setMethod(".lowerExact", "UnivarMixingDistribution", function(object){
  er <- is(try(slot(object, ".lowerExact"), silent = TRUE), "try-error")
  if(er){ object0 <- conv2NewVersion(object)
          objN <- paste(substitute(object))
          warning(gettextf("'%s' was generated in an old version of this class.\n",
                           objN),
                  gettextf("'%s' has been converted to the new version",objN),
                  gettextf(" of this class by a call to 'conv2NewVersion'.\n")
                  )
          eval.parent(substitute(object<-object0))
          return(object0@.lowerExact)}
  object@.lowerExact})
setMethod(".logExact", "UnivarMixingDistribution", function(object){
  er <- is(try(slot(object, ".logExact"), silent = TRUE), "try-error")
  if(er){ object0 <- conv2NewVersion(object)
          objN <- paste(substitute(object))
          warning(gettextf("'%s' was generated in an old version of this class.\n",
                           objN),
                  gettextf("'%s' has been converted to the new version",objN),
                  gettextf(" of this class by a call to 'conv2NewVersion'.\n")
                  )
          eval.parent(substitute(object<-object0))
          return(object0@.logExact)}
  object@.logExact})
setMethod("Symmetry", "UnivarMixingDistribution", function(object){
  er <- is(try(slot(object, "Symmetry"), silent = TRUE), "try-error")
  if(er){ object0 <- conv2NewVersion(object)
          objN <- paste(substitute(object))
          warning(gettextf("'%s' was generated in an old version of this class.\n",
                           objN),
                  gettextf("'%s' has been converted to the new version",objN),
                  gettextf(" of this class by a call to 'conv2NewVersion'.\n")
                  )
          eval.parent(substitute(object<-object0))
          return(object0@Symmetry)}
  object@Symmetry})
|
77d015b116103c2ffe9d9e87562fefbbd21415c7
|
49599cbeb99d70432a688e11e97a4d3160881f6d
|
/R/make.Mondrian.planning.units.R
|
664c1ee57a6f67b72539df6dfab7e3086f0d0bdb
|
[] |
no_license
|
langfob/rdv-framework-frozen-google-code-export-do-not-change
|
260de4eae4de9d4d0465d90e693790d57f411073
|
fe7e745223251790e50bc9a412080e45e0cbc709
|
refs/heads/master
| 2021-01-21T23:29:59.459162
| 2015-07-17T09:30:46
| 2015-07-17T09:30:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,883
|
r
|
make.Mondrian.planning.units.R
|
#-----------------------------------------------------------#
#                                                           #
#  make.Mondrian.planning.units.R                           #
#                                                           #
#  Make a planning unit map for use with the MARXAN         #
#  reserve selection software, by dropping lots of random   #
#  size rectangles at random locations so that it looks a   #
#  bit like a Mondrian painting.  The goal is lots of       #
#  planning units with boundaries that have some, but not   #
#  too much, complexity.                                    #
#                                                           #
#  Cloned from make.rectangular.planning.units.R.           #
#  August 31, 2007 - BTL                                    #
#                                                           #
#  source( 'make.Mondrian.planning.units.R' )               #
#                                                           #
#  Expects `rows`, `cols`, `non.habitat.indicator`, the     #
#  PAR.Mondrian.* parameters, DEBUG and the output helpers  #
#  to be provided by previously sourced files (see w.R).    #
#-----------------------------------------------------------#

#source( 'variables.R' )
source( 'w.R' )

# Create the base map for the planning units, initialized
# everywhere to the non-habitat indicator value.
background.value <- as.integer( non.habitat.indicator );
pu.map <- matrix (background.value, nrow=rows, ncol=cols);

rect.size.range <-
    PAR.Mondrian.min.rectangle.size:PAR.Mondrian.max.rectangle.size;

for (cur.rect.ID in seq_len(PAR.Mondrian.num.rectangles))
    {
    # Choose a random size for the rectangle.
    cur.rect.height <- sample (rect.size.range, 1, replace=TRUE);
    cur.rect.width  <- sample (rect.size.range, 1, replace=TRUE);

    # Choose a random location such that the rectangle stays on the map.
    last.legal.row <- rows - cur.rect.height + 1;
    last.legal.col <- cols - cur.rect.width + 1;
    cur.upper.left.row <- sample (1:last.legal.row, 1, replace=TRUE) - 1;
    cur.upper.left.col <- sample (1:last.legal.col, 1, replace=TRUE) - 1;

    if (DEBUG)
        {
        cat ("\nAt ", cur.rect.ID,
             ", llr = ", last.legal.row,
             ", llc = ", last.legal.col,
             ", h = ", cur.rect.height,
             ", w = ", cur.rect.width,
             ", ulr = ", cur.upper.left.row,
             ", ulc = ", cur.upper.left.col,
             "\n");
        }

    # Draw the rectangle on the map.  Vectorized block assignment
    # replaces the original element-by-element double loop.
    pu.map [cur.upper.left.row + seq_len(cur.rect.height),
            cur.upper.left.col + seq_len(cur.rect.width)] <- cur.rect.ID;
    }

#write.pgm.file (pu.map, "Mondrian", rows, cols);
#write.pgm.txt.files( pu.map, planning.units.filename.base, rows, cols );
write.to.3.forms.of.files( pu.map, planning.units.filename.base, rows, cols );
#write.asc.file( pu.map, planning.units.filename.base, rows, cols );
|
74e408da4e3301d7617f46026f3e05db8992dc12
|
75977c1f9257c38c45a543b0183184b2b0e34a4d
|
/R/datasets.R
|
d7c189eb74d23a2152fc50e5ae16a91a6181ad09
|
[
"Apache-2.0"
] |
permissive
|
aimwts/rfaculty
|
54ab33411449178f91bdc679e6cee76736794246
|
81962dab67cf56b4804e83d1cd583ea28b61c11f
|
refs/heads/master
| 2022-11-09T21:53:36.303173
| 2020-06-23T13:11:36
| 2020-06-23T13:11:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,782
|
r
|
datasets.R
|
# Copyright 2018-2019 Faculty Science Limited
#' Copy from Faculty datasets to the local filesystem.
#'
#' @param datasets_path File path on Faculty datasets.
#' @param local_path The destination path on the local filesystem.
#' @param project_id The ID of the project; by default, the current project is used.
#' @export
datasets_get <- function(datasets_path, local_path, project_id = NULL) {
  pydatasets$get(datasets_path, local_path, project_id)
}
#' Copy from the local filesystem to Faculty datasets.
#'
#' @param local_path Source path on the local filesystem.
#' @param datasets_path The destination file path on Faculty datasets.
#' @param project_id The ID of the project; by default, the current project is used.
#' @export
datasets_put <- function(local_path, datasets_path, project_id = NULL) {
  pydatasets$put(local_path, datasets_path, project_id)
}
#' List files on Faculty datasets.
#'
#' @param prefix The prefix by which to filter files (default: '/').
#' @param project_id The ID of the project; by default, the current project is used.
#' @param show_hidden Whether to show hidden files (files prefixed with a dot).
#' @export
datasets_list <- function(prefix = "/", project_id = NULL, show_hidden = FALSE) {
  pydatasets$ls(prefix, project_id, show_hidden)
}
#' Copy a file from one location to another on Faculty Datasets.
#'
#' @param source_path Source path on Faculty datasets.
#' @param destination_path Destination path on Faculty datasets.
#' @param project_id The ID of the project; by default, the current project is used.
#' @export
datasets_copy <- function(source_path, destination_path, project_id = NULL) {
  pydatasets$cp(source_path, destination_path, project_id)
}
#' Move a file from one location to another on Faculty Datasets.
#'
#' @param source_path Source path on Faculty datasets.
#' @param destination_path Destination path on Faculty datasets.
#' @param project_id The ID of the project; by default, the current project is used.
#' @export
datasets_move <- function(source_path, destination_path, project_id = NULL) {
  pydatasets$mv(source_path, destination_path, project_id)
}
#' Delete a file from Faculty Datasets.
#'
#' @param path File path on Faculty datasets.
#' @param project_id The ID of the project; by default, the current project is used.
#' @export
datasets_delete <- function(path, project_id = NULL) {
  pydatasets$rm(path, project_id)
}
#' Retrieve the etag for a file on Faculty datasets.
#'
#' @param path File path on Faculty datasets.
#' @param project_id The ID of the project; by default, the current project is used.
#' @export
datasets_etag <- function(path, project_id = NULL) {
  pydatasets$etag(path, project_id)
}
|
2d169f1da610b692cd8697535f3342147c6e9e7d
|
b35ffbfd5d78961299f359f02ff96e1002f19173
|
/plot3.R
|
9dca8da18ed9d413205a3c2fd56e64880f4311bd
|
[] |
no_license
|
zfkl/ExData_Plotting1
|
7a68bf8b5f309da64e21a775960b04eb65667c90
|
2085870a8583cea265d2b57d8af1b0810ac1a559
|
refs/heads/master
| 2021-01-09T07:36:12.762317
| 2015-02-08T19:34:45
| 2015-02-08T19:34:45
| 30,475,291
| 0
| 0
| null | 2015-02-08T00:13:47
| 2015-02-08T00:13:47
| null |
UTF-8
|
R
| false
| false
| 873
|
r
|
plot3.R
|
## plot3: overlay the three sub-metering series for the two-day window of
## the household power consumption data set.

# Read only the header row to recover column names and classes, then read
# the two-day window of interest directly via skip/nrows.
house <- read.table("~/household_power_consumption.txt", nrows=1, header=TRUE,
                    na.strings="?", sep=";")
# BUG FIX: the original passed the undefined variable `y` as col.names;
# capture the header's names explicitly instead.
header.names <- names(house)
house <- read.table("~/household_power_consumption.txt",
                    skip=397+60*24*15, nrows=60*24*2, header=FALSE,
                    na.strings="?", sep=";", col.names=header.names,
                    colClasses=sapply(house, class))
house$index <- 1:2880   # minute index over the two days
# Draw the first series with a y range wide enough for all three series
# (the original limited every panel to range(Sub_metering_3), clipping the
# larger Sub_metering_1/2 values), then overlay the others with lines()
# instead of repeated plot()+par(new=TRUE).
plot(house$index, house$Sub_metering_1, type="l", xlab="",
     ylab="Energy sub metering", xaxt="n",
     ylim=range(house$Sub_metering_1, house$Sub_metering_2,
                house$Sub_metering_3, na.rm=TRUE))
lines(house$index, house$Sub_metering_2, col="red")
lines(house$index, house$Sub_metering_3, col="blue")
axis(1, at=c(0,1440,2880), labels=expression("Monday","Tuesday","Wednesday"))
legend("topleft", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       col=c('black','red','blue'), lty=1, cex=0.50)
|
9ddc96ca14b036941b176935f81455fb001fb9f4
|
2457cfad7d128305656d73c79fd0f162d3181608
|
/Code/R_Code/Scatboot.R
|
7bae3e209d4a461682ceeb04bb2dea8466c5cc2d
|
[] |
no_license
|
Jadamso/PrettyR
|
6b6bcc8331c45364267e9c4a67a31c594fd579a8
|
16cc947bb2f216297113a3189326072d149635ea
|
refs/heads/master
| 2021-01-15T16:51:41.219506
| 2020-03-22T23:08:35
| 2020-03-22T23:08:35
| 99,728,574
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,532
|
r
|
Scatboot.R
|
#------------------------------------------------------------------
##################
#' M out of N Bootstrapped Loess Confidence Intervals
##################
#'
#' Fits a loess curve to (x, y) and computes pointwise confidence limits
#' from `breps` m-out-of-n bootstrap refits, all evaluated on a common
#' grid of 40 x values.
#'
#' @param x,y coordinates
#' @param breps number of bootstrap replications
#' @param mfun function mapping n to the subsample size m
#' @param confidence CI interval
#' @param degree,span,family loess parameters
#'
#' @return list of the call parameters, the cleaned data (`data`), and a
#'   data frame `fit` with columns x, y.fit, up.lim, low.lim, stddev.
#'
#' @details see "http://content.csbs.utah.edu/~rogers/datanal/R/scatboot.r"
#'
#' @export
scatboot <- compiler::cmpfun( function(
    x, y,
    breps=100, mfun=function(m){m^(.9)},
    confidence=0.9, family="gaussian",
    degree=2, span=2/3){

  # Put input data into a data frame, sorted by x, with no missing values.
  dat <- na.omit(data.frame(x=x, y=y))
  if(nrow(dat) == 0) {
    print("Error: No data left after dropping NAs")
    print(dat)
    return(NULL)
  }
  dat <- dat[order(dat$x), ]

  # Common grid of x values on which every fit is evaluated.
  r <- range(dat$x, na.rm=TRUE)
  x.out <- seq(r[1], r[2], length.out=40)

  # Fit curve to the full (cleaned) data.
  f <- loess(y~x, data=dat, degree=degree, span=span,
             family=family)
  y.fit <- approx(f$x, fitted(f), x.out, rule=2)$y

  # Generate bootstrap replicates.
  # BUG FIX: the original resampled from the *raw* x/y arguments, so once
  # NA rows had been dropped the sampled indices referred to the wrong
  # observations; resample from the cleaned `dat` instead, and fit with a
  # data-frame formula so predict() matches variables by name.
  PREDS <- parallel::mclapply( seq_len(breps), function(i, len=nrow(dat)){
    m <- mfun(len)
    ndx <- sample.int(len, size=m, replace=TRUE)
    fit <- loess(y~x, data=dat[ndx, ], degree=degree,
                 span=span, family=family)
    pred <- predict(fit, newdata=data.frame(x=x.out))
    return(pred)
  } )
  pred <- as.data.frame( do.call("rbind", PREDS) )

  # Drop grid points where too many replicates produced NA (x.out values
  # outside a subsample's range) to support the requested confidence.
  n.na <- apply(is.na(pred), 2, sum) # num NAs in each column
  n.na.test <- !(n.na > breps*(1.0-confidence) )
  pred <- pred[, n.na.test, drop=FALSE]  # drop=FALSE: stay a data frame

  # Pointwise limits and standard deviation across replicates
  # (effective confidence excludes NAs).
  pr <- 0.5*(1.0 - confidence)
  up.low.lim <- apply(pred, 2, quantile, c(1.0-pr, pr), na.rm=TRUE)
  stddev <- apply(pred, 2, sd, na.rm=TRUE)

  # Output
  pfit <- as.data.frame( cbind(
    x=x.out[n.na.test],
    y.fit=y.fit[n.na.test],
    up.lim=up.low.lim[1,],
    low.lim=up.low.lim[2,],
    stddev=stddev) )
  ret_list <- list(
    breps=breps,
    confidence=confidence,
    degree=degree,
    span=span,
    family=family,
    data=dat,
    fit=pfit)
  return( ret_list)
})
|
bb3910d87b3e98d390891b14437ad835fbaadb99
|
2f686ad3fb833cfe417ee7db9297e511e42bdbdc
|
/ctap_R/ctap_rtools/man/load_ctap_mc_measurement.Rd
|
40233b59941e53bce5d319b794d4d581b6e0ef0f
|
[
"MIT"
] |
permissive
|
RuoyanMeng/ctap
|
171ffffef9700b3188ecc99cbdae0e1def8bb4c3
|
192d3fc6d85779b968736a4cfe23afe414f77b42
|
refs/heads/master
| 2021-04-12T16:20:43.797721
| 2020-03-20T10:48:54
| 2020-03-20T10:48:54
| 249,091,294
| 0
| 0
|
NOASSERTION
| 2020-03-22T01:22:39
| 2020-03-22T01:22:38
| null |
UTF-8
|
R
| false
| true
| 648
|
rd
|
load_ctap_mc_measurement.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mc_utils.R
\name{load_ctap_mc_measurement}
\alias{load_ctap_mc_measurement}
\title{Load CTAP MC measurement sheet/table}
\usage{
load_ctap_mc_measurement(mc_file, measurement_sheet,
dateformat = "\%d.\%m.\%Y")
}
\arguments{
\item{mc_file}{character, Full path to the MC file}
\item{measurement_sheet}{character, Name of sheet that lists measurements}
\item{dateformat}{character, Format for the date column}
}
\value{
A tibble with the contents of the measurement_sheet sheet/table
}
\description{
Load CTAP MC measurement sheet/table. File can be xlsx or sqlite.
}
|
9bad5c9653c42b7465bf767da475db46dd2fa05f
|
e214f8451eaf187ddcd7f70dbb287147df1884eb
|
/run_analysis.R
|
8a21816d08315d9351e9d21d78bec46c551cf8a4
|
[] |
no_license
|
wanghm02/Get_and_Clean_Data
|
584de7664561944db45ecd3b222ceecf3a93defc
|
bc9fe41756502a0ad35a0be26f86aa3f11fc786e
|
refs/heads/master
| 2020-05-23T10:19:34.628098
| 2017-01-30T13:22:22
| 2017-01-30T13:22:22
| 80,420,461
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,129
|
r
|
run_analysis.R
|
library(data.table)
library(reshape2)
## Getting-and-Cleaning-Data course project: merge the UCI HAR train/test
## sets, keep only mean/std features, label activities, and write a tidy
## data set of per-subject/per-activity feature averages.
filename<-"Dataset.zip"
## Download and unzip the dataset
if(!file.exists(filename)){
  fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileURL, filename, method="curl")  # NOTE(review): method="curl" requires curl on the system
}
if (!file.exists("UCI HAR Dataset")) {
  unzip(filename)
}
# Load activity labels and features
activitylabels <- read.table("UCI HAR Dataset/activity_labels.txt")
activitylabels[,2] <- as.character(activitylabels[,2])
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2])
# Extract only the data on mean and standard deviation
featuresWanted <- grep(".*mean.*|.*std.*", features[,2])
featuresWanted.names <- features[featuresWanted,2]
# Clean the feature names: "-mean"/"-std" -> "Mean"/"Std", drop punctuation.
featuresWanted.names = gsub('-mean', 'Mean', featuresWanted.names)
featuresWanted.names = gsub('-std', 'Std', featuresWanted.names)
featuresWanted.names <- gsub('[-()]', '', featuresWanted.names)
# Load the datasets: selected feature columns with subject and activity
# columns bound on the left.
datatrain <- read.table("UCI HAR Dataset/train/X_train.txt")[featuresWanted]
datatrainActivities <- read.table("UCI HAR Dataset/train/Y_train.txt")
datatrainSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
datatrain <- cbind(datatrainSubjects, datatrainActivities, datatrain)
datatest <- read.table("UCI HAR Dataset/test/X_test.txt")[featuresWanted]
datatestActivities <- read.table("UCI HAR Dataset/test/Y_test.txt")
datatestSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
datatest <- cbind(datatestSubjects, datatestActivities, datatest)
# merge datasets and add labels
data_all <- rbind(datatrain, datatest)
colnames(data_all) <- c("subject", "activity", featuresWanted.names)
# turn activities & subjects into factors
data_all$activity <- factor(data_all$activity, levels = activitylabels[,1], labels = activitylabels[,2])
data_all$subject <- as.factor(data_all$subject)
# Melt to long form, then average every feature per (subject, activity)
# and write the tidy result.
data_all.melted <- melt(data_all, id = c("subject", "activity"))
data_all.mean <- dcast(data_all.melted, subject + activity ~ variable, mean)
write.table(data_all.mean, "tidy.txt", row.names = FALSE, quote = FALSE)
|
c671eb43b93ec57de4ec59a7380548ea05fe7edd
|
a0506d64a4cfe8de19554bb914f7299f57782bd1
|
/plot1.R
|
c84284cdf98e8922af164bfbca9560ee6e2cb61d
|
[] |
no_license
|
wborgmeyer/ExData_Plotting1
|
e9972d689d657cdecf8f97b29f288c20da82a563
|
cf0be452979e0f8949c52be93930999e6f5bac00
|
refs/heads/master
| 2020-05-22T21:04:54.723844
| 2017-03-13T15:46:42
| 2017-03-13T15:46:42
| 84,724,599
| 0
| 0
| null | 2017-03-12T12:42:34
| 2017-03-12T12:42:34
| null |
UTF-8
|
R
| false
| false
| 1,056
|
r
|
plot1.R
|
library(data.table)
library(dplyr)
# The dates that we will graph for this exercise: 2007-02-01 and
# 2007-02-02 (endtime is exclusive).
begintime <- strptime("2007-02-01 00:00:00", "%Y-%m-%d %H:%M:%S")
endtime <- strptime("2007-02-03 00:00:00", "%Y-%m-%d %H:%M:%S")
# Read the datafile ("?" marks missing values).
# NOTE(review): this reads the full file before filtering; a skip/nrows
# window (as in plot3.R) would be faster if memory is tight.
hpc.df <- read.table("household_power_consumption.txt", header = TRUE, sep=";",
                     colClasses = c("character", "character", "numeric", "numeric",
                                    "numeric", "numeric", "numeric", "numeric", "numeric"),
                     strip.white = TRUE, skipNul = TRUE, na.strings = c("?"))
#Combine the date and time fields and then put it in a POSIXct, POSIXt format
datetimec <- paste(hpc.df$Date, hpc.df$Time)
datetime <- strptime(datetimec, "%d/%m/%Y %H:%M:%S")
hpc.df <- cbind(hpc.df, datetime)
#Extract only the data that needs to be graphed
graphdata <- filter(hpc.df, datetime >= begintime & datetime < endtime)
#Time to graph: histogram of global active power, written to plot1.png
png("plot1.png")
hist(graphdata$Global_active_power, col = "red", xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")
dev.off()
|
61098a16a1037dc8ff8556c16bfc9cfaa4556157
|
2aa473e524c173313ebbfc757b6a91f7c7b24f00
|
/tests/testthat/test-mungepiece.R
|
a50de932007955ec7a7ae923367436700149ebb5
|
[
"MIT"
] |
permissive
|
syberia/mungebits2
|
64052c3756828cef6bc1139106d4929ba02c8e75
|
1b4f0cd2a360856769ccb2b11dfc42f36fa5d84c
|
refs/heads/master
| 2020-04-06T04:16:54.292474
| 2017-09-19T21:27:27
| 2017-09-19T21:27:27
| 29,579,712
| 1
| 2
| null | 2017-09-19T21:27:28
| 2015-01-21T08:32:15
|
R
|
UTF-8
|
R
| false
| false
| 22,901
|
r
|
test-mungepiece.R
|
# Tests for mungepiece: a mungebit paired with lists of default
# train-time and predict-time arguments.
context("mungepiece")
library(testthatsomemore)
# Constructor and run() argument validation.
describe("errors", {
  test_that("it cannot run a mungepiece without arguments", {
    # run() has no default for its data argument.
    mb <- mungebit$new()
    mp <- mungepiece$new(mb)
    expect_error(mp$run(), "is missing, with no default")
  })
  test_that("it cannot initialize a mungepiece with a non-mungebit", {
    # The first constructor argument must be a mungebit object.
    expect_error(mungepiece$new(1), "as the first argument")
    expect_error(mungepiece$new(NULL), "as the first argument")
    expect_error(mungepiece$new(identity), "as the first argument")
  })
  test_that("it cannot initialize mungepiece training args with a non-list", {
    # The second constructor argument (train defaults) must be a list.
    mb <- mungebit$new()
    expect_error(mungepiece$new(mb, 1), "as the second argument")
    expect_error(mungepiece$new(mb, NULL), "as the second argument")
    expect_error(mungepiece$new(mb, identity), "as the second argument")
  })
  test_that("it cannot initialize mungepiece training args with a non-list", {
    # The third constructor argument (predict defaults) must be a list.
    mb <- mungebit$new()
    expect_error(mungepiece$new(mb, list(), 1), "as the third argument")
    expect_error(mungepiece$new(mb, list(), NULL), "as the third argument")
    expect_error(mungepiece$new(mb, list(), identity), "as the third argument")
  })
  test_that("it errors when you pass a list with non-unique names as args", {
    # Duplicate names in a default-argument list would be ambiguous.
    mb <- mungebit$new()
    expect_error(mungepiece$new(mb, list(a = 1, a = 2)), "duplicate names")
    expect_error(mungepiece$new(mb, predict_args = list(a = 1, a = 2)), "duplicate names")
  })
})
# Factory for recording functions used as mungebit train/predict bodies.
# The returned closure captures both the evaluated arguments and their
# unevaluated call-time expressions, so tests can inspect argument matching.
make_fn <- function(train) {
  force(train)
  function(data, first = NULL, ...) {
    # Capture the unevaluated expressions before building the result.
    captured_first <- substitute(first)
    captured_dots <- eval(substitute(alist(...)))
    list(train = train,
         first = first,
         dots = list(...),
         first_expr = captured_first,
         dots_expr = captured_dots)
  }
}
# Builds a mungebit whose train and predict functions record their inputs.
make_bit <- function() { mungebit$new(make_fn(TRUE), make_fn(FALSE)) }
# Wraps such a mungebit in a mungepiece, forwarding default-argument lists.
make_piece <- function(...) { mungepiece$new(make_bit(), ...) }
# With no default-argument lists, call-time arguments are forwarded to the
# mungebit's train/predict function as-is, with expressions preserved.
describe("without default arguments", {
  test_that("it can create a mungepiece without error", {
    testthatsomemore::assert(make_piece())
  })
  describe("with no arguments", {
    test_that("it can train a mungepiece without error", {
      testthatsomemore::assert(make_piece()$run(iris))
    })
    test_that("it can predict on a mungepiece without error", {
      # First run trains; the second run therefore predicts.
      piece <- make_piece()
      piece$run(iris)
      testthatsomemore::assert(piece$run(iris))
    })
    test_that("it can predict on a mungepiece without error", {
      piece <- make_piece()
      piece$run(iris)
      expect_true(piece$mungebit()$trained())
    })
  })
  describe("with variable arguments", {
    describe("during training", {
      test_that("it can train with variable arguments", {
        testthatsomemore::assert(make_piece()$run(iris, "foo", "bar"))
      })
      test_that("it captures the expected values during train", {
        expect_contains(make_piece()$run(iris, "foo", "bar"),
          list(train = TRUE, first = "foo", dots = list("bar")))
      })
      test_that("it captures expressions during train", {
        # The unevaluated call-time expressions must be recorded verbatim.
        x <- "fo"
        expect_contains(make_piece()$run(iris, paste0(x, "o"), identity("bar")),
          list(train = TRUE, first = "foo", dots = list("bar"),
            first_expr = quote(paste0(x, "o")),
            dots_expr = list(quote(identity("bar")))))
      })
    })
    describe("during prediction", {
      test_that("it can predict with variable arguments", {
        piece <- make_piece()
        piece$run(iris, "foo", "bar")
        testthatsomemore::assert(piece$run(iris, "foo", "bar"))
      })
      test_that("it captures the expected values during train", {
        piece <- make_piece()
        piece$run(iris)
        expect_contains(piece$run(iris, "foo", "bar"),
          list(train = FALSE, first = "foo", dots = list("bar")))
      })
      test_that("it captures expressions during train", {
        piece <- make_piece()
        piece$run(iris)
        x <- "fo"
        expect_contains(piece$run(iris, paste0(x, "o"), identity("bar")),
          list(train = FALSE, first = "foo", dots = list("bar"),
            first_expr = quote(paste0(x, "o")),
            dots_expr = list(quote(identity("bar")))))
      })
    })
  })
})
# Defaults supplied as unnamed lists: positional call-time arguments
# override defaults position by position; unsupplied positions fall back
# to the train defaults ("Stephen", "Colbert") or the predict defaults
# ("Jon", "Stewart").
describe("with unnamed default arguments", {
  # Helper: piece with unnamed train defaults and unnamed predict defaults.
  make_piece2 <- function() {
    make_piece(list("Stephen", "Colbert"), list("Jon", "Stewart"))
  }
  test_that("it can create a mungepiece without error", {
    testthatsomemore::assert(make_piece2())
  })
  describe("without arguments", {
    test_that("it can train a mungepiece without error", {
      testthatsomemore::assert(make_piece2()$run(iris))
    })
    test_that("it can predict on a mungepiece without error", {
      piece <- make_piece2()
      piece$run(iris)
      testthatsomemore::assert(piece$run(iris))
    })
    test_that("it can predict on a mungepiece without error", {
      piece <- make_piece2()
      piece$run(iris)
      expect_true(piece$mungebit()$trained())
    })
  })
  describe("with unnamed variable arguments", {
    describe("during training", {
      test_that("it can train with unnamed variable arguments", {
        testthatsomemore::assert(make_piece2()$run(iris, "Jim"))
        testthatsomemore::assert(make_piece2()$run(iris, "Jim", "Hester"))
      })
      test_that("it captures the expected partial values during train", {
        # Only the first default is overridden; "Colbert" survives.
        expect_contains(make_piece2()$run(iris, "Jim"),
          list(train = TRUE, first = "Jim", dots = list("Colbert")))
      })
      test_that("it captures the expected full values during train", {
        expect_contains(make_piece2()$run(iris, "Jim", "Hester"),
          list(train = TRUE, first = "Jim", dots = list("Hester")))
      })
      test_that("it captures partial expressions during train", {
        x <- "Ji"
        # Overridden slots keep their unevaluated call-time expression.
        expect_contains(make_piece2()$run(iris, paste0(x, "m")),
          list(train = TRUE, first = "Jim", dots = list("Colbert"),
            first_expr = quote(paste0(x, "m"))))
      })
      test_that("it captures full expressions during train", {
        x <- "Ji"
        expect_contains(make_piece2()$run(iris, paste0(x, "m"), identity("Hester")),
          list(train = TRUE, first = "Jim", dots = list("Hester"),
            first_expr = quote(paste0(x, "m")),
            dots_expr = list(quote(identity("Hester")))))
      })
    })
    describe("during prediction", {
      test_that("it can predict with unnamed variable arguments", {
        piece <- make_piece2()
        piece$run(iris)
        testthatsomemore::assert(piece$run(iris, "Jim"))
        piece <- make_piece2()
        piece$run(iris)
        testthatsomemore::assert(piece$run(iris, "Jim", "Hester"))
      })
      test_that("it captures the expected partial values during predict", {
        piece <- make_piece2()
        piece$run(iris)
        # Predict defaults now apply: the second slot falls back to "Stewart".
        expect_contains(piece$run(iris, "Jim"),
          list(train = FALSE, first = "Jim", dots = list("Stewart")))
      })
      test_that("it captures the expected full values during predict", {
        piece <- make_piece2()
        piece$run(iris)
        expect_contains(piece$run(iris, "Jim", "Hester"),
          list(train = FALSE, first = "Jim", dots = list("Hester")))
      })
      test_that("it captures partial expressions during predict", {
        piece <- make_piece2()
        piece$run(iris)
        x <- "Ji"
        expect_contains(piece$run(iris, paste0(x, "m")),
          list(train = FALSE, first = "Jim", dots = list("Stewart"),
            first_expr = quote(paste0(x, "m"))))
      })
      test_that("it captures full expressions during predict", {
        piece <- make_piece2()
        piece$run(iris)
        x <- "Ji"
        expect_contains(piece$run(iris, paste0(x, "m"), identity("Hester")),
          list(train = FALSE, first = "Jim", dots = list("Hester"),
            first_expr = quote(paste0(x, "m")),
            dots_expr = list(quote(identity("Hester")))))
      })
    })
  })
})
# Defaults with a named entry: the train defaults name `first` explicitly;
# the predict defaults name `first` in the second position. Unnamed
# call-time arguments then fill the remaining (unnamed) default slots.
describe("with named default arguments", {
  # Helper: named train default (first = "Stephen") and named predict
  # default (first = "Stewart").
  make_piece2 <- function() {
    make_piece(list(first = "Stephen", "Colbert"), list("Jon", first = "Stewart"))
  }
  test_that("it can create a mungepiece without error", {
    testthatsomemore::assert(make_piece2())
  })
  describe("without arguments", {
    test_that("it can train a mungepiece without error", {
      testthatsomemore::assert(make_piece2()$run(iris))
    })
    test_that("it can predict on a mungepiece without error", {
      piece <- make_piece2()
      piece$run(iris)
      testthatsomemore::assert(piece$run(iris))
    })
    test_that("it can predict on a mungepiece without error", {
      piece <- make_piece2()
      piece$run(iris)
      expect_true(piece$mungebit()$trained())
    })
  })
  describe("with unnamed variable arguments", {
    describe("during training", {
      test_that("it can train with unnamed variable arguments", {
        testthatsomemore::assert(make_piece2()$run(iris, "Jim"))
        testthatsomemore::assert(make_piece2()$run(iris, "Jim", "Hester"))
      })
      test_that("it captures the expected partial values during train", {
        expect_contains(make_piece2()$run(iris, "Jim"),
          list(train = TRUE, first = "Jim", dots = list("Colbert")))
      })
      test_that("it captures the expected full values during train", {
        expect_contains(make_piece2()$run(iris, "Jim", "Hester"),
          list(train = TRUE, first = "Jim", dots = list("Hester")))
      })
      test_that("it captures partial expressions during train", {
        x <- "Ji"
        expect_contains(make_piece2()$run(iris, paste0(x, "m")),
          list(train = TRUE, first = "Jim", dots = list("Colbert"),
            first_expr = quote(paste0(x, "m"))))
      })
      test_that("it captures full expressions during train", {
        x <- "Ji"
        expect_contains(make_piece2()$run(iris, paste0(x, "m"), identity("Hester")),
          list(train = TRUE, first = "Jim", dots = list("Hester"),
            first_expr = quote(paste0(x, "m")),
            dots_expr = list(quote(identity("Hester")))))
      })
    })
    describe("during prediction", {
      test_that("it can predict with unnamed variable arguments", {
        piece <- make_piece2()
        piece$run(iris)
        testthatsomemore::assert(piece$run(iris, "Jim"))
        piece <- make_piece2()
        piece$run(iris)
        testthatsomemore::assert(piece$run(iris, "Jim", "Hester"))
      })
      test_that("it captures the expected partial values during predict", {
        piece <- make_piece2()
        piece$run(iris)
        # With `first` named in the predict defaults, the remaining unnamed
        # default is "Jon", which fills the dots here.
        expect_contains(piece$run(iris, "Jim"),
          list(train = FALSE, first = "Jim", dots = list("Jon")))
      })
      test_that("it captures the expected full values during predict", {
        piece <- make_piece2()
        piece$run(iris)
        expect_contains(piece$run(iris, "Jim", "Hester"),
          list(train = FALSE, first = "Jim", dots = list("Hester")))
      })
      test_that("it captures partial expressions during predict", {
        piece <- make_piece2()
        piece$run(iris)
        x <- "Ji"
        expect_contains(piece$run(iris, paste0(x, "m")),
          list(train = FALSE, first = "Jim", dots = list("Jon"),
            first_expr = quote(paste0(x, "m"))))
      })
      test_that("it captures full expressions during predict", {
        piece <- make_piece2()
        piece$run(iris)
        x <- "Ji"
        expect_contains(piece$run(iris, paste0(x, "m"), identity("Hester")),
          list(train = FALSE, first = "Jim", dots = list("Hester"),
            first_expr = quote(paste0(x, "m")),
            dots_expr = list(quote(identity("Hester")))))
      })
    })
  })
})
# Unnamed defaults combined with *named* call-time arguments: a named
# `first =` at the call site targets that parameter regardless of its
# position, and the remaining positional arguments fill the other slots.
describe("with unnamed default arguments and named argument calls", {
  make_piece2 <- function() {
    make_piece(list("Stephen", "Colbert"), list("Jon", "Stewart"))
  }
  describe("during training", {
    test_that("it can train with unnamed variable arguments", {
      testthatsomemore::assert(make_piece2()$run(iris, first = "Jim"))
      testthatsomemore::assert(make_piece2()$run(iris, first = "Jim", "Hester"))
      testthatsomemore::assert(make_piece2()$run(iris, "Jim", first = "Hester"))
    })
    test_that("it captures the expected partial values during train", {
      expect_contains(make_piece2()$run(iris, first = "Jim"),
        list(train = TRUE, first = "Jim", dots = list("Colbert")))
    })
    test_that("it captures the expected full values during train", {
      expect_contains(make_piece2()$run(iris, first = "Jim", "Hester"),
        list(train = TRUE, first = "Jim", dots = list("Hester")))
      # The named argument wins `first`; the positional one moves to dots.
      expect_contains(make_piece2()$run(iris, "Jim", first = "Hester"),
        list(train = TRUE, first = "Hester", dots = list("Jim")))
    })
    test_that("it captures partial expressions during train", {
      x <- "Ji"
      expect_contains(make_piece2()$run(iris, first = paste0(x, "m")),
        list(train = TRUE, first = "Jim", dots = list("Colbert"),
          first_expr = quote(paste0(x, "m"))))
    })
    test_that("it captures full expressions during train", {
      x <- "Ji"
      expect_contains(make_piece2()$run(iris, first = paste0(x, "m"), identity("Hester")),
        list(train = TRUE, first = "Jim", dots = list("Hester"),
          first_expr = quote(paste0(x, "m")),
          dots_expr = list(quote(identity("Hester")))))
      expect_contains(make_piece2()$run(iris, paste0(x, "m"), first = identity("Hester")),
        list(train = TRUE, first = "Hester", dots = list("Jim"),
          first_expr = quote(identity("Hester")),
          dots_expr = list(quote(paste0(x, "m")))))
    })
  })
  describe("during prediction", {
    test_that("it can predict with unnamed variable arguments", {
      piece <- make_piece2()
      piece$run(iris)
      testthatsomemore::assert(piece$run(iris, first = "Jim"))
      piece <- make_piece2()
      piece$run(iris)
      testthatsomemore::assert(piece$run(iris, first = "Jim", "Hester"))
      piece <- make_piece2()
      piece$run(iris)
      testthatsomemore::assert(piece$run(iris, "Jim", first = "Hester"))
    })
    test_that("it captures the expected partial values during predict", {
      piece <- make_piece2()
      piece$run(iris)
      expect_contains(piece$run(iris, first = "Jim"),
        list(train = FALSE, first = "Jim", dots = list("Stewart")))
    })
    test_that("it captures the expected full values during predict", {
      piece <- make_piece2()
      piece$run(iris)
      expect_contains(piece$run(iris, first = "Jim", "Hester"),
        list(train = FALSE, first = "Jim", dots = list("Hester")))
      expect_contains(piece$run(iris, "Jim", first = "Hester"),
        list(train = FALSE, first = "Hester", dots = list("Jim")))
    })
    test_that("it captures partial expressions during predict", {
      piece <- make_piece2()
      piece$run(iris)
      x <- "Ji"
      expect_contains(piece$run(iris, first = paste0(x, "m")),
        list(train = FALSE, first = "Jim", dots = list("Stewart"),
          first_expr = quote(paste0(x, "m"))))
    })
    test_that("it captures full expressions during predict", {
      piece <- make_piece2()
      piece$run(iris)
      x <- "Ji"
      expect_contains(piece$run(iris, first = paste0(x, "m"), identity("Hester")),
        list(train = FALSE, first = "Jim", dots = list("Hester"),
          first_expr = quote(paste0(x, "m")),
          dots_expr = list(quote(identity("Hester")))))
      expect_contains(piece$run(iris, paste0(x, "m"), first = identity("Hester")),
        list(train = FALSE, first = "Hester", dots = list("Jim"),
          first_expr = quote(identity("Hester")),
          dots_expr = list(quote(paste0(x, "m")))))
    })
  })
})
# Named defaults combined with named call-time arguments: same matching
# rules as above, but the remaining predict default after `first` is
# matched is the unnamed "Jon".
describe("with named default arguments and named argument calls", {
  make_piece2 <- function() {
    make_piece(list(first = "Stephen", "Colbert"), list("Jon", first = "Stewart"))
  }
  describe("during training", {
    test_that("it can train with unnamed variable arguments", {
      testthatsomemore::assert(make_piece2()$run(iris, first = "Jim"))
      testthatsomemore::assert(make_piece2()$run(iris, first = "Jim", "Hester"))
      testthatsomemore::assert(make_piece2()$run(iris, "Jim", first = "Hester"))
    })
    test_that("it captures the expected partial values during train", {
      expect_contains(make_piece2()$run(iris, first = "Jim"),
        list(train = TRUE, first = "Jim", dots = list("Colbert")))
    })
    test_that("it captures the expected full values during train", {
      expect_contains(make_piece2()$run(iris, first = "Jim", "Hester"),
        list(train = TRUE, first = "Jim", dots = list("Hester")))
      # The named argument wins `first`; the positional one moves to dots.
      expect_contains(make_piece2()$run(iris, "Jim", first = "Hester"),
        list(train = TRUE, first = "Hester", dots = list("Jim")))
    })
    test_that("it captures partial expressions during train", {
      x <- "Ji"
      expect_contains(make_piece2()$run(iris, first = paste0(x, "m")),
        list(train = TRUE, first = "Jim", dots = list("Colbert"),
          first_expr = quote(paste0(x, "m"))))
    })
    test_that("it captures full expressions during train", {
      x <- "Ji"
      expect_contains(make_piece2()$run(iris, first = paste0(x, "m"), identity("Hester")),
        list(train = TRUE, first = "Jim", dots = list("Hester"),
          first_expr = quote(paste0(x, "m")),
          dots_expr = list(quote(identity("Hester")))))
      expect_contains(make_piece2()$run(iris, paste0(x, "m"), first = identity("Hester")),
        list(train = TRUE, first = "Hester", dots = list("Jim"),
          first_expr = quote(identity("Hester")),
          dots_expr = list(quote(paste0(x, "m")))))
    })
  })
  describe("during prediction", {
    test_that("it can predict with unnamed variable arguments", {
      piece <- make_piece2()
      piece$run(iris)
      testthatsomemore::assert(piece$run(iris, first = "Jim"))
      piece <- make_piece2()
      piece$run(iris)
      testthatsomemore::assert(piece$run(iris, first = "Jim", "Hester"))
      piece <- make_piece2()
      piece$run(iris)
      testthatsomemore::assert(piece$run(iris, "Jim", first = "Hester"))
    })
    test_that("it captures the expected partial values during predict", {
      piece <- make_piece2()
      piece$run(iris)
      # Remaining (unnamed) predict default "Jon" lands in the dots.
      expect_contains(piece$run(iris, first = "Jim"),
        list(train = FALSE, first = "Jim", dots = list("Jon")))
    })
    test_that("it captures the expected full values during predict", {
      piece <- make_piece2()
      piece$run(iris)
      expect_contains(piece$run(iris, first = "Jim", "Hester"),
        list(train = FALSE, first = "Jim", dots = list("Hester")))
      expect_contains(piece$run(iris, "Jim", first = "Hester"),
        list(train = FALSE, first = "Hester", dots = list("Jim")))
    })
    test_that("it captures partial expressions during predict", {
      piece <- make_piece2()
      piece$run(iris)
      x <- "Ji"
      expect_contains(piece$run(iris, first = paste0(x, "m")),
        list(train = FALSE, first = "Jim", dots = list("Jon"),
          first_expr = quote(paste0(x, "m"))))
    })
    test_that("it captures full expressions during predict", {
      piece <- make_piece2()
      piece$run(iris)
      x <- "Ji"
      expect_contains(piece$run(iris, first = paste0(x, "m"), identity("Hester")),
        list(train = FALSE, first = "Jim", dots = list("Hester"),
          first_expr = quote(paste0(x, "m")),
          dots_expr = list(quote(identity("Hester")))))
      expect_contains(piece$run(iris, paste0(x, "m"), first = identity("Hester")),
        list(train = FALSE, first = "Hester", dots = list("Jim"),
          first_expr = quote(identity("Hester")),
          dots_expr = list(quote(paste0(x, "m")))))
    })
  })
})
# Miscellaneous edge cases: mixing stored defaults with call-time
# expressions, builtins as train functions, and non-standard evaluation.
describe("edge cases", {
  test_that("it can handle conflicting / mixed expressions", {
    mb <- mungebit$new(function(data, x, y) {
      list(x = x, y = y, x_expr = substitute(x), y_expr = substitute(y))
    })
    mp <- mungepiece$new(mb, list(x = 2))
    # The local x below must not shadow the stored default x = 2;
    # only the call-time expression for y refers to it.
    x <- 3
    expect_contains(mp$run(iris, y = x),
      list(x = 2, y = 3, y_expr = quote(x)))
  })
  test_that("it can run builtin train functions", {
    # `[` is a primitive; the piece should still apply it to the data.
    mp <- mungepiece$new(mungebit$new(`[`), list("Sepal.Width"))
    expect_equal(mp$run(iris), iris["Sepal.Width"])
  })
  test_that("it can use NSE during train", {
    # With nse = TRUE the train function sees the unevaluated data expression.
    mp <- mungepiece$new(mungebit$new(nse = TRUE, function(x) substitute(x)))
    expect_equal(mp$run(iris), quote(iris))
  })
  test_that("it can use NSE during predict", {
    mp <- mungepiece$new(mungebit$new(nse = TRUE, function(x) substitute(x)))
    mp$run(iris)
    expect_equal(mp$run(iris), quote(iris))
  })
  test_that("it can run with overwritten args", {
    mp <- mungepiece$new(mungebit$new(`[`))
    expect_equal(mp$run(iris, 1:2), iris[1:2])
  })
})
# debug()/undebug() on a mungepiece should toggle the debug flag on both
# the underlying mungebit's train and predict functions.
describe("debugging", {
  test_that("calling debug on a mungepiece sets the debug flag on its train function", {
    mp <- mungepiece$new(mungebit$new(identity))
    debug(mp)
    expect_true(isdebugged(mp$mungebit()$.train_function))
  })
  test_that("calling debug on a mungepiece sets the debug flag on its predict function", {
    mp <- mungepiece$new(mungebit$new(identity, identity))
    debug(mp)
    expect_true(isdebugged(mp$mungebit()$.predict_function))
  })
  test_that("calling undebug on a mungepiece unsets the debug flag on its train function", {
    mp <- mungepiece$new(mungebit$new(identity))
    debug(mp)
    undebug(mp)
    expect_false(isdebugged(mp$mungebit()$.train_function))
  })
  test_that("calling undebug on a mungepiece sets the undebug flag on its predict function", {
    mp <- mungepiece$new(mungebit$new(identity, identity))
    debug(mp)
    undebug(mp)
    expect_false(isdebugged(mp$mungebit()$.predict_function))
  })
})
|
4d0346e1d9418d7acfdebfd91d8e7251e1a6f858
|
a8713ffd133f1c887c7f08abc3a47e827548ceda
|
/man/join.Rd
|
b5b32c3b277003ebe59e5c061865868925230087
|
[] |
no_license
|
cran/disk.frame
|
5859ee6faa628ecab4012fc9caabdb4b28336958
|
dfb7c3bb5e4ea5f35e4e6432a35dcfbcdb774514
|
refs/heads/master
| 2023-04-16T19:08:35.501577
| 2023-04-07T16:50:15
| 2023-04-07T16:50:15
| 208,672,993
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,162
|
rd
|
join.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/anti_join.r, R/full_join.r, R/inner_join.r,
% R/left_join.r, R/semi_join.r
\name{anti_join.disk.frame}
\alias{anti_join.disk.frame}
\alias{full_join.disk.frame}
\alias{inner_join.disk.frame}
\alias{left_join.disk.frame}
\alias{semi_join.disk.frame}
\title{Performs join/merge for disk.frames}
\usage{
\method{anti_join}{disk.frame}(
x,
y,
by = NULL,
copy = FALSE,
...,
outdir = tempfile("tmp_disk_frame_anti_join"),
merge_by_chunk_id = FALSE,
overwrite = TRUE,
.progress = FALSE
)
\method{full_join}{disk.frame}(
x,
y,
by = NULL,
copy = FALSE,
...,
outdir = tempfile("tmp_disk_frame_full_join"),
overwrite = TRUE,
merge_by_chunk_id,
.progress = FALSE
)
\method{inner_join}{disk.frame}(
x,
y,
by = NULL,
copy = FALSE,
suffix = c(".x", ".y"),
...,
keep = FALSE,
outdir = tempfile("tmp_disk_frame_inner_join"),
merge_by_chunk_id = NULL,
overwrite = TRUE,
.progress = FALSE
)
\method{left_join}{disk.frame}(
x,
y,
by = NULL,
copy = FALSE,
suffix = c(".x", ".y"),
...,
keep = FALSE,
outdir = tempfile("tmp_disk_frame_left_join"),
merge_by_chunk_id = FALSE,
overwrite = TRUE,
.progress = FALSE
)
\method{semi_join}{disk.frame}(
x,
y,
by = NULL,
copy = FALSE,
...,
outdir = tempfile("tmp_disk_frame_semi_join"),
merge_by_chunk_id = FALSE,
overwrite = TRUE,
.progress = FALSE
)
}
\arguments{
\item{x}{a disk.frame}
\item{y}{a data.frame or disk.frame. If data.frame then returns lazily; if disk.frame it performs the join eagerly and returns a disk.frame}
\item{by}{join by}
\item{copy}{same as dplyr::anti_join}
\item{...}{same as dplyr's joins}
\item{outdir}{output directory for disk.frame}
\item{merge_by_chunk_id}{the merge is performed by chunk id}
\item{overwrite}{overwrite output directory}
\item{.progress}{Show progress or not. Defaults to FALSE}
\item{suffix}{see dplyr::XXX_join}
\item{keep}{see dplyr::XXX_join}
}
\value{
disk.frame or data.frame/data.table
}
\description{
Performs join/merge for disk.frames
}
\examples{
df.df = as.disk.frame(data.frame(x = 1:3, y = 4:6), overwrite = TRUE)
df2.df = as.disk.frame(data.frame(x = 1:2, z = 10:11), overwrite = TRUE)
anti_joined.df = anti_join(df.df, df2.df)
anti_joined.df \%>\% collect
anti_joined.data.frame = anti_join(df.df, data.frame(x = 1:2, z = 10:11))
# clean up
delete(df.df)
delete(df2.df)
delete(anti_joined.df)
cars.df = as.disk.frame(cars)
join.df = full_join(cars.df, cars.df, merge_by_chunk_id = TRUE)
# clean up cars.df
delete(cars.df)
delete(join.df)
cars.df = as.disk.frame(cars)
join.df = inner_join(cars.df, cars.df, merge_by_chunk_id = TRUE)
# clean up cars.df
delete(cars.df)
delete(join.df)
cars.df = as.disk.frame(cars)
join.df = left_join(cars.df, cars.df)
# clean up cars.df
delete(cars.df)
delete(join.df)
cars.df = as.disk.frame(cars)
join.df = semi_join(cars.df, cars.df)
# clean up cars.df
delete(cars.df)
delete(join.df)
}
|
7dfbf5cba45abf98691ec25181a33ab91744dd1d
|
0e372329ed96ee03c72e787b183fbabe4a6703d8
|
/man/write_stderr_msg.Rd
|
46c82a51c41ab10af0b7333412e1cf52844231d3
|
[] |
no_license
|
haven-jeon/RscriptUtils
|
4e45c866e041e9a3af8d5c79d4dd1e745fa0989f
|
a315742d687c608aeb54604513ab36163d84be3c
|
refs/heads/master
| 2020-06-02T17:06:02.959931
| 2013-01-16T17:22:01
| 2013-01-16T17:22:01
| 7,650,116
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 210
|
rd
|
write_stderr_msg.Rd
|
\name{write_stderr_msg}
\alias{write_stderr_msg}
\title{write_stderr_msg}
\usage{
write_stderr_msg(msg)
}
\arguments{
\item{msg}{messages}
}
\description{
Write stderr messages with timed informations
}
|
b8994d92d35db39f24a0f6f2509f36904c1c3248
|
b4e8faa53a1991ffa007ae95785d8441a1341fe3
|
/man/normalise.Rd
|
bd952f978c71d0d0473e01f89f238d48926e6e41
|
[
"MIT"
] |
permissive
|
VCCRI/CardiacProfileR
|
6363ef5250593ac9dc865b783bd78321fbeecbed
|
e231c37f220cd39aa3af982c7dbacf7b571ced7a
|
refs/heads/master
| 2021-01-24T11:27:59.936732
| 2018-06-10T19:56:51
| 2018-06-10T19:56:51
| 123,085,087
| 8
| 0
| null | 2018-05-07T00:36:13
| 2018-02-27T06:40:22
|
R
|
UTF-8
|
R
| false
| true
| 698
|
rd
|
normalise.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/profile_generation.R
\name{normalise}
\alias{normalise}
\title{Normalise heart rate in profile}
\usage{
normalise(profile)
}
\arguments{
\item{profile}{profile to be normalised.
See \code{\link{get_profile}} for a description of profile.}
}
\value{
profile that has been normalised.
See \code{\link{get_profile}} for a description of profile.
}
\description{
normalises heart rate between 0 and 1 in heart rate dynamic profile
}
\seealso{
See \code{\link{get_profile}} for a description of profile.
Other profile functions: \code{\link{aggregate_profiles}},
\code{\link{get_profiles}}, \code{\link{get_profile}}
}
|
b4fb37066c697c2752c657efac80b8dc74ab909c
|
397182bd2f6ed4e39dcc3f3d62e89807af03ac5a
|
/R/init.R
|
df7c373dbf72f7f76297e284428800951f67a9f4
|
[] |
no_license
|
cran/bnlearn
|
e04a869228c237067fc09d64b8c65a360dada76a
|
dda6458daf2e0b25d3e87313f35f23d8a1c440c1
|
refs/heads/master
| 2023-05-14T14:21:07.744313
| 2023-04-29T09:30:02
| 2023-04-29T09:30:02
| 17,694,839
| 58
| 42
| null | 2020-01-03T23:49:19
| 2014-03-13T04:09:21
|
C
|
UTF-8
|
R
| false
| false
| 3,448
|
r
|
init.R
|
# safe version of setMethod(): registers a method, creating the missing
# generic on demand when the first registration attempt fails.
tryMethod = function(f, signature, definition, generic) {
  register = function() setMethod(f, signature, definition)
  # registering fails when no generic named `f` exists yet.
  outcome = try(register(), silent = TRUE)
  if (is(outcome, "try-error")) {
    # create the missing generic, then register the method once more.
    setGeneric(f, generic)
    register()
  }#THEN
}#TRYMETHOD
# set up hooks, S4 classes and initialize global variables.
# Called by R when the bnlearn namespace is loaded: registers S4 methods,
# installs attach hooks for packages that define competing generics, and
# loads the compiled code.
.onLoad = function(lib, pkg) {
  # classes for which the S4 methods below are registered.
  bnlearn.classes = c("bn", "bn.fit", available.classifiers)
  # re-register S4 methods (from .GlobalEnv, as we would do manually in an
  # interactive session) whenever one of these packages is attached, so that
  # bnlearn's methods remain available for its classes.
  setHook(packageEvent("graph", "attach"), action = "append",
    function(...) {
      for (cl in bnlearn.classes) {
        setMethod("nodes", cl, where = .GlobalEnv,
          function(object) .nodes(object))
        setMethod("nodes<-", cl, where = .GlobalEnv,
          function(object, value) .relabel(object, value))
        setMethod("degree", cl, where = .GlobalEnv,
          function(object, Nodes) .degree(object, Nodes))
      }#FOR
    })
  # gRbase only provides a competing nodes() generic.
  setHook(packageEvent("gRbase", "attach"), action = "append",
    function(...) {
      for (cl in bnlearn.classes) {
        setMethod("nodes", cl, where = .GlobalEnv,
          function(object) .nodes(object))
      }#FOR
    })
  # BiocGenerics provides a competing score() generic.
  setHook(packageEvent("BiocGenerics", "attach"), action = "append",
    function(...) {
      for (cl in bnlearn.classes) {
        setMethod("score", cl, where = .GlobalEnv,
          function(x, data, type = NULL, ..., by.node = FALSE, debug = FALSE)
            network.score(x = x, data = data, type = type, ...,
              by.node = by.node, debug = debug))
      }#FOR
    })
  # igraph conversions are S3; register them in igraph's namespace on attach.
  setHook(packageEvent("igraph", "attach"), action = "append",
    function(...) {
      registerS3method("as.igraph", class = "bn", method = as.igraph.bn,
        envir = asNamespace("igraph"))
      registerS3method("as.igraph", class = "bn.fit", method = as.igraph.bn.fit,
        envir = asNamespace("igraph"))
    })
  # make bnlearn's classes known to S4.
  setClass("bn")
  setClass("bn.fit")
  setClass("bn.naive")
  setClass("bn.tan")
  # add the methods (if no generic is present, create it).
  for (cl in bnlearn.classes) {
    tryMethod("nodes", cl,
      definition = function(object) .nodes(object),
      generic = function(object, ...) standardGeneric("nodes"))
    tryMethod("nodes<-", cl,
      definition = function(object, value) .relabel(object, value),
      generic = function(object, value) standardGeneric("nodes<-"))
    tryMethod("degree", cl,
      definition = function(object, Nodes) .degree(object, Nodes),
      generic = function(object, Nodes, ...) standardGeneric("degree"))
    tryMethod("score", cl,
      definition = function(x, data, type = NULL, ..., by.node = FALSE,
                     debug = FALSE)
        network.score(x = x, data = data, type = type, ...,
          by.node = by.node, debug = debug),
      generic = function (x, ...) standardGeneric("score"))
  }#FOR
  # load the shared library.
  library.dynam("bnlearn", package = pkg, lib.loc = lib)
  # initialize stuff at the C level.
  .Call(call_onLoad)
}#.ONLOAD
# clean up global variables.
# Called by R when the bnlearn namespace is unloaded; mirrors .onLoad.
.onUnload = function(libpath) {
  # tear down state at the C level (counterpart of call_onLoad).
  .Call(call_onUnload)
  # unload the shared library.
  library.dynam.unload("bnlearn", libpath = libpath)
}#ON.UNLOAD
|
fd38fb4c8a1db1489f82c28f17bce2aae0fdc5a1
|
fde9c70b67e2ea092f0a3966c8b098b08ad0ffcc
|
/R/funcRandKmap.R
|
e0bbb06f0ef7e28e2a54672b16dad68869234316
|
[] |
no_license
|
hazaeljones/geozoning
|
41d215022b34e6944e4ba7395dc0778c2c49ba48
|
c8310ca97a775c4d55807eb3ac3ab1ae73da5334
|
refs/heads/master
| 2021-01-20T12:37:46.187798
| 2018-02-23T09:44:47
| 2018-02-23T09:44:47
| 90,385,766
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,959
|
r
|
funcRandKmap.R
|
##############################################################################
#' generate data
#'
#' @details Either standardizes a user-supplied data frame of (x, y, z)
#' measurements (deduplicating locations, normalizing coordinates and the
#' boundary, and fitting a variogram model to the data), or — when DataObj
#' is NULL — simulates z values on random (x, y) locations in the unit
#' square from the variogram model described by the V* parameters.
#' @param DataObj =NULL: simulated data with given seed or a data frame with real data
#' @param seed numeric value used to generate simulated data
#' @param nPoints number of generated raw data points
#' @param typeMod type of variogram model (see vgm) "Gau", "Sph", "Exp"
#' @param Vpsill partial sill in variogram
#' @param Vrange variogram range
#' @param Vmean average data value
#' @param Vnugget nugget in variogram
#' @param Vanis anisotropy in variogram
#' @param boundary list, contains x and y boundaries
#' @param manualBoundary logical, if TRUE a manual boundary is drawn.
#'
#' @return a list
#' \describe{
#' \item{tabData}{data frame of generated or real data with x,y,z values. x is standardized between 0 and 1, y is standardized with the same ratio used for x }
#' \item{boundary}{standardized boundary}
#' \item{xyminmaxI}{2x2 matrix with the initial (pre-normalization) x and y ranges}
#' \item{VGMmodel}{VGM variogram model (fitted from real data; NULL for simulated data)}
#' \item{modelGen}{RM transformed variogram model (simulated data only; NULL for real data)}
#' \item{ratio}{ratio used to normalize x data}
#' }
#'
#' @export
#' @importFrom stats runif
#' @importFrom sp coordinates
#' @importFrom gstat vgm
#' @importFrom gstat variogram fit.variogram
#' @importFrom RandomFields RMtrend RMnugget RFsimulate
#'
#' @examples
#' # simulate data with Gaussian model
#' resGene=genData(NULL,10,450,"Gau",5,0.2,8,0,list(x=c(0,0,1,1,0),y=c(0,1,1,0,0)),FALSE)
#' plot(resGene$tabData)
genData=function(DataObj=NULL,seed=0,nPoints=450,typeMod="Exp",Vpsill=5,Vrange=0.2,Vmean=8,Vnugget=0,Vanis=1,boundary=list(x=c(0,0,1,1,0),y=c(0,1,1,0,0)),manualBoundary=FALSE)
##############################################################################
{
  modelGen=NULL #variogram model (filled only in the simulated-data branch)
  VGMmodel1=NULL # fitted variogram (filled only in the real-data branch)
  Vang=0# anisotropy angle
  ratio=1 # scale for real data
  # real or simulated data
  if(!is.null(DataObj))
    print(paste("reading DataObj,nrow(DataObj)=",nrow(DataObj),",ncol(DataObj)=",ncol(DataObj),collapse=","))
  else
    print(paste("DataObj=NULL, generating DataObj-seed=",seed))
  if(!is.null(DataObj)){
    #read data frame x y z
    #remove duplicated data pt locations (first two columns are coordinates)
    tabData=DataObj[ ! duplicated(DataObj[,c(1,2)]),]
    names(tabData)=c("x","y","z")
    #draw boundary if required (interactive: user clicks up to 500 points)
    if(manualBoundary)
    {
      print("Draw boundary")
      plot(sp::coordinates(tabData))
      boundary=locator(500,type="l")
      # close the polygon by copying the first vertex onto the last
      boundary$x[length(boundary$x)]=boundary$x[1]
      boundary$y[length(boundary$y)]=boundary$y[1]
    }
    #Normalize coordinates and boundary
    ratio=max(tabData$x)-min(tabData$x)
    resNorm=datanormX(tabData,boundary)
    if(is.null(resNorm)) return(NULL)
    tabData = resNorm$dataN
    boundary = resNorm$boundaryN
    # keep the original coordinate ranges for the caller
    xmin=resNorm$xmin
    xmax=resNorm$xmax
    ymin=resNorm$ymin
    ymax=resNorm$ymax
    xyminmaxI=rbind(c(xmin,xmax),c(ymin,ymax))
    rownames(xyminmaxI)=c("InitialX","InitialY")
    colnames(xyminmaxI)=c("min","max")
    x=tabData$x
    xsize=max(x)-min(x)
    y=tabData$y
    ysize=max(y)-min(y)
    # clamp the boundary to the normalized data extent
    boundary$x[boundary$x<0]=0
    boundary$y[boundary$y<0]=0
    boundary$x[boundary$x>xsize]=xsize
    boundary$y[boundary$y>ysize]=ysize
    # fit experimental variogram to model
    tabDataSp=tabData
    sp::coordinates(tabDataSp)=~x+y
    expVario=variogram(z~1,data=tabDataSp)
    VGMmodel1=fit.variogram(expVario,vgm(c("Exp","Sph","Gau"))) # find best model to be fitted
  }
  # simulated data
  else{
    # IS update: 25/01/2018
    #Generate random (x,y) values within unit square
    set.seed(seed)
    x=runif(nPoints, min=0, max=1)
    y=runif(nPoints, min=0, max=1)
    #Generate z values according to (Gaussian or exponential) field
    #RMmodel starting from VGM model
    VGMmodel=vgm(model=typeMod,range=Vrange,psill=Vpsill,mean=Vmean,ang1=Vang,anis1=Vanis)
    modelGen=calRMmodel(VGMmodel)
    modelGen=modelGen+RMtrend(mean=Vmean)
    # only add a nugget component when it is numerically meaningful
    if(Vnugget>1e-3) modelGen=modelGen+RMnugget(var=Vnugget)
    testMap<-RFsimulate(modelGen,x,y)
    #store in dataframe
    tabData=data.frame(x=x,y=y,z=testMap$variable1)
    # normalize x,y coordinates
    tabData=datanormXY(tabData)
    # simulated coordinates are already in the unit square
    xyminmaxI=rbind(c(0,1),c(0,1))
    rownames(xyminmaxI)=c("InitialX","InitialY")
    colnames(xyminmaxI)=c("min","max")
  }
  # NOTE(review): VGMmodel is the fitted model for real data only; for
  # simulated data it is NULL and modelGen carries the generating model.
  return(list(tabData=tabData,boundary=boundary,xyminmaxI=xyminmaxI,VGMmodel=VGMmodel1,modelGen=modelGen,ratio=ratio))
}
#####################################################
#' compute step for non square grid
#'
#' @param nPointsK numeric value giving the number of points after kriging
#' @param xsize numeric value giving the data range on the x axis
#' @param ysize numeric value giving the data range on the y axis
#' @return a numerical step value
#' @export
#'
#' @examples
#' calStep(1000,1,1)
calStep=function(nPointsK,xsize,ysize)
#####################################################
{
  # The number of nodes per unit length is the positive root of
  #   a*s^2 + b*s + c = 0
  # which expresses that a grid of (xsize/step)+... nodes holds ~nPointsK points.
  qa=1
  qb=-2/(xsize+ysize)
  qc=-(nPointsK-1)/(xsize*ysize)
  posRoot=(-qb+sqrt(qb^2-4*qa*qc))/2
  # Round the node count down so the step divides the domain evenly
  return(1/trunc(posRoot))
}
################################################################
#' generate grid from raw data
#'
#' @param step numeric step for grid
#' @param xsize numeric value giving the data range on the x axis
#' @param ysize numeric value giving the data range on the y axis
#' @importFrom sp coordinates
#'
#' @return a list that contains x and y kriged positions based on original ones, plus nx and ny (number of x and y positions).
#' @export
#' @importFrom sp coordinates
#' @importMethodsFrom sp coordinates
#' @examples
#' genEmptyGrid(calStep(1000,1,1),1,1)
genEmptyGrid=function(step,xsize,ysize)
################################################################
{
  # Regular grid coordinates along each axis, inset by one step from the borders
  xx=seq(from=step,to=xsize-step,by=step)
  yy=seq(from=step,to=ysize-step,by=step)
  nx=length(xx)
  ny=length(yy)
  # Build every (x, y) combination (x varies fastest) with unknown z values
  tabEmpty=data.frame(x=rep(xx,times=ny),
                      y=rep(yy,each=nx),
                      z=rep(NA,nx*ny))
  # Promote the data frame to a spatial object for kriging
  sp::coordinates(tabEmpty)=~x+y
  return(list(tabEmpty=tabEmpty,xx=xx,yy=yy,nx=nx,ny=ny))
}
####################################################################
#' returns list of point neigbors for each point
#'
#' @param neighBool numeric, boolean neighborhood matrix for pts
#'
#' @return a list of pt neigbors for each pt
#' @export
#'
#' @examples
#' \donttest{
#' data(mapTest) # simulated data
#' grid=genEmptyGrid(calStep(2000,1,1),1,1)
#' nbP= grid$nx*grid$ny
#' neighBool=matrix(logical(nbP^2),nbP,nbP)
#' resVoronoi=voronoiPolygons(mapTest$krigData,c(0,1,0,1),neighBool)
#' neighBool=resVoronoi$neighBool
#' listeNpt=ptNei(neighBool)
#' }
ptNei=function(neighBool)
###################################################################
{
# arg = boolean neighborhood matrix for pts (one row per point)
# returns list of pt neighbor indices for each pt
#
# Fix: the previous apply(neighBool, 1, ...) silently simplified the result
# to a vector/matrix whenever all rows had the same number of neighbors,
# breaking the documented "list of neighbors" contract; grep(TRUE, x) also
# coerced each logical row to character. lapply + which always returns a
# plain list of integer index vectors (integer(0) for isolated points).
vPt=lapply(seq_len(nrow(neighBool)),function(i){unname(which(neighBool[i,]))})
return(vPt)
}
|
fb2cd1021604b03ee575b8f9230e1f0d681b2138
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/clttools/examples/dice.plot.Rd.R
|
b82008e7983ef8718f636c90578ff3565e4495a1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 247
|
r
|
dice.plot.Rd.R
|
library(clttools)
### Name: dice.plot
### Title: Theoretical Probability Distribution Plot of Rolling Dice
### Aliases: dice.plot
### ** Examples
# Plot the theoretical distribution of the sum of 4 fair dice, as red points
dice.plot(n = 4, col ='red', type = 'p')
# Same for 3 dice, but with a biased (non-uniform) per-face probability vector
dice.plot(3, prob = c(0.3, 0.1, 0.2, 0.1, 0.1, 0.2))
|
b20cb45e52c465e98d89e6998287ab61b7684dbd
|
4c7d9073ca83ef771c620c1063b37f44fa2ee8b6
|
/draft_code/nls.R
|
68871962ab2411fd38b6aa8bc2735d2c3231fd1e
|
[] |
no_license
|
ilyiglesias/LW
|
d816fbf3abc7ed95dd18e149c4435d1bd6d16e6a
|
1a14d2232c812591ff3eb59e95efb8dd6f3c8d48
|
refs/heads/master
| 2020-03-19T08:49:27.967932
| 2018-09-26T17:50:15
| 2018-09-26T17:50:15
| 136,235,690
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,160
|
r
|
nls.R
|
# Fit a power-law length-weight relationship W = a * L^b with nls
# (non-linear least squares), as an alternative to a log-log linear fit.
# Assumes `lw` is a data frame with columns Std_Length (mm) and Weight (g)
# loaded earlier in the analysis — TODO confirm against the calling script.
# Approach follows https://stackoverflow.com/questions/22664763/plotting-a-power-fit-of-for-y-axb
m <- nls(Weight~a*Std_Length^b, data = lw, start = list(a=1, b=1))
summary(m) # fitted parameters: a ~ 0.000004187 (scale), b ~ 3.29 (exponent)
coef(m)
# Rough goodness-of-fit proxy: correlation of observed length vs fitted weight
cor(lw$Std_Length, predict(m)) # ~0.85 on the original data
plot(lw$Std_Length, lw$Weight, xlab="Fish Length (mm)", ylab= "Fish Weight (g)") # this is a plot of length (x) and weight (y)
x_L <- sort(lw$Std_Length) # sort lengths so lines() draws left-to-right instead of zig-zagging
lines(x_L, predict(m, list(Std_Length=x_L)), col="purple", lwd=4) # overlay the fitted power curve evaluated at the sorted lengths
|
8b0da41be4ea617a438d03707fd3256441429583
|
76540510e51e76171e7559122e60739be2edd5fb
|
/man/forecast_hmnc.Rd
|
b667ca2d602b8a279755f06c7bb5c691006f489b
|
[] |
no_license
|
placeboo/amplify
|
0d889633c8a87ad596b410e777877673f682ada3
|
6749fdb2b9d5a4cbbe3282cf029322f2f7ae6519
|
refs/heads/master
| 2021-07-12T22:30:13.932067
| 2020-09-24T23:19:30
| 2020-09-24T23:19:30
| 202,027,821
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 858
|
rd
|
forecast_hmnc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forecast_hmnc.R
\name{forecast_hmnc}
\alias{forecast_hmnc}
\title{Forecasting via Harmonic Regression Model}
\usage{
forecast_hmnc(model, par = list(season = c(7, 365.25), K = c(3, 30)), h = 30)
}
\arguments{
\item{model}{An object of class \code{forecast}, the model after applying the double seasonal harmonic regression}
\item{par}{A list containing:
\itemize{
\item season. A vector giving the seasonal periods of the time series. The defaults 7 and 365.25 correspond to weekly and yearly seasonality respectively
\item K. A vector giving the number of Fourier terms for the weekly and yearly seasonality.
}}
\item{h}{A numeric value giving the number of periods ahead to forecast (optional)}
}
\description{
Forecasting based on the double seasonal harmonic regression
}
|
8eb9b53e08fad31480519dc39091e8857f577d4c
|
42cd205591e6acccb6a6a89c8bcb92ae8d17c72f
|
/TwoWorlds/twoworld-getters-twunity.R
|
c44df66a49dfdcf84e494edcafe4ad232ffdc6c8
|
[] |
no_license
|
hejtmy/two-worlds-analysis
|
8d74f882c9727e0da4dd63f56e6fae9a72b82a2a
|
e404dcb67eed1b2a93a476acaa6a3045091fd3d0
|
refs/heads/master
| 2022-04-01T11:42:44.052365
| 2020-02-12T01:51:42
| 2020-02-12T01:51:42
| 113,116,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,311
|
r
|
twoworld-getters-twunity.R
|
### twunity ----
get_trial_start_id.twunity <- function(obj, trialId){
  # The start of trial N is the goal of trial N-1; trial 1 has no prior goal
  if(trialId > 1) return(get_trial_goal_id.twunity(obj, trialId - 1))
  NULL
}
# Returns the (x, z) start position of a trial. For trial 1 (no prior goal)
# the start is taken from the first logged player position.
get_trial_start.twunity <- function(obj, trialId){
#First trial is started in a center hall
iStart <- get_trial_start_id.twunity(obj, trialId)
if(is.null(iStart)){
# fall back to the very first row of the walk-phase position log
df_firs_pos <- obj$walk$data$player_log[1, ]
return(c(df_firs_pos$Position.x[1], df_firs_pos$Position.z[1]))
}
return(get_goal.brainvr(obj$walk, iStart))
}
get_trial_start_name.twunity <- function(obj, trialId){
  # Human-readable name of the location where the trial begins
  start_idx <- get_trial_start_id.twunity(obj, trialId)
  if(is.null(start_idx)){
    return("Starting location")
  }
  get_goal_name.brainvr(obj$walk, start_idx)
}
get_trial_goal_id.twunity <- function(obj, trialId){
  # Unity (C#) logs goal indices 0-based; shift to R's 1-based indexing
  zero_based <- obj$walk$data$experiment_log$settings$GoalOrder[trialId]
  zero_based + 1
}
# Returns the (x, z) position of the goal of the given walk-phase trial.
get_trial_goal.twunity <- function(obj, trialId){
iGoal <- get_trial_goal_id.twunity(obj, trialId)
return(get_goal.brainvr(obj$walk, iGoal))
}
# Returns the name of the goal of the given walk-phase trial.
get_trial_goal_name.twunity <- function(obj, trialId){
iGoal <- get_trial_goal_id.twunity(obj, trialId)
return(get_goal_name.brainvr(obj$walk, iGoal))
}
get_trial_start_goal.twunity <- function(obj, trialId){
  # Bundle the start and goal coordinates of one trial into a named list
  list(start = get_trial_start.twunity(obj, trialId),
       goal = get_trial_goal.twunity(obj, trialId))
}
# Counts navigation errors in one trial: the number of door positions the
# player came within DIAMETER of, minus the doors that are legitimately
# visited (the start and the goal door). Never returns a negative count.
# NOTE(review): reads `settings$door_positions` from the global environment
# (acknowledged below) — consider passing door positions as an argument.
get_trial_errors.twunity <- function(obj, trialId){
#gets_log
DIAMETER <- 1
# trial 1 only passes the goal door; later trials pass both start and goal
n_errors <- ifelse(trialId == 1, -1, -2) #we always get to the start and end door
ALL_DOORS_POSITIONS <- settings$door_positions #a bit problematic, fetches stuff from the global env
# we will get one correct hit at the goal start and end
df_log <- get_trial_log(obj$walk, trialId)
df_positions <- df_log[,c('Position.x', 'Position.z')]
# for each door, check whether any logged position came within DIAMETER of it
for(i in 1:nrow(ALL_DOORS_POSITIONS)){
position <- c(ALL_DOORS_POSITIONS$Estimote.x[i], ALL_DOORS_POSITIONS$Estimote.y[i])
# Euclidean distance of every logged position to this door
diffs <- sweep(df_positions, 2, position)
distances <- apply(diffs, 1, function(x){sqrt(sum(x ^ 2))})
if(any(distances < DIAMETER)) n_errors <- n_errors + 1
}
# clamp: the expected start/goal visits must not produce a negative count
if(n_errors < 0) n_errors <- 0
return(n_errors)
}
## SOP ----
get_point_start_id.twunity <- function(obj, trialId){
  # SOP starting viewpoint for this trial; log is 0-based (C#), R is 1-based
  zero_based <- obj$sop$data$experiment_log$settings$GoalOrder[trialId]
  zero_based + 1
}
get_point_start_name.twunity <- function(obj, trialId){
  # Viewpoints are stored after the six goals, hence the offset of 6
  idx <- get_point_start_id.twunity(obj, trialId)
  paste0("Viewpoint ", idx - 6)
}
# Returns the (x, z) position of the SOP starting viewpoint for this trial.
get_point_start.twunity <- function(obj, trialId){
iStart <- get_point_start_id.twunity(obj, trialId)
return(get_goal.brainvr(obj$sop, iStart))
}
get_point_goal_id.twunity <- function(obj, trialId){
  # Pointing-target index for this SOP trial; shifted from the 0-based log
  zero_based <- obj$sop$data$experiment_log$settings$GoalPointOrder[trialId]
  zero_based + 1
}
# Returns the name of the pointing target of the given SOP trial.
get_point_goal_name.twunity <- function(obj, trialId){
iGoal <- get_point_goal_id.twunity(obj, trialId)
return(get_goal_name.brainvr(obj$sop, iGoal))
}
# Returns the (x, z) position of the pointing target of the given SOP trial.
get_point_goal.twunity <- function(obj, trialId){
iGoal <- get_point_goal_id.twunity(obj, trialId)
return(get_goal.brainvr(obj$sop, iGoal))
}
get_point_start_goal.twunity <- function(obj, trialId){
  # Bundle the SOP start viewpoint and pointing target into a named list
  list(start = get_point_start.twunity(obj, trialId),
       goal = get_point_goal.twunity(obj, trialId))
}
# Returns the last logged row of the SOP trial, i.e. the position/orientation
# at the moment the player pointed (pointing ends the trial).
get_trial_point.twunity <- function(obj, trialId){
#when player points, trial ends so the point is at the trial end, but because of unity timing
#sometimes logged after trial finishes
log <- get_trial_log(obj$sop, trialId)
point_line <- tail(log, 1)
return(point_line)
}
get_all_goal_positions.twunity <- function(obj, include_SOP){
  # Delegate to the brainvr implementation on the SOP phase of the experiment
  get_all_goal_positions.brainvr(obj$sop, include_SOP)
}
### BrainVR ----
get_goal.brainvr <- function(obj, iGoal){
  # Extract the ground-plane (x, z) coordinates of goal number iGoal
  goal_row <- obj$data$experiment_log$positions$GoalPositions[iGoal, ]
  c(goal_row$Position.x, goal_row$Position.z)
}
get_goal_name.brainvr <- function(obj, iGoal){
  # Look up the goal's display name in the experiment settings
  obj$data$experiment_log$settings$GoalNames[iGoal]
}
get_all_goal_positions.brainvr <- function(obj, include_SOP = FALSE){
  # Goals 1-6 are navigation targets; 7-10 are the SOP viewpoints
  idx <- if(include_SOP) 1:10 else 1:6
  # Keep only the ground-plane columns (x, z) and split into one-row frames
  xz <- obj$data$experiment_log$positions$GoalPositions[idx, c(1, 3)]
  setNames(split(xz, idx), obj$data$experiment_log$settings$GoalNames[idx])
}
# NOTE(review): unimplemented stub — the empty body implicitly returns NULL.
# Confirm whether a SOP-specific start getter is still planned or whether
# callers should use get_point_start.twunity instead.
get_point_start.sop <- function(obj, trialId){
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.