blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b4aad61460301d8c91c8de0f7c4a5b9f788ab2d9 | 2b526666985a1d6367b6359d8e51ad13b5063cc2 | /quiz2.r | e05939c8455eeba6ff15979e10a0909297bc9564 | [] | no_license | nhchauvnu/gettingcleaning | f7734690f071b8c2cdf3986c1fd7c90883b37881 | 2757809b16586ac109ee5085eca665c9bd9f0ff8 | refs/heads/master | 2016-09-10T19:40:42.619362 | 2015-04-25T14:33:43 | 2015-04-25T14:33:43 | 33,798,509 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 571 | r | quiz2.r | require(sqldf)
# Quiz 2 (Getting and Cleaning Data): load the ACS survey extract so it can be
# queried with SQL via sqldf (sqldf is attached at the top of the file)
acs = read.csv('ss06pid.csv')
# sqldf("select * from acs where AGEP < 50 and pwgtp1")
# sqldf("select pwgtp1 from acs where AGEP < 50")
# sqldf("select pwgtp1 from acs")
# sqldf("select * from acs where AGEP < 50")
require(XML)
URL <- 'http://biostat.jhsph.edu/~jleek/contact.html'
# doc <- htmlTreeParse('http://biostat.jhsph.edu/~jleek/contact.html', useInternalNodes=T)
# Fetch the raw HTML of the page and report the character count of lines
# 10, 20, 30 and 100 (the quiz answer)
con = url(URL)
html = readLines(con)
close(con)
sapply(html[c(10, 20, 30, 100)], nchar)
###
# Fixed-width NOAA sea-surface-temperature file: skip the 4 header lines,
# split each record using the given column widths, then sum column V4
data = read.fwf('wksst8110.for', skip=4, widths=c(12, 7,4, 9,4, 9,4, 9,4))
sum(data$V4)
|
2b077e3dd6d60819c5b216c21740b8de5fd0f471 | 49f46e7a2b6f6ed6fbc664d028b5c6dc1cdd685b | /R/add_htmlwidgets_deps.R | 55b599b5db55e4860ff6e4a05293889004f72cb0 | [] | no_license | d4tagirl/d4tagirl.com | 158cd2fda5094391f6ef93e375c8b65ab37b4953 | a6f859ee1033bacf27822cfbc91d7a9e279457c6 | refs/heads/master | 2021-01-11T18:10:08.650246 | 2020-07-03T19:20:13 | 2020-07-04T17:51:17 | 79,505,453 | 4 | 8 | null | 2020-01-04T20:28:05 | 2017-01-19T23:27:47 | JavaScript | UTF-8 | R | false | false | 3,045 | r | add_htmlwidgets_deps.R | if(!exists("html_dependency_resolver", mode="function")) source("R/rmarkdown_internal_funs.R")
#' Configure htmlwidgets dependencies for a knitr-jekyll blog
#'
#' Unlike static image plots, the outputs of htmlwidgets dependencies also have
#' Javascript and CSS dependencies, which are not by default processed by knitr.
#' \code{htmlwdigets_deps} provides a system to add the dependencies to a Jekyll
#' blog. Further details are available in the following blog post:
#' \url{http://brendanrocks.com/htwmlwidgets-knitr-jekyll/}.
#'
#' @param a The file path for the input file being knit
#' @param knit_meta The dependencies object.
#' @param lib_dir The directory where the htmlwidgets dependency source code can
#' be found (e.g. JavaScript and CSS files)
#' @param includes_dir The directory to add the HTML file to
#' @param always Should dependency files always be produced, even if htmlwidgets
#' are not being used?
#'
#' @return Used for it's side effects.
#' @export
htmlwidgets_deps <- function(a, knit_meta = knitr::knit_meta(),
                             lib_dir = "static/htmlwidgets_deps",
                             includes_dir = "layouts/partials/htmlwidgets/",
                             always = FALSE) {
  # Make sure the output directories exist (no-op when already present)
  dir.create(lib_dir, showWarnings = FALSE, recursive = TRUE)
  dir.create(includes_dir, showWarnings = FALSE, recursive = TRUE)
  # Copy the htmlwidgets JS/CSS libraries from the R packages into lib_dir and
  # build the HTML snippet (<script>/<link> tags) required to import them
  deps_str <- html_dependencies_to_string(knit_meta, lib_dir, ".")
  # Name the include file after the input document, swapping the .Rmarkdown
  # extension for .html.
  # BUG FIX: the dot in the pattern must be escaped; the unescaped pattern
  # ".Rmarkdown$" would also match names ending in e.g. "aRmarkdown".
  deps_file <- paste0(
    includes_dir,
    gsub("\\.Rmarkdown$", ".html", basename(a[1]))
  )
  # Write out the file if either the dependency string has anything to add,
  # or the `always` parameter is TRUE (useful for those building with GitHub
  # Pages).
  # BUG FIX: use the scalar short-circuit operator || inside if(), not the
  # vectorised | (which errors/warns for conditions of length != 1 in modern R)
  if (always || !grepl("^[[:space:]]*$", deps_str)) {
    writeLines(deps_str, deps_file)
  }
}
#' @keywords internal
#' Adapted from rmarkdown:::html_dependencies_as_string
# Resolve a list of knitr htmlwidgets dependencies and render them to a single
# HTML string of <script>/<link> import tags, with paths rewritten for Jekyll.
# Returns the rendered HTML (the value of renderDependencies, last expression).
html_dependencies_to_string <- function (dependencies, lib_dir, output_dir) {
# Flatten and resolve html deps
# (html_dependency_resolver / flatten_html_dependencies are rmarkdown
# internals sourced at the top of the file)
dependencies <- html_dependency_resolver(
flatten_html_dependencies(dependencies)
)
# When a lib_dir is supplied, physically copy each dependency's files into it
# and rewrite the dependency paths so they are relative to output_dir
if (!is.null(lib_dir)) {
dependencies <- lapply(
dependencies, htmltools::copyDependencyToDir, lib_dir
)
dependencies <- lapply(
dependencies, htmltools::makeDependencyRelative, output_dir
)
}
# A function to add Jekyll boilerplate
prepend_baseurl <- function(path){
# If the url doesn't start "/", make sure that it does
path <- ifelse(!grepl("^/", path), paste0("/", path), path)
# remove /static
# (substring(path, 8) drops the 7 characters of "/static"; presumably the
# site serves the contents of /static from the web root -- TODO confirm)
path <- ifelse(startsWith(path, "/static"), substring(path, 8), path)
}
# Render the resolved dependencies as HTML import tags, post-processing every
# href through prepend_baseurl
htmltools::renderDependencies(
dependencies, "file",
encodeFunc = identity,
hrefFilter = prepend_baseurl
)
} |
676ce90ffb587d96e6dad983e1422dc22c8ce40f | 367e08a923163864ae25f7b396a0a8e473e9ead4 | /Salmon_analysis.R | b99147c7caf9a50aa4f12a956ffb2b951189caee | [] | no_license | Yago-91/RNA-seq-Analysis | 78bf9038c1c71e7c9c6bf97f73c96228643772b4 | 37de6c55509653ac9639bf2def090eec2b363fea | refs/heads/master | 2022-12-08T13:23:21.327826 | 2020-08-27T19:39:13 | 2020-08-27T19:39:13 | 290,854,026 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,746 | r | Salmon_analysis.R | #https://bioconductor.org/packages/release/bioc/vignettes/tximport/inst/doc/tximport.html
suppressMessages(library(tximport))
suppressMessages(library(DESeq2))
#Parsing command line arguments
args <- commandArgs(trailingOnly = T)
#Setting variables
loc_dir <- args[1]
quant_dirs <- unlist(strsplit(args[2], ","))
Species <- args[3]
Names <- unlist(strsplit(args[4], ","))
tx2gene_path <- args[5]
Groups_R <- unlist(strsplit(args[6], ","))
Group_A_name <- Groups_R[1]
Group_A_number <- as.integer(Groups_R[2])
Group_B_name <- Groups_R[3]
Group_B_number <- as.integer(Groups_R[4])
#Directories of all the files
files <- file.path(loc_dir, quant_dirs, "quant.sf")
print("Location directories of quant.sf files")
files
names(files) <- Names
print(paste("All passed files exist? ",all(file.exists(files))))
#Correspondance file
tx2gene <- read.csv(tx2gene_path, header = T, colClasses = c(rep("character",2)))
#Importing data
txi.salmon <- tximport(files, type = "salmon", tx2gene = tx2gene, ignoreTxVersion = TRUE)
# Configuring differential analysis: the sample table labels each library with
# its experimental condition -- group A samples first, then group B, matching
# the column order of the quantifications imported by tximport.
# BUG FIX: the original code repeated Group_B_name twice in rep(), so every
# sample received the same condition label and the A-vs-B contrast requested
# later (relevel to Group_A_name, then DESeq) could never be estimated.
sampleTable <- data.frame(condition = factor(rep(c(Group_A_name, Group_B_name),
                                                 c(Group_A_number, Group_B_number))))
rownames(sampleTable) <- colnames(txi.salmon$counts)
dds <- DESeqDataSetFromTximport(txi.salmon, sampleTable, ~condition)
#Stablish reference group
dds$condition <- relevel(dds$condition, ref = Group_A_name)
#Differential expression analysis
dds <- DESeq(dds)
res <- results(dds)
#Save results
resOrdered <- res[order(res$pvalue),]
write.csv(as.data.frame(resOrdered), file=paste(Group_A_name,"_vs_",Group_B_name,".csv",sep = ""))
#Save gene-wise analysis data in raw TPM
out_1 <- paste(loc_dir, "/Salmon/Salmon_merge_genes.csv", sep = "")
write.csv(txi.salmon$counts, out_1) |
73bf06263b021076ef108b6704f9ef6e22f10ffc | 38da5449fb2f72e920bb64829f599c999378f812 | /Scripts/re_filter.R | 774966c29c48b4003291d87e0639b6b808d86aed | [] | no_license | chengxiz/Geocoding-R | 6517d6e36551066f88d5f6047cdd9b61dab3da8f | 50298973e5d6bc5a9619cc62707e1908fd75e596 | refs/heads/master | 2021-01-21T10:34:38.640107 | 2017-02-28T21:52:38 | 2017-02-28T21:52:38 | 83,457,036 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 425 | r | re_filter.R | data <- read.csv('../Files/geocoded_unfiltered_Home_Addresses.csv')
# Original (pre-geocoding) address list, one row per input address
home.addresses <- read.csv('../Files/Home_Addresses.csv')
# Addresses whose geocoding attempt produced no formatted_address: write them
# out so they can be inspected / retried.
# NOTE(review): this indexes home.addresses by NA positions taken from `data`
# (the geocoder output read on the previous line) -- it assumes both CSVs have
# exactly the same row order; TODO confirm.
data.filtered <- home.addresses[is.na(data$formatted_address),]
write.csv(data.filtered, file='../Files/mismatch_unfound_Home_Addresses.csv', row.names=TRUE)
# Keep only the successfully geocoded rows as the final geocoded data set
data.updated <- data[!is.na(data$formatted_address),]
write.csv(data.updated, file='../Files/geocoded_Home_Addresses.csv', row.names=TRUE)
|
6b171a53e69a50a2de35bf3f70aa9905aea6e8ea | c130bd1b4fae630feb2f6ccf7d219ccb54568377 | /Figure4_Method_comparison_and_sampling_effort.R | 2f563cf38377b6dd5377b9be009d31534c5239e3 | [] | no_license | ASanchez-Tojar/inferring_hierarchies_uncertainty | de8bf9426764fae2ee520c60528f00b912b5f0f9 | 813443dd5748f4fae399327e3fec4f93bbadd8f3 | refs/heads/master | 2021-01-20T12:28:53.121118 | 2019-04-05T03:15:05 | 2019-04-05T03:15:05 | 82,660,228 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 38,118 | r | Figure4_Method_comparison_and_sampling_effort.R | # Authors:
# Alfredo Sanchez-Tojar, MPIO (Seewiesen) and ICL (Silwood Park), alfredo.tojar@gmail.com
# Damien Farine, MPIO (Radolfzell) and EGI (Oxford), dfarine@orn.mpg.de
# Script first created on the 27th of October, 2016
###############################################################################
# Description of script and Instructions
###############################################################################
# This script aims to generate a figure that shows how different dominance
# methods perform inferring the latent hierachy, Figure 5 for:
# Sanchez-Tojar, A., Schroeder, J., Farine, D.R. (In preparation) A practical
# guide for inferring reliable dominance hierarchies and estimating their
# uncertainty
# more info at the Open Science Framework: http://doi.org/10.17605/OSF.IO/9GYEK
###############################################################################
# Packages needed
###############################################################################
# packages needed for this script
library(aniDom)
library(doBy)
library(RColorBrewer)
library(EloRating)
# Clear memory
rm(list=ls())
###############################################################################
# Functions needed
###############################################################################
# Overlay the sigmoid P(dominant individual wins) curve on the current plot.
#
# diff.rank: vector of rank differences (x positions on the plot)
# a, b:      steepness and shift parameters of the hierarchy sigmoid
# coline:    line colour, passed straight to lines()
plot_winner_prob <- function(diff.rank, a, b, coline) {
  # Normalise the rank differences to [0, 1] before applying the logistic
  scaled <- diff.rank / max(diff.rank)
  p.win <- 0.5 + 0.5 / (1 + exp(b - scaled * a))
  lines(diff.rank, p.win, col = coline)
}
# ###############################################################################
# # Simulating: COMPARING all METHODS
# ###############################################################################
#
# ptm <- proc.time()
#
#
# db <- data.frame(Ninds=integer(),
# Nobs=integer(),
# poiss=logical(),
# dombias=logical(),
# alevel=integer(),
# blevel=integer(),
# Ndavid=numeric(),
# elo.original=numeric(),
# elo.no.rand=numeric(),
# elo.rand=numeric(),
# ISI98=numeric(),
# spearman.prop=numeric(),
# unknowndyads=numeric(),
# realindividuals=numeric(),
# stringsAsFactors=FALSE)
#
#
# avalues <- c(10,15,10,5)
# bvalues <- c(-5,5,5,5)
# #N.inds.values <- c(10)
# N.inds.values <- c(25)
# #N.inds.values <- c(50)
# N.obs.values <- c(1,4,7,10,15,20,30,40,50,100)
# poiss <- c(FALSE,TRUE)
# dombias <- c(FALSE,FALSE)
#
#
# for (typ in 1:length(poiss)){
#
# for (j in 1:length(avalues)){
#
# for (p in 1:length(N.inds.values)){
#
# for (o in 1:length(N.obs.values)){
#
# for (sim in 1:100){
#
# output <- generate_interactions(N.inds.values[p],
# N.inds.values[p]*N.obs.values[o],
# a=avalues[j],
# b=bvalues[j],
# id.biased=poiss[typ],
# rank.biased=dombias[typ])
#
#
# filename <- paste(paste(paste(ifelse(poiss[typ]==TRUE,1,0),
# ifelse(dombias[typ]==TRUE,1,0),
# sep=""),
# paste0(bvalues[j],a=avalues[j]),
# sep="_"),
# N.obs.values[o],
# sep="_")
#
# winner <- output$interactions$Winner
# loser <- output$interactions$Loser
# date <- c("2010-01-25",rep("2010-01-26",length(loser)-1)) #fake date needed for elo.seq()
#
# # generating elo.rating according to elo.seq from library(EloRating)
# x<-elo.seq(winner=as.factor(winner),
# loser=as.factor(loser),
# Date=date,
# k=200,
# progressbar=FALSE)
#
# w<-extract.elo(x,standardize = FALSE)
#
# z <- data.frame(w,attributes(w),
# row.names = as.character(seq(1,length(w),
# 1)))
#
# z$rank <- as.numeric(as.character(z$names))
#
# z$Elo.ranked <- rank(-z$w,na.last="keep")
#
# spearman.original<-cor(z$rank,z$Elo.ranked,
# use="complete.obs",method="spearman")
#
# # generating David's score
# domatrix<-creatematrix(x, drawmethod="0.5")
#
# # saving matrices for running ADAGIO
# write.csv(as.data.frame(domatrix),
# paste0("databases_package/matrices_10ind_ISI_1st",
# typ,"/matrices/matrix_",filename,"_sim",sim,".csv"),
# row.names=TRUE,
# quote = FALSE)
#
# dav<-DS(domatrix)
#
# dav$ID <- as.numeric(as.character(dav$ID))
#
# dav$normDSrank <- rank(-dav$normDS,na.last="keep")
#
# Ndavid <- cor(dav$ID,dav$normDSrank,
# use="complete.obs",method="spearman")
#
# # generating elo-rating according to elo_scores() from aniDom
# result.no.rand <- elo_scores(winner,
# loser,
# identities=c(1:N.inds.values[p]),
# init.score=1000,
# randomise=FALSE,
# return.as.ranks=TRUE)
#
# #ranks.no.rand <- rank(-result.no.rand,na.last="keep")
#
# spearman.cor.no.rand<-cor(output$hierarchy$Rank,
# #ranks.no.rand,
# result.no.rand,
# use="complete.obs",method="spearman")
#
# # generating elo-rating according to elo_scores() from aniDom and
# # randomizing the order of the interactions 1000 times
# result <- elo_scores(winner,
# loser,
# identities=c(1:N.inds.values[p]),
# init.score=1000,
# randomise=TRUE,
# return.as.ranks=TRUE)
#
# mean.scores <- rowMeans(result)
#
# spearman.cor.rand<-cor(output$hierarchy$Rank,
# mean.scores,
# use="complete.obs",method="spearman")
#
#
# #I&SI
#
# ISI13 <- as.numeric(isi98(domatrix,nTries = 25)$best_order)
#
# dif13<-setdiff(output$hierarchy$Rank,ISI13)
#
# ISI13.2 <- c(ISI13,rep("NA",length(dif13)))
#
# id13 <- c( seq_along(ISI13), dif13-0.5 )
#
# ISI13.3 <- as.numeric(ISI13.2[order(id13)])
#
# ISI13.cor <- cor(ISI13.3,output$hierarchy$Rank,
# use="complete.obs",method="spearman")
#
#
# #%wins
#
# prop.wins.raw <- despotism(domatrix)
#
# prop <- data.frame(prop.wins.raw,attributes(prop.wins.raw),
# row.names = as.character(seq(1,length(prop.wins.raw),
# 1)))
#
# prop$rank <- as.numeric(as.character(prop$names))
#
# prop$prop.ranked <- rank(-prop$prop.wins.raw,na.last="keep")
#
# spearman.prop<-cor(prop$rank,prop$prop.ranked,
# use="complete.obs",method="spearman")
#
#
# #sparseness
#
# unknowndyads<-rshps(domatrix)$unknowns/rshps(domatrix)$total
#
#
# # number of individuals that interacted
#
# individuals <- length(ISI13)
#
#
# #final db
#
# db<-rbind(db,c(N.inds.values[p],
# N.obs.values[o],
# poiss[typ],
# dombias[typ],
# avalues[j],
# bvalues[j],
# Ndavid,
# spearman.original,
# spearman.cor.no.rand,
# spearman.cor.rand,
# ISI13.cor,
# spearman.prop,
# unknowndyads,
# individuals))
#
# write.csv(db,
# "databases_package/final_data_for_Figures_backup/ISIincluded_10ind_1st_t.csv",row.names=FALSE)
#
# }
# }
# }
# }
#
# }
#
# names(db) <- c("Ninds","Nobs",
# "poiss","dombias",
# "alevel","blevel",
# "Ndavid",
# "elo.original",
# "elo.no.rand",
# "elo.rand",
# "ISI98",
# "spearman.prop",
# "unknowndyads",
# "realindividuals")
#
# db$ratio <- (db$Nobs*db$Ninds)/db$realindividuals
#
# proc.time() - ptm
#
#
# # write.csv(db,
# # "databases_package/final_data_for_Figures_backup/Fig5a_db_methods_100sim_fixed_biases_ISIincluded_10ind.csv",row.names=FALSE)
#
# write.csv(db,
# "databases_package/final_data_for_Figures_backup/Fig5a_db_methods_100sim_fixed_biases_ISIincluded_25ind.csv",row.names=FALSE)
#
# # write.csv(db,
# # "databases_package/final_data_for_Figures_backup/Fig5a_db_methods_100sim_fixed_biases_ISIincluded_50ind.csv",row.names=FALSE)
# ###############################################################################
# # ADAGIO: Adding the results from ADAGIO, which needed to be run at the terminal
# ###############################################################################
#
# # importing all rank files (output from ADAGIO)
#
# setwd("C:/allresultsfromADAGIO")
#
# temp = list.files(pattern="*.csv.adagio.ranks") #check you are in the right folder - getwd() and setwd()
#
#
# db <- data.frame(Ninds=integer(),
# Nobs=integer(),
# poiss=integer(),
# dombias=integer(),
# alevel=integer(),
# blevel=integer(),
# spearman=numeric(),
# stringsAsFactors=FALSE)
#
#
# #N.inds.values <- c(10)
# N.inds.values <- c(25)
# #N.inds.values <- c(50)
#
#
# for (filename in 1:length(temp)){
#
# poiss <- substr(temp[filename], 8, 8)
# dombias <- substr(temp[filename], 9, 9)
#
# if(substr(temp[filename], 11, 12)=="-5"){
#
# blevel <- substr(temp[filename], 11, 12)
#
# if(substr(temp[filename], 14, 14)=="_"){
#
# alevel <- substr(temp[filename], 13, 13)
#
# if(substr(temp[filename], 16, 16)=="_"){
#
# Nobs <- substr(temp[filename], 15, 15)
#
# } else if(substr(temp[filename], 17, 17)=="_") {
#
# Nobs <- substr(temp[filename], 15, 16)
#
# } else{
#
# Nobs <- substr(temp[filename], 15, 17)
#
# }
#
# } else {
#
# alevel <- substr(temp[filename], 13, 14)
#
# if(substr(temp[filename], 17, 17)=="_"){
#
# Nobs <- substr(temp[filename], 16, 16)
#
# } else if(substr(temp[filename], 18, 18)=="_") {
#
# Nobs <- substr(temp[filename], 16, 17)
#
# } else{
#
# Nobs <- substr(temp[filename], 16, 18)
#
# }
#
# }
#
# } else {
#
# blevel <- substr(temp[filename], 11, 11)
#
# if(substr(temp[filename], 13, 13)=="_"){
#
# alevel <- substr(temp[filename], 12, 12)
#
# if(substr(temp[filename], 15, 15)=="_"){
#
# Nobs <- substr(temp[filename], 14, 14)
#
# } else if(substr(temp[filename], 16, 16)=="_") {
#
# Nobs <- substr(temp[filename], 14, 15)
#
# } else{
#
# Nobs <- substr(temp[filename], 14, 16)
#
# }
#
# } else {
#
# alevel <- substr(temp[filename], 12, 13)
#
# if(substr(temp[filename], 16, 16)=="_"){
#
# Nobs <- substr(temp[filename], 15, 15)
#
# } else if(substr(temp[filename], 17, 17)=="_") {
#
# Nobs <- substr(temp[filename], 15, 16)
#
# } else{
#
# Nobs <- substr(temp[filename], 15, 17)
#
# }
#
# }
#
# }
#
# db_temp <- read.table(temp[filename],header=FALSE,sep="\t")
#
# spearman.cor<-cor(db_temp$V1,
# db_temp$V2,
# use="complete.obs",method="spearman")
#
#
# db <- rbind(db,as.numeric(c(N.inds.values,
# Nobs,
# poiss,
# dombias,
# alevel,
# blevel,
# spearman.cor)))
#
# }
#
# names(db) <- c("Ninds","Nobs",
# "poiss","dombias",
# "alevel","blevel","spearman")
#
#
#
# # write.csv(db,
# # "databases_package/final_data_for_Figures_backup/Fig5b_db_ADAGIO_100simulations_fixed_biases_10ind_100int.csv",row.names=FALSE)
#
# write.csv(db,
# "databases_package/final_data_for_Figures_backup/Fig5b_db_ADAGIO_100simulations_fixed_biases_25ind_100int.csv",row.names=FALSE)
#
# # write.csv(db,
# # "databases_package/final_data_for_Figures_backup/Fig5b_db_ADAGIO_100simulations_fixed_biases_50ind_100int.csv",row.names=FALSE)
###############################################################################
# Plotting: MAIN TEXT: estimated rank ~ real rank: spearman correaltion for each
# method. Adding 95% CI intervals
###############################################################################
# db5methods <- read.table("databases_package/final_data_for_Figures_backup/Fig5a_db_methods_100sim_fixed_biases_ISIincluded_10ind.csv",
# header=TRUE,sep=",")
db5methods <- read.table("databases_package/final_data_for_Figures_backup/Fig5a_db_methods_100sim_fixed_biases_ISIincluded_25ind.csv",
header=TRUE,sep=",")
# db5methods <- read.table("databases_package/final_data_for_Figures_backup/Fig5a_db_methods_100sim_fixed_biases_ISIincluded_50ind.csv",
# header=TRUE,sep=",")
db5methods_sorted <- db5methods[order(db5methods$Ninds,
db5methods$Nobs,
db5methods$poiss,
db5methods$dombias,
db5methods$blevel,
db5methods$alevel),]
# dbADAGIO <- read.table("databases_package/final_data_for_Figures_backup/Fig5b_db_ADAGIO_100simulations_fixed_biases_10ind_100int.csv",
# header=TRUE,sep=",")
dbADAGIO <- read.table("databases_package/final_data_for_Figures_backup/Fig5b_db_ADAGIO_100simulations_fixed_biases_25ind_100int.csv",
header=TRUE,sep=",")
# dbADAGIO <- read.table("databases_package/final_data_for_Figures_backup/Fig5b_db_ADAGIO_100simulations_fixed_biases_50ind_100int.csv",
# header=TRUE,sep=",")
dbADAGIO_sorted <- dbADAGIO[order(dbADAGIO$Ninds,
dbADAGIO$Nobs,
dbADAGIO$poiss,
dbADAGIO$dombias,
dbADAGIO$blevel,
dbADAGIO$alevel),]
# making sure everything is identical
# Sanity check before cbind(): every simulation-design column must match
# row-for-row between the two sorted tables, otherwise the ADAGIO results
# would be pasted next to the wrong simulations. All prints should be TRUE.
iden <- as.factor(c("Nobs","poiss","dombias","alevel","blevel"))
for (column in levels(iden)){
print(identical(db5methods_sorted[,c(column)],
dbADAGIO_sorted[,c(column)],
attrib.as.set=FALSE))
}
# cbinding both databases to add the ADAGIO measurements
# Keep only the ADAGIO spearman column (plus blevel as a visual cross-check)
# and rename it so it is distinguishable after the cbind
dbADAGIO_cor <- dbADAGIO_sorted[,c("blevel", "spearman")]
names(dbADAGIO_cor) <- c("blevel","ADAGIO")
db.provisional <- cbind(db5methods_sorted,dbADAGIO_cor)
#db.provisional.2 <- db.provisional[,c(1:11,13)]
#db.provisional.2 <- db.provisional[,c(1:10,12)]
# Drop the duplicated blevel column: keep columns 1:15 of the merged table
# plus column 17 (the ADAGIO correlations)
db.provisional.2 <- db.provisional[,c(1:15,17)]
# Database is ready, plotting starts!
avalues <- c(15,15,15,10,10,10,5,5,5)
bvalues <- c(5,5,5,5,5,5,5,5,5)
#N.inds.values <- c(10)
N.inds.values <- c(25)
#N.inds.values <- c(50)
N.obs.values <- c(1,4,7,10,15,20,30,40,50,100)
db<-db.provisional.2[db.provisional.2$poiss==1 & db.provisional.2$dombias==0,]
#db<-db.provisional.2[db.provisional.2$poiss==0 & db.provisional.2$dombias==0,]
a <- c("(a)","x","x","(b)","x","x","(c)","x","x")
tiff("plots/after_revision/Figure5_Method_comparison_and_sampling_effort_100int_25ind_Poisson.tiff",
#"plots/supplements/after_revision/FigureS04_Method_comparison_and_sampling_effort_100int_10ind_Poisson.tiff",
#"plots/supplements/after_revision/FigureS11_Method_comparison_and_sampling_effort_100int_50ind_Poisson.tiff",
#"plots/supplements/after_revision/FigureS2_Method_comparison_and_sampling_effort_100int_25ind_uniform.tiff",
height=29.7, width=21,
units='cm', compression="lzw", res=600)
for (p in 1:length(N.inds.values)){
m <- rbind(c(1,1,1,2),c(1,1,1,3),
c(4,4,4,5),c(4,4,4,6),
c(7,7,7,8),c(7,7,7,9))
layout(m)
op <- par(oma = c(6,3,1,1) + 0.1,
mar = c(0.5,5,1,0) + 0.1,
cex.lab=2.5)
db.2 <- db[db$Ninds==N.inds.values[p] & (db$Nobs %in% N.obs.values),]
for (i in 1:length(avalues)){
if(i %in% c(1,4,7)){
db.3 <- db.2[db.2$alevel==avalues[i] & db.2$blevel==bvalues[i],]
db.4 <-summaryBy(Ndavid +
elo.original +
elo.no.rand +
elo.rand +
ADAGIO +
ISI98
~ Nobs,
data = db.3,
FUN = function(x) { c(m = mean(x),
q = quantile(x,probs=c(0.025,0.975))) })
names(db.4) <- c("Nobs",
"Ndavid.m","Ndavid.lower","Ndavid.upper",
"elo.original.m","elo.original.lower","elo.original.upper",
"elo.no.rand.m","elo.no.rand.lower","elo.no.rand.upper",
"elo.rand.m","elo.rand.lower","elo.rand.upper",
"ADAGIO.m","ADAGIO.lower","ADAGIO.upper",
"ISI98.m","ISI98.lower","ISI98.upper")
plot(db.4$elo.rand.m~db.4$Nobs,0.5,type="n",
ylab="",
xlab="",
xaxt="n",
yaxt="n",
#ylim=c(0,1))
ylim=c(-0.4,1))
#ylim=c(-0.8,1))
if(i<7){
axis(1,at=N.obs.values,
cex.axis=1,tck=0.015,
labels=FALSE)
} else {
axis(1,at=N.obs.values,
labels=as.character(N.obs.values),
cex.axis=0.75,tck=0.015)
mtext(" ratio of interactions to individuals",
side=1, adj=0, line=4, cex=1.8);
}
axis(2,
#at=round(seq(0,1,0.1),1),
at=round(seq(-0.4,1,0.1),1),
#at=round(seq(-0.8,1,0.1),1),
cex.axis=1,las=2,tck=0.015)
#adding points for the means and shadowed areas for the 95% CI
# points(db.4$Nobs,db.4$elo.original.m,type="b",col="green",pch=19)
# polygon(c(db.4$Nobs,rev(db.4$Nobs)),
# c(db.4$elo.original.lower,rev(db.4$elo.original.upper)),
# border=NA,col=rgb(0,1,0, 0.15))
points(db.4$Nobs,db.4$ISI98.m,type="b",col="green",pch=19)
polygon(c(db.4$Nobs,rev(db.4$Nobs)),
c(db.4$ISI98.lower,rev(db.4$ISI98.upper)),
border=NA,col=rgb(0,1,0, 0.15))
points(db.4$Nobs,db.4$elo.no.rand.m,type="b",col="red",pch=19)
polygon(c(db.4$Nobs,rev(db.4$Nobs)),
c(db.4$elo.no.rand.lower,rev(db.4$elo.no.rand.upper)),
border=NA,col=rgb(1,0,0,0.15))
points(db.4$Nobs,db.4$Ndavid.m,type="b",col="black",pch=19)
polygon(c(db.4$Nobs,rev(db.4$Nobs)),
c(db.4$Ndavid.lower,rev(db.4$Ndavid.upper)),
border=NA,col=rgb(120/255,120/255,120/255,0.15))
points(db.4$Nobs,db.4$elo.rand.m,type="b",col="blue",pch=19)
polygon(c(db.4$Nobs,rev(db.4$Nobs)),
c(db.4$elo.rand.lower,rev(db.4$elo.rand.upper)),
border=NA,col=rgb(0,0,1, 0.15))
points(db.4$Nobs,db.4$ADAGIO.m,type="b",col="orange",pch=19)
polygon(c(db.4$Nobs,rev(db.4$Nobs)),
c(db.4$ADAGIO.lower,rev(db.4$ADAGIO.upper)),
border=NA,col=rgb(1,165/255,0,0.15))
lines(c(0,101),c(0.7,0.7),col="red",lty=3,lwd=1.5)
atext <- paste("\na = ",avalues[i])
btext <- paste("\nb = ",bvalues[i])
ttext <- paste0(atext,btext,sep="\n")
text(98,-0.35,a[i],adj = 0 ,cex=1.5)
text(81,-0.3,ttext,adj = 0,cex=2)
# text(98,-0.75,a[i],adj = 0 ,cex=1.5)
# text(81,-0.7,ttext,adj = 0,cex=2)
} else {
if(i %in% c(2,5,8)){
plot(c(1,25),
#c(1,50),
#c(1,10),
c(0.5,1),type="n",
ylab="",
xlab="",
xaxt="n",
yaxt="n",
cex=1.5)
axis(1,
at=seq(1,25,2),
#at=seq(1,50,6),
#at=seq(1,10,1),
cex.axis=0.75,tck=0.015)
axis(2,at=seq(0.5,1,0.1),cex.axis=0.75,las=2,tck=0.015)
plot_winner_prob(1:25,a=avalues[i],b=bvalues[i],"black")
#plot_winner_prob(1:50,a=avalues[i],b=bvalues[i],"black")
#plot_winner_prob(1:10,a=avalues[i],b=bvalues[i],"black")
mtext("P (dominant wins)",
side=2, adj=0, line=3, cex=1.10);
} else {
plot(c(1,50),c(0,1),type="n",
ylab="",
xlab="",
xaxt="n",
yaxt="n",
frame.plot=FALSE)
par(xpd=TRUE)
legend(-20,0.8,
c("David's score",
"original Elo-rating",
"randomized Elo-rating",
"ADAGIO",
"I&SI"),
col=c("black",
"red",
"blue",
"orange",
"green"),
cex=1.35,bty='n',
pch=rep(19,4),
inset=c(0,0))
mtext("Difference in rank",
side=3, adj=1, line=-2, cex=0.95);
}
}
}
title(ylab = "Spearman rank correlation coefficient",
outer = TRUE, line = -1)
par(mfrow=c(1,1))
}
dev.off()
# Code for the preprint (i.e. previous version of the manuscript)
# ###############################################################################
# # Plotting: SUPPLEMENTARY MATERIAL, part 1:
# # intermediate scenarios: uniform, dominant bias, poisson*dominant bias
# ###############################################################################
#
# avalues <- c(15,15,15,10,10,10,5,5,5)
# bvalues <- c(5,5,5,5,5,5,5,5,5)
# #N.inds.values <- c(50)
# N.inds.values <- c(10)
# N.obs.values <- c(1,4,7,10,15,20,30,40,50)
#
#
# db<-db.provisional.2[db.provisional.2$poiss==0 & db.provisional.2$dombias==0,]
# #db<-db.provisional.2[db.provisional.2$poiss==0 & db.provisional.2$dombias==1,]
# #db<-db.provisional.2[db.provisional.2$poiss==1 & db.provisional.2$dombias==1,]
#
#
# a <- c("(a)","x","x","(b)","x","x","(c)","x","x")
#
#
# tiff(#"plots/supplements/FigureS2_Method_comparison_and_sampling_effort_uniform.tiff",
# #"plots/supplements/FigureS3_Comparing_original_Elo-rating_packages_uniform.tiff",
# #"plots/supplements/FigureS9_Method_comparison_and_sampling_effort_dombias.tiff",
# #"plots/supplements/FigureSX_Comparing_original_Elo-rating_packages_dombias.tiff",
# #"plots/supplements/FigureS16_Method_comparison_and_sampling_effort_poiss+dombias.tiff",
# "plots/supplements/FigureSX_Comparing_original_Elo-rating_packages_poiss+dombias.tiff",
# height=29.7, width=21,
# units='cm', compression="lzw", res=600)
#
#
# for (p in 1:length(N.inds.values)){
#
# m <- rbind(c(1,1,2),c(1,1,3),
# c(4,4,5),c(4,4,6),
# c(7,7,8),c(7,7,9))
#
# layout(m)
#
# op <- par(oma = c(6,3,1,1) + 0.1,
# mar = c(0.5,5,1,0) + 0.1,
# cex.lab=2.5)
#
#
# db.2 <- db[db$Ninds==N.inds.values[p] & (db$Nobs %in% N.obs.values),]
#
# for (i in 1:length(avalues)){
#
# if(i %in% c(1,4,7)){
#
# db.3 <- db.2[db.2$alevel==avalues[i] & db.2$blevel==bvalues[i],]
#
# db.4 <-summaryBy(Ndavid +
# elo.original +
# elo.no.rand +
# elo.rand +
# ADAGIO
# ~ Nobs,
# data = db.3,
# FUN = function(x) { c(m = mean(x),
# q = quantile(x,probs=c(0.025,0.975))) })
#
# names(db.4) <- c("Nobs",
# "Ndavid.m","Ndavid.lower","Ndavid.upper",
# "elo.original.m","elo.original.lower","elo.original.upper",
# "elo.no.rand.m","elo.no.rand.lower","elo.no.rand.upper",
# "elo.rand.m","elo.rand.lower","elo.rand.upper",
# "ADAGIO.m","ADAGIO.lower","ADAGIO.upper")
#
# plot(db.4$elo.rand.m~db.4$Nobs,0.5,type="n",
# ylab="",
# xlab="",
# xaxt="n",
# yaxt="n",
# ylim=c(-0.3,1))
#
#
# if(i<7){
#
# axis(1,at=N.obs.values,
# cex.axis=1,tck=0.015,
# labels=FALSE)
#
#
# } else {
#
# axis(1,at=N.obs.values,
# labels=as.character(N.obs.values),
# cex.axis=1,tck=0.015)
#
# mtext("ratio of interactions to individuals",
# side=1, adj=0, line=4, cex=1.8);
#
# }
#
# axis(2,at=round(seq(-0.3,1,0.1),1),cex.axis=1.2,las=2,tck=0.015)
#
#
# #adding points for the means and shadowed areas for the 95% CI
# points(db.4$Nobs,db.4$elo.original.m,type="b",col="green",pch=19)
# polygon(c(db.4$Nobs,rev(db.4$Nobs)),
# c(db.4$elo.original.lower,rev(db.4$elo.original.upper)),
# border=NA,col=rgb(0,1,0, 0.15))
#
# points(db.4$Nobs,db.4$elo.no.rand.m,type="b",col="red",pch=19)
# polygon(c(db.4$Nobs,rev(db.4$Nobs)),
# c(db.4$elo.no.rand.lower,rev(db.4$elo.no.rand.upper)),
# border=NA,col=rgb(1,0,0,0.15))
#
# # points(db.4$Nobs,db.4$Ndavid.m,type="b",col="black",pch=19)
# # polygon(c(db.4$Nobs,rev(db.4$Nobs)),
# # c(db.4$Ndavid.lower,rev(db.4$Ndavid.upper)),
# # border=NA,col=rgb(120/255,120/255,120/255,0.15))
# #
# # points(db.4$Nobs,db.4$elo.rand.m,type="b",col="blue",pch=19)
# # polygon(c(db.4$Nobs,rev(db.4$Nobs)),
# # c(db.4$elo.rand.lower,rev(db.4$elo.rand.upper)),
# # border=NA,col=rgb(0,0,1, 0.15))
# #
# # points(db.4$Nobs,db.4$ADAGIO.m,type="b",col="orange",pch=19)
# # polygon(c(db.4$Nobs,rev(db.4$Nobs)),
# # c(db.4$ADAGIO.lower,rev(db.4$ADAGIO.upper)),
# # border=NA,col=rgb(1,165/255,0,0.15))
#
# lines(c(0,51),c(0.7,0.7),col="red",lty=3,lwd=1.5)
#
# atext <- paste("\na = ",avalues[i])
# btext <- paste("\nb = ",bvalues[i])
# ttext <- paste0(atext,btext,sep="\n")
# text(48,-0.25,a[i],adj = 0 ,cex=1.5)
# text(31,-0.2,ttext,adj = 0,cex=2)
#
#
# } else {
#
# if(i %in% c(2,5,8)){
#
# plot(c(1,50),c(0.5,1),type="n",
# ylab="",
# xlab="",
# xaxt="n",
# yaxt="n",
# cex=1.5)
#
# axis(1,at=seq(0,50,10),
# cex.axis=1,tck=0.015)
#
# axis(2,at=seq(0.5,1,0.1),cex.axis=1.2,las=2,tck=0.015)
#
# plot_winner_prob_2(1:50,a=avalues[i],b=bvalues[i],"black")
#
# mtext("P (dominant wins)",
# side=2, adj=0, line=3, cex=1.10);
#
# } else {
#
# plot(c(1,50),c(0,1),type="n",
# ylab="",
# xlab="",
# xaxt="n",
# yaxt="n",
# frame.plot=FALSE)
#
# par(xpd=TRUE)
# # legend(0,0.8,
# # c("David's score",
# # "original Elo-rating",
# # "randomized Elo-rating",
# # "ADAGIO"),
# # col=c("black",
# # "red",
# # "blue",
# # "orange"),
# # cex=1.35,bty='n',
# # pch=rep(19,4),
# # inset=c(0,0))
#
# legend(0,0.8,
# c("package:aniDom",
# "package:EloRating"),
# col=c("red",
# "green"),
# cex=1.35,bty='n',
# pch=rep(19,4),
# inset=c(0,0))
#
# mtext("Difference in rank ",
# side=3, adj=1, line=-2, cex=1.15);
#
# }
#
# }
#
# }
#
#
# title(ylab = "Spearman rank correlation coefficient",
# outer = TRUE, line = 0)
#
# par(mfrow=c(1,1))
#
# }
#
# dev.off()
#
#
# ###############################################################################
# # Plotting: SUPPLEMENTARY MATERIAL, part 2:
# # steep and flat scenarios
# ###############################################################################
#
# avalues <- c(10,10,10,15,15,15,30,30,30,0,0,0)
# bvalues <- c(-5,-5,-5,0,0,0,5,5,5,5,5,5)
# #N.inds.values <- c(50)
# N.inds.values <- c(10)
# N.obs.values <- c(1,4,7,10,15,20,30,40,50)
#
#
# #db<-db.provisional.2[db.provisional.2$poiss==1 & db.provisional.2$dombias==0,]
# db<-db.provisional.2[db.provisional.2$poiss==0 & db.provisional.2$dombias==0,]
# #db<-db.provisional.2[db.provisional.2$poiss==0 & db.provisional.2$dombias==1,]
# #db<-db.provisional.2[db.provisional.2$poiss==1 & db.provisional.2$dombias==1,]
#
#
# a <- c("(a)","x","x","(b)","x","x","(c)","x","x","(d)","x","x")
#
#
# tiff(#"plots/supplements/FigureS1_Method_comparison_and_sampling_effort_steep_and_flat_poisson.tiff",
# #"plots/supplements/FigureS2_Comparing_original_Elo-rating_packages_steep_and_flat_poisson.tiff",
# #"plots/supplements/FigureS3_Method_comparison_and_sampling_effort_steep_and_flat_uniform.tiff",
# #"plots/supplements/FigureS4_Comparing_original_Elo-rating_packages_steep_and_flat_uniform.tiff",
# #"plots/supplements/FigureS10_Method_comparison_and_sampling_effort_steep_and_flat_dombias.tiff",
# #"plots/supplements/FigureSX_Comparing_original_Elo-rating_packages_steep_and_flat_dombias.tiff",
# #"plots/supplements/FigureS17_Method_comparison_and_sampling_effort_steep_and_flat_poiss+dombias.tiff",
# #"plots/supplements/FigureSX_Comparing_original_Elo-rating_packages_steep_and_flat_poiss+dombias.tiff",
# #"plots/supplements/FigureS5_Method_comparison_and_sampling_effort_steep_and_flat_poisson_10ind.tiff",
# "plots/supplements/FigureS7_Method_comparison_and_sampling_effort_steep_and_flat_uniform_10ind.tiff",
# height=29.7, width=21,
# units='cm', compression="lzw", res=600)
#
#
# for (p in 1:length(N.inds.values)){
#
# m <- rbind(c(1,1,2),c(1,1,3),
# c(4,4,5),c(4,4,6),
# c(7,7,8),c(7,7,9),
# c(10,10,11),c(10,10,12))
#
# layout(m)
#
# op <- par(oma = c(6,3,1,1) + 0.1,
# mar = c(0.5,5,1,0) + 0.1,
# cex.lab=2.5)
#
#
# db.2 <- db[db$Ninds==N.inds.values[p] & (db$Nobs %in% N.obs.values),]
#
# for (i in 1:length(avalues)){
#
# if(i %in% c(1,4,7,10)){
#
# db.3 <- db.2[db.2$alevel==avalues[i] & db.2$blevel==bvalues[i],]
#
# db.4 <-summaryBy(Ndavid +
# elo.original +
# elo.no.rand +
# elo.rand +
# ADAGIO
# ~ Nobs,
# data = db.3,
# FUN = function(x) { c(m = mean(x),
# q = quantile(x,probs=c(0.025,0.975))) })
#
# names(db.4) <- c("Nobs",
# "Ndavid.m","Ndavid.lower","Ndavid.upper",
# "elo.original.m","elo.original.lower","elo.original.upper",
# "elo.no.rand.m","elo.no.rand.lower","elo.no.rand.upper",
# "elo.rand.m","elo.rand.lower","elo.rand.upper",
# "ADAGIO.m","ADAGIO.lower","ADAGIO.upper")
#
# plot(db.4$elo.rand.m~db.4$Nobs,0.5,type="n",
# ylab="",
# xlab="",
# xaxt="n",
# yaxt="n",
# #ylim=c(-0.6,1)
# ylim=c(-1,1))
#
#
# if(i<10){
#
# axis(1,at=N.obs.values,
# cex.axis=1,tck=0.015,
# labels=FALSE)
#
#
# } else {
#
# axis(1,at=N.obs.values,
# labels=as.character(N.obs.values),
# cex.axis=1,tck=0.015)
#
# mtext("ratio of interactions to individuals",
# side=1, adj=0, line=4, cex=1.8);
#
# }
#
# axis(2,
# #at=round(seq(-0.6,1,0.1),1),
# at=round(seq(-1,1,0.2),1),
# cex.axis=1.2,las=2,tck=0.015)
#
#
# #adding points for the means and shadowed areas for the 95% CI
# # points(db.4$Nobs,db.4$elo.original.m,type="b",col="green",pch=19)
# # polygon(c(db.4$Nobs,rev(db.4$Nobs)),
# # c(db.4$elo.original.lower,rev(db.4$elo.original.upper)),
# # border=NA,col=rgb(0,1,0, 0.15))
#
# points(db.4$Nobs,db.4$elo.no.rand.m,type="b",col="red",pch=19)
# polygon(c(db.4$Nobs,rev(db.4$Nobs)),
# c(db.4$elo.no.rand.lower,rev(db.4$elo.no.rand.upper)),
# border=NA,col=rgb(1,0,0,0.15))
#
# points(db.4$Nobs,db.4$Ndavid.m,type="b",col="black",pch=19)
# polygon(c(db.4$Nobs,rev(db.4$Nobs)),
# c(db.4$Ndavid.lower,rev(db.4$Ndavid.upper)),
# border=NA,col=rgb(120/255,120/255,120/255,0.15))
#
# points(db.4$Nobs,db.4$elo.rand.m,type="b",col="blue",pch=19)
# polygon(c(db.4$Nobs,rev(db.4$Nobs)),
# c(db.4$elo.rand.lower,rev(db.4$elo.rand.upper)),
# border=NA,col=rgb(0,0,1, 0.15))
#
# points(db.4$Nobs,db.4$ADAGIO.m,type="b",col="orange",pch=19)
# polygon(c(db.4$Nobs,rev(db.4$Nobs)),
# c(db.4$ADAGIO.lower,rev(db.4$ADAGIO.upper)),
# border=NA,col=rgb(1,165/255,0,0.15))
#
# lines(c(0,51),c(0.7,0.7),col="red",lty=3,lwd=1.5)
#
# atext <- paste("\na = ",avalues[i])
# btext <- paste("\nb = ",bvalues[i])
# ttext <- paste0(atext,btext,sep="\n")
# text(48,-0.95,a[i],adj = 0 ,cex=1.5)
# text(31,-0.85,ttext,adj = 0,cex=2)
#
#
# } else {
#
# if(i %in% c(2,5,8,11)){
#
# plot(#c(1,50),
# c(1,10),
# c(0.5,1),type="n",
# ylab="",
# xlab="",
# xaxt="n",
# yaxt="n",
# cex=1.5)
#
# axis(1,
# #at=seq(0,50,10),
# at=seq(0,10,1),
# cex.axis=1,tck=0.015)
#
# axis(2,at=seq(0.5,1,0.1),cex.axis=1.2,las=2,tck=0.015)
#
# #plot_winner_prob_2(1:50,a=avalues[i],b=bvalues[i],"black")
# plot_winner_prob_2(1:10,a=avalues[i],b=bvalues[i],"black")
#
# mtext("P (dominant wins)",
# side=2, adj=0, line=3, cex=0.85);
#
# } else {
#
# plot(c(1,50),c(0,1),type="n",
# ylab="",
# xlab="",
# xaxt="n",
# yaxt="n",
# frame.plot=FALSE)
#
# par(xpd=TRUE)
# legend(0,0.7,
# c("David's score",
# "original Elo-rating",
# "randomized Elo-rating",
# "ADAGIO"),
# col=c("black",
# "red",
# "blue",
# "orange"),
# cex=1.35,bty='n',
# pch=rep(19,4),
# inset=c(0,0))
#
# # legend(0,0.8,
# # c("package:aniDom",
# # "package:EloRating"),
# # col=c("red",
# # "green"),
# # cex=1.35,bty='n',
# # pch=rep(19,4),
# # inset=c(0,0))
#
# mtext("Difference in rank ",
# side=3, adj=1, line=-2, cex=1);
#
# }
#
# }
#
# }
#
#
# title(ylab = "Spearman rank correlation coefficient",
# outer = TRUE, line = 0)
#
# par(mfrow=c(1,1))
#
# }
#
# dev.off() |
59beeed7085f6d57befdee574f13a42686e0e09c | 205eba39f070121c6d322ec62701005c101e82da | /man/gh_legacy_get_user_search_by_keyword.Rd | c5866413837040478be88e74119a38464d1f4918 | [
"MIT"
] | permissive | ramnathv/gh3 | c6f5991c3f780470f65fc57c382e95dc2b584b3e | 71eef49603ea0cce78caf680cf2d80a8eb1a57df | refs/heads/master | 2020-05-19T15:56:20.787819 | 2019-05-06T16:48:24 | 2019-05-06T16:48:24 | 185,095,205 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 652 | rd | gh_legacy_get_user_search_by_keyword.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gh3.R
\name{gh_legacy_get_user_search_by_keyword}
\alias{gh_legacy_get_user_search_by_keyword}
\title{Find users by keyword.}
\usage{
gh_legacy_get_user_search_by_keyword(keyword, ...)
}
\arguments{
\item{keyword}{The search term}
\item{...}{Additional parameters to pass to \code{\link[gh]{gh}}. See details.}
}
\description{
Find users by keyword.
}
\details{
Additional parameters that can be passed:
\describe{
\item{type}{ It takes values in: all, public, private, forks, sources, member.
The default is: all}
\item{Accept}{Is used to set specified media type. }
}
}
|
e1327363ed6e89671bb6e1efd0792fd256b9e274 | b76e9f7f2183b1f24f2123dc41d06c483306557e | /LL Age Depth Model.R | bfea4908f7b8309726c80529b4a9acb5b22b8d9b | [] | no_license | jesshstone/SESYNC-Extinction-and-Resilience | adac4f20a846181e77db746ab715e0b58a120650 | 6b603b3af83b6267755e0c20e3dd72a007601639 | refs/heads/master | 2020-06-23T01:20:44.239051 | 2019-07-26T19:44:43 | 2019-07-26T19:44:43 | 198,458,204 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 860 | r | LL Age Depth Model.R | attach(LLAgeDepthModel)
# Bchron age-depth model for core "LL".
# Expects `LLAgeDepthModel` (dated samples: ages, agesds, calCurves,
# posistion, thickness, id) and `LL_isotopes` (column `depth`) to already
# be loaded in the workspace.
print(LLAgeDepthModel)

library(Bchron)

# Fit the Bayesian age-depth model and predict ages every 1 cm from 0-219 cm.
# NOTE(review): `posistion` looks like a typo for `position` -- confirm it
# matches the actual column name in the LLAgeDepthModel data frame.
LLout = Bchronology(ages=LLAgeDepthModel$ages,
                    ageSds=LLAgeDepthModel$agesds,
                    calCurves=LLAgeDepthModel$calCurves,
                    positions=LLAgeDepthModel$posistion,
                    positionThicknesses=LLAgeDepthModel$thickness,
                    ids=LLAgeDepthModel$id,
                    predictPositions=seq(0,219,by=1))

# Posterior summaries: predicted ages, MCMC convergence diagnostics,
# and outlier probabilities for each dated sample.
summary(LLout)
summary(LLout, type = 'convergence')
summary(LLout, type = 'outliers')

# Age-depth plot of the fitted model.
plot(LLout,
     main="LL age depth model",
     xlab='Age (cal yrs BP)',
     ylab='Depth (cm)',
     las=1)

View(LLout)
LLout$theta

# Posterior age draws at the isotope-sample depths (one column per depth,
# one row per posterior draw).
ages <- predict(LLout, LL_isotopes$depth)
View(ages)

# Mean calibrated age per depth = column means over the posterior draws.
# Bug fix: the original script called the non-existent `rowmeans()` (an
# error at run time) and repeated the predict()/colMeans() calls; one
# predict() and one colMeans() suffice.
ages2 <- colMeans(ages)
|
a9dac18d824b13f25e5f079e2e2d913042c7e40f | fd5a10219ac23bdbe8eee102faedd85585899298 | /Elevation lines/Script/Elevation gganimate.R | 49d457e04123533a4829a28a26c8c426ca70c2af | [] | no_license | adamflr/Visuals | 494a940301d364dafd028d3fb9a68620b16805ea | 8cb1424efc82238a08c196775c6ce257ea5f189e | refs/heads/main | 2023-05-26T21:42:52.737305 | 2023-04-30T11:03:47 | 2023-04-30T11:03:47 | 213,185,821 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,457 | r | Elevation gganimate.R | # Elevation lines, gganimate
# Toy data: 40 points on 4 horizontal rows (y = 1..4, x = 1..10).
# NOTE: `id = 1:20` is recycled over the 40 rows, so each id labels two points.
dat <- data.frame(x = rep(1:10, 4), y = rep(1:4, each = 10), id = 1:20)
library(dplyr)
library(gganimate)
# Static preview: one line segment per (recycled) id, i.e. 20 segments.
ggplot(dat, aes(x, y, group = id)) +
  geom_line()
# Animation time: rows y = 1,2 get times spread across [2, 3];
# rows y = 3,4 keep t = 1.
dat$t <- (dat$y %in% c(1,2)) + 1
dat$t[dat$t == 2] <- 2 + seq(0, 1, length.out = sum(dat$t == 2))
# NOTE(review): `group = seq_along(id)` is passed outside aes() and `id` is
# not a standalone variable here -- this line likely errors; it probably
# wants aes(group = seq_along(id)). Also prefer TRUE/FALSE over T/F.
ggplot(dat, aes(x, y)) +
  geom_point(group = seq_along(id)) +
  transition_time(t) +
  shadow_mark(past = T, future = F)
## Simplified example with a single row
dat <- data.frame(x = rep(1:5, 2), y = rep(0:1, each = 5))
dat$point_id <- dat$x
dat$t <- dat$y
# Stagger the arrival times of the five upper points across [0.5, 1].
dat$t[dat$t == 1] <- 1 + seq(-0.5, 0, length.out = 5)
g <- ggplot(dat, aes(x, y, col = as.character(point_id))) +
  geom_point() +
  transition_states(t) +
  shadow_wake(wake_length = 0.5)
animate(g, duration = 5, nframes = 20)
# Expand data frame
# Build a full grid of (x, time) pairs so every point exists at every
# animation frame; `Var2` is the unnamed time column created by expand.grid().
dat
t <- unique(dat$t)
dat2 <- expand.grid(x = 1:5, t)
dt <- dat[6:10,]
dat2 <- left_join(dat2, dt, by = "x") %>%
  arrange(x)
# A point is "up" (y = 1) once the frame time Var2 reaches its start time t.
dat2$y <- ifelse(dat2$Var2 >= dat2$t, 1, 0)
{ggplot(dat2, aes(x, y, col = as.character(point_id))) +
    geom_point() +
    transition_time(Var2)} %>%
  animate(duration = 5, nframes = 40)
# Moving in the right direction. Each point moves independently. Cumbersome data construction though.
{ggplot(dat2, aes(x, y)) +
    geom_line() +
    transition_time(Var2)} %>%
  animate(duration = 5, nframes = 40)
# Moving points at particular times
dat <- data.frame(x = 1:3, y = rep(0:1, each = 3), t = c(0,0,0,0.5,1,2), point_id = 1:3)
|
7d685a1de79dcf3ed7fcbc920990263318996678 | be4649735271cd61c719f3e4e543ca6108bd7f4e | /cachematrix.R | 58b7dfaccb15bb94f244ab944e6c73179e7e55a4 | [] | no_license | myrasaurus/ProgrammingAssignment2 | aa94354c0e45b4c07c21a2df04498d4425e60e45 | 50807bac1ca3a96b6999d5a0d13760472d01295f | refs/heads/master | 2021-05-27T03:54:22.729588 | 2014-04-26T21:48:44 | 2014-04-26T21:48:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,471 | r | cachematrix.R | ## This function takes matrix input of an invertible matrix
## Create a special "matrix" object that can cache its inverse.
##
## Returns a list of four accessor functions closing over the matrix `x`
## and its cached inverse:
##   set(y)        -- store a new matrix and invalidate the cache
##   get()         -- retrieve the stored matrix
##   setInverse(v) -- store a computed inverse in the cache
##   getInverse()  -- retrieve the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
        cached_inverse <- NULL
        set <- function(y) {
                ## Replacing the matrix invalidates any previously cached inverse.
                x <<- y
                cached_inverse <<- NULL
        }
        get <- function() {
                x
        }
        setInverse <- function(inv) {
                cached_inverse <<- inv
        }
        getInverse <- function() {
                cached_inverse
        }
        ## Expose the closures so they can be used outside this function's scope.
        list(set = set,
             get = get,
             setInverse = setInverse,
             getInverse = getInverse)
}
## Return a matrix that is the inverse of the matrix stored in `x`,
## where `x` is a cache object created by makeCacheMatrix().
## On a cache hit a message is emitted and the stored inverse is returned;
## otherwise the inverse is computed once with solve() and cached.
cacheSolve <- function(x, ...) {
        cached <- x$getInverse()
        if (is.null(cached)) {
                ## Cache miss: compute the inverse once and store it for next time.
                cached <- solve(x$get())
                x$setInverse(cached)
        } else {
                message("getting cached data")
        }
        cached
}
|
1974b885e918d950ee1ac6e6c22e0d4dc2089388 | 80c6644d745190dd3702b5668ef5eae423a27859 | /calculo_de_indices_climaticos.R | 3f4c9269fe0fef12b770ae77f79a84783dbbc2e2 | [] | no_license | fmanquehual/proyecto_agua_en_R | 356bfdf9c4156587a2ccfd3addaadfd634aed537 | e4a51c5192f174708a982b62a42626fff51078a1 | refs/heads/master | 2023-01-14T13:22:57.448036 | 2020-11-13T05:15:59 | 2020-11-13T05:15:59 | 300,746,184 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,438 | r | calculo_de_indices_climaticos.R | library(climdex.pcic)
# ETCCDI climate-index analysis for CR2 station data (climdex.pcic).
library(lubridate)
library(ggplot2)
library(trend)
# NOTE(review): rm(list=ls()) / dev.off() / setwd() at the top of a script
# are interactive-session helpers; dev.off() errors when no graphics device
# is open, and the absolute paths make the script machine-specific.
rm(list=ls())
dev.off()
setwd('C:/Users/Usuario/Documents/Francisco/proyecto_agua/proyecto_agua_en_R/')
# Trend-slope helpers: compute the slope legend for annual and per-season
# index series (used when building the plotting data frames below).
source('funcion_calculo_de_pendiente_indices_climaticos.R')
source('funcion_calculo_de_pendiente_indices_climaticos_por_estacion.R')
# Read data ----
# Comuna under analysis (alternatives kept as a reminder of valid values).
comuna <- 'Puren' # 'Teodoro_Schmidt' # 'Padre_Las_Casas' # 'Imperial'
directorio.principal <- 'C:/Users/Usuario/Documents/Francisco/proyecto_agua/CR2/base_de_datos/'
# Daily precipitation, maximum and minimum temperature series for the
# comuna. [2] picks the second file matching each pattern -- presumably the
# cleaned/filled series; confirm against the directory contents.
setwd(paste0(directorio.principal, comuna, '/', 'precipitacion/'))
pr <- read.csv(list.files(pattern = 'pr')[2])
setwd(paste0(directorio.principal, comuna, '/', 'temperatura_maxima/'))
tmax <- read.csv(list.files(pattern = 'tmax')[2])
setwd(paste0(directorio.principal, comuna, '/', 'temperatura_minima/'))
tmin <- read.csv(list.files(pattern = 'tmin')[2])
# fin ---
# Prepare data ----
# Convert date columns to PCICt, the calendar-aware date class required by
# climdex.pcic.
pr.fecha <- as.PCICt(pr$fecha, cal="gregorian")
tmin.fecha <- as.PCICt(tmin$fecha, cal="gregorian")
tmax.fecha <- as.PCICt(tmax$fecha, cal="gregorian")
# climdexInput object over the 1979-2018 base period; all indices below are
# computed from this object.
ci <- climdexInput.raw(tmax=tmax$valor,
                       tmin=tmin$valor,
                       prec=pr$valor,
                       tmax.dates=tmax.fecha, tmin.dates=tmin.fecha, prec.dates=pr.fecha,
                       base.range=c(1979, 2018))
# fin ---
# Compute indices ----
# ETCCDI climate indices from the climdexInput object `ci`.
# Annual indices yield one value per year (1979:2018 -> 40 values);
# monthly indices yield one value per month (40 years * 12 -> 480 values).
fd <- climdex.fd(ci) ; length(fd) # 40 (1979:2018 = 40)
su <- climdex.su(ci) ; length(su) # 40
# id <- climdex.id(ci) ; length(id) # 40 # not used by the DMC and gives null results
# tr <- climdex.tr(ci) ; length(tr) # 40 # not used by the DMC and gives null results
# gsl <- climdex.gsl(ci) ; length(gsl) # 40 # not used by the DMC and gives null results
txx <- climdex.txx(ci) ; length(txx) # 480 ((1979:2018)*12 = 480)
tnx <- climdex.tnx(ci) ; length(tnx) # 480
txn <- climdex.txn(ci) ; length(txn) # 480
tnn <- climdex.tnn(ci) ; length(tnn) # 480
tn10p <- climdex.tn10p(ci) ; length(tn10p) # 480
tx10p <- climdex.tx10p(ci) ; length(tx10p) # 480
tn90p <- climdex.tn90p(ci) ; length(tn90p) # 480
tx90p <- climdex.tx90p(ci) ; length(tx90p) # 480
wsdi <- climdex.wsdi(ci) ; length(wsdi) # 40
csdi <- climdex.csdi(ci) ; length(csdi) # 40
dtr <- climdex.dtr(ci) ; length(dtr) # 480
rx1day <- climdex.rx1day(ci) ; length(rx1day) # 480
rx5day <- climdex.rx5day(ci) ; length(rx5day) # 480
sdii <- climdex.sdii(ci) ; length(sdii) # 40
r10mm <- climdex.r10mm(ci) ; length(r10mm) # 40
r20mm <- climdex.r20mm(ci) ; length(r20mm) # 40
# rnnmm <- climdex.rnnmm(ci) ; length(rnnmm) # 40 # not used by the DMC
cdd <- climdex.cdd(ci) ; length(cdd) # 40
cwd <- climdex.cwd(ci) ; length(cwd) # 40
r95ptot <- climdex.r95ptot(ci) ; length(r95ptot) # 40
r99ptot <- climdex.r99ptot(ci) ; length(r99ptot) # 40
prcptot <- climdex.prcptot(ci) ; length(prcptot) # 40
# fin ---
# Annual index data frames ----
# For each annual index, build a tidy (fecha, indice, valor) data frame,
# attach the trend legend from calculo_de_pendiente_indices_climaticos()
# (sourced above), then stack everything into one data frame for plotting.
# The original script repeated this pattern twelve times; a data-driven
# loop removes the copy-paste. (id, tr, gsl and rnnmm stay omitted, as in
# the original analysis: not used by the DMC / null results.)
indices.anuales <- list(fd = fd, su = su, wsdi = wsdi, csdi = csdi,
                        sdii = sdii, r10mm = r10mm, r20mm = r20mm,
                        cdd = cdd, cwd = cwd, r95ptot = r95ptot,
                        r99ptot = r99ptot, prcptot = prcptot)

db.lista.anual <- lapply(names(indices.anuales), function(nombre) {
  valores <- indices.anuales[[nombre]]
  # One row per year: year (character, converted to numeric after stacking),
  # index name, index value.
  db.i <- data.frame(fecha = names(valores), indice = nombre, valor = valores)
  db.i$leyenda.valor.pendiente <-
    calculo_de_pendiente_indices_climaticos(db.i, anho_inicial = 1979)
  db.i
})

db.indices.anuales <- do.call(rbind, db.lista.anual)
db.indices.anuales$fecha <- as.numeric(db.indices.anuales$fecha)
row.names(db.indices.anuales) <- seq_len(nrow(db.indices.anuales))

dim(db.indices.anuales)
str(db.indices.anuales)
head(db.indices.anuales)
# fin ---
# Monthly (seasonal) index data frames ----
# For each monthly index, build a (fecha, indice, valor) data frame and run
# calculo_de_pendiente_indices_climaticos_por_estacion() (sourced above),
# which aggregates by season and attaches the trend legend. The original
# script repeated this pattern eleven times and the stacking four times;
# a loop plus a small helper removes the duplication while producing the
# same db.indices.mensuales.parte.1..4 objects used by the plots below.
indices.mensuales <- list(txx = txx, tnx = tnx, txn = txn, tnn = tnn,
                          tn10p = tn10p, tx10p = tx10p, tn90p = tn90p,
                          tx90p = tx90p, dtr = dtr,
                          rx1day = rx1day, rx5day = rx5day)

db.mensual <- lapply(names(indices.mensuales), function(nombre) {
  valores <- indices.mensuales[[nombre]]
  db.i0 <- data.frame(fecha = names(valores), indice = nombre, valor = valores)
  calculo_de_pendiente_indices_climaticos_por_estacion(db.i0, anho_inicial = 1979)
})
names(db.mensual) <- names(indices.mensuales)

# Helper: stack a group of per-index data frames into one plotting data
# frame with clean row names and the seasons ordered
# Primavera -> Verano -> Otono -> Invierno (column order in the facets).
combinar_indices <- function(lista.db) {
  db <- do.call(rbind, lista.db)
  row.names(db) <- seq_len(nrow(db))
  db$estacion <- factor(db$estacion,
                        levels = c('Primavera', 'Verano', 'Otono', 'Invierno'))
  db
}

# db 1
db.indices.mensuales.parte.1 <- combinar_indices(db.mensual[c('txx', 'tnx', 'txn')])
head(db.indices.mensuales.parte.1)
str(db.indices.mensuales.parte.1)

# db 2
db.indices.mensuales.parte.2 <- combinar_indices(db.mensual[c('tnn', 'tn10p', 'tx10p')])
head(db.indices.mensuales.parte.2)
str(db.indices.mensuales.parte.2)

# db 3
db.indices.mensuales.parte.3 <- combinar_indices(db.mensual[c('tn90p', 'tx90p', 'dtr')])
head(db.indices.mensuales.parte.3)
str(db.indices.mensuales.parte.3)

# db 4
db.indices.mensuales.parte.4 <- combinar_indices(db.mensual[c('rx1day', 'rx5day')])
head(db.indices.mensuales.parte.4)
str(db.indices.mensuales.parte.4)
# fin ---
# Annual plot ----
# One faceted figure: each annual index as a time series with a linear
# trend line and the slope legend printed in the lower-left panel corner.
# Output goes to the comuna's 'plots' directory (set as working directory).
setwd(paste0(directorio.principal, comuna, '/', 'plots'))
nombre.plot <- paste0('indices_climaticos_anuales_', comuna, '.png') ; nombre.plot
png(nombre.plot, width = 850, height = 650, units = "px", type = 'cairo')
ggplot(db.indices.anuales, aes(x=fecha, y=valor) ) +
  geom_line() +
  labs(x = '', y = 'Valor') +
  geom_smooth(method = lm, # regression line
              se = FALSE, col = 'red') + # hide the confidence interval
  geom_text(
    data = db.indices.anuales,
    # -Inf/-Inf anchors the legend text at the panel's lower-left corner.
    mapping = aes(x = -Inf, y = -Inf, label = leyenda.valor.pendiente), # x = -Inf, y = -Inf
    check_overlap = TRUE,
    hjust = -0.05, # -0.1
    vjust = -1, # -1
    inherit.aes=FALSE,
    size=3.9
  ) +
  scale_x_continuous(limits = c(1979, 2018), breaks=seq(1979, 2018, by=2)) +
  facet_wrap(~indice, ncol = 3, scales="free_y") +
  theme_bw() +
  theme(text = element_text(size=14), panel.spacing = unit(1, "lines"),
        axis.text.x = element_text(angle = 90, hjust = 1))
dev.off()
# fin ---
# Seasonal plots ----
# The four "parte" figures were four byte-identical ggplot blocks differing
# only in the data frame plotted and the file-name suffix, so generate them
# in a loop. Output still goes to the comuna's 'plots' directory (working
# directory set by the annual-plot section above). Inside a for loop ggplot
# objects are not auto-printed, hence the explicit print().
partes.mensuales <- list(db.indices.mensuales.parte.1,
                         db.indices.mensuales.parte.2,
                         db.indices.mensuales.parte.3,
                         db.indices.mensuales.parte.4)

for (k in seq_along(partes.mensuales)) {
  db.parte <- partes.mensuales[[k]]

  nombre.plot <- paste0('indices_climaticos_por_estacion_', comuna,
                        '_parte_', k, '.png')
  print(nombre.plot)

  png(nombre.plot, width = 850, height = 500, units = "px", type = 'cairo')
  print(
    ggplot(db.parte, aes(x=fecha, y=valor)) +
      geom_line() +
      labs(x = '', y = 'Valor') +
      geom_smooth(method = lm, # regression line
                  se = FALSE, col = 'red') + # hide the confidence interval
      geom_text(
        data = db.parte,
        # -Inf/-Inf anchors the legend text at the panel's lower-left corner.
        mapping = aes(x = -Inf, y = -Inf, label = leyenda.valor.pendiente),
        check_overlap = TRUE,
        hjust = -0.05,
        vjust = -1,
        inherit.aes=FALSE,
        size=3.9
      ) +
      scale_x_continuous(limits = c(1979, 2018), breaks=seq(1979, 2018, by=2)) +
      facet_grid(indice~estacion, scales="free_y") +
      theme_bw() +
      theme(text = element_text(size=14), panel.spacing = unit(1, "lines"),
            axis.text.x = element_text(angle = 90, hjust = 1))
  )
  dev.off()
}
# fin ---
|
11d6de44b94553db457790ae441e8a3a69a6b20e | 494c71f56647f6695bd8b046372fd42c7f1b9040 | /R/zzz.R | b51a4fd93f114ae14871f3f956b2481ce546e82e | [] | no_license | minghao2016/tidysq | 1e813a019878dac2bb1cb239d05fa97e37bc93b4 | 953b1d3c1ce1e250f9afcb3f8fe044c6f7391c76 | refs/heads/master | 2023-03-23T20:03:08.700872 | 2021-03-12T17:03:05 | 2021-03-12T17:03:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 446 | r | zzz.R | .onLoad <- function(libname, pkgname) {
prev_options <- options()
new_options <- list(
tidysq_NA_letter = "!",
tidysq_safe_mode = FALSE,
tidysq_on_warning = "warning",
tidysq_pillar_max_width = 15,
tidysq_print_max_sequences = 10,
tidysq_print_use_color = TRUE
)
unset_inds <- !(names(new_options) %in% names(prev_options))
if (any(unset_inds)) {
options(new_options[unset_inds])
}
invisible()
} |
a8bfff6bce9ff1a28dc503974dd2a5442e6c1668 | 9fbd7cafab56b8cb58ca7385a726a0070d9e050d | /man/sen_senator_suplentes.Rd | 40f308d94cf57437a986f2eea8cc5fc869fe3268 | [] | no_license | duarteguilherme/congressbr | 6f343935a7734dfac70c6794a031db614dafd248 | e9f05428e877f56d31966b14ca00b4ec825fabf5 | refs/heads/master | 2022-11-22T05:40:27.177434 | 2020-07-14T23:20:22 | 2020-07-14T23:20:22 | 83,827,931 | 17 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,108 | rd | sen_senator_suplentes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sen_senator_details.R
\name{sen_senator_suplentes}
\alias{sen_senator_suplentes}
\title{Downloads and tidies information on titular senators and their
\emph{suplentes} in the Federal Senate}
\usage{
sen_senator_suplentes(id = 0, ascii = TRUE)
}
\arguments{
\item{id}{\code{integer}. This number represents the id of the senator
you wish to get information on. These ids can be extracted from the API
using the \code{sen_senator_list()} function, where they will appear as the
first column in the data frame returned, under the name 'id'.}
\item{ascii}{\code{logical}. If TRUE, certain strings are converted to ascii
format.}
}
\value{
A tibble, of classes \code{tbl_df}, \code{tbl} and \code{data.frame}.
}
\description{
Downloads and tidies information on titular senators and their
\emph{suplentes} in the Federal Senate.
}
\examples{
# A titular senator, José Serra:
Serra <- sen_senator_suplentes(id = 90)
}
\seealso{
\code{sen_senator_list()}
}
\author{
Robert Myles McDonnell, Guilherme Jardim Duarte & Danilo Freire.
}
|
1bf57732951a3bc632c36d9136e0ae327cf4bbb9 | d029129f3872cccb3132b7f6a0b043dcedd91916 | /assignment3/solution3_5.R | f4e853ae860cd8d96a0bad92c9fff9ea0da90c50 | [] | no_license | samgary/cb101-2020-assignment | f44b0ddf1e8fdf7af1676f929b4ecc71b5e61049 | dbbd05088c29bdb0048360247e21e86c12f3e271 | refs/heads/main | 2023-01-31T14:01:52.840742 | 2020-12-07T02:30:38 | 2020-12-07T02:30:38 | 312,909,142 | 0 | 0 | null | 2020-11-14T21:57:06 | 2020-11-14T21:57:06 | null | UTF-8 | R | false | false | 607 | r | solution3_5.R | #Problem 5: Fermi estimation. How many liters of melted chocolate would be needed to cover enough cashews to cover a football field?
# Assumptions: the area covered by a single cashew is ~0.25 square inches, and the volume of melted chocolate needed to cover one cashew is ~2 mL (0.002 L).
# A football field is ~300 feet long x 160 feet wide, i.e. roughly 3,600 inches by 1,900 inches.
# Total area of a football field is therefore ~ 7 x 10^6 square inches.
# Cashews needed to cover the area of a football field is ~ 7 x 10^6 / 0.25 = 2.8 x 10 ^7
# Volume of chocolate needed to cover enough cashews to cover a football field would be:
# 2.8 x 10^7 * 0.002L = 56,000 Liters |
63f74f3d4b2edc089da3e805595c7893a3352124 | 9066794639a122f5aabdf9740f364e3fe4b2ca2c | /Census/gwrCensus.R | c63d24df321f45a7aac240548604ac9bfc135c45 | [] | no_license | nserr/GEOG-418-Final-Project | 634b9c9a91d3f454b0f4b43460e88eeec30fe07f | 7f834d38317336553f9011572c7260aa15d85dc2 | refs/heads/master | 2023-01-25T00:30:36.336642 | 2020-12-07T01:00:52 | 2020-12-07T01:00:52 | 315,450,493 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,692 | r | gwrCensus.R | #####################################
## Geographically Weighted Regression
#####################################
# Local (spatially varying) regression of Income on Pm2.5 per census tract
# using adaptive-bandwidth GWR (gwr.sel()/gwr(), presumably from spgwr).
# Expects `income.tracts.no0` -- a spatial polygons data frame with Income
# and Pm2.5 columns -- to exist in the workspace.
#Add polygon coordinates to SPDF
income.tracts.no0.coords <- sp::coordinates(income.tracts.no0)
income.tracts.no0$X <- income.tracts.no0.coords[,1]
income.tracts.no0$Y <- income.tracts.no0.coords[,2]
## Determine the bandwidth for GWR
# adapt=T selects an adaptive bandwidth (a proportion of observations).
# NOTE(review): prefer TRUE over T, which can be reassigned.
GWRbandwidth <- gwr.sel(income.tracts.no0$Income~income.tracts.no0$Pm2.5, 
                        data=income.tracts.no0, coords=cbind(income.tracts.no0$X,income.tracts.no0$Y),adapt=T) 
## Perform GWR
# hatmatrix/se.fit enable the local diagnostics (e.g. localR2) used below.
gwr.model = gwr(income.tracts.no0$Income~income.tracts.no0$Pm2.5, 
                data=income.tracts.no0, coords=cbind(income.tracts.no0$X,income.tracts.no0$Y), 
                adapt=GWRbandwidth, hatmatrix=TRUE, se.fit=TRUE)
gwr.model
# Per-tract GWR output (local coefficients, local R2, ...) as a data frame.
results<-as.data.frame(gwr.model$SDF)
head(results)
#Add local R-square values
income.tracts.no0$localr <- results$localR2
#Create choropleth map of r-square values
map_r2 <- tm_shape(income.tracts.no0) +
  tm_polygons(col = "localr",
              title = "R2 values",
              style = "jenks",
              palette = "viridis", n = 6,
              midpoint = 0, border.alpha = 0.1) +
  tm_legend(legend.outside = TRUE)
# Write the map to r2.png (printing the tmap object renders it).
png("r2.png")
map_r2
dev.off()
#Add coefficient values
# Local slope of Pm2.5 for each tract (column named after the formula term).
income.tracts.no0$coeff <- results$income.tracts.no0.Pm2.5
#Create choropleth map of the coefficients
map_coeff <- tm_shape(income.tracts.no0) +
  tm_polygons(col = "coeff",
              title = "Coefficients",
              style = "jenks",
              palette = "viridis", n = 6,
              midpoint = 0, border.alpha = 0.1) +
  tm_legend(legend.outside = TRUE)
png("coeff.png")
map_coeff
dev.off()
|
cc6f9d5ab3f1a67bcb83050101da44ee5e28608d | 9b9c0ef49b1e80016500e336cd21eaf677faf9c2 | /binomial.R | 760934a2e222f9a268b89cecaf4cca2a1d08ea1b | [] | no_license | RafaelSdeSouza/UV_JAGS | 410bfe0b7591eab59f2f7195ce59118eb9be3f21 | 2a01fa30e0e65fa6a48f1b30b68271ac90b4fcd9 | refs/heads/master | 2021-09-28T21:16:52.915016 | 2018-11-20T17:33:13 | 2018-11-20T17:33:13 | 104,210,106 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,433 | r | binomial.R | # From: Bayesian Models for Astrophysical Data, Cambridge Univ. Press
# (c) 2017, Joseph M. Hilbe, Rafael S. de Souza and Emille E. O. Ishida
#
# you are kindly asked to include the complete citation if you used this
# material in a publication
# Code 5.25 - Synthetic data from a binomial model in R
set.seed(33559)
source("https://raw.githubusercontent.com/johnbaums/jagstools/master/R/jagsresults.R")
dat <- read.csv("https://raw.githubusercontent.com/mdastro/UV_ETGs/master/Coding/Model/regression_dataset.csv")
nobs <- nrow(dat)
# y: counts of UV-upturn galaxies; m: red-sequence totals (binomial
# denominators); x1: average redshift (the single predictor)
bindata=data.frame(y=dat$number_galaxies_uvup,m=dat$number_galaxies_redsequence,
x1 = dat$average_redshift)
# Code 5.26 - Binomial model in R using JAGS
library(R2jags)
# Design matrix: intercept, redshift, and redshift squared
X <- model.matrix(~ x1 + I(x1^2), data = bindata)
K <- ncol(X)
model.data <- list(Y = bindata$y,
N = nrow(bindata),
X = X,
K = K,
m = bindata$m)
# Write the JAGS model definition to GLOGIT.txt; the quoted block below is
# JAGS model code (a string literal), not R — do not edit casually.
sink("GLOGIT.txt")
cat("
model{
# Priors
# Diffuse normal priors betas
for (i in 1:K) { beta[i] ~ dnorm(0, 0.0001)}
# Likelihood
for (i in 1:N){
Y[i] ~ dbin(p[i],m[i])
logit(p[i]) <- eta[i]
eta[i] <- inprod(beta[], X[i,])
}
# Prediction for new data
for (j in 1:N){
etax[j]<-inprod(beta[], X[j,])
logit(px[j]) <- etax[j]
Yx[j]~dbern(px[j])
}
}
",fill = TRUE)
sink()
# Random starting values for the regression coefficients (one set per chain)
inits <- function () {list(beta = rnorm(K, 0, 0.1)) }
params <- c("beta","px")
# Fit the model: 3 chains, 3000 burn-in, 5000 total iterations, no thinning
BINL <- jags(data = model.data,
inits = inits,
parameters = params,
model.file = "GLOGIT.txt",
n.thin = 1,
n.chains = 3,
n.burnin = 3000,
n.iter = 5000)
print(BINL, intervals=c(0.025, 0.975), digits=3)
# Plot
# Posterior summaries (mean and credible intervals) of fitted probabilities px
y <- jagsresults(x=BINL, params=c('px'))
x <- bindata$x1
gdata <- data.frame(x = x, mean = y[,"mean"],lwr1=y[,"25%"],lwr2=y[,"2.5%"],upr1=y[,"75%"],upr2=y[,"97.5%"])
bindata$frac <- bindata$y/bindata$m
# Observed UV fractions with 50% (darker) and 95% (lighter) credible bands.
# NOTE(review): ggplot2 is used but not attached in this script — presumably
# loaded elsewhere (e.g. via tidyverse); confirm before running standalone.
ggplot(bindata,aes(x=x1,y=frac))+
geom_point(size=2.75,colour="blue3")+
geom_ribbon(data=gdata,aes(x=x,ymin=lwr1, ymax=upr1,y=NULL), alpha=0.45, fill=c("orange2"),show.legend=FALSE) +
geom_ribbon(data=gdata,aes(x=x,ymin=lwr2, ymax=upr2,y=NULL), alpha=0.35, fill = c("orange"),show.legend=FALSE) +
geom_line(data=gdata,aes(x=x,y=mean),colour="gray25",linetype="dashed",size=1,show.legend=FALSE)+
theme_bw() +xlab("Redshift") + ylab("UV/Red sequence")
|
fe7e6a07297497e41b678b69576dc3b63d2f07d3 | b94bde90fdb3e38483293d906c0b5f0669af647e | /simsem/man/simLnorm.Rd | e660a726a17e306bd004c8382068b4b1472e18a6 | [] | no_license | pairach/simsem | c2da13f31af4b8ed986647320090bbd9edc0c400 | 8194f63851ed0c0dbd447726988b0a58619ec43a | refs/heads/master | 2020-12-25T01:50:53.664082 | 2012-05-29T21:38:06 | 2012-05-29T21:38:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 830 | rd | simLnorm.Rd | \name{simLnorm}
\alias{simLnorm}
\title{
Create random log normal distribution object
}
\description{
Create a random log-normal distribution object. The resulting object stores the mean and standard deviation parameters (both on the log scale).
}
\usage{
simLnorm(meanlog = 0, sdlog = 1)
}
\arguments{
\item{meanlog}{
The mean in log scale
}
\item{sdlog}{
The standard deviation in log scale
}
}
\value{
\item{SimLnorm}{Random Log Normal Distribution object (\code{\linkS4class{SimLnorm}}) that save the specified parameters}
}
\author{
Sunthud Pornprasertmanit (University of Kansas; \email{psunthud@ku.edu})
}
\seealso{
\itemize{
\item \code{\linkS4class{VirtualDist}} for all distribution objects.
}
}
\examples{
lognorm <- simLnorm(0, exp(1))
run(lognorm)
summary(lognorm)
} |
e53134895d89945da9fc55faef3cef4dd6c735ff | 318ae47c60f33603e5a6685f418117f6014cc8d8 | /feature_extraction/jointSummaries_features.R | ef3ee0ae608f8dc551edea9ff9769f6bcf02716b | [
"MIT"
] | permissive | itismeghasyam/psorcast-validation-analysis | e915bcfc32a097420751ca87ee8ac6f8b4cd9aef | 3ce36673c8de20cfd6a9481feddf6d95f0a442ba | refs/heads/main | 2023-08-29T17:32:13.807074 | 2021-11-05T14:50:39 | 2021-11-05T14:50:39 | 376,185,467 | 0 | 0 | MIT | 2021-06-12T02:30:08 | 2021-06-12T02:30:08 | null | UTF-8 | R | false | false | 8,301 | r | jointSummaries_features.R | ############################################################
#' This script will be used for summarizing reported each
#' joint counts for Psorcast users
#'
#' Note: it will exclude finger joints and only use
#' main joint location (ankle, hip, elbow, wrist etc.)
#'
#' @author: aryton.tediarjo@sagebase.org
#' @maintainer: aryton.tediarjo@sagebase.org
############################################################
rm(list=ls())
gc()
# import libraries
library(synapser)
library(data.table)
library(tidyverse)
library(githubr)
source("utils/feature_extraction_utils.R")
source('utils/processing_log_utils.R')
synLogin()
############################
# Global Vars
############################
# Synapse folder that receives the generated feature files
PARENT_SYN_ID <- "syn22336715"
# Synapse folder that receives the error-log files
ERROR_LOG_SYN_ID <- "syn25832341"
# NOTE(review): VISIT_REF duplicates VISIT_REF_ID and appears unused in the
# visible code below — confirm it can be removed.
VISIT_REF <- "syn25825626"
# Curated PPACMAN table and visit reference table on Synapse
PPACMAN_TBL_ID <- "syn22337133"
VISIT_REF_ID <- "syn25825626"
# File column of the Synapse tables holding the per-record joint summaries
FILE_COLUMNS <- "summary.json"
# Major joint locations kept for analysis (finger joints are excluded)
JOINT_LOCATION <- c("knee", "hip", "ankle",
"wrist", "elbow", "shoulder")
# NOTE(review): OUTPUT_FILE appears unused in this script — confirm.
OUTPUT_FILE <- "joint_counts_comparison.tsv"
# One entry per activity: digital joint counts, gold-standard joint counts,
# and gold-standard swelling; each maps to its source table and output file.
JOINT_TBL_REF <- list(
dig_jc = list(
prefix = "dig_jc",
syn_id = "syn22281786",
output_filename = "dig_jc_features.tsv"),
gs_jc = list(
prefix = "gs_jc",
syn_id = "syn22281781",
output_filename = "gs_jc_features.tsv"
),
gs_swell = list(
prefix = "gs_swell",
syn_id = "syn22281780",
output_filename = "gs_swell_features.tsv"
)
)
############################
# Git Reference
############################
# Permalink to this script on GitHub, recorded as provenance on Synapse
SCRIPT_PATH <- file.path('feature_extraction', "jointSummaries_features.R")
GIT_TOKEN_PATH <- config::get("git")$token_path
GIT_REPO <- config::get("git")$repo
githubr::setGithubToken(readLines(GIT_TOKEN_PATH))
GIT_URL <- getPermlink(
repository = getRepo(
repository = GIT_REPO,
ref="branch",
refName='main'),
repositoryPath = SCRIPT_PATH)
#' function to count reported (selected) joints per record
#' @data dataframe of flattened summary.json
#' Rows flagged with an error are dropped before counting.
calculate_reported_counts <- function(data){
  no_error <- dplyr::filter(data, is.na(error))
  per_record <- dplyr::group_by(no_error, recordId)
  counted <- dplyr::summarise(per_record, counts = sum(isSelected, na.rm = TRUE))
  dplyr::arrange(counted, desc(counts))
}
#' function to build a comma-separated string of selected joint
#' identifiers per record
#' @data dataframe of flattened summary.json
create_joint_string_list <- function(data){
  selected_only <- dplyr::filter(data, is.na(error), isSelected == TRUE)
  per_record <- dplyr::group_by(selected_only, recordId)
  dplyr::summarise(per_record, joint_list = paste0(identifier, collapse = ","))
}
#' function to parse pain status symmetry
#' @data dataframe of flattened summary.json
#'
#' For each major joint present in the data (e.g. "knee"), builds a
#' status_<joint> column per record: "both", "left", "right", or NA,
#' based on the left_<joint>/right_<joint> selection flags. The per-joint
#' results are then full-joined back together on recordId.
parse_joint_pain_symmetry <- function(data){
# joints actually present in the data (identifier_group is NA for
# identifiers that did not match a major joint location)
detected_joints <- data %>%
drop_na(identifier_group) %>%
.$identifier_group %>%
unique()
purrr::map(detected_joints , function(joint_identifier){
# column names for this joint, built dynamically
output_cols <- glue::glue("status_{joint_identifier}")
left_side_cols <- glue::glue("left_{joint_identifier}")
right_side_cols <- glue::glue("right_{joint_identifier}")
# keep rows for this joint (plus error rows where identifier is NA),
# spread identifiers into columns, then classify the left/right pattern
data %>%
dplyr::filter(is.na(identifier) |
str_detect(identifier, joint_identifier)) %>%
pivot_wider(names_from = identifier,
values_from = isSelected) %>%
dplyr::mutate(!!sym(output_cols) :=
case_when((!!sym(right_side_cols) == TRUE &
!!sym(left_side_cols) == TRUE) ~ "both",
(!!sym(right_side_cols) == TRUE &
!!sym(left_side_cols) == FALSE) ~ "right",
(!!sym(right_side_cols) == FALSE &
!!sym(left_side_cols) == TRUE) ~ "left",
TRUE ~ NA_character_)) %>%
dplyr::select(recordId, !!sym(output_cols))}) %>%
purrr::reduce(dplyr::full_join, by = c("recordId"))
}
#' function to get joint report based on synapseID
#' @syn_id: synapse id of the joint report tables
#'
#' Downloads the table, flattens each record's summary.json, keeps the
#' most recent row per record/identifier, and tags every identifier with
#' its major joint location (knee, hip, ankle, wrist, elbow, shoulder).
get_joint_report <- function(syn_id){
get_table(syn_id, file_columns = FILE_COLUMNS) %>%
flatten_joint_summary() %>%
# de-duplicate: summarise_all(last) keeps the last row in each group
dplyr::group_by(recordId,
createdOn,
participantId,
identifier) %>%
dplyr::summarise_all(last) %>%
# identifier_group is NA when the identifier matches no major joint
dplyr::mutate(
identifier_group =
str_extract(identifier,
str_c(JOINT_LOCATION,
collapse = "|")))
}
# Entry point: pulls reference tables from Synapse, computes joint-count,
# joint-list, and pain-symmetry features for each activity table, and
# uploads the feature files plus an error log back to Synapse.
main <- function(){
#' - Go through each synapse id
#' - For each table flatten all the summary.json files
#' - Calculate metrics:
#' a. reported joint counts (group-by of record and identifier)
#' b. parse into string for all major joints
#' c. parse symmetrical pain status
#' get visit reference and curated ppacman table
visit_ref <- synGet(VISIT_REF_ID)$path %>% fread()
ppacman <- synGet(PPACMAN_TBL_ID)$path %>% fread()
# NOTE(review): joint_summaries (the map() result) is only used for its
# side effects — each iteration writes files to Synapse.
joint_summaries <- purrr::map(names(JOINT_TBL_REF), function(activity){
#' retrieve data
prefix <- JOINT_TBL_REF[[activity]]$prefix
output_filename <- JOINT_TBL_REF[[activity]]$output_filename
tbl_id <- JOINT_TBL_REF[[activity]]$syn_id
joint_report <- get_joint_report(tbl_id) %>% dplyr::ungroup()
#' get each metrics
# combine the three per-record metrics into one wide table, prefixing
# all metric columns with the activity prefix (e.g. "dig_jc_")
metrics <-
list(
joint_count = calculate_reported_counts(joint_report),
joint_str_list = create_joint_string_list(joint_report),
joint_pain_status = parse_joint_pain_symmetry(joint_report)) %>%
purrr::reduce(dplyr::full_join,
by = c("recordId")) %>%
dplyr::mutate(counts = ifelse(is.na(counts), 0, counts)) %>%
dplyr::rename_with(~paste0(prefix, "_", .),
-c("recordId"))
#' clean data
counts_columns <- paste0(prefix, "_", "counts")
# drop records with no summary data, attach PPACMAN/visit info, and keep
# one row per participant/visit (the most recent one)
joint_data <- joint_report %>%
distinct(recordId, participantId, createdOn) %>%
dplyr::left_join(metrics, by = c("recordId")) %>%
dplyr::arrange(desc(createdOn)) %>%
dplyr::mutate(error = ifelse(is.na(!!sym(counts_columns)),
"error: empty list in summary.json",
NA_character_)) %>%
dplyr::filter(is.na(error)) %>%
join_with_ppacman(
visit_ref_tbl = visit_ref,
ppacman_tbl = ppacman) %>%
dplyr::select(recordId,
participantId,
createdOn,
visit_num,
starts_with(activity)) %>%
dplyr::group_by(participantId, visit_num) %>%
dplyr::summarise_all(last) %>%
dplyr::mutate(createdOn = as.character(createdOn))
#' get error logging for removed records
# any record that did not survive the cleaning above is logged with a reason
error_log <- joint_report %>%
dplyr::filter(!recordId %in% unique(joint_data$recordId)) %>%
dplyr::group_by(recordId) %>%
dplyr::summarise_all(last) %>%
dplyr::select(recordId, error) %>%
dplyr::mutate(error = ifelse(
is.na(error),
"removed from ppacman joining",
error))
#' save joint features to synapse
save_to_synapse(data = joint_data,
output_filename = output_filename,
parent = PARENT_SYN_ID,
name = "get joint summaries",
executed = GIT_URL,
used = tbl_id)
#' save error log to synapse
save_to_synapse(data = error_log,
output_filename = glue::glue("error_log_", output_filename),
parent = ERROR_LOG_SYN_ID,
name = "get error log for joint summaries",
executed = GIT_URL,
used = tbl_id)
})
}
# run the pipeline, recording success/failure via the processing log helper
log_process(main(), SCRIPT_PATH)
|
1dcae51444228cc15fd4b4b9f1b79ce9e27d5cab | d4288dc5baf4ca0f59fe414f55b1ad7a442abc7e | /man/predictorC_wrap.Rd | 3eac427bf2fe7ecdd16834a4fb64c248dac7c7d9 | [] | no_license | courtiol/SileR | c9c3d3e091d207c685bb6729ceb6aaf8fdb7207e | e914b6090aba8a8eacad5feec0839d8a52625035 | refs/heads/master | 2021-06-03T17:23:15.030331 | 2020-05-09T12:54:34 | 2020-05-09T12:54:34 | 130,906,264 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 946 | rd | predictorC_wrap.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_model.R
\name{predictorC_wrap}
\alias{predictorC_wrap}
\title{Predict the probability of death (R wrapper for C++)}
\usage{
predictorC_wrap(data, design_matrix, indices = NULL)
}
\arguments{
\item{data}{The dataset to be used}
\item{design_matrix}{The design matrix}
\item{indices}{The list of indices computed by \code{compute_indices}}
}
\value{
A vector of the probability of death.
}
\description{
This function is called internally by other functions.
It is a wrapper for the function \code{predictorC} which
computes the probability of death in C++ (for faster computation).
}
\examples{
pmat <- build_param_matrix(param_values = c(0.1, 0.2), param_str = c("w1" = "s"))
dmat <- build_design_matrix(data = SurvEles_small, param_matrix = pmat)
p <- predictorC_wrap(data = SurvEles_small, design_matrix = dmat)
table(p)
}
\seealso{
\code{\link{predictor}}
}
|
9e8ebc51d407ca81c05f7a192e70de76858dcc0b | 943fdae2438c7beb2763b1883bb1ab267625173a | /ExData_Plotting1/Plot2.R | d09aff6dc93aef3a61da072d8aa6df7f3e36ebec | [] | no_license | Narges-Shah/Exploratory-Data-Analysis | 8deb74822c7df451112cd99577242fa4dd8c422c | 7a938f94a024310b810855be459cb08d5a5d050f | refs/heads/master | 2021-04-26T13:29:29.956666 | 2016-03-13T23:45:51 | 2016-03-13T23:45:51 | 53,810,621 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 456 | r | Plot2.R | # Reading the data set
# Load the two-month power-consumption extract; ';' separated, '?' marks NA
f2m <- read.csv("First2Month.csv", header=TRUE, sep=';', na.strings='?')
# Convert Date and Time variables
# combine the Date and Time columns into a single POSIXlt datetime
f2m$datetime <- strptime(paste(f2m$Date,f2m$Time), "%d/%m/%Y %H:%M")
# Plotting the Data
# open a 480x480 PNG device with a transparent background
png("plot2.png",width=480,height=480,units="px",bg="transparent")
message("Global Active Power Plot")
# line plot of global active power over time (x-axis label suppressed)
plot(f2m$datetime, f2m$Global_active_power, xlab ="", ylab = "Global Active Power", type ="l")
#Closing the Graphic Device
dev.off()
b8d83f324bf0c59b359adfd1ca04c6bae857c69c | a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3 | /output/sources/authors/1564/NetworkAnalysis/lipschitz.R | 4d645579e7244e2e277baca6e6fb64ffe1a1d002 | [] | no_license | Irbis3/crantasticScrapper | 6b6d7596344115343cfd934d3902b85fbfdd7295 | 7ec91721565ae7c9e2d0e098598ed86e29375567 | refs/heads/master | 2020-03-09T04:03:51.955742 | 2018-04-16T09:41:39 | 2018-04-16T09:41:39 | 128,578,890 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 730 | r | lipschitz.R | na.DistanceOrdering <-
function(Dg,Dh){
   # Guard: the compiled routine assumes conformable matrices. identical()
   # handles dims of differing lengths safely (the previous sum(dim != dim)
   # comparison could silently recycle), and the message grammar is fixed.
   if(!identical(dim(Dg),dim(Dh))) stop("Dg and Dh do not have identical dimensions.")
   # Delegate the distance-ordering computation to the compiled C routine.
   out <- .Call("distance_ordering",PACKAGE="NetworkAnalysis",as.matrix(Dg),as.matrix(Dh))
   return(out)
}
#' Indirect paths of order k between a vertex pair (C backend).
#'
#' W: weighted adjacency matrix. Vpair: length-2 vector of 1-based vertex
#' indices. k: path order, must be at least 1. output: if "Summaries",
#' return c(mean, max, sum, sd) of the raw path vector; otherwise return
#' the raw vector produced by the compiled routine.
na.IndirectPaths <-
function(W,Vpair,k,output="Summaries"){
   # The stated contract is k >= 1; the original check (k == 0) let
   # negative orders reach the C code.
   if(k < 1) stop("Order of indirect paths should not be lesser than 1.")
   Vpair <- Vpair-1; # convert to 0-based indexing for the C routine
   out <- .Call("indirect_paths",PACKAGE="NetworkAnalysis",as.matrix(W),as.integer(Vpair),as.integer(k))
   # Return mean, max, sum and standard deviation of the path vector.
   # (The original comment said "Mean, Mode, Sum and variance", which did
   # not match the computed values.)
   if(output=="Summaries"){
      vec <- out
      out <- vector("numeric",4);
      out[1] <- mean(vec);
      out[2] <- max(vec);
      out[3] <- sum(vec);
      out[4] <- sd(vec);
   }
   return(out)
}
|
1ff9ef630141516cc679d1798ff63376c0bf4f2b | 6ed7a8aa75fb30997d1cc9ee73bc9bc07aafea6d | /man/degree_discounted_graph.Rd | cae4013136e84058e0c7344fc79d539d9a256758 | [] | no_license | androsova/seednet | 5b4d70758649f70d4c4d0393251397bcc760b0cb | 880c53dd95a5d1dab2ffb7faf2b6bd0596daca86 | refs/heads/master | 2020-03-27T17:48:33.087164 | 2018-08-31T11:23:37 | 2018-08-31T11:23:37 | 146,873,733 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,432 | rd | degree_discounted_graph.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seednet_functions.R
\name{degree_discounted_graph}
\alias{degree_discounted_graph}
\title{Degree-discounting of graph}
\usage{
degree_discounted_graph(graph, edge_attrib)
}
\arguments{
\item{graph}{An igraph graph with weights as edges attribute.}
\item{edge_attrib}{Name of the edge attribute with edges' weights.}
}
\value{
A degree-discounted graph. The edge weights in the degree-discounted
graph are normalized from 0 to 1.
}
\description{
This function reduces the weight of interactions (edges) proportionally
to the vertex degree. Degree-discounting was adapted from the notion of
reduced adjacency matrix in Laplacian matrix theory. This function requires
\code{graph} with weighted edges as an input.
}
\note{
This function throws an error if no edge attribute with the provided
\code{edge_attrib} name exists.
}
\examples{
graph <- igraph::make_tree(10, 4, mode = "undirected")
E(graph)$combined_score <- 1:igraph::ecount(graph)
degree_discounted_graph(graph, "combined_score")
}
\references{
Satuluri V. and Parthasarathy S. (2011) Symmetrizations
for clustering directed graphs. In Proceedings of the 14th International
Conference on Extending Database Technology (EDBT/ICDT '11), ACM, New York, NY,
USA, 343-354.
}
\seealso{
\url{https://en.wikipedia.org/wiki/Laplacian_matrix} for reduced
adjacency matrix in Laplacian matrix theory
}
|
52a8bb665186084d9ba127ac30a95fbfa5c6a4a9 | 5f7c2796b4c7b141e3e6583b2a56505e6674cde0 | /man/Get_L_inv_y.Rd | c3a3fbf7587e3c3ab65c06d3d612b23edc429097 | [] | no_license | cran/FastGaSP | cbd332807df530ce80dc0995eaab41136c87a75c | b23d2aada97af9417b8eaa815402f617ee337523 | refs/heads/master | 2021-09-08T07:24:55.023809 | 2021-09-02T22:20:08 | 2021-09-02T22:20:08 | 164,700,662 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,133 | rd | Get_L_inv_y.Rd | \name{Get_L_inv_y}
\alias{Get_L_inv_y}
\title{
vector of half of the sum of squares
}
\description{
This function computes the product of the inverse of the L matrix and the output vector, where L is the Cholesky factor of the correlation matrix. Instead of forming the Cholesky factor explicitly, the product is computed with the forward filtering algorithm.
}
\usage{
Get_L_inv_y(GG,VV,Q,K,output)
}
\arguments{
\item{GG}{a list of matrices defined in the dynamic linear model.
}
\item{VV}{a numerical value of the variance of the nugget parameter. }
%% ~~Describe \code{response} here~~
\item{Q}{a vector defined in the dynamic linear model. }
\item{K}{a matrix defined in the filtering algorithm for the dynamic linear model. }
\item{output}{a vector of output.}
}
\value{
A vector equal to the inverse of the L matrix multiplied by the output vector, where L is the Cholesky factor of the correlation matrix.
}
\references{
%% ~put references to the literature/web site here ~
Hartikainen, J. and Sarkka, S. (2010). \emph{Kalman filtering and smoothing solutions to temporal gaussian process regression models}. \emph{Machine Learning for Signal Processing (MLSP), 2010 IEEE International Workshop}, 379-384.
M. Gu, Y. Xu (2019), \emph{fast nonseparable gaussian stochastic process with application to methylation level interpolation}. \emph{Journal of Computational and Graphical Statistics}, In Press, arXiv:1711.11501.
Campagnoli P, Petris G, Petrone S. (2009), \emph{Dynamic linear model with R}. Springer-Verlag New York.
}
\author{
\packageAuthor{FastGaSP}
Maintainer: \packageMaintainer{FastGaSP}
}
\seealso{\code{\link{Get_C_R_K_Q}} for more details about Q vector and K matrix.
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\keyword{internal}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
d4a0c9eca8e130b33cfe2ff48bfb5fdf15269c9c | cca0e8eb35dc8d55cd2d45db7891d878be6747e6 | /tests/testthat/test-silent-message-error.R | 976245ab5177c78c933bdbc08eb364c4b783ef03 | [
"MIT"
] | permissive | phillydao/deidentify | dcae7346a7ea7db36ad24fb1ef82ee0a7dce8e34 | 61dfd8c6b94113913a06744634c3991deb3ae97a | refs/heads/master | 2023-02-25T03:18:05.332427 | 2021-02-03T18:47:51 | 2021-02-03T18:47:51 | 292,612,885 | 0 | 1 | NOASSERTION | 2021-02-03T18:47:52 | 2020-09-03T15:45:31 | R | UTF-8 | R | false | false | 2,313 | r | test-silent-message-error.R | test_that("messages work as expected", {
expect_message(make_k_score(example, columns = "groups"))
expect_message(make_k_score(example, columns = "groups"),
"Note: There are 3 groups with 3 or fewer observations.")
expect_message(make_k_score(example, minimum_k_score = 5, columns = "groups"),
"Note: There are 3 groups with 5 or fewer observations.")
})
# Input validation: both arguments are checked strictly, so any value that
# is not the expected scalar type/range should raise an error.
test_that("errors work as expected", {
# minimum_k_score must be a single number greater than zero; non-positive,
# non-scalar, NULL/NA, character, and data-frame inputs are all rejected.
expect_error(make_k_score(example, columns = "groups", quiet = TRUE,
minimum_k_score = 0))
expect_error(make_k_score(example, columns = "groups", quiet = TRUE,
minimum_k_score = -1))
expect_error(make_k_score(example, columns = "groups", quiet = TRUE,
minimum_k_score = 1:4))
expect_error(make_k_score(example, columns = "groups", quiet = TRUE,
minimum_k_score = NULL))
expect_error(make_k_score(example, columns = "groups", quiet = TRUE,
minimum_k_score = NA))
expect_error(make_k_score(example, columns = "groups", quiet = TRUE,
minimum_k_score = ""))
expect_error(make_k_score(example, columns = "groups", quiet = TRUE,
minimum_k_score = "test"))
expect_error(make_k_score(example, columns = "groups", quiet = TRUE,
minimum_k_score = mtcars))
expect_error(make_k_score(example, columns = "groups", quiet = TRUE,
minimum_k_score = mtcars$cyl))
# quiet must be a single TRUE/FALSE value; numbers, NULL/NA, strings,
# data frames, and logical vectors of length > 1 are all rejected.
expect_error(make_k_score(example, columns = "groups", quiet = 2))
expect_error(make_k_score(example, columns = "groups", quiet = NULL))
expect_error(make_k_score(example, columns = "groups", quiet = NA))
expect_error(make_k_score(example, columns = "groups", quiet = "test"))
expect_error(make_k_score(example, columns = "groups", quiet = ""))
expect_error(make_k_score(example, columns = "groups", quiet = mtcars))
expect_error(make_k_score(example, columns = "groups", quiet = mtcars$mpg))
expect_error(make_k_score(example, columns = "groups", quiet = -1))
expect_error(make_k_score(example, columns = "groups", quiet = 2:5))
expect_error(make_k_score(example, columns = "groups", quiet = c(TRUE, FALSE)))
})
|
d2864cc5316b4516d547fe19ecda94c71d501534 | 83e1c7cd2d90784105e52d9633c411c4036f2d0a | /PostWork7/PostWork7.R | 4e2ac3a6585105a1184fea539727048e2c77a358 | [] | no_license | panchis7u7/Bedu-Posworks | c364514d681a83afc9defb11946a0ddf64a73e59 | 18ba97af0534f81eab8beae5513958becde9829d | refs/heads/master | 2023-02-25T17:06:24.910564 | 2021-02-04T18:45:06 | 2021-02-04T18:45:06 | 330,039,041 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,578 | r | PostWork7.R | #Viridiana Escarzaga Solis
#Carlos Sebastián Madrigal Rodríguez
#Diego Armando Morales Corona
#Carlos Rodríguez Tenorio
#https://www.football-data.co.uk/mmz4281/1516/SP1.csv
#Install the library that lets us connect to MongoDB.
suppressMessages(suppressWarnings(install.packages("mongolite", dependencies = TRUE)))
#Load the mongolite library.
suppressMessages(suppressWarnings(library(mongolite)))
#The data set provided in datos.csv is imported and analyzed to learn
#about the contents of this file. First the working directory is changed
#to the location where the data set is stored; the data is then loaded
#into a data frame and its first rows are printed to learn more about
#the integrity of the data in the file.
#NOTE(review): mutate()/select() below come from dplyr, which is not
#attached in this script — confirm it is loaded elsewhere before running.
setwd("/media/panchis/ExtraHDD/EstadisticaRP/Sesion_7/PostWork7/")
data <- read.csv('data.csv')
head(data)
#Since this dataset does not contain the records we need (records from
#2015), we import the dataset with the matches from the year 2015.
data_extension <- read.csv("https://www.football-data.co.uk/mmz4281/1516/SP1.csv")
head(data_extension)
#Add a record ID column.
data_extension <- mutate(data_extension, X = row_number())
#Format the date.
data_extension$Date <- format(as.Date(data_extension$Date), "%d-%m-%Y")
#Complete the year (prepend the century).
data_extension$Date <- paste0("20", data_extension$Date)
data_extension <- mutate(data_extension, Fecha = as.Date(Date, "%m-%d-%Y"))
#Select the columns that correspond to the local file.
data_extension <- select(data_extension, X, Date, HomeTeam, AwayTeam, FTHG, FTAG, FTR)
#Combine the datasets.
final_data <- rbind(data, data_extension)
#Reset the row IDs.
final_data <- mutate(final_data, X = row_number())
#Now the columns match and everything was combined correctly.
head(final_data)
#Create the match_games database with the match collection in Mongo.
#Open the connection to Mongo.
coleccion = mongo("match", "match_games")
#Store the combined data in the collection.
coleccion$insert(final_data)
#Get the number of records in the database.
coleccion$count()
#Run the query:
#to find out how many goals Real Madrid scored on December 20, 2015,
#which team they played against, and whether they lost or it was a rout.
coleccion$find('{"Date":"2015-12-20", "HomeTeam":"Real Madrid"}')
#Real Madrid thrashed Rayo Vallecano! 10 - 2.
#Having run the query, we must close the connection to the
#database.
rm(coleccion)
|
e8ab8ed07f21cec7d360c6b5ccd69edde9150a04 | a5cd7045e10488bdc18d26c3d154b03f2ef2a4bb | /R/crssi_create_dnf_files.R | e1e529840f36ba2d95a29f7b490142e82d6f46ec | [] | no_license | BoulderCodeHub/CRSSIO | fae179cc18101e772e07504135ce4d5e39446027 | 0a6783f08b832ff63e499436e79be3845b9dbd39 | refs/heads/master | 2023-06-08T05:09:21.380436 | 2023-06-02T18:03:23 | 2023-06-02T18:03:23 | 30,599,142 | 4 | 2 | null | 2022-07-14T20:05:07 | 2015-02-10T15:39:19 | R | UTF-8 | R | false | false | 8,565 | r | crssi_create_dnf_files.R | #' Create CRSS Natural Flow Input Files From Historical Record
#'
#' `crssi_create_dnf_files()` creates the natural flow input files used by the
#' Colorado River Simulation System (CRSS) by applying the
#' [Index Sequential Method](http://onlinelibrary.wiley.com/doi/10.1111/j.1752-1688.1997.tb03557.x/abstract)
#' (ISM) to the historical natural flow record (or a subset of the historical
#' record). `crssi_create_dnf_files()` is a convenience function that wraps
#' the process of getting/formatting data ([crss_nf()] + [sac_year_type_get()] +
#' [crssi()]), applying ISM ([ism()]), changing the dates to CRSS' future start
#' year ([reindex()]), and then writing out the files ([write_crssi()]).
#'
#' **Input Data:** The input data can be read from the natural flow workbook
#' available at
#' \url{http://www.usbr.gov/lc/region/g4000/NaturalFlow/current.html}
#' or from the [CoRiverNF](https://github.com/BoulderCodeHub/CoRiverNF) data
#' package. It is much faster to use the data package rather than the Excel
#' workbook, and the files created from the two sources will match identically
#' if they are from the same natural flow release.
#'
#' **Other Files:** In addition to creating the natural flow input files, data
#' for two four other slots are also created. See [write_crssi()].
#'
#' **Scenario Number:** The scenario number in `crssi` objects provides the user
#' with a numeric representation of which supply scenario is used.
#' The observed historical natural flows (used by this
#' function) are supply scenario 1. For this supply scenario, the decimals of
#' the supply scenario number represent the start and end year that the ISM
#' method are applied to. For example, if you set `recordToUse` to
#' `c('1988-1','2012-12')`, the decimal portion will be 19882012, where the
#' first 4 numbers represent the start year and the second four numbers
#' represent the end year. The supply scenario slot will be set to 1.19882012 in
#' this example. This tells the user of CRSS that the supply scenario is the
#' observed historical natural flows with the ISM method applied to the
#' 1988-2012 data.
#'
#' @param iFile Either the string `"CoRiverNF"``, or the relative or absolute
#' path to the excel workbook. When "CoRiverNF" is used, the data from the
#' [CoRiverNF](https://github.com/BoulderCodeHub/CoRiverNF) data package is
#' used. Otherwise, it should be a valid path to the natural flow Excel
#' workbook.
#'
#' @param oFolder Path to the top level directory where the trace folders and
#' input files will be created. This folder should exist before using this
#' function.
#'
#' @param startYear The year to start the trace files in. Data will be trimmed
#' to start in this year.
#'
#' @param endYear The final year of data the trace files will contain.
#'
#' @param oFiles A matrix of the file names (input into CRSS). The default uses
#' [nf_file_names()]. This must be specified in the correct
#' order, i.e., the same order as the nodes in the input Excel file.
#'
#' @param recordToUse The start and end dates of the natural flow record to
#' perform ISM, if using something besides the full record. If it is `NA`, the
#' full record will be used. Otherwise, it should be a vector of length 2,
#' where the first entry is the start date and the second entry is the end
#' date. The vector should be of type [zoo::yearmon], and begin in January of
#' some year, and end in December of some year.
#'
#' @param overwriteFiles Boolean. Should existing files be overwritten.
#'
#' @return Invisibly returns `crssi` object.
#'
#' @examples
#'
#' \dontrun{
#' # will create 20 years for 25 traces based on the 1988-2012 record:
#' crssi_create_dnf_files("CoRiverNF",
#' "tmp",
#' startYear = 2017,
#' endYear = 2036,
#' recordToUse = zoo::as.yearmon(c('1988-1','2012-12'))
#' )
#'
#' # path to excel file
#' iFile <- 'user/docs/NaturalFlows1906-2012_withExtensions_1.8.15.xlsx'
#' # will create 50 years for 107 traces based on the full (1906-2012) record:
#' crssi_create_dnf_files(iFile,
#' 'NFSinput/',
#' startYear = 2015,
#' endYear = 2064
#' )
#'
#' # crssi_create_dnf_files() is a convenient wrapper around other functions
#' # in CRSSIO. The following should produce the same output. This would create
#' # the "stress test hydrology", which applies ISM to the 1988-present
#' # hydrology
#' # wrapper:
#' crssi_create_dnf_files(
#' "CoRiverNF",
#' "folder1/",
#' startYear = 2020,
#' endYear = 2024,
#' recordToUse = zoo::as.yearmon(c('1988-1','2018-12'))
#' )
#'
#' # individual steps:
#' # get data
#' flow <- CoRiverNF::monthlyInt["1988/2018"]
#' sac_yt <- sac_year_type_get()["1988/2018"]
#'
#' # create crssi object
#' nf <- crssi(crss_nf(flow), sac_yt, scen_number = 1.19882018)
#'
#' # apply ism
#' nf <- ism(nf, n_years_keep = 5)
#'
#' # change times to start in 2020
#' nf <- reindex(nf, 2020)
#'
#' # write the files
#' write_crssi(nf, "folder2/")
#'
#' }
#' @seealso [nf_file_names()], [crssi()], [write_crssi()]
#'
#' @export
crssi_create_dnf_files <- function(iFile,
                                   oFolder,
                                   startYear,
                                   endYear,
                                   oFiles = nf_file_names(),
                                   recordToUse = NA,
                                   overwriteFiles = FALSE)
{
  # validate input source; scalar condition, so use && (was vectorized &)
  if (tools::file_ext(iFile) != "xlsx" && iFile != "CoRiverNF")
    stop(iFile, " does not appear to be valid.\n",
         "It should be either an Excel (xlsx) file or 'CoRiverNF'")

  check_nf_oFolder(oFolder, overwriteFiles, "crssi_create_dnf_files")

  if (!anyNA(recordToUse))
    recordToUse_str <- check_recordToUse(recordToUse)

  # get nf data ------------------------------------------
  if (iFile == 'CoRiverNF') {
    # use the data in CoRiverNF::monthlyInt
    nf <- CoRiverNF::monthlyInt
    if (!anyNA(recordToUse)) {
      # if not NA, then trim data, otherwise use full data
      check_recordToUse_year2(recordToUse[2], nf)
      nf <- nf[paste(recordToUse_str[1], recordToUse_str[2],sep = '/')]
    } else {
      nf <- nf['1906-01/'] # trim off OND 1905
    }
  } else {
    # use the data in the Excel workbook, if it exists.
    if (!file.exists(iFile)) {
      stop('iFile does not exist')
    }
    nf <- read_and_format_nf_excel(iFile)
    if (!anyNA(recordToUse)) {
      check_recordToUse_year2(recordToUse[2], nf)
      # trim data
      nf <- nf[paste(recordToUse_str[1], recordToUse_str[2],sep = '/')]
    }
  }

  # convert nf to crss_nf
  nf <- crss_nf(nf)

  # set scenario number --------------------
  # get the years used before changing nf
  if (!anyNA(recordToUse)) {
    y1 <- year(recordToUse[1], numeric = TRUE)
    y2 <- year(recordToUse[2], numeric = TRUE)
    periodToUse <- paste0(y1, '-', y2)
    # this only deals with historical observed NF, so that is supply scenario
    # 1.xxxxxxxx, where the .xxxxxxxx are the beginning and ending years used
    # for ISM
    supplyScenario <- as.numeric(paste0(1,'.',y1,y2))
  } else {
    # uses the full record, so it's 1906 - some year. figure out some year
    y1 <- 1906
    y2 <- year(end(nf), numeric = TRUE)
    periodToUse <- paste0(y1, '-', y2)
    supplyScenario <- as.numeric(paste0('1.1906',y2))
  }

  # the simulation cannot be longer than the record ISM is applied to
  simYrs <- endYear - startYear + 1
  assert_that(
    simYrs <= (y2 - y1 + 1),
    # typo fix: "speciifed" -> "specified"
    msg = "endYear-startYear+1 should be <= the length specified in recordToUse."
  )

  # get sac_yt_data -------------------------------------
  sac_yt <- sac_year_type_get()
  # trim to correct years
  if (!anyNA(recordToUse)) {
    check_recordToUse_year2(recordToUse[2], sac_yt)
    # trim data
    sac_yt <- sac_yt[paste(recordToUse_str[1], recordToUse_str[2], sep = '/')]
  }

  # create crssi ----------------------------------------
  scen_name <- paste0("ISM applied to ", y1, "-", y2, " historical hydrology.")
  nf <- crssi(nf, sac_yt, supplyScenario, scen_name)

  # reindex to start in specified year ------------------
  nf <- reindex(nf, startYear)

  # ism and trim ----------------------------------------
  nf <- ism(nf, n_years_keep = simYrs)

  # save files ------------------------------------------
  write_crssi(nf, path = oFolder, file_names = oFiles,
              overwrite = overwriteFiles, readme = FALSE)
  # create the README
  write_nf_readme(
    get_dnf_readme_vals(iFile, startYear, endYear, periodToUse),
    oFolder = oFolder
  )
  invisible(nf)
}
|
ba8c2dea6200f67e29482732eec3fd602116bc6b | aba7a5f337376a3759b75771d0fed43aba8f1506 | /TMDbfromR/man/print.tmdb_api.Rd | c1d515de20958fc993732001eb225075174e1fd1 | [] | no_license | 3inapril/tmdb-package | be56ca6f44dc363dc35b99c8cf4a9b7491bf2c2f | cd4ae1fa23d613a04cc899a58348f4641928a617 | refs/heads/master | 2021-05-03T10:51:53.450310 | 2016-10-05T22:13:14 | 2016-10-05T22:13:14 | 69,840,280 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 667 | rd | print.tmdb_api.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/common_functions.R
\name{print.tmdb_api}
\alias{print.tmdb_api}
\title{Print object tmdb_api}
\usage{
print.tmdb_api(rst) or rst
}
\arguments{
\item{rst}{The returned object of function get_result() or get_result_general().
rst should be of class \code{tmdb_api}}
}
\value{
An object of class \code{tmdb_api}.
}
\description{
This function controls the output when users print the response object
}
\examples{
\dontrun{
url <- 'https://api.themoviedb.org/3/discover/movie?api_key={Put_API_Key_Here}&year=2011&with_genres=12,18&language=en-US'
result <- get_result_general(url)
result
}
}
|
f9cdf718addcb400e48c0702e6970399c3fcb16d | 239986245337e040e6dc5198b73b7e81f75863c1 | /fake data.R | 9d7667b779725f831cf58220c0202b6a74cf3190 | [] | no_license | drvalle1/github_SBM_gamma | 579984832c9b25455a791698e8bb24d7f9b7a2ca | 2d141ec6c10a12f7b6508ed73920174128f611c1 | refs/heads/master | 2020-04-16T04:16:10.205823 | 2019-02-27T16:45:34 | 2019-02-27T16:45:34 | 165,260,829 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 897 | r | fake data.R | rm(list=ls(all=TRUE))
# Simulate a presence/absence matrix from a stochastic block model (SBM):
# locations and species each belong to latent groups, and the probability
# that species j occurs at location i depends only on their group labels.
# NOTE: statement order matters here — the RNG stream determines the output.
set.seed(3)
setwd('U:\\GIT_models\\github_SBM_gamma')
# number of location groups and species groups
ngroup.loc=5
ngroup.spp=3
#get parameters
# theta: location-group membership probabilities (normalized to sum to 1)
tmp=runif(ngroup.loc)
theta.true=theta=tmp/sum(tmp)
# phi: species-group membership probabilities (normalized to sum to 1)
tmp=runif(ngroup.spp)
phi.true=phi=tmp/sum(tmp)
set.seed(4)
# psi[g, h]: probability that a species in group h is present at a
# location in group g
psi=matrix(c(0.05,0.5,0.95,
0.5,0.05,0.95,
0.05,0.95,0.5,
0.5,0.95,0.05,
0.1,0.5,0.05),ngroup.loc,ngroup.spp,byrow=T)
psi.true=psi
#get latent variables
nloc=1000
# draw how many locations fall in each group, then expand into labels z
tmp=rmultinom(1,size=nloc,prob=theta)
tmp1=rep(1:ngroup.loc,times=tmp)
z.true=z=tmp1 #if not scrambled
# z=sample(tmp1,nind);
nspp=50
# same for species: group sizes, then labels w
tmp=rmultinom(1,size=nspp,prob=phi)
tmp1=rep(1:ngroup.spp,times=tmp)
w.true=w=tmp1 #if not scrambled
# w=sample(tmp1,nquest)
#generate data
# y[i, j] ~ Bernoulli(psi[z[i], w[j]])
y=matrix(NA,nloc,nspp)
for (i in 1:nloc){
for (j in 1:nspp){
y[i,j]=rbinom(1,size=1,prob=psi[z[i],w[j]])
}
}
image(y)
write.csv(y,'fake data.csv',row.names=F)
86094db492c3e1df8c82f2adbcb5cbd311949179 | 80608c38040fb865d6d743d8deb015ccd6db3446 | /Forecast - Index and Future.R | 4105086f2c03bb144561dba8252f141f2527d4c2 | [] | no_license | Quynh0420/TimeSeriesAnalysisProject | 06b0cfc84b3d60258d6aa9456c9dde9fe9d5a743 | 823d70369b68359b8bd6ef9404ef2438cfa77822 | refs/heads/master | 2020-12-05T17:51:23.397442 | 2020-01-06T22:33:45 | 2020-01-06T22:33:45 | 232,196,301 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 45,731 | r | Forecast - Index and Future.R | ########################################################
# Home Project - Time Series Analysis
# Student: QUYNH BUI - 393519
# Lecturer: Dr. Piotr Wójcik
# Faculty of Economic Sciences, University of Warsaw
#
########################################################
# Set the working directory to the project folder.
# NOTE(review): absolute, machine-specific path -- adjust before re-running.
setwd("D:\\Jeans Witch\\Studies\\MA in Quantitative Finance\\Sem 2\\Time Series Analysis\\Lab assessment\\TSA Home Project - QUYNH BUI (393519) - NGUYEN VO (393518)")
# Packages: xts (time series objects), vars/MSBVAR (VAR tools, granger.test),
# forecast (Arima/auto.arima/forecast), lmtest (coeftest),
# urca/fUnitRoots (unit-root tests), fBasics and quantmod (auxiliary tools).
library(xts)
library(vars)
library(forecast)
library(lmtest)
library(urca)
library(MSBVAR)
library(fBasics)
library(quantmod)
library(fUnitRoots)
# Helper functions provided with the course materials:
# testdf() - sequence of ADF tests with increasing augmentations,
# plot_ACF_PACF_resids() - ACF/PACF plots of model residuals.
source("function_testdf.R")
source("function_plot_ACF_PACF_resids.R")
# penalty on scientific notation (larger scipen = fewer 1e+xx style numbers)
options(scipen = 20)
# importing data: loads the xts object `data1.xts` with columns
# `index` (spot index) and `index.F` (index futures)
load("data1.xts.RData")
head(data1.xts)
tail(data1.xts)
# creating first differences of variables (needed for integration testing)
data1.xts$dindex <- diff.xts(data1.xts$index)
data1.xts$dindex.F <- diff.xts(data1.xts$index.F)
# plotting variables on the graph (side effects only: draws three charts)
# spot index together with the futures series on a single panel
plot(data1.xts[,c(1,2)],
     col = c("black", "blue"),
     major.ticks = "years",
     grid.ticks.on = "months",
     grid.ticks.lty = 3,
     main = "Index and Future Index",
     legend.loc = "topleft")
# index in levels vs its first differences, each in its own panel
plot(data1.xts[,c(1,3)],
     # plot data in two panels (each column separately)
     multi.panel = 2,
     main = "Original and differenced data for Index",
     col = c("darkblue", "darkgreen"),
     major.ticks = "years",
     grid.ticks.on = "months",
     grid.ticks.lty = 3,
     # use TRUE/FALSE instead of the re-assignable shorthand T/F
     yaxis.same = FALSE, # otherwise scale is the same for each column!
     cex = 1)
# futures in levels vs its first differences, each in its own panel
plot(data1.xts[,c(2,4)],
     # plot data in two panels (each column separately)
     multi.panel = 2,
     main = "Original and differenced data for Future",
     col = c("darkgrey", "orange"),
     major.ticks = "years",
     grid.ticks.on = "months",
     grid.ticks.lty = 3,
     yaxis.same = FALSE, # otherwise scale is the same for each column!
     cex = 1)
# testing integration order with ADF tests
# (testdf() is sourced from function_testdf.R; presumably it reports the test
# for 0..max.augmentations augmentation lags -- see that file for details)
testdf(variable = data1.xts$index,
       max.augmentations = 3)
# p-value is large, so we cannot reject the null about Non-Stationary
testdf(variable = data1.xts$dindex,
       max.augmentations = 3)
# p-value is small, so we reject the null about Non-stationary
# it is stationary at 1st order with 0 augmentations (adf = -22.00)
testdf(variable = data1.xts$index.F,
       max.augmentations = 3)
# p-value is large, so we cannot reject the null about Non-Stationary
testdf(variable = data1.xts$dindex.F,
       max.augmentations = 3)
# p-value is small, so we reject the null about Non-stationary
# it is stationary at 1st order with 0 augmentations (adf = -21.73)
# Both variables are I(1) so we can check
# whether they are cointegrated.
# Estimating cointegrating vector
# (Engle-Granger step 1: static OLS regression of futures on the spot index)
model.coint <- lm(index.F ~ index, data = data1.xts)
summary(model.coint)
# Testing stationarity of residuals (Engle-Granger step 2)
testdf(variable = residuals(model.coint),
       max.augmentations = 3)
# The ADF test with 1 augmentation can be used
# its result is that non-stationarity of residuals
# is STRONGLY REJECTED, so residuals are stationary,
# which means that index and future index are cointegrated
# The cointegrating vector is [1, - 13.044676, -0.825113]
# which defines the cointegrating relationship as:
# 1 * index.F - 13.044676 - 0.825113 * index
# Creating first lags of residuals (the error-correction term)
# and adding it to the dataset
data1.xts$lresid <- lag.xts(residuals(model.coint))
# Estimating ECM: short-run dynamics plus the lagged equilibrium error
model.ecm <- lm(dindex.F ~ dindex + lresid - 1,
                # -1 means a model without a constant
                data = data1.xts)
summary(model.ecm)
# How would you interpret results of the model above?
# parameter 0.814743 describes a short term relationship
# between future index and index, so if index increases by 1,
# future index in the SHORT RUN will increase by 0.814743
# the long run relationship is described by the parameter
# 0.825113 from the cointegrating relationship:
# so if index increases by 1 in the LONG RUN future index will
# increase by 0.825113
# -0.236247 is the adjustment coefficient
# its sign is negative (as expected)
# its value means that 23.6% of the unexpected
# error (increase in gap) will be corrected
# in the next period, so any unexpected deviation
# should be corrected finally within about 4.25 periods (1/23.6%)
################################################################################
# index_future - Granger causality test
################################################################################
# Now let's check, whether index Granger causes future and vice versa.
# What is the proper lag length in this case?
# (the test is repeated for several lag choices to check robustness)
# 3 lags
granger.test(data1.xts[,1:2], # set of variables tested
             3) # lag assumed
# 4 lags
granger.test(data1.xts[,1:2], 4)
# 5 lags
granger.test(data1.xts[,1:2], 5)
# 12 lags
granger.test(data1.xts[,1:2], 12)
# What is the conclusion?
# At 5% significance level we DO NOT have so called
# 'bi-directional feedback' in all cases
# it means there is no Granger causality between index and future index
######################################################################
# ARIMA FOR INDEX FORECAST
# below we apply the Box-Jenkins procedure
#######################################################################
# step 1. INITIAL IDENTIFICATION of parameters p and q
# lets see ACF and PACF of the DIFFERENCED (stationary) series,
# consistent with d = 1 in the ARIMA models below
# ACF and PACF are calculated up to 36th lag
par(mfrow = c(2, 1))
Acf(data1.xts$dindex,
    lag.max = 36, # max lag for ACF
    ylim = c(-0.1, 0.1), # limits for the y axis - we give c(min,max)
    lwd = 5, # line width
    col = "dark green",
    na.action = na.pass) # do not stop if there are missing values in the data
Pacf(data1.xts$dindex,
     lag.max = 36,
     lwd = 5, col = "dark green",
     na.action = na.pass)
par(mfrow = c(1, 1)) # we restore the original single panel
# ACF and PACF suggest that maybe ARIMA (3,1,3) could be
# a sensible model for the Index, probably without
# lag 1 for AR and MA
# lets compare different models with AIC criteria
#######################################################################
# steps 2 and 3 interchangeably. MODEL ESTIMATION and DIAGNOSTICS
###############################################################################
# lets start with ARIMA(1,1,1)
# we are using Arima function from the forecast package
arima111.index <- Arima(data1.xts$index, # variable
                        order = c(1, 1, 1), # (p,d,q) parameters
                        include.constant = TRUE) # including a constant
# a constant for a model with d = 1 is reported
# as a drift parameter
coeftest(arima111.index)
# it is statistically significant here at 5% level except for the drift
summary(arima111.index)
# maybe we should try to run a model without the drift
# (arima111.index is deliberately overwritten by the restricted model)
arima111.index <- Arima(data1.xts$index, # variable
                        order = c(1, 1, 1)) # (p,d,q) parameters
coeftest(arima111.index)
# are residuals of arima111 model white noise?
# resid() function applied to model results returns residuals
plot(resid(arima111.index))
# lets check ACF and PACF
plot_ACF_PACF_resids(arima111.index)
# no lags are significant
# Ljung-Box test (for a maximum of 10 lags)
Box.test(resid(arima111.index),
         type = "Ljung-Box", lag = 10)
# at 5% we cannot reject the null about residuals being
# white noise, p-value is very high!
###############################################################################
# lets try ARIMA(3,1,3) (with a constant, reported as drift since d = 1)
arima313.index <- Arima(data1.xts$index,
                        order = c(3, 1, 3),
                        include.constant = TRUE)
coeftest(arima313.index)
# No lags are significant
summary(arima313.index)
# Ljung-Box test for autocorrelation of model residuals
Box.test(resid(arima313.index),
         type = "Ljung-Box", lag = 10)
# null cannot be rejected - p-value very high!
# Plot ACF and PACF for residuals
plot_ACF_PACF_resids(arima313.index)
# lags of ACF and PACF are not significant
###############################################################################
# lets try ARIMA(3,1,3) model without lag 1 and drift
# fixed = pins ar1 and ma1 to 0; NA leaves a parameter free
# (parameter order: ar1, ar2, ar3, ma1, ma2, ma3 - check with coefficients())
arima313_3.index <- Arima(data1.xts$index,
                          order = c(3, 1, 3),
                          fixed = c(0, NA, NA,
                                    0, NA, NA))
coeftest(arima313_3.index)
# Still, no lags are significant
# lets check if residuals are white noise
plot_ACF_PACF_resids(arima313_3.index)
# Ljung-Box test
Box.test(resid(arima313_3.index),
         type = "Ljung-Box", lag = 10)
# the null cannot be rejected
# residuals are white noise
###############################################################################
# lets compare AIC for all models estimated so far
# CAUTION! for some of them residuals are not white noise!
# Based on AIC which model is best?
AIC(arima111.index, arima313.index, arima313_3.index)
# arima313 without lag 1 and drift, but arima111 is also quite near
# The lower AIC, the better
# lets do the same for BIC
BIC(arima111.index, arima313.index, arima313_3.index)
# arima111,
# the lower BIC, the better
# from the perspective of sensibility arima111 and arima313_3 seems
# to be the best (residuals are white noise and low IC,
# but arima111 has all terms significant
# while arima313 has no terms significant
# there is also a way to automatically find the best model
arima.best.AIC.index <- auto.arima(data1.xts$index,
                                   d = 1, # parameter d of ARIMA model
                                   max.p = 5, # Maximum value of p
                                   max.q = 5, # Maximum value of q
                                   max.order = 10, # maximum p+q
                                   start.p = 1, # Starting value of p in stepwise procedure
                                   start.q = 1, # Starting value of q in stepwise procedure
                                   ic = "aic", # Information criterion to be used in model selection.
                                   stepwise = FALSE, # if FALSE considers all models
                                   allowdrift = TRUE, # include a constant
                                   trace = TRUE) # show summary of all models considered
# however it does not remove intermediate lags
# the result might be surprising
coeftest(arima.best.AIC.index)
# ARIMA(4,1,0)
# however lag 1 and 4 seem insignificant
AIC(arima.best.AIC.index, arima111.index, arima313.index, arima313_3.index)
# AIC better than for the best manually selected model
BIC(arima.best.AIC.index, arima111.index, arima313.index, arima313_3.index)
# BIC worse than for the best manually selected model
# Ljung-Box test
Box.test(resid(arima.best.AIC.index),
         type = "Ljung-Box", lag = 10)
# residuals are also white noise (p-value is very high!)
# but the automated procedure does not
# exclude intermediate lags
# the same automated search, but based on BIC
arima.best.BIC.index <- auto.arima(data1.xts$index,
                                   d = 1, # parameter d of ARIMA model
                                   max.p = 5, # Maximum value of p
                                   max.q = 5, # Maximum value of q
                                   max.order = 10, # maximum p+q
                                   start.p = 1, # Starting value of p in stepwise procedure
                                   start.q = 1, # Starting value of q in stepwise procedure
                                   ic = "bic", # Information criterion to be used in model selection.
                                   stepwise = FALSE, # if FALSE considers all models
                                   allowdrift = TRUE, # include a constant
                                   trace = TRUE) # show summary of all models considered
coeftest(arima.best.BIC.index)
# ARIMA(0,1,0) without a constant
BIC(arima.best.BIC.index, arima.best.AIC.index, arima111.index)
# Ljung-Box test
Box.test(resid(arima.best.BIC.index),
         type = "Ljung-Box", lag = 10)
# the model selected based on BIC has almost
# autocorrelated residuals
# so should not be considered as complete
# automated procedure is not necessarily better
# than step-by-step human approach
# Lets finally decide to select 3 models:
# arima111 - sensible, manually selected based on BIC
# arima313_3 - sensible, manually selected based on AIC
# ARIMA(4,1,0) - automated selection based on AIC
# for further comparisons (forecasts)
# these models have:
# - lowest information criteria (AIC or BIC)
# - their residuals are white noise
# - (almost) all parameters significant
# We will finally use these models for forecasting
#######################################################################
# FORECAST for INDEX - model arima111
# create shorter sample: data up to 2018-03-16 without column 5 (lresid);
# the remaining last 5 observations (see below) are kept as the
# out-of-sample evaluation period
index_future <- data1.xts["/2018-03-16", -5]
tail(index_future)
# estimate the model on shorter sample
arima111s.index <- Arima(index_future$index, # variable
                         order = c(1,1,1)) # (p,d,q) parameters
coeftest(arima111s.index)
# lets make a prediction - 5 periods ahead with a 95% interval
# (assignment changed from `=` to `<-` for consistency with the file)
forecast111.index <- forecast::forecast(arima111s.index, h = 5,
                                        level = 0.95)
# lets see the result
forecast111.index
# the forecasts are indexed with
# an observation number, not a date!
# to reach the point forecast (the first column)
# one needs to use:
forecast111.index$mean
# as.numeric() allows to convert it
# to a simple numeric vector
as.numeric(forecast111.index$mean)
# if we want to easily put together both real data
# and the forecast on the plot, we have to convert
# both to ts or both to xts objects
# xts objects are more convenient and modern
# we need a data.frame with data
# and index of dates to create an xts object
forecast111.index_data <- data.frame(f111.index_mean = as.numeric(forecast111.index$mean),
                                     f111.index_lower = as.numeric(forecast111.index$lower),
                                     f111.index_upper = as.numeric(forecast111.index$upper))
head(forecast111.index_data)
# now it is a data frame
# to create an xts object we reuse the last 5 dates
# from the original dataset data1.xts as the time index
forecast111.index_xts <- xts(forecast111.index_data,
                             # last 5 values of date
                             # from the original dataset
                             tail(index(data1.xts), 5))
forecast111.index_xts
# we can put it together with the original data
Index_111 <- merge(data1.xts[,1], forecast111.index_xts)
head(Index_111)
tail(Index_111, 10)
# lets finally plot the figure with the forecast
# and original series
plot(Index_111["2018",],
     major.ticks = "weeks",
     grid.ticks.on = "weeks",
     grid.ticks.lty = 3,
     main = "5 weeks forecast of Index - ARIMA111",
     col = c("black", "blue", "red", "red"))
# checking forecast quality
# for simplicity of the formulas
# lets define two new objects:
# real values - last 5 observations
Index.r <- tail(data1.xts$index, 5)
# forecasts
Index.f.111 <- as.numeric(forecast111.index$mean)
# put it together into a data.frame
Index_forecast_111 <- data.frame(Index.r, Index.f.111)
Index_forecast_111
# lets add the basis for different measures of the forecast error
# (absolute and percentage errors; `<-` used instead of `=`)
Index_forecast_111$mae.index.111 <- abs(Index.r - Index.f.111)
Index_forecast_111$mape.index.111 <- abs((Index.r - Index.f.111)/Index.r)
Index_forecast_111
# export the error table to a CSV file
write.csv(Index_forecast_111, file = "Index_forecast_111.csv")
# and calculate the average MAE and MAPE
# over the forecast period
colMeans(Index_forecast_111[,3:4])
#######################################################################
# FORECAST for INDEX - model arima313_3 (without lag 1 and drift)
# estimate the model on shorter sample
arima313_3s.index <- Arima(index_future$index, # variable
                           order = c(3,1,3), # (p,d,q) parameters
                           fixed = c(0, NA, NA,
                                     0, NA,NA))
coeftest(arima313_3s.index)
# lets make a prediction (`<-` instead of `=` for assignment)
forecast313_3.index <- forecast::forecast(arima313_3s.index,
                                          h = 5, level = 0.95)
# there were 2 functions "forecast" in 2 different packages, so not to be confused,
# we should write "Package name :: function name"
# lets see the result
forecast313_3.index
# the forecasts are indexed with
# an observation number, not a date!
# to reach the point forecast (the first column)
# one needs to use:
forecast313_3.index$mean
# as.numeric() allows to convert it
# to a simple numeric vector
as.numeric(forecast313_3.index$mean)
# if we want to easily put together both real data
# and the forecast on the plot, we have to convert
# both to ts or both to xts objects
# xts objects are more convenient and modern
# we need a data.frame with data
# and index of dates to create an xts object
# (BUGFIX: column name typo f3131_3.index_lower -> f313_3.index_lower)
forecast313_3.index_data <- data.frame(f313_3.index_mean = as.numeric(forecast313_3.index$mean),
                                       f313_3.index_lower = as.numeric(forecast313_3.index$lower),
                                       f313_3.index_upper = as.numeric(forecast313_3.index$upper))
head(forecast313_3.index_data)
# now it is a data frame
# to create an xts object we reuse the last 5 dates
# from the original dataset data1.xts as the time index
forecast313_3.index_xts <- xts(forecast313_3.index_data,
                               # last 5 values of date
                               # from the original dataset
                               tail(index(data1.xts), 5))
forecast313_3.index_xts
# we can put it together with the original data
Index_313_3 <- merge(data1.xts[,1], forecast313_3.index_xts)
head(Index_313_3)
tail(Index_313_3, 10)
# lets finally plot the figure with the forecast
# and original series
plot(Index_313_3["2018",],
     major.ticks = "weeks",
     grid.ticks.on = "weeks",
     grid.ticks.lty = 3,
     main = "5 weeks forecast of Index - ARIMA313 without lags 1 and drift",
     col = c("black", "blue", "red", "red"))
# checking forecast quality
# for simplicity of the formulas
# lets define two new objects:
# real values - last 5 observations
Index.r <- tail(data1.xts$index, 5)
# forecasts
Index.f.313_3 <- as.numeric(forecast313_3.index$mean)
# put it together into a data.frame
Index_forecast_313_3 <- data.frame(Index.r, Index.f.313_3)
Index_forecast_313_3
# lets add the basis for different measures of the forecast error
# (absolute and percentage errors; `<-` used instead of `=`)
Index_forecast_313_3$mae.index.313_3 <- abs(Index.r - Index.f.313_3)
Index_forecast_313_3$mape.index.313_3 <- abs((Index.r - Index.f.313_3)/Index.r)
Index_forecast_313_3
# export the error table to a CSV file
write.csv(Index_forecast_313_3, file = "Index_forecast_313_3.csv")
# and calculate the average MAE and MAPE
# over the forecast period
colMeans(Index_forecast_313_3[,3:4])
#######################################################################
# FORECAST for INDEX - model arima410 (auto.arima choice based on AIC)
# estimate the model on shorter sample
arima410s.index <- Arima(index_future$index, # variable
                         order = c(4,1,0)) # (p,d,q) parameters
coeftest(arima410s.index)
# lets make a prediction (`<-` instead of `=` for assignment)
forecast410.index <- forecast::forecast(arima410s.index,
                                        h = 5, level = 0.95)
# lets see the result
forecast410.index
# the forecasts are indexed with
# an observation number, not a date!
# to reach the point forecast (the first column)
# one needs to use:
forecast410.index$mean
# as.numeric() allows to convert it
# to a simple numeric vector
as.numeric(forecast410.index$mean)
# if we want to easily put together both real data
# and the forecast on the plot, we have to convert
# both to ts or both to xts objects
# xts objects are more convenient and modern
# we need a data.frame with data
# and index of dates to create an xts object
forecast410.index_data <- data.frame(f410.index_mean = as.numeric(forecast410.index$mean),
                                     f410.index_lower = as.numeric(forecast410.index$lower),
                                     f410.index_upper = as.numeric(forecast410.index$upper))
head(forecast410.index_data)
# now it is a data frame
# to create an xts object we reuse the last 5 dates
# from the original dataset data1.xts as the time index
forecast410.index_xts <- xts(forecast410.index_data,
                             # last 5 values of date
                             # from the original dataset
                             tail(index(data1.xts), 5))
forecast410.index_xts
# we can put it together with the original data
Index_410 <- merge(data1.xts[,1], forecast410.index_xts)
head(Index_410)
tail(Index_410, 10)
# lets finally plot the figure with the forecast
# and original series
plot(Index_410["2018",],
     major.ticks = "weeks",
     grid.ticks.on = "weeks",
     grid.ticks.lty = 3,
     main = "5 weeks forecast of Index - ARIMA410",
     col = c("black", "blue", "red", "red"))
# checking forecast quality
# for simplicity of the formulas
# lets define two new objects:
# real values - last 5 observations
Index.r <- tail(data1.xts$index, 5)
# forecasts
Index.f.410 <- as.numeric(forecast410.index$mean)
# put it together into a data.frame
Index_forecast_410 <- data.frame(Index.r, Index.f.410)
Index_forecast_410
# lets add the basis for different measures of the forecast error
# (absolute and percentage errors; `<-` used instead of `=`)
Index_forecast_410$mae.index.410 <- abs(Index.r - Index.f.410)
Index_forecast_410$mape.index.410 <- abs((Index.r - Index.f.410)/Index.r)
Index_forecast_410
# export the error table to a CSV file
write.csv(Index_forecast_410, file = "Index_forecast_410.csv")
# and calculate the average MAE and MAPE
# over the forecast period
colMeans(Index_forecast_410[,3:4])
###########################################################################################
# Plot the forecast of all 3 models on 1 graph
# (actual 2018 index with its ARIMA111 point forecast, plus the point
# forecasts of the two remaining models taken from the merged objects above)
Forecast_ARIMA_Index <- merge(Index_111["2018",c(1,2)],
                              Index_313_3["2018",2], Index_410["2018",2])
write.csv(Forecast_ARIMA_Index, file = "Forecast_ARIMA_Index.csv")
plot(Forecast_ARIMA_Index,
     major.ticks = "weeks",
     grid.ticks.on = "weeks",
     grid.ticks.lty = 3,
     main = "Forecast of 3 ARIMA models for Index",
     col = c("red", "darkgreen", "darkblue", "gray"),
     legend.loc = "topright")
# compare the results of MAE and MAPE for all 3 models, ARIMA111 proved to be
# the best model for forecast
# Let's see the MAE and MAPE of 3 models again
# the lower, the better
colMeans(Index_forecast_111[,3:4])
colMeans(Index_forecast_313_3[,3:4])
colMeans(Index_forecast_410[,3:4])
######################################################################
# ARIMA FOR FUTURE FORECAST
# below we apply the Box-Jenkins procedure
#######################################################################
# step 1. INITIAL IDENTIFICATION of parameters p and q
# lets see ACF and PACF of the DIFFERENCED futures series (d = 1)
# ACF and PACF are calculated up to 36th lag
par(mfrow = c(2, 1))
Acf(data1.xts$dindex.F,
    lag.max = 36, # max lag for ACF
    ylim = c(-0.1, 0.1), # limits for the y axis - we give c(min,max)
    lwd = 5, # line width
    col = "dark green",
    na.action = na.pass) # do not stop if there are missing values in the data
Pacf(data1.xts$dindex.F,
     lag.max = 36,
     lwd = 5, col = "dark green",
     na.action = na.pass)
par(mfrow = c(1, 1)) # we restore the original single panel
# ACF and PACF suggest that maybe ARIMA (3,1,3) could be
# a sensible model for the Future, probably without lag 1
# lets compare different models with AIC criteria
###############################################################################
# steps 2 and 3 interchangeably. MODEL ESTIMATION and DIAGNOSTICS
###############################################################################
# lets start with ARIMA(1,1,1)
# we are using Arima function from the forecast package
arima111.Future <- Arima(data1.xts$index.F, # variable
                         order = c(1, 1, 1), # (p,d,q) parameters
                         include.constant = TRUE) # including a constant
# a constant for a model with d = 1 is reported
# as a drift parameter
coeftest(arima111.Future)
# it is statistically significant here at 5% level except for the drift
summary(arima111.Future)
# maybe we should try to run a model without the drift
# (arima111.Future is deliberately overwritten by the restricted model)
arima111.Future <- Arima(data1.xts$index.F, # variable
                         order = c(1, 1, 1)) # (p,d,q) parameters
coeftest(arima111.Future)
# are residuals of arima111 model white noise?
# resid() function applied to model results returns residuals
plot(resid(arima111.Future))
# lets check ACF and PACF
plot_ACF_PACF_resids(arima111.Future)
# no lags are significant
# Ljung-Box test (for a maximum of 10 lags)
Box.test(resid(arima111.Future),
         type = "Ljung-Box", lag = 10)
# at 5% we cannot reject the null about residuals being
# white noise, p-value is very high!
###############################################################################
# lets try ARIMA(3,1,3) without the drift
arima313.Future <- Arima(data1.xts$index.F,
                         order = c(3, 1, 3))
coeftest(arima313.Future)
# no terms are significant
summary(arima313.Future)
# Ljung-Box test for autocorrelation of model residuals
Box.test(resid(arima313.Future),
         type = "Ljung-Box", lag = 10)
# null cannot be rejected - p-value very high!
# Plot ACF and PACF for residuals
plot_ACF_PACF_resids(arima313.Future)
# lags of ACF and PACF are not significant
###############################################################################
# lets try ARIMA(3,1,3) model without lag 1 and drift
# intermediate lags can be set to 0 by using the
# fixed= argument
# CAUTION! The order of parameters can be checked by:
coefficients(arima313.Future)
# lets add restrictions on ar1 and ma1 (were not significant,
# so we assume ar1 = 0 and ma1 = 0)
# CAUTION! vector provided in fixed = must have length
# equal to the number of parameters!
arima313_1.Future <- Arima(data1.xts$index.F,
                           order = c(3, 1, 3),
                           fixed = c(0, NA, NA, # vector of length
                                     0, NA, NA)) # equal to total number of parameters,
# NA means no restriction on a parameter
coeftest(arima313_1.Future)
# Still, no terms are significant here
# lets check if residuals are white noise
plot_ACF_PACF_resids(arima313_1.Future)
# Ljung-Box test
Box.test(resid(arima313_1.Future),
         type = "Ljung-Box", lag = 10)
# the null cannot be rejected (p-value very high!)
# residuals are white noise
###############################################################################
# lets compare AIC for all models estimated so far
# CAUTION! for some of them residuals are not white noise!
# Based on AIC which model is best?
AIC(arima111.Future, arima313.Future, arima313_1.Future)
# arima313 without lag 1 and drift, but arima111 is also very near
# The lower AIC, the better
# lets do the same for BIC
BIC(arima111.Future, arima313.Future, arima313_1.Future)
# arima111,
# the lower BIC, the better
# from the perspective of sensibility arima111 seems
# to be the best (all terms significant, residuals
# are white noise and low IC)
# there is also a way to automatically find the best model
arima.best.AIC.Future <- auto.arima(data1.xts$index.F,
                                    d = 1, # parameter d of ARIMA model
                                    max.p = 5, # Maximum value of p
                                    max.q = 5, # Maximum value of q
                                    max.order = 10, # maximum p+q
                                    start.p = 1, # Starting value of p in stepwise procedure
                                    start.q = 1, # Starting value of q in stepwise procedure
                                    ic = "aic", # Information criterion to be used in model selection.
                                    stepwise = FALSE, # if FALSE considers all models
                                    allowdrift = TRUE, # include a constant
                                    trace = TRUE) # show summary of all models considered
# however it does not remove intermediate lags
# the result might be surprising
coeftest(arima.best.AIC.Future)
# ARIMA(5,1,3)
# however no lags are significant
AIC(arima.best.AIC.Future, arima111.Future, arima313.Future, arima313_1.Future)
# AIC worse than for the best manually selected model
BIC(arima.best.AIC.Future, arima111.Future, arima313.Future, arima313_1.Future)
# BIC worse than for the best manually selected model
# Ljung-Box test
Box.test(resid(arima.best.AIC.Future),
         type = "Ljung-Box", lag = 10)
# residuals are also white noise (p-value is very high!)
# but the automated procedure does not
# exclude intermediate lags
# the same automated search, but based on BIC
arima.best.BIC.Future <- auto.arima(data1.xts$index.F,
                                    d = 1, # parameter d of ARIMA model
                                    max.p = 5, # Maximum value of p
                                    max.q = 5, # Maximum value of q
                                    max.order = 10, # maximum p+q
                                    start.p = 1, # Starting value of p in stepwise procedure
                                    start.q = 1, # Starting value of q in stepwise procedure
                                    ic = "bic", # Information criterion to be used in model selection.
                                    stepwise = FALSE, # if FALSE considers all models
                                    allowdrift = TRUE, # include a constant
                                    trace = TRUE) # show summary of all models considered
coeftest(arima.best.BIC.Future)
# ARIMA(0,1,0) without a constant
BIC(arima.best.BIC.index, arima.best.AIC.Future, arima111.Future)
# BIC worse than best manually selected model (arima111)
# Ljung-Box test
Box.test(resid(arima.best.BIC.Future),
type = "Ljung-Box", lag = 10)
# the model selected based on BIC has almost
# autocorrelated residuals
# so should not be considered as complete
# automated procedure is not necessarily better
# than step-by-step human approach
# Lets finally decide to select 3 models:
# arima111 - sensible, manually selected based on BIC
# arima313_1 - sensible, manually selected based on AIC
# ARIMA(5,1,3) - automated selection based on AIC
# for further comparisons (forecasts)
# these models have:
# - lowest information criteria (AIC or BIC)
# - their residuals are white noise
# - (almost) all parameters significant
# We will finally use these models for forecasting
#################################################################################
# FORECAST for FUTURE - model arima111
# estimate the model on shorter sample
# (index_future appears to be the estimation sample excluding the last
# 5 weekly observations, which are held out for evaluation below -
# confirm where index_future is built upstream)
arima111s.future <- Arima(index_future$index.F, # variable
order = c(1,1,1)) # (p,d,q) parameters
coeftest(arima111s.future)
# lets make a prediction: 5 steps ahead with a 95% interval
forecast111.Future = forecast::forecast(arima111s.future, h = 5, level = 0.95)
# lets see the result
forecast111.Future
# the forecasts are indexed with
# an observation number, not a date!
# to reach the point forecast (the first column)
# one needs to use:
forecast111.Future$mean
# as.numeric() allows to convert it
# to a simple numeric vector
as.numeric(forecast111.Future$mean)
# if we want to easily put together both real data
# and the forecast on the plot, we have to convert
# both to ts or both to xts objects
# xts objects are more convenient and modern
# we need a data.frame with data
# and index of dates to create an xts object
forecast111.Future_data <- data.frame(f111.Future_mean = as.numeric(forecast111.Future$mean),
f111.Future_lower = as.numeric(forecast111.Future$lower),
f111.Future_upper = as.numeric(forecast111.Future$upper))
head(forecast111.Future_data)
# now it is a data frame
# to create an xts object we will
# copy the values of index from
# the original dataset (the "GSPC" name below is a leftover comment
# from an S&P500 template script)
forecast111.Future_xts <- xts(forecast111.Future_data,
# last 5 values of date
# from the original dataset
tail(index(data1.xts), 5))
forecast111.Future_xts
# we can put it together with the original data
Future_111 <- merge(data1.xts[,2], forecast111.Future_xts)
head(Future_111)
tail(Future_111, 10)
# lets finally plot the figure with the forecast
# and original series
# original data
plot(Future_111["2018",],
major.ticks = "weeks",
grid.ticks.on = "weeks",
grid.ticks.lty = 3,
main = "5 weeks forecast of Future - ARIMA111",
col = c("black", "blue", "red", "red"))
# checking forecast quality
# for simplicity of the formulas
# lets define two new objects:
# real values - last 5 observations
Future.r <- tail(data1.xts$index.F, 5)
# forecasts
Future.f.111 <- as.numeric(forecast111.Future$mean)
# put it together into a data.frame
Future_forecast_111 <- data.frame(Future.r, Future.f.111)
Future_forecast_111
# lets add the basis for different measures of the forecast error
Future_forecast_111$mae.Future.111 = abs(Future.r - Future.f.111)
Future_forecast_111$mape.Future.111 = abs((Future.r - Future.f.111)/Future.r)
Future_forecast_111
write.csv(Future_forecast_111, file = "Future_forecast_111.csv")
# and calculate its averages
# over the forecast period (columns 3:4 = mae, mape)
colMeans(Future_forecast_111[,3:4])
#################################################################################
# FORECAST for FUTURE - model arima313_1
# (same workflow as the arima111 block above, for the restricted
# ARIMA(3,1,3) model)
# estimate the model on shorter sample
arima313_1s.future <- Arima(index_future$index.F,
order = c(3, 1, 3),
fixed = c(0, NA, NA, # fixed: AR lags 1-3, then MA lags 1-3;
0, NA, NA)) # 0 pins a lag at zero, NA = estimate freely
coeftest(arima313_1s.future)
# lets make a prediction
forecast313_1.Future = forecast::forecast(arima313_1s.future, h = 5, level = 0.95)
# lets see the result
forecast313_1.Future
# the forecasts are indexed with
# an observation number, not a date!
# to reach the point forecast (the first column)
# one needs to use:
forecast313_1.Future$mean
# as.numeric() allows to convert it
# to a simple numeric vector
as.numeric(forecast313_1.Future$mean)
# if we want to easily put together both real data
# and the forecast on the plot, we have to convert
# both to ts or both to xts objects
# xts objects are more convenient and modern
# we need a data.frame with data
# and index of dates to create an xts object
forecast313_1.Future_data <- data.frame(f313_1.Future_mean = as.numeric(forecast313_1.Future$mean),
f313_1.Future_lower = as.numeric(forecast313_1.Future$lower),
f313_1.Future_upper = as.numeric(forecast313_1.Future$upper))
head(forecast313_1.Future_data)
# now it is a data frame
# to create an xts object we will
# copy the values of index from
# the original dataset (comment kept from a GSPC/S&P500 template)
forecast313_1.Future_xts <- xts(forecast313_1.Future_data,
# last 5 values of date
# from the original dataset
tail(index(data1.xts), 5))
forecast313_1.Future_xts
# we can put it together with the original data
Future_313_1 <- merge(data1.xts[,2], forecast313_1.Future_xts)
head(Future_313_1)
tail(Future_313_1, 10)
# lets finally plot the figure with the forecast
# and original series
# original data
# (the plot title says "ARIMA313"; the model is the restricted
# arima313_1 variant - the title simply omits the suffix)
plot(Future_313_1["2018",],
major.ticks = "weeks",
grid.ticks.on = "weeks",
grid.ticks.lty = 3,
main = "5 weeks forecast of Future - ARIMA313",
col = c("black", "blue", "red", "red"))
# checking forecast quality
# for simplicity of the formulas
# lets define two new objects:
# real values - last 5 observations
Future.r <- tail(data1.xts$index.F, 5)
# forecasts
Future.f.313_1 <- as.numeric(forecast313_1.Future$mean)
# put it together into a data.frame
Future_forecast_313_1 <- data.frame(Future.r, Future.f.313_1)
Future_forecast_313_1
# lets add the basis for different measures of the forecast error
Future_forecast_313_1$mae.Future.313_1 = abs(Future.r - Future.f.313_1)
Future_forecast_313_1$mape.Future.313_1 = abs((Future.r - Future.f.313_1)/Future.r)
Future_forecast_313_1
write.csv(Future_forecast_313_1, file = "Future_forecast_313_1.csv")
# and calculate its averages
# over the forecast period (columns 3:4 = mae, mape)
colMeans(Future_forecast_313_1[,3:4])
#################################################################################
# FORECAST for FUTURE - model arima513
# (same workflow as above, for the auto.arima-selected ARIMA(5,1,3))
# estimate the model on shorter sample
arima513s.future <- Arima(index_future$index.F, # variable
order = c(5,1,3)) # (p,d,q) parameters
coeftest(arima513s.future)
# lets make a prediction
forecast513.Future = forecast::forecast(arima513s.future, h = 5, level = 0.95)
# lets see the result
forecast513.Future
# the forecasts are indexed with
# an observation number, not a date!
# to reach the point forecast (the first column)
# one needs to use:
forecast513.Future$mean
# as.numeric() allows to convert it
# to a simple numeric vector
as.numeric(forecast513.Future$mean)
# if we want to easily put together both real data
# and the forecast on the plot, we have to convert
# both to ts or both to xts objects
# xts objects are more convenient and modern
# we need a data.frame with data
# and index of dates to create an xts object
forecast513.Future_data <- data.frame(f513.Future_mean = as.numeric(forecast513.Future$mean),
f513.Future_lower = as.numeric(forecast513.Future$lower),
f513.Future_upper = as.numeric(forecast513.Future$upper))
head(forecast513.Future_data)
# now it is a data frame
# to create an xts object we will
# copy the values of index from
# the original dataset (comment kept from a GSPC/S&P500 template)
forecast513.Future_xts <- xts(forecast513.Future_data,
# last 5 values of date
# from the original dataset
tail(index(data1.xts), 5))
forecast513.Future_xts
# we can put it together with the original data
Future_513 <- merge(data1.xts[,2], forecast513.Future_xts)
head(Future_513)
tail(Future_513, 10)
# lets finally plot the figure with the forecast
# and original series
# original data
plot(Future_513["2018",],
major.ticks = "weeks",
grid.ticks.on = "weeks",
grid.ticks.lty = 3,
main = "5 weeks forecast of Future - ARIMA513",
col = c("black", "blue", "red", "red"))
# checking forecast quality
# for simplicity of the formulas
# lets define two new objects:
# real values - last 5 observations
Future.r <- tail(data1.xts$index.F, 5)
# forecasts
Future.f.513 <- as.numeric(forecast513.Future$mean)
# put it together into a data.frame
Future_forecast_513 <- data.frame(Future.r, Future.f.513)
Future_forecast_513
# lets add the basis for different measures of the forecast error
Future_forecast_513$mae.Future.513 = abs(Future.r - Future.f.513)
Future_forecast_513$mape.Future.513 = abs((Future.r - Future.f.513)/Future.r)
Future_forecast_513
write.csv(Future_forecast_513, file = "Future_forecast_513.csv")
# and calculate its averages
# over the forecast period (columns 3:4 = mae, mape)
colMeans(Future_forecast_513[,3:4])
###########################################################################################
# Plot the forecast of all 3 models on 1 graph
# column 1 of Future_111 = observed series, column 2 = point forecast;
# from the other two objects only the point-forecast column is taken
Forecast_ARIMA_Future <- merge(Future_111["2018",c(1,2)],
Future_313_1["2018",2],
Future_513["2018",2])
plot(Forecast_ARIMA_Future,
major.ticks = "weeks",
grid.ticks.on = "weeks",
grid.ticks.lty = 3,
main = "Forecast of 3 ARIMA models for Future",
col = c("red", "darkgreen", "darkblue","darkgray"),
legend.loc = "topright")
# compare the results of MAE and MAPE for all 3 models,
# ARIMA111 proved to be the best model for forecast
# Let's see the MAE and MAPE of 3 models again
# the lower, the better
colMeans(Future_forecast_111[,3:4])
colMeans(Future_forecast_313_1[,3:4])
colMeans(Future_forecast_513[,3:4])
#######################################################################
# FORECAST for INDEX - FINAL SELECTION FOR FULL SAMPLE - model arima111
# lets make a prediction from the model estimated on the full sample
forecast111.f.index = forecast::forecast(arima111.index, h = 5,
level = 0.95)
# lets see the result
forecast111.f.index
# the forecasts are indexed with
# an observation number, not a date!
# to reach the point forecast (the first column)
# one needs to use:
forecast111.f.index$mean
# as.numeric() allows to convert it
# to a simple numeric vector
as.numeric(forecast111.f.index$mean)
# if we want to easily put together both real data
# and the forecast on the plot, we have to convert
# both to ts or both to xts objects
# xts objects are more convenient and modern
# we need a data.frame with data
# and index of dates to create an xts object
forecast111.f.index_data <- data.frame(f111.f.index_mean = as.numeric(forecast111.f.index$mean),
f111.f.index_lower = as.numeric(forecast111.f.index$lower),
f111.f.index_upper = as.numeric(forecast111.f.index$upper))
head(forecast111.f.index_data)
# now it is a data frame
# to create an xts object we use hard-coded future Friday dates
# (the weekly out-of-sample horizon following the estimation sample)
date_as_index <- as.Date(c("2018-04-27", "2018-05-04", "2018-05-11",
"2018-05-18", "2018-05-25"))
forecast111.f.index_xts <- xts(forecast111.f.index_data,
# 5 forecast dates defined just above
date_as_index)
forecast111.f.index_xts
write.csv(forecast111.f.index_xts, file = "forecast111.f.index_xts.csv")
#################################################################################
# FORECAST for FUTURE - FINAL SELECTION FOR FULL SAMPLE - model arima111
# lets make a prediction on full sample period
forecast111.f.Future = forecast::forecast(arima111.Future, h = 5,
level = 0.95)
# lets see the result
forecast111.f.Future
# the forecasts are indexed with
# an observation number, not a date!
# to reach the point forecast (the first column)
# one needs to use:
forecast111.f.Future$mean
# as.numeric() allows to convert it
# to a simple numeric vector
as.numeric(forecast111.f.Future$mean)
# if we want to easily put together both real data
# and the forecast on the plot, we have to convert
# both to ts or both to xts objects
# xts objects are more convenient and modern
# we need a data.frame with data
# and index of dates to create an xts object
forecast111.f.Future_data <- data.frame(f111.f.Future_mean = as.numeric(forecast111.f.Future$mean),
f111.f.Future_lower = as.numeric(forecast111.f.Future$lower),
f111.f.Future_upper = as.numeric(forecast111.f.Future$upper))
head(forecast111.f.Future_data)
# now it is a data frame
# to create an xts object we use hard-coded future Friday dates
# (same out-of-sample horizon as the index forecast above)
date_as_index <- as.Date(c("2018-04-27", "2018-05-04", "2018-05-11",
"2018-05-18", "2018-05-25"))
forecast111.f.Future_xts <- xts(forecast111.f.Future_data,
# 5 forecast dates defined just above
date_as_index)
forecast111.f.Future_xts
write.csv(forecast111.f.Future_xts, file = "forecast111.f.Future_xts.csv")
|
3b3cee8e2bf007e10c8a070e8847a97d15a12187 | 7b4bf9930648abddd9f0143d7f67399e6b66339d | /plot4.R | 411687e05016336b60e3c1fa4ead931c8a8597d7 | [] | no_license | feliao/ExData_Plotting1 | 41486b0a3c7438f910a8fbe68432de32232ab932 | c3a9fe4154a9322facbed1c74cf19ac6eed283d6 | refs/heads/master | 2020-12-28T19:32:53.722538 | 2014-09-07T02:07:43 | 2014-09-07T02:07:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,860 | r | plot4.R | plot4 <- function() {
power_data <- read.table(file = "./household_power_consumption.txt",header = TRUE,sep = ";")
power_data$Date <- as.Date(power_data$Date,"%d/%m/%Y")
x <- power_data$Date > as.Date("2007-01-31","%Y-%m-%d") & power_data$Date < as.Date("2007-02-03","%Y-%m-%d")
power_data <- power_data[x,]
power_data$Global_active_power <- as.numeric(as.character(power_data$Global_active_power))
date_time <- paste(power_data$Date,power_data$Time)
power_data$date_time <- ymd_hms(date_time)
power_data$Sub_metering_2 <- as.integer(as.character(power_data$Sub_metering_2))
power_data$Sub_metering_1 <- as.integer(as.character(power_data$Sub_metering_1))
power_data$Voltage<- as.integer(as.character(power_data$Voltage))
power_data$Global_reactive_power<- as.numeric(as.character(power_data$Global_reactive_power))
png(file = "plot4.png")
par(mfrow = c(2, 2))
plot(power_data$date_time,power_data$Global_active_power,type = "l",xlab = "",ylab = "Global Active Power")
plot(power_data$date_time,power_data$Voltage,type = "l",xlab = "datetime",ylab = "Voltage")
plot(power_data$date_time,power_data$Sub_metering_1,type = "l",xlab = "",ylab = "Energy sub metering")
lines(power_data$date_time,power_data$Sub_metering_2,col = "red",type="l")
lines(power_data$date_time,power_data$Sub_metering_3,col = "blue",type="l")
legend("topright", lty=1,col = c("black","red","blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),bty='n')
plot(power_data$date_time,as.numeric(power_data$Global_reactive_power),type = "l",xlab = "datetime",ylab = "Globalreactive_power")
dev.off()
} |
536fd26f06f2231921322cec6ccc3d62ebb06f66 | 41cf1a468130f589270b71ffe04c696d8b904860 | /analysis/ais-global/new/clump_processing.R | f3ae1dca0be10ca2c6778e6d2590c64108a4ca9f | [
"MIT"
] | permissive | dmarch/covid19-ais | 53ef60f7c7273e1d4da275f2829119937303c69a | 51239137a854d0dfed55c6e452136b932ba7a63c | refs/heads/master | 2023-04-07T12:58:30.526573 | 2021-05-06T04:51:34 | 2021-05-06T04:51:34 | 280,381,188 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,776 | r | clump_processing.R | # We keep:
# - large clumps (patches > 1000 cells)
# - small clumps wich are adjacent to the coastline (closest distance to coastline < 27,750 m)
#
# We remove:
# - clumps with no variation in SD and count
#---------------------------------------------------------------------------
# clump_processing       Process clumps
#---------------------------------------------------------------------------
# This script analyses clumps from S-AIS data from exact Earth
# After visual inspection, we detected isolated cells or cells with anomalous
# data. To provide a QC analysis, we detected clumps and characterized them by
# different factors:
# ocean area size: very small patches are likely to be the results of spureous detection
# closest distance to shore: isolated patches near the coast can be the result of local movements
# variability in speed and number of vessels: visual inspection indicates that most the
# clumps in higher latitudes have exactly same speed and number of vessels. Such pattern is
# rare, and we can use the SD to identify those clumps.
library(raster)
library(fasterize)
library(sf)
library(pals)
# NOTE(review): filter() and %>% are used below but dplyr/magrittr are not
# attached here - presumably they come in via the sourced helper files;
# confirm, or add an explicit library(dplyr)
source("scr/fun_common.R")  # bb()
source("scr/fun_ais.R")     # getClumpInfo() used in the loop below
# define input directory with sAIS data
input_dir <- "data/input/exacEarth/"
# create output directory
out_dir <- "data/out/ais-global/clumps/"
if (!dir.exists(out_dir)) dir.create(out_dir, recursive = TRUE)
#-------------------------------
# 1. Import data
#-------------------------------
# import raster with distance-to-coast per cell (metres)
dist2coast <- raster("data/out/ais-global/dist2coast.tif")
# import ocean mask
r_area <- raster("data/out/ais-global/ocean_area.tif")
#r_area_cell <- raster("data/out/ais-global/areacell.nc")
# select months to process (Jan-Jun of 2019 and 2020, as "YYYYMMDD")
dates <- c(
  seq.Date(as.Date("2019-01-01"), as.Date("2019-06-01"), by = "month"),
  seq.Date(as.Date("2020-01-01"), as.Date("2020-06-01"), by = "month")
) %>% format("%Y%m%d")
#-----------------------------------------
# 2. Detect and get info for each clump
#-----------------------------------------
# one data frame of clump descriptors per month, collected in a list
clump_list <- list()
for (i in 1:length(dates)){
  print(paste("Processing month", i, "from", length(dates)))
  # locate the monthly shapefile by its date prefix
  idate <- dates[i]
  shp_file <- list.files(input_dir, full.names = TRUE, recursive = TRUE, pattern = sprintf("%s.*.shp$", idate))
  # import shapefile
  ais_sf <- st_read(shp_file)
  # characterise clumps (area, distance to coast, SD of speed/count)
  iclumps <- getClumpInfo(pol = ais_sf, r_area = r_area, dist2coast = dist2coast)
  iclumps$date <- idate
  clump_list[[i]] <- iclumps
}
# combine data
clump_data <- data.table::rbindlist(clump_list)
# export file
outfile <- paste0(out_dir, "clumps.csv")
write.csv(clump_data, outfile, row.names=FALSE)
#-----------------------------------------
# 3. Exploratory analysis
#-----------------------------------------
nclumps <- nrow(clump_data)
# get number (or fraction) of clumps per different cases
filter(clump_data, count > 1000) %>% nrow()
filter(clump_data, count == 1) %>% nrow()/nrow(clump_data)
filter(clump_data, sd_speed == 0 | sd_count == 0) %>% nrow()/nrow(clump_data)
filter(clump_data, grid_area < 769) %>% nrow()/nrow(clump_data)
# first, we filter out clumps with only one cell and large clumps
# (note: clump_data is overwritten here, after the raw export above)
clump_data <- filter(clump_data,
                     count > 1,
                     count < 1000,
                     #dist2coast >= 27750,
                     grid_area > 769,
                     sd_speed > 0,
                     sd_count > 0)
# histogram or boxplot per variable
boxplot(clump_data$sd_speed)
boxplot(clump_data$sd_count)
boxplot(clump_data$area)
boxplot(clump_data$dist2coast)
hist(clump_data$sd_speed)
hist(clump_data$sd_count)
hist(clump_data$area)
hist(clump_data$dist2coast)
# set threshold for size: 25th percentile of clump ocean area
q25 <- quantile(clump_data$area, prob=.25) # km2
# 1398.438
|
17c74b143b44ef4d69c52ba0c4bad78f1967fdb0 | 814cbfc3ff46684cfe94c7a65f73f1b705aa867a | /plot3.R | 8034a368259cd311cfb6d1b70e65dc63d1e94791 | [] | no_license | octern/ExData_Plotting1 | 3b429b32c7881dda259b6ed8f9af54a253806a54 | 9075b27d0a30f131793a2d39cd8d8f5148dee37b | refs/heads/master | 2021-01-15T13:22:54.664085 | 2014-05-07T04:00:10 | 2014-05-07T04:00:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,025 | r | plot3.R | # get the data -- we could do some head-tail fiddling to get only the rows we need, but it's way faster to import the whole shebang and use git to subset it.
# NOTE(review): hard-coded setwd() makes this script machine-specific;
# also header=T / stringsAsFactors=F use the reassignable T/F shortcuts
setwd("/Users/testo/datasci/exploratory/ExData_Plotting1")
pow<-read.table("household_power_consumption.txt", sep=";", header=T, stringsAsFactors=F)
# keep only Feb 1-2, 2007 (dates are d/m/Y strings in the raw file)
powsub<-pow[pow$Date=="1/2/2007" | pow$Date=="2/2/2007",]
head(powsub)
# create timestamp
powsub$ts<-strptime(paste(powsub$Date, powsub$Time), format="%d/%m/%Y %H:%M:%S")
# missing values to missing -- this happens automatically when we go to numeric, yes?
# (yes: "?" entries become NA, with a coercion warning)
powsub$gap<-as.numeric(powsub$Global_active_power)
head(powsub)
png("plot3.png")
# invisible points (pch="") just set up the axes; the three sub-metering
# series are then drawn with lines()
with(powsub, plot(ts, gap, pch="", ylab="Energy sub metering", xlab="", ylim=c(0,38)))
# NOTE(review): the Sub_metering_* columns are plotted without an explicit
# numeric conversion - presumably they are read as numeric for these dates;
# confirm against the raw file
lines(powsub$ts, powsub$Sub_metering_1, col="black")
lines(powsub$ts, powsub$Sub_metering_2, col="red")
lines(powsub$ts, powsub$Sub_metering_3, col="blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black", "red", "blue"), lty=1)
dev.off()
|
79a9a1d04a1a6a5510c406438e4e93d850e6eb85 | 7c02f2b5f02b675bd67ea0c54bfc198db90e6562 | /code/98_prepare_to_share/01_prepare_to_share.R | 30ab1f7365f769a47ca78cc7ece34053f797e7cc | [] | no_license | LieberInstitute/Visium_SPG_AD | 6ba55a92efd708719fd8f5ca6aa98a344b06c409 | 0441e194b03370524d4115d71a74dd491c419002 | refs/heads/master | 2023-08-16T10:35:34.956869 | 2023-08-16T02:20:24 | 2023-08-16T02:20:24 | 377,886,452 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,987 | r | 01_prepare_to_share.R | library("here")
library("spatialLIBD")
library("SpatialExperiment")
library("sessioninfo")
## Create output directory
dir_rdata <- here("processed-data", "98_prepare_to_share")
dir.create(dir_rdata, showWarnings = FALSE)
## Determine the suffix from the SGE task-array index
## (task 1 = wholegenome, otherwise targeted)
## NOTE(review): if SGE_TASK_ID is unset, as.numeric("") is NA and
## suffix becomes NA - confirm this script only runs under SGE
suffix <-
    ifelse(as.numeric(Sys.getenv("SGE_TASK_ID")) == 1, "wholegenome", "targeted")
## Load the data
spe <- readRDS(here::here(
    "processed-data",
    "08_harmony_BayesSpace",
    suffix,
    paste0("spe_harmony_", suffix, ".rds")
))
## Import GraphBased clusters
spe <- cluster_import(
    spe,
    cluster_dir = here::here(
        "processed-data",
        "08_harmony_BayesSpace",
        suffix,
        "clusters_graphbased"
    ),
    prefix = "graph_"
)
## Import BayesSpace clusters
spe <- cluster_import(
    spe,
    cluster_dir = here::here(
        "processed-data",
        "08_harmony_BayesSpace",
        suffix,
        "clusters_BayesSpace"
    ),
    prefix = ""
)
## Import pathology levels
spe <- cluster_import(
    spe,
    cluster_dir = here::here(
        "processed-data",
        "09_pathology_vs_BayesSpace",
        "pathology_levels"
    ),
    prefix = ""
)
## Convert from character to a factor, so they appear in the order
## we want
spe$path_groups <-
    factor(
        spe$path_groups,
        levels = c(
            "none",
            "Ab",
            "n_Ab",
            "pTau",
            "n_pTau",
            "both",
            "n_both"
        )
    )
## Convert pathology variables into factors
for (i in colnames(colData(spe))[grep("^path_", colnames(colData(spe)))]) {
    colData(spe)[[i]] <- factor(colData(spe)[[i]])
}
## Change Braak info based on latest information from LIBD pathology
## (unique() calls below are interactive sanity checks before/after)
unique(spe$subject)
unique(spe$BCrating)
unique(spe$braak)
unique(spe$cerad)
spe$BCrating <- NULL ## This variable was removed from the phenotype table
spe$braak <- c("Br3854" = "Stage VI", "Br3873" = "Stage V", "Br3880" = "Stage VI", "Br3874" = "Stage IV")[spe$subject]
spe$cerad <- c("Br3854" = "Frequent", "Br3873" = "Frequent", "Br3880" = "Frequent", "Br3874" = "None")[spe$subject]
unique(spe$braak)
unique(spe$cerad)
## Add APOe genotype info (per-subject lookup, like braak/cerad above)
spe$APOe <- c("Br3854" = "E3/E4", "Br3873" = "E3/E3", "Br3880" = "E3/E3", "Br3874" = "E2/E3")[spe$subject]
## Drop variables that are empty and thus just add confusion to other work later on
spe$NGFAP <- NULL
spe$PGFAP <- NULL
spe$NLipofuscin <- NULL
spe$PLipofuscin <- NULL
spe$NMAP2 <- NULL
spe$PMAP2 <- NULL
## Load pathology colors
## This info is used by spatialLIBD v1.7.18 or newer
source(here("code", "colors_pathology.R"), echo = TRUE, max.deparse.length = 500)
spe$path_groups_colors <- colors_pathology[as.character(spe$path_groups)]
## Save the final object that we can share through spatialLIBD
save(spe, file = file.path(dir_rdata, paste0("Visium_SPG_AD_spe_", suffix, ".Rdata")))
## Reproducibility information
print("Reproducibility information:")
Sys.time()
proc.time()
options(width = 120)
session_info()
|
2319aa81afc9c4e5ebe9479b73d68211e446bf3d | dcda12d7eac0414aace026e7e805159b53319554 | /ROV_Summary_12_2014.R | 141a0ebbd09530083aa0d7ecb5b6bd29150d0e20 | [] | no_license | sare-ah/Seamount_benthic_communities | f921e2036ed5336c61a26a95a8c062f9ad4c7e41 | f23b69b5169b2256469258190d2bf14ef89fd97e | refs/heads/master | 2020-12-18T17:22:23.821468 | 2017-01-11T00:07:58 | 2017-01-11T00:07:58 | 235,468,161 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,756 | r | ROV_Summary_12_2014.R | ####################################################################################################################
# Submersible summaries for Cobb Cruise Tech Report
#
# Objective: For each dive determine:
# beginning and ending timestamp
# beginning and ending depth
# average field of view
# dominant substrate codes
# relief ID codes
#
# Background: Data compiled from dive log, navigation, video miner in MS Access database
#
# Summary: Connect to database, run queries from R, read table, summarize data with xtabs() & ddply()
#
# Author: Sarah Davies
# Sarah.Davies@dfo-mpo.gc.ca
# 250-756-7124
# Date: December, 2014
###################################################################################################################
# start fresh
# NOTE(review): rm(list=ls()) and a hard-coded setwd() are script-level
# side effects that make this analysis machine-specific; kept as-is
# because later relative paths depend on this working directory
rm(list=ls())
setwd("H:/Pac2012-043 Tully Cobb Seamount/Sarah and Janelle analysis")
library(RODBC)
library(plyr)
options(useFancyQuotes=FALSE)
## 1. Access Cobb Analysis db
## 2. Read video miner data table
## 3. For each dive, determine the beginning and ending timestamp and depth
## 4. For each dive, determine average field of view, summarize dominant substrate, and relief codes
### Functions ###
# Open an ODBC connection to an MS Access database.
# Side effect: assigns the open connection to the GLOBAL variable
# `connection.access` via <<- ; the rest of the script (and the caller)
# relies on that global, so the function returns nothing useful itself.
# The caller is responsible for odbcClose(connection.access) when done.
ReadAccessDB <- function(filename.mdb) {
  require(RODBC)  # NOTE(review): require() only warns on failure; library() would error
  connection.access <<- odbcConnectAccess(filename.mdb)
  print(connection.access)
  cat("------------------ connected to database --------------------------","\n")
  # odbcClose(connection.access)
}
### Data ###
# opens the DB and sets the global `connection.access`
ReadAccessDB("Cobb Analysis.mdb")
obs <- sqlFetch(connection.access, "data_ROV_video")
### Queries & Summaries ###
# Determine depth range for each dive
# (start-of-transect timestamp and depth, one row per dive)
query.depths.start <- "SELECT [ROV dive sites].SiteName, [Transect Times].TransectName,
[Transect Times].MinOfTimeCode, B_TRACKING.Depth
FROM B_TRACKING INNER JOIN ((B_DIVES INNER JOIN ([ROV dive sites]
INNER JOIN [Transect Times] ON [ROV dive sites].Dive = [Transect Times].TransectName)
ON B_DIVES.DiveNumber = [ROV dive sites].DiveNumber) INNER JOIN data_ROV_video ON
[Transect Times].MinOfTimeCode = data_ROV_video.TimeCode) ON (B_DIVES.DiveID = B_TRACKING.DiveID)
AND (B_TRACKING.charTime = data_ROV_video.TimeText)
GROUP BY [ROV dive sites].SiteName, [Transect Times].TransectName,
[Transect Times].MinOfTimeCode, B_TRACKING.Depth;"
query.result <- sqlQuery(connection.access, query.depths.start)
depths.start <- query.result
depths.start$MinOfTimeCode <- format(depths.start$MinOfTimeCode, "%H:%M:%S")
depths.start <- depths.start[order(depths.start$TransectName),]
depths.start
# end-of-transect timestamp and depth, one row per dive
query.depths.end <- "SELECT [ROV dive sites].SiteName, [Transect Times].TransectName,
[Transect Times].MaxOfTimeCode, B_TRACKING.Depth
FROM (B_DIVES INNER JOIN ([ROV dive sites] INNER JOIN [Transect Times] ON
[ROV dive sites].Dive = [Transect Times].TransectName) ON
B_DIVES.DiveNumber = [ROV dive sites].DiveNumber) INNER JOIN
(B_TRACKING INNER JOIN data_ROV_video ON B_TRACKING.charTime = data_ROV_video.TimeText)
ON (B_DIVES.DiveID = B_TRACKING.DiveID) AND ([Transect Times].MaxOfTimeCode = data_ROV_video.TimeCode)
GROUP BY [ROV dive sites].SiteName, [Transect Times].TransectName,
[Transect Times].MaxOfTimeCode, B_TRACKING.Depth;"
query.result <- sqlQuery(connection.access, query.depths.end)
depths.end <- query.result
depths.end$MaxOfTimeCode <- format(depths.end$MaxOfTimeCode, "%H:%M:%S")
depths.end
# Be tidy
odbcClose(connection.access)
cat("------------------ connection closed ------------------------------","\n")
setwd("~/R/Cobb")
# Make transect name a factor
obs$TransectName <- as.factor(obs$TransectName)
# Calculate field of view, remove NULL Field of View values
# (DataCode 15 = field-of-view records in the video miner table)
obs.fov <- subset(obs, DataCode == 15)
foView <- ddply(obs.fov, .(TransectName, TransectNum), summarize, meanFoV = mean(FieldOfView),
sd = sd(FieldOfView), count = length(FieldOfView))
foView
colnames(foView) <- c("SiteName", "TransectName", "meanFoV","sd", "count")
foView <- foView[order(foView$TransectName),]
# Hard code in end of dive 18, site DFO_8 (missing from the DB query result)
dfo_8 <- data.frame(SiteName="DFO_8",TransectName="18",MaxOfTimeCode="02:23:11",Depth="202")
depths.end <- rbind(depths.end, dfo_8)
depths.end$TransectName <- as.numeric(depths.end$TransectName)
depths.end <- depths.end[order(depths.end$TransectName),]
depths.end
# Write out results to .csv
# NOTE(review): `summary` masks base::summary() for the rest of the session;
# the cbind relies on depths.start/depths.end/foView being row-aligned by dive
summary <- depths.start
summary <- cbind(summary, depths.end$MaxOfTimeCode, depths.end$Depth, foView$meanFoV, foView$sd, foView$count)
colnames(summary) <- c("Site Name", "Transect Number", "Start Time", "Start Depth (m)", "End Time", "End Depth (m)", "Field of View (cm)","sd FoV", "count FoV")
summary[,c(2,1,3,5,4,6,7,8,9)]
write.csv(summary, "ROV Summary.csv", row.names=TRUE)
# Habitat summary: recode numeric substrate IDs to labels, then
# cross-tabulate observations per transect x substrate
obs$DominantSubstrate <- as.factor(obs$DominantSubstrate)
obs$DominantSubstrate <- revalue(obs$DominantSubstrate,
c("1"="Bedrock, smooth", "2"="Bedrock with crevices","3"="Boulders",
"4"="Cobble", "5"="Gravel","7"="Sand","10"="Crushed Shell",
"11"="Whole Shell","14"="Coral Rubble"))
hab<-xtabs(~TransectName+DominantSubstrate, data=obs, na.action=na.exclude)
write.csv(hab, "ROV Dominant Substrate.csv", row.names=TRUE)
hab
# Relief summary: same pattern for the relief ID codes
obs$ReliefID <- as.factor(obs$ReliefID)
obs$ReliefID <- revalue(obs$ReliefID, c("1"="Flat or rolling","2"="Vertical relief 0.5 - 2m",
"3"="Vertical relief > 2m","4"="Slope or wall"))
relief <- (xtabs(~TransectName+ReliefID, data=obs, na.action=na.exclude))
write.csv(relief, "ROV Relief.csv", row.names=TRUE)
relief
|
7804d1e46893ab0a02f105ac4f5ad6a357d310f9 | d367eed128705ccab8f54126b64f2e5abf8c77f5 | /knn_votingAggregation.R | 9be96d7484bc0d2a811436ce76415ca6fbbed00f | [] | no_license | christianadriano/ML_VotingAggregation | a2baabd75b7445cbb71785bea7008037049cebef | e1f4c9d8c7ca40148ac418f2bbed80ff966bd1d9 | refs/heads/master | 2022-06-12T21:26:15.176765 | 2022-04-03T22:00:03 | 2022-04-03T22:00:03 | 99,424,214 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,539 | r | knn_votingAggregation.R | #K-nearest neighbor KNN
#Predict bug covering questions based on various values of
#the parameters in the aggregation methods
#Obtain the data
# Import data (runMain() comes from the sourced file)
source("C://Users//chris//OneDrive//Documentos//GitHub//ML_VotingAggregation//aggregateAnswerOptionsPerQuestion.R");
summaryTable <- runMain();
#I need to guarantee that some examples (i.e., failing methods)
#do not dominate the training or testing sets. To do that, I need to get a
#close to equal proportion of examples in both sets. One way to do that is
#to shuffle the data and sample from it.
set.seed(9850);
g<- runif((nrow(summaryTable))); #generates a random distribution
summaryTable <- summaryTable[order(g),];
###########################################################
# Below are two options to partition the data in training and testing sets
# NOTE(review): both options are executed; Option-2 overwrites the
# trainingData/testData created by Option-1, so only Option-2 is effective
#Option-1 Training set (2/3)
totalData = length(summaryTable$Question.ID);
trainingSize = trunc(totalData * 2/3);
startTestIndex = totalData - trainingSize;
endTestIndex = totalData;
#Extract training and test data
trainingData = as.data.frame(summaryTable[1:trainingSize,]);
testData = as.data.frame(summaryTable[startTestIndex:endTestIndex,])
##################################################################
#Option-2 Mark sample with a probability
set.seed(4321);
ind<- sample(2, nrow(summaryTable),replace = TRUE, prob=c(0.67,0.33));
trainingData <- as.data.frame(summaryTable[ind==1,]);
testData <- as.data.frame(summaryTable[ind==2,]);
##Obtain the ground truth
trainingLabels <- as.data.frame(summaryTable[ind==1,"bugCovering"]);
testLabels <- as.data.frame(summaryTable[ind==2,"bugCovering"]);
##################################################################
#Build the KNN model
# NOTE(review): install.packages() inside an analysis script re-installs
# on every run; usually run once interactively instead
install.packages("class")
library(class);
#Select only the rankingVote as a feature to predict bugCovering
# NOTE(review): this subset happens AFTER the train/test split, so
# trainingData/testData still contain all columns - including the
# bugCovering label itself - when passed to knn() below; confirm this
# is intended
summaryTable <- summaryTable[,c("bugCovering","rankingVote")];
fitModel <- knn(train =trainingData, test=testData, cl=trainingLabels[,1] , k=2);
attributes(.Last.value)
summary(fitModel);
#Evaluate model
# NOTE(review): `merge` masks base::merge() for the session
testLabels<-data.frame(testLabels[,1]);
merge <- data.frame(fitModel,testLabels);
names(merge)<- c("Predicted bug","Actual bug");
merge
install.packages("gmodels");
library(gmodels)
# NOTE(review): x is the TRAINING labels while y=fitModel holds TEST-set
# predictions; the two vectors have different lengths - presumably
# testLabels was intended here; verify
CrossTable(x = trainingData[,"bugCovering"], y=fitModel, prop.chisq = FALSE)
plot(fitModel)
fitModel
fitFrame <- data.frame(fitModel)
predictionFrame<-data.frame(fitModel)
mean(trainingData[predictionFrame[,1]==TRUE,"rankingVote"])
trainingData[predictionFrame[,1]==TRUE,]
predictionFrame[predictionFrame[,1]==TRUE,]
0751f8511a3a8a7bbbfd8babbb8d7225abcd7202 | 78e56603355988ae42605256857e991e540f228a | /plot1.R | 177fc3f61adad5f442d64600cd374045c605a894 | [] | no_license | bbqsatay/ExData_Plotting1 | f1693af2230258782ff7bae2bed7d1c3b07a167a | bc764ffe5624c4ca3611a9d20a76c0525557014a | refs/heads/master | 2021-01-12T21:05:23.204878 | 2016-01-10T13:30:34 | 2016-01-10T13:30:34 | 49,333,600 | 0 | 0 | null | 2016-01-09T17:17:29 | 2016-01-09T17:17:28 | null | UTF-8 | R | false | false | 712 | r | plot1.R | plot1 <- function( ) {
data <- read.csv("household_power_consumption.txt", header=TRUE, sep = ";",stringsAsFactors=FALSE)
## then change to date format
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
## extract data from the 2 dates we want
data2 <- subset(data, (Date == "2007-02-01") | (Date == "2007-02-02"))
## convert power field to numeric
data2$Global_active_power <- as.numeric(data2$Global_active_power)
## plot histogram
## write to png file in same directory
png(file="plot1.png")
hist(data2$Global_active_power, col = "red", main="Global Active Power",xlab="Global Active Power (kilowatts)")
## close device
dev.off()
} |
e4b680ea2d910762f2d26eebc195fef84744923c | 7fe5d6f380d029f551fae6a719215590810fc7b2 | /filter_reffile.R | 7dc6a08d94d24eb049bd97b5113631a9ca8fbbc9 | [] | no_license | aecahill/MOTHUR_saltmarsh | bd4113eb65d80986b79d13a80aa482684e17788b | 96492aba7d0f7d589da65707214014e7951072e6 | refs/heads/master | 2021-08-19T09:25:03.634753 | 2021-06-20T19:41:59 | 2021-06-20T19:41:59 | 152,486,069 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,416 | r | filter_reffile.R | library(seqinr)
#read in reference alignment
refalign<-read.alignment("C:/Users/acahill/Desktop/reference.align",format="fasta")
#read in table of errors thrown by mothur
#these are seqs that are in the ref but missing from the tax file; need to be removed from ref
errortab<-read.table("C:/Users/acahill/Desktop/error_list.txt",header=FALSE)
#turn names in error table into characters
seqs_2remove<-as.character(errortab$V1)
#need to clean reference names again
listnamesref<-as.list(refalign$nam) #make a list of names of sequences
clean_namesref=list() #empty vector of names
namesreflength<-c(1:length(listnamesref)) #vector from 1 to number of seqs
#change the format so that the seq names match the format in the taxonomy file
for (i in namesreflength){
e<-gsub("[\t]", "", listnamesref[i])
clean_namesref<-c(clean_namesref,e)
}
clean_namesref[1:5]
#match names in seqs_2remove to line numbers in the alignment
tossref<-c()
#make vector to toss FROM REFERENCE
for (i in clean_namesref){
rownumberref<-match(i,seqs_2remove)
tossref<-append(tossref,rownumberref)
}
length(tossref)
tossref_noNA<-na.omit(tossref)
reffiltered_names<-refalign$nam[-tossref_noNA]
reffiltered_seqs<-refalign$seq[-tossref_noNA]
write.fasta(sequences=reffiltered_seqs,names=reffiltered_names,file.out="C:/Users/acahill/Desktop/reffiltered.fasta")
|
c5906b2dfa7f350d909eed7ab648ae35823da8d3 | b74cc181865d768836668c1a74da52f0ec446175 | /R/ReadConQuestLibrary.R | e56c5815b544d6e37068a65e7e51ae3ecb53ec83 | [] | no_license | cran/conquestr | 8cf9d92dce604b7dca4269be7da2502379b61663 | 4b56911ac14bcde0a5c7e9d7af6dd4250f6babd9 | refs/heads/master | 2023-03-28T12:02:24.099503 | 2023-03-12T08:00:02 | 2023-03-12T08:00:02 | 174,560,250 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 39,177 | r | ReadConQuestLibrary.R | #' @title ReadDouble
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A double (one little-endian 8-byte value read from the stream).
#' @keywords internal
ReadDouble <- function(myFile)
{
  readBin(con = myFile, what = double(), n = 1, size = 8, endian = "little")
}
#' @title ReadDoubleList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of doubles, in stream order.
#' @keywords internal
ReadDoubleList <- function(myFile)
{
  # the stream stores a count followed by that many 8-byte doubles
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadDouble(myFile))
}
#' @title ReadInteger
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return An integer (one little-endian 4-byte value read from the stream).
#' @keywords internal
ReadInteger <- function(myFile)
{
  readBin(con = myFile, what = integer(), n = 1, size = 4, endian = "little")
}
#' @title ReadBoolean
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A logical (one single-byte value read from the stream).
#' @keywords internal
ReadBoolean <- function(myFile)
{
  readBin(con = myFile, what = logical(), n = 1, size = 1, endian = "little")
}
#' @title ReadString
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A character scalar: a 4-byte length prefix followed by that many bytes.
#' @keywords internal
ReadString <- function(myFile)
{
  len <- readBin(con = myFile, what = integer(), n = 1, size = 4, endian = "little")
  # a zero length means an empty string with no payload bytes on the stream
  if (len == 0) {
    return("")
  }
  readCharSafe(myFile, len)
}
#' @title ReadIntegerList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of integers, in stream order.
#' @keywords internal
ReadIntegerList <- function(myFile)
{
  # count prefix, then that many 4-byte integers
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadInteger(myFile))
}
#' @title ReadIntegerListList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of integer lists, in stream order.
#' @keywords internal
ReadIntegerListList <- function(myFile)
{
  # count prefix, then that many length-prefixed integer lists
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadIntegerList(myFile))
}
#' @title ReadBooleanList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of logicals, in stream order.
#' @keywords internal
ReadBooleanList <- function(myFile)
{
  # count prefix, then that many single-byte booleans
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadBoolean(myFile))
}
#' @title ReadStringList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of character scalars, in stream order.
#' @keywords internal
ReadStringList <- function(myFile)
{
  # count prefix, then that many length-prefixed strings
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadString(myFile))
}
#' @title ReadBitSet
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "String" (an Items x Columns logical matrix, or a
#'   1x1 NA matrix when Size is zero), "Items", "Columns", and "Size" (number of
#'   raw bytes holding the packed bits).
#' @keywords internal
ReadBitSet <- function(myFile)
{
  String <- matrix()  # stays a 1x1 NA matrix when no bytes follow
  Items <- ReadInteger(myFile)
  Columns <- ReadInteger(myFile)
  Size <- ReadInteger(myFile)
  if (Size > 0) {
    # Read all packed bytes in a single call; the previous one-byte-per-iteration
    # loop grew the bit vector with c() (quadratic). rawToBits unpacks each byte
    # LSB-first, matching the per-byte unpacking of the original loop.
    rawBytes <- readBin(myFile, what = "raw", n = Size)
    allBits <- as.logical(rawToBits(rawBytes))
    # trailing pad bits beyond Items * Columns are dropped; seq_len is safe for
    # a zero-sized product where 1:(Items*Columns) would misbehave
    String <- matrix(allBits[seq_len(Items * Columns)],
                     nrow = Items, ncol = Columns, byrow = TRUE)
  }
  V <- list(String, Items, Columns, Size)
  names(V) <- c("String", "Items", "Columns", "Size")
  return(V)
}
#' @title ReadMatrix
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A Rows x Columns numeric matrix with row/column names taken from the
#'   stream. NOTE: when either dimension is zero a 1x1 NA matrix is returned
#'   (historical behaviour, preserved for callers that test with is.na).
#' @keywords internal
ReadMatrix <- function(myFile)
{
  A <- matrix()  # 1x1 NA placeholder, returned as-is for degenerate dimensions
  Rows <- ReadInteger(myFile)
  Columns <- ReadInteger(myFile)
  Empty <- ReadBoolean(myFile)   # read to advance the stream; not returned
  Title <- ReadString(myFile)    # read to advance the stream; not returned
  cLabels <- ReadStringList(myFile)
  rLabels <- ReadStringList(myFile)
  if (Rows == 0 || Columns == 0) return(A)
  # allocate with NA_real_ (the old `1:Rows * Columns` init was an overwritten
  # placeholder that only obscured intent); every cell is filled below
  A <- matrix(NA_real_, nrow = Rows, ncol = Columns)
  for (r in seq_len(Rows)) {
    for (c in seq_len(Columns)) {
      A[r, c] <- ReadDouble(myFile)  # values are stored row-major on the stream
    }
  }
  colnames(A) <- unlist(cLabels)
  rownames(A) <- unlist(rLabels)
  return(A)
}
#' @title ReadMatrixList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of matrices, in stream order.
#' @keywords internal
ReadMatrixList <- function(myFile)
{
  # count prefix, then that many serialised matrices
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadMatrix(myFile))
}
#' @title ReadImplicitVar
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "Name" (list of strings) and "Levels"
#'   (list of integers), both of the same length.
#' @keywords internal
ReadImplicitVar <- function(myFile)
{
  # one count, then ALL names, then ALL level counts (two separate runs on the stream)
  n <- ReadInteger(myFile)
  nameList <- lapply(seq_len(n), function(i) ReadString(myFile))
  levelList <- lapply(seq_len(n), function(i) ReadInteger(myFile))
  list(Name = nameList, Levels = levelList)
}
#' @title ReadVarList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "XV" and "IV", each a length-prefixed list of
#'   integer variable indices.
#' @keywords internal
ReadVarList <- function(myFile)
{
  nXV <- ReadInteger(myFile)
  XV <- lapply(seq_len(nXV), function(i) ReadInteger(myFile))
  nIV <- ReadInteger(myFile)
  IV <- lapply(seq_len(nIV), function(i) ReadInteger(myFile))
  list(XV = XV, IV = IV)
}
#' @title ReadLookUp
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "VarNumber" (integer) and "Value"
#'   (list of strings).
#' @keywords internal
ReadLookUp <- function(myFile)
{
  varNum <- ReadInteger(myFile)
  nEntries <- ReadInteger(myFile)
  entries <- lapply(seq_len(nEntries), function(i) ReadString(myFile))
  list(VarNumber = varNum, Value = entries)
}
#' @title ReadLookUpList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of look-up records, in stream order.
#' @keywords internal
ReadLookUpList <- function(myFile)
{
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadLookUp(myFile))
}
#' @title ReadBandDefine
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "Dimension", "Upper Bound", "Lower Bound",
#'   and "Band Label" (element names contain spaces, preserved for callers).
#' @keywords internal
ReadBandDefine <- function(myFile)
{
  dimNum <- ReadInteger(myFile)
  upper <- ReadDouble(myFile)
  lower <- ReadDouble(myFile)
  label <- ReadString(myFile)
  V <- list(dimNum, upper, lower, label)
  names(V) <- c("Dimension", "Upper Bound", "Lower Bound", "Band Label")
  V
}
#' @title ReadBandDefinesList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of band-definition records, in stream order.
#' @keywords internal
ReadBandDefinesList <- function(myFile)
{
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadBandDefine(myFile))
}
#' @title ReadAnchor
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "Type", "I1", "I2" (integers) and
#'   "Value" (double).
#' @keywords internal
ReadAnchor <- function(myFile)
{
  anchorType <- ReadInteger(myFile)
  idx1 <- ReadInteger(myFile)
  idx2 <- ReadInteger(myFile)
  anchorValue <- ReadDouble(myFile)
  list(Type = anchorType, I1 = idx1, I2 = idx2, Value = anchorValue)
}
#' @title ReadAnchorList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of anchor records, in stream order.
#' @keywords internal
ReadAnchorList <- function(myFile)
{
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadAnchor(myFile))
}
#' @title ReadVariable
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A named list describing one variable: its name, column locations,
#'   level, missing-value and drop/keep rules, whether it is categorical, and
#'   its category strings (under the historical element name "c").
#' @keywords internal
ReadVariable <- function(myFile)
{
  # local helpers: read a run of n integers / n strings from the stream
  readInts <- function(n) lapply(seq_len(n), function(i) ReadInteger(myFile))
  readStrs <- function(n) lapply(seq_len(n), function(i) ReadString(myFile))
  Name <- ReadString(myFile)
  nLocs <- ReadInteger(myFile)
  Begin <- readInts(nLocs)
  End <- readInts(nLocs)
  Record <- readInts(nLocs)  # read to advance the stream; not part of the return value
  Level <- ReadInteger(myFile)
  # each rule block is its own count-prefixed run
  MissingList <- readStrs(ReadInteger(myFile))
  MissingMatchMethod <- readInts(ReadInteger(myFile))
  DropKeepList <- readStrs(ReadInteger(myFile))
  DropKeepMatchMethod <- readInts(ReadInteger(myFile))
  DropKeepType <- ReadInteger(myFile)
  Categorical <- ReadBoolean(myFile)
  C <- ReadStringList(myFile)
  V <- list(Name, Begin, End, Level, MissingList, MissingMatchMethod, DropKeepList,
            DropKeepMatchMethod, DropKeepType, Categorical, C)
  # the last element is named lowercase "c" for backward compatibility
  names(V) <- c("Name", "Begin", "End", "Level", "MissingList", "MissingMatchMethod",
                "DropKeepList", "DropKeepMatchMethod", "DropKeepType", "Categorical", "c")
  V
}
#' @title ReadVariableList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of variable records, in stream order.
#' @keywords internal
ReadVariableList <- function(myFile)
{
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadVariable(myFile))
}
#' @title ReadResponse
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "Col" and "Rec" (integer lists of equal length)
#'   and "Width" (integer).
#' @keywords internal
ReadResponse <- function(myFile)
{
  # one count, then all columns, then all record numbers
  n <- ReadInteger(myFile)
  colList <- lapply(seq_len(n), function(i) ReadInteger(myFile))
  recList <- lapply(seq_len(n), function(i) ReadInteger(myFile))
  fieldWidth <- ReadInteger(myFile)
  list(Col = colList, Rec = recList, Width = fieldWidth)
}
#' @title ReadResponseList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of response records, in stream order.
#' @keywords internal
ReadResponseList <- function(myFile)
{
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadResponse(myFile))
}
#' @title ReadKey
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "Key" (a list of key strings) and
#'   "Score" (a string).
#' @keywords internal
ReadKey <- function(myFile)
{
  nKeys <- ReadInteger(myFile)
  keyList <- lapply(seq_len(nKeys), function(i) ReadString(myFile))
  keyScore <- ReadString(myFile)
  list(Key = keyList, Score = keyScore)
}
#' @title ReadKeyList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of key records, in stream order.
#' @keywords internal
ReadKeyList <- function(myFile)
{
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadKey(myFile))
}
#' @title ReadLabel
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "VarNum", "VarType", "Code" and "Label";
#'   Code and Label are parallel string lists.
#' @keywords internal
ReadLabel <- function(myFile)
{
  varNum <- ReadInteger(myFile)
  # variable type code: IMPLICIT=0, EXPLICIT=1, DIMENSION=2, PARAMETER=3, FIT=4
  varType <- ReadInteger(myFile)
  nCodes <- ReadInteger(myFile)
  # all codes are stored first, then all labels
  codeList <- lapply(seq_len(nCodes), function(i) ReadString(myFile))
  labelList <- lapply(seq_len(nCodes), function(i) ReadString(myFile))
  list(VarNum = varNum, VarType = varType, Code = codeList, Label = labelList)
}
#' @title ReadLabelList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of label records, in stream order.
#' @keywords internal
ReadLabelList <- function(myFile)
{
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadLabel(myFile))
}
#' @title ReadTerms
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list describing one model term: variable numbers/types, parameter
#'   numbers/types, the term's sign character, and its label.
#' @keywords internal
ReadTerms <- function(myFile) {
  nVars <- ReadInteger(myFile)
  nParams <- ReadInteger(myFile)
  VariableNumber <- lapply(seq_len(nVars), function(i) ReadInteger(myFile))
  # 0 if implicit, 1 if explicit
  VariableType <- lapply(seq_len(nVars), function(i) ReadInteger(myFile))
  # parameter number in the model (0 offset)
  ParamNumber <- lapply(seq_len(nParams), function(i) ReadInteger(myFile))
  # 0 if estimated, 1 if constrained
  ParamType <- lapply(seq_len(nParams), function(i) ReadInteger(myFile))
  Sign <- readChar(myFile, 1)
  Label <- ReadString(myFile)
  list(VariableNumber = VariableNumber, VariableType = VariableType,
       ParamNumber = ParamNumber, ParamType = ParamType,
       Sign = Sign, Label = Label)
}
#' @title ReadTermsList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of model-term records, in stream order.
#' @keywords internal
ReadTermsList <- function(myFile)
{
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadTerms(myFile))
}
#' @title ReadVarInfo
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "VarNumber" and "VarType" (integers).
#' @keywords internal
ReadVarInfo <- function(myFile)
{
  varNum <- ReadInteger(myFile)
  varType <- ReadInteger(myFile)
  list(VarNumber = varNum, VarType = varType)
}
#' @title ReadCodeList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "VarInfo" (a variable-info record) and
#'   "S" (a list of code strings).
#' @keywords internal
ReadCodeList <- function(myFile)
{
  info <- ReadVarInfo(myFile)
  codes <- ReadStringList(myFile)
  list(VarInfo = info, S = codes)
}
#' @title ReadIRecode
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "Before" (string list), "AfterList"
#'   (list of string lists), and "CList" (list of code-list records).
#' @keywords internal
ReadIRecode <- function(myFile)
{
  Before <- ReadStringList(myFile)
  nAfter <- ReadInteger(myFile)
  AfterList <- lapply(seq_len(nAfter), function(i) ReadStringList(myFile))
  nCodes <- ReadInteger(myFile)
  CList <- lapply(seq_len(nCodes), function(i) ReadCodeList(myFile))
  list(Before = Before, AfterList = AfterList, CList = CList)
}
#' @title ReadIRecodeList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of item-recode records, in stream order.
#' @keywords internal
ReadIRecodeList <- function(myFile)
{
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadIRecode(myFile))
}
#' @title ReadParameters
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "Levels" (integer list), "Step" (integer),
#'   and "Sign" (single character).
#' @keywords internal
ReadParameters <- function(myFile)
{
  levelList <- ReadIntegerList(myFile)
  stepNum <- ReadInteger(myFile)
  signChar <- readChar(myFile, 1)
  list(Levels = levelList, Step = stepNum, Sign = signChar)
}
#' @title ReadParametersList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of parameter records, in stream order.
#' @keywords internal
ReadParametersList <- function(myFile)
{
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadParameters(myFile))
}
#' @title ReadCategorise
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "Type", "Varnum" (integers), "ValueList"
#'   (string list), and "MatchType" (integer list).
#' @keywords internal
ReadCategorise <- function(myFile)
{
  catType <- ReadInteger(myFile)
  varNum <- ReadInteger(myFile)
  values <- ReadStringList(myFile)
  matches <- ReadIntegerList(myFile)
  list(Type = catType, Varnum = varNum, ValueList = values, MatchType = matches)
}
#' @title ReadCategoriseList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of categorise records, in stream order.
#' @keywords internal
ReadCategoriseList <- function(myFile)
{
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadCategorise(myFile))
}
#' @title ReadFit
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A named list of nine fit statistics (doubles) followed by a
#'   "Failed" flag (logical).
#' @keywords internal
ReadFit <- function(myFile)
{
  # the nine doubles are stored on the stream in exactly this order
  fields <- c("UnWeightedMNSQ", "UnWeightedtfit", "WeightedCW2", "WeightedMNSQ",
              "Weightedtfit", "WeightedNumerator", "WeightedDenominator",
              "UnWeightedSE", "WeightedSE")
  V <- lapply(fields, function(f) ReadDouble(myFile))
  names(V) <- fields
  V$Failed <- ReadBoolean(myFile)
  V
}
#' @title ReadFitList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "Names" (list of strings) and "Value"
#'   (list of fit records), parallel and in stream order.
#' @keywords internal
ReadFitList <- function(myFile)
{
  N <- ReadInteger(myFile)
  Names <- list()
  Value <- list()
  # each entry is a name string followed immediately by its fit record
  for (i in seq_len(N))
  {
    Names[[i]] <- ReadString(myFile)
    Value[[i]] <- ReadFit(myFile)
  }
  # NOTE: a redundant pre-loop construction of V (dead code) was removed;
  # the returned structure is unchanged
  V <- list(Names = Names, Value = Value)
  return(V)
}
#' @title ReadRegression
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "Varnum" (integer) and "ValueList"
#'   (string list).
#' @keywords internal
ReadRegression <- function(myFile)
{
  varNum <- ReadInteger(myFile)
  values <- ReadStringList(myFile)
  list(Varnum = varNum, ValueList = values)
}
#' @title ReadRegressionListLeg
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of regression records, in stream order (legacy layout,
#'   without the random-group trailer read by ReadRegressionList).
#' @keywords internal
ReadRegressionListLeg <- function(myFile)
{
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadRegression(myFile))
}
#' @title ReadRegressionList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "RegVars" (list of regression records),
#'   "RandGroupName" (string), and "RandGroupExists" (logical).
#' @keywords internal
ReadRegressionList <- function(myFile)
{
  n <- ReadInteger(myFile)
  regVars <- lapply(seq_len(n), function(i) ReadRegression(myFile))
  # newer layout appends random-group metadata after the regression records
  groupName <- ReadString(myFile)
  groupExists <- ReadBoolean(myFile)
  list(RegVars = regVars, RandGroupName = groupName, RandGroupExists = groupExists)
}
#' @title ReadItemSet
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "Name" (string) and "ValueList"
#'   (integer list).
#' @keywords internal
ReadItemSet <- function(myFile)
{
  setName <- ReadString(myFile)
  members <- ReadIntegerList(myFile)
  list(Name = setName, ValueList = members)
}
#' @title ReadItemSetList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of item-set records, in stream order.
#' @keywords internal
ReadItemSetList <- function(myFile)
{
  n <- ReadInteger(myFile)
  lapply(seq_len(n), function(i) ReadItemSet(myFile))
}
#' @title ReadHistory
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A named list of parallel lists, one entry per iteration record:
#'   run number, iteration, likelihood, and six parameter/variance matrices.
#' @keywords internal
ReadHistory <- function(myFile)
{
  n <- ReadInteger(myFile)
  # preallocate the parallel result lists
  RunNo <- vector("list", n)
  Iter <- vector("list", n)
  Likelihood <- vector("list", n)
  Beta <- vector("list", n)
  WithinVariance <- vector("list", n)
  Xsi <- vector("list", n)
  Tau <- vector("list", n)
  RanTermVariance <- vector("list", n)
  BetweenVariance <- vector("list", n)
  for (i in seq_len(n))
  {
    # each record is read strictly in this on-stream order
    RunNo[[i]] <- ReadInteger(myFile)
    Iter[[i]] <- ReadInteger(myFile)
    Likelihood[[i]] <- ReadDouble(myFile)
    Beta[[i]] <- ReadMatrix(myFile)
    WithinVariance[[i]] <- ReadMatrix(myFile)
    Xsi[[i]] <- ReadMatrix(myFile)
    Tau[[i]] <- ReadMatrix(myFile)
    RanTermVariance[[i]] <- ReadMatrix(myFile)
    BetweenVariance[[i]] <- ReadMatrix(myFile)
  }
  V <- list(RunNo, Iter, Likelihood, Beta, WithinVariance, Xsi, Tau,
            RanTermVariance, BetweenVariance)
  # the within-case variance is exposed under the historical name "Variance"
  names(V) <- c("RunNo", "Iter", "Likelihood", "Beta", "Variance", "Xsi", "Tau",
                "RanTermVariance", "BetweenVariance")
  V
}
#' @title ReadEstimatesRecord
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @param Dimensions An integer representation of 'ACER ConQuest' object gNDim.
#' @param NPlausibles An integer representation of 'ACER ConQuest' object gNPlausibles.
#' @param n An integer representation of 'ACER ConQuest' object gNCases.
#' @return A named list of one case's estimates: EAP/WLE/JML point estimates and
#'   error (co)variance matrices, plausible values, scores, fit, and weight.
#'   "pid" is the 1-offset case sequence number passed in as `n`.
#' @keywords internal
ReadEstimatesRecord <- function(myFile, Dimensions, NPlausibles, n)
{
  # helpers: read a run of doubles / a row-major matrix of doubles.
  # All objects are created unconditionally (the previous version only created
  # them when Dimensions > 0 and NPlausibles > 0, which made the final list
  # construction fail with "object not found" in the degenerate cases).
  readVec <- function(len) {
    vapply(seq_len(len), function(i) ReadDouble(myFile), numeric(1))
  }
  readMat <- function(nr, nc) {
    m <- matrix(NA_real_, nrow = nr, ncol = nc)
    for (r in seq_len(nr)) {
      for (c in seq_len(nc)) {
        m[r, c] <- ReadDouble(myFile)
      }
    }
    m
  }
  # the record layout on the stream, in order:
  alla_eap <- readVec(Dimensions)
  alla_eaperr <- readVec(Dimensions)
  eap <- readVec(Dimensions)
  eaperr <- readMat(Dimensions, Dimensions)
  wle <- readVec(Dimensions)
  wleerr <- readMat(Dimensions, Dimensions)
  pvs <- readMat(Dimensions, NPlausibles)  # one row per dimension, one column per PV
  jml <- readVec(Dimensions)
  jmlerr <- readMat(Dimensions, Dimensions)
  scores <- readVec(Dimensions)
  maxscores <- readVec(Dimensions)
  fit <- ReadDouble(myFile)
  weight <- ReadDouble(myFile)
  pid <- n
  V <- list(
    pid, alla_eap, alla_eaperr, eap, eaperr, wle,
    wleerr, pvs, jml, jmlerr, scores, maxscores, fit, weight
  )
  names(V) <- c(
    "pid", "alla_eap", "alla_eaperr", "eap", "eaperr", "wle",
    "wleerr", "pvs", "jml", "jmlerr", "scores", "maxscores", "fit", "weight"
  )
  return(V)
}
#' @title ReadAllCaseEstimates
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @param Dimensions An integer representation of 'ACER ConQuest' object gNDim.
#' @param N An integer representation of 'ACER ConQuest' object gNCases
#' @param NPlausibles An integer representation of 'ACER ConQuest' object gNPlausibles.
#' @return A list of N per-case estimate records.
#' @keywords internal
ReadAllCaseEstimates <- function(myFile, Dimensions, N, NPlausibles)
{
  # the chain length stored on the file supersedes NPlausibles: later calls to
  # estimate can update it, so gNPlausibles is not the PV count of the last run
  chainLen <- ReadInteger(myFile)
  lapply(seq_len(N), function(caseIdx) {
    ReadEstimatesRecord(
      myFile = myFile,
      Dimensions = Dimensions,
      NPlausibles = chainLen,
      n = caseIdx
    )
  })
}
#' @title ReadDataRecord
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list with elements "Pid", "Rsp", "Item", "PreKeyRsp", "RspFlag"
#'   (all integers).
#' @keywords internal
ReadDataRecord <- function(myFile)
{
  # the stored Pid is 0-offset (0 .. n-1); adding 1 aligns it with the
  # 1-offset case sequence number used in gAllCaseEstimates
  Pid <- ReadInteger(myFile) + 1
  Rsp <- ReadInteger(myFile)
  Item <- ReadInteger(myFile)
  PreKeyRsp <- ReadInteger(myFile)
  RspFlag <- ReadInteger(myFile)
  list(Pid = Pid, Rsp = Rsp, Item = Item, PreKeyRsp = PreKeyRsp, RspFlag = RspFlag)
}
#' @title ReadAllResponseData
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @param N An integer representation of 'ACER ConQuest' object gNCases.
#' @return A list of N response-data records, in stream order.
#' @keywords internal
ReadAllResponseData <- function(myFile, N)
{
  lapply(seq_len(N), function(i) ReadDataRecord(myFile))
}
#' @title ReadADesignMatrices
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @param Columns An integer representation of 'ACER ConQuest' object gNParameters.
#' @param Items An integer representation of 'ACER ConQuest' object gNGins.
#' @param ItemSteps An integer representation of 'ACER ConQuest' object gItemSteps.
#' @return A list
#' @keywords internal
#' @description ReadSys Read the A design matrix (A list of length gNGins of matrices. For each matrix the number of rows is gItemSteps (for that item) and the number of columns is gNParameters_C).
ReadADesignMatrices <- function(myFile, Columns, Items, ItemSteps)
{
  V <- list()
  # no parameters means no payload follows on the stream
  if (Columns == 0) return(V)
  for (i in seq_len(Items))
  {
    steps <- ItemSteps[[i]]
    if (steps > 0)
    {
      # allocate with NA_real_ (the old `1:steps * Columns` init was an
      # overwritten placeholder); values are stored row-major on the stream
      V[[i]] <- matrix(NA_real_, nrow = steps, ncol = Columns)
      for (r in seq_len(steps))
      {
        for (c in seq_len(Columns))
        {
          V[[i]][r, c] <- ReadDouble(myFile)
        }
      }
    }
    else
    {
      # zero-step items keep a 1x1 NA placeholder matrix
      V[[i]] <- matrix()
    }
  }
  return(V)
}
#' @title ReadBDesignMatrices
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @param Items An integer representation of 'ACER ConQuest' object gNGins.
#' @param ItemSteps An integer representation of 'ACER ConQuest' object gItemSteps.
#' @return A list
#' @keywords internal
#' @description ReadSys Read the B design matrix (A list of length gNGins of lists, one per item. For each item a list of length gItemSteps of matrices).
ReadBDesignMatrices <- function(myFile, ItemSteps, Items)
{
  V <- vector("list", Items)  # 2D list of matrices: item, then step
  for (i in seq_len(Items))
  {
    # zero-step items still carry exactly one matrix on the stream
    nSteps <- ItemSteps[[i]]
    if (nSteps == 0) nSteps <- 1
    V[[i]] <- lapply(seq_len(nSteps), function(k) ReadMatrix(myFile))
  }
  V
}
#' @title ReadCDesignMatrices
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @param Dimensions An integer representation of 'ACER ConQuest' object gNDim.
#' @param Items An integer representation of 'ACER ConQuest' object gNGins.
#' @param ItemSteps An integer representation of 'ACER ConQuest' object gItemSteps.
#' @return A list
#' @keywords internal
#' @description ReadSys Read the C design matrix (A list of length gNGins of lists, one per item. For each item a list of length gItemSteps of matrices).
ReadCDesignMatrices <- function(myFile, Dimensions, ItemSteps, Items)
{
  # nested result: dimension -> item -> step -> matrix
  V <- vector("list", Dimensions)
  for (d in seq_len(Dimensions))
  {
    V[[d]] <- vector("list", Items)
    for (i in seq_len(Items))
    {
      # zero-step items still carry exactly one matrix per dimension
      nSteps <- ItemSteps[[i]]
      if (nSteps == 0) nSteps <- 1
      V[[d]][[i]] <- lapply(seq_len(nSteps), function(k) ReadMatrix(myFile))
    }
  }
  V
}
#' @title ReadYOneCase
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @param NReg An integer representing the number of regressors in the model, a representation of 'ACER ConQuest' object gNReg.
#' @return A list with elements "Weight" (double) and "Y" (double vector of
#'   length NReg).
#' @keywords internal
ReadYOneCase <- function(myFile, NReg)
{
  # case weight precedes the regressor values on the stream
  caseWeight <- ReadDouble(myFile)
  regressors <- vapply(seq_len(NReg), function(i) ReadDouble(myFile), numeric(1))
  list(Weight = caseWeight, Y = regressors)
}
#' @title ReadAllY
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @param N An integer representation of 'ACER ConQuest' object gNCases
#' @param NReg An integer representing the number of regressors in the model, a representation of 'ACER ConQuest' object gNReg.
#' @return A list of N per-case weight/regressor records, in stream order.
#' @keywords internal
ReadAllY <- function(myFile, N, NReg)
{
  lapply(seq_len(N), function(caseIdx) ReadYOneCase(myFile, NReg = NReg))
}
#' @title ReadGroupsOneCase
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @param GroupVariables A list of group variables for this case.
#' @param AllVariables A list of variables for this case.
#' @param CaseNum An integer representing the case number used in the lookup tables.
#' @return A list with elements "CaseNum" and "GData" (character vector of the
#'   raw group-variable fields), or an empty list when there are no group
#'   variables.
#' @keywords internal
ReadGroupsOneCase <- function(myFile, GroupVariables, AllVariables, CaseNum)
{
  # one byte always precedes the record; it must be consumed even when the
  # record is otherwise empty (value unused)
  Dummy <- readChar(myFile, 1)
  nGroupVars <- length(GroupVariables$XV)
  if (nGroupVars == 0) return(list())
  groupData <- character(nGroupVars)
  for (k in seq_len(nGroupVars))
  {
    varIdx <- GroupVariables$XV[[k]]
    varDef <- AllVariables[[varIdx + 1]]  # XV indices are zero-offset
    fieldWidth <- varDef$End[[1]] - varDef$Begin[[1]] + 1
    # readCharSafe tolerates embedded nuls, which system files emit for
    # missing group values (" ", ".")
    groupData[k] <- readCharSafe(myFile, fieldWidth)
  }
  list(CaseNum = CaseNum, GData = groupData)
}
#' @title ReadAllGroupsData
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @param N An integer representation of 'ACER ConQuest' object gNCases.
#' @param GroupVariables A list of group variables.
#' @param AllVariables A list of variables.
#' @return A list
#' @keywords internal
ReadAllGroupsData <- function(myFile,N,GroupVariables,AllVariables)
{
V <- list()
for (n in seq_len(N))
{
V[[n]] <- ReadGroupsOneCase(
myFile = myFile,
GroupVariables = GroupVariables,
AllVariables = AllVariables,
CaseNum = n
)
}
#print(paste0("exited on casenum = ", n)) # debug
#print(paste0("N was passed in as = ", N)) # debug
return(V)
}
#' @title ReadMatrixVars
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list
#' @keywords internal
ReadMatrixVars <- function(myFile)
{
# intitate lists
m <- list()
# read length of list of matrix objects
nMatricies <- ReadInteger(myFile)
for (n in seq_len(nMatricies))
{
myTempName <- ReadString(myFile)
m[[myTempName]] <- ReadMatrix(myFile)
}
return(m)
}
#' @title ReadPoint
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list
#' @keywords internal
ReadPoint <- function(myFile)
{
x <- ReadDouble(myFile)
y <- ReadDouble(myFile)
z <- ReadDouble(myFile)
Label <- ReadString(myFile)
Point <- list(x,y,z,Label)
names(Point)<-c("x","y","z","Label")
return(Point)
}
#' @title ReadSeries
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list
#' @keywords internal
ReadSeries <- function(myFile)
{
SType <- ReadInteger(myFile)
# print(paste0("SType = ", SType))
PointCount <- ReadInteger(myFile)
# print(PointCount)
Points <- list()
for (i in seq_len(PointCount))
{
# print(p("point number ",i))
Points[[i]]=ReadPoint(myFile)
}
MinX <- ReadDouble(myFile)
MaxX <- ReadDouble(myFile)
MinY <- ReadDouble(myFile)
MaxY <- ReadDouble(myFile)
Name <- ReadString(myFile)
DrawSeries <- ReadBoolean(myFile)
DrawPoints <- ReadBoolean(myFile)
JoinPoints <- ReadBoolean(myFile)
LabelPoints <- ReadBoolean(myFile)
LineWidth <- ReadInteger(myFile)
PointColour <- ReadInteger(myFile)
LineColour <- ReadInteger(myFile)
PointStyle <- ReadInteger(myFile)
LabelStyle <- ReadInteger(myFile)
LineStyle <- ReadInteger(myFile)
Series = list(
SType = SType,
PointCount = PointCount,
Points = Points,
MinX = MinX,
MaxX = MaxX,
MinY = MinY,
MaxY = MaxY,
Name = Name,
DrawSeries = DrawSeries,
DrawPoints = DrawPoints,
JoinPoints = JoinPoints,
LabelPoints = LabelPoints,
LineWidth = LineWidth,
PointColour = PointColour,
LineColour = LineColour,
PointStyle = PointStyle,
LabelStyle = LabelStyle,
LineStyle = LineStyle
)
# print(Series)
return(Series)
}
#' @title ReadGraph
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list
#' @keywords internal
ReadGraph <- function(myFile)
{
GType <- ReadInteger(myFile)
# print(GType)
NSeries <- ReadInteger(myFile)
#print(NSeries)
Series <- list()
for (i in seq_len(NSeries))
{
#print("Series ",i)
Series[[i]] <- ReadSeries(myFile)
}
MinX <- ReadDouble(myFile)
MaxX <- ReadDouble(myFile)
MinY <- ReadDouble(myFile)
MaxY <- ReadDouble(myFile)
PointColourIndex <- ReadInteger(myFile)
LineColourIndex <- ReadInteger(myFile)
PointStyleIndex <- ReadInteger(myFile)
FreeTextCount <- ReadInteger(myFile)
L <- ReadInteger(myFile)
GraphTitleText <- ReadString(myFile)
GraphSubTitleText <- ReadString(myFile)
xAxisLabelText <- ReadString(myFile)
yAxisLabelText <- ReadString(myFile)
DifficultyLabelText <- ReadString(myFile)
FitLabelText <- ReadString(myFile)
NStrings <- L-6;
Strings <- list()
# print(p("other strings ",NStrings))
for (i in seq_len(NStrings))
{
Strings[[i]]=ReadPoint(myFile)
}
Graph <- list(
GType = GType,
NSeries = NSeries,
Series = Series,
MinX = MinX,
MaxX = MaxX,
MinY = MinY,
MaxY = MaxY,
PointColourIndex = PointColourIndex,
LineColourIndex = LineColourIndex,
PointStyleIndex = PointStyleIndex,
GraphTitleText = GraphTitleText,
GraphSubTitleText = GraphSubTitleText,
xAxisLabelText = xAxisLabelText,
yAxisLabelText = yAxisLabelText,
DifficultyLabelText = DifficultyLabelText,
FitLabelText = FitLabelText,
NStrings = NStrings,
Strings = Strings
)
return(Graph)
}
#' @title ReadRandomStructure
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list
#' @keywords internal
ReadRandomStructure <- function(myFile)
{
randomStructureList <- list()
termName <- ReadString(myFile)
termNumber <- ReadInteger(myFile)
randomL <- ReadBooleanList(myFile)
randomV <- ReadMatrix(myFile)
randomStructureList[["termName"]] <- termName
randomStructureList[["termNumber"]] <- termNumber
randomStructureList[["randomL"]] <- randomL
randomStructureList[["randomV"]] <- randomV
return(randomStructureList)
}
#' @title ReadGExportOptions
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of a single ExportOption (e.g., used in ExportXsi())
#' @keywords internal
ReadGExportOptions <- function(myFile)
{
FileFormat <- ReadInteger(myFile)
Sort <- ReadInteger(myFile)
DataType <- ReadInteger(myFile)
FileName <- ReadString(myFile)
V <- list(FileFormat, Sort, DataType, FileName)
names(V) <- c("File Format", "Sort", "Data Type", "File Name")
return(V)
}
#' @title ReadGExportOptionsList
#' @param myFile An uncompressed 'ACER ConQuest' system file created by 'ACER ConQuest'.
#' @return A list of ExportOptions
#' @keywords internal
ReadGExportOptionsList <- function(myFile)
{
N <- ReadInteger(myFile)
Value <- list()
for (i in seq_len(N))
{
Value[[i]] <- ReadGExportOptions(myFile)
}
return(Value)
} |
8a1cd2d920c044b3d29946e307ae1bd8a3fef10c | d0ff71c623df1a81500a76fb29d6171aa3816686 | /plot5.R | bf3678f5e42c537cfd89cce43476f28032a85847 | [] | no_license | wayneheller/ExploratoryDataFinalProject | 9476ae8aab1be64c75c085d7784d218e7495ec0f | 17ec1bf2101588e90529c8889b3b97cff3b50097 | refs/heads/master | 2021-01-21T05:15:41.125556 | 2017-02-27T13:03:18 | 2017-02-27T13:03:18 | 83,163,000 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,330 | r | plot5.R | ################################################################################
# Exploratory Data Analysis - John's Hopkins University | Coursera #
# Week 4 Course Project #
# Wayne Heller #
# 2/25/2017 #
# Assignment- to recreate plots from the EPA National Emissions Inventory #
# #
# #
# Key Assumptions: #
# Data file summarySCC_PM25.rds is in the working directory #
# Libaries ggplot2 and dplyr have been loaded #
################################################################################
# Loads the data.frame from the source file
# First checks for the existence of the file and prints an error message if
# not found
readEmissionsData <- function() {
if(file.exists("summarySCC_PM25.rds")){
NEI <- readRDS("summarySCC_PM25.rds")
return(NEI)
}
else {
print("Set Working Directory to location of summarySCC_PM25.rds")
return(FALSE)
}
}
readSourceClassificationCodes <- function() {
if(file.exists("Source_Classification_Code.rds")){
SCC <- readRDS("Source_Classification_Code.rds")
return(SCC)
}
else {
print("Set Working Directory to location of Source_Classification_Code.rds")
return(FALSE)
}
}
# Assignment: How have emissions from motor vehicle sources changed from
# 1999–2008 in Baltimore City?
createPlot5 <- function() {
dfSCC <- readSourceClassificationCodes()
# check for error: data file not found
if(class(dfSCC)!="data.frame") {
stop()
}
# Find the SCC codes where the SCC.Level.One contains "Mobile Sources"
motorVehicleSCCCodes <- dfSCC$SCC[grep("Mobile Sources", dfSCC$SCC.Level.One,
fixed = TRUE)]
# Read in the dataset
dfNEI <- readEmissionsData()
# check for error: data file not found
if(class(dfNEI)!="data.frame") {
stop()
}
# filter the results to just those from Motor Vehicles in Baltimore City
dfNEIFiltered <- dfNEI[dfNEI$SCC %in% motorVehicleSCCCodes &
dfNEI$fips=="24510", ]
# Group By Year
byYear <- group_by(dfNEIFiltered, year)
# Sum up all the Emissions
dfSum <- summarize(byYear, sum(Emissions))
# Rename columns for convenience
names(dfSum) <- c('year', 'emissions')
# Plot the total amount of emissions by year in Kilotons breaking up each
# Type into a facet
p <- ggplot(dfSum, aes(year, emissions / 1000)) + geom_point() +
geom_smooth(method = "lm", col="red", se=FALSE) +
labs(y="PM2.5 Emissions (Kilotons)", x="Year",
title="PM2.5 Emissions Have Decreased in Baltimore City, MD from 1999 to 2008",
subtitle="From All Motor Vehicle Sources")
ggsave("plot5.png", plot = p, units= "in", width = 11.0, height= 5.5)
} |
067a4de3c824e8ed3a9a4605509f86b5c69b6b40 | 68ea8cc5ca78458c13a6c26020fcd2767c6584d0 | /Rough work/function_all_distances.R | 3ff4d536b3401207c96c6c82e29e2f8c734e9034 | [] | no_license | elsie-h/ward_gower | dfb7ec9a621d8f1e011ae5d4e834a797c14a9ab1 | da88ff8b471c269e79ea318eb3bc1a3f030ba0a1 | refs/heads/master | 2021-06-10T12:41:50.738182 | 2021-04-07T16:00:17 | 2021-04-07T16:00:17 | 166,024,208 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,740 | r | function_all_distances.R | ## Author: Elsie Horne
## Date created: 21st Jan 2019
## Description: function for direct calculation of Ward's with Gower measure
## Packages:
library(tidyverse)
library(cluster) # for agnes and daisy
library(arrangements) # for combinations
# re-write this function to reduce the number of distance matrices that are calculated
# i.e. store all the means of possible merges, and then calculate one distance matrix,
# rather than calculating a new distance matrix for each potential merge
################################################################################
# my_wards function
################################################################################
my_wards <- function(x, dist) {
# function to compute the error sum of squares for cluster C
# sum of gower distances between all samples in a cluster to the cluster centroid (mean)
ess_direct <- function(C) {
C <- unlist(str_split(C, ","))
if (length(C) == 1)
return(0)
else {
mean_i <- nrow(x) + 1 # the index for the mean row
x_mean <- colMeans(x[C, ]) # compute mean of cluster C
x_C <- rbind(x, x_mean) # samples and mean in one dataset
d_C <-
daisy(x_C, metric = "euclidean", stand = FALSE) # compute distances
d_C <-
as.matrix(d_C)[mean_i, C] # keep only the row of distances to mean and columns in cluster
return(sum(d_C * d_C)) # return sum over square of all distances to mean
}
}
# function to compute the error sum of squares for merging two clusters in list L
change_ess_direct <- function(L) {
ess_direct(c(L[1], L[2])) - ess_direct(L[1]) - ess_direct(L[2])
}
levs <- nrow(x) - 1
merges <- vector(mode = "list", length = levs)
names <- vector(mode = "list", length = levs)
clusters <- as.character(1:nrow(x))
for (i in 1:levs) {
combos <- as.data.frame(t(combinations(x = clusters, k = 2))) %>%
mutate_all(as.character) # store as character for when clusters get bigger
d_combos <- lapply(combos, change_ess_direct)
names(d_combos) <-
unname(apply(as.matrix(combos), 2, function(x)
str_c(x, collapse = " and ")))
d_combos <- unlist(d_combos)
d_combos <- (2 * d_combos) ^ 0.5 # to match the distances in AGNES
d_min <- min(d_combos)
c_rem <- combos[d_combos == d_min] # clusters to combine
# merges[i] <- list(d_combos) # store the distance between the merging clusters
merges[i] <- d_min # if only store the minimum ditance
c_rem <- as.character(unlist(c_rem))
c_new <- str_c(unlist(c_rem), collapse = ",")
clusters <-
clusters[!(clusters %in% c_rem)] # remove the merged clusters
clusters <- c(clusters, c_new) # add new merged cluster
names[i] <- str_c(c_rem, collapse = " and ")
# merges[i][[1]] <- sort(merges[i][[1]]) # if storing all distances
}
names(merges) <- names
return(merges)
}
################################################################################
# example data
set.seed(898)
rows <- sample(1:nrow(iris), size = 5)
x <- iris[rows, 1:2] # sample 5 lines of iris as example data
x <- as.data.frame(scale(x)) # standardise to mean = 0 sd = 1
rownames(x) <- as.character(1:nrow(x))
merges_gow <- my_wards(x, dist = "gower")
merges_euc <- my_wards(x, dist = "euclidean")
dist_euc <- daisy(x, metric = "euclidean", stand = FALSE)
ag_euc <- agnes(dist_euc, method = "ward", stand = FALSE)
sort(ag_euc$height)
plot(ag_euc)
dist_gow <- daisy(x, metric = "gower", stand = FALSE)
ag_gow <- agnes(dist_gow, method = "ward", stand = FALSE)
sort(ag_gow$height)
plot(ag_gow)
# visualise example data
x %>% ggplot(aes(x = Sepal.Length, y = Sepal.Width, label = rownames(x))) +
geom_point(shape=15,color="white",size=6)+geom_text()
|
ff3d4447f2bc0c30f16a7c5c7c58e29a13d4af70 | 79edc8a9d34a8ff70bf6b977d4545bfe741dc739 | /R/plot.fitted.bsam.R | b2c6e42a3e501dbeca58fa512d4a6efcae75d69f | [] | no_license | cran/bsamGP | ed52b508d20ca806f2878c208f64dd76aae98ce9 | 9ecc1d5140fc0df05415675a141c52c07ff2f641 | refs/heads/master | 2022-04-30T04:38:58.685326 | 2022-03-17T19:40:02 | 2022-03-17T19:40:02 | 99,417,918 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 17,541 | r | plot.fitted.bsam.R | "plot.fitted.bsam" <- function(x, type = "response", ask = FALSE, ggplot2 = TRUE, legend.position = "none", ...) {
yobs <- x$y
xobs <- x$x
nobs <- x$n
nfun <- x$nfun
smcmc <- x$mcmc$smcmc
xgrid <- x$fit.draws$xgrid
ngrid <- x$nint + 1
wbm <- x$wbeta$mean
fxobsm <- x$fxobs$mean
fxobsl <- x$fxobs$lower
fxobsu <- x$fxobs$upper
fxgridm <- x$fxgrid$mean
fxgridl <- x$fxgrid$lower
fxgridu <- x$fxgrid$upper
prob <- (1 - x$alpha) * 100
HPD <- x$HPD
xname <- x$xname
shape <- x$shape
par(ask = ask, ...)
if (ggplot2) {
if (nfun == 1) {
if (HPD) {
datl <- data.frame(x = rep(xgrid, 3), fx = c(fxgridu, fxgridm, fxgridl),
Estimates = c(rep(paste(prob, "% HPD UCI", sep = ""), ngrid),
rep("Posterior Mean", ngrid),
rep(paste(prob, "% HPD LCI", sep = ""), ngrid)))
dato <- data.frame(x = rep(xobs, 3), fx = c(fxobsu, fxobsm, fxobsl),
Estimates = c(rep(paste(prob, "% HPD UCI", sep = ""), nobs),
rep("Posterior Mean", nobs),
rep(paste(prob, "% HPD LCI", sep = ""), nobs)))
} else {
datl <- data.frame(x = rep(xgrid, 3), fx = c(fxgridu, fxgridm, fxgridl),
Estimates = c(rep(paste(prob, "% Equal-tail UCI", sep = ""), ngrid),
rep("Posterior Mean", ngrid),
rep(paste(prob, "% Equal-tail LCI", sep = ""), ngrid)))
dato <- data.frame(x = rep(xobs, 3), fx = c(fxobsu, fxobsm, fxobsl),
Estimates = c(rep(paste(prob, "% Equal-tail UCI", sep = ""), nobs),
rep("Posterior Mean", nobs),
rep(paste(prob, "% Equal-tail LCI", sep = ""), nobs)))
}
plt1 <- ggplot(datl)
plt1 <- plt1 + aes_string(x = 'x', y = 'fx')
plt1 <- plt1 + aes_string(group = 'Estimates')
plt1 <- plt1 + aes_string(shape = 'Estimates', linetype = 'Estimates', colour = 'Estimates')
plt1 <- plt1 + geom_line(size = 0.8)
plt1 <- plt1 + xlab(x$xname[1])
plt1 <- plt1 + ylab(paste('f(', x$xname[1], ')', sep = ''))
plt1 <- plt1 + theme_bw()
plt1 <- plt1 + theme(legend.position = legend.position)
plt1 <- plt1 + scale_linetype_manual(values = c("dotdash", "dotdash", "solid"))
if (x$model == "gbsar") {
if (x$family == "bernoulli") {
if (x$link == 'probit') {
dato$fx = pnorm(c(rep(median(x$wbeta$upper), nobs),
rep(median(x$wbeta$mean), nobs),
rep(median(x$wbeta$lower), nobs)) + dato$fx)
} else {
logit <- function(xx) 1 / (1 + exp(-xx))
dato$fx = logit(c(rep(median(x$wbeta$upper), nobs),
rep(median(x$wbeta$mean), nobs),
rep(median(x$wbeta$lower), nobs)) + dato$fx)
}
datp <- data.frame(x = xobs[, 1], y = yobs, Estimates = rep("Observations", nobs))
plt2 <- ggplot(dato)
plt2 <- plt2 + geom_point(data = datp, mapping = aes_string(x = 'x', y = 'y'), shape = 21, alpha = 0.6)
plt2 <- plt2 + aes_string(x = 'x', y = 'fx')
plt2 <- plt2 + aes_string(group = 'Estimates')
plt2 <- plt2 + aes_string(shape = 'Estimates', linetype = 'Estimates', colour = 'Estimates')
plt2 <- plt2 + geom_line(size = 0.8)
plt2 <- plt2 + xlab(x$xname[1])
plt2 <- plt2 + theme_bw()
plt2 <- plt2 + theme(legend.position = legend.position)
plt2 <- plt2 + ylab(paste('P(', x$yname[1], ')', sep = ''))
plt2 <- plt2 + ylim(c(0,1))
plt2 <- plt2 + scale_linetype_manual(values = c("dotdash", "dotdash", "solid", "solid"))
} else {
dato$fx = exp(c(rep(median(x$wbeta$upper), nobs),
rep(median(x$wbeta$mean), nobs),
rep(median(x$wbeta$lower), nobs)) + dato$fx)
datp <- data.frame(x = xobs[, 1], y = yobs, Estimates = rep("Observations", nobs))
plt2 <- ggplot(dato)
plt2 <- plt2 + geom_point(data = datp, mapping = aes_string(x = 'x', y = 'y'), shape = 21, alpha = 0.6)
plt2 <- plt2 + aes_string(x = 'x', y = 'fx')
plt2 <- plt2 + aes_string(group = 'Estimates')
plt2 <- plt2 + aes_string(shape = 'Estimates', linetype = 'Estimates', colour = 'Estimates')
plt2 <- plt2 + geom_line(size = 0.8)
plt2 <- plt2 + xlab(x$xname[1])
plt2 <- plt2 + theme_bw()
plt2 <- plt2 + theme(legend.position = legend.position)
plt2 <- plt2 + ylab(x$yname[1])
plt2 <- plt2 + scale_linetype_manual(values = c("dotdash", "dotdash", "solid", "solid"))
}
} else {
datp <- data.frame(x = xobs[, 1], y = yobs - wbm, Estimates = rep("Parametric Residuals", nobs))
plt2 <- ggplot(dato)
plt2 <- plt2 + geom_point(data = datp, mapping = aes_string(x = 'x', y = 'y'), shape = 21, alpha = 0.6)
plt2 <- plt2 + aes_string(x = 'x', y = 'fx')
plt2 <- plt2 + aes_string(group = 'Estimates')
plt2 <- plt2 + aes_string(shape = 'Estimates', linetype = 'Estimates', colour = 'Estimates')
plt2 <- plt2 + geom_line(size = 0.8)
plt2 <- plt2 + xlab(x$xname[1])
plt2 <- plt2 + theme_bw()
plt2 <- plt2 + theme(legend.position = legend.position)
plt2 <- plt2 + ylab('Parametric Residuals')
plt2 <- plt2 + scale_linetype_manual(values = c("dotdash", "dotdash", "solid", "solid"))
}
grid.arrange(plt1, plt2, nrow = 2)
} else {
for (i in 1:nfun) {
# plot.new()
if (HPD) {
datl <- data.frame(x = rep(xgrid[, i], 3), fx = c(fxgridu[, i], fxgridm[, i], fxgridl[, i]),
Estimates = c(rep(paste(prob, "% HPD UCI", sep = ""), ngrid),
rep("Posterior Mean", ngrid),
rep(paste(prob, "% HPD LCI", sep = ""), ngrid)))
dato <- data.frame(x = rep(xobs[, i], 3), fx = c(fxobsu[, i], fxobsm[, i], fxobsl[, i]),
Estimates = c(rep(paste(prob, "% HPD UCI", sep = ""), nobs),
rep("Posterior Mean", nobs),
rep(paste(prob, "% HPD LCI", sep = ""), nobs)))
} else {
datl <- data.frame(x = rep(xgrid[, i], 3), fx = c(fxgridu[, i], fxgridm[, i], fxgridl[, i]),
Estimates = c(rep(paste(prob, "% Equal-tail UCI", sep = ""), ngrid),
rep("Posterior Mean", ngrid),
rep(paste(prob, "% Equal-tail LCI", sep = ""), ngrid)))
dato <- data.frame(x = rep(xobs[, i], 3), fx = c(fxobsu[, i], fxobsm[, i], fxobsl[, i]),
Estimates = c(rep(paste(prob, "% Equal-tail UCI", sep = ""), nobs),
rep("Posterior Mean", nobs),
rep(paste(prob, "% Equal-tail LCI", sep = ""), nobs)))
}
plt1 <- ggplot(datl)
plt1 <- plt1 + aes_string(x = 'x', y = 'fx')
plt1 <- plt1 + aes_string(group = 'Estimates')
plt1 <- plt1 + aes_string(shape = 'Estimates', linetype = 'Estimates', colour = 'Estimates')
plt1 <- plt1 + geom_line(size = 0.8)
plt1 <- plt1 + xlab(parse(text=x$xname[i]))
plt1 <- plt1 + ylab(paste('f(', x$xname[i], ')', sep = ''))
plt1 <- plt1 + theme_bw()
plt1 <- plt1 + theme(legend.position = legend.position)
plt1 <- plt1 + scale_linetype_manual(values = c("dotdash", "dotdash", "solid"))
if (x$model == "gbsar") {
if (x$family == "bernoulli") {
if (x$link == "probit") {
dato$fx = pnorm(dato$fx)
} else {
logit <- function(xx) 1 / (1 + exp(-xx))
dato$fx = logit(dato$fx)
}
datp <- data.frame(x = xobs[, i], y = yobs, Estimates = rep("Observations", nobs))
plt2 <- ggplot(dato)
plt2 <- plt2 + geom_point(data = datp, mapping = aes_string(x = 'x', y = 'y'), shape = 21, alpha = 0.6)
plt2 <- plt2 + aes_string(x = 'x', y = 'fx')
plt2 <- plt2 + aes_string(group = 'Estimates')
plt2 <- plt2 + aes_string(shape = 'Estimates', linetype = 'Estimates', colour = 'Estimates')
plt2 <- plt2 + geom_line(size = 0.8)
plt2 <- plt2 + xlab(x$xname[i])
plt2 <- plt2 + theme_bw()
plt2 <- plt2 + theme(legend.position = legend.position)
plt2 <- plt2 + ylab(paste('P(', x$yname[1], ')', sep = ''))
plt2 <- plt2 + ylim(c(0,1))
plt2 <- plt2 + scale_linetype_manual(values = c("dotdash", "dotdash", "solid", "solid"))
} else {
dato$fx = exp(dato$fx)
datp <- data.frame(x = xobs[, i], y = yobs, Estimates = rep("Observations", nobs))
plt2 <- ggplot(dato)
plt2 <- plt2 + geom_point(data = datp, mapping = aes_string(x = 'x', y = 'y'), shape = 21, alpha = 0.6)
plt2 <- plt2 + aes_string(x = 'x', y = 'fx')
plt2 <- plt2 + aes_string(group = 'Estimates')
plt2 <- plt2 + aes_string(shape = 'Estimates', linetype = 'Estimates', colour = 'Estimates')
plt2 <- plt2 + geom_line(size = 0.8)
plt2 <- plt2 + xlab(x$xname[i])
plt2 <- plt2 + theme_bw()
plt2 <- plt2 + theme(legend.position = legend.position)
plt2 <- plt2 + ylab(x$yname[1])
plt2 <- plt2 + scale_linetype_manual(values = c("dotdash", "dotdash", "solid", "solid"))
}
} else {
datp <- data.frame(x = xobs[, i], y = yobs - wbm - rowSums(fxobsm[, -i, drop = FALSE]),
Estimates = rep("Partial Residuals", nobs))
plt2 <- ggplot(dato)
plt2 <- plt2 + geom_point(data = datp, mapping = aes_string(x = 'x', y = 'y'), shape = 21, alpha = 0.6)
plt2 <- plt2 + aes_string(x = 'x', y = 'fx')
plt2 <- plt2 + aes_string(group = 'Estimates')
plt2 <- plt2 + aes_string(shape = 'Estimates', linetype = 'Estimates', colour = 'Estimates')
plt2 <- plt2 + geom_line(size = 0.8)
plt2 <- plt2 + xlab(parse(text=x$xname[i]))
plt2 <- plt2 + theme_bw()
plt2 <- plt2 + theme(legend.position = legend.position)
plt2 <- plt2 + ylab('Partial Residuals')
plt2 <- plt2 + scale_linetype_manual(values = c("dotdash", "dotdash", "solid", "solid"))
}
grid.arrange(plt1, plt2, nrow = 2)
}
}
} else {
if (nfun == 1) {
o = order(xobs[, 1])
if (x$model == 'gbsar') {
if (x$family == "bernoulli") {
if (x$link == 'probit') {
fxl = pnorm(fxobsl + rep(median(x$wbeta$upper), nobs))
fxm = pnorm(fxobsm + rep(median(x$wbeta$mean), nobs))
fxu = pnorm(fxobsu + rep(median(x$wbeta$lower), nobs))
} else {
logit <- function(xx) 1 / (1 + exp(-xx))
fxl = logit(fxobsl + rep(median(x$wbeta$upper), nobs))
fxm = logit(fxobsm + rep(median(x$wbeta$mean), nobs))
fxu = logit(fxobsu + rep(median(x$wbeta$lower), nobs))
}
switch(type,
term = {
plot(x = xgrid[, 1], y = fxgridm[, 1], main = '', pch = NA, ylim = range(x$fxgrid),
xlab = x$xname[1], ylab = paste('f(', x$xname, ')', sep = ''), ...)
polygon(x = c(xgrid[, 1], rev(xgrid[, 1])),
y = c(fxgridl[, 1], rev(fxgridu[, 1])), col = 'gray70', lty = 2)
lines(x = xgrid[, 1], y = fxgridm[, 1], lwd = 2, col = 2)
},
response = {
plot(x = xobs[, 1], y = yobs, pch = NA, xlab = x$xname[1], main = '',
ylab = paste('P(', x$yname, ')', sep = ''), ylim = c(0, 1), ...)
polygon(x = c(xobs[o, 1], rev(xobs[o, 1])),
y = c(fxl[o], rev(fxu[o])), col = 'gray70', lty = 2)
lines(x = xobs[o, 1], y = fxm[o], lwd = 2, col = 2)
rug(x = xobs[yobs == 0, 1], side = 1)
rug(x = xobs[yobs == 1, 1], side = 3)
}
)
} else {
switch(type,
term = {
plot(x = xgrid[, 1], y = fxgridm[, 1], main = '', pch = NA, ylim = range(x$fxgrid),
xlab = x$xname[1], ylab = paste('f(', x$xname, ')', sep = ''), ...)
polygon(x = c(xgrid[, 1], rev(xgrid[, 1])),
y = c(fxgridl[, 1], rev(fxgridu[, 1])), col = 'gray70', lty = 2)
lines(x = xgrid[, 1], y = fxgridm[, 1], lwd = 2, col = 2)
},
response = {
fxl = exp(fxobsl + rep(median(x$wbeta$upper), nobs))
fxm = exp(fxobsm + rep(median(x$wbeta$mean), nobs))
fxu = exp(fxobsu + rep(median(x$wbeta$lower), nobs))
o = order(xobs[, 1])
plot(x = xobs[, 1], y = yobs, pch = NA, xlab = x$xname[1], main = '',
ylab = x$yname[1], ylim = range(c(yobs, fxl, fxu)), ...)
polygon(x = c(xobs[o, 1], rev(xobs[o, 1])),
y = c(fxl[o], rev(fxu[o])), col = 'gray70', lty = 2)
points(x = xobs[, 1], y = yobs, lwd = 2, pch = 1)
lines(x = xobs[o, 1], y = fxm[o], lwd = 2, col = 2)
}
)
}
} else {
resid <- yobs - wbm
plot(x = xgrid, y = fxgridm, pch = NA,
ylim = range(c(resid, fxgridl, fxgridu)), main = '',
xlab = x$xname[1], ylab = paste('f(', x$xname[1], ')', sep = ''), ...)
polygon(x = c(xgrid, rev(xgrid)),
y = c(fxgridl, rev(fxgridu)), col = 'gray70', lty = 2)
lines(x = xgrid, y = fxgridm, lwd = 2, col = 2)
plot(xobs, resid, pch = NA, ylim = range(c(resid, fxobsu, fxobsl)),
xlab = x$xname, ylab = 'Parametric Residuals', main = '', ...)
polygon(x = c(xobs[o,1], rev(xobs[o,1])),
y = c(fxobsl[o], rev(fxobsu[o])), col = 'gray70', lty = 2)
points(xobs, resid, lwd = 2)
lines(xobs[o,1], fxobsm[o], lwd = 3, lty = 1, col = 2)
}
} else {
for (i in 1:nfun) {
switch(type,
term = {
resid = yobs - wbm - rowSums(fxobsm[, -i, drop = FALSE])
plot(x = xgrid[, i], y = fxgridm[, i], pch = NA,
ylim = range(c(resid, fxgridl[, i], fxgridu[, i])), main = '',
xlab = x$xname[i], ylab = paste('f(', x$xname[i], ')', sep = ''), ...)
polygon(x = c(xgrid[, i], rev(xgrid[, i])),
y = c(fxgridl[, i], rev(fxgridu[, i])), col = 'gray70', lty = 2)
lines(x = xgrid[, i], y = fxgridm[, i], lwd = 2, col = 2)
},
response = {
o = order(xobs[, i])
if (x$model == 'gbsar') {
if (x$family == 'bernoulli') {
if (x$link == 'probit') {
fxl = pnorm(fxobsl[, i])
fxm = pnorm(fxobsm[, i])
fxu = pnorm(fxobsu[, i])
} else {
logit <- function(xx) 1 / (1 + exp(-xx))
fxl = logit(fxobsl[, i])
fxm = logit(fxobsm[, i])
fxu = logit(fxobsu[, i])
}
plot(x = xobs[o, i], y = fxm[o], pch = NA, ylim = c(0, 1), main = '',
xlab = x$xname[i], ylab = paste('P(', x$yname, ')', sep = ''), ...)
polygon(x = c(xobs[o, i], rev(xobs[o, i])),
y = c(fxl[o], rev(fxu[o])), col = 'gray70', lty = 2)
lines(x = xobs[o, i], y = fxm[o], lwd = 2, col = 2)
rug(xobs[yobs == 0, i], side = 1)
rug(xobs[yobs == 1, i], side = 3)
} else {
fxl = exp(fxobsl[, i])
fxm = exp(fxobsm[, i])
fxu = exp(fxobsu[, i])
plot(x = xobs[o, i], y = fxm[o], pch = NA, ylim = range(c(yobs, fxl, fxu)), main = '',
xlab = x$xname[i], ylab = x$yname, ...)
polygon(x = c(xobs[o, i], rev(xobs[o, i])),
y = c(fxl[o], rev(fxu[o])), col = 'gray70', lty = 2)
points(x = xobs[, i], y = yobs, lwd = 2)
lines(x = xobs[o, i], y = fxm[o], lwd = 2, col = 2)
}
} else {
resid = yobs - wbm - rowSums(fxobsm[, -i, drop = FALSE])
plot(x = xobs[, i], y = resid, pch = NA, ylim = range(c(resid, fxobsl[, i], fxobsu[, i])),
main = '', xlab = x$xname[i], ylab = 'Partial Residuals', ...)
polygon(x = c(xobs[o, i], rev(xobs[o, i])),
y = c(fxobsl[o, i], rev(fxobsu[o, i])), col = 'gray70', lty = 2)
points(x = xobs[, i], y = yobs - wbm - rowSums(fxobsm[, -i, drop = FALSE]), lwd = 2)
lines(x = xobs[o, i], y = fxobsm[o, i], lwd = 2, col = 2)
}
}
)
}
}
}
}
|
a5a47a302cb5d21470d9efe17435ea8a0188b56e | a5a1dfa861d42495ea1013c42f8edd460ca89561 | /genotype/joint_genotyping/plot_sensitivity.R | 1006879d6c4b22b9f6705a750da641b9a0abb608 | [] | no_license | chanibravo/hcasmc_eqtl | 4cca8abab75d63196d005294bf42d291128fac24 | 0c4f25b5a336349d8e590f2ac357ce0519e16032 | refs/heads/master | 2021-09-20T05:52:29.831159 | 2018-08-05T05:23:01 | 2018-08-05T05:23:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 906 | r | plot_sensitivity.R | #!/usr/bin/env Rscript
# bosh liu
# 2016/05/13
# durga
# plot sensitivity vs other metrics
# library:
library(cowplot)
# paths:
input_file="../data/joint2/recalibrate_INDEL.tranches"
figure_path='../figures/160513_plot_sensitivity'
# read input:
input=fread(input_file,header=T)
# remove sensitivity = 100%:
input=input[1:11,]
# plot sensitiviy vs numKnown, numNovel, minVQSLod:
p1=ggplot(input,aes(x=truthSensitivity,y=numKnown))+geom_point()+scale_x_continuous(breaks=round(input[,truthSensitivity],3))
p2=ggplot(input,aes(x=truthSensitivity,y=numNovel))+geom_point()+scale_x_continuous(breaks=round(input[,truthSensitivity],3))
p3=ggplot(input,aes(x=truthSensitivity,y=minVQSLod))+geom_point()+scale_x_continuous(breaks=round(input[,truthSensitivity],3))
p=plot_grid(p1,p2,p3,labels=c('A','B','C'),align='v',ncol=1)
save_plot(paste0(figure_path,'sensitivity.pdf'),p,base_height=12,base_width=6) |
5449d2b059f747ce044774702ee5012c1f7f98b1 | 93e2aa9f2709e36dede8b1645c14d69e6774e5ec | /plot4.R | c16a25f48c261266adc792c27e94542bdb838c80 | [] | no_license | t-redactyl/ExData_Plotting1 | cff32562d9b5079683791b8f8a98ec4bfb2aca7f | 6e645b63dbedf639c9dc09bc3fc51e4011a7f830 | refs/heads/master | 2021-01-18T09:14:17.333862 | 2015-03-07T12:04:19 | 2015-03-07T12:04:19 | 31,810,143 | 0 | 0 | null | 2015-03-07T11:37:14 | 2015-03-07T11:37:14 | null | UTF-8 | R | false | false | 2,152 | r | plot4.R | # Plot of global active power against time
#Reading in the data
rm(list = ls())
# Importing the data
if(!file.exists("./data")) {
dir.create("./data")
}
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl,"./data/Dataset.zip", mode="wb")
unzip("./data/Dataset.zip")
energy <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")
# Processing the data
is.na(energy) <- energy == "?"
energy$datetime <- paste(energy$Date, energy$Time, sep = " ")
energy$datetime <- strptime(energy$datetime, "%d/%m/%Y %H:%M:%S")
energy$Date <- as.Date(energy$Date, "%d/%m/%Y")
energy$Global_active_power <- as.numeric(as.character(energy$Global_active_power))
energy$Sub_metering_1 <- as.numeric(as.character(energy$Sub_metering_1))
energy$Sub_metering_2 <- as.numeric(as.character(energy$Sub_metering_2))
energy$Voltage <- as.numeric(as.character(energy$Voltage))
energy$Global_reactive_power <- as.numeric(as.character(energy$Global_reactive_power))
# Keeping relevant subset of dates
energy <- energy[ which(energy$Date == "2007-02-01" | energy$Date == "2007-02-02"),]
# Creating plot and exporting to .png format
png(filename = "plot4.png",
width = 480,
height = 480)
par(mfrow = c(2, 2),
bg = "transparent",
cex = 0.80)
with(energy, {
plot(datetime, Global_active_power, type = "n", ylab = "Global Active Power", xlab = " ")
lines(energy$datetime, energy$Global_active_power)
plot(datetime, Voltage, type = "n", ylab = "Voltage")
lines(energy$datetime, energy$Voltage)
plot(datetime, Sub_metering_1, type = "n", ylab = "Energy sub metering", xlab = " ")
lines(energy$datetime, energy$Sub_metering_1, col = "black")
lines(energy$datetime, energy$Sub_metering_2, col = "red")
lines(energy$datetime, energy$Sub_metering_3, col = "blue")
legend("topright", lwd = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")
plot(datetime, Global_reactive_power, type = "n", ylab = "Global_reactive_power")
lines(energy$datetime, energy$Global_reactive_power)
})
dev.off() |
b3a4d95ec5fccfe5a59289fe4fcde9bc6f0f0a4f | 01f20211b667045c9f3d255ef93a30bba7be27b3 | /KEGGpathway_plots_AND_genes_to_excel.R | 1a5058dbd910658f74b848632ae7b0a31f346caa | [] | no_license | Genebio/HCV.SARS | 17a0943fcab4f585bca9d5143e82588a6e9d2112 | 03bb867714243ad4c25aa5d454b285a46e1c6bb0 | refs/heads/main | 2023-06-19T00:26:38.171381 | 2021-07-20T13:47:07 | 2021-07-20T13:47:07 | 380,697,073 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,967 | r | KEGGpathway_plots_AND_genes_to_excel.R | library(KEGGprofile)
library(openxlsx)
db <- read.csv("human_gene_id.csv")
head(db)
HCV.NEW_db <- as.character(db$GeneID[na.omit(match(HCV.NEW_DE, toupper(db$Symbol)))])
KEGG_HCV.NEW <- find_enriched_pathway(HCV.NEW_db, species = "hsa", returned_adjpvalue = 0.05, download_latest=T)[[1]][,c(1,6)]
KEGG_HCV.NEW_diff_pathways <- as.character(KEGG_HCV.NEW$Pathway_Name)
KEGG_HCV.NEW_all_pathways <- find_enriched_pathway(HCV.NEW_db, returned_pvalue=1, returned_adjpvalue = 1, species = "hsa",download_latest=T)[[1]][,c(1,6)]
# Same pipeline for the SARS DE gene set (SARS_DE, defined earlier in the file):
# symbols -> Entrez IDs, then enriched (adj. p <= 0.05) and unfiltered KEGG runs.
SARS_db <- as.character(db$GeneID[na.omit(match(SARS_DE, toupper(db$Symbol)))])
KEGG_SARS <- find_enriched_pathway(SARS_db, species = "hsa", returned_adjpvalue = 0.05, download_latest = TRUE)[[1]][, c(1, 6)]
KEGG_SARS_diff_pathways <- as.character(KEGG_SARS$Pathway_Name)
# All pathways retained (cutoffs = 1) for cross-condition p-value lookup below.
KEGG_SARS_all_pathways <- find_enriched_pathway(SARS_db, returned_pvalue = 1, returned_adjpvalue = 1, species = "hsa", download_latest = TRUE)[[1]][, c(1, 6)]
# Same pipeline for the SARS+HCV co-infection DE gene set (SARS.HCV_DE).
SARS.HCV_db <- as.character(db$GeneID[na.omit(match(SARS.HCV_DE, toupper(db$Symbol)))])
KEGG_SARS.HCV <- find_enriched_pathway(SARS.HCV_db, species = "hsa", returned_adjpvalue = 0.05, download_latest = TRUE)[[1]][, c(1, 6)]
# Rank pathways by adjusted p-value (most significant first).
KEGG_SARS.HCV <- KEGG_SARS.HCV %>% arrange(pvalueAdj, .by_group = TRUE)
# Drop the 2nd-ranked pathway (manual exclusion) -- top 30 SARS.HCV pathways.
KEGG_SARS.HCV_diff_pathways <- as.character(KEGG_SARS.HCV$Pathway_Name)[-2]
KEGG_SARS.HCV_all_pathways <- find_enriched_pathway(SARS.HCV_db, returned_pvalue = 1, returned_adjpvalue = 1, species = "hsa", download_latest = TRUE)[[1]][, c(1, 6)]
#HCV pathways DE
Unique_DE_pathways <- KEGG_HCV.NEW_diff_pathways
HCV.NEW_df <- KEGG_HCV.NEW_all_pathways[match(Unique_DE_pathways, KEGG_HCV.NEW_all_pathways$Pathway_Name),]
SARS_df <- KEGG_SARS_all_pathways[match(Unique_DE_pathways, KEGG_SARS_all_pathways$Pathway_Name),]
SARS.HCV_df <- KEGG_SARS.HCV_all_pathways[match(Unique_DE_pathways, KEGG_SARS.HCV_all_pathways$Pathway_Name),]
merged_pathway_df <- data.frame(Pathway_name=Unique_DE_pathways, SARS.HCV=SARS.HCV_df$pvalueAdj, HCV.NEW=HCV.NEW_df$pvalueAdj, SARS=SARS_df$pvalueAdj)
merged_pathway_df[,2:ncol(merged_pathway_df)] <- -log10(merged_pathway_df[,2:ncol(merged_pathway_df)])
merged_pathway_df <- merged_pathway_df %>% arrange(-(HCV.NEW), .by_group = T)
merged_pathway_df[is.na(merged_pathway_df)] <- 0
names(merged_pathway_df) <- c("Pathway_name", "SARS+HCV","HCV", "SARS")
melted_DE_pathways <- melt(merged_pathway_df, id.vars = "Pathway_name")
pdf("HCV_DE_pathways_paper.pdf", height = 14, width = 14)
ggplot(melted_DE_pathways, aes(x = Pathway_name, y = value, fill=variable)) +
geom_bar(position=position_dodge(width=0.85), stat = "identity", width=0.7) + coord_flip() + theme_bw()+
scale_y_continuous(name=bquote(~-Log[10]~ '(adjusted p-value)')) +
scale_x_discrete(name="", limits=rev(merged_pathway_df$Pathway_name)) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
legend.title = element_blank(), legend.position = c("top"),
axis.text.x = element_text(face="bold", size=20, angle=0, color = 'black'),
axis.text.y = element_text(face="bold", size=17, angle=0, color = 'black'),
axis.title.x = element_text(size=14, face="bold"),
legend.text = element_text(face="bold", size=17, angle=0, color = 'black'),
panel.border = element_rect(colour = "black", fill=NA, size=2))
dev.off()
HCV_pathways_total <- find_enriched_pathway(HCV.NEW_db, species = "hsa",returned_adjpvalue = 0.05, download_latest=T)
names(HCV_pathways_total$detail) <- HCV_pathways_total$stastic$Pathway_Name
for (i in 1:length(HCV_pathways_total$detail)){
HCV_pathways_total$detail[[i]] <- db$Symbol[match(as.numeric(HCV_pathways_total$detail[[i]]), db$GeneID)]
}
# HCV_pathways_total <- HCV_pathways_total$detail
# save(HCV_pathways_total, file="HCV_pathways_total.Rdata")
xx <- lapply(HCV_pathways_total$detail, unlist)
max <- max(sapply(xx, length))
HCV_pathways_total <- do.call(cbind, lapply(xx, function(z)c(z, rep(NA, max-length(z)))))
HCV_pathways_total <- data.frame(HCV_pathways_total)
write.xlsx(HCV_pathways_total, file="HCV_pathways_total.xlsx")
#SARS pathways DE
Unique_DE_pathways <- KEGG_SARS_diff_pathways
HCV.NEW_df <- KEGG_HCV.NEW_all_pathways[match(Unique_DE_pathways, KEGG_HCV.NEW_all_pathways$Pathway_Name),]
SARS_df <- KEGG_SARS_all_pathways[match(Unique_DE_pathways, KEGG_SARS_all_pathways$Pathway_Name),]
SARS.HCV_df <- KEGG_SARS.HCV_all_pathways[match(Unique_DE_pathways, KEGG_SARS.HCV_all_pathways$Pathway_Name),]
merged_pathway_df <- data.frame(Pathway_name=Unique_DE_pathways, SARS.HCV=SARS.HCV_df$pvalueAdj, HCV.NEW=HCV.NEW_df$pvalueAdj, SARS=SARS_df$pvalueAdj)
merged_pathway_df[,2:ncol(merged_pathway_df)] <- -log10(merged_pathway_df[,2:ncol(merged_pathway_df)])
merged_pathway_df <- merged_pathway_df %>% arrange(-(SARS), .by_group = T)
merged_pathway_df[is.na(merged_pathway_df)] <- 0
names(merged_pathway_df) <- c("Pathway_name", "SARS+HCV","HCV", "SARS")
melted_DE_pathways <- melt(merged_pathway_df, id.vars = "Pathway_name")
pdf("SARS_DE_pathways_paper.pdf", height = 14, width = 14)
ggplot(melted_DE_pathways, aes(x = Pathway_name, y = value, fill=variable)) +
geom_bar(position=position_dodge(width=0.85), stat = "identity", width=0.7) + coord_flip() + theme_bw()+
scale_y_continuous(name=bquote(~-Log[10]~ '(adjusted p-value)')) +
scale_x_discrete(name="", limits=rev(merged_pathway_df$Pathway_name)) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
legend.title = element_blank(), legend.position = c("top"),
axis.text.x = element_text(face="bold", size=20, angle=0, color = 'black'),
axis.text.y = element_text(face="bold", size=17, angle=0, color = 'black'),
axis.title.x = element_text(size=14, face="bold"),
legend.text = element_text(face="bold", size=17, angle=0, color = 'black'),
panel.border = element_rect(colour = "black", fill=NA, size=2))
dev.off()
SARS_pathways_total <- find_enriched_pathway(SARS_db, species = "hsa",returned_adjpvalue = 0.05, download_latest=T)
names(SARS_pathways_total$detail) <- SARS_pathways_total$stastic$Pathway_Name
for (i in 1:length(SARS_pathways_total$detail)){
SARS_pathways_total$detail[[i]] <- db$Symbol[match(as.numeric(SARS_pathways_total$detail[[i]]), db$GeneID)]
}
# SARS_pathways_total <- SARS_pathways_total$detail
# save(SARS_pathways_total, file="SARS_pathways_total.Rdata")
xx <- lapply(SARS_pathways_total$detail, unlist)
max <- max(sapply(xx, length))
SARS_pathways_total <- do.call(cbind, lapply(xx, function(z)c(z, rep(NA, max-length(z)))))
SARS_pathways_total <- data.frame(SARS_pathways_total)
write.xlsx(SARS_pathways_total, file="SARS_pathways_total.xlsx")
#SARS.HCV pathways DE
Unique_DE_pathways <- KEGG_SARS.HCV_diff_pathways
HCV.NEW_df <- KEGG_HCV.NEW_all_pathways[match(Unique_DE_pathways, KEGG_HCV.NEW_all_pathways$Pathway_Name),]
SARS_df <- KEGG_SARS_all_pathways[match(Unique_DE_pathways, KEGG_SARS_all_pathways$Pathway_Name),]
SARS.HCV_df <- KEGG_SARS.HCV_all_pathways[match(Unique_DE_pathways, KEGG_SARS.HCV_all_pathways$Pathway_Name),]
merged_pathway_df <- data.frame(Pathway_name=Unique_DE_pathways, SARS.HCV=SARS.HCV_df$pvalueAdj, HCV.NEW=HCV.NEW_df$pvalueAdj, SARS=SARS_df$pvalueAdj)
merged_pathway_df[,2:ncol(merged_pathway_df)] <- -log10(merged_pathway_df[,2:ncol(merged_pathway_df)])
merged_pathway_df <- merged_pathway_df %>% arrange(-(SARS.HCV), .by_group = T)
merged_pathway_df[is.na(merged_pathway_df)] <- 0
names(merged_pathway_df) <- c("Pathway_name", "SARS+HCV","HCV", "SARS")
melted_DE_pathways <- melt(merged_pathway_df, id.vars = "Pathway_name")
pdf("SARS.HCV_DE_pathways_paper.pdf", height = 14, width = 14)
ggplot(melted_DE_pathways, aes(x = Pathway_name, y = value, fill=variable)) +
geom_bar(position=position_dodge(width=0.85), stat = "identity", width=0.7) + coord_flip() + theme_bw()+
scale_y_continuous(name=bquote(~-Log[10]~ '(adjusted p-value)')) +
scale_x_discrete(name="", limits=rev(merged_pathway_df$Pathway_name)) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
legend.title = element_blank(), legend.position = c("top"),
axis.text.x = element_text(face="bold", size=20, angle=0, color = 'black'),
axis.text.y = element_text(face="bold", size=17, angle=0, color = 'black'),
axis.title.x = element_text(size=14, face="bold"),
legend.text = element_text(face="bold", size=17, angle=0, color = 'black'),
panel.border = element_rect(colour = "black", fill=NA, size=2))
dev.off()
SARS.HCV_pathways_total <- find_enriched_pathway(SARS.HCV_db, species = "hsa",returned_adjpvalue = 0.05, download_latest=T)
names(SARS.HCV_pathways_total$detail) <- SARS.HCV_pathways_total$stastic$Pathway_Name
for (i in 1:length(SARS.HCV_pathways_total$detail)){
SARS.HCV_pathways_total$detail[[i]] <- db$Symbol[match(as.numeric(SARS.HCV_pathways_total$detail[[i]]), db$GeneID)]
}
xx <- lapply(SARS.HCV_pathways_total$detail, unlist)
max <- max(sapply(xx, length))
SARS.HCV_pathways_total <- do.call(cbind, lapply(xx, function(z)c(z, rep(NA, max-length(z)))))
SARS.HCV_pathways_total <- data.frame(SARS.HCV_pathways_total)
write.xlsx(SARS.HCV_pathways_total, file="SARS.HCV_pathways_total.xlsx")
#All unique pathways DE
Unique_DE_pathways <- unique(c(KEGG_HCV.NEW_diff_pathways, KEGG_SARS_diff_pathways, KEGG_SARS.HCV_diff_pathways))
HCV.NEW_df <- KEGG_HCV.NEW_all_pathways[match(Unique_DE_pathways, KEGG_HCV.NEW_all_pathways$Pathway_Name),]
SARS_df <- KEGG_SARS_all_pathways[match(Unique_DE_pathways, KEGG_SARS_all_pathways$Pathway_Name),]
SARS.HCV_df <- KEGG_SARS.HCV_all_pathways[match(Unique_DE_pathways, KEGG_SARS.HCV_all_pathways$Pathway_Name),]
merged_pathway_df <- data.frame(Pathway_name=Unique_DE_pathways, SARS.HCV=SARS.HCV_df$pvalueAdj, HCV.NEW=HCV.NEW_df$pvalueAdj, SARS=SARS_df$pvalueAdj)
merged_pathway_df[,2:ncol(merged_pathway_df)] <- -log10(merged_pathway_df[,2:ncol(merged_pathway_df)])
merged_pathway_df[,2:ncol(merged_pathway_df)] <- log(merged_pathway_df[,2:ncol(merged_pathway_df)])
merged_pathway_df[is.na(merged_pathway_df)] <- c(-2)
merged_pathway_df[mapply(is.infinite, merged_pathway_df)] <- c(-2)
numeric_df <- merged_pathway_df[,2:ncol(merged_pathway_df)]
numeric_df[numeric_df>2] <- 2
numeric_df[numeric_df<c(-2)] <- c(-2)
croped_df <- data.frame(numeric_df, row.names = merged_pathway_df$Pathway_name)
colnames(croped_df) <- c("SARS+HCV","HCV", "SARS")
pdf("Heatmap_log_croped.pdf", width = 10, height = 14)
pheatmap(croped_df, fontsize_row=10, fontsize_col=15, angle_col=0)
dev.off()
# names(merged_pathway_df) <- c("Pathway_name", "SARS+HCV","HCV", "SARS")
# merged_total_df <- data.frame(merged_pathway_df[,2:4], row.names = merged_pathway_df$Pathway_name)
library(pheatmap)
pdf("ALL_DE_total_heatmap_log_no_scaling.pdf", width = 10, height = 14)
pheatmap(merged_total_df, fontsize_row=10, fontsize_col=15, angle_col=0)#scale="row",
dev.off()
log_merged_total_df <- log(merged_total_df)
pheatmap(log_merged_total_df, scale="row", fontsize_row=10, fontsize_col=15, angle_col=0)
# HCV_pathways_total
# SARS_pathways_total
# SARS.HCV_pathways_total
# unique_total_pathways <- unique(c(names(HCV_pathways_total), names(SARS_pathways_total), names(SARS.HCV_pathways_total)))
# total_list <- unlist(list(HCV_pathways_total), list(SARS_pathways_total), list(SARS.HCV_pathways_total))
# str(total_list)
#
# test_HCV <- HCV_pathways_total$detail
# test_SARS <- SARS_pathways_total$detail
# test_SARS.HCV <- SARS.HCV_pathways_total$detail
# # lsNames <- c("test_HCV","test_SARS","test_SARS.HCV")
# # do.call(mapply, c(FUN=c, sapply(lsNames, as.symbol), SIMPLIFY=T))
# names(test_HCV)#47
# names(test_SARS)#34
# names(test_SARS.HCV)#38
# new_list <- list()
# new_list[1:47] <- test_HCV
# names(new_list[1:47]) <- names(test_HCV)
# new_list[48:82] <- test_SARS
# names(new_list[48:82]) <- names(test_SARS)
#
# length(test_SARS.HCV)
# new_list[83:121] <- test_SARS.HCV
# names(new_list[83:121]) <- names(test_SARS.HCV)
|
ef44f0a90dae614c035ffabab0c6751b83c3a715 | 8fd589c0c2a809e0c1c54eb7f9dc1f4e93bf063b | /man/dst_query_match.Rd | 46c377d28f96fd5bb2fb14250bf6f8a0e1e90db2 | [] | no_license | rOpenGov/dkstat | 08c22c85db4fa7ca0ae11befad0a6dfca195a51a | f8865d301fd6738905b004edc08b8431fe4f1099 | refs/heads/master | 2021-11-20T08:36:10.153166 | 2021-10-13T15:02:21 | 2021-10-13T15:02:21 | 21,630,033 | 29 | 8 | null | 2016-10-16T17:36:42 | 2014-07-08T21:24:24 | R | UTF-8 | R | false | true | 465 | rd | dst_query_match.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dst_query_match.R
\name{dst_query_match}
\alias{dst_query_match}
\title{Helper function to return ids based on text values}
\usage{
dst_query_match(table, lang, meta_data, query, format)
}
\arguments{
\item{table}{Table from StatBank.}
\item{lang}{language. "en" for english and "da" for danish.}
}
\description{
This is a helper function to return the ids based on the text values.
}
|
d195f90ac0bc2fc3fca0918039065763b48d6f33 | d75b7bc015b47d94254bcc9334ba15972d3ec9a1 | /1. FIRST YEAR/Introduction to Computing/Exercices_Laura/exercici99.R | e6912cb926ff1026c2719efb841ed9f24b4e0a03 | [] | no_license | laurajuliamelis/BachelorDegree_Statistics | a0dcfec518ef70d4510936685672933c54dcee80 | 2294e3f417833a4f3cdc60141b549b50098d2cb1 | refs/heads/master | 2022-04-22T23:55:29.102206 | 2020-04-22T14:14:23 | 2020-04-22T14:14:23 | 257,890,534 | 0 | 0 | null | null | null | null | ISO-8859-3 | R | false | false | 306 | r | exercici99.R | n <- 5
# Prints the first n terms of the sequence 1, 2, 3, 5, 8, ... (each term is
# the sum of the two previous ones); n is assigned just above this block.
if (n == 0) {
  # Empty sequence requested.
  cat("La successió és buida\n")
} else {
  e1 <- 1
  e2 <- 2
  cat(e1, "\n")
  # Bug fix: the original printed BOTH of the first two terms whenever
  # n was 1 or 2, so n == 1 produced one term too many; the second term
  # is now only printed when n >= 2.
  if (n >= 2) {
    cat(e2, "\n")
  }
  if (n >= 3) {
    for (i in 3:n) {
      x <- e1 + e2
      cat(x, "\n")
      e1 <- e2
      e2 <- x
    }
  }
}
|
d6e538f1b017a3203d82647eb638e59823743e3c | 66db54ec2ca479e182953a8479dbee941289d240 | /R/download_network_hdf5.R | 96857785a66d60acd5e61cf7c4d2ac708abb85ba | [] | no_license | sarbal/OutDeCo | edb039b2181bf559973bbc43cc7095f0710795a7 | 09eea73882b5a763d7a241460e499e809348ca3b | refs/heads/master | 2022-12-16T11:37:10.906690 | 2020-09-24T06:08:56 | 2020-09-24T06:08:56 | 239,221,202 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,008 | r | download_network_hdf5.R | #' Download networks from dropbox
#'
#' @param network_type name of pre-built network to use (generic, blood, brain)
#' @param flag_occr boolean to select occurence network or coexpression network
#' @param dir dummy directory for the networks (currently unused)
#' @examples
#' network_type='generic'
#'
#' @import utils
#' @export
#'
download_network_hdf5 <- function(network_type = "generic", flag_occr = TRUE, dir = "") {
  # Dropbox share keys; entries come in (occurrence, co-expression) pairs for
  # the generic (1,2), blood (3,4) and brain (5,6) networks.
  net_files_keys <- c("b0v6405hz5zlmv8", "qkoenzheon8nafj", "299y0pnwewv9ee6", "2np3e78gjnvoe10", "wsqrji519uyh03k", "tayd6axapwt29ck")
  gene_files_keys <- c("8fo67lvq6jemjs4", "bs232ltz50yez7o", "mi25kj1dtxubzw7", "s4865kljzg5p8pv", "waqkeem6agg05ve", "fyuq0xkhq4s0ars")
  med_files_keys <- c("ucc8uj6p6gc14hu", "gr9ghxp17pe1gaf", "1qwcvvdjr92o22o", "xkioxsl7989ems6", "uykxeie4qgz6dns", "2xoutlukp4cv29x")
  # Map network_type / flag_occr to a key index; i stays 0 for unknown types.
  i <- 0
  if (network_type == "generic") { i <- 1 }
  if (network_type == "blood")   { i <- 3 }
  if (network_type == "brain")   { i <- 5 }
  if (flag_occr == FALSE) { i <- i + 1 }
  if (flag_occr == TRUE)  { network_type <- paste0(network_type, ".occr") }
  # Local file names mirror the Dropbox file names.
  genes_hdf5 <- paste0(network_type, ".genes.h5")
  median_hdf5 <- paste0(network_type, ".med.h5")
  net_hdf5 <- paste0(network_type, ".net.h5")
  url <- "https://www.dropbox.com/s/"
  if (i > 0) {
    genes_hdf5_dl <- paste0(url, gene_files_keys[i], "/", genes_hdf5, "?raw=1")
    median_hdf5_dl <- paste0(url, med_files_keys[i], "/", median_hdf5, "?raw=1")
    net_hdf5_dl <- paste0(url, net_files_keys[i], "/", net_hdf5, "?raw=1")
    # Bug fixes relative to the original:
    #  * mode = "wb": the .h5 files are binary, and download.file()'s default
    #    (text) mode corrupts binary files on Windows.
    #  * the original tryCatch() calls had no handlers and therefore caught
    #    nothing; an explicit error handler now turns a failed download into
    #    a warning instead of aborting the remaining downloads.
    download_once <- function(src, dest) {
      if (!file.exists(dest)) {
        tryCatch(
          download.file(src, destfile = dest, mode = "wb"),
          error = function(e) {
            warning("Failed to download ", dest, ": ", conditionMessage(e), call. = FALSE)
          }
        )
      }
    }
    download_once(genes_hdf5_dl, genes_hdf5)
    download_once(median_hdf5_dl, median_hdf5)
    download_once(net_hdf5_dl, net_hdf5)
  } else {
    # Previously this case fell through silently; make it diagnosable.
    warning("Unknown network_type '", network_type, "'; nothing downloaded.", call. = FALSE)
  }
}
|
832c15194632a9547936de9b0bd9648514689ccb | 16502827f4c63a4dbb1a52517d777740b4253dcf | /R/phy_to_ldf.R | 4b45d9fae8673ee81a3166976941c84e0c9f2c47 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | microsud/microbiomeutilities | 62a3dd12fa5396dff29a17f8f7612d25251e0269 | 046a9f928f35fc2a02dd6cfb9175a0c7ddf65c4b | refs/heads/master | 2022-12-06T14:44:11.259855 | 2022-11-23T08:07:05 | 2022-11-23T08:07:05 | 104,251,264 | 32 | 8 | NOASSERTION | 2021-04-14T09:31:29 | 2017-09-20T18:10:49 | R | UTF-8 | R | false | false | 2,431 | r | phy_to_ldf.R | #' @title Convert \code{\link{phyloseq-class}} object to long data format
#' @description An alternative to psmelt function from \code{\link{phyloseq-class}} object.
#' @param x \code{\link{phyloseq-class}} object
#' @param transform.counts Data transform to be used in plotting
#' (but not in sample/taxon ordering). The options are 'Z-OTU', 'Z-Sample',
#' 'log10' and 'compositional'; use NULL to keep the raw counts.
#' See the \code{\link{transform}} function
#' @return A data frame in long format (one row per OTU/sample pair) with the
#' requested transformation applied to the abundances
#' @import tidyr
#' @import dplyr
#' @importFrom tibble rownames_to_column
#' @import microbiome
#' @export
#' @examples
#' \dontrun{
#' # Example data
#' library(microbiomeutilities)
#' data("zackular2014")
#' pseq <- zackular2014
#' pseq_df <- phy_to_ldf(pseq, transform.counts = NULL)
#' }
#' @keywords utilities
phy_to_ldf <- function(x, transform.counts) {
  # Fail fast: everything below relies on phyloseq accessors.
  if (!is(x, "phyloseq")) {
    stop("Input is not an object of phyloseq class")
  }
  # Dispatch on the requested count transformation; NULL keeps the counts
  # unchanged (deliberate no-op branch), anything unrecognised is an error.
  if (is.null(transform.counts)) {
    x <- x
  } else if (transform.counts == "log10") {
    x <- microbiome::transform(x, "log10")
  } else if (transform.counts == "Z-OTU") {
    x <- microbiome::transform(x, "Z", "OTU")
  } else if (transform.counts == "Z-Sample") {
    x <- microbiome::transform(x, "Z", "Sample")
  } else if (transform.counts == "compositional") {
    x <- microbiome::transform(x, "compositional", "OTU")
  } else {
    stop("Please provide appropriate transformation")
  }
  message("An additonal column Sam_rep with sample names is created for reference purpose")
  # Sample metadata, with the sample names copied into a "Sam_rep" column.
  meta_df <- get_tibble(x,
    slot = "sam_data",
    column_id = "Sam_rep")
  #meta_df <- microbiome::meta(x)
  #meta_df$Sam_rep <- rownames(meta_df)
  # tax_df <- data.frame(tax_table(x)) %>%
  #  rownames_to_column("OTUID")
  # Taxonomy table keyed by OTU identifier.
  tax_df <- get_tibble(x,
    slot = "tax_table",
    column_id = "OTUID")
  #<- tax_table(x) %>%
  # as("matrix") %>%
  #as.data.frame() %>%
  #rownames_to_column("OTUID")
  # Abundance matrix as a data frame; check.names = FALSE keeps the original
  # sample names intact as column names.
  otu_df <- data.frame(abundances(x),
    check.names = FALSE
  ) %>% rownames_to_column("OTUID")
  # Join taxonomy onto the abundances, reshape to long format (one row per
  # OTU/sample pair), then join the sample metadata by "Sam_rep".
  # NOTE(review): gather_() is deprecated in current tidyr; kept as-is here.
  suppressWarnings(suppressMessages(otu_df %>%
    left_join(tax_df) %>%
    gather_(
      "Sam_rep",
      "Abundance", setdiff(
        colnames(otu_df),
        "OTUID"
      )
    ) %>%
    left_join(meta_df)))
}
|
3cc7ac0475c630a8af9eff5ed00d642f1644abaa | d4edb03b1bf3b0a0fbc58208b6437fda4dbd3d6f | /inst/simulation_and_plotting_scripts/generate_auRC-auPRC_replicates(imbalanced-data).R | 73132f9c5b9302d644588e217d849964640320a2 | [
"MIT"
] | permissive | insilico/npdr | 245df976150f06ed32203354819711dfe60f211b | 1aa134b6e82b700460e4b6392ef60ae2f80dfcfc | refs/heads/master | 2023-06-23T20:24:58.581892 | 2023-06-10T00:53:23 | 2023-06-10T00:53:23 | 163,314,265 | 10 | 5 | NOASSERTION | 2021-09-21T18:15:34 | 2018-12-27T16:16:52 | R | UTF-8 | R | false | false | 6,698 | r | generate_auRC-auPRC_replicates(imbalanced-data).R | # generate replicates of auRC and auPRC from simulated data with varying degrees of class imbalance
#
# generates data for plotting script: make_auRC-auPRC_boxplots(imbalanced-hitmiss-nbds).R
library(npdr)
library(reshape2)
library(ggplot2)
library(PRROC)

save.files <- FALSE  # set TRUE to write the per-imbalance csv files below
# sim.type (options)
#
# "mainEffect": simple main effects
# "mainEffect_Erdos-Renyi": main effects with added correlation from Erdos-Renyi network
# "mainEffect_Scalefree": main effects with added correlation from Scale-free network
# "interactionErdos": interaction effects from Erdos-Renyi network
# "interactionScalefree": interaction effects from Scale-free network
# "mixed": main effects and interaction effects
# mix.type (options)
#
# "main-interactionErdos": main effects and interaction effects from Erdos-Renyi network
# "main-interactionScalefree": main effects and interaction effects from Scale-free network
# data.type (options)
#
# "continuous": random normal data N(0,1) (e.g., gene expression data)
# "discrete": random binomial data B(n=2,prob) (e.g., GWAS data)

# ---- simulation parameters ------------------------------------------------
num.samples <- 100 # number of samples
num.variables <- 100 # number of variables
pct.imbalance <- 0.5 # fraction of instances that are cases (class=1); overwritten per imbalance level below
pct.signals <- 0.1 # fraction of num.variables that are functional
main.bias <- 0.1 # effect size parameter for main effects
interaction.bias <- 0.95 # effect size parameter for interaction effects
# NOTE(review): createSimulation2() below is called with the hard-coded value
# interaction.bias=1, not the variable above -- confirm which is intended.
mix.type <- "mainEffect-interactionErdos" # mixed simulation type
sim.type <- "interactionErdos" # simulation type
#sim.type <- "mainEffect"
data.type <- "discrete"
###################################################################################################
#
separate.hitmiss.nbds <- TRUE # run with both TRUE/FALSE to generate all files for plots
#
###################################################################################################
num.iter <- 1 # generate num.iter replicates for each level of imbalance (THIS WILL TAKE AWHILE!!!)
#num.iter <- 30 # 30 might be too much
# Bug fix: this status message used to sit at the top of the script, where
# num.iter was not defined yet (save.files = TRUE raised "object 'num.iter'
# not found"); it is now emitted after num.iter is assigned.
if (save.files){
  cat("Results files for ",num.iter, " replicate simulation(s) will be saved in ", getwd(),".", sep="")
}
# Class-imbalance levels to sweep over (fraction of cases).
imbalances <- c(0.1, 0.2, 0.3, 0.4, 0.5)
for(iter in 1:length(imbalances)){
  cat("Class Imbalance: ",imbalances[iter],"\n")
  pct.imbalance <- imbalances[iter]
  chosen.k.mat <- matrix(0,nrow=num.iter,ncol=num.variables)
  accu.vec <- numeric()
  auPRC.vec <- numeric()
  set.seed(1989)  # same seed per imbalance level so replicates are reproducible
  for(i in 1:num.iter){
    cat("Replicate Data Set: ",i,"\n")
    # simulated data
    dataset <- createSimulation2(num.samples=num.samples,
                                 num.variables=num.variables,
                                 pct.imbalance=pct.imbalance,
                                 pct.signals=pct.signals,
                                 main.bias=main.bias,
                                 interaction.bias=1,
                                 hi.cor=0.85,
                                 lo.cor=0.1,
                                 mix.type=mix.type,
                                 label="class",
                                 sim.type=sim.type,
                                 pct.mixed=0.5,
                                 pct.train=0.5,
                                 pct.holdout=0.5,
                                 pct.validation=0,
                                 plot.graph=FALSE,
                                 verbose=TRUE,
                                 use.Rcpp=FALSE,
                                 prob.connected=0.95,
                                 out.degree=(num.variables-2),
                                 data.type=data.type)
    # Combine the splits and sort rows by the class label (last column).
    dats <- rbind(dataset$train, dataset$holdout, dataset$validation)
    dats <- dats[order(dats[,ncol(dats)]),]
    # run vwak on data set to give beta and p-value matrices
    out <- vwak(dats=dats,
                k.grid=NULL, # can provide custom grid of k's
                verbose=TRUE,
                attr.diff.type="allele-sharing", # attribute diff used by npdr
                separate.hitmiss.nbds=separate.hitmiss.nbds, # TRUE for equal size hit/miss nbds
                label="class")
    betas <- out$beta.mat # num.variables x (num.samples - 1) matrix of betas
    pvals <- out$pval.mat # num.variables x (num.samples - 1) matrix of p-values
    best.ks <- apply(betas,1,which.max) # best k's computed from max beta for each attribute
    best.betas <- numeric() # betas corresponding to best k's
    best.pvals <- numeric() # p-values corresponding to best k's
    for(j in 1:nrow(betas)){
      best.betas[j] <- betas[j,as.numeric(best.ks[j])]
      best.pvals[j] <- pvals[j,as.numeric(best.ks[j])]
    }
    # data frame of attributes, betas, and p-values from vwak
    df.betas <- data.frame(att=row.names(betas),
                           betas=best.betas,
                           pval.att=best.pvals)
    df.betas <- df.betas[order(df.betas[,2],decreasing=TRUE),] # sort data frame by decreasing beta
    functional.vars <- dataset$signal.names # functional variable names
    pcts <- seq(0,1,.05)
    npdr.detected <- sapply(pcts,function(p){npdrDetected(df.betas,functional.vars,p)})
    chosen.k.mat[i,] <- best.ks
    if(i==1){
      colnames(chosen.k.mat) <- names(best.ks)
    }
    accu.vec[i] <- sum(npdr.detected)/length(npdr.detected) # area under the recall curve
    idx.func <- which(c(as.character(df.betas[,"att"]) %in% functional.vars))
    func.betas <- df.betas[idx.func,"betas"] # functional variable betas
    neg.betas <- df.betas[-idx.func,"betas"] # noise variable betas
    # precision-recall curve and area
    pr.npdr <- PRROC::pr.curve(scores.class0 = func.betas,
                               scores.class1 = neg.betas,
                               curve = TRUE)
    plot(pr.npdr) # plot precision-recall curve
    auPRC.vec[i] <- pr.npdr$auc.integral # area under the precision-recall curve
  }
  accu.df <- data.frame(auRC=accu.vec,auPRC=auPRC.vec)
  if (save.files){
    # "T"/"F" tag taken from the first character of "TRUE"/"FALSE".
    balanced.hit.miss <- strsplit(as.character(separate.hitmiss.nbds), split="")[[1]][1]
    file <- paste("separate-hitmiss-nbds-",balanced.hit.miss,"_",sim.type,"_",data.type,"_imbalance-",iter,".csv",sep="")
    write.csv(accu.df,file,row.names=FALSE)
    file <- paste("separate-hitmiss-nbds-",balanced.hit.miss,"_",sim.type,"_k-matrix_",data.type,"_imbalance-",iter,".csv",sep="")
    write.csv(chosen.k.mat,file,row.names=FALSE)
  }
}
|
a9fe55bedca0df37be26822466aed7d0b038cafa | 07e52ac913df25c8420e67aeea4425e0d2e7183a | /R/methods.R | 3dec7a59a9b2a784b9bb9756c1d5d41199380a77 | [] | no_license | rgcca-factory/RGCCA | b772a93ec0630f3617def32d8fdbef22dee2722e | 068c56194637b65b3371e53701005504670b1f26 | refs/heads/main | 2023-08-04T11:01:51.467178 | 2023-07-17T09:53:09 | 2023-07-17T09:53:09 | 194,089,737 | 9 | 12 | null | 2023-07-17T09:53:11 | 2019-06-27T12:19:01 | R | UTF-8 | R | false | false | 1,914 | r | methods.R | #' Available methods for RGCCA
#'
#' Lists every method name accepted by the rgcca function.
#' @return A character vector of the methods implemented with the rgcca function.
#' @examples
#' available_methods()
#' @export
available_methods <- function() {
  # Full catalogue of supported method names, kept in their historical order.
  method_names <- c(
    "rgcca", "sgcca", "pca", "spca", "pls", "spls", "cca", "ifa", "ra",
    "gcca", "maxvar", "maxvar-b", "maxvar-a", "mfa", "mcia", "mcoa",
    "cpca-1", "cpca-2", "cpca-4", "hpca", "maxbet-b", "maxbet",
    "maxdiff-b", "maxdiff", "sabscor", "ssqcor", "ssqcov-1", "ssqcov-2",
    "ssqcov", "sumcor", "sumcov-1", "sumcov-2", "sumcov", "sabscov-1",
    "sabscov-2"
  )
  method_names
}
# Method names that apply to a single block.
one_block_methods <- function() {
  single_block_set <- c("pca", "spca")
  single_block_set
}
# Method names that apply to exactly two blocks.
two_block_methods <- function() {
  two_block_set <- c("cca", "ra", "ifa", "pls", "spls")
  two_block_set
}
# Method names associated with a superblock.
superblock_methods <- function() {
  superblock_set <- c(
    "pca", "spca", "gcca", "maxvar", "maxvar-b", "maxvar-a", "mfa",
    "mcia", "mcoa", "cpca-1", "cpca-2", "cpca-4", "hpca"
  )
  superblock_set
}
# Method names grouped under the covariance-based criteria.
cov_methods <- function() {
  cov_set <- c(
    "pca", "spca", "pls", "ifa", "maxvar-a", "mfa", "mcia", "mcoa",
    "cpca-1", "cpca-2", "cpca-4", "hpca", "maxbet-b", "maxbet",
    "maxdiff-b", "maxdiff", "ssqcov-1", "ssqcov-2", "ssqcov",
    "sumcov-1", "sumcov-2", "sumcov", "sabscov-1", "sabscov-2"
  )
  cov_set
}
# Method names grouped under the correlation-based criteria.
cor_methods <- function() {
  cor_set <- c("cca", "gcca", "maxvar", "maxvar-b", "sabscor", "ssqcor", "sumcor")
  cor_set
}
# Method names grouped under the horst scheme.
horst_methods <- function() {
  horst_set <- c(
    "pca", "spca", "pls", "spls", "cca", "ifa", "ra",
    "cpca-1", "maxbet", "maxdiff", "sumcor",
    "sumcov-1", "sumcov-2", "sumcov"
  )
  horst_set
}
# Method names grouped under the factorial scheme.
factorial_methods <- function() {
  # Bug fix: the original vector listed "ssqcor" twice; the duplicate entry
  # has been removed (membership tests with %in% are unaffected).
  c(
    "gcca", "maxvar", "maxvar-b", "maxvar-a", "mfa", "mcia", "mcoa",
    "cpca-2", "maxbet-b", "maxdiff-b", "ssqcor",
    "ssqcov-1", "ssqcov-2", "ssqcov"
  )
}
# Method names grouped under the centroid scheme.
centroid_methods <- function() {
  centroid_set <- c("sabscor", "sabscov-1", "sabscov-2")
  centroid_set
}
# Method-name group "x4" (see available_methods() for the full catalogue).
x4_methods <- function() {
  x4_set <- c("cpca-4", "hpca")
  x4_set
}
# Method names with a sparse (penalised) variant.
sparse_methods <- function() {
  sparse_set <- c("sgcca", "spls", "spca")
  sparse_set
}
|
c0a0b61923a90a28101122c03d7257df566ec9be | 648ebf72f913f90fe9575a8325ce1d8633ac449e | /R/predict.MFA.R | bc4832d30d50a501af1805a4174a2b545f9c71b2 | [] | no_license | husson/FactoMineR | 1b2068dc925647603899607bf2a90e926ef002d5 | 1f7d04dcf7f24798ce6b322a4dd3ab2bd8ddd238 | refs/heads/master | 2023-04-28T09:19:02.186828 | 2023-04-23T11:32:21 | 2023-04-23T11:32:21 | 32,511,270 | 26 | 9 | null | 2023-04-21T13:34:38 | 2015-03-19T09:08:51 | HTML | UTF-8 | R | false | false | 2,496 | r | predict.MFA.R | predict.MFA <- function(object, newdata, ...){
  ## newdata: the data for the supplementary individuals to be projected
  ## object: the output of the MFA fitted on the active data
  # Weighted root-mean-square of V; NA entries are dropped from both the
  # numerator and the weight total. Used below to standardise the columns of
  # the disjunctive tables.
  ec <- function(V, poids) {
    res <- sqrt(sum(V^2 * poids,na.rm=TRUE)/sum(poids[!is.na(V)]))
  }
  # Number of dimensions kept in the fitted MFA, read from whichever variable
  # block (quantitative or qualitative) is present in the object.
  if (!is.null(object$quanti.var$coord)) ncp <- ncol(object$quanti.var$coord)
  else ncp <- ncol(object$quali.var$coord)
  # Build the supplementary data table group by group, applying to each group
  # the same pre-processing as its separate analysis on the active data.
  tab.supp <- matrix(NA,nrow(newdata),0)
  for (g in 1:length(object$call$group)){
    if (object$call$nature.group[g]=="quanti"){
      # Quantitative group: centre and scale the group's columns with the
      # centre / standard deviation stored in the separate analysis.
#    tab.aux <- sweep(newdata[,(c(1,1+cumsum(object$call$group))[g]):cumsum(object$call$group)[g]],2,object$separate.analyses[[g]][["call"]]$centre,FUN="-")
#    tab.aux <- sweep(tab.aux,2,object$separate.analyses[[g]][["call"]]$ecart.type,FUN="/")
      tab.aux <- t(t(newdata[,(c(1,1+cumsum(object$call$group))[g]):cumsum(object$call$group)[g]]) - object$separate.analyses[[g]][["call"]]$centre)
      tab.aux <- t(t(tab.aux) / object$separate.analyses[[g]][["call"]]$ecart.type)
      tab.supp <- cbind(tab.supp,as.matrix(tab.aux))
    } else {
      # Qualitative group: build disjunctive (indicator) tables for the
      # active and the supplementary data.
      tab.disj <- tab.disjonctif(object$separate.analyses[[g]][["call"]]$X)
      tab.disj.supp <- tab.disjonctif(rbind.data.frame(object$separate.analyses[[g]][["call"]]$X[1:2,],newdata[,(c(1,1+cumsum(object$call$group))[g]):cumsum(object$call$group)[g]])[-(1:2),,drop=FALSE]) ### so that the supplementary data carry the same factor levels as the active data
      if (!is.null(object$call$row.w.init)) SomRow <- sum(object$call$row.w.init)
      else SomRow <- length(object$call$row.w)
      M <- object$separate.analyses[[g]]$call$marge.col/SomRow
	  # Z <- sweep(tab.disj/SomRow, 2, M*2, FUN = "-")
	  # Zsup <- sweep(tab.disj.supp/SomRow, 2, M*2, FUN = "-")
	  # Zsup <- sweep(Zsup, 2,apply(Z,2,ec,object$global.pca$call$row.w.init),FUN="/")
      Z <- t(t(tab.disj/SomRow)-M*2)
      Zsup <- t(t(tab.disj.supp/SomRow) - M*2)
      Zsup <- t(t(Zsup) / apply(Z,2,ec,object$global.pca$call$row.w.init))
      tab.supp <- cbind(as.matrix(tab.supp),Zsup)
    }
  }
  # Apply the global column weights and project onto the axes of the global
  # PCA (right singular vectors stored in object$global.pca$svd$V).
  tab.supp <- sweep(tab.supp,2,sqrt(object$call$col.w),FUN="*")
  coord <- crossprod(t(tab.supp),object$global.pca$svd$V*sqrt(object$call$col.w))
  # Squared distance of each supplementary individual to the origin, and the
  # squared cosines (quality of representation) on each dimension.
  dist2 <- rowSums(tab.supp^2)
  cos2 <- (coord)^2/dist2
  coord <- coord[, 1:ncp, drop = FALSE]
  cos2 <- cos2[, 1:ncp, drop = FALSE]
  colnames(coord) <- colnames(cos2) <- paste("Dim", c(1:ncp), sep = ".")
  rownames(coord) <- rownames(cos2) <- names(dist2) <- rownames(newdata)
  result <- list(coord = coord, cos2 = cos2, dist = sqrt(dist2))
|
19cedc73c224a282074674321beff8613800b745 | 5137d2b74fa00d70152f5a18d0a24d1b4785cff0 | /Pathways/annotatePathwayCategories.R | cff2c5893f751e1fe5d09cc008a554c642edbe3c | [] | no_license | noemiandor/Utils | 02462f322102d602cb22d028fb7ac3ff4084489e | 9de792c32e46dbef7485e34400b4e4153cd41c4d | refs/heads/master | 2022-05-02T08:00:38.590908 | 2022-04-04T16:06:42 | 2022-04-04T16:06:42 | 192,091,982 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,178 | r | annotatePathwayCategories.R | annotatePathwayCategories<-function(targets, verbose=T, annotationFiles="~/Projects/code/RCode/github/Utils/Pathways"){
  # Reactome parent-child relations between pathway ids (two-column table:
  # V1 = parent, V2 = child).
  pr=read.table(paste0(annotationFiles,filesep,"ReactomePathwaysRelation.txt"),sep="\t", stringsAsFactors = F)
  # NCBI-gene-to-Reactome mapping with pathway names; columns renamed to the
  # Reactome download-file conventions.
  pnames=read.table(paste0(annotationFiles,filesep,"NCBI2Reactome_All_Levels_HSapiens.txt"),sep="\t", comment.char = "", stringsAsFactors = F, check.names = F)
  colnames(pnames)=c("SourceDBidentifier", "ReactomeStableidentifier","URL","Event Name","Evidence Code","Species")
  # Keep human entries only, one row per pathway id, indexed by that id.
  pnames=pnames[pnames$Species=="Homo sapiens",]
  pnames=pnames[!duplicated(pnames$ReactomeStableidentifier),]
  rownames(pnames)=pnames$ReactomeStableidentifier
  # Restrict the relation table to pathways we have names for.
  pr=pr[pr$V1 %in% pnames$ReactomeStableidentifier & pr$V2 %in% pnames$ReactomeStableidentifier,]
  # Resolve the target pathway names to Reactome ids (column "l1").
  # NOTE(review): pathwayName2ReactomeID, repmat, filesep and
  # tracePathwayPath_Reactome are helpers defined elsewhere in this repo.
  l1=pathwayName2ReactomeID(targets=targets, pnames, verbose=verbose)
  # Append two columns: l0 = root-level pathway id, l0_name = its event name.
  l0=cbind(l1,repmat(NA,length(l1),2)); colnames(l0)[2:3]=c("l0","l0_name")
  ii=which(!is.na(l0[,"l1"])); print(paste("Tracing",length(ii),"targets..."))
  # For every resolved target, walk up the relation graph and keep the first
  # (top-level) pathway on the path, then look up its name.
  l0[ii,"l0"]= sapply(l0[ii,"l1"], function(x) tracePathwayPath_Reactome(x, pr=pr)[1])
  l0[ii,"l0_name"]= sapply(l0[ii,"l0"], function(x) pnames[x,"Event Name"])
  return(l0)
} |
71a87be0d14896c469172aaba1fab43251e32f79 | b471b23efd99dc6ccc1dfdeeee9f45b9b0770830 | /cachematrix.R | 84e7f4d46f60fd82cdfed6974f149da003d65ffc | [] | no_license | Josealigo/ProgrammingAssignment2 | 2f5d004502278c26fd3b96edbdfd44ef9ce9d45d | 246b1d37f4520864f3aaa5c09618750ec60e93f7 | refs/heads/master | 2020-07-22T01:49:21.905425 | 2019-09-08T00:19:20 | 2019-09-08T00:19:20 | 207,036,991 | 0 | 0 | null | 2019-09-07T23:28:10 | 2019-09-07T23:28:09 | null | UTF-8 | R | false | false | 1,141 | r | cachematrix.R | ## Because matrix inversion is usually a costly computacion the following
## functions cache the inverse of a matrix, so the (potentially expensive)
## call to solve() only has to happen once per matrix.
## makeCacheMatrix() wraps a matrix together with a cached inverse and
## returns getter/setter closures for both.
makeCacheMatrix <- function(x = matrix()) {
        inv_cache <- NULL
        set <- function(y) {
                # Replacing the matrix invalidates any cached inverse.
                x <<- y
                inv_cache <<- NULL
        }
        get <- function() {
                x
        }
        setinverse <- function(inverse) {
                inv_cache <<- inverse
        }
        getinverse <- function() {
                inv_cache
        }
        list(set = set, get = get,
             setinverse = setinverse,
             getinverse = getinverse)
}
## cacheSolve() takes the special "matrix" built by makeCacheMatrix() and
## returns its inverse. The inverse is computed with solve() only on the
## first call; later calls return the value cached inside the object.
## In both cases the inverse matrix is returned.
cacheSolve <- function(x, ...) {
        cached <- x$getinverse()
        if (is.null(cached)) {
                # Cache miss: invert the stored matrix and remember the result.
                cached <- solve(x$get(), ...)
                x$setinverse(cached)
        } else {
                message("getting cached data")
        }
        cached
}
|
6b7ec5ff6b5a74c47b2cad592616e657b66d682c | 16557af8fc3b53d6a6fbf22c755a67974e56c108 | /man/EEControl.Rd | e25afc8fd32d1369ea5082925ceb0e28a73852d5 | [] | no_license | cran/RelValAnalysis | 965abb939e245857d2aebc5dbc420a668437a6d9 | 8fef3417f6bd3e23b9a8b1df9c114375ac37d619 | refs/heads/master | 2020-05-26T18:01:24.605537 | 2014-06-25T00:00:00 | 2014-06-25T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,831 | rd | EEControl.Rd | \name{EEControl}
\alias{EEControl}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
Control Term in the Energy-entropy Decomposition
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
The function computes the control term of the energy-entropy decomposition.
}
\usage{
EEControl(pi.current, pi.next, nu.next, nu.implied = nu.next)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{pi.current}{
%% ~~Describe \code{x} here~~
a numeric vector of the current portfolio weights.
}
\item{pi.next}{
a numeric vector of the portfolio weights for the next period.
}
\item{nu.next}{
a numeric vector of the benchmark weights for the next period.
}
\item{nu.implied}{
a numeric vector of the implied benchmark weights. The default value is \code{nu.next} (in this case the benchmark is buy-and-hold).
}
}
\details{
%% ~ If necessary, more details than the description above ~~
The control term measures how much the portfolio moves towards the current market weights. For details, see Section 2 of Pal and Wong (2013). Here the formula is modified slightly so that the energy-entropy decomposition holds identically whether the market is buy-and-hold or not.
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% ...
A number.
}
\references{
Pal, S. and T.-K. L. Wong (2013). Energy, entropy, and arbitrage. \emph{arXiv preprint arXiv:1308.5376}.}
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
\code{\link{FreeEnergy}},
\code{\link{RelativeEntropy}},
\code{\link{EnergyEntropyDecomp}}
}
\examples{
pi.current <- c(0.2, 0.3, 0.5)
pi.new <- c(0.3, 0.3, 0.4)
mu.new <- c(0.5, 0.3, 0.2)
EEControl(pi.current, pi.new, mu.new)
}
|
c4c9a333eb8089bb2019cfccf62340cc60293828 | d546e040c361023b68a53ce4a6969ba44b21861c | /models/geonet/model.r | f5bc7d13c7daeafa19cb4317bd8402b24c27618c | [] | no_license | factn/reality-reliability | 115f0266eecb41723f577f9a1ca2b13590be33a6 | e6bd7d965b941fd68932be53d32b014afd303554 | refs/heads/master | 2023-02-20T13:54:59.419613 | 2021-01-20T22:05:59 | 2021-01-20T22:05:59 | 167,264,355 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 453 | r | model.r | library(rstan)
library(Matrix)
library(brms)
# Use all available cores for parallel MCMC chains.
options(mc.cores = parallel::detectCores())
# Loads objects into the workspace; the code below assumes it provides
# `W_sparse` (an edge list) and `claims` -- TODO confirm against the file.
load('data/data.rdata')
# Build a symmetric 0/1 adjacency matrix from the sparse edge list
# (each edge is inserted in both directions, then coerced to dense 0/1).
W <- 1*as.matrix(sparseMatrix(i=c(W_sparse[, 1], W_sparse[, 2]), j=c(W_sparse[, 2], W_sparse[, 1])))
row.names(W) <- seq(nrow(W))
# Rescale the response, presumably to map it into the (0, 1) support
# required by the Beta family -- verify the raw range is (0, 10).
claims$value <- claims$value/10.0
# Fit the model
# Beta regression with a random intercept per agent and a CAR spatial
# correlation structure over the adjacency matrix W.
model <- brm(value ~ (1| agent), family='Beta', data=claims, autocor = cor_car(W, formula = ~ 1 | index))
saveRDS(model, file='model.rds')
|
aef705a196a43199209bece12d540a7ed6d9d80d | ffa12d1a799a5c2b753600e9f447badda81f8f26 | /R/few.R | 1d4065438772cd08fa9904dd0e986c778dbf5b20 | [] | no_license | Yaneri16/ggthemes | ed8fe746eec3868abae00b33f1bae77efd85e5b4 | 29c3f151bb69a013345d8a3e7c19933e98696c4d | refs/heads/master | 2020-03-19T00:52:01.113860 | 2018-05-27T21:46:58 | 2018-05-27T21:46:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,487 | r | few.R | #' Color Palettes from Few's "Practical Rules for Using Color in Charts"
#'
#' Qualitative color palettes from Stephen Few,
#'
#' Use the light palette for filled areas, such as bar charts.
#' The medium palette should be used for points and lines.
#' The dark palette should be used for either highlighting specific points,
#' or if the lines and points are small or thin.
#' All these palettes contain nine colors.
#'
#' @references
#' Few, S. (2012) \emph{Show Me the Numbers: Designing Tables and Graphs to Enlighten}.
#' 2nd edition. Analytics Press.
#'
#' \href{http://www.perceptualedge.com/articles/visual_business_intelligence/rules_for_using_color.pdf}{"Practical Rules for Using Color in Charts"}.
#'
#' @export
#' @param palette One of \code{c("medium", "dark", "light")}.
#' @family colour few
#' @example inst/examples/ex-few_pal.R
few_pal <- function(palette = "medium") {
  ## Pull the named colour vector for the requested palette. The first
  ## entry (gray) is reserved for non-data elements, so only the remaining
  ## colours are handed to the manual palette function.
  pal_colours <- ggthemes_data$few[[palette]]
  n_colours <- length(pal_colours)
  manual_pal(unname(pal_colours[2:n_colours]))
}
#' Color scales from Few's "Practical Rules for Using Color in Charts"
#'
#' See \code{\link{few_pal}}.
#'
#' @inheritParams ggplot2::scale_colour_hue
#' @inheritParams few_pal
#' @family colour few
#' @rdname scale_few
#' @export
scale_colour_few <- function(palette = "medium", ...) {
  # Discrete colour scale backed by the Few palette.
  pal <- few_pal(palette)
  discrete_scale("colour", "few", pal, ...)
}
#' @export
#' @rdname scale_few
scale_color_few <- scale_colour_few
#' @export
#' @rdname scale_few
scale_fill_few <- function(palette = "light", ...) {
  # Discrete fill scale backed by the Few palette (light by default, as
  # recommended for filled areas such as bar charts).
  pal <- few_pal(palette)
  discrete_scale("fill", "few", pal, ...)
}
#' Theme based on Few's "Practical Rules for Using Color in Charts"
#'
#' Theme based on the rules and examples from Stephen Few's
#' \emph{Show Me the Numbers} and "Practical Rules for Using Color in Charts".
#'
#' @references
#' Few, S. (2012) \emph{Show Me the Numbers: Designing Tables and Graphs to Enlighten}.
#' 2nd edition. Analytics Press.
#'
#' Stephen Few, "Practical Rules for Using Color in Charts",
#' \url{http://www.perceptualedge.com/articles/visual_business_intelligence/rules_for_using_color.pdf}.
#'
#' @inheritParams ggplot2::theme_bw
#' @family themes few
#' @export
#' @example inst/examples/ex-theme_few.R
theme_few <- function(base_size = 12, base_family = "") {
  # Few's scheme uses a medium gray for lines/borders and a dark gray
  # (rather than pure black) for text.
  few_colors <- ggthemes_data$few
  line_gray <- few_colors$medium["Gray"]
  text_gray <- few_colors$dark["Gray"]
  few_adjustments <- theme(
    line = element_line(colour = line_gray),
    rect = element_rect(fill = "white", colour = NA),
    text = element_text(colour = text_gray),
    axis.ticks = element_line(colour = line_gray),
    legend.key = element_rect(colour = NA),
    ## Examples do not use grid lines
    panel.grid = element_blank(),
    panel.border = element_rect(colour = line_gray),
    strip.background = element_rect(fill = "white", colour = NA)
  )
  theme_bw(base_size = base_size, base_family = base_family) + few_adjustments
}
#' Shape palette from "Show Me the Numbers" (discrete)
#'
#' Shape palette from Stephen Few's, "Show Me the Numbers".
#' It consists of five shapes: circle, square, triangle, plus, times.
#'
#' @references Few, S. (2012)
#' \emph{Show Me the Numbers: Designing Tables and Graphs to Enlighten},
#' Analytics Press, p. 208.
#'
#' @export
few_shape_pal <- function() {
  # At most five clearly distinguishable shapes are available.
  max_n <- 5
  # circle, square, triangle, plus, cross (R plotting symbol codes)
  shapes <- c(1, 0, 2, 3, 4)
  f <- function(n) {
    if (n > max_n) {
      stop(paste("The shape palette can deal with a maximum of ", max_n,
                 "discrete values;",
                 "you have ", n, ".",
                 " Consider specifying shapes manually if you ",
                 "must have them.", sep = ""),
           call. = FALSE)
    }
    shapes[seq_len(n)]
  }
  attr(f, "max_n") <- max_n
  f
}
#' Scales for shapes from "Show Me the Numbers"
#'
#' \code{scale_shape_few} maps discrete variables to five easily
#' discernible shapes. It is based on the shape palette suggested in
#' Few (2012).
#'
#' @param ... Common \code{discrete_scale} parameters. See
#' \code{\link[ggplot2]{discrete_scale}} for more details.
#'
#' @references Few, S. (2012)
#' \emph{Show Me the Numbers: Designing Tables and Graphs to Enlighten},
#' Analytics Press, p. 208.
#' @seealso \code{\link{few_shape_pal}} for the shape palette that this
#' scale uses.
#' @export
scale_shape_few <- function(...) {
  # Discrete shape scale using Few's five-shape palette.
  pal <- few_shape_pal()
  discrete_scale("shape", "few", pal, ...)
}
|
b3e8a262f97ecc95c6683126b9a26c57793463b4 | de00ff6505f4e888e23ef8adc2fcaca64af54b82 | /FinalProject/text_mining.R | 33bfc601e4c1271d2c9647036084598428d7ed66 | [] | no_license | lrshum17/STA523Project | 35491f95b237712dad7cd9bd9bf466dba7a1ae25 | b2f7e3176c9790491e564b8292eaf074805dd71c | refs/heads/master | 2020-12-24T10:55:58.630111 | 2015-12-13T02:24:50 | 2015-12-13T02:24:50 | 73,118,599 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,884 | r | text_mining.R | install.packages("twitteR")
library(twitteR)
# SECURITY NOTE(review): hard-coded Twitter API credentials committed to
# source control. These should be revoked and loaded from environment
# variables (e.g. Sys.getenv()) or a gitignored config file instead.
consumer_key<-"zSeAWHNpaL5G7GpHuCO4zTffT"
consumer_secret<-"dZnarPgWiQCnb1bJ0tvN3xFBmWVMRQCDDWl9UsFtkiTGpzztfG"
access_token<-"1071126529-DLvrKbaT9ju1yQsAHBWZz5h3vHGEWWyWYeTHP4Z"
access_secret<-"h3XgPKhsKkF66ShXfbFPEnby2VUofGD6AYvu53CFvUFX7"
# OAuth endpoints for the Twitter API (defined but not referenced below).
requestURL <- "https://api.twitter.com/oauth/request_token"
accessURL <- "https://api.twitter.com/oauth/access_token"
authURL <- "https://api.twitter.com/oauth/authorize"
setup_twitter_oauth(consumer_key, consumer_secret, access_token,access_secret)
##Seach for random Topic
# Fetch up to 1000 tweets from one user's timeline.
tweets = userTimeline("rundel",n = 1000)
##Search for users use
#install.packages("tm")
#install.packages("SnowballC")
library(tm)
library(SnowballC)
library(stringr)
n.tweet = length(tweets)
tweets[1:5]
# Convert the tweet list to a data frame and strip non-graphical characters.
tweets.df = twListToDF(tweets)
tweets.df$text=str_replace_all(tweets.df$text,"[^[:graph:]]", " ")
library(magrittr)
# Drop anything starting with "http" up to the next non-alphanumeric char.
removeURL = function(x) gsub("http[[:alnum:]]*", "", x)
myStopwords = c(stopwords("english"), "isis","rt","amp","twitter", "tweets", "tweet", "retweet",
"tweeting", "account", "via", "cc", "ht")
# Clean the corpus: lowercase, strip punctuation/numbers/URLs/stopwords,
# then stem each document.
myCorpus = Corpus(VectorSource(tweets.df$text)) %>%
tm_map(content_transformer(tolower)) %>%
tm_map(content_transformer(removePunctuation)) %>%
tm_map(content_transformer(removeNumbers)) %>%
tm_map(content_transformer(removeURL)) %>%
tm_map(removeWords, myStopwords) %>%
tm_map(content_transformer(stemDocument))
# Print the first five cleaned documents for inspection.
for(i in 1:5) {
cat(paste("[[", i, "]]", sep = ""))
writeLines(as.character(myCorpus[[i]]))
}
#myCorpus <- tm_map(myCorpus, content_transformer(stemCompletion), dictionary = myCorpusCopy, lazy=TRUE)
tdm <- TermDocumentMatrix(myCorpus, control = list(wordLengths = c(1, Inf)))
m = as.matrix(tdm)
word.freq = sort(rowSums(m), decreasing = T)
# NOTE(review): wordcloud() is called here but library(wordcloud) is only
# loaded further down -- this line fails unless wordcloud is already attached.
wordcloud(words = names(word.freq),random.color = TRUE, colors=rainbow(10), freq = word.freq, min.freq = 10, random.order = F)
#idx = which(dimnames(tdm)$Terms == "family")
#inspect(tdm[idx+(0:5), 101:110])
freq.terms <- findFreqTerms(tdm, lowfreq=3)
# Keep only terms occurring at least 80 times and plot their frequencies.
term.freq = rowSums(as.matrix(tdm))
term.freq = subset(term.freq, term.freq >= 80)
df = data.frame(term = names(term.freq), freq = term.freq)
library(ggplot2)
ggplot(df, aes(x = term, y = freq)) + geom_bar(stat = "identity") + xlab("Terms") + ylab("Count") + coord_flip()
# Terms correlated with "us" / "obama" above the given thresholds.
us=as.data.frame(findAssocs(tdm, "us", 0.2))
findAssocs(tdm, "obama", 0.25)
# http://www.bioconductor.org/packages/release/bioc/html/graph.html
#source("https://bioconductor.org/biocLite.R")
#biocLite("graph")
library(graph)
library(Rgraphviz)
# Term-association graph of the frequent terms.
plot(tdm, term = freq.terms, corThreshold = 0.12, weighting = T)
library(wordcloud)
m = as.matrix(tdm)
word.freq = sort(rowSums(m), decreasing = T)
wordcloud(words = names(word.freq),random.color = TRUE, colors=rainbow(10), freq = word.freq, min.freq = 10, random.order = F)
|
5e24bb3ce4670708739389c4f8c9c8b58c950f8a | 4716097bd47f69b5f71facd0bef1e49ff66cc87d | /R/print.PolyTrend.R | 5b5b6b8e918f58a5d68f3b5b299d487350f2fc80 | [] | no_license | cran/PolyTrend | e3f8aa12da6873d2289164b46533b2faaa1f6a7a | a265e92c070e3c4b4c1861f6081d10408ef6f3d0 | refs/heads/master | 2016-09-13T16:39:09.945585 | 2016-05-31T11:10:07 | 2016-05-31T11:10:07 | 58,641,187 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 971 | r | print.PolyTrend.R | print.PolyTrend <-
function(x, ...) {
cat("\nPolyTrend input data:\n")
cat(sprintf("\nY: %s\n", paste(x$Y,collapse=" ")))
cat(sprintf("alpha: %.2f\n", x$alpha))
cat("\nPolyTrend classification:\n")
strTrendType <-c("concealed", "no trend", "linear", "quadratic", "cubic")
cat(sprintf("\nTrend type: %i (%s)\n", x$TrendType, strTrendType[x$TrendType+2] ))
cat(sprintf("Slope: %.4f\n", x$Slope))
strDirection <- "positive"
if(x$Direction < 0) strDirection <- "negative"
cat(sprintf("Direction: %i (%s)\n", x$Direction, strDirection))
strSignificance <- "statistically significant"
if(x$Significance < 0) strSignificance <- "statistically insignificant"
cat(sprintf("Significance: %i (%s)\n", x$Significance, strSignificance))
strPolynomialDegree <-c("no trend", "linear", "quadratic", "cubic")
cat(sprintf("Polynomial degree: %i (%s)\n", x$PolynomialDegree, strPolynomialDegree[x$PolynomialDegree+1]))
invisible(x)
}
|
5623292347c397df9b08bd8b4f7122cbb8d7d90d | 1c5e993681ab00b3eb698d05f84e1daf2b4723e9 | /man/runDBN.Rd | 7184e4d7b84e51c822136afd4e3d903ff31402db | [] | no_license | cran/EDISON | d4e9ecb9187ec33efa2276d00ca5b37a487159e1 | 9d3d3660629fe0ee9fa81697452c47369485fa3a | refs/heads/master | 2021-01-21T21:55:02.171225 | 2016-03-30T21:04:12 | 2016-03-30T21:04:12 | 17,678,892 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,571 | rd | runDBN.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runDBN.R
\name{runDBN}
\alias{runDBN}
\title{Setup and run the MCMC simulation.}
\usage{
runDBN(targetdata, preddata = NULL, q, n, multipleVar = TRUE,
minPhase = 2, niter = 20000, scaling = TRUE, method = "poisson",
prior.params = NULL, self.loops = TRUE, k = 15, options = NULL,
outputFile = ".", fixed.edges = NULL)
}
\arguments{
\item{targetdata}{Target input data: A matrix of dimensions NumNodes by
NumTimePoints.}
\item{preddata}{Optional: Input response data, if different from the target
data.}
\item{q}{Number of nodes.}
\item{n}{Number of timepoints.}
\item{multipleVar}{\code{TRUE} when a specific variance is estimated for
each segment, \code{FALSE} otherwise.}
\item{minPhase}{Minimal segment length.}
\item{niter}{Number of MCMC iterations.}
\item{scaling}{If \code{TRUE}, scale the input data to mean 0 and standard
deviation 1, else leave it unchanged.}
\item{method}{Network structure prior to use: \code{'poisson'} for a sparse
Poisson prior (no information sharing), \code{'exp_hard'} or
\code{'exp_soft'} for the exponential information sharing prior with hard or
soft node coupling, \code{'bino_hard'} or \code{'bino_soft'} with hard or
soft node coupling.}
\item{prior.params}{Initial hyperparameters for the information sharing
prior.}
\item{self.loops}{If \code{TRUE}, allow self-loops in the network, if
\code{FALSE}, disallow self-loops.}
\item{k}{Initial value for the level-2 hyperparameter of the exponential
information sharing prior.}
\item{options}{MCMC options as obtained e.g. by the function
\code{\link{defaultOptions}}.}
\item{outputFile}{File where the output of the MCMC simulation should be
saved.}
\item{fixed.edges}{Matrix of size NumNodes by NumNodes, with
\code{fixed.edges[i,j]==1|0} if the edge between nodes i and j is fixed, and
-1 otherwise. Defaults to \code{NULL} (no edges fixed).}
}
\value{
A list containing the results of the MCMC simulation: network
samples, changepoint samples and hyperparameter samples. For details, see
\code{\link{output}}.
}
\description{
This function initialises the variabes for the MCMC simulation, runs the
simulation and returns the output.
}
\author{
Sophie Lebre
Frank Dondelinger
}
\references{
For more information about the MCMC simulations, see:
Dondelinger et al. (2012), "Non-homogeneous dynamic Bayesian networks with
Bayesian regularization for inferring gene regulatory networks with
gradually time-varying structure", Machine Learning.
}
\seealso{
\code{\link{output}}
}
|
2787c48154d90bc3749ceee32061f1cbbd55c1b3 | 4982fe092c56874d7ef622f1519f0a796ec8430c | /rankall.R | 22329314c70ad52d452372768bc857b0e7d92d3d | [] | no_license | ccurley/r_programming | 321b3f6d7485371efed8183e4d559169adc651eb | 2f0fd3084a31854ecc062b287d262edb78f001d6 | refs/heads/master | 2016-09-09T23:12:26.208118 | 2015-10-03T11:30:56 | 2015-10-03T11:30:56 | 42,875,845 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,520 | r | rankall.R | # rankall.R
# Coursera - Introduction to R
# Assignment 3 - Week 4 - Part 3
#
# This is basically the same script as rankhospital, with the operation in the if/else if/else state-
# ment slightly altered to build a data frame. As always, I overcomplicated that step with an rbind call in
# my first pass -- which almost worked, except that I was getting NA in the state name where hospital was NA
# I switched approaces based on the cachematrix.R exercise -- where I build the data frame from two separate
# lists -- a lost of hospitals corresponding to the row called and the list of states.
#
#
rankall <- function(outcome, num = "best"){
  ## Rank hospitals for a given 30-day mortality outcome in every state.
  ##
  ## Args:
  ##   outcome: one of "heart attack", "heart failure", "pneumonia".
  ##   num:     rank to extract per state -- "best", "worst", or a positive
  ##            integer (ranks past the number of hospitals yield NA).
  ##
  ## Returns a data frame with columns `hospital` and `state`, one row per
  ## state (rownames are the state abbreviations).

  # Validate the outcome with an exact match. The previous version used
  # grep(outcome, valid.outcome), which accepted partial patterns such as
  # "heart" and then crashed downstream on an NA column lookup.
  valid.outcome <- c("heart attack", "heart failure", "pneumonia")
  if (!(outcome %in% valid.outcome)) {
    stop("invalid outcome")
  }
  # Validate num before doing any file work.
  if (!(identical(num, "best") || identical(num, "worst") || is.numeric(num))) {
    stop("invalid num")
  }

  # Data file lives under the working directory.
  my.file <- file.path(getwd(), "hospdata", "outcome-of-care-measures.csv")
  df <- read.csv(my.file, colClasses = "character", na.strings = "Not Available")

  # Map the outcome argument onto its column name in the data set.
  f.col.name <- c("Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
                  "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
                  "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia")
  col.name <- f.col.name[match(outcome, valid.outcome)]

  my.states <- sort(unique(df[, "State"]))

  # For each state: subset, order by mortality rate (ties broken by hospital
  # name, NA rates dropped), then pick the hospital at the requested rank.
  # Preallocate instead of growing the vector inside the loop.
  my.hospital <- character(length(my.states))
  for (i in seq_along(my.states)) {
    df.state <- df[df$State == my.states[i], ]
    s.df.state <- df.state[order(as.numeric(df.state[[col.name]]),
                                 df.state[["Hospital.Name"]],
                                 decreasing = FALSE,
                                 na.last = NA), ]
    row <- if (identical(num, "best")) {
      1
    } else if (identical(num, "worst")) {
      nrow(s.df.state)
    } else {
      num
    }
    # Out-of-range ranks index past the data frame and yield NA, which is
    # the expected output for states with too few hospitals.
    my.hospital[i] <- s.df.state[row, "Hospital.Name"]
  }

  data.frame(hospital = my.hospital, state = my.states, row.names = my.states)
}
# tested with
#
# > head(rankall("heart attack", 20), 10)
# hospital state
# AK <NA> AK
# AL D W MCMILLAN MEMORIAL HOSPITAL AL
# AR ARKANSAS METHODIST MEDICAL CENTER AR
# AZ JOHN C LINCOLN DEER VALLEY HOSPITAL AZ
# CA SHERMAN OAKS HOSPITAL CA
# CO SKY RIDGE MEDICAL CENTER CO
# CT MIDSTATE MEDICAL CENTER CT
# DC <NA> DC
# DE <NA> DE
# FL SOUTH FLORIDA BAPTIST HOSPITAL FL
# > tail(rankall("pneumonia", "worst"), 3)
# hospital state
# WI MAYO CLINIC HEALTH SYSTEM - NORTHLAND, INC WI
# WV PLATEAU MEDICAL CENTER WV
# WY NORTH BIG HORN HOSPITAL DISTRICT WY
# > head(rankall("heart failure"), 10)
# hospital state
# AK SOUTH PENINSULA HOSPITAL AK
# AL GEORGE H. LANIER MEMORIAL HOSPITAL AL
# AR VA CENTRAL AR. VETERANS HEALTHCARE SYSTEM LR AR
# AZ BANNER GOOD SAMARITAN MEDICAL CENTER AZ
# CA CENTINELA HOSPITAL MEDICAL CENTER CA
# CO PARKER ADVENTIST HOSPITAL CO
# CT YALE-NEW HAVEN HOSPITAL CT
# DC PROVIDENCE HOSPITAL DC
# DE BAYHEALTH - KENT GENERAL HOSPITAL DE
# FL FLORIDA HOSPITAL HEARTLAND MEDICAL CENTER FL
# > tail(rankall("heart failure"), 10)
# hospital state
# TN WELLMONT HAWKINS COUNTY MEMORIAL HOSPITAL TN
# TX FORT DUNCAN MEDICAL CENTER TX
# UT VA SALT LAKE CITY HEALTHCARE - GEORGE E. WAHLEN VA MEDICAL CENTER UT
# VA SENTARA POTOMAC HOSPITAL VA
# VI GOV JUAN F LUIS HOSPITAL & MEDICAL CTR VI
# VT SPRINGFIELD HOSPITAL VT
# WA HARBORVIEW MEDICAL CENTER WA
# WI AURORA ST LUKES MEDICAL CENTER WI
# WV FAIRMONT GENERAL HOSPITAL WV
# WY CHEYENNE VA MEDICAL CENTER WY |
8c21ef4d549f6f565e0cd680231dd7798f0f939e | 3218909350b6f8388a6452c1f444184054598173 | /API_ZZZ/utqe-RestRserve/RestRserve/utqe_dev/api_utqe_table_read.R | 13af31e604478c41ca512d902f58f1742bae9613 | [] | no_license | ashther/practice | 340bcbe6bc823e118baf4ae257dc65effecc1eff | 13229521acf8ab1574747735719be334f99354e1 | refs/heads/master | 2020-05-21T03:21:45.526293 | 2018-12-07T10:35:30 | 2018-12-07T10:35:30 | 40,460,359 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,743 | r | api_utqe_table_read.R |
list_to_string <- function(params, .collapse = ' and ') {
  # Build a "`name` = ?name" clause for each element of `params` and join
  # them with `.collapse` (" and " for WHERE clauses).
  #
  # NOTE: sqlInterpolate treats non-ASCII (e.g. Chinese) column names as
  # positional variables; if such column names are needed, params must be
  # restructured, e.g. list(names = list(code = '...'), values = list(...)).
  keys <- names(params)
  clauses <- sprintf("`%s` = ?%s", keys, keys)
  paste(clauses, collapse = .collapse)
}
# params <- list(XH = '320130900011', XM = 'some body')
utqe_table_read <- function(table_name, params = NULL, pageIndex, pageSize) {
  ## Paginated SELECT from a MySQL table through the global `pool` connection
  ## object -- presumably a pool-package connection pool; TODO confirm.
  ##
  ## Args:
  ##   table_name: table to query. NOTE(review): interpolated into the SQL
  ##     via sprintf/paste0, NOT parameterized -- it must come from a trusted
  ##     whitelist (here: endpoint_table_params), never from raw user input.
  ##   params:     optional named list of equality filters (column = value);
  ##     the values are bound safely via sqlInterpolate.
  ##   pageIndex:  1-based page number.
  ##   pageSize:   rows per page.
  ##
  ## Returns list(rowsTotal, pagesTotal, data) on success, or
  ## list(errorMsg, errorCode) on failure: 40003 for bad paging arguments,
  ## 40099 for anything raised inside the tryCatch (including the NA
  ## comparison error when pageIndex/pageSize are not numeric strings).
  library(RMySQL)
  tryCatch({
    # get rows and pages number ----------------------------------------------
    pageIndex <- as.integer(pageIndex)
    pageSize <- as.integer(pageSize)
    if (pageIndex <= 0) {
      return(list(
        errorMsg = 'pageIndex must be positive integer.',
        errorCode = 40003
      ))
    }
    if (pageSize <= 0) {
      return(list(
        errorMsg = 'pageSize must be positive integer.',
        errorCode = 40003
      ))
    }
    # Count matching rows to derive the total page count.
    # sql <- paste0('select count(*) from ', table_name)
    sql <- sprintf('select count(*) from `%s`', table_name)
    if (length(params) > 0) {
      sql <- paste0(sql, ' where ', list_to_string(params))
      sql <- sqlInterpolate(pool, sql, .dots = params)
    }
    rows_total <- dbGetQuery(pool, sql)[1, 1]
    pages_total <- ceiling(rows_total / pageSize)
    # get table data ---------------------------------------------------------
    # LIMIT/OFFSET paging; filter values and paging numbers are all bound
    # through sqlInterpolate.
    args <- list(offset = (pageIndex - 1) * pageSize,
                 limit = pageSize)
    if (length(params) > 0) {
      sql <- paste0('select * from `',
                    table_name,
                    '` where ',
                    list_to_string(params),
                    ' limit ?limit offset ?offset;')
      args <- c(args, params)
    } else {
      sql <- paste0('select * from `',
                    table_name,
                    '` limit ?limit offset ?offset;')
    }
    sql <- sqlInterpolate(pool, sql, .dots = args)
    result <- dbGetQuery(pool, sql)
    list(rowsTotal = rows_total,
         pagesTotal = pages_total,
         data = result)
  }, error = function(e) {
    # Catch-all: surface any DB/parse error as a generic error payload.
    list(errorMsg = e$message,
         errorCode = 40099)
  })
}
utqe_table_read_filter <- function(request, response) {
  ## RestRserve handler: validates the query parameters declared for this
  ## endpoint, coerces them to their declared types, and delegates to
  ## utqe_table_read().
  ##
  ## `endpoint_table_params` is a global lookup keyed by endpoint path; each
  ## entry maps required/optional parameter names to the NAME of a coercion
  ## function (e.g. "as.integer") and names the backing table.
  library(RestRserve)
  library(jsonlite)
  # check required parameters ----------------------------------------------
  endpoint <- request$path
  query <- request$query
  req_params <- endpoint_table_params[[endpoint]]$req_params
  opt_params <- endpoint_table_params[[endpoint]]$opt_params
  all_params <- c(req_params, opt_params)
  table_name <- endpoint_table_params[[endpoint]]$table_name
  if (!is.null(req_params)) {
    if (!all(names(req_params) %in% names(query))) {
      return(RestRserveResponse$new(
        body = toJSON(list(errorMsg = 'Required parameters must be supplied.',
                           errorCode = 40001),
                      auto_unbox = TRUE),
        content_type = 'application/json',
        status_code = 400L,
        headers = 'Access-Control-Allow-Origin:*'
      ))
    }
  }
  # check required and optional parameters type ----------------------------
  inter_params_names <- intersect(names(all_params), names(query))
  # Resolve each declared coercion function by name with match.fun() and
  # apply it to the raw query value. This replaces the previous
  # eval(parse(text = sprintf(...))) construction, which executed an
  # arbitrary string built from the spec.
  inter_params <- purrr::map(inter_params_names, function(nm) {
    match.fun(all_params[[nm]])(query[[nm]])
  })
  inter_params <- setNames(inter_params, inter_params_names)
  # A failed coercion yields NA (e.g. as.integer("abc")); a NULL/zero-length
  # element means no usable value. The previous check used
  # any(is.null(inter_params)) on the whole list, which is always FALSE.
  bad_param <- any(vapply(
    inter_params,
    function(v) is.null(v) || length(v) == 0 || anyNA(v),
    logical(1)
  ))
  if (bad_param) {
    return(RestRserveResponse$new(
      body = toJSON(list(errorMsg = 'Parameters type is invalid.',
                         errorCode = 40002),
                    auto_unbox = TRUE),
      content_type = 'application/json',
      status_code = 400L,
      headers = 'Access-Control-Allow-Origin:*'
    ))
  }
  # call real function -----------------------------------------------------
  # Paging arguments are passed through separately; everything else in the
  # query string becomes an equality filter.
  pageIndex <- query[['pageIndex']]
  pageSize <- query[['pageSize']]
  params <- as.list(query)
  params <- params[setdiff(names(params), c('pageIndex', 'pageSize'))]
  result <- utqe_table_read(table_name, params, pageIndex, pageSize)
  if ('errorCode' %in% names(result)) {
    return(RestRserveResponse$new(
      body = toJSON(result, auto_unbox = TRUE, na = 'null', null = 'null'),
      content_type = 'application/json',
      status_code = 400L,
      headers = 'Access-Control-Allow-Origin:*'
    ))
  } else {
    return(RestRserveResponse$new(
      body = toJSON(result, auto_unbox = TRUE, na = 'null', null = 'null'),
      content_type = 'application/json',
      status_code = 200L,
      headers = 'Access-Control-Allow-Origin:*'
    ))
  }
}
|
6a38f1e69336a89f4ded4201eeb11004873902a2 | f8e90f1d7e1765986d1e54518e36be3421340abf | /R/MiRKATS.R | c7a9b5b21daef83cde1d71b2d96b16f6e03816df | [] | no_license | cran/MiRKAT | dc223b705b1e95a30ecf32be9187dfe1b2ce7895 | 7458a2346404eb5e6cde863bf2de58a8b9675837 | refs/heads/master | 2023-03-04T06:52:51.698101 | 2023-02-17T13:50:02 | 2023-02-17T13:50:02 | 98,353,574 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 9,854 | r | MiRKATS.R | #'Microiome Regression-based Kernel Association Test for Survival
#'
#' Community level test for association between microbiome composition and survival outcomes (right-censored time-to-event data)
#' using kernel matrices to compare similarity between microbiome profiles with similarity in survival times.
#'
#' obstime, delta, and X should all have n rows, and the kernel or distance matrix should be a single n by n matrix.
#' If a distance matrix is entered (distance=TRUE), a kernel matrix will be constructed from the distance matrix.
#'
#' Update in v1.1.0: MiRKATS also utilizes the OMiRKATS omnibus test if more than one kernel matrix is provided by the user.
#' The OMiRKATS omnibus test calculates an overall p-value for the test via permutation.
#'
#' Missing data is not permitted. Please remove individuals with missing data on y, X or in the kernel or distance matrix prior
#' to using the function.
#'
#' The Efron approximation is used for tied survival times.
#'
#' @param obstime A numeric vector of follow-up (survival/censoring) times.
#' @param delta Event indicator: a vector of 0/1, where 1 indicates that the event was observed for a subject (so "obstime" is
#' survival time), and 0 indicates that the subject was censored.
#' @param X A vector or matrix of numeric covariates, if applicable (default = NULL).
#' @param Ks A list of or a single numeric n by n kernel matrices or matrix (where n is the sample size).
#' @param beta A vector of coefficients associated with covariates. If beta is NULL and covariates are present, coxph is used to
#' calculate coefficients (default = NULL).
#' @param perm Logical, indicating whether permutation should be used instead of analytic p-value calculation (default=FALSE).
#' Not recommended for sample sizes of 100 or more.
#' @param omnibus A string equal to either "Cauchy" or "permutation" (or nonambiguous abbreviations thereof), specifying whether
#' to use the Cauchy combination test or residual permutation to generate the omnibus p-value.
#' @param nperm Integer, number of permutations used to calculate p-value if perm==TRUE (default=1000) and to calculate omnibus p-value if omnibus=="permutation".
#' @param returnKRV A logical indicating whether to return the KRV statistic. Defaults to FALSE.
#' @param returnR2 A logical indicating whether to return the R-squared coefficient. Defaults to FALSE.
#'
#'@return
#'Return value depends on the number of kernel matrices inputted. If more than one kernel matrix is given, MiRKATS returns two
#'items; a vector of the labeled individual p-values for each kernel matrix, as well as an omnibus p-value from the Optimal-MiRKATS
#'omnibus test. If only one kernel matrix is given, then only its p-value will be given, as no omnibus test will be needed.
#' \item{p_values}{individual p-values for each inputted kernel matrix}
#' \item{omnibus_p}{overall omnibus p-value}
#' \item{KRV}{A vector of kernel RV statistics (a measure of effect size), one for each candidate kernel matrix. Only returned if returnKRV = TRUE}
#' \item{R2}{A vector of R-squared statistics, one for each candidate kernel matrix. Only returned if returnR2 = TRUE}
#'
#' @author
#' Nehemiah Wilson, Anna Plantinga
#'
#' @references
#' Plantinga, A., Zhan, X., Zhao, N., Chen, J., Jenq, R., and Wu, M.C. MiRKAT-S: a distance-based test of association between
#' microbiome composition and survival times. Microbiome, 2017:5-17. doi: 10.1186/s40168-017-0239-9
#'
#' Zhao, N., Chen, J.,Carroll, I. M., Ringel-Kulka, T., Epstein, M.P., Zhou, H., Zhou, J. J., Ringel, Y., Li, H. and Wu, M.C. (2015)).
#' Microbiome Regression-based Kernel Association Test (MiRKAT). American Journal of Human Genetics, 96(5):797-807
#'
#' Chen, J., Chen, W., Zhao, N., Wu, M~C.and Schaid, D~J. (2016) Small Sample Kernel Association Tests for Human Genetic and
#' Microbiome Association Studies. 40:5-19. doi: 10.1002/gepi.21934
#'
#' Efron, B. (1977) "The efficiency of Cox's likelihood function for censored data." Journal of the American statistical
#' Association 72(359):557-565.
#'
#' Davies R.B. (1980) Algorithm AS 155: The Distribution of a Linear Combination of chi-2 Random Variables, Journal of the Royal
#' Statistical Society Series C, 29:323-333
#'
#' @importFrom survival coxph Surv
#'
#'@examples
#'
#'###################################
#'# Generate data
#'library(GUniFrac)
#'
#'
#'# Throat microbiome data
#'data(throat.tree)
#'data(throat.otu.tab)
#'
#'unifracs = GUniFrac(throat.otu.tab, throat.tree, alpha = c(1))$unifracs
#'if (requireNamespace("vegan")) {
#' library(vegan)
#' BC= as.matrix(vegdist(throat.otu.tab, method="bray"))
#' Ds = list(w = unifracs[,,"d_1"], uw = unifracs[,,"d_UW"], BC = BC)
#'} else {
#' Ds = list(w = unifracs[,,"d_1"], uw = unifracs[,,"d_UW"])
#'}
#'
#'Ks = lapply(Ds, FUN = function(d) D2K(d))
#'
#'# Covariates and outcomes
#'covar <- matrix(rnorm(120), nrow=60)
#'S <- rexp(60, 3) # survival time
#'C <- rexp(60, 1) # censoring time
#'D <- (S<=C) # event indicator
#'U <- pmin(S, C) # observed follow-up time
#'
#'MiRKATS(obstime = U, delta = D, X = covar, Ks = Ks, beta = NULL)
#'
#'
#' @export
MiRKATS <- function(obstime, delta, X = NULL, Ks, beta = NULL, perm=FALSE, omnibus="permutation", nperm=999, returnKRV = FALSE, returnR2 = FALSE){
  # First letter of the omnibus option selects the combination method
  # ("p" = permutation, "c" = Cauchy).
  om <- substring(tolower(omnibus), 1, 1)
  # Normalize a single kernel matrix into a one-element list.
  if(!is.list(Ks)){
    if (!is.matrix(Ks)) stop("Please convert your kernel into a matrix.")
    Ks <- list(Ks)
  }
  if(!length(Ks)==1){
    if(is.null(names(Ks))){
      message("Your p-values are not labeled with their corresponding kernel matrix. In order to have them labeled,
              make your list of kernel matrices for the input of the form 'list(name1=K1, name2=K2'...) in order for the output
              p-values to be labeled with 'name1,' 'name2,' etc.")
    }
  }
  # light checks for input
  if(length(obstime) != length(delta)) stop("Please make sure you have n observed times and n event indicators.")
  if(!is.null(beta) & is.null(X)) warning("Your input includes coefficients but no covariates. Did you intend to include covariates?")
  if(nrow(Ks[[1]]) != length(obstime)) stop("Number of observed times does not match distance or kernel matrix. Please check your object dimensions.")
  if(length(obstime) >= 100 & perm==TRUE){warning("Permutation p-values are not recommended unless n<100. Computation time may be long.")}
  if(length(obstime) <= 50 & perm==FALSE){warning("Permutation p-values are recommendeded when n <= 50.")}
  # Effect-size summaries: scaled martingale residuals from an
  # intercept-only Cox model form the outcome similarity matrix L.
  # calcKRVstat/calcRsquared are package-internal helpers -- TODO confirm.
  if (returnKRV | returnR2) {
    resids = scale(residuals(coxph(Surv(time = obstime, event = delta) ~ 1), type="martingale"))
    L = resids %*% t(resids)
    if (returnKRV) {
      KRVs <- unlist(lapply(Ks, FUN = function(k) calcKRVstat(k, L)))
    } else {
      KRVs = NULL
    }
    if (returnR2) {
      R2 <- unlist(lapply(Ks, FUN = function(k) calcRsquared(k, L)))
    } else {
      R2 = NULL
    }
  } else {
    KRVs = R2 = NULL
  }
  # Calculate individual p-values
  # inner.MiRKATS (package-internal) performs the per-kernel test.
  pvals <- c()
  for(i in 1:length(Ks)){
    pvals[i] <- inner.MiRKATS(obstime=obstime, delta=delta, covar = X, K = Ks[[i]], beta=beta, perm=perm, nperm=nperm)
  }
  names(pvals) <- names(Ks)
  if(length(Ks) > 1){
    if (om == "p") {
      ######################################
      # Optimal-MiRKATS omnibus test begins
      ######################################
      if (is.null(X)) {X <- rep(1, length(obstime))}
      # Cox model residuals under the covariate-only (null) model.
      # NOTE(review): shuffle() below is presumably permute::shuffle --
      # confirm the package is attached.
      r <- coxph(Surv(obstime, delta) ~ ., data=as.data.frame(X))$residuals
      r.s <- list() # becomes a list of permuted vectors of residuals
      for (j in 1:nperm) {
        r.s[[j]] <- r[shuffle(length(r))]
      }
      # Null distribution of the quadratic-form statistic for each kernel.
      T0s.mirkats <- list()
      for (j in 1:length(Ks)) {
        T0s.mirkats.inv <- rep(NA, nperm)
        for (k in 1:nperm) {
          T0s.mirkats.inv[k] <- t(r.s[[k]])%*%Ks[[j]]%*%r.s[[k]]
        }
        T0s.mirkats[[j]] <- T0s.mirkats.inv
      }
      Q.mirkats <- min(pvals) # The test statistic for OMiRKATS, Q.mirkats, is the minimum of the p-values from MiRKATS
      Q0.mirkats <- rep(NA, nperm)
      for (l in 1:nperm) { # Creating a list of omnibus test statistics (minimum p-values from null distributions of test statistics)
        Q0.mirkats.s.n <- list()
        for (m in 1:length(Ks)) {
          Q0.mirkats.s.n[[m]] <- T0s.mirkats[[m]][-l]
        }
        a.Qs.mirkats <- unlist(lapply(Ks,function(x) return(t(r.s[[l]])%*%x%*%r.s[[l]])))
        a.pvs <- unlist(mapply(function(x,y)length(which(abs(x) >= abs(y)))/(nperm-1),Q0.mirkats.s.n,a.Qs.mirkats))
        Q0.mirkats[l] <- min(a.pvs)
      }
      p.omirkats <- length(which(Q0.mirkats <= Q.mirkats))/nperm # The omnibus p-value
    } else if (om == "c") {
      # Cauchy combination of the individual p-values.
      cauchy.t <- sum(tan((0.5 - pvals)*pi))/length(pvals)
      p.omirkats <- 1 - pcauchy(cauchy.t)
    } else {
      stop("I don't know that omnibus option. Please choose 'permutation' or 'Cauchy'.")
    }
    ## return if multiple kernels
    if (is.null(KRVs) & is.null(R2)) {
      return(list(p_values = pvals, omnibus_p = p.omirkats))
    } else if (is.null(KRVs) & !is.null(R2)) {
      return(list(p_values = pvals, omnibus_p = p.omirkats, R2 = R2))
    } else if (!is.null(KRVs) & is.null(R2)) {
      return(list(p_values = pvals, omnibus_p = p.omirkats, KRV = KRVs))
    } else {
      return(list(p_values = pvals, omnibus_p = p.omirkats, KRV = KRVs, R2 = R2))
    }
  }
  ## return if only one kernel
  if (is.null(KRVs) & is.null(R2)) {
    return(list(p_values = pvals))
  } else if (is.null(KRVs) & !is.null(R2)) {
    return(list(p_values = pvals, R2 = R2))
  } else if (!is.null(KRVs) & is.null(R2)) {
    return(list(p_values = pvals, KRV = KRVs))
  } else {
    return(list(p_values = pvals, KRV = KRVs, R2 = R2))
  }
}
|
671274739f28d4f9dcb6e852b6a82ee86f006ea6 | 197f5cb3cba1fc192cc58a20afaa73e71538787c | /R/BNP_Dataframe.R | 67c43259d48a308429326a877316ad512a11995e | [
"MIT"
] | permissive | mar-esther23/boolnet-perturb | 84370fc4c986ec3bb55c19f7b64e9e3c0563d7d4 | 047ba2cfe98d00cf7424f62e9089504cbf535f78 | refs/heads/master | 2021-01-12T21:37:11.039229 | 2020-04-18T03:17:30 | 2020-04-18T03:17:30 | 143,438,464 | 5 | 2 | null | 2020-03-02T04:39:10 | 2018-08-03T14:39:24 | R | UTF-8 | R | false | false | 10,369 | r | BNP_Dataframe.R | ###################
#### DATAFRAME ####
###################
#' Convert a BoolNet attractor to dataframe.
#'
#' If Booleans converts a BoolNet attractor to data frame with nodes displayed in Boolean format. First column is the attractor number, second is the number of state inside the attractor, the rest of the columns correspond to each node.
#' If not Boolean it converts a BoolNet attractor to dataframe with properties as columns. The rownames correspond to the int value of each attractor, in the case of cycles the state are joined by sep. Each property of attr$attractors corresponds to a dataframe column. If the property has elements with length > 1 it converts them to a string and joins them with sep.
#'
#' @param attr BoolNet attractor object
#' @param node.names node names, by default taken from attractor object
#' @param sep string to join elements with length > 1, default "/"
#' @param Boolean return attractor in Boolean or integer format, default FALSE
#' @return If Boolean=TRUE, return a dataframe in which each column corresponds to the attractor number, state, or a node. If Boolean=FALSE, return a dataframe in which each column corresponds to a property of the attractor
#'
#' @examples
#' attr <- getAttractors(cellcycle)
#' attractorToDataframe(attr)
#' # involvedStates basinSize
#' #1 162 512
#' #2 25/785/849/449/389/141/157 512
#'
#' attractorToDataframe(attr, Boolean=TRUE)
#' # attractor state CycD Rb E2F CycE CycA p27 Cdc20 Cdh1 UbcH10 CycB
#' #1 1 1 0 1 0 0 0 1 0 1 0 0
#' #2 2 1 1 0 0 1 1 0 0 0 0 0
#' #3 2 2 1 0 0 0 1 0 0 0 1 1
#' #4 2 3 1 0 0 0 1 0 1 0 1 1
#' #5 2 4 1 0 0 0 0 0 1 1 1 0
#' #6 2 5 1 0 1 0 0 0 0 1 1 0
#' #7 2 6 1 0 1 1 0 0 0 1 0 0
#' #8 2 7 1 0 1 1 1 0 0 1 0 0
#'
#' @export
attractorToDataframe <- function(attr, sep="/", node.names=NULL, Boolean=FALSE) {
  if (Boolean) {
    # Boolean mode: expand every attractor state into one row of 0/1 node
    # values, prefixed by the attractor index and the state index.
    if (is(attr, "AttractorInfo")) {
      if (is.null(node.names)) node.names <- attr$stateInfo$genes
      # Collapse each attractor's states into a single sep-joined string.
      # vapply() (not sapply()) guarantees a character vector even for
      # degenerate inputs.
      attr <- vapply(
        attr$attractors,
        function(a) paste(a$involvedStates, collapse=sep),
        character(1)
      )
    }
    if (is.null(node.names)) stop("Invalid node.names")
    if (is.list(attr)) { attr <- unlist(attr) }
    df <- data.frame(matrix(ncol=length(node.names)+2, nrow=0))
    # FIX: use seq_along()/seq_len() instead of seq(length(...)):
    # seq(0) yields c(1, 0), so the old code iterated spuriously on
    # empty input.
    for (i in seq_along(attr)) {
      s <- attr[i]
      # Cyclic attractors arrive as sep-joined strings; split into states.
      if (is.character(s)) s <- unlist(strsplit(s, sep))
      for (j in seq_along(s)) {
        # One row per state: attractor index, state index, then the
        # Boolean value of each node (int2binState decodes the integer).
        df <- rbind(df, c(attractor=i, state=j, int2binState(s[j], node.names)))
      }
    }
    colnames(df) <- c('attractor', 'state', node.names)
    return(df)
  } else {
    # Integer mode: one row per attractor, one column per property.
    if (!is(attr, "AttractorInfo")) { stop("Error: non-valid attractor") }
    attr <- attr$attractors
    # Property names are taken from the first attractor; labeled
    # attractor objects carry extra properties.
    attr.properties <- vector("list", length(attr[[1]]))
    names(attr.properties) <- names(attr[[1]])
    for (n in names(attr.properties)) {
      # Gather this property across all attractors.
      attr.properties[[n]] <- lapply(attr, function(a) a[[n]])
      # Properties with several elements (e.g. the states of a cyclic
      # attractor) are collapsed into a single sep-joined string.
      ncol <- max(sapply(attr.properties[[n]], length))
      if (ncol > 1) {
        attr.properties[[n]] <- sapply(attr.properties[[n]], function(a) {
          paste(as.character(a), collapse=sep)
        })
      }
      attr.properties[[n]] <- unlist(attr.properties[[n]])
    }
    return(data.frame(attr.properties, stringsAsFactors=FALSE))
  }
}
#' Convert a list of attractors to a data frame.
#'
#' Convert a list of BoolNet attractor objects to a data frame. Each property of each attr$attractors corresponds to a dataframe column. Columns are named attrName.propertyName, if the list has no names numbers will be used. If the property has elements with length > 1 it converts them to a string and joins them with sep.
#'
#' @param attr.list list of BoolNet attractor objects
#' @param sep string to join elements with length > 1, default "/"
#' @param returnDataFrame if returnDataFrame='occurrence' returns a df where each column corresponds to a network and the rows to the attractor/label or labels. The values indicate the frequency of the attractor/label
#' if returnDataFrame='basinSize' returns a df where the values indicate the basin size of the attractor/label
#' if returnDataFrame='attrList' returns a list of AttractorInfo objects
#'
#' @return Dataframe, each column corresponds to a property of the attractor
#'
#' @examples
#' data(cellcycle)
#' attrs <- list(getAttractors(cellcycle))
#' attractorListToDataframe(attrs)
#'
#' @keywords internal
attractorListToDataframe <- function(attr.list, sep='/', returnDataFrame=c('occurrence','basinSize'), ...) {
  # Convert each BoolNet attractor object to a data frame, key the rows by
  # their involved states, and full-outer-join everything into one wide
  # data frame (one column per network, one row per attractor key).
  returnDataFrame <- match.arg(returnDataFrame)

  # One data frame per attractor object; involvedStates becomes the row key.
  attr.list <- lapply(attr.list, attractorToDataframe, sep)

  # FIX: seq_along() instead of 1:length(): the latter yields c(1, 0) for
  # an empty list and would create bogus names.
  if (is.null(names(attr.list))) names(attr.list) <- seq_along(attr.list)

  for (n in names(attr.list)) {
    # Key rows by the attractor's involved states.
    rownames(attr.list[[n]]) <- attr.list[[n]]$involvedStates
    # For 'occurrence', replace every observed value by 1 (presence flag);
    # for 'basinSize', keep the basin sizes as-is.
    if (returnDataFrame=='occurrence') attr.list[[n]] <- replace(attr.list[[n]],!is.na(attr.list[[n]]),1)
    attr.list[[n]]$involvedStates <- NULL   # drop the key column itself
    names(attr.list[[n]]) <- n              # label the value column by network
  }

  # Full outer join of all per-network frames on their row names.
  attr.df <- Reduce(function(x, y){
    df <- merge(x, y, by= "row.names", all=TRUE)
    rownames(df) <- df$Row.names
    df$Row.names <- NULL
    return(df)
  }, attr.list)
  attr.df
}
#' Convert a data frame with nodes displayed in Boolean format to a BoolNet attractor.
#'
#' Convert a data frame with nodes displayed in Boolean format to a BoolNet attractor. First column is the attractor number, second is the number of state inside the attractor, the rest of the columns correspond to each node.
#'
#' @param df Dataframe, see \code{\link{attractorToDataframe}}; each column corresponds to the attractor number, state, or a node
#' @param fixedGenes fixedGenes of network
#'
#' @return attr BoolNet attractor object
#'
#' @examples
#' > data("cellcycle")
#' > attr <- getAttractors(cellcycle)
#' > attr.df <- attractorToDataframe(attr)
#' > print(dataframeToAttractor(attr.df))
#'
#' @keywords internal
#' @export
dataframeToAttractor <- function(df, fixedGenes) {
  # Convert one Boolean state (a one-row data.frame of 0/1 node values,
  # columns 1 and 2 already dropped) into its integer encoding. The first
  # node is the least significant bit, matching BoolNet's convention.
  bin2intState <- function(x) {
    x <- rev(x)
    sum(2^(which(rev(unlist(strsplit(as.character(x), "")) == 1)) - 1))
  }

  # Collect the integer-encoded states belonging to each attractor.
  attractors <- vector("list", length = max(df$attractor))
  for (i in seq_len(nrow(df))) {
    row <- df[i, ]
    n <- row[[1]]                           # attractor index (column 1)
    state <- bin2intState(row[c(-1, -2)])   # drop 'attractor'/'state' columns
    attractors[[n]] <- c(attractors[[n]], state)
  }

  # Re-shape each attractor into BoolNet's list format: a 1 x l array of
  # involved states; the basin size is unknown when reconstructing from a
  # data frame, hence NA.
  for (i in seq_along(attractors)) {
    l <- length(attractors[[i]])
    attractors[[i]] <- list(
      involvedStates = array(attractors[[i]], dim = c(1, l)),
      basinSize = NA
    )
  }

  node.names <- colnames(df)[c(-1, -2)]
  if (missing(fixedGenes)) {
    # BUG FIX: this previously read `length(attr$stateInfo$genes)`, but no
    # `attr` object exists in this scope (`attr` is the base R function),
    # so the default branch always errored with "object of type 'closure'
    # is not subsettable". Use the node names derived from `df` instead.
    fixedGenes <- rep(-1, length(node.names))
    names(fixedGenes) <- node.names
  }

  stateInfo <- list(genes = node.names, fixedGenes = fixedGenes)
  result <- list(stateInfo = stateInfo, attractors = attractors)
  class(result) <- "AttractorInfo"
  result
}
#' Label a list of int attractors and agregate by label
#'
#' Takes a dataframe where the rownames are states in integer format (cycles are strings joined with sep). The function labels the states and agregates the dataframe using label. Agregate splits the data into subsets by label, computes summary statistics for each, and returns the result in a convenient form. See \code{\link[stats]{aggregate}}
#'
#' @param df dataframe with states as rownames
#' @param node.names node names of the state, the length must be the same that the state's
#' @param label.rules dataframe with labels (1st col) and rules (2nd col); if more than one rule is true, all labels are appended. The node names present in the rules must be in node.names
#' @param sep string to join elements with length > 1, default "/"
#'
#' @return Dataframe, each row corresponds to a label and each column corresponds to a property of the original dataframe
#'
#' @keywords internal
#' @export
aggregateByLabel <- function(df, node.names, label.rules, sep='/') {
  # Label every rowname of `df` (integer-encoded states; cyclic attractors
  # are several states joined by `sep`), then aggregate the numeric columns
  # by summing within each label.
  labels <- lapply(rownames(df), function(state) {
    # Split a possibly sep-joined key back into individual integer states.
    state <- as.numeric(unlist(strsplit(state, sep)))
    # Label each state with the project helper labelState().
    label <- lapply(state, function(s) {
      l <- labelState(s, node.names, label.rules)
    })
    # Re-join the per-state labels with `sep`.
    # NOTE(review): `label` is a list here, and paste() coerces each element
    # with as.character(); this assumes labelState() returns a length-1
    # value per state -- confirm against labelState()'s contract.
    label <- paste(label, collapse=sep)
  })
  labels <- unlist(labels)
  # Treat missing entries as 0 so the per-label sums are defined.
  df[is.na(df)] <- 0
  # Split the rows by label and sum each column within a label.
  df <- aggregate(df, list(labels), sum)
  # aggregate() adds the grouping values as column Group.1; promote it to
  # row names and drop it.
  rownames(df) <- df$Group.1
  df$Group.1 <- NULL
  as.data.frame(df)
}
#' Count won or lost values in comparison with those present in a reference column.
#'
#' Counts how many values were won or lost in comparison between dataframe. It measures ocurrences and then counts the values with sum.
#'
#' @param df dataframe with numeric values
#' @param reference name of column to use as reference
#'
#' @param axis axis along which to count new/lost values, see \code{\link[base]{apply}}
#' @return Matrix with rows \code{new} and \code{lost} counting values gained and lost relative to the reference
#'
#' @examples
#' df <- data.frame(WT=c(1,2,0,0), a=c(1,2,0,0),
#' b=c(0,2,1,0), c=c(1,0,3,1))
#'countChangesDataframe(df)
#' # WT a b c
#' # new 0 0 1 2
#' # lost 0 0 1 1
#'countChangesDataframe(df, axis=1)
#' # WT a b c
#' # new 0 0 2 1
#' # lost 1 1 0 0
#'
#' @keywords internal
#' @export
countChangesDataframe <- function(df, reference='WT', axis=2) {
  # Count values gained ("new") and lost ("lost") relative to the column
  # named by `reference`, along `axis` (2 = per column, 1 = per row).
  # x/x yields 1 for any non-zero entry and NaN for 0/0; zeroing the NaNs
  # turns `df` into a 0/1 occurrence table.
  occurrence <- df / df
  occurrence[is.na(occurrence)] <- 0
  # +1 marks an occurrence absent from the reference, -1 one lost from it.
  delta <- occurrence - occurrence[[reference]]
  rbind(
    new = apply(delta, axis, function(v) sum(v == 1)),
    lost = apply(delta, axis, function(v) sum(v == -1))
  )
}
|
fdde6839d6f5889a4eba97fd4fbf8ebed8cb0e3a | 9dc1278807d585d24cf5b9ba2f74b9b5f40d8c2d | /tests/testthat/test_parseImage.R | a8daafda582b4db584b7691af1674bca133b2efd | [
"MIT"
] | permissive | stephenwilliams22/Spaniel | b6387e686d9e280deeab89d63655a93bb5476f05 | 6dada98d8a9eddde4a4610457b8d4311f9ecb2ec | refs/heads/master | 2020-08-01T22:26:42.014191 | 2019-09-25T14:09:30 | 2019-09-25T14:09:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 617 | r | test_parseImage.R | # Tests for parseImage function
# ------------------------------------------------------------------------------
context("Testing parseImage")
# These tests ensure that the parseImage() function works correctly and
# returns a graphical object (grob) when given the path to an image file.
# Test parseImage
# ------------------------------------------------------------------------------
# Load the example H&E-stained image shipped with the Spaniel package.
imgPath <- file.path(system.file(package = "Spaniel"),
                     "HE_Rep1_resized.jpg")
testGrob <- parseImage(imgPath)
test_that("parseImage loads an image and creates a grob", {
  # The result should be a raster grob (a grid graphics object).
  expect_is(testGrob, c("rastergrob","grob","gDesc"))
})
|
30ce6f71612864f1e6e0d045a4c0e5067522111d | 316c717e4d40ebccec50658cf22c81930908e620 | /training_testing_subset.R | 0b662a02426099c58f2bea5c9a8bd9ee05e3a1fb | [] | no_license | selinawj/Reviews-Classifier | c7951ded37d53df0a5147655e50aa52329d2576d | 70881b7fdedf0e1a0674962992d82e0148b58054 | refs/heads/master | 2020-06-10T23:01:26.671614 | 2016-12-07T16:09:24 | 2016-12-07T16:09:24 | 75,850,735 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,770 | r | training_testing_subset.R | #removing empty reviews
education_df <- na.omit(education_df)
finance_df <- na.omit(finance_df)
game_df <- na.omit(game_df)
social_df <- na.omit(social_df)
weather_df <- na.omit(weather_df)
#using a smaller subset of training and testing data
training_data_5000 <- rbind(education_df[1:1000,], finance_df[1:1000,], game_df[1:1000,], social_df[1:1000,], weather_df[1:1000,])
testing_data_1000 <- rbind(education_df[1001:1200,], finance_df[1001:1200,], game_df[1001:1200,], social_df[1001:1200,], weather_df[1001:1200,])
dataset_6000 <- rbind(training_data_5000, testing_data_1000)
dataset_pp <- dataset_6000
dataset_pp <- str_replace_all(dataset_pp[,1],"[^[:graph:]]", " ")
dataset_pp <- tolower(dataset_pp)
dataset_pp <- removeWords(dataset_pp, stopwords())
dataset_pp <- removePunctuation(dataset_pp)
dataset_pp <- removeNumbers(dataset_pp)
dataset_pp <- stemDocument(dataset_pp, language = "english")
#build dtm
dataset_pp <- as.data.frame(dataset_pp)
dtm <- create_matrix(dataset_pp[,1])
dtm <- removeSparseTerms(dtm, 0.998)
dtm <- weightTfIdf(dtm, normalize = TRUE)
mat <- as.matrix(dtm)
#train the model
#naive bayes learning algorithm
nb_classifier = naiveBayes(mat[1:5000,], as.factor(dataset_6000[1:5000,2]))
#produce predictions
predicted = predict(nb_classifier, mat[5001:6000,])
#generate confusion matrix
confusion_matrix = table(dataset_6000[5001:6000, 2], predicted)
# education finance game social weather
#education 4 0 177 13 6
#finance 20 3 151 14 12
#game 8 0 176 7 9
#social 16 0 155 17 12
#weather 3 0 173 14 10
#calculate recall_accuracy
recall_accuracy(dataset_6000[5001:6000, 2], predicted)
#0.21 |
60aa004fd28d22375a36fea17fc52b13dac5bed4 | 5cc65e4cf454eaf7adce2924d9935401c0780f81 | /cachematrix.R | 20588a84aa3100bb006a6f798f884cf1b5c312e2 | [] | no_license | cplearning/ProgrammingAssignment2 | 73157f94bd7870c1f3c2b46ef631280a1f6fed0d | 98a46580583c06b8fdbf09469f7302225eebb326 | refs/heads/master | 2021-07-23T04:33:42.627803 | 2017-10-31T19:18:24 | 2017-10-31T19:18:24 | 109,038,864 | 0 | 0 | null | 2017-10-31T18:47:28 | 2017-10-31T18:47:27 | null | UTF-8 | R | false | false | 1,160 | r | cachematrix.R | ## File created by cplearning on 2017-10-31
## The functions calculate the inverse of a matrix and cache teh results for the next use
## makeCacheMatrix creates a list of functions that store and retrieve the matrix 'x'.
## When used with the companion function cachSolve to calculate the matrix inverse,
## it supports storing and retrieving the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
  # Build a cache container for a square matrix and its inverse.
  # Returns a list of accessors: set/get for the matrix, setinv/getinv for
  # the cached inverse (see cacheSolve()). For a non-square matrix a
  # message is printed and NULL is returned.
  if (nrow(x) != ncol(x)) {
    message("sorry, not a square matrix")
    return(NULL)
  }
  cached_inverse <- NULL
  list(
    # Replace the stored matrix and invalidate any cached inverse.
    set = function(value) {
      x <<- value
      cached_inverse <<- NULL
    },
    get = function() x,
    setinv = function(inverse) cached_inverse <<- inverse,
    getinv = function() cached_inverse
  )
}
## cacheSolve receives as input 'x', a list of functions created with makeCacheMatrix and calculates and stores
## the inverse of the matrix in x.
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix wrapped in `x` (a makeCacheMatrix()
  # container), reusing the cached inverse when one is available and
  # storing a freshly computed one otherwise.
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get())
  x$setinv(inverse)
  inverse
}
|
2961632a8f495f5c59463a072aa9170095102cca | 8734b26adb5975f7b6e7685cf899d8b23599a154 | /R/utils_get_dots.R | 1833e5f72a0000db02179a9d8094892d09a21022 | [] | no_license | strengejacke/sjlabelled | 3606b01702e59a6ac47459fd216ab468f777c266 | 548fa397bd013ec7e44b225dd971d19628fdc866 | refs/heads/master | 2022-11-12T10:34:47.538335 | 2022-10-23T20:16:20 | 2022-10-23T20:16:20 | 92,868,296 | 78 | 16 | null | 2020-11-25T14:50:47 | 2017-05-30T19:24:17 | R | UTF-8 | R | false | false | 3,215 | r | utils_get_dots.R | # function to evaluate dots in a tidyselect-style and return
# the variable names as character vector
# Evaluate `dots` (a character vector of tidyselect-style selection tokens,
# e.g. 'starts_with("x")', 'a:c', '3:5', or plain column names) against the
# columns of `dat`, and return `dat` restricted to the matched columns.
.get_dot_data <- function(dat, dots, verbose = TRUE) {
  # Non-data-frame input or an empty selection is returned unchanged.
  if (!is.data.frame(dat) || length(dots) == 0) {
    return(dat)
  }
  columns <- colnames(dat)
  # First pass: translate every selection token into column names (or, for
  # plain tokens, pass them through for the second pass below).
  x <- unlist(lapply(dots, function(i) {
    # contains-token
    if (grepl("^contains\\(", i)) {
      pattern <- gsub("contains\\(\"(.*)\"\\)", "\\1", i)
      columns[string_contains(pattern, columns)]
      # starts-with token
    } else if (grepl("^starts\\(", i) || grepl("^starts_with\\(", i)) {
      pattern <- gsub("(.*)\\(\"(.*)\"\\)", "\\2", i)
      columns[string_starts_with(pattern, columns)]
      # ends-with token
    } else if (grepl("^ends\\(", i) || grepl("^ends_with\\(", i)) {
      pattern <- gsub("(.*)\\(\"(.*)\"\\)", "\\2", i)
      columns[string_ends_with(pattern, columns)]
      # one-of token
    } else if (grepl("^one_of\\(", i)) {
      # Split the quoted, comma-separated name list and strip quotes/spaces.
      pattern <- gsub("(\"|\\s)", "", unlist(strsplit(gsub("one_of\\(\"(.*)\"\\)", "\\1", i), ",")))
      columns[string_one_of(pattern, columns)]
      # num_range token
    } else if (grepl("^num_range\\(", i)) {
      # .get_num_range() expands prefix/range/width into concrete names.
      columns[match(.get_num_range(i), columns)]
      # from-to token
    } else if (grepl(":", i, fixed = TRUE)) {
      # Ranges may mix numeric positions and column names on either side.
      tmp <- unlist(strsplit(i, ":", fixed = TRUE))
      start <- if (.is_num_chr(tmp[1]))
        as.numeric(tmp[1])
      else
        which(columns == tmp[1])
      end <- if (.is_num_chr(tmp[2]))
        as.numeric(tmp[2])
      else
        which(columns == tmp[2])
      columns[start:end]
      # simple name
    } else {
      i
    }
  }))
  # Second pass: plain tokens that are numeric (positions) are resolved to
  # the corresponding column names.
  x <- unlist(lapply(x, function(i) {
    if (.is_num_chr(i))
      columns[as.numeric(i)]
    else if (.is_num_fac(i))
      columns[as.numeric(as.character(i))]
    else
      i
  }))
  # Warn (in red) about requested names that do not exist in the data.
  not_found <- setdiff(x, columns)
  if (length(not_found) && isTRUE(verbose)) {
    insight::print_color(sprintf(
      "%i variables were not found in the dataset: %s\n",
      length(not_found),
      paste0(not_found, collapse = ", ")
    ),
    color = "red")
  }
  # drop = FALSE keeps the result a data frame even for one column.
  dat[, intersect(x, columns), drop = FALSE]
}
#' @importFrom stats na.omit
.is_num_chr <- function(x) {
  # TRUE when `x` is a character vector whose non-missing entries can all
  # be parsed as numbers.
  if (!is.character(x)) {
    return(FALSE)
  }
  parsed <- suppressWarnings(as.numeric(x[!is.na(x)]))
  !any(is.na(parsed))
}
.is_num_fac <- function(x) {
  # TRUE when `x` is a factor whose levels all look numeric.
  if (!is.factor(x)) {
    return(FALSE)
  }
  parsed <- suppressWarnings(as.numeric(levels(x)))
  !any(is.na(parsed))
}
.get_num_range <- function(i) {
  # Parse a 'num_range("prefix", from:to, width)' selector string and
  # expand it into the concrete column names it denotes.
  # Split the argument list out of the call text.
  raw_args <- trimws(unlist(strsplit(gsub("num_range\\((.*)\\)", "\\1", i), ",")))
  # Keep only the values: strip any 'name=' prefix, then surrounding quotes.
  vals <- gsub("\"", "", trimws(gsub("(.*)(=)(.*)", "\\3", raw_args)), fixed = TRUE)
  # Recover explicit argument names where 'name=value' syntax was used.
  named <- grepl("=", raw_args)
  if (any(named)) {
    names(vals)[named] <- trimws(gsub("(.*)(=)(.*)", "\\1", raw_args[named]))
  }
  # Fill in missing names positionally: prefix, range, width.
  arg_order <- c("prefix", "range", "width")
  if (is.null(names(vals))) {
    names(vals) <- arg_order[seq_along(vals)]
  }
  unnamed <- which(is.na(names(vals)))
  if (length(unnamed)) {
    names(vals)[unnamed] <- arg_order[unnamed]
  }
  # Ignore any extra arguments beyond the three that are understood.
  if (length(vals) > 3) {
    vals <- vals[seq_len(3)]
  }
  # Decode the from:to range and the optional zero-pad width.
  from <- as.numeric(gsub("(\\d):(.*)", "\\1", vals["range"]))
  to <- as.numeric(gsub("(.*):(\\d)", "\\2", vals["range"]))
  width <- as.numeric(vals["width"])
  if (is.na(width)) {
    sprintf("%s%i", vals["prefix"], from:to)
  } else {
    sprintf("%s%.*i", vals["prefix"], width, from:to)
  }
}
|
9f954378dd001b6ef113c5a657f73898972ddfdb | 3e5d8d362b3367e4ff0e152b0242b7a285d8484f | /R/write_gifti.R | f08ac8e87e474d9fc7bc4654ec4c7873250ad6b0 | [] | no_license | mandymejia/ciftiTools | d591a6e8732dd9df17dd62d959a7a808eee16bef | 7becc99a6301c47541c883739f7fb2f0f3413e60 | refs/heads/master | 2023-08-17T06:07:35.229385 | 2023-01-23T20:00:17 | 2023-01-23T20:00:17 | 241,136,369 | 30 | 10 | null | 2023-08-21T21:47:22 | 2020-02-17T15:06:01 | HTML | UTF-8 | R | false | false | 9,278 | r | write_gifti.R | #' Write a data matrix to a GIFTI metric file
#'
#' Write the data for the left or right cortex to a metric GIFTI file.
#'
#' @param x A \eqn{V x T} data matrix (V vertices, T measurements). This can also
#' be an object from \code{gifti::readgii}, or a length \eqn{T} list of length
#' \eqn{V} vectors.
#' @param gifti_fname Where to write the GIFTI file.
#' @param hemisphere \code{"left"} (default) or \code{"right"}. Ignored if
#' \code{data} is already a \code{"gifti"} object.
#' @param intent "NIFTI_INTENT_*". \code{NULL} (default) will use
#' metadata if \code{data} is a \code{"gifti"} object, or "NONE" if it cannot be
#' inferred. If not \code{NULL} and \code{data} is a \code{"gifti"} object, it will
#' overwrite the existing intent. See
#' https://nifti.nimh.nih.gov/nifti-1/documentation/nifti1fields/nifti1fields_pages/group__NIFTI1__INTENT__CODES.html/document_view .
#' @param data_type the type of \code{data}:
#' "NIFTI_TYPE_*" where * is "INT32" or "FLOAT32". If \code{NULL} (default), the
#' data type will be inferred. If not \code{NULL} and \code{data} is a
#' \code{"gifti"} object, it will overwrite the existing data type.
#' @param encoding One of "ASCII", "Base64Binary", or "GZipBase64Binary". If
#' \code{NULL} (default), will use the metadata if \code{data} is a GIFTI object,
#' or "ASCII" if the \code{data_type} is "NIFTI_TYPE_INT32" and
#' "GZipBase64Binary" if the \code{data_type} is "NIFTI_TYPE_FLOAT32". If not
#' \code{NULL} and \code{data} is a \code{"gifti"} object, it will overwrite the
#' existing data type.
#' @param endian "LittleEndian" (default) or "BigEndian". If \code{data} is a
#' \code{"gifti"} object, it will overwrite the existing endian.
#' @param col_names The names of each data column in \code{gii} (or entries in
#' \code{gii$data}).
#' @param label_table A data.frame with labels along rows. The row names should
#' be the label names. The column names should be among: "Key", "Red", "Green",
#' "Blue", and "Alpha". The "Key" column is required whereas the others are
#' optional (but very often included). Values in the "Key" column should be
#' non-negative integers, typically beginning with 0. The other columns should
#' be floating-point numbers between 0 and 1.
#'
#' Although CIFTI files support a different label table for each data column,
#' GIFTI files only support a single label table. So this label table should be
#' applicable to each data column.
#'
#' @return Whether the GIFTI was successfully written
#'
#' @importFrom gifti writegii
#' @family writing
#' @export
write_metric_gifti <- function(
  x, gifti_fname, hemisphere=c("left", "right"),
  intent=NULL, data_type=NULL, encoding=NULL, endian=c("LittleEndian", "BigEndian"),
  col_names=NULL, label_table=NULL){
  # Write `x` (a data matrix, list of vectors, or "gifti" object) to a
  # metric GIFTI file at `gifti_fname`. See the roxygen block above for
  # the full parameter contract. Returns writegii()'s result.

  # Match args.
  hemisphere <- match.arg(hemisphere, c("left", "right"))
  endian <- match.arg(endian, c("LittleEndian", "BigEndian"))

  # If gii is a "gifti", use its metadata to determine unspecified options.
  if (is.gifti(x)) {
    if (is.null(intent)) { intent <- x$data_info$Intent }
    if (is.null(data_type)) { data_type <- x$data_info$DataType }
    if (is.null(encoding)) { encoding <- x$data_info$Encoding }
  # If x is not a "gifti", convert it to a GIFTI and use default options
  # where unspecified: intent NONE; INT32 when every value is a whole
  # number, else FLOAT32; ASCII for integers, GZipBase64Binary for floats.
  } else {
    if (is.null(intent)) { intent <- "NONE" }
    x <- as.metric_gifti(x, intent=intent)
    if (is.null(data_type)) {
      data_type <- ifelse(all_integers(do.call(cbind, x$data)), "INT32", "FLOAT32")
    }
    if (is.null(encoding)) {
      encoding <- ifelse(grepl("INT", data_type), "ASCII", "GZipBase64Binary")
    }
  }

  # T_ = number of data columns (measurements).
  T_ <- length(x$data)

  # Coerce the stored data to integer mode when writing INT32.
  # NOTE(review): this compares against the bare "INT32" string; a caller
  # passing the already-prefixed "NIFTI_TYPE_INT32" skips this branch --
  # confirm whether that is intended.
  if (data_type=="INT32") {
    for (ii in seq(T_)) { mode(x$data[[ii]]) <- "integer" }
  }

  # Format options: normalize intent/data type to their NIFTI_* prefixed
  # forms (gsub makes this idempotent), and record encoding/endianness.
  x$data_info$Intent <- paste0("NIFTI_INTENT_", gsub("NIFTI_INTENT_", "", toupper(intent)))
  x$data_info$DataType <- paste0("NIFTI_TYPE_", gsub("NIFTI_TYPE_", "", toupper(data_type)))
  x$data_info$Encoding <- encoding
  x$data_info$Endian <- endian
  # Record the hemisphere in the file-level metadata.
  hemisphere_idx <- which(names(x$file_meta)=="AnatomicalStructurePrimary")[1]
  x$file_meta[hemisphere_idx] <- list(left="CortexLeft", right="CortexRight")[hemisphere]

  # Column Names: store one "Name" entry per data column in data_meta.
  if (!is.null(col_names)) {
    col_names <- as.character(col_names)
    if (length(col_names) != T_) {
      stop("The length of the data `col_names` must be the same length as the data (number of columns).")
    }
    for (ii in seq(T_)) {
      # Bail out if there are fewer metadata entries than data columns.
      if (length(x$data_meta) < T_) {break}
      stopifnot(is.matrix(x$data_meta[[ii]]))
      # Expected metadata shape: a matrix with columns "names" and "vals".
      if (ncol(x$data_meta[[ii]]) == 2 && all(sort(colnames(x$data_meta[[ii]])) == sort(c("names", "vals")))) {
        md_names <- x$data_meta[[ii]][,colnames(x$data_meta[[ii]]) == "names"]
        if ("Name" %in% md_names) {
          # Overwrite an existing non-empty name, warning about it.
          if (x$data_meta[[ii]][which(md_names=="Name")[1],2] != "") {
            ciftiTools_warn(paste0("Replacing the existing data column name for column ", ii))
          }
          x$data_meta[[ii]][which(md_names=="Name")[1],2] <- col_names[ii]
        } else {
          # NOTE(review): the "vals" entry appended here is the whole
          # metadata matrix rather than col_names[ii]; this looks like a
          # bug -- verify against writegii()'s expectations.
          x$data_meta[[ii]] <- rbind(x$data_meta[[ii]], c("names"="Name", "vals"=x$data_meta[[ii]]))
        }
      } else {
        # Unexpected metadata shape: replace it with a minimal Name entry.
        # NOTE(review): the warning text is missing a space after the
        # column number ("column 1did not...").
        ciftiTools_warn(paste0("Data meta entry for data column ", ii, "did not have the expected columns `names` and `vals`. Overwriting."))
        x$data_meta[[ii]] <- matrix(c("Name", col_names[ii]), nrow=1)
        colnames(x$data_meta[[ii]]) <- c("names", "vals")
      }
    }
  }

  # Label Table: validate and attach the (single, shared) label table.
  if (!is.null(label_table)) {
    ## Must be a matrix or data.frame
    stopifnot(is.matrix(label_table) || is.data.frame(label_table))
    ## Column names: unique, and drawn from the allowed set with Key required.
    if (length(unique(colnames(label_table))) != length(colnames(label_table))) {
      stop("Label table column names must be unique.")
    }
    if (!all(colnames(label_table) %in% c("Key", "Red", "Green", "Blue", "Alpha"))) {
      stop("Label table columns must be among: `Key` (required), `Red`, `Green`, `Blue`, and `Alpha`.")
    }
    if (!("Key" %in% colnames(label_table))) { stop("`Key` column is required in the label table.") }
    ## Data type and values: with INT32 data, every data value must appear
    ## among the label keys (NA is always allowed).
    if (data_type != "INT32") {
      warning("The data type was not INT32, yet there is a label table (with integer keys). Writing the GIFTI anyway.\n")
    } else {
      label_vals <- as.numeric(label_table[,colnames(label_table) == "Key"])
      data_vals <- unique(as.vector(do.call(cbind, x$data)))
      if (!all(data_vals %in% c(NA, label_vals))) {
        stop(paste0("These data values were not in the label table:", paste(data_vals[!(data_vals %in% label_vals)], collapse=", ")))
      }
    }
    # Store all label-table entries as character, as the writer expects.
    label_table[,] <- as.matrix(apply(label_table, 2, as.character))
    x$label <- label_table
  }

  writegii(x, gifti_fname, use_parsed_transformations=TRUE)
}
#' Write a \code{"surf"} to a GIFTI surface file
#'
#' Write the data for the left or right surface to a surface GIFTI file.
#'
#' @param x A \code{"surf"} object, an object from \code{gifti::readgii}, or a
#' list with elements "pointset" and "triangle".
#' @param gifti_fname Where to write the GIFTI file.
#' @param hemisphere "left" (default) or "right". Ignored if \code{data} is already
#' a "gifti" object, or if it is a \code{"surf"} object with the hemisphere metadata
#' already specified.
#' @param encoding A length-2 vector with elements chosen among "ASCII",
#' "Base64Binary", and "GZipBase64Binary". If \code{NULL} (default), will use
#' the metadata if \code{data} is a "gifti" object, or "GZipBase64Binary" for the
#' "pointset" and "ASCII" for the "triangles" if \code{data} is not already
#' a GIFTI.
#' @param endian "LittleEndian" (default) or "BigEndian".
#'
#' @return Whether the GIFTI was successfully written
#'
#' @importFrom gifti writegii
#' @family writing
#' @family surfing
#' @export
write_surf_gifti <- function(
  x, gifti_fname, hemisphere=c("left", "right"),
  encoding=NULL, endian=c("LittleEndian", "BigEndian")){
  # Write `x` (a "surf" object, "gifti" object, or pointset/triangle list)
  # to a surface GIFTI file at `gifti_fname`. See the roxygen block above
  # for the full parameter contract. Returns writegii()'s result.

  # Match args.
  hemisphere <- match.arg(hemisphere, c("left", "right"))
  endian <- match.arg(endian, c("LittleEndian", "BigEndian"))

  # If gii is a "gifti", use its metadata to determine unspecified options.
  if (is.gifti(x)) {
    if (is.null(encoding)) { encoding <- x$data_info$Encoding }
  # If gii is not a "gifti", convert it to a GIFTI and use default options
  # where unspecified: GZipBase64Binary for the pointset, ASCII for the
  # triangles.
  } else {
    x <- as.surf_gifti(x, hemisphere=hemisphere)
    if (is.null(encoding)) {
      encoding <- as.character(list(pointset="GZipBase64Binary", triangle="ASCII")[names(x$data)])
    }
  }

  # Format options
  x$data_info$Endian <- endian
  x$data_info$Encoding <- encoding

  # Issue #5: the (integer) triangle component only supports ASCII
  # encoding, so force it, warning if the caller requested otherwise.
  tri_enc <- x$data_info$Encoding[names(x$data) == "triangle"]
  if (tri_enc != "ASCII") {
    ciftiTools_warn(paste(
      "The encoding for the triangle component was", tri_enc,
      "but only ASCII is supported for integer data types.",
      "Overwriting.\n"
    ))
    x$data_info$Encoding[names(x$data) == "triangle"] <- "ASCII"
  }

  writegii(x, gifti_fname, use_parsed_transformations=TRUE)
}
#' @rdname write_surf_gifti
#' @export
write_surf <- function(
  x, gifti_fname, hemisphere=c("left", "right"),
  encoding=NULL, endian=c("LittleEndian", "BigEndian")){
  # Convenience alias for write_surf_gifti(); forwards all arguments
  # unchanged.
  write_surf_gifti(
    x = x,
    gifti_fname = gifti_fname,
    hemisphere = hemisphere,
    encoding = encoding,
    endian = endian
  )
}
|
0a02abe7bf7acb8edaa8e03443ddcba3cfbcc11f | d8585f141faff24dacdde002672a37aee6b5c796 | /redudantCode.r | 2a9f2d3f0fbf4366e878d67c3145ae83cac48521 | [] | no_license | yangxu698/RateSub | c8cd5ecc951dc3f9bd2847b8c57ecdef2683a814 | 90b7ada7faa0bb27da33d51bfcd8d51fab27068d | refs/heads/master | 2020-04-29T05:15:17.548730 | 2019-04-29T18:50:09 | 2019-04-29T18:50:09 | 175,876,041 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,715 | r | redudantCode.r | read_csv("../MSA0680.csv") %>% filter(accountnumber == "CA02600002")
# NOTE(review): this file is scratch/exploratory code ("redundant code").
# Most statements reference objects not defined here (CA, data_complement,
# branchB, dataA1, MSA0680, data_199901, Deposit_InstitutionDetails), and
# several expressions look unfinished. Kept verbatim; the comments below
# flag the apparent problems.

# NOTE(review): `filter(accountnumber = CA)` uses `=` (a named argument)
# where `==` was presumably intended, and `CA` is undefined -- this line
# would error as-is.
data = read_csv("../MSA0680.csv") %>% filter(accountnumber = CA)

# NOTE(review): this pipe chain has no data source in front of mutate()
# and ends by piping into `branchB$surveydate`, which is not a function --
# it appears to be an abandoned fragment.
mutate(date_num = as.numeric(as.POSIXct(surveydate))) %>%
left_join(data_complement, by = "accountnumber") %>%
select(accountnumber, INST_NM) %>%
group_by(INST_NM) %>%
unique() %>%
branchB$surveydate
summary(dataA1)

# Select the longest-surveyed "12MCD10K" branch per institution.
data_sorted = MSA0680 %>% mutate(date_num = as.numeric(as.POSIXct(surveydate))) %>% ## convert the survey date to a number
filter(accountnumber %in% data_199901 & productcode == "12MCD10K") %>% ## select accountnumber that has data in Jan.1999 and productcode "12MCD10K"
left_join(data_complement, by = "accountnumber") %>% ## append the info: institution name and branch deposits
group_by(accountnumber) %>% ## grouping by accountnumber
mutate(survey_span = max(date_num) - min(date_num)) %>% ## calculate the survey span
ungroup() %>% group_by(INST_NM) %>% top_n(1, survey_span) %>% ## grouping by institution name and select the longest survey span
ungroup()
str(data_sorted)

## At this point, we need to check if there is a tie for each institution
tier_indicator = data_sorted %>% group_by(INST_NM) %>% summarise(branchNBR = table(unique(accountnumber)))
str(tier_indicator)
head(tier_indicator,25)
table(data_sorted$INST_NM)

# NOTE(review): the next two lines start mid-pipeline (there is no data
# source in front of top_n) -- another abandoned fragment.
top_n(1, BRANCHDEPOSITS) %>%
ungroup() %>% select(-date_num)
summary(data_sorted,25)

data = Deposit_InstitutionDetails %>% select(ACCT_NBR, CNTY_FPS, STATE_FPS, MSA, CBSA)
|
4ad3217bd41e3c7140b799dd42af2b078b856a41 | 563f761040775133293553aaf17c4b8eac01d4ee | /scripts/4.5_evaluate_best_model.R | a3ddf62ff27e7ca155ad5f8cbf5a0e8349edb336 | [
"MIT"
] | permissive | brendanhcullen/personality-diabetes | d77a30b34f6722833b13891d12585051f3ee606c | 6ac3ce8f5dc34eb03f78429e1f83edc45266beb6 | refs/heads/master | 2023-02-10T10:49:39.990733 | 2021-01-01T19:59:08 | 2021-01-01T19:59:08 | 198,284,368 | 0 | 0 | MIT | 2020-12-17T00:12:01 | 2019-07-22T18:53:46 | R | UTF-8 | R | false | false | 1,005 | r | 4.5_evaluate_best_model.R | library(here)
library(tidyverse)
library(caret)

# Load holdout test data --------------------------------------------------

best_mods_fits <- readRDS(here("output/machine_learning/training/best_mods_fits.RDS"))
test_data_pp <- readRDS(here("output/machine_learning/testing/test_data_pp.RDS"))

# convert diabetes to factor (confusionMatrix requires factor inputs)
test_data_pp$diabetes <- as.factor(test_data_pp$diabetes)

# Evaluate best model on test data ----------------------------------------

# pull out the final fitted models from the caret training objects
spi_5_final_mod <- best_mods_fits$rf_spi_5$finalModel
spi_27_final_mod <- best_mods_fits$rf_spi_27$finalModel
spi_135_final_mod <- best_mods_fits$rf_spi_135$finalModel

# Use the SPI-5 model to predict diabetes status in the holdout test data.
# BUG FIX: the argument was previously spelled `new_data`, which is not a
# formal of predict.randomForest() (the formal is `newdata`, and `new_data`
# does not partially match it). The test set was therefore silently ignored
# and predictions were returned for the *training* data -- which explains
# the "number of rows don't match" error previously noted in this script.
pred_spi_5 <- predict(spi_5_final_mod, newdata = test_data_pp)

# confusion matrix for the SPI-5 model on the holdout set
conf_mat_spi_5 <- confusionMatrix(pred_spi_5, test_data_pp$diabetes)
|
35cb0cfee49d385470e5f8b76c16d06a600b22e0 | 9ea27859f36ecf8bf273b34a768865f03af8e771 | /R/RedisParam-logger.R | 22592f2a72a27a80cf08962aee6c78f2d341a8e1 | [] | no_license | mtmorgan/RedisParam | dbe44f9f5478b72e7757468d677866788b39d32b | fca99f1834fdb5323321e485a2594cf6323b92d7 | refs/heads/master | 2023-07-21T11:53:03.884482 | 2022-06-10T15:48:14 | 2022-06-10T15:48:14 | 171,951,305 | 2 | 1 | null | 2023-07-14T14:15:03 | 2019-02-21T21:54:37 | R | UTF-8 | R | false | false | 1,847 | r | RedisParam-logger.R | ## Get the logger name
## Build the futile.logger logger name for a param object:
## "RedisParam." followed by the BiocParallel job name.
get.logger.name <-
    function(x)
{
    sprintf("RedisParam.%s", bpjobname(x))
}
## Get the file used by the logger
## File name used when logging to disk. Workers and the manager get
## distinct, PID-stamped names; an NA worker flag is treated as manager.
get.log.file <-
    function(x)
{
    worker <- rpisworker(x)
    if (isTRUE(worker)) {
        paste0("RedisParam_worker_log_", Sys.getpid())
    } else {
        paste0("RedisParam_manager_log_", Sys.getpid())
    }
}
## Configure the futile.logger logger for `x`: apply the requested
## threshold and, when a log directory is set, attach a file appender so
## messages go to a per-process file instead of the console.
config.logger <-
    function(x)
{
    logger.name <- get.logger.name(x)
    if (bplog(x)) {
        set.log.threshold(x)
        ## NA logdir means "log to console"; otherwise log to a file.
        ## NOTE(review): the file is created in the current working
        ## directory -- bplogdir(x) is only used as an on/off switch
        ## here, not as the destination directory. Confirm intended.
        if (!is.na(bplogdir(x))) {
            filename <- get.log.file(x)
            flog.appender(
                appender.file(filename),
                name = logger.name
            )
        }
    }
}
## Apply the param's logging threshold to its logger. bpthreshold() yields
## a character level (e.g. "INFO"); get() resolves that name to the
## corresponding futile.logger level constant in scope.
set.log.threshold <-
    function(x)
{
    threshold <- bpthreshold(x)
    logger.name <- get.logger.name(x)
    flog.threshold(get(threshold), name = logger.name)
}
## Emit a TRACE-level message on `x`'s logger; a silent no-op when no
## param object is supplied or its logging flag is off.
.trace <-
    function(x, ...)
{
    if (missing(x) || !bplog(x))
        return(invisible(NULL))
    flog.trace(..., name = get.logger.name(x))
}
## Emit a DEBUG-level message on `x`'s logger; a silent no-op when no
## param object is supplied or its logging flag is off.
.debug <-
    function(x, ...)
{
    if (missing(x) || !bplog(x))
        return(invisible(NULL))
    flog.debug(..., name = get.logger.name(x))
}
## Emit an INFO-level message on `x`'s logger; a silent no-op when no
## param object is supplied or its logging flag is off.
.info <-
    function(x, ...)
{
    if (missing(x) || !bplog(x))
        return(invisible(NULL))
    flog.info(..., name = get.logger.name(x))
}
## Format a warning message, route it through the logger when logging is
## enabled, and always raise it as an R warning (without the call in the
## message).
.warn <-
    function(x, fmt, ...)
{
    if (!missing(x) && bplog(x)) {
        ## relies on flog.warn() returning the formatted message it logged
        value <- flog.warn(fmt, ..., name = get.logger.name(x))
    } else {
        value <- sprintf(fmt, ...)
    }
    warning(value, call. = FALSE)
}
## Format an error message, route it through the logger when logging is
## enabled, and always raise it via stop() (without the call in the
## message).
.error <-
    function(x, fmt, ...)
{
    if (!missing(x) && bplog(x)) {
        ## relies on flog.error() returning the formatted message it logged
        value <- flog.error(fmt, ..., name = get.logger.name(x))
    } else {
        value <- sprintf(fmt, ...)
    }
    stop(value, call. = FALSE)
}
## Append a formatted message to the job's Redis log queue, but only when
## the param object has Redis logging switched on; otherwise do nothing.
.redisLog <-
    function(x, fmt, ...)
{
    if (!missing(x) && x$redis.log) {
        value <- sprintf(fmt, ...)
        ## RPUSH onto the list whose key is derived from the job name
        x$redisClient$RPUSH(.redisLogQueue(x$jobname), value)
    }
}
906c73b4f8d9e71d38931f090405d6601cce7fed | bc48ffd209e2678a6f42c26fee8b3d74c3497fdb | /man-roxygen/mouter.R | 1b187c1735d99eb1b3fece42de8939af09d6aa87 | [] | no_license | mbojan/mouter | 8f224a5c608bed28e8fa6b33d2d98116ada9bd47 | df63aadbf21070d83993cbbd147497e1f774d57a | refs/heads/master | 2021-01-17T19:25:48.763708 | 2017-02-06T19:39:06 | 2017-02-06T19:39:06 | 65,302,549 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 334 | r | mouter.R | # As an array
a <- mouter(
A = c(a=1, b=2) / 3,
M = matrix(1:4 / 10, 2, 2, dimnames=list(D=letters[5:6], E=letters[7:8])),
B = c(c=2, d=3) / 5,
retval="a"
)
str(a)
# Data frame
mouter(
A = c(a=1, b=2) / 3,
M = matrix(1:4 / 10, 2, 2, dimnames=list(D=letters[5:6], E=letters[7:8])),
B = c(c=2, d=3) / 5,
retval="df"
)
|
a67c3c2fcbc0c11c6b97bb296232fb8c8c297cb9 | 4edab06f5f798ccb53fee765b8d7a9b8c2d4ad34 | /plot.R | b8e038400157d1b7e87747f1cb8f6d0bfeb8af57 | [] | no_license | bharathirajajewin/testrepo | 33ace96af05165421f266e421c4cdbe4574bdd21 | 4d288f626e6affe4b6e843b43acaa7ed8e54b2de | refs/heads/master | 2020-04-07T15:35:33.765282 | 2014-10-14T14:36:06 | 2014-10-14T14:36:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 39 | r | plot.R | a <- rnorm(100)
# BUG FIX: the title argument of plot() is `main`, not `mail`; the typo
# was not a recognised graphical parameter, so the plot rendered untitled.
plot(a, main="Barplot")
fe6680c60a90d7bd82c3f75b8d1d978771010820 | 8986d42911f0d3f4ea330dd8df4668ba6dac403c | /R/agreguj_wskazniki_2rm.R | 6a29797547d31814d15d603af4f2cdbfed9a703b | [
"MIT"
] | permissive | tzoltak/MLASZdane | 07a25b412b2c0a987581967251206e52e067d4c9 | 3f51cce2959436d6e648c7394cf4dfe9b7fee274 | refs/heads/master | 2023-08-31T15:50:35.253211 | 2021-08-09T12:54:37 | 2021-08-09T12:54:37 | 77,215,737 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,046 | r | agreguj_wskazniki_2rm.R | #' @title Obliczanie wskaznikow z 2 rundy monitoringu na poziomie zagregowanym
#' @description Funkcja oblicza wartości wskaźników na poziomie zagregowanym
#' na podstawie ramki danych z wynikami ankiety CAWI.
#' @param wskazniki ramka danych z wynikami 2 rundy monitoringu
#' @param grupy ramka danych zawierająca definicje podziałów na grupy -
#' np. zwrócona przez funkcję \code{\link{utworz_grupowanie_ze_zmiennej}}
#' @return data frame
#' @seealso \code{\link{agreguj_wskazniki}} oraz przekazywane do niej funkcje
#' używane do obliczania konkretnych wskaźników zagregowanych:
#' \itemize{
#' \item{\code{\link{liczba_zbadanych}},}
#' \item{\code{\link{dane_szkoly}},}
#' \item{\code{\link{liczba_kobiet_2rm}},}
#' \item{\code{\link{firma_badawcza}},}
#' \item{\code{\link{formy}},}
#' \item{\code{\link{zawod_liczebnosc}},}
#' \item{\code{\link{zawod_przygotowanie_szkola}},}
#' \item{\code{\link{uczestnictwo_pnz}},}
#' \item{\code{\link{szkola_1_wyboru}},}
#' \item{\code{\link{ponowny_wybor}},}
#' \item{\code{\link{przygotowanie_do_zawodu}},}
#' \item{\code{\link{przyg_zawodu_prakt_PL}},}
#' \item{\code{\link{przyg_zawodu_prakt_niePL}},}
#' \item{\code{\link{przyg_zaw_prakt_ANY}},}
#' \item{\code{\link{przyg_zawodu_zaj_PL}},}
#' \item{\code{\link{przyg_zawodu_zaj_niePL}},}
#' \item{\code{\link{przyg_zawodu_zaj_szkola}},}
#' \item{\code{\link{przyg_zawodu_zaj_ckp}},}
#' \item{\code{\link{przyg_zaw_zaj_ANY}},}
#' \item{\code{\link{nauka_zawod}},}
#' \item{\code{\link{plany_6m}},}
#' \item{\code{\link{czy_plany_eduk}},}
#' \item{\code{\link{plany_eduk_tak}},}
#' \item{\code{\link{plany_eduk_nie}},}
#' \item{\code{\link{praca_zarobkowa}},}
#' \item{\code{\link{praca_poza_wyuczonym}},}
#' \item{\code{\link{brak_pracy}},}
#' \item{\code{\link{mlodociani_praca}},}
#' }
#' @export
#' @importFrom dplyr .data
agreguj_cawi_ucz_2rm = function(wskazniki, grupy) {
  # Both the respondent-level indicator data and the group definitions
  # must be data frames.
  stopifnot(is.data.frame(wskazniki),
            is.data.frame(grupy))
  # Survey columns that must be present in `wskazniki`; sprawdz_nazwy()
  # checks them and errors on any mismatch before aggregation starts.
  nazwy = c("ID_rspo", "szk_nazwa", "szk_adres", "M1", "Firma", "S1",
            "woj_nazwa", "S2_zawod", "W1", "W2", "W3", "PNZ3_1", "PNZ3_2",
            "PNZ5_1", "PNZ5_2", "PNZ5_3", "PNZ5_4", "PNZ9", "PL2_1", "PL2_2",
            "PL2_3", "PL2_4", "PL2_5", "PL2_6", "PL3", "PL4","PL5", "PL6",
            "PL9", "PL10", "PNZ8")
  sprawdz_nazwy(names(wskazniki), nazwy)
  # Delegate to the generic aggregator; each named argument is an
  # indicator-computing expression (presumably evaluated per group by
  # agreguj_wskazniki(), with the `.data` pronoun bound to the group's
  # rows -- see that function for the exact evaluation semantics).
  wskazniki = agreguj_wskazniki(
    wskazniki, grupy,
    l_zbadanych = liczba_zbadanych(.data),
    dane_szkoly = dane_szkoly(.data),
    l_kobiet = liczba_kobiet_2rm(.data),
    firma = firma_badawcza(.data),
    formy_gramatyczne = formy(.data),
    l_zawod = zawod_liczebnosc(.data),
    l_zawod_przyg = zawod_przygotowanie_szkola(.data),
    uczestnictwo_pnz = uczestnictwo_pnz(.data),
    szk_1_wyb = szkola_1_wyboru(.data),
    ponowny_wybor = ponowny_wybor(.data),
    przyg_do_zaw = przygotowanie_do_zawodu(.data),
    przyg_zawodu_prakt_PL = przyg_zawodu_prakt_PL(.data),
    przyg_zawodu_prakt_niePL = przyg_zawodu_prakt_niePL(.data),
    przyg_zaw_prakt_ANY = przyg_zaw_prakt_ANY(.data),
    przyg_zawodu_zaj_PL = przyg_zawodu_zaj_PL(.data),
    przyg_zawodu_zaj_niePL = przyg_zawodu_zaj_niePL(.data),
    przyg_zawodu_zaj_szkola = przyg_zawodu_zaj_szkola(.data),
    przyg_zawodu_zaj_ckp = przyg_zawodu_zaj_ckp(.data),
    przyg_zaw_zaj_ANY = przyg_zaw_zaj_ANY(.data),
    nauka_zawod = nauka_zawod(.data),
    ocena_pnz = ocena_pnz(.data),
    plany_6m_bs1 = plany_6m(.data, "PL2_1",
                            "[%] W szkole branżowej drugiego stopnia"),
    plany_6m_licd = plany_6m(.data, "PL2_2", "[%] W liceum dla dorosłych"),
    plany_6m_stud = plany_6m(.data, "PL2_4", "[%] Na studiach"),
    czy_plany_edu = czy_plany_eduk(.data),
    plany_edu_tak = plany_eduk_tak(.data),
    plany_edu_nie = plany_eduk_nie(.data),
    praca_zarobkowa = praca_zarobkowa(.data),
    praca_poza_wyuczonym = praca_poza_wyuczonym(.data),
    brak_pracy = brak_pracy(.data),
    mlodociani_praca = mlodociani_praca(.data)
  )
  return(wskazniki)
}
|
0f82ae271f4ae0677d9a535722ed73450b41c1e9 | 9409270923e73e0e1cbf8099a729ab26bd3efff2 | /combine_Utrack.R | d697dd1bc61e6ef8ec211102371ce760b19d0353 | [] | no_license | vrrenske/spotprocessR | cf229f52fc476fe0dd8b2e8de782b1dc99bf9b85 | 9cf944564a7b1411712dd312f9fb6e273e92e7f1 | refs/heads/master | 2020-04-05T13:07:44.924705 | 2018-03-05T13:15:09 | 2018-03-05T13:15:09 | 95,105,125 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 10,025 | r | combine_Utrack.R | ####################COMBINING DATA UTRACK###############################################
##Renske van Raaphorst 25-04-2016
##To combine outputs from different movies; same experiment; into 1 plot and/or combine different conditions to produce comparison plots
rm(list=ls(all=TRUE))
library(ggplot2)
library(ggthemes)
##FUNCTIONS##################################################################################
#####load object in seperate environment so you can name it yourself#########################
## Load the first object stored in an .RData/.rda file and return its
## value, without polluting the caller's environment or requiring the
## caller to know the name the object was saved under.
load_obj <- function(f)
{
  sandbox <- new.env()
  loaded_names <- load(f, envir = sandbox)
  get(loaded_names[1], envir = sandbox)
}
##load files from Utrack:
## Interactively pick `C` saved result files for condition `Z` and stack
## them into one data frame. Track ids are offset by the running maximum
## so tracks stay uniquely numbered across movies.
pickmovies <- function(C, Z){
  for(X in 1:C){
    ## choose.files() is Windows-only; the caption tells the user which
    ## condition/file is currently being requested
    MSDfile <- choose.files(caption=paste("Condition ", Z, "; file ", X, sep=""))
    allMSDs <- load_obj(MSDfile)
    allMSDs$mov <- X
    allMSDs$track <- as.numeric(allMSDs$track)
    if(X==1){
      totMSD <- allMSDs
    }
    if(X>1){
      ## renumber this movie's tracks past the highest id seen so far
      allMSDs$track <- allMSDs$track + max(totMSD$track)
      totMSD <- rbind(totMSD, allMSDs)
    }
  }
  return(totMSD)
}
########LM display function from: http://stackoverflow.com/questions/7549694/ggplot2-adding-regression-line-equation-and-r2-on-graph
##by Jayden
## Build a plotmath label "y = a +/- b x,  r^2 = ..." for a fitted lm,
## suitable for annotating a ggplot (adapted from a Stack Overflow answer
## by Jayden). Returns a single character string.
lm_eqn = function(m) {
  slope <- coef(m)[2]
  vals <- list(a  = format(coef(m)[1], digits = 2),
               b  = format(abs(slope), digits = 2),
               r2 = format(summary(m)$r.squared, digits = 3))
  template <- if (slope >= 0) {
    substitute(italic(y) == a + b %.% italic(x)*","~~italic(r)^2~"="~r2, vals)
  } else {
    substitute(italic(y) == a - b %.% italic(x)*","~~italic(r)^2~"="~r2, vals)
  }
  as.character(as.expression(template))
}
###########estimate Diffusion coefficient from histogram
## Estimate a diffusion coefficient D (and scale factor k) for condition
## `par` by fitting a Gaussian propagator to the histogram of
## displacements at each time lag.
## NOTE(review): `allhis` accumulates one row per lag but is never used in
## the return value. Because kvalue/Dvalue/kse/Dse are scalars overwritten
## on every iteration, mean() is a no-op and the function effectively
## returns the estimates from the LAST lag only -- an aggregate such as
## colMeans(allhis[...]) was probably intended. Confirm before relying on
## the output.
D_K <- function(allcontracks, par){
  ##make histogram for each time lag present for this condition
  for(n in unique(allcontracks$time[!is.na(allcontracks$time)&allcontracks$con==par])){
    histdat <- hist(allcontracks$distlist[allcontracks$con==par&allcontracks$time==n], breaks=50, plot=FALSE)
    histdat <- data.frame(x=histdat$mids, y=histdat$density)
    ##make formula and fit the formula to the histogram
    ## (the k=kvalue, D=Dvalue defaults are never exercised: nls() always
    ## supplies k and D explicitly, so the not-yet-defined names are safe)
    f <- function(x, k=kvalue, D=Dvalue){k*exp(-x^2/(4*D))/((pi*4*D)^(-1/2))}
    fit <- nls(y~f(x, k, D), data=histdat, start=list(k=30, D=0.003))
    ##get D and K out of the fit; D is normalised by the lag time n
    kvalue <- as.numeric(summary(fit)$parameters[,"Estimate"][1])
    Dvalue <- as.numeric(summary(fit)$parameters[,"Estimate"][2])/n
    kse <- as.numeric(summary(fit)$parameters[,"Std. Error"][1])
    Dse <- as.numeric(summary(fit)$parameters[,"Std. Error"][2])
    hisframe <- data.frame(k=kvalue, D=Dvalue, kse=kse, Dse=Dse, con=par, time=n)
    if(n==(min(unique(allcontracks$time),na.rm=T))){allhis <- hisframe}
    if(n>(min(unique(allcontracks$time),na.rm=T))){allhis <- rbind(allhis, hisframe)}
  }
  return(data.frame(k = mean(kvalue), D=mean(Dvalue), kse=mean(kse), Dse=mean(Dse), con=par))
}
#plot the histogram of all Ddistances with the variance (=MSD of the total) displayed.
#make normal distribution prediction to superimpose and plot
## Build one histogram-of-displacements ggplot per condition listed in
## `sumframe`, each with a superimposed normal density and a D/MSD title.
## Returns a list of ggplot objects.
## NOTE(review): the grid/normpred lines reference the *global*
## `allcontracks` instead of the `alltracks` argument -- they only agree
## because the calling script passes allcontracks. Probably `alltracks`
## was meant throughout; confirm before reusing this function elsewhere.
totalMSDplot<-function(alltracks, sumframe){
  sumframe$con <- as.character(sumframe$con)
  plist <- list()
  ## Z tracks the tallest normal density seen so far; it fixes a common
  ## y-axis limit across the per-condition panels
  Z <- 0
  for(X in 1:nrow(sumframe)){
    #make normal distribution prediction to superimpose
    grid <- with(allcontracks[allcontracks$con==sumframe$con[X],], seq(min(distlist, na.rm=T), max(distlist,na.rm=T), length = 100))
    normpred <- data.frame(predicted = grid, density = dnorm(grid, mean(allcontracks$distlist[allcontracks$con==sumframe$con[X]], na.rm=T), sd(allcontracks$distlist[allcontracks$con==sumframe$con[X]], na.rm=T)))
    if(max(normpred$density)>Z){Z<-max(normpred$density)}
    plot <- ggplot(alltracks[alltracks$con==sumframe$con[X],], aes(x=distlist)) + geom_histogram(aes(y=..density..), fill="#0072B2", alpha=0.7, binwidth =0.05) + theme_minimal() + xlab("\u0394d (um)") +
      ggtitle(paste("D = ", round(sumframe$D[X], digits=4), "+/-", round(sumframe$Dse[X],digits=4), "\u03BCm/s\u00B2\nMSD(\u0394t=", alltracks$time[2]-alltracks$time[1], "s) = ", round(sumframe$MSD_vanHove[X], digits=4), "\u03BCm\u00B2", sep="")) +
      geom_line(data = normpred, aes(x = predicted, y = density), colour = "#D55E00", size=1) +
      #coord_fixed() +
      xlim(c(-1,1)) + ylim(c(0,(Z+1.5)))
    plist[[X]] <- plot
  }
  return(plist)
}
##plot MSD track as calculated in Jacobs-Wagner, Cell 2014:
## Average the per-track MSDs into one MSD-vs-lag curve (Jacobs-Wagner,
## Cell 2014 style). Only tracks with at least 10 observations are kept,
## and a lag contributes only when at least two distinct tracks cover it.
## NOTE(review): the result frame is only created when lag deltat == 1 is
## present; if the data start at a larger lag, MSDplotframe is undefined
## and return() errors. Confirm deltat always starts at 1 upstream.
MSDtrackplot <- function(allMSDs){
  ## count observations per track, then attach that count to each row
  Z <- table(allMSDs$track)
  Z <- data.frame(Z)
  colnames(Z) <- c("track", "tracklength")
  allMSDs <- merge(Z, allMSDs)
  ## discard short tracks (fewer than 10 points)
  allMSDs <- allMSDs[allMSDs$tracklength>9,]
  for(i in unique(allMSDs$deltat[!is.na(allMSDs$deltat)])){
    if(i==1){
      MSDplotframe <- data.frame(MSD = mean(allMSDs$meansd[allMSDs$deltat==1]), se = mean(allMSDs$se[allMSDs$deltat==1]), time = 1)
    }
    if(i>1){
      ## require >= 2 tracks at this lag before averaging
      if(sum(table(unique(allMSDs$track[allMSDs$deltat==i])))>1)
        MSDplotframe <- rbind(MSDplotframe,c(mean(allMSDs$meansd[allMSDs$deltat==i]),mean(allMSDs$se[allMSDs$deltat==i]), i))
    }
  }
  return(MSDplotframe)
}
##Plot all MSD combotracks in one plot (if you have more than one ;) )
## Combine the per-condition MSD-vs-time summaries (from MSDtrackplot)
## for the first `U` conditions into one data frame ready for plotting.
## (The default U=U is self-referential, so U is effectively required.)
MSDfinplotframe <- function(allconMSD, U=U){
  conditions <- unique(allconMSD$con)
  combined <- NULL
  for (idx in 1:U) {
    this_con <- conditions[idx]
    per_con <- MSDtrackplot(allconMSD[allconMSD$con == this_con, ])
    per_con$con <- this_con
    combined <- if (is.null(combined)) {
      per_con
    } else {
      merge(per_con, combined, all = TRUE)
    }
  }
  return(combined)
}
#################Summary of MSDs ########################################################################################
## Summarise MSD estimates per condition: variance of all displacements
## ("van Hove" MSD), median of the per-track MSDs/SEs ("Wagner"), plus the
## D/k fit from D_K().
## NOTE(review): the loop fills vectors of length U, but the `con` column
## uses ALL unique conditions -- if U is smaller than the number of
## conditions, data.frame() will recycle or error. The default U=U is
## self-referential, so U is effectively a required argument.
MSDsummary <- function(allcontracks, allconMSD, U=U){
  listM <- c()
  listW <- c()
  listS <- c()
  listcon <- unique(allcontracks$con)
  for(X in 1:U){
    ## variance of all displacements = MSD of the pooled distribution
    listM[X] <- sd(allcontracks$distlist[allcontracks$con==listcon[X]],na.rm=T)^2
    listW[X] <- median(allconMSD$meansd[allconMSD$con==listcon[X]])
    listS[X] <- median(allconMSD$se[allconMSD$con==listcon[X]])
    if(X==1){DKs <- D_K(allcontracks,listcon[X])}
    if(X>1){DKs <- rbind(DKs, D_K(allcontracks, listcon[X]))}
  }
  MSDsummary <- data.frame(con=unique(allcontracks$con), MSD_vanHove = listM, MSD_Wagner = listW, SE_Wagner=listS)
  MSDsummary <- merge(MSDsummary, DKs, all=T)
  return(MSDsummary)
}
############plot together:#######################
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
## Arrange several ggplot objects on one page using grid viewports (the
## widely circulated "Cookbook for R" multiplot helper).
## NOTE(review): the `file` argument is accepted but never used in the
## body; library(grid) inside the function attaches grid as a side effect.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  library(grid)
  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots = length(plots)
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots==1) {
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
###################### code ##################################################################
##set your working directory
setwd(choose.dir(default = "F:/microscope files 2015/", caption = "Choose working directory"))
##First question: how many conditions; then how many files for a single condition
U <- readline("How many conditions?: ")
##Per condition: load the files into one data frame:
for(X in 1:U){
C <- readline(paste("How many files for condition ", X, "?: ", sep=""))
totMSD <- pickmovies(C, X)
tottracks <- pickmovies(C, X)
condition <- readline(paste("Name condition ", X, ": "))
totMSD$con <- condition
tottracks$con <- condition
if(X==1){
allconMSD <-totMSD
allcontracks <- tottracks
}
if(X>1){
allconMSD <- rbind(allconMSD, totMSD)
allcontracks <- rbind(allcontracks, tottracks)
}
}
##Make summary table
MSDsum <- MSDsummary(allcontracks, allconMSD, U)
##Make plot of all histograms
cairo_pdf("MSDhists.pdf")
multiplot(plotlist=totalMSDplot(allcontracks, MSDsum), cols=3)
dev.off()
##and of all tracks
MSDplotframe <- MSDfinplotframe(allconMSD, U)
MSDalltracksoneplot <- ggplot(MSDplotframe, aes(x=time, y=MSD, color=con)) + geom_point() + geom_errorbar(aes(ymin=MSD-se, ymax=MSD+se), width=0.1) + theme_minimal() + xlab("time (s)") + ylab("MSD (\u03BCm\u00B2)") + geom_smooth(data=MSDplotframe[MSDplotframe$time<5,],method="lm") +
scale_colour_brewer(palette="Set2") + scale_x_continuous(limits=(c(0, max(MSDplotframe$time)*.67))) + scale_y_continuous(limits=c(0, (max(MSDplotframe$MSD[MSDplotframe$time<max(MSDplotframe$time*0.67)])+0.01)))
cairo_pdf("alltracks.pdf")
print(MSDalltracksoneplot)
dev.off()
###################################################################################
##plot fit with the original histogram
#ggplot(allcontracks[allcontracks$con=="O",], aes(x=distlist)) + geom_histogram(aes(y=..density..), bins=50, colour="#56B4E9", fill="#56B4E9", alpha=0.8) + stat_function(fun=f, colour="#E69F00", size=1) + theme_minimal() + xlab("displacements (um)") + ggtitle(paste("D = ", round(Dvalue, digits=4), "+/-", round(Dse,digits=4), "\u03BCm/s\u00B2\nMSD = ", round(MSDsum$MSD_vanHove[MSDsum$con=="O"], digits=4), "\u03BCm\u00B2", sep=""))
|
fbe14964c1604e442991bbd6a799ebd5963e8854 | 5bab0a6c27ce35f248b87fa223336df789ce64bc | /Pulsar Classification/Models.R | 6ca824edda1d1ff13d61c092c49f748ace4f3078 | [] | no_license | othersideoflearning/portfolio | aae6647658048aa32ba929c4f5493d9b57c05abb | a1d5f3f31f8090e0e7144185e7cf6ed902185df8 | refs/heads/master | 2021-10-11T07:06:33.990780 | 2021-10-01T15:54:23 | 2021-10-01T15:54:23 | 195,103,163 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,086 | r | Models.R | library(ROSE)
library(rpart)
library(readr)
library(psych)
library(caret)
library(e1071)
library(rattle)
# library(naivebayes)
# library(Boruta)
HTRU_2 <- read_csv("D:/Academics/Semester-IV(Late Spring 2017)/Machine Learning - I/Project work/Data/HTRU2/HTRU_2.csv", col_types = cols(Class = col_factor(levels = c("0", "1"))), na = "NA")
# shuffling row qise
HTshuf <- HTRU_2[sample(nrow(HTRU_2)),]
# split into training and testing set being 80% data in training set
size <- round(nrow(HTshuf)*0.8)
trainset <- HTshuf[1:size,]
testset <- HTshuf[size: nrow(HTshuf),]
# feaSel <- Boruta(Class~., data = trainset, doTrace = 2)
#
# feaSel$ImpHistory
# Chekcing the proportion of training and testing set pulsar Vs. noise
# prop.table(table(trainset$Class))
# prop.table(table(testset$Class))
# Creting decision tree moel
tree <- rpart(Class~., data = trainset)
# tree$variable.importance
predtree <- predict(tree, newdata = testset)
# Checking for accuracy of the model.
accuracy.meas(testset$Class, predtree[,2])
roc.curve(testset$Class, predtree[,2])
# Balancing imbalanced dataset with ROse Function
trainRose <- ROSE(Class~.,data = trainset, seed = 1)$data
testRose <- ROSE(Class~., data = testset, seed = 1)$data
# Checking the proportion of training and test set pulsar Vs. noise
# table(trainRose$Class)
# table(testRose$Class)
# Modeling with Decision Tree algorothm
treeRose <- rpart(Class~., data = trainRose)
# treeRose$variable.importance
PredtreeRose <- predict(treeRose, newdata = testRose)
# accuracy.meas(testRose$Class, PredtreeRose[,2])
# roc.curve(testRose$Class, PredtreeRose[,2])
# plotting tree
# fancyRpartPlot(treeRose, palettes=c("Greys", "Oranges"))
confusionMatrix(round(PredtreeRose[,2], digits = 0), testRose$Class)
# round(PredtreeRose[,2], digits = 0)
# plot(treeRose, uniform=TRUE, main="Classification Tree for Plusars")
# text(treeRose, use.n=TRUE, all=TRUE, cex=.7)
# labels(treeRose, digits = 4, minlength = 1L, pretty, collapse = FALSE)
#
# plotcp(treeRose)
# text(treeRose)
# treeRoseImp <- rpart(Class~SkeIGP+EKIGP+MeanIG+SDDMSNR+EKDMSNR+MeanDMSNR+SDIGP, data = trainRose)
# treeRoseImp$variable.importance
# PredtreeRoseImp <- predict(treeRoseImp, newdata = testRose)
# accuracy.meas(testRose$Class, PredtreeRose[,2])
# roc.curve(testRose$Class, PredtreeRose[,2])
# accuracy.meas(testRose$Class, PredtreeRose[,2])
# roc.curve(testRose$Class, PredtreeRose[,2])
# confusionMatrix(PredtreeRose, testset$Class)
# naive bayes
# Modleing with Naive Bayes Algorithm
# model2 <- naive_bayes(Class~.,data = trainRose)
# PredModel2 <- predict(model2, newdata = testRose)
#
# plot(model2, which = NULL, ask = TRUE, legend = TRUE, main = "Naive Bayes Plot")
# Modeling with Naive Bayes Algorithn
model <- naiveBayes(Class~., data = trainRose)
# Stats of model
# class(model)
# summary(model)
# print(model)
# Predecting pulsar or noise using developed model
predmodel <- predict(model, newdata = testRose)
# Checking accuracy of the model
confusionMatrix(predmodel, testRose$Class)
# roc.curve(testRose$Class, predmodel )
|
d6941b7ef84d8e4f30e2f020d913a7701a3a3dde | a85d5b1041c03415a670091cba8dfc08500c2960 | /R programing/refrigration.r | 1841054dc2c38a35eacadec364d1a699d94fced4 | [] | no_license | sandeepsingh800/test | 6f921d4648dbdf94aca66cf80c8265af679a3c94 | c253d4b895cec8651c8bd36a85884650f341e129 | refs/heads/master | 2021-05-05T07:53:35.527740 | 2018-01-26T04:28:07 | 2018-01-26T04:28:07 | 118,894,535 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,419 | r | refrigration.r | ##The R-Code has been simplified for the individual product categories.
##Let's refer to this during our meeting.
##~~~~~~~~~~~~~~~~~~~~~~~~~
require(forecast)
require(xts)
require(lubridate)
require(RODBC)
#Historical Input (Train)
db <- odbcConnect("SHC EDW PROD1", uid = "dsankar", pwd = "dheeraj88")
DF <- sqlQuery(db, "select a.*, case when Ny=1 then 1
when Md=1 then 1
when Id=1 then 1
when Ld=1 then 1
when Chd=1 then 1
when Thd=1 then 1
else 0 end as hol from hs_perm_tbls.lv_ih_dcp_history_daily_mix a where prodcat1='COOK' ")
odbcClose(db)
#Forecast Info Input
db <- odbcConnect("SHC EDW PROD1", uid = "dsankar", pwd = "dheeraj88")
DFxreg <- sqlQuery(db, "select a.*,
case when Ny=1 then 1
when Md=1 then 1
when Id=1 then 1
when Ld=1 then 1
when Chd=1 then 1
when Thd=1 then 1
else 0 end as hol from hs_perm_tbls.lv_ih_dcp_xreg_daily_fcst_mix a where prodcat1='COOK' ")
odbcClose(db)
#Segments to model
DF$bycat<-paste(DF$pay_met2,DF$prodcat1,DF$region,DF$dru_id,DF$iru_id,DF$fru_id,DF$sub_cat, sep="-")
DFxreg$bycat<-paste(DFxreg$pay_met2,DFxreg$prodcat1,DFxreg$region,DFxreg$dru_id,DFxreg$iru_id,DFxreg$fru_id,DFxreg$sub_cat,sep="-")
##Forecast Function
## Fit one ARIMA-with-Fourier-and-holiday-regressors model per time
## series segment and return the stacked weekly forecasts.
##
## mydata:  history (training) rows; mydata1: future rows used to build
##          the forecast regressors.
## strColForecastBy / strColDate: column names used for splitting/sorting.
## strColMeasure and iDaysToForecast are accepted but never referenced:
## the measure is hard-coded to the `completed` column, and the horizon h1
## is derived from the number of unique weeks in mydata1.
## iConfidenceLevel: prediction-interval level passed to forecast().
mymain <- function (mydata,mydata1, strColMeasure, strColForecastBy, strColDate, iDaysToForecast, iConfidenceLevel)
{
  ## Count how many time series need to be forecasted
  strTimeSeriesNames = unique(mydata[, strColForecastBy])
  iTimeSeriesCount <- length(strTimeSeriesNames)
  mysubset <- NULL
  mysubset1 <- NULL
  myforecast <- NULL
  myforecast1 <- NULL
  mytempfutureset <- NULL
  myfuturesetcollection <- NULL
  hd<-NULL
  hd2<-NULL
  xreg<-NULL
  xreg1<-NULL
  h1<-NULL
  for (i in 1:iTimeSeriesCount)
  {
    ## Create an individual and sorted dataset for the current time series
    mysubset <- mydata [mydata[, strColForecastBy]==strTimeSeriesNames[i],]
    mysubset <- mysubset[order(mysubset[, strColDate]),]
    mysubset1 <- mydata1 [mydata1[, strColForecastBy]==strTimeSeriesNames[i],]
    mysubset1 <- mysubset1[order(mysubset1[, strColDate]),]
    ## HOLIDAYS: dummy-code the `hol` flag, dropping the intercept column;
    ## `hd` is deliberately reused as scratch for the future regressors
    hd <- cbind(wday=model.matrix(~as.factor(mysubset$hol)))
    hd <- hd[,-1]
    xreg <- cbind(hd)
    hd2 <- cbind(wday=model.matrix(~as.factor(mysubset1$hol)))
    hd <- hd2[,-1]
    xreg1 <- cbind(hd)
    ## forecast horizon = number of distinct accounting weeks to predict
    h1 <- length(unique(mysubset1$acctg_yr_wk))
    # Rob Hyndman daily forecasting guide
    # Training: grow the Fourier order K until AICc stops improving
    y <- ts(mysubset$completed, frequency=365.25/7) # using diff freq
    bestfit <- list(aicc=Inf)
    for(tt in 1:25)
    {
      fit <- auto.arima(y, xreg=cbind(xreg, fourier(y, K=tt)), seasonal=FALSE)
      if(fit$aicc < bestfit$aicc)
        bestfit <- fit
      else break;
    }
    ## K_ = last improving order (loop broke at the first non-improvement).
    ## NOTE(review): if all 25 orders improve, the loop ends without
    ## break and K_ = 24 while bestfit was fit with K = 25 -- the
    ## forecast regressors below would then mismatch bestfit's. Confirm.
    K_<-tt-1
    print(K_)
    ## Carry out the forecast; try() skips series whose forecast errors.
    ## NOTE(review): inherits(myforecast, "try-error") would be the more
    ## robust test than comparing class() with ==.
    myforecast <- try(forecast(bestfit, xreg=cbind(xreg1,fourier(y, K=K_, h=h1)), h=h1,level=iConfidenceLevel))
    if (class(myforecast) == 'try-error')next;
    ## Plot chart if within maxiumum of previously defined number of charts
    myheader <- paste(strTimeSeriesNames[i], '\n', ' ', i, '/', iTimeSeriesCount, ' ',sep='')
    plot(myforecast,main=myheader)
    print(myheader)
    ## Bring forecast into format for union with actuals.
    ## NOTE(review): Date uses every row of mysubset1 while Measure has h1
    ## values -- data.frame() recycles/errors unless mydata1 has exactly
    ## one row per unique week per segment. Confirm that invariant holds.
    mytempfutureset <- data.frame(Date=paste(mysubset1$acctg_yr_wk,sep=""), ForecastBy=strTimeSeriesNames[i], Measure=as.numeric(myforecast$mean), Type='Forecast', PILower=as.numeric(myforecast$lower), PIUpper=as.numeric(myforecast$upper))
    ## Union current forecast with previously forecasted time series
    myfuturesetcollection <- rbind(mytempfutureset, myfuturesetcollection)
  }
  ## Union actuals and forecasts for all time series
  output <- rbind(myfuturesetcollection)
  ## Return the actuals and forecasts
  return(list(out=output))
}
#Input for function
asd<-mymain(DF,DFxreg,"completed","bycat","acctg_yr_wk",78,.95)
#Output Treatment
asd12<-data.frame(asd)
specify_decimal <- function(x, k) format(round(x, k), nsmall=k)
asd12$pay_met2 <- lapply(strsplit(as.character(asd12$out.ForecastBy), "-"), "[", 1)
asd12$prodcat1 <- lapply(strsplit(as.character(asd12$out.ForecastBy), "-"), "[", 2)
asd12$region <- lapply(strsplit(as.character(asd12$out.ForecastBy), "-"), "[", 3)
asd12$dru_id <- lapply(strsplit(as.character(asd12$out.ForecastBy), "-"), "[", 4)
asd12$iru_id <- lapply(strsplit(as.character(asd12$out.ForecastBy), "-"), "[", 5)
asd12$fru_id <- lapply(strsplit(as.character(asd12$out.ForecastBy), "-"), "[", 6)
asd12$sub_cat <- lapply(strsplit(as.character(asd12$out.ForecastBy), "-"), "[", 7)
asd12$predict <- specify_decimal(asd12$out.Measure,4)
a <- data.frame(lapply(asd12, as.character), stringsAsFactors=FALSE)
head(a)
a<-subset(a,select=c("out.Date","pay_met2","out.Type","region","prodcat1","dru_id","iru_id","fru_id","sub_cat","out.Measure"))
#Write Call
fcst<-subset(a,out.Type=='Forecast')
path1 <- "D:\\R\\Wk_fcst"
appendage1 <- "._fcst_arima_COOK.csv"
string <- format(Sys.time(), format = "%Y-%B-%d-%H%M%S")
outputFile1 <- file.path(path1, paste0(string, appendage1))
write.csv(fcst, file = outputFile1) |
d796d83be3753cfea3b09fe6275355f1b514c2b3 | c903fb060f7fdec352d33a1bc482e230136ad10f | /r/kellogg_vcards.R | e982f66aa682f0b957b7555476c4e08e1680d2ba | [] | no_license | kftb/kellogg-1y-vcf-cards | baa26f9dfdc4d4f98f49f0f241bba305586a31f0 | a3adbed4ea86022ea753e3243415eb97342a97f0 | refs/heads/main | 2023-05-28T13:12:03.743655 | 2021-06-20T14:10:55 | 2021-06-20T14:10:55 | 374,499,849 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,867 | r | kellogg_vcards.R |
library(tidyverse)
library(dplyr)
library(dialr)
library(xlsx)
# Import data
netids<- read_csv("input/1y_survey.csv") %>% select('netID', 'perm_email', 'kellogg_email', 'first_name', 'last_name')
df <- read_csv("input/1Y Class of 2021 Directory - 1Y Class of 2021 Directory.csv", skip=3,
col_types = c('Phone Number' = 'c',
'WhatsApp Number' = 'c'))
names(df) <- c('x1', 'first_name', 'last_name', 'student_jv', 'perm_email', 'fixed_country_code', 'fixed_phone', 'wa_country', 'fixed_wa_phone', 'industry', 'function', 'employer_clean', 'geo_country','geo_city_primary', 'geo_city_secondary' )
df <- df %>% mutate(mergeid = paste0(first_name, last_name))
netids <- netids %>% mutate(mergeid = paste0(first_name, last_name)) %>% select('netID', 'kellogg_email', 'mergeid')
# Merge netids with other dataset
df <- left_join(df, netids, by="mergeid")
# Set option for dialr format
getOption("dialr.format")
options(dialr.format = "INTERNATIONAL")
df_new <- df %>%
# Format email addresses
mutate(perm_email_corr = tolower(perm_email),
netID = tolower(netID)) %>%
# Reformat numbers
mutate_at("fixed_phone", ~phone(., fixed_country_code)) %>%
mutate_at("fixed_phone",
list(valid_phone = is_valid,
region = get_region,
type = get_type,
phone_final = format)) %>%
mutate_at("fixed_wa_phone", ~phone(., wa_country)) %>%
mutate_at("fixed_wa_phone",
list(valid_wa = is_valid,
region = get_region,
type = get_type,
wa_final = format))
write.csv(df_new, "output/1y_dir_output.csv")
backup_name = format(Sys.time(), 'output/backups/%y%m%d_%H%M%S_1y_dir_output_db.csv')
write.csv(df_new, backup_name)
#writexl::write_xlsx(df_exp, "output/1y_dir_output_db.xlsx")
|
2fddfb32e7361a252aeb9fcb09b21312aaa376d3 | fdae81101c47e85301841c1dc74bd0bf527e4409 | /R/anova4Fits.R | d1503bcad5c0bca58c5cf18b2e22c6250f6b3021 | [] | no_license | cran/CalciOMatic | 5e08d237fafa8b39131f7eb5006c1d438afc40ec | 2ca130e89e581137ecc3d0da57019e75d2967037 | refs/heads/master | 2021-01-19T00:08:47.036248 | 2009-10-06T00:00:00 | 2009-10-06T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 689 | r | anova4Fits.R | anova4Fits <-
function(Fit_1, Fit_2) {
## Perform an ANalyse Of VAriance to determine the best fit among 2
## Useful to check whether a monoexponential or biexponential fit,
## preformed by nls, best predicts experimental data
if(inherits(Fit_1,"nls") & inherits(Fit_2,"nls")) {
result <- anova(Fit_1,Fit_2)
print(result)
SumSquares <- result[["Res.Sum Sq"]]
ind <- which(SumSquares==min(SumSquares))
Fit <- list(Fit_1,Fit_2)
cat(sprintf("\nExperimental data were best explained with the %s.\n",attr(Fit[[ind]],"Name")))
} else {
cat(sprintf("\nFit_1 or Fit_2 do not correspond to fits performed with nls.\n"))
ind <- NULL
}
return(ind)
}
|
43e87422a39ade213dbf1db859a6b348e629c594 | d859174ad3cb31ab87088437cd1f0411a9d7449b | /autonomics.plot/man/plot_sample_distrs_n_pca.Rd | d2b13a2efd43c7adbf8f039989e3584accb9e8cf | [] | no_license | bhagwataditya/autonomics0 | 97c73d0a809aea5b4c9ef2bf3f886614eceb7a3c | c7ca7b69161e5181409c6b1ebcbeede4afde9974 | refs/heads/master | 2023-02-24T21:33:02.717621 | 2021-01-29T16:30:54 | 2021-01-29T16:30:54 | 133,491,102 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,159 | rd | plot_sample_distrs_n_pca.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pca.R
\name{plot_sample_distrs_n_pca}
\alias{plot_sample_distrs_n_pca}
\title{Plot sample distributions and pca}
\usage{
plot_sample_distrs_n_pca(
object,
descr = "",
color_var = default_color_var(object),
color_values = default_color_values(object),
shape_var = default_shape_var(object),
txt_var = default_txt_var(object),
displayed_features = NULL
)
}
\arguments{
\item{object}{SummarizedExperiment}
\item{descr}{string: used as plot title and plot name}
\item{color_var}{string: svar mapped to color}
\item{color_values}{named string vector: names = color_var levels, values = colors}
\item{shape_var}{string: svar mapped to shape}
\item{txt_var}{string: svar mapped to txt}
\item{displayed_features}{number or string vector: features to be displayed in the sample distributions)}
}
\value{
list(descr.distr = plot1, descr.pca = plot2)
}
\description{
Plot sample distributions and pca
}
\examples{
if (require(autonomics.data)){
# GLUTAMINASE
require(magrittr)
object <- autonomics.data::glutaminase
object \%>\% plot_sample_distrs_n_pca()
}
}
|
25704d8e4f027095ea239f377165fe94d42a5ea6 | 3d3502b01a3dbf15f0799d873c7b414bb35802fb | /man/predict_nodes.Rd | 3fd0d46e047f14b4e8efa02013b35438c552ae4d | [] | no_license | molson2/subgroupTree | f8b7c9477859e2b51a0e3d9d66143dd337af263f | 4263665d8c00ca98485c5422be5cd41ce903d276 | refs/heads/master | 2020-08-08T15:58:02.907607 | 2019-12-03T08:29:59 | 2019-12-03T08:29:59 | 213,864,133 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 456 | rd | predict_nodes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/split_functions.R
\name{predict_nodes}
\alias{predict_nodes}
\title{return leaf number containing predictions}
\usage{
predict_nodes(rpart.obj, newdata)
}
\arguments{
\item{rpart.obj}{rpart fit object}
\item{newdata}{data.frame with test predictors}
}
\value{
integer vector pointing to leaves containing predictions
}
\description{
return leaf number containing predictions
}
|
26eacd842e6136ec1250a9a835f67ae4f752c5f8 | f7c8cdbc396d525e22a4cb663bde50f57f1b2030 | /Drainage_Soil.R | 543da24bf621d7ca53d238bd801a4fd2baf95927 | [] | no_license | yuanhongdeng/SoilHydrology-Temperature | e0c041f9240e0b9b32cc0cab548a12ffb7cc730b | 9521342929ff19bed4ed68fec8b80df6ce96bf36 | refs/heads/main | 2023-02-22T04:59:51.552824 | 2021-01-14T16:36:11 | 2021-01-14T16:36:11 | 390,379,273 | 0 | 1 | null | 2021-07-28T14:23:08 | 2021-07-28T14:23:07 | null | UTF-8 | R | false | false | 1,827 | r | Drainage_Soil.R | ## Calculation of Drainage from the Soil ##
# Equation (as lower boundary condition of finite difference approximation)
# drain.t = -(k / dz )*(psi - psi.n1) - k
# k: hydraulic conductivity of soil at time t
# dz: soil depth
# psi: matric potential at time t
# psi.n1: matric potential of soil beneath soil layer at time t
# input state variables/parameter
stat.var <- read.csv("State_variables.csv", header = T, sep = ';')
psi.sat <- stat.var[1,2] # saturated hydraulic conductivity
theta.sat <- stat.var[1,1] # saturated volumetric water content
clay <- stat.var[1,8] # % of clay in the soil
SOC <- stat.var[1,5] # soil organic carbon
BD <- stat.var[1,4] # bulk density
SD <- stat.var[1,7] # soil sampling depth
param <- read.csv("Parameter.csv", header = T, sep = '\t')
b <- param[1,1] # Exponent
# input variables from models/calculations
theta <- # calculated water content
k <- # hydraulic conductivity of soil
psi <- # matric potential for soil
psi.n1 <- psi.sat * (s^-B)# matric potential for layer N+1 (layer beneath layer N) -> equation taken from CLM4.5
s <- 0.5 ((theta.sat + theta[t])/theta.sat)
B <- (1 - f) * B.min + f * B.om
B.min <- 2.91 + 0.159 * clay
B.om <- 2.7
f <- (SOC / (BD * SD)) * 1.72 # soil organic matter fraction; f = %C * 1.72
# SOC(kg/ha) = SOC (%)× BD (g/cm3)× SD (cm) x 1000
# where, SOC - Concentration of soil organic carbon (%); BD - Bulk density (g/cm3); SD- soil sampling depth (cm)
# calculating drainage
time <- seq(1, 52608) # [0.5h over data time period]
drain.t <- rep(NA, length(time))
for (t in time) {
drain.t <- -(k[t] / SD) * (psi[t] - psi.n1[t]) - k[t]
}
|
5941f7af55437f94a784b0c5da79835adfa511ae | 0f505ae41f3a94183a5f395741cd607929ab0ccd | /inst/shiny-examples/shinyProliferation/server.R | 16959f8c5c80591b2c65b5a5693a94d5a667832a | [] | no_license | aejensen/ProlifAnalysis | 5dda7750fc64a50e7afde7ca00f22e5819a05916 | 2c965beb8165a6e3187422596d0c57e9c534ead4 | refs/heads/master | 2020-03-18T16:23:03.249779 | 2018-06-02T16:50:36 | 2018-06-02T16:50:36 | 134,962,641 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,760 | r | server.R | shinyAppServer <- function(input, output, session){
  #d <- read.csv("180524 stat.txt", sep="\t", dec=",", skip=7)
  # Reactive source for the uploaded stat file. Reading it also (as a side
  # effect) repopulates the X/Y column selectors from the file's columns.
  filedata <- reactive({
    infile <- input$file
    if (is.null(infile)) {
      return(NULL)
    }
    d <- read.csv(infile$datapath, sep="\t", dec=",", skip=7) ###here is some fileformat configurations!
    choiceList <- as.list(1:ncol(d))
    names(choiceList) <- colnames(d)
    shiny::updateSelectInput(session, "selectX", choices = choiceList)
    shiny::updateSelectInput(session, "select", choices = choiceList)
    d
  })
  # rv$func stores the cutoff used for the last fit; m$func holds the fitted
  # model object (both NULL until the user presses "run").
  rv <- shiny::reactiveValues(func=NULL)
  m <- shiny::reactiveValues(func=NULL)
  # Fit the L5 (five-parameter logistic) model on the selected x/y columns,
  # restricted to x-values below the chosen cutoff.
  shiny::observeEvent(input$run, {
    rv$func <- input$cutoff
    d <- filedata()
    if (is.null(d)) {
      return(NULL)
    }
    data <- data.frame(x = d[, as.numeric(input$selectX)], y = d[, as.numeric(input$select)])
    data <- subset(data, x < as.numeric(input$cutoff))
    m$func <- ProlifAnalysis::estimateL5(data$x, data$y)
  })
  funcval <- shiny::reactive({
    input$cutoff
  })
  # Status line: stored cutoff from the last run vs. the current input value.
  output$msg = shiny::renderPrint({
    if (is.null(rv$func)) return("not running")
    fv <- funcval()
    sprintf("%.3f, %.3f", rv$func, input$cutoff)
  })
  # Model summary (or a placeholder before the first fit).
  output$text <- shiny::renderPrint({
    if(is.null(m$func)) {
      cat("No model has been estimated yet.")
    } else {
      summary(m$func)
    }
  })
  # Interactive scatter plot of the raw (time, confluency) data below the cutoff.
  output$distPlot <- plotly::renderPlotly({
    d <-filedata()
    if (is.null(d)) {
      return(NULL)
    }
    data <- data.frame(x = d[, as.numeric(input$selectX)], y = d[,as.numeric(input$select)])
    data <- subset(data, x < as.numeric(input$cutoff))
    plotly::plot_ly(data = data, x = ~x, y = ~y, type="scatter", mode = "markers") %>% plotly::layout(xaxis = list(title="Time"), yaxis = list(title="Confluency [%]"))
  })
  # Diagnostic plots of the fitted model: 1 = fit, 2 = velocity, 3 = acceleration.
  output$plot2 <- renderPlot({
    if(!is.null(m$func)) {
      if(input$plotType == 1) {
        plot(m$func)
      } else if(input$plotType == 2) {
        plot(m$func, type="velocity")
      } else if(input$plotType == 3) {
        plot(m$func, type="acceleration")
      }
    }
  })
  # Modal "About" dialog (shinyalert).
  shiny::observeEvent(input$about, {
    shinyalert::shinyalert(
      title = "About",
      text = "Shiny App for easy non-linear modeling of proliferation curves.<br><br>The author assumes no responsibility for the usage of this app.<br><br><br><br>Andreas Kryger Jensen.<br>Biostatistics, Institute of Public Health<br>University of Copenhagen.<br>2018",
      closeOnEsc = TRUE,
      closeOnClickOutside = TRUE,
      html = TRUE,
      type = "info",
      showConfirmButton = TRUE,
      showCancelButton = FALSE,
      confirmButtonText = "Close",
      confirmButtonCol = "#AEDEF4",
      timer = 0,
      imageUrl = "",
      animation = TRUE)
  })
}
|
ea311e2f42ac4f70faa60e9d314972daca70fdcd | cf3d35a51ca24a2434826c81730c18904ceab1db | /train_test_50_times.R | 93720bf9b4cdd063140b3505b2470e5cba9e3f35 | [] | no_license | Hardervidertsie/DILI_screen_paper | 2953c4715da122810a31d167dd9c2c7b0d1bcec2 | a09aa51ace36ec4284d5b96c0181e2f3722d9a10 | refs/heads/master | 2021-03-27T08:27:04.852256 | 2017-06-19T14:38:54 | 2017-06-19T14:38:54 | 74,959,948 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,312 | r | train_test_50_times.R | # testing consistency
output.confus = alist()
output_testComps = alist()
output_feats = alist()
output_tune = alist()
output_test = alist()
output_predict = alist()
for( j in 1:200){
caseInd <- createDataPartition(all.treatsnoDMSO_dili$class,p=0.8,list=FALSE)
trainData_long<- as.data.frame(ML_data_final_data_long[ treatment %in% all.treatsnoDMSO_dili[caseInd, treatment] ])
testData_long <- as.data.frame(ML_data_final_data_long[ treatment %in% all.treatsnoDMSO_dili[-caseInd, treatment] ])
trainData_long <- as.data.table(trainData_long)
# results.ks = alist()
# for( i in seq_along(all.ks.feats)){
# results.ks[[i]] <- ks.test(x = trainData_long[ variable == all.ks.feats[i] & class == "nonSevere" , value ],
# y = trainData_long[ variable == all.ks.feats[i] & class == "severe" , value ],
# alternative = "two.sided", exact = FALSE)
#
# }
#
# ks_results <- data.table(data.frame(feature = all.ks.feats,
# statistic = sapply(results.ks, "[[", "statistic"),
# p.value = sapply(results.ks, "[[", "p.value")))
#
#
# ks_results <- ks_results[ order(ks_results$p.value), ]
# sign_ks_results <- ks_results[ p.value <= 0.05, ]
#
#
# ML_data_final_data$treatment[!unique(ML_data_final_data$treatment) %in% unique(BMC_data_add$treatment)] # dmso
#
#
#sel_feats_200run<-selFeats filtered on correlation using all data
ind <- colnames(ML_data_final_data) %in% sel_feats_200run
# sel_data is training data
sel_data <- as.data.frame(
ML_data_final_data[ treatment %in% all.treatsnoDMSO_dili[caseInd, treatment], ind, with = FALSE]
)
cor_data <- cor(sel_data)
df2 = cor(sel_data)
# hc = findCorrelation(abs(df2), cutoff=0.8) # putt any value as a "cutoff"
# hc = sort(hc)
# reduced_Data = sel_data[,-c(hc)]
# sel_data <- reduced_Data
# dim(sel_data)
#
# selFeats <- colnames(sel_data)
selFeats <- sel_feats_200run
#write.table(sel_feats_200run, file ="../tmp/sel_feats_200run.txt", sep ="\t", col.names = NA)
indKeep <- colnames(ML_data_final_data) %in% selFeats
sum(indKeep)
indKeep <- indKeep | colnames(ML_data_final_data) %in% c("diliClass")
ML_data_final_data[, diliClass := ML_data_final_annot$diliClass ]
ML_data_final_data[, diliClass := factor(diliClass) ]
trainData <- as.data.frame(
ML_data_final_data[ treatment %in% all.treatsnoDMSO_dili[caseInd, treatment] , indKeep, with = FALSE]
)
testData <- as.data.frame(
ML_data_final_data[ treatment %in% all.treatsnoDMSO_dili[-caseInd, treatment] , indKeep, with = FALSE]
)
trainData$diliClass <- factor(trainData$diliClass, levels =c("severe","nonSevere"))
testData$diliClass <- factor(testData$diliClass, levels =c("severe","nonSevere"))
dim(testData)
# feature selection on 26 features using ga selection with svm
trainX <-trainData[, !colnames(trainData) %in% 'diliClass'] # Create training feature data frame
testX <- testData[,!colnames(trainData) %in% 'diliClass'] # Create test feature data frame
y=trainData$diliClass # Target variable for training
trainX2 <- as.data.frame(trainX) # training data: selected features
testX2 <- as.data.frame(testX) # test data: selected features
## SUPPORT VECTOR MACHINE MODEL
dim(testX2)
#Note the default method of picking the best model is accuracy and Cohen's Kappa
# Set up training control
ctrl <- trainControl(method="repeatedcv", # 10fold cross validation
number = 10,
repeats=10, # do 5 repititions of cv
summaryFunction=twoClassSummary, # Use AUC to pick the best model
classProbs=TRUE)
#Use the expand.grid to specify the search space
#Note that the default search grid selects 3 values of each tuning parameter
grid <- expand.grid(interaction.depth = c(1,2,3), #tree depths from 1 to 4
n.trees=seq(10,100,by=10), # let iterations go from 10 to 100
shrinkage=c(0.01,0.1), # Try 2 values fornlearning rate
n.minobsinnode = 20)
#
# Set up for parallel processing
#set.seed(1951)
registerDoParallel(7,cores=8)
#support vector machine or random forest that deals well with a large number of predictors.
#Train and Tune the SVM
psvmTuneGrid <- expand.grid(C=seq(0.005,0.25, length.out=3), sigma=seq(0.001, 0.1, length.out=8))
svm.tune <- train(x=trainX2,
y= trainData$diliClass,
method = "svmRadial",
degree = 2,
tuneLength = 3, # 9 values of the cost function
preProc = c("center","scale"),
metric="ROC",
trControl=ctrl,
tuneGrid=psvmTuneGrid
)
#closeAllConnections()
#svm.tune$results[rev(order(svm.tune$results$ROC)), ]
#Finally, assess the performance of the model using the test data set.
#Make predictions on the test data with the SVM Model
svm.pred <- predict(svm.tune, newdata = testX2, type = "raw")
output_predict[[j]] <- svm.pred
output_test[[j]] <- testData
output_tune[[j]] <- svm.tune
output_testComps[[j]] <- all.treatsnoDMSO_dili[-caseInd, treatment]
output.confus[[j]] <- confusionMatrix(svm.pred,testData$diliClass)
#output_feats[[j]] <- sign_ks_results$feature
output_feats[[j]] <- selFeats
print(j)
closeAllConnections()
}
compound_prediction <- data.frame(unlist(output_testComps),
unlist(output_predict),
unlist(lapply(output_test, "[[", 'diliClass')))# to count how often compounds are correctly tested
colnames(compound_prediction) <- c("treatment", "predicted", "diliClass")
compound_prediction$count <- 1
compound_prediction$correct <- compound_prediction$predicted == compound_prediction$diliClass
count_comps_correct <- aggregate(data = compound_prediction, .~treatment, sum)
count_comps_correct$frac_correct <- count_comps_correct$correct / count_comps_correct$count
# check out what dili class they are
count_comps_correct$diliClass <- NULL
count_comps_correct <- merge(count_comps_correct, annotationData, by = 'treatment', all.x = TRUE)
colnames(count_comps_correct)
count_comps_correct<- count_comps_correct[, c("treatment", "count", "frac_correct",
"SeverityLabel", "DILIConcern", "dissolveInfo")]
write.table(count_comps_correct,file = '../generated/results/SVM/compounds_pred_correct.txt', sep ="\t", col.names=NA)
head(count_comps_correct)
names(output.confus[[1]])
output.confus[[1]]$byClass[1]
median(sapply(lapply(output.confus, '[[', 'overall'),'[[','Accuracy')) # 0.6818182
median(sapply(lapply(output.confus, '[[', 'byClass'),'[[','Sensitivity')) # 0.6
median(sapply(lapply(output.confus, '[[', 'byClass'),'[[','Specificity')) # 0.75
median(sapply(lapply(output.confus, '[[', 'byClass'),'[[','Balanced Accuracy')) # 0.67
max(sapply(lapply(output.confus, '[[', 'overall'),'[[',1))
min(sapply(lapply(output.confus, '[[', 'overall'),'[[',1))
myresult = alist()
for( i in seq_along(output_tune)){
myresult[[i]] <- (output_tune[[i]]$results[which(max(output_tune[[i]]$results$ROC) == output_tune[[i]]$results$ROC),][, 3:5])
}
colMeans(do.call('rbind', myresult))
output_tune[[1]]["trainingData"]
svm.probs = alist()
svm.ROC = alist()
for( i in seq_along(output_tune)){
svm.probs[[i]] <- predict(output_tune[[i]], newdata =
output_test[[i]][, colnames(output_test[[i]]) != "diliClass"],type="prob") # Gen probs for ROC
svm.ROC[[i]] <- roc(predictor=svm.probs[[i]]$severe,
response=output_test[[i]]$diliClass,
levels=rev(levels(output_test[[i]]$diliClass)))
}
svm.ROC
plot(svm.ROC[[1]],main="ROC for SVM", ylim = c(0,1), lt = 2)
names(svm.ROC[[1]])
sensdata <- as.data.frame(sapply(svm.ROC, "[[", 'sensitivities' ))
specdata <- as.data.frame(sapply(svm.ROC, "[[", 'specificities' ))
colnames(sensdata) <- paste0("run", 1:ncol(sensdata))
colnames(specdata) <- paste0("run", 1:ncol(specdata))
head(specdata)
sensdata <- melt(sensdata)
specdata <- melt(specdata)
colnames(sensdata)[2] <- "Sensitivity"
colnames(specdata)[2] <- "Specificity"
ROCdata <- cbind(sensdata, specdata)
head(ROCdata)
colnames(ROCdata)[3] <- "run"
lapply(svm.ROC, lines, pch = '.')
pdf(file = '../generated/results/SVM/final/roc200final.pdf', width = 8, height = 8)
ggplot(ROCdata,aes(1-Specificity, Sensitivity))+geom_line(aes(group = run), stat="smooth",method = "loess",
size = 1.5,
linetype ="dotted",
alpha = 0.4)+
labs(title= "ROC curves 200 test-set runs: median ROC = 0.73",
x = "False Positive Rate (1-Specificity)",
y = "True Positive Rate (Sensitivity)") + theme_minimal() + coord_cartesian(ylim = c(0,1.2))
dev.off()
# figure for how often predicted
comps_correct_hm <- data.frame(count_comps_correct$frac_correct)
rownames(comps_correct_hm) <- count_comps_correct$treatment
correct_comps_Colors <- myColors
correct_comps_Colors$type <- NULL
correct_comps_Colors$diliClass <- NULL
correct_comps_Colors$SeverityLabel <- SeverityLabel
pdf("../generated/results/SVM/final/correct_predictions.pdf", width= 6, height = 18)
aheatmap(comps_correct_hm, Colv=NA,
color = my.colors, #breaks = colBreaks,
fontsize = 10, width = 10, height=10, legend = TRUE,
annRow = count_comps_correct[, c("SeverityLabel", "DILIConcern")],annColors = correct_comps_Colors #,txt =hm_data_data[ indOrder ,colIndex]
)
dev.off()
names(svm.ROC[[1]])
mean(sapply(svm.ROC, "[[", 'auc'))
median(sapply(svm.ROC, "[[", 'auc'))
which(sapply(lapply(output.confus, '[[', 'overall'),'[[','Accuracy') == 0.4)
output_feats[[158]]
length(table(as.character(unlist(output_feats)))[table(as.character(unlist(output_feats)))>150])
# small test
# select the >150 feats from selection of 200 runs. filter with corr filter
sel_feats_200run <- names(table(as.character(unlist(output_feats)))[table(as.character(unlist(output_feats)))>150])
|
407caf99f81ff6a1ff37993f9d62f1d1816c27f1 | 54f96e0e609e9aa36ab085622c519fa8d353ab9f | /_analysis/bluff-lake-model/_r/8-analysis-hour-edt-all-elevations-NOdrawdown.R | db0d6479f3c004bbf2744db1409e5e4f23c4b4f3 | [] | no_license | mcolvin/Bluff-Lake-Project | 479258613a47e11a0ecd6c9780c22bdd3bb08352 | 8a005f1dd1d6e1cc12ba06b0dd6a57e5ed01ae7e | refs/heads/master | 2022-10-16T05:48:46.843723 | 2021-11-10T20:53:41 | 2021-11-10T20:53:41 | 151,443,525 | 0 | 0 | null | 2022-10-06T03:22:42 | 2018-10-03T16:22:28 | HTML | UTF-8 | R | false | false | 17,848 | r | 8-analysis-hour-edt-all-elevations-NOdrawdown.R | source("_r/1-global.R")
source("_r/2-functions.R")
source("_r/3-load-and-clean.R")
source("_r/CLEAN-Objective metrics_new.R")
# Empirical coefficients for the intake inflow correction used in the
# simulation loop below (p <- a + (intake_in * b), which scales the hourly
# intake volume). `<-` replaces `=` for consistency with the sourced scripts.
a <- 1.408
b <- 0.302
#----------------------------------------------------------------------
#
# NOXUBEE RIVER DISCHARGE DATA: HOURLY
#
#----------------------------------------------------------------------
# PULL DISCHARGE AND GAUGE DATA FROM USGS PORTAL
## PARAMETER CODE 00065 Gage height, feet 00060; Discharge, cubic feet per second
# Load the cached gauge record; refresh it from NWIS when the cache is more
# than 15 days out of date, then rewrite the cache file.
discharge_hourly<-fread("_dat/discharge_hourly.csv")
discharge_hourly[,date:=as.Date(date)]
tmp<-as.numeric(Sys.Date()-as.Date(max(discharge_hourly$date)))
if(tmp>15)# pull data again if more than 15 days have passed since last pull
{
  discharge_hourly <- dataRetrieval::readNWISuv(siteNumbers = "02448000",
                                                parameterCd = c("00060","00065"),
                                                startDate = as.Date("1986-10-10"),
                                                endDate = Sys.Date(),
                                                tz="America/Chicago")
  discharge_hourly<-as.data.table(discharge_hourly)
  # positional renames: column 4 is discharge (00060), column 6 is gage (00065)
  names(discharge_hourly)[4]<-"discharge"
  names(discharge_hourly)[6]<-"gage"
  discharge_hourly[,date:=as.Date(dateTime)]
  discharge_hourly[,year:=as.numeric(format(date,"%Y"))]
  discharge_hourly[,doy:=as.numeric(format(date,"%j"))]
  fwrite(discharge_hourly,"_dat/discharge_hourly.csv")
}
# scale discharge to watershed area m^3/second
# (0.0283168 converts cfs to cms; `bluff_lake` comes from the sourced setup
# scripts -- presumably a watershed-area ratio, TODO confirm)
discharge_hourly[,Q_bl:=(discharge/bluff_lake)*0.0283168]
# collapse the sub-hourly record to hourly means on rounded timestamps
discharge_hourly$dateTime<-as_datetime(discharge_hourly$dateTime)
discharge_hourly$dateTime<-round_date(discharge_hourly$dateTime, "1 hour")
discharge_hourly<- discharge_hourly %>% dplyr::group_by(dateTime) %>% dplyr::summarise(doy=mean(doy), Q_bl=mean(Q_bl), discharge=mean(discharge))
discharge_hourly$hour<-hour(discharge_hourly$dateTime)
discharge_hourly$minute<-minute(discharge_hourly$dateTime)
discharge_hourly$doy<-yday(discharge_hourly$dateTime)
discharge_hourly$year<-year(discharge_hourly$dateTime)
# candidate starting board elevations (from the sourced data); keep those at
# or above the 66.6 m floor
elevation<-subset(elevation, elevation>=66.6)
#1993 and 2012 do not start on 1/1
years<-c(1990:1992,1994:2011, 2013:2020)
# subset hourly discharge data to year of concern
datalist <- list()
datalist2<- list()
# Simulate every (year x WCS release strategy x starting board elevation)
# combination with the hourly lake water-balance model and write one results
# CSV per year.
for(i in 1:length(years)){
  discharge_year<- subset(discharge_hourly, discharge_hourly$year==years[i]) #years[i]
  #Sub in any missing data
  # build a complete hourly timeline for the year and left-join the data
  # onto it so gaps show up as NA rows
  dateTime<-seq(from=discharge_year$dateTime[1],discharge_year$dateTime[1]+days(364), "1 hour")
  dateTime<-as.data.frame(dateTime)
  dateTime$doy<-as.numeric(format(dateTime$dateTime,"%j"))
  dateTime$hour<-hour(dateTime$dateTime)
  dateTime$minute<-minute(dateTime$dateTime)
  discharge_year<-left_join(dateTime, discharge_year, by = c("dateTime"="dateTime",
                                                             "doy"="doy", "hour"="hour",
                                                             "minute"="minute"))
  discharge_year<-discharge_year[!is.na(discharge_year$year), ]
  discharge_year$discharge_cms<-discharge_year$discharge*0.0283168
  discharge_year$doy<-as.numeric(discharge_year$doy)
  # make a field for 'continuous time' which is a fractional day starting at 0 for the first row of data an increasing fractinally for each hour and minute (i.e., 5:30 am would be 330 minutes in, 330/1440 = 0.2291667, the same time on the next day would be 1.2291667)
  discharge_year<-unique(discharge_year)
  discharge_year$cont_time<-(discharge_year$doy*1440)+(discharge_year$hour*60)+
    (discharge_year$minute)-1440
  #check for missing values
  discharge_year$gap<-c(NA, with(discharge_year, cont_time[-1] -
                                   cont_time[-nrow(discharge_year)]))
  #fill in missing data
  # NOTE(review): this overwrites `discharge` (cfs) with the interpolated
  # watershed-scaled series Q_bl; Q_bl itself is NOT gap-filled, yet it is
  # what Q_blfun() interpolates below -- confirm which series is intended.
  discharge_year$discharge<-na.approx(discharge_year$Q_bl, na.rm = F)
  # assign each hour to one of nine fixed day-of-year management periods
  discharge_year$period<-ifelse(test = discharge_year$doy>=1 & discharge_year$doy<=14, yes= "1", no=ifelse(discharge_year$doy>=15 & discharge_year$doy<=181, yes="2", no=ifelse(discharge_year$doy>=182 & discharge_year$doy<=195, yes="3", no=ifelse(discharge_year$doy>=196 & discharge_year$doy<=212, yes="4", no=ifelse(discharge_year$doy>=213 & discharge_year$doy<=226, yes="5", no=ifelse(discharge_year$doy>=227 & discharge_year$doy<=243, yes="6", no=ifelse(discharge_year$doy>=244 & discharge_year$doy<=334, yes="7", no=ifelse(discharge_year$doy>=335 & discharge_year$doy<=348, yes="8", no="9"))))))))
  # HOP = hour-of-period counter within each year/period
  discharge_year <- discharge_year %>% dplyr::group_by(year,period) %>%
    dplyr::mutate(HOP = seq_along(period))
  #create hourly discharges
  datalist3 <- list()
  # candidate WCS release strategies (cms)
  discharges<- c(0, 2.8, 5.6, 8.5, 11.3, 14.1, 17)
  for(x in 1:length(discharges)){
    #add in p-fish discharges
    # bucket each period's hours into 168-hour (weekly) blocks, Dweek 1..24
    discharge_year<-mutate(discharge_year, Dweek=ifelse(test= HOP>=1 & HOP<=167, yes="1", no=ifelse(HOP>=168 & HOP<=335, yes="2", no=ifelse(HOP>=336 & HOP<=503, yes="3", no=ifelse(HOP>=504 & HOP<=671, yes="4", no=ifelse(HOP>=672 & HOP<=839, yes="5", no=ifelse(HOP>=840 & HOP<=1007, yes= "6", no=ifelse(HOP>=1008 & HOP<=1175, yes="7", no=ifelse(HOP>=1176 & HOP<=1343, yes="8", no=ifelse(HOP>=1344 & HOP<=1511, yes="9", no=ifelse(HOP>=1512 & HOP<=1679, yes="10", no=ifelse(HOP>=1680 & HOP<=1847, yes="11", no=ifelse(HOP>=1848 & HOP<=2015, yes="12", no=ifelse(HOP>=2016 & HOP<=2183, yes="13", no=ifelse(HOP>=2184 & HOP<=2351, yes="14", no=ifelse(HOP>=2352 & HOP<=2519, yes="15", no=ifelse(HOP>=2520 & HOP<=2687, yes="16", no=ifelse(HOP>=2688 & HOP<=2855, yes="17", no=ifelse(HOP>=2856 & HOP<=3023, yes="18", no=ifelse(HOP>=3024 & HOP<=3191, yes="19", no=ifelse(HOP>=3192 & HOP<=3359, yes="20", no=ifelse(HOP>=3360 & HOP<=3527, yes="21", no=ifelse(HOP>=3528 & HOP<=3695, yes="22", no=ifelse(HOP>=3696 & HOP<=3863, yes="23", no="24"))))))))))))))))))))))))
    # release during the first 8 hours of each week-block; PfD is volume/hour
    discharge_hourly2<-discharge_year%>%group_by(period,Dweek)%>%slice(c(1:8))
    discharge_hourly2$PfD<-discharges[x]*60*60
    discharge_year3<-left_join(discharge_year,discharge_hourly2)
    discharge_year3$PfD[is.na(discharge_year3$PfD)] <- 0
    discharge_year3$WCS_strategy<-discharges[x]
    datalist3[[x]] <- discharge_year3
  }
  discharge_hourly4 <- rbindlist(datalist3)
  #discharge_hourly<-discharge_year3
  # replicate the strategy table once per candidate starting elevation
  datalistz <- list()
  for(z in 1:length(elevation)){
    discharge_hourly4$Board1<-elevation[z] #start Elevation
    datalistz[[z]] <- discharge_hourly4
  }
  discharge_hourly4 <- rbindlist(datalistz)
  # unique key: period-strategy-year-startElevation
  discharge_hourly4$period_dist<-paste(discharge_hourly4$period, discharge_hourly4$WCS_strategy,discharge_hourly4$year, discharge_hourly4$Board1, sep="-")
  combos<-unique(discharge_hourly4$period_dist)
  for(k in 1:length(combos)){
    Period<-subset(discharge_hourly4, discharge_hourly4$period_dist==combos[k]) #combos[k]
    period<-mean(as.numeric(Period$period))
    Board1<-mean(as.numeric(Period$Board1))
    Board2<-68.4
    #----------------------------------------------------------------------
    #
    # lake hydrodynanamic model
    #
    #----------------------------------------------------------------------
    # Hourly water-balance step for deSolve's "iteration" method: takes the
    # current lake volume x[1] and returns the NEXT volume (not a dV/dt
    # derivative), plus diagnostic outputs.
    wse_dyn<-function(t,x,parms)
    {
      #----------------------------------------------------------------------
      #
      # parameters and conversions
      #
      #----------------------------------------------------------------------
      # lake volume in m^3
      V<-x[1]
      # convert lake volume to water surface elevation
      wse<- Vol_2_EL(V)
      # lake surface area
      sa<- Vol_2_SA(V)
      #DOY
      doy<-DOYfun(t) #t
      # acceleration due to gravity m/sec^2
      acc_due_to_gravity<- 9.81
      # wse at intake
      ele_intake<-predict(gam_4, newdata=data.frame(Q_bl=Q_blfun(t), doy))
      # wse lake
      ele_lake<- wse# wse_lake(t)
      #----------------------------------------------------------------------
      #
      # water releasing over the WCS
      #
      #----------------------------------------------------------------------
      # board elevation
      WCS1_wse<-c(Board2,Board2,Board2,Board2,Board2,
                  Board2,Board2) # seven bays
      # wcs_width<-rep(1.6764,8)
      # water control structure head
      # set head to zero when wse<=board
      # NOTE(review): the anonymous function ignores its argument `x` and
      # uses the whole WCS1_wse vector, so every element of wcs_head gets
      # the same scalar max(); harmless while all boards are equal, but
      # `max(0, ele_lake - x)` was probably intended.
      wcs_head<- sapply(WCS1_wse,function(x)
      {
        max(0,ele_lake-WCS1_wse)
      })
      # amount of water flowing out of each bay of the wcs
      # (bays 3-6 are multiplied by 0, i.e. treated as closed)
      wcs_out<- weir(g=9.81,w=wcs_width[1], h=wcs_head[1])+
        weir(g=9.81,w=wcs_width[2], h=wcs_head[2])+
        0*weir(g=9.81,w=wcs_width[3], h=wcs_head[3])+
        0*weir(g=9.81,w=wcs_width[4], h=wcs_head[4])+
        0*weir(g=9.81,w=wcs_width[5], h=wcs_head[5])+
        0*weir(g=9.81,w=wcs_width[6], h=wcs_head[6])+
        weir(g=9.81,w=wcs_width[7], h=wcs_head[7])
      wcs_out<-(wcs_out*60*60)*0.8 #per hour
      # emergency spillway
      #emergency overflow measurements (meters)
      EOFwidth<-23
      EOFheight<-68.698
      EOF_head<-max(0,ele_lake-EOFheight)
      EOF_out<-broad_weir(w=EOFwidth, h=EOF_head)
      EOF_out<-EOF_out*60*60 #per hour
      # scheduled paddlefish release volume for this hour
      PfD<-PfDfun(t)
      #----------------------------------------------------------------------
      #
      # water coming in (intake)
      #
      #----------------------------------------------------------------------
      # board elevation
      intake_board_wse<- c(68.800,68.800) # meters bay 1 and 2
      intake_width<- c(1.6764,1.6764) # meters bay 1 and 2
      intake_head<- c(max(0,ele_intake-intake_board_wse[1]),
                      max(0,ele_intake-intake_board_wse[2]))
      # water inputs to intake (cms)
      intake_in<-weir(g=9.81,h=intake_head[1],w=intake_width[1])+ # bay 1
        weir(g=9.81,h=intake_head[2],w=intake_width[2]) # bay 2
      # convert to cubic meters per 60 minutes
      # (a and b are the empirical correction coefficients set at the top)
      p<-a+(intake_in*b)
      intake_in<-(intake_in*60*60)*p
      #----------------------------------------------------------------------
      #
      # change in volume
      #
      #----------------------------------------------------------------------
      V<-V+(intake_in-(wcs_out+EOF_out))-PfD # iteration needs the actual volume, not dV
      return(list(V=V,wse=wse,
                  ele_lake=ele_lake,
                  sa=sa,
                  intake_in=intake_in,
                  EOF_out=EOF_out,
                  wcs_out=wcs_out))
    }
    #######################################
    # forcing functions interpolated over hour-of-period
    Q_blfun<- approxfun(Period$HOP, Period$Q_bl)
    DOYfun<- approxfun(Period$HOP, Period$doy)
    PfDfun<-approxfun(Period$HOP, Period$PfD)
    Period$gap<-c(NA, with(Period, HOP[-1] -
                             HOP[-nrow(Period)]))
    # initial volume from the starting board elevation
    ini_values<-EL_2_Vol(Board1)
    parameters<-NULL # no parameters yet
    solution<- ode(
      y=ini_values,
      times=Period$HOP,
      func=wse_dyn,
      parms=parameters,
      method="iteration")
    colnames(solution)[2] <- "V"
    solution<-as.data.table(solution)
    solution$HOP<-solution$time
    solution<-left_join(solution,Period)
    solution$EL<-Vol_2_EL(solution$V)
    # clamp negative volumes (lake cannot hold less than zero water)
    solution$V<-ifelse(solution$V<0, 0, solution$V)
    datalist2[[k]] <- solution
    # quick diagnostic plot per combo, with the 66.568 m floor marked
    plot(EL~time,solution,ylab="Lake elevation",las=1,
         main=combos[k])
    abline(a=66.568,b=0)
  }
  Solution <- do.call(rbind, datalist2)
  #datalist[[i]]<-Solution
  print(i/length(years))
  fwrite(Solution,paste("_outputs/yearsND/",years[i],
                        "-hydro-sims-Final-NOdrawdown.csv",sep=""))
}
#
#
# # Calculating Utilties
# # changing the working directory not needed
#
# file_list <- list.files("_outputs/yearsND")
#
# file_path<- "_outputs/yearsND/" # filepath relative to repo root dir
#
# file_list<-paste(file_path,file_list,sep="") # file paths to be read in
#
# # Read all csv files in the folder and create a list of dataframes
#
# ldf <- lapply(file_list, read.csv)
#
# datalistFinal<-list()
#
# for(r in 1:length(ldf)){
# All_Years<-ldf[[r]]
# All_Years$elevation<-All_Years$EL
# All_Years$WB<-WBM(All_Years$elevation)
# All_Years<-All_Years%>%group_by(WCS_strategy, period, Board1)%>%
# mutate(Avg14days=rollmean(elevation, k=336, fill=EL))
# All_Years$WF<-WFM(All_Years$Avg14days)
# All_Years$Fish<-FishM(All_Years$elevation)
# All_Years$Anglers<-AnglersM(All_Years$elevation)
# All_Years$Ramp<-RampM(All_Years$elevation)
# All_Years$Paddlefish<-rescale(All_Years$WCS_strategy, to=c(0,1))
#
# #Seasonal Weights
# All_Years$DOY<-All_Years$doy
# All_Years<-merge(All_Years, dates, by="DOY")
#
# All_Years$WB<-All_Years$WF*All_Years$WF_S
# All_Years$WF<-All_Years$WB*All_Years$WB_S
# All_Years$Ramp<-All_Years$Ramp*All_Years$BoatS
# All_Years$Anglers<-All_Years$Anglers*All_Years$BankS
#
# #Rescale
# All_Years$WB<-rescale(All_Years$WF, to=c(0,1))
# All_Years$WF<-rescale(All_Years$WB, to=c(0,1))
# All_Years$Ramp<-rescale(All_Years$Ramp, to=c(0,1))
# All_Years$Anglers<-rescale(All_Years$Anglers, to=c(0,1))
# All_Years$Fish<-rescale(All_Years$Anglers, to=c(0,1))
#
# #Weight Utilities acording to CCP to form Cumulative Utility
# W<- c(.15,.20,.25,.30,.1)
# All_Years$Utility<-(W[5]*All_Years$Paddlefish)+(W[1]*((All_Years$Ramp*.5) +
# (All_Years$Anglers*.5))) + (W[2]*All_Years$Fish) +
# (W[3]*All_Years$WB) + (W[4]*All_Years$WF)
#
# #Don't Drain the Lake!!
# All_Years<- All_Years%>%group_by(WCS_strategy, period, Board1)%>%
# mutate(MinEL= min(EL))
# All_Years$Utility<-ifelse(All_Years$MinEL<=66.6, 0, All_Years$Utility)
#
# #Group and average
# All_Years$period<-as.factor(All_Years$period)
#
# discharges<- c(0, 2.8, 5.6, 8.5, 11.3, 14.1, 17)
#
# PERIODS2 <-All_Years
# datalist5<-list()
# for(p in 1:length(discharges)){
# p1<-subset(PERIODS2, PERIODS2$WCS_strategy==discharges[p])
# p1 <- p1 %>% dplyr::group_by(period, Board1) %>%dplyr::arrange(doy) %>%
# dplyr::mutate(CumUt = cumsum(Utility), WCS_strategy=discharges[p])
# datalist5[[p]] <- p1
# }
# PERIODS2 <- rbindlist(datalist5)
#
# final<- PERIODS2 %>%
# dplyr::group_by(WCS_strategy, period, Board1) %>%
# dplyr::arrange(doy) %>%
# dplyr::slice(n())
#
# datalistFinal[[r]]<-final
# }
# Final <- rbindlist(datalistFinal)
# write.csv(Final, "_outputs/yearsND/final-cappedDED.csv")
#
# Final<-read.csv("_outputs/yearsND/final-cappedDED.csv")
#
# # # fin1<-subset(Final, Final$period==1)
# # # fin1$penalty<-PenaltyM1(fin1$elevation)
# # # fin2<-subset(Final, Final$period==2)
# # # fin2$penalty<-PenaltyM2(fin2$elevation)
# # # fin3<-subset(Final, Final$period==3)
# # # fin3$penalty<-PenaltyM3(fin3$elevation)
# # # fin4<-subset(Final, Final$period==4)
# # # fin4$penalty<-PenaltyM4(fin4$elevation)
# # # fin5<-subset(Final, Final$period==5)
# # # fin5$penalty<-PenaltyM5(fin5$elevation)
# # # fin6<-subset(Final, Final$period==6)
# # # fin6$penalty<-PenaltyM6(fin6$elevation)
# # # fin7<-subset(Final, Final$period==7)
# # # fin7$penalty<-PenaltyM7(fin7$elevation)
# # # fin8<-subset(Final, Final$period==8)
# # # fin8$penalty<-PenaltyM8(fin8$elevation)
# # # fin9<-subset(Final, Final$period==9)
# # # fin9$penalty<-PenaltyM9(fin9$elevation)
# # # Final<-rbind(fin1,fin2,fin3,fin4,fin5,fin6,fin7,fin8,fin9)
# # #
# # # Final$penalty<-ifelse(Final$elevation>=(Final$Board2), 1, Final$penalty)
# # # Final$penalty<-ifelse(Final$elevation<=66.568, 0, Final$penalty)
# # #
# # # Final$CumUt<-Final$CumUt*Final$penalty
# #
# # for(q in 1:nrow(Final)){
# # num<-as.numeric(Final$period[q])+1
# # num<-ifelse(num==10, 9, num)
# # Final$Period_board[q]<-Board_Time(num)
# # }
# #
# # Final$CumUt<-ifelse(Final$elevation<Final$Period_board-0.2, 0, Final$CumUt)
# #
# # Final$period<-as.factor(Final$period)
# # Final$elevation<-as.numeric(Final$elevation)
# #
# # Final<- Final%>%group_by(period, Board1, WCS_strategy) %>%summarise(CumUt=mean(CumUt), El=mean(EL))
# #
# # Final2<- Final %>% dplyr::group_by(period, Board1) %>%
# # dplyr::mutate(Utility=rescale(CumUt, to=c(0,1)), WCS_strategy=WCS_strategy)
# #
# # plots<-list()
# # elevation<-unique(Final2$Board1)
# # for(u in 1:length(elevation)){
# # fnl<-subset(Final2, Final$Board1==elevation[u])
# # plt<-ggplot(fnl, aes(x=period, y=WCS_strategy)) +
# # geom_tile(aes(fill = Utility)) +
# # scale_fill_distiller(palette = "Greys") +
# # labs(title = elevation[u],
# # y = "Strategy",
# # x = "Period")+theme(legend.position = "none", axis.title.x=element_blank(),
# # axis.title.y=element_blank(), axis.text.x=element_blank(),
# # axis.text.y=element_blank())
# # plots[[u]]<-plt
# # }
# #
# # grid.arrange(grobs = plots, ncol = 5)
# #
# # decision<- Final%>%group_by(period, Board1)%>% filter(CumUt== max(CumUt)) %>%
# # select(WCS_strategy, CumUt, El)
# #
# # #Alternate method for above
# # #Final<-as.data.table(Final)
# # # dd<-split(Final, by=c("period","Board"))
# # # dd<-lapply(dd,function(x)
# # # {
# # # x[which.max(x$Utility)]
# # # })
# # # dd<-rbindlist(dd)
# # decision$WCS_strategy<-ifelse(decision$CumUt<1, 0, decision$WCS_strategy)
# #
# # decision<-subset(decision, decision$Board1<=68.4)
# # decision$Board1<-as.factor(decision$Board1)
# # decision$period<-as.factor(decision$period)
# # decision$WCS_strategy<-as.factor(decision$WCS_strategy)
# #
# # #p<-
# # ggplot(decision, aes(x=period, y=Board1)) +
# # geom_tile(aes(fill = WCS_strategy)) + scale_fill_grey()+
# # theme_classic()+
# # labs(y = "Elevation",
# # x = "Period")+
# # geom_segment(data=transform(subset(decision, period==1&Board1==68.2|period==2&Board1==68.4|period==3&Board1==68.2|period==4&Board1==68|period==5&Board1==67.8|period==6&Board1==67.6|period==7&Board1==67.4|period==8&Board1==67.8|period==9&Board1==68), period=as.numeric(period), Board1=as.numeric(Board1)),
# # aes(x=period-.49, xend=period+.49, y=Board1, yend=Board1),
# # color="black", size=1)
# # ggsave("outputs.jpg",plot=p) |
c6d86390dc1feaf35ce4ad51e5152189b12cef97 | 72f3fa9a49488dfd9a5c0dd6db792582e3a0f862 | /man/drmutWaterfall.Rd | 3b79129c8cfadfbe86dfcbd347cab6ffecde8461 | [] | no_license | yumengw/utilR | 8df9e3c7b48813ef3e7e81d52807ffd63e9c7b50 | 463a6f0651fb3f13302d411ac761235fafd017b2 | refs/heads/master | 2021-09-01T21:32:42.819743 | 2017-12-28T18:45:34 | 2017-12-28T18:45:34 | 115,483,178 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 419 | rd | drmutWaterfall.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dr.mutWaterfall.R
\name{drmutWaterfall}
\alias{drmutWaterfall}
\title{mutWaterfall function}
\usage{
drmutWaterfall(mafile, job_dir, format = "json")
}
\arguments{
\item{mafile}{maf file name}

\item{job_dir}{path to the job/output directory (see the example); confirm exact semantics in R/dr.mutWaterfall.R}

\item{format}{output format, default \code{"json"}}
}
\value{
results
}
\description{
mutWaterfall function
}
\examples{
drmutWaterfall(mafile, "/Users/yumengw/projects/drright/drright-modules/")
}
|
dad6bee7e1f8ce1438fc5b5ab03771891f117aa1 | cf5daf723e5e5daae992acf6a55a6e3fe82c19bc | /setup/mcSetup/copyGadgetModel.R | 9cb7151566989dc1604bb5125404c230d0b72640 | [] | no_license | inspktrgadget/gadgetTest | 8b260f55fc1701dbe52c7d2b151ab175e37766e5 | c120c45d6c3676edc634d9e209b65b2ce4f8b70f | refs/heads/master | 2021-09-19T11:51:16.572983 | 2018-07-27T19:54:17 | 2018-07-27T19:54:17 | 116,150,467 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 177 | r | copyGadgetModel.R | # script to copy model from initMod over to new model
# copy files over
# Copy the template model (initmod_dir) into the new model directory (gd).
copy_from_dir(initmod_dir, gd, recursive = TRUE)
# Remove any stale WGTS output directory carried over from the template.
# file.path() replaces the hand-rolled paste(..., sep = "/") for idiomatic
# path construction (identical result: "<gd>/WGTS").
unlink(file.path(gd, "WGTS"), recursive = TRUE)
|
a2061b97bf3df26382be3089af76d48685e9263d | 360b88fd10e8b92d4fc058c4c68a70e8edc06ca2 | /R/star.R | f9173cf5db361787b74e79d7418e8ad591a201ab | [
"MIT"
] | permissive | pkgspell/gistr | 9afa1dbb0f896d88a6581bbbf8c268f30806e168 | 5f14ded84d69762c010ebe2420eaeed29b24437b | refs/heads/master | 2020-05-22T18:30:02.919074 | 2017-03-11T16:48:53 | 2017-03-11T16:48:53 | 84,714,215 | 0 | 0 | null | 2017-03-12T09:25:57 | 2017-03-12T09:25:57 | null | UTF-8 | R | false | false | 1,498 | r | star.R | #' Star a gist
#'
#' @export
#' @param gist A gist object or something that can be coerced to a gist object.
#' @template all
#' @return A message, and a gist object, the same one input to the function.
#' @examples \dontrun{
#' id <- '7ddb9810fc99c84c65ec'
#' gist(id) %>% star()
#' gist(id) %>% star_check()
#' gist(id) %>% unstar()
#' gist(id) %>% unstar() %>% star()
#' gist(id) %>% star_check()
#' gist(id) %>%
#' star() %>%
#' star_check()
#'
#' # pass in a url
#' x <- "https://gist.github.com/expersso/4ac33b9c00751fddc7f8"
#' gist(x) %>% star
#' gist(x) %>% unstar
#' }
star <- function(gist, ...){
  # Coerce the input (id, URL, or gist object) to a gist, then PUT to the
  # gist's star endpoint; GitHub requires an explicit zero Content-Length
  # header on this call. Reports success/failure via star_mssg() and
  # returns the (coerced) gist so the call can be piped.
  the_gist <- as.gist(gist)
  response <- gist_PUT(url_star(the_gist$id), gist_auth(), ghead(),
                       add_headers(`Content-Length` = 0), ...)
  star_mssg(response, 'Success, gist starred!')
  the_gist
}
#' @export
#' @rdname star
unstar <- function(gist, ...){
  # Remove the authenticated user's star from `gist` by DELETE-ing the
  # star endpoint. Reports the outcome via star_mssg() and returns the
  # (coerced) gist so the call can be piped.
  the_gist <- as.gist(gist)
  response <- gist_DELETE(url_star(the_gist$id), gist_auth(), ghead(), ...)
  star_mssg(response, 'Success, gist unstarred!')
  the_gist
}
#' @export
#' @rdname star
star_check <- function(gist, ...){
  # Report (via message) whether the authenticated user has starred `gist`:
  # the GitHub API answers 204 when starred. Returns the coerced gist
  # invisibly-to-the-eye (last value) so the call can be piped.
  gist <- as.gist(gist)
  res <- GET(url_star(gist$id), gist_auth(), ghead(), ...)
  # Simplified from `if (...) TRUE else FALSE`: the comparison is already
  # the logical value we want.
  msg <- res$status_code == 204
  message(msg)
  gist
}
url_star <- function(x) {
  # Build the "star" endpoint URL for gist id `x` on the GitHub API host.
  endpoint <- '%s/gists/%s/star'
  sprintf(endpoint, ghbase(), x)
}
star_mssg <- function(x, y) {
  # On HTTP 204 emit `y` as a message; otherwise surface the HTTP error
  # via httr's warn_for_status().
  if (x$status_code == 204) {
    message(y)
  } else {
    warn_for_status(x)
  }
}
star_action <- function(x, y) {
  # On HTTP 204 translate the action name ("star"/"unstar") to its
  # past-tense label; for any other status return the raw status code.
  if (x$status_code != 204) {
    return(x$status_code)
  }
  switch(y, star = "starred", unstar = "unstarred")
}
|
62316f66af0c2f858a9dc023e6242ad0b249f46c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/mazeinda/examples/test_associations.Rd.R | 3f0866e5db3e7252ca1e35438e20cd804a69f686 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 525 | r | test_associations.Rd.R | library(mazeinda)
### Name: test_associations
### Title: test_associations
### Aliases: test_associations
### ** Examples
# Auto-extracted example code (genthat) for mazeinda::test_associations.
# Two zero-inflated numeric vectors of equal length:
v1=c(0,0,10,0,0,12,2,1,0,0,0,0,0,1)
v2=c(0,1,1,0,0,0,1,1,64,3,4,2,32,0)
test_associations(v1,v2)
# Matrix inputs (6 rows each):
m1=matrix(c(0,0,10,0,0,12,2,1,0,0,0,0,0,1,1,64,3,4,2,32,0,0,43,54,3,0,0,3,20,1),6)
test_associations(m1,m1)
m2=matrix(c(0,1,1,0,0,0,1,1,64,3,4,2,32,0,0,43,54,3,0,0,3,20,10,0,0,12,2,1,0,0),6)
test_associations(m1,m2)
# Random non-negative matrices; no set.seed(), so results vary run to run.
m3= matrix(abs(rnorm(36)),6)
m4= matrix(abs(rnorm(36)),6)
test_associations(m3,m4)
|
fa8ebc3660bb0821dfa32fea0a42ea76937f82af | 2cdc493dfd603c85b5836714be598eabf228bcca | /Sensor_Workflows/mergingGPSdata.R | 8671e4acd8cef6aefa1f6ce964f58a89cd3160af | [] | no_license | Kriddie/Ecuador | 988df69016c80ebfb960d74bbf6b6169e92ba996 | f7e0920c04c22b499cfa500555769b225f524a6c | refs/heads/master | 2020-07-03T19:18:24.338415 | 2019-08-06T14:12:12 | 2019-08-06T14:12:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,311 | r | mergingGPSdata.R | #install.packages("tmaptools")
# install.packages("tmaptools")
# install.packages("here")
library(tmaptools)
library(ggplot2)
library(here)
library(dplyr)

# Workflow: tag one day's GPS shapefile with its survey category and write it
# back, then bind every shapefile in FieldData/GPS into one combined layer.
#
# Category vocabulary used across the project:
#   streamhike, focuswatershed, WestCatchment, GeneralGeog,
#   EOStransects, Levelandbarro, Synoptics

# ---- Categorise one day's points ----
date <- '071019'
GPS1 <- read_shape(here("FieldData/GPS", paste0("GPS_", date, ".shp")))
GPS1$name <- as.character(GPS1$name)
# Manual corrections specific to this day's track
GPS1$name[13] <- "STR49"
GPS1$Category <- "WestCatchment"
GPS1$Category[3] <- "GeneralGeog"
write_shape(GPS1, here("FieldData/GPS", paste0("GPS_", date, ".shp")))

# ---- Combine every GPS shapefile into a single layer ----
# Escape the dot and anchor the pattern: the old '.shp' regex also matched
# names such as 'xxx.shp.xml' sidecar files.
gps_Files <- list.files(here::here("FieldData/GPS"), pattern = '\\.shp$')

# Read each file once and bind in a single call.  This is correct even when
# only one shapefile exists (the previous 2:length(gps_Files) loop iterated
# over c(2, 1) in that case and indexed past the end) and avoids growing the
# object with rbind() inside a loop.
allgeogData <- do.call(
  rbind,
  lapply(gps_Files, function(f) st_read(here::here("FieldData/GPS", f)))
)

# Save the combined layer
write_shape(allgeogData, here("FieldData/GPS/allgeogdata.shp"))
|
28e17ef7338ea5ab8c8fb5370a0730c7744cbbcd | 9969b02c26fa5388ac971b8212c761c6abf98efb | /man/backEnhFit.Rd | 2ec27df4de624760636c03732b5532dc9a145c64 | [] | no_license | tmcd82070/CAMP_RST | 0cccd7d20c8c72d45fca31833c78cd2829afc169 | eca3e894c19936edb26575aca125e795ab21d99f | refs/heads/master | 2022-05-10T13:33:20.464702 | 2022-04-05T21:05:35 | 2022-04-05T21:05:35 | 10,950,738 | 0 | 0 | null | 2017-05-19T20:42:56 | 2013-06-25T21:24:52 | R | UTF-8 | R | false | true | 5,023 | rd | backEnhFit.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/backEnhFit.r
\name{backEnhFit}
\alias{backEnhFit}
\title{backEnhFit}
\usage{
backEnhFit(
tmp.df,
df,
initialVars,
possibleVars,
m.i,
eff.ind.inside,
max.df.spline,
eff.inside.dates,
trap,
model.info,
fits,
plot.file,
option,
bsplBegDt,
bsplEndDt
)
}
\arguments{
\item{tmp.df}{The reduced data frame originating from \code{df} containing
only efficiency-trial dates; i.e., those with non-\code{NA}
\code{nReleased} values.}
\item{df}{The data frame for a specific \code{TrapPositionID} containing
efficiency-trial information and covariates, if available, at the time of
fitting enhanced efficiency trials in \code{eff_model.r} (or
\code{F.efficiency.model }).}
\item{initialVars}{The set of possible variables available for model
inclusion, following application of the 90% presence rule. These variables
form a subset of the \code{possibleVars}.}
\item{possibleVars}{The set of all available variables for possible inclusion
to the fitting of an enhanced efficiency model. Usually include
efficiency-trial variables, environmental covariate variables, and
CAMP-collected variables.}
\item{m.i}{An integer containing the number of rows in data frame
\code{tmp.df}.}
\item{eff.ind.inside}{A logical vector of length equal to the number of rows
of data frame \code{df}, expressing which \code{batchDates} fall within the
temporal data-range for which estimation occurs.}
\item{max.df.spline}{An integer expressing the highest allowable spline
degree.}
\item{eff.inside.dates}{A \code{POXIX}-type vector of length 2, housing the
first and last \code{batchDates} for which vector \code{eff.ind.inside} is
\code{TRUE}.}
\item{trap}{An alphanumeric \code{TrapPositionID} value, as itemized within
\code{df} (and \code{tmp.df}).}
\item{model.info}{A list containing information derived from an immediate
preceding call to function \code{plotbsSpline}. Used to help get the
current fit iteration going.}
\item{fits}{A list object of length equal to the number of
\code{TrapPositionID}s requiring a model fit.  Ultimately passed to the
bootstrapping routine.}
\item{plot.file}{The name of the file prefix under which output is to be
saved. Set to \code{NA} to plot to the plot window.}
\item{option}{A graphical option for displaying spline fits. Typically (and hard-coded) set to \code{2}.}
\item{bsplBegDt}{The first date, via the spline-1959-1960 paradigm, to which
all efficiency years and dates collapse.}
\item{bsplEndDt}{The last date, via the spline-1959-1960 paradigm, to which
all efficiency years and dates collapse.}
}
\value{
Several objects, all housed within a list.
\describe{
\item{fit}{The final fit for the current \code{TrapPositionID} of interest.}
\item{model.info}{List of model information resulting from the application of \code{backEnhFit} to each unique \code{TrapPositionID} in \code{df}.}
\item{fits}{List of fits resulting from the application of \code{backEnhFit} to each unique \code{TrapPositionID} in \code{df}.}
\item{covarStringPlot}{A record of considered variables necessary model plotting outside of the function. }
\item{interimVars1Num}{Set of variables remaining for model inclusion when both CAMP \code{waterTemp_C} and Environmental-covariate {temp_C} are present.}
\item{interimVars2Num}{Set of variables remaining for model inclusion when both CAMP \code{discharge_cfs} and Environmental-covariate \code{flow_cfs} are present.}
}
}
\description{
For a given set of covariates, fit a generalized additive model
via a backwards-selection paradigm, allowing for both the selection of
confounding covariates and variable temporal B-splines.
}
\details{
Function \code{backEnhFit} is the workhorse function that fits the
backwards-fitting algorithm for enhanced-efficiency splines.  It utilizes
tests of deviances to identify variables for possible exclusion, first from an
initial model containing all available covariates, and then from smaller subsets
following the removal of the least significant covariate identified in
the most recent preceding model fit.
An \eqn{\alpha} value of 0.10 is used to test for possible stopping of the
model;  i.e., if all included covariates in a model have p-values less than
0.10, all covariates are retained, and the model-fitting procedure stops.
Note that the value of 0.10 is set within the function via initial
parameter \code{pCutOff}.
}
\examples{
\dontrun{
out <- backEnhFit(tmp.df,
df,
initialVars,
possibleVars,
m.i,
eff.ind.inside,
max.df.spline,
eff.inside.dates,
trap,
model.info,
fits,
plot.file,
option,
bsplBegDt,
bsplEndDt)
}
}
\seealso{
\code{eff_model_enh}, \code{plotbsSpline}
}
\author{
WEST Inc.
}
|
912c688e1e1549dce7696567f989ac47a62bd41a | 44cbfd997078881f1e4b6adb5076f26bb84ec56f | /in-situ_comparison/compare_function.r | cceec759145499edb5b85df828e00404493da2ba | [] | no_license | Spamiad/HYDRAS | 6a85ce9fc3f2b861b229ba6a2e8ac208621594a0 | f475628bebea66139b238459998d5a1755f79c6e | refs/heads/master | 2020-04-16T06:13:32.343689 | 2015-09-19T12:49:38 | 2015-09-19T12:49:38 | 30,418,642 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,192 | r | compare_function.r |
# Read a directory of CLM netCDF output (history tape h1) into one stack.
#
# All files matching the "t2.clm2.h1.*" pattern in `ncdf_dir` are collected
# and combined into a single multi-layer raster stack.
# NOTE(review): stack() is assumed to be raster::stack here -- confirm the
# raster package is attached by the caller.
#
# @param ncdf_dir Path to the directory holding the CLM output files.
# @return A multi-layer stack with one layer per input file/time step.
read_clm <- function(ncdf_dir) {
  files <- list.files(ncdf_dir, "t2.clm2.h1.*", full.names = TRUE)
  # Return the stack visibly: the old trailing `r = stack(files)` assignment
  # made the function's result invisible at top level.
  stack(files)
}
# Extract the daily time series for one grid cell from a CLM raster stack
# and return it as a date-indexed zoo series.
# NOTE(review): the date range 2000-01-01..2007-04-23 is hard-coded and must
# match the number of layers in `r` -- confirm against the stacked files.
read_clmpix = function(r,latlon) {
  # extract() yields one row per point; only the first point is kept.
  result = extract(r, latlon)
  result = result[1,]
  from = as.Date("2000-01-01")
  to = as.Date("2007-04-23")
  days = seq.Date(from=from,to=to,by="day")
  days = as.character(days)
  # cbind() of character dates with numeric values coerces the whole matrix
  # to character; read.zoo re-parses the index column with as.Date.
  # NOTE(review): the value column therefore arrives as character/factor --
  # verify that the resulting zoo coredata is numeric downstream.
  ts = as.data.frame(cbind(days,result))
  # NOTE(review): the trailing assignment returns the series invisibly.
  z2 <- read.zoo(ts, header = F, FUN = as.Date)
}
# Read an ISMN in-situ soil-moisture .stm file and return daily means.
#
# The first line of the file is a station header (parsed separately by
# ismn_latlon()); the remaining lines hold date, time and the measured value.
# Timestamps are parsed with the helper `f` defined below and sub-daily
# observations are averaged to one value per day.
#
# @param stmfile Path to an ISMN .stm file.
# @return A zoo series of daily mean values, indexed by Date.
ismn_ts <- function(stmfile) {
  # Skip the station header; keep only date, time and value columns.
  # (The old version also re-read the header for lat/lon it never used.)
  sm <- read.csv(stmfile, sep = "", header = FALSE, skip = 1)
  sm <- sm[, 1:3]
  zsm <- read.zoo(sm, index = 1:2, FUN = f, na.strings = "NaN")
  # Collapse sub-daily observations to daily means.
  aggregate(zsm, as.Date, mean)
}
# Pull the station coordinates from the header line of an ISMN .stm file.
# Header field 4 is latitude and field 5 longitude; the result is a one-row
# matrix with columns "lon" and "lat" (in that order).
ismn_latlon = function(stmfile) {
  hdr <- read.csv(stmfile, nrows = 1, header = FALSE, sep = "")
  lon <- hdr$V5
  lat <- hdr$V4
  cbind(lon, lat)
}
# Index parser for read.zoo above: combine a "YYYY/MM/DD" date string and a
# time-of-day string into a single chron date-time.
# NOTE(review): as.chron comes from the chron package -- assumed attached.
f <- function(d, t) as.chron(paste(as.Date(d,"%Y/%m/%d"),t))
|
282d85bf34c6516538d2cb6b28a36226532202ac | 5ae342134b85d1b7a0059ce8695a6836d17f5a12 | /deconv_5cell/causal_5cell_step1.R | 0731aa50cfe7f9cd118aeb9c8d85d1eda9f1480b | [] | no_license | wenrurumon/non_negative_attribution_model | 15d5e5b6b54b64cce0942e58c73fc653a37dd607 | 37780b43187697d0ec2f572796c8b91ebcef77f2 | refs/heads/master | 2020-06-17T14:10:17.332642 | 2019-10-28T10:10:35 | 2019-10-28T10:10:35 | 74,996,129 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,324 | r | causal_5cell_step1.R | ##############################
# Data Processing
##############################
# NOTE(review): rm(list=ls()) wipes the calling workspace; kept because this
# script is designed to run as a standalone batch job.
rm(list=ls())
# Batch index from the command line (one job per data slice); defaults to 1
# for interactive runs.
args <- as.numeric(commandArgs(trailingOnly=TRUE))
if(length(args)==0){args <- 1}
library(assist)
library(dHSIC)
library(fANCOVA)
library(psych)
library(bnlearn)
library(infotheo)
library(descr)
# load() injects objects into the workspace; `disease`, `raw_exp` and
# `deconv.rlt` are assumed to come from these .rda files (names inferred
# from their use below).
load("gene39761.rda")
rownames(disease) <- disease[,1]
# Dichotomise phenotype column 5: codes 4/5 -> 1, everything else -> 0.
disease[,5] <- ifelse(disease[,5]%in%c(4,5),1,0);
disease <- disease[,-1]
load('data4deconv_5cell.rda')
load('data45cell.rda')
load('rlt4causal_5cell.rda')
# Causal-test implementations; these provide permcontidANM() used below.
source('discreteANM_1.1.R')
source('contiANM_1.3.R')
source('contidANM.R')
# Drop the last three columns (the indices 0:2 - ncol are all negative),
# then sum the weights within identically named cell-type columns and
# prepend a constant reference column R = 1.
deconv.rlt <- deconv.rlt[,0:2-ncol(deconv.rlt)]
deconv.rlt <- t(apply(deconv.rlt,1,function(x){
  tapply(x,colnames(deconv.rlt),sum)
}))
deconv.rlt <- cbind(R=1,deconv.rlt)
# Outcome vector: third phenotype column (named `ad`; presumably AD status
# -- confirm against the data dictionary).
ad <- disease[,3]
###################################
# Causal to 5
###################################
# For every gene (column of raw_exp) build one matrix: the cell-type weight
# matrix scaled by that gene's expression values.
x <- apply(raw_exp,2,function(x){
  list(x * deconv.rlt)
})
x <- do.call(c,x)
# Split the gene list into 25 roughly equal chunks; keep this job's chunk.
sel <- as.numeric(cut(1:length(x),25))
x <- x[sel==args]
# Association tests between each column of `x` and a binary outcome `y`.
#
# For every column, the permutation-based ANM test (permcontidANM, sourced
# above) is run alongside an ordinary two-sample t-test; a failed t-test
# (e.g. a degenerate column) is recorded as NA instead of aborting the batch.
#
# @param x Numeric matrix; columns are tested one at a time.
# @param y Binary outcome vector (defaults to the global `ad`).
# @param nop Number of permutations passed to permcontidANM.
# @return A matrix with one column per column of `x`: the first two elements
#   of the permcontidANM result plus the t-test p-value (p_ttest).
ctest <- function(x, y = ad, nop = 100){
  # Seed on the batch index so permutation results are reproducible per job.
  set.seed(args)
  apply(x, 2, function(col){
    # `col` (rather than shadowing the outer `x`) keeps the scoping clear.
    rlt1 <- unlist(permcontidANM(col, y, nop))[1:2]
    # silent = TRUE: t.test() legitimately fails on degenerate columns, and
    # the failure is converted to NA below -- no need to print every error.
    rlt2 <- try(t.test(col ~ y)$p.value, silent = TRUE)
    if(!is.numeric(rlt2)){rlt2 <- NA}
    c(rlt1, p_ttest = rlt2)
  })
}
# Run the tests for every per-gene matrix in this job's chunk, then persist
# the results tagged with the batch index from the command line.
rlt <- lapply(x,ctest)
save(rlt,file=paste0('rlt_step1_',args,'.rda'))
|
caf479c91c8f020cc6b64a121f57d72f04ef109f | 0fd0fb1ada1b966357fe55bfab6540fc7358b62a | /R/is_even.R | 6b416621fe89a366270411db5ca5fda6424c8de6 | [
"MIT"
] | permissive | nelsonroque/ruf | f1f8598964ade0085ad9ecc77b92b99a4c544a4a | dbf86c97ce5ad03e417cd379c47a410c4dbd0566 | refs/heads/master | 2021-06-26T18:31:48.057608 | 2021-02-28T14:04:12 | 2021-02-28T14:04:12 | 206,207,103 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 150 | r | is_even.R | #' ruf
#' Test whether numbers are even
#'
#' Vectorised parity check: TRUE where `x` is evenly divisible by 2.
#'
#' @name is_even
#' @param x A numeric (or integer) vector.
#' @return A logical vector the same length as `x`; NA inputs yield NA.
#' @examples
#' is_even(4)
#' is_even(c(1, 2, 3, 4))
#' @export
is_even <- function(x) {
  x %% 2 == 0
}
47da6f1b8b361af6ae384d7b64ee40996a2a4ebf | dda4da8dd129c29db39e3524a131a0c09ba5d030 | /man/calc_pvals.Rd | 1b2ceef00789ea066d2eaab4a8fdc0d43723ff13 | [] | no_license | Euphrasiologist/VCVglmm | 86c7c6bb5f3528724ab8d7bdd0c60b2a1c0d0a66 | 799eb4cb8759d40c9603a86eb73a96aee835b4e6 | refs/heads/master | 2020-06-09T20:19:54.656186 | 2020-06-03T13:33:18 | 2020-06-03T13:33:18 | 193,499,740 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,277 | rd | calc_pvals.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_pvals.R
\name{calc_pvals}
\alias{calc_pvals}
\title{Calculating p-values for the lme4 Gaussian mixed model.}
\usage{
calc_pvals(lmermod)
}
\arguments{
\item{lmermod}{model of class lmerMod}
}
\description{
P-values are not reported for this model type because the degrees of freedom are difficult to estimate in these models, and it is not known for certain whether the t-values actually follow a t distribution.
With that in mind, this function should be used with caution. If we accept that the t-values at least approximately follow a t-distribution, then the degrees of freedom must lie between the number of levels of the highest-order grouping variable
in the data and the number of observations.
}
\details{
Please see https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html and https://stat.ethz.ch/pipermail/r-help/2006-May/094765.html for detailed information.
Personal observations are that the results from this function lie broadly in line with output from MCMCglmm, at least for simple models.
}
\examples{
# needs data.table
library(data.table)
mod <- lmer(y ~ x + (1 | z))
calc_pvals(mod)
}
\keyword{effects,}
\keyword{fixed}
\keyword{lme4,}
\keyword{p-values}
|
b8f40d609ba05b651a39cbda3f734b1f1bc32a8f | 959d194222c345f792db3ab838ce65dd2ba2c6af | /man/regressIntensity.Rd | 35871ea1de47f3cd87a2d336a63aa13dba6a0bf1 | [] | no_license | crukci-bioinformatics/qPLEXanalyzer | c5dc6275ad6bd8ce3fbb7ec94d343be633073a3c | 9780505602b31e30ee4c3c5972fecf4a35058415 | refs/heads/master | 2023-04-29T23:57:22.170033 | 2023-04-17T10:59:32 | 2023-04-17T10:59:32 | 130,681,325 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,041 | rd | regressIntensity.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regressIntensity.R
\name{regressIntensity}
\alias{regressIntensity}
\title{Regression based analysis}
\usage{
regressIntensity(MSnSetObj, ProteinId, controlInd = NULL, plot = TRUE)
}
\arguments{
\item{MSnSetObj}{MSnSet; an object of class MSnSet}
\item{ProteinId}{character; Uniprot protein ID}
\item{controlInd}{numeric; index of IgG within MSnSet}
\item{plot}{character; Whether or not to plot the QC histograms}
}
\value{
An object of class \code{MSnSet} (see \code{\link{MSnSet-class}}).
This consists of corrected protein levels. In addition, the function can
also plot histograms of correlation of target protein with all other
proteins before and after this correction.
}
\description{
Performs linear regression on protein intensities based on selected protein
(qPLEX-RIME bait)
}
\details{
This function performs a regression-based analysis of protein intensities
with respect to a selected protein. In qPLEX-RIME this method can be used to
regress out the effect of the target protein on other interactors. The function
corrects the dependency of many proteins on the target protein levels by
linear regression: it sets the target protein levels as the independent
variable (x) and each of the other proteins as the dependent variable (y).
The resulting residuals of the linear regressions y = ax + b are the protein
levels corrected for target-protein dependency.
}
\examples{
data(human_anno)
data(exp3_OHT_ESR1)
MSnSet_data <- convertToMSnset(exp3_OHT_ESR1$intensities_qPLEX1,
metadata=exp3_OHT_ESR1$metadata_qPLEX1,
indExpData=c(7:16),
Sequences=2,
Accessions=6)
MSnset_P <- summarizeIntensities(MSnSet_data, sum, human_anno)
IgG_ind <- which(pData(MSnset_P)$SampleGroup == "IgG")
MSnset_reg <- regressIntensity(MSnset_P,
controlInd=IgG_ind,
ProteinId="P03372")
}
|
d8a47ad8552cd95855348c108f94e6fcd981fa2b | 98ec6fa1d6dae1d0ea05a3df13050154b12b327a | /src/flow-edges/gephi/data/rank/rank.r | 53ab41f90b67a8ece316f309039fd5617b769270 | [] | no_license | flyeven/lattesGephi | 3472c93c56da58e26a1540b889d5a8a92980d182 | 010919e39ca49504af16b1d19789765e6d049cf4 | refs/heads/master | 2021-01-22T10:25:26.571588 | 2015-07-11T00:22:50 | 2015-07-11T00:22:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 554 | r | rank.r | rank <- read.csv("~/Documents/code/github/lucachaves/lattesGephi/src/flow-edges/gephi/data/rank/rank.csv")
# Earlier treemap experiment with the portfolio package, kept for reference:
# http://flowingdata.com/2010/02/11/an-easy-way-to-make-a-treemap/
# library(portfolio)
# map.market(id=rank$id,area=rank$grau,group=rank$group,color=rank$colour)
# Current approach: the treemap package
# https://github.com/mtennekes/treemap
# http://cran.r-project.org/web/packages/treemap/treemap.pdf
library(treemap)
# Tile area and colour are both driven by the "grau" column (presumably the
# node degree -- confirm against the upstream Gephi export).
# NOTE(review): `rank` (read above) shadows base::rank within this script.
df <- data.frame(index=rank$id,vSize=rank$grau,vColor=rank$grau)
treemap(df,
        index="index",
        vSize="vSize",
        vColor="vColor",
        type="value")
96dea306b575b015db0ec66fa560a770628355fa | 170caf49bc5cc99db5db9833fac8db34deb5631a | /funnelPlot.R | 0fd99559762f2b024357143738a5d4655f56bd80 | [] | no_license | SeunTo/aos | 51970b713a6e3dc58356b974f6fe0fa3a05c71ce | 87860320eea5eba546346b54e07547b2310c8772 | refs/heads/master | 2020-12-20T20:44:01.308975 | 2020-06-03T11:00:35 | 2020-06-03T11:00:35 | 236,205,300 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,070 | r | funnelPlot.R | #A
library(readxl)
library(dplyr)
library(forcats)
# NOTE(review): absolute, user-specific working directory -- only runs on the
# original author's machine.
setwd("C:/Users/u1552345/Dropbox/Ecology Analysis/Ecology_Analysis")
PWB <- read_excel("popWB.xlsx")
# NOTE(review): `Grp` and `Cat25` are not defined in this file; they are
# assumed to exist in the workspace from an earlier script.
F4Funnel <- Grp %>%
  left_join(PWB, by = "Countries")
FinalFinal <- F4Funnel %>%
  left_join(Cat25, by = "Countries")
# Coerce the joined columns to numeric for modelling below.
FinalFinal$Catastrophic10 <- as.numeric(FinalFinal$Catastrophic10)
FinalFinal$pop <- as.numeric(FinalFinal$pop)
FinalFinal$VHI_CHE <- as.numeric(FinalFinal$VHI_CHE)
FinalFinal$SHI_CHE <- as.numeric(FinalFinal$SHI_CHE)
FinalFinal$ExT_CHE <- as.numeric(FinalFinal$ExT_CHE)
FinalFinal$HDI <- as.numeric(FinalFinal$HDI)
# NOTE(review): FinalFun is built (rows 21 and 35 dropped) but never used
# afterwards in this file.
FinalFun <- na.omit(F4Funnel)
FinalFun <- FinalFun[-c(21,35), ]
# NOTE(review): require()+install.packages() at run time is fragile; consider
# declaring berryFunctions as a proper dependency.
if(!require("berryFunctions")) install.packages("berryFunctions")
library(tidyverse)
# Part A: berryFunctions::funnelPlot of events vs population (both in millions).
dataFunnel <- FinalFinal %>%
  select(Countries, Catastrophic10, pop) %>%
  mutate(event = Catastrophic10/1000000, number = pop/1000000)
r <- dataFunnel$event
n <- dataFunnel$number
Name <- dataFunnel$Countries
funnelPlot(x=r, n=n, labels=Name,
           at=list(cex=0.5, col="black"),
           ap=list(cex=0.7, col="black"),
           am=list(lwd = 3, col="black"),
           a2=list(lty=9, lwd = 3, col="blue"),
           a3=list(lty=5, lwd = 3, col="red"),
           ylab = "Percentage of people that spent > 10 OOP",
           xlab = "Population")
dev.copy2pdf(file="Figure5.pdf")
dev.off()
# B: FunnelPlotR funnel of observed vs Poisson-GLM-predicted counts.
data(medpar)
library(FunnelPlotR)
library(COUNT)
library(ggplot2)
FinalFinal$Countries<-factor(FinalFinal$Countries)
FinalFinal <- na.omit(FinalFinal)
# Poisson regression of catastrophic-expenditure counts on macro covariates;
# its fitted values serve as the funnel-plot denominator.
mod <- glm(Catastrophic10 ~ GDP + HDI + CHE_GDP + ExT_CHE + GGHED_CHE + SHI_CHE + VHI_CHE + pop, family="poisson", data= FinalFinal)
summary(mod)
FinalFinal$prds <- predict(mod, type="response")
a<-funnel_plot(numerator=FinalFinal$Catastrophic10, denominator=FinalFinal$prds, group = FinalFinal$Countries,
               title = 'Catastropic Expenditure Funnel plot', Poisson_limits = TRUE,
               OD_adjust = FALSE,label_outliers = TRUE, return_elements = "plot")
a
|
e394e9db49b8a6b36f92fd4730c18afe4934d0b7 | f3a5b85ba4e40e271904ea05b033f4731b3f0c0b | /Model/Others/CV/cv_xgboost_03.R | e68b822d5c7b3c1559028e4322fd15ec48b31dbc | [] | no_license | vikasnitk85/RossmannStoreSales | d8dfebe449465470caa1299e0562ad3e22989b85 | a6fa5b4d40877cbb10d850f4fd13bcacb02791a0 | refs/heads/master | 2021-04-12T08:09:27.886348 | 2017-06-16T07:07:05 | 2017-06-16T07:07:05 | 94,515,669 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,284 | r | cv_xgboost_03.R | #----------------------------------------------------------------
# Environment Set-up
#----------------------------------------------------------------
# NOTE(review): clears the entire workspace; intended for standalone runs.
rm(list=ls(all=TRUE))
gc()
options(scipen=999)
# Link DropBox
# library(RStudioAMI)
# linkDropbox()
# install.packages(c("xgboost", "forecast", "doSNOW"))
# library(forecast)
library(xgboost)
# library(doSNOW)
setwd("/home/rstudio/Dropbox/Public/Rossmann")
# cl <- makeCluster(30, type="SOCK")
# registerDoSNOW(cl)
# Tag used in all output file names for this model variant.
subversion <- "03"
startTime <- Sys.time()
print(startTime)
#----------------------------------------------------------------
# Data
#----------------------------------------------------------------
train <- read.csv("input/train.csv")
store <- read.csv("input/store.csv")
test <- read.csv("input/test.csv")
# Merge store information with train and test
train <- merge(train, store)
test <- merge(test, store)
# Modify date variables
train$Date <- as.Date(as.character(train$Date), "%Y-%m-%d")
test$Date <- as.Date(as.character(test$Date), "%Y-%m-%d")
# New Features: calendar components derived from the date
train$Year <- as.numeric(format(train$Date, "%Y"))
train$Month <- as.numeric(format(train$Date, "%m"))
train$Day <- as.numeric(format(train$Date, "%d"))
test$Year <- as.numeric(format(test$Date, "%Y"))
test$Month <- as.numeric(format(test$Date, "%m"))
test$Day <- as.numeric(format(test$Date, "%d"))
# Reorder chronologically so the holdout below is the most recent period
train <- train[order(train$Date), ]
# Remove redundant variables: the target, the raw date, and Customers
# (not available at prediction time)
y <- train$Sales
train <- train[, setdiff(names(train), c("Sales", "Date", "Customers"))]
# Encode factor columns as their integer codes so as.matrix() stays numeric
for(i in names(train)) {
  if(class(train[, i]) == "factor") {
    train[, i] <- as.numeric(train[, i])
  }
}
# Holdout sample: the last 48 * (number of stores) rows -- roughly the final
# 48 days across all stores -- as a time-based validation split
hold <- tail(1:nrow(train), 48*length(unique(train$Store)))
xgtrain <- xgb.DMatrix(as.matrix(train[-hold, ]), label = y[-hold], missing = NA)
xgval <- xgb.DMatrix(as.matrix(train[hold, ]), label = y[hold], missing = NA)
gc()
#----------------------------------------------------------------
# Model
#----------------------------------------------------------------
# Root Mean Square Percentage Error (the Rossmann competition metric) as a
# custom xgboost evaluation function.
#
# Zero-valued labels are excluded before computing the metric: they would
# divide by zero, and the competition scores only days with sales.
#
# @param preds Numeric vector of model predictions.
# @param dtrain xgb.DMatrix holding the true labels.
# @return list(metric = "RMSPE", value = <error>), the shape xgboost expects
#   from a feval function.
RMSPE <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  # Use a logical mask instead of `-which(labels == 0)`: when there are no
  # zero labels, which() returns integer(0) and negative indexing with it
  # drops *every* observation, turning the metric into NaN.
  keep <- labels != 0
  labels <- labels[keep]
  preds <- preds[keep]
  pe <- (labels - preds) / labels
  err <- round(sqrt(mean(pe^2)), 5)
  return(list(metric = "RMSPE", value = err))
}
# The eval metric is reported on both sets each iteration.
watchlist <- list('train' = xgtrain, 'val' = xgval)
param0 <- list(
  "objective"  = "reg:linear"
  , "booster"  = "gbtree"
  , "eta" = 0.25
  , "subsample" = 0.7
  , "colsample_bytree" = 0.7
  # , "min_child_weight" = 6
  , "max_depth" = 8
  # , "alpha" = 4
)
# Training progress is sink()ed to a text file and parsed back below to
# locate the best iteration (instead of using early stopping).
sink(file=paste0("cv/cv_xgboost_", subversion, ".txt"))
set.seed(2012)
model = xgb.train(
  nrounds = 1000
  , params = param0
  , data = xgtrain
  , watchlist = watchlist
  , maximize = FALSE
  , feval = RMSPE
  , print.every.n = 5
  # , nthread=4
)
sink()
#----------------------------------------------------------------
# Model
#----------------------------------------------------------------
# Extract best tree: parse the captured log into (iteration, error) pairs
tempOut <- readLines(paste0("cv/cv_xgboost_", subversion, ".txt"))
# Third ":"-separated field of each log line carries the RMSPE value
Error <- sapply(tempOut, function(x) as.numeric(unlist(strsplit(x, split=":"))[3]))
names(Error) <- NULL
modPerf <- data.frame(Error)
# First field holds "[iter] train-RMSPE"; strip the decoration to recover
# the iteration number
tree <- sapply(tempOut, function(x) unlist(strsplit(x, split=":"))[1])
names(tree) <- NULL
tree <- gsub("\\t", "", tree)
tree <- gsub("train-RMSPE", "", tree)
tree <- gsub(" ", "", tree)
tree <- gsub("]", "", tree)
tree <- gsub("\\[", "", tree)
tree <- as.numeric(tree)
modPerf$tree <- tree
# Sort so the best (lowest-error) iteration is first
modPerf <- modPerf[order(modPerf$Error, decreasing=FALSE), ]
head(modPerf)
# Score validation data at the best iteration
val_preds <- predict(model, xgval, ntreelimit = modPerf$tree[1])
# NOTE(review): xgval is rebuilt here before re-evaluating RMSPE --
# presumably because the earlier handle was consumed; confirm if needed.
xgval <- xgb.DMatrix(as.matrix(train[hold, ]), label = y[hold], missing = NA)
RMSPE(val_preds, xgval)
# Score test data (align column order with the training matrix)
test <- test[order(test$Id), names(train)]
for(i in names(test)) {
  if(class(test[, i]) == "factor") {
    test[, i] <- as.numeric(test[, i])
  }
}
xgtest <- xgb.DMatrix(as.matrix(test), missing = NA)
test_preds <- predict(model, xgtest, ntreelimit = modPerf$tree[1])
# Sales cannot be negative; clip the predictions at zero
test_preds[test_preds < 0] <- 0
sub <- read.csv("input/sample_submission.csv")
sub$Sales <- test_preds
write.csv(sub, paste0("submission/cv_xgboost_", subversion, ".csv"), row.names=FALSE)
endTime <- Sys.time()
difftime(endTime, startTime)
5d0d3eb4e1ef429e6f650746cb4594603f4d86f3 | b643e2a6c4eb2cfc314ba48629fe4b0f9917b338 | /cachematrix.R | 573d14ffddefbef265251c75e07cf795f917714c | [] | no_license | jabled/ProgrammingAssignment2 | 29825337c441e8dbd17002c59aed062b7fcfbe55 | a48bdbc2f2a1d337561fcdefa2d073dfc970084f | refs/heads/master | 2020-12-28T00:02:10.346214 | 2015-04-26T23:26:08 | 2015-04-26T23:26:08 | 34,629,293 | 0 | 0 | null | 2015-04-26T20:11:58 | 2015-04-26T20:11:57 | null | UTF-8 | R | false | false | 1,547 | r | cachematrix.R |
## makeCacheMatrix: build a cache-aware wrapper around a matrix.
## Returns a list of four closures sharing one enclosing environment:
##   set(y)       -- replace the stored matrix and drop any cached inverse
##   get()        -- return the stored matrix
##   setMat(inv)  -- store a computed inverse in the cache
##   getInv()     -- return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function (x=matrix()){
  # The cache starts empty; it is only populated via setMat()
  # (normally called by cacheSolve()).
  data_cache <- NULL
  set <- function(y) {
    x <<- y
    # A new matrix invalidates any previously cached inverse.
    data_cache <<- NULL
  }
  get <- function() {
    x
  }
  setMat <- function(inverse) {
    data_cache <<- inverse
  }
  getInv <- function() {
    data_cache
  }
  # Expose the four accessors by name.
  list(set = set, get = get, setMat = setMat, getInv = getInv)
}
## cacheSolve: return the inverse of the matrix held by a makeCacheMatrix
## wrapper.  A previously cached inverse is returned immediately (with a
## status message); otherwise the inverse is computed with solve(), stored
## back into the wrapper's cache, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getInv()
  # Fast path: a computed inverse is already sitting in the cache.
  if (!is.null(cached)) {
    message("retrieving cached data if present")
    return(cached)
  }
  # Slow path: invert the stored matrix, then memoise the result so the
  # next call hits the fast path.
  inverse <- solve(x$get(), ...)
  x$setMat(inverse)
  inverse
}
|
62edebb6b00527521f8560a69a14d88b2d66608b | c61b367db07762465c749e85ed0c933a0d2e9f5d | /Code/plot_brain_samples_staining_res.R | 000bd92068696ad1a78a6ffb7461c647f7e56480 | [] | no_license | lizhu06/TILsComparison_PBTvsMET | 08880f46b9d1a42e3f9b8e841a0edc35fd00386e | 5adec0fc526a3025ccbcd99ea34d40b3c56055ba | refs/heads/master | 2020-06-02T05:54:49.924270 | 2019-06-09T22:45:08 | 2019-06-09T22:45:08 | 191,060,883 | 7 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,242 | r | plot_brain_samples_staining_res.R | rm(list=ls())
options(stringsAsFactors = FALSE)
setwd("/net/wong05/home/liz86/Steffi/primary_vs_mets/")
library(ggplot2)
library(reshape2)
# Staining quantification for paired primary (BP*) / brain-metastasis (BM*)
# samples; only the first 51 rows x 11 columns hold usable data.
res <- read.csv("Results_v2/RawData_BrainMets_8.13.18.csv")
res <- res[1:51,1:11]
pri_id <- c("BP52", "BP72", "BP71")
mets_id <- c("BM52", "BM72", "BM71")
pair_id <- c("BP/BM52", "BP/BM72", "BP/BM71")
id <- c(pri_id, mets_id)
uni_pair_id <- rep(pair_id, 2)
uni_group_id <- c(rep("PRI", 3), rep("MET", 3))
pair_id <- uni_pair_id[match(res[,"Slide.ID"], id)] # expressed as mets name
group_id <- uni_group_id[match(res[,"Slide.ID"], id)] # primary or mets
# Marker percentages arrive as strings like "12%"; strip the sign and
# rescale to proportions.
immune_perc <- res[, c("CD20", "CD8", "CD68", "FOXP3")]
immune_perc <- do.call(cbind, lapply(1:ncol(immune_perc), function(x)
  as.numeric(gsub("%", "", immune_perc[,x]))))/100
# Absolute cell counts per marker, then each marker's share of the total
# immune count (markers sum to 1 within a slide).
immune_num <- sweep(immune_perc, 1, res[,"Number.of.Cells"], "*")
total_immune <- apply(immune_num, 1, sum)
immune_prop_rel <- sweep(immune_num, 1, total_immune, "/")
score <- as.data.frame(cbind(pair_id, group_id, res[,3], immune_prop_rel))
colnames(score) <- c("pair_id", "group_id", "Slide.ID",
  "CD20", "CD8", "CD68", "FOXP3")
save(score, file="Results_v2/brain_mets_staining_score.RData")
# Long format: one row per (slide, marker) with its relative proportion.
score2 <- melt(score, id.vars=c("pair_id", "group_id","Slide.ID"))
colnames(score2)[c(4,5)] <- c("cell", "percent")
# calculate average and SD (and convert to long data)
uni_cell_type <- as.character(unique(score2[,"cell"]))
# Pre-allocated summary matrix: one row per (slide id, cell type); columns
# are id, pair, group, marker, mean and standard error.
sum_score2 <- matrix(NA, length(id)*length(uni_cell_type), 6)
temp <- 0
for(i in 1:length(id)){
  for(j in 1:length(uni_cell_type)){
    temp <- temp + 1
    vec <- as.numeric(score2[which(score2[,"Slide.ID"]==id[i] &
      score2[,"cell"] == uni_cell_type[j]),"percent"])*100 # convert to percent
    sum_score2[temp, 1] <- id[i]
    sum_score2[temp, 2] <- uni_pair_id[i]
    sum_score2[temp, 3] <- uni_group_id[i]
    sum_score2[temp, 4] <- uni_cell_type[j]
    sum_score2[temp, 5] <- mean(vec)
    # Standard error of the mean (sd/sqrt(n)), despite the column name "sd".
    sum_score2[temp, 6] <- sd(vec)/sqrt(length(vec))
    print(j)
  }
}
df <- data.frame(ID=as.factor(sum_score2[,1]),
  Pair=as.factor(sum_score2[,2]),
  Group=factor(sum_score2[,3], levels=c("PRI","MET")),
  Marker=as.factor(sum_score2[,4]), mean=as.numeric(sum_score2[,5]),
  sd=as.numeric(sum_score2[,6]))
#df$Region <- factor(rep("tumor", nrow(df)), levels=c("tumor","stroma"))
#df$Region[sapply(1:length(df$MarkerLong), function(x)
#  grepl("stroma", df$MarkerLong[x]))] <- "stroma"
#df$Marker <- factor(sapply(1:length(df$MarkerLong), function(x)
#  strsplit(as.character(df$MarkerLong[x]), split="\\.")[[1]][1]))
levels(df$Marker)
# Reproduce ggplot2's default discrete palette: n equally spaced hues around
# the HCL colour wheel at fixed luminance (65) and chroma (100).
#
# @param n Number of colours to generate (non-negative integer).
# @return Character vector of n hex colours; empty when n = 0 (the old
#   1:n indexing returned a spurious colour in that case).
gg_color_hue <- function(n) {
  # n + 1 hues are generated because 15 and 375 degrees are the same hue;
  # the duplicate endpoint is discarded by keeping only the first n.
  # (length.out spelled out: `length` relied on partial argument matching.)
  hues <- seq(15, 375, length.out = n + 1)
  hcl(h = hues, l = 65, c = 100)[seq_len(n)]
}
n = 8
cols = gg_color_hue(n)
#names(cols) <- c("Bcell1", "Bcell2", "macro0", "macro2", "macro3",
#  "cd8", "treg", "tumor")
# Keep only the palette entries used for the four plotted markers.
cols_sel <- cols[c(1, 3, 6, 7)]
## plot
# Paired line plot: mean +/- SE per marker, primary (PRI) vs metastasis
# (MET), faceted into one panel per patient pair.
p <- ggplot(df, aes(x=Group, y=mean, color=Marker, group=Marker)) +
  geom_line() +
  geom_point()+
  geom_errorbar(aes(ymin=mean-sd, ymax=mean+sd), width=.2,
                position=position_dodge(0.05))+
  scale_color_manual(values=cols_sel) +
  theme(text = element_text(size=10))
p <- p + labs(y="percent", x="")
p <- p + facet_grid(. ~ Pair)
pdf(file="Results_v2/Brain_staining_res_rel_totalImmune.pdf",
    width=4, height=2)
print(p)
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.