blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1b90bf31caab396bb7d61d48cb692e0ff96aa053 | 15bbda8a2fadb7f1bb3364adca9f7bea0323a787 | /R/rNCV.R | 16ff208ca30762ddca1c7bf53073febf258898cd | [] | no_license | kforthman/caretStack | 39c45517346e4a91931448413bf297c75364b821 | 968416f10b798634257da2e61cc53b00fa39614f | refs/heads/master | 2021-08-16T00:14:54.603504 | 2021-06-18T17:15:21 | 2021-06-18T17:15:21 | 195,125,995 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 6,772 | r | rNCV.R | #' Repeated, Nested Cross-Validation
#'
#' Supports classification and regression.
#' Note: only continuous variables are expected to be used as predictors. It is assumed that there are a sufficient number of subjects in each category.
#'
#' @param data The data frame containing the training set.
#' @param nRep Number of times nCV is repeated.
#' @param nFolds.outer Number of outer folds
#' @param dir.path Directory where the CV data is stored.
#' @param file.root Prefix for the CV filenames.
#' @param stack.method Method used to combine the base learners (default
#'   \code{'wt.avg'}); set to \code{'none'} to skip stacking. Passed to
#'   \code{PredVal}.
#' @param weighted.by Passed to \code{PredVal}; controls how the stacking
#'   weights are computed (TODO: document precisely).
#' @param stack.wt Optional pre-specified stacking weights, passed to
#'   \code{PredVal} (TODO: document precisely).
#' @param control.stack Optional control settings for fitting the stacking
#'   model, passed to \code{PredVal} (TODO: document precisely).
#' @param save.PredVal Binary. Would you like to save the output from the PredVal function?
#' @inheritParams caretModels
#' @inheritParams PredVal
#' @importFrom dplyr do
#' @importFrom plyr ddply
#' @export
rNCV <- function(data, resp.var, ref.lv=NULL, nRep, nFolds.outer, methods,
                 trControl, tuneLength, preProcess, metric, dir.path, file.root,
                 stack.method='wt.avg', weighted.by=NULL, stack.wt=NULL, control.stack=NULL, save.PredVal = FALSE){
  ptm <- proc.time()
  # Response levels: factor levels (classification), unique values for a
  # character response, or the single pseudo-level 'pred' (regression).
  if (is.factor(data[, resp.var]))
  { resp.lv = levels(data[, resp.var])
  } else if (is.character(data[, resp.var]))
  { resp.lv = unique(data[, resp.var])
  } else { resp.lv = 'pred' }
  # One parallel task per repeat; comb_rep() (defined elsewhere in the
  # package) merges the per-repeat result lists.
  res <- foreach(r=1:nRep, .combine=comb_rep, .packages='caret') %dopar% {
    index.outer <- createFolds(data[, resp.var], k=nFolds.outer, list = FALSE)
    weight <- perf.by.fold <- var.imp <- perf.train <- perf.test <- NULL;
    stack.model <- list()
    # Out-of-fold predictions for the whole data set, one column per level.
    y.pred.comb <- matrix(NA, nrow(data), length(resp.lv))
    colnames(y.pred.comb) <- resp.lv
    # Outer loop: partition into nFolds.outer calibration/hold-out splits
    # (left panel of flow chart).
    for(k.outer in seq_len(nFolds.outer)) {
      calib <- data[index.outer!=k.outer, ]    # calibration (training) set
      test.set <- data[index.outer==k.outer, ] # hold-out (testing) set
      # Impute missing data (k-nearest-neighbour imputation, DMwR).
      if (anyNA(calib)){
        calib <- knnImputation(calib)
      }
      if (anyNA(test.set)){
        test.set <- knnImputation(test.set)
      }
      # Inner loop
      # Step 1. Build base learners on the calibration set (upper right in
      # flow chart); optionally persist one .rda file per repeat x fold.
      models <- caretModels(calib, resp.var, trControl, preProcess, tuneLength, metric, methods)
      if (!is.null(dir.path) && !is.null(file.root)){
        save(models,
             file = paste0(dir.path, resp.var, '_', file.root, '_Rep_', r, '_fold_', k.outer, '.rda'))
      }
      # Step 2. Predicted values/probabilities on the hold-out set; PredVal()
      # also combines the base learners (stacking) when requested.
      pred.val <- PredVal(models, test.set, resp.var, ref.lv, stack.method,
                          weighted.by, stack.wt, control.stack, tuneLength)
      if(save.PredVal){
        save(pred.val,
             file = paste0(dir.path, resp.var, '_', file.root, '_Rep_', r, '_fold_', k.outer, '-PredVal.rda'))
      }
      if (length(methods)>1 && !stack.method %in% c('none'))
      { stack.model[[k.outer]] <- pred.val$stack.model
        weight <- rbind(weight, data.frame(Rep = r, fold = k.outer, t(pred.val$weight)))
      }
      ## collect predicted values/probabilities across folds ##
      if (length(methods)==1){
        y.pred.comb[index.outer==k.outer, ] <- as.matrix(pred.val$prediction$test[[1]][, resp.lv])
      } else {
        y.pred.comb[index.outer==k.outer, ] <- as.matrix(pred.val$prediction$test$Stack[, resp.lv])
      }
      # Step 3. Model performance in the calibrating & hold-out sets of the
      # outer loop.
      perf.t.tmp <- lapply(pred.val$prediction$train, function(x) ddply(x, .(Resample), modelPerf, trControl = trControl))
      perf.train <- do.call(rbind, perf.t.tmp)
      perf.test <- data.frame(modelPerf.summ(pred.val$prediction, trControl)$test)
      if (length(methods)==1){
        rownames(perf.test) <- methods
        # BUG FIX: perf.v.tmp was previously left unassigned in the
        # single-method branch, so the rbind() below errored with
        # "object 'perf.v.tmp' not found" on the first fold.
        perf.v.tmp <- perf.test
      } else if (length(methods)>1){
        perf.v.tmp <- perf.test[rownames(perf.test)=='Stack', ]
      }
      perf.by.fold <- rbind(perf.by.fold,
                            data.frame(Rep = r, fold = k.outer,
                                       method = rownames(perf.v.tmp), perf.v.tmp))
      # Step 4. Variable importance.
      if (length(methods)==1){
        # NOTE(review): this branch rbinds raw varImp() output without
        # Rep/fold columns, unlike the multi-method branch -- confirm the
        # downstream renaming (names(res$var.imp)[4]) for single-method use.
        var.imp <- rbind(var.imp, varImp(models[[1]]))
      } else {
        var.imp <- rbind(var.imp,
                         data.frame(
                           Rep = r,
                           fold = k.outer,
                           VarImp(models, 'Stack', weight=pred.val$weight)[, c('variable','Stack')]))
      }
    }
    if (!is.null(dir.path) && !is.null(file.root)){
      save(stack.model, file=
             paste0(dir.path, resp.var, '_', file.root, '_stack.model_Rep_', r, '.rda'))
    }
    # Per-repeat performance of the combined out-of-fold predictions.
    if ('pred' %in% resp.lv){
      df.comb <- data.frame(obs = data[, resp.var], y.pred.comb)
    } else if (!'pred' %in% resp.lv){
      df.comb <- data.frame(y.pred.comb)
      df.comb$pred <- factor(resp.lv[apply(df.comb[, resp.lv], 1, which.max)],
                             levels = levels(data[, resp.var]))
      df.comb$obs <- data[, resp.var]
    }
    perf.comb <- modelPerf(df.comb, trControl)
    # Strip the ".<fold>" suffix that rbind() appended to the row names.
    perf.train$method <- gsub("\\..*", "", rownames(perf.train) )
    perf.test$method <- gsub("\\..*", "", rownames(perf.test) )
    return(list(index.outer = index.outer,
                stack.wt = weight , y.pred.comb = y.pred.comb,
                perf.by.fold = perf.by.fold, perf.comb = perf.comb,
                perf.train = perf.train, perf.test = perf.test,
                var.imp = var.imp))
  }
  if (nRep>1){ colnames(res$index.outer) <- paste0('Rep', 1:nRep) }
  names(res$var.imp)[4] <- 'importance'
  # Ensemble across repeats: average the per-repeat out-of-fold predictions.
  if ('pred' %in% resp.lv){
    colnames(res$y.pred.comb) <- paste0('Rep', 1:nRep)
    df.ensemble <- data.frame(obs = data[, resp.var], pred = rowMeans(res$y.pred.comb))
  } else if (!'pred' %in% resp.lv){
    suppressWarnings(
      colnames(res$y.pred.comb) <- levels(interaction(resp.lv, paste0('Rep', 1:nRep)))
    )
    df.ensemble <- setNames(data.frame(matrix(NA, nrow(res$y.pred.comb), length(resp.lv))), resp.lv)
    for (j in resp.lv){
      # drop = FALSE so rowMeans() still receives a matrix when nRep == 1
      # (a single matching column would otherwise collapse to a vector and
      # rowMeans() would error).
      # NOTE(review): grep() here assumes no response level is a substring of
      # another -- confirm for the intended response factors.
      df.ensemble[, j] <- rowMeans(res$y.pred.comb[, grep(j, colnames(res$y.pred.comb)), drop = FALSE])
    }
    df.ensemble$pred <- factor(resp.lv[apply(df.ensemble[, resp.lv], 1, which.max)], levels = levels(data[, resp.var]))
    df.ensemble$obs <- data[, resp.var]
  }
  res$perf.ensemble <- modelPerf(df.ensemble, trControl)
  res$elapsed.time <- (proc.time() - ptm)[3]  # elapsed wall-clock seconds
  return(res)
}
|
c5797c2e67f89e8a01a8b503bdfc31127dd5b739 | fe7ed525e7945c7c29fcf99f345d2ca66dd8f114 | /dwr_07_regular_expressions.R | a07bdc026f5ff8afa50d41ddbfc03295ead5d25d | [] | no_license | TCornulier/DataWranglingInR | 1b52a90c8d1a0f6f1149203cab35811c2e55a68d | c5c6ae61ff7143a1073bef8c64ae4d43e728f67f | refs/heads/master | 2023-02-19T05:08:21.517597 | 2021-01-21T21:44:16 | 2021-01-21T21:44:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,232 | r | dwr_07_regular_expressions.R | #' ---
#' title: "Data Wrangling in R: Regular Expressions"
#' author: "Clay Ford"
#' date: "Spring 2016"
#' output: pdf_document
#' ---
setwd("../data")
load("datasets_L06.Rda")
# This lecture uses the following packages:
# install.packages("stringr")
library(stringr)
# install.packages("qdapRegex")
library(qdapRegex)
# Intro -------------------------------------------------------------------
# A 'regular expression' is a pattern that describes a set of strings.
# Examples:
# - all 5-digit numbers in a document
# - all 5-digit numbers ending in 00
# - words spelled in ALL CAPS
# - words in brackets or delimiters [],<>,(),{}
# - words at the end of a sentence
# - all email addresses
# - dates in a certain format
# These are examples of string patterns. Regular Expressions are the language we
# use to describe the pattern. You should know, however, regular expressions are
# a language into itself. There are entire books devoted to regular expressions.
# Quote floating around internet: "Some people, when confronted with a problem,
# think 'I know, I'll use regular expressions.' Now they have two problems."
# Regular expressions can be tricky to get right, especially for complex
# patterns.
# We will only dabble in regular expressions. Key lesson: recognize when you
# need a regular expression and know enough to cobble one together using your
# knowledge, wits and Google.
# Two PDF files you may want to download and save for reference:
# http://biostat.mc.vanderbilt.edu/wiki/pub/Main/SvetlanaEdenRFiles/regExprTalk.pdf
# http://gastonsanchez.com/Handling_and_Processing_Strings_in_R.pdf
# Good pages to print off/bookmark:
# http://www.cheatography.com/davechild/cheat-sheets/regular-expressions/
# http://regexlib.com/CheatSheet.aspx
# or just Google "regex cheatsheet"
# Good library book:
# Go to virgo, search for "Regular expressions cookbook"
# RegEx tutorials:
# http://www.rexegg.com/
# http://www.regular-expressions.info/
# Regular Expression Basics -----------------------------------------------
# Regular expressions are composed of three components:
# (1) literal characters
# (2) modifiers (or metacharacters)
# (3) character classes
# (1) LITERAL CHARACTERS
# These are the literal characters you want to match. If you want to find the
# word "factor", you search for "factor", the literal characters.
# (2) MODIFIERS
# Modifiers define patterns;
# meet the modifiers:
# $ * + . ? [ ] ^ { } | ( ) \
# precede these with double backslash (in R!) if you want to treat them as
# literal characters.
# ^ start of string
# $ end of string
# . any character except new line
# * 0 or more
# + 1 or more
# ? 0 or 1
# | or (alternative patterns)
# {} quantifier brackets: exactly {n}; at least {n,}; between {n,m}
# () group patterns together
# \ escape character (needs to be escaped itself in R: \\)
# [] character class brackets (not to be confused with R's subsetting brackets!)
# (3) CHARACTER CLASSES
# a range of characters to be matched;
# placed in brackets: []
# For example: [a-q] means all letters from a - q;
# [a-zA-Z] means all alphabetic characters;
# [0-9A-Za-z] means all alphanumeric characters;
# The ^ symbol means "not" when used in brackets, so [^abc] means "Not (a or b
# or c)"
# From R documentation: "Because their interpretation is locale- and
# implementation-dependent, character ranges are best avoided." Good advice if
# you're sharing R code. Otherwise, fine to use on your own.
# PREDEFINED CHARACTER CLASSES
# [:lower:] - Lower-case letters in the current locale. [a-z]
#
# [:upper:] - Upper-case letters in the current locale. [A-Z]
#
# [:alpha:] - Alphabetic characters: [:lower:] and [:upper:]. [a-zA-Z]
#
# [:digit:] - Digits: 0 1 2 3 4 5 6 7 8 9. [0-9]
#
# [:alnum:] - Alphanumeric characters: [:alpha:] and [:digit:]. [0-9A-Za-z]
#
# [:punct:] - Punctuation characters: ! " # $ % & ' ( ) * + , - . / : ; < = > ?
# @ [ \ ] ^ _ ` { | } ~.
#
# [:graph:] - Graphical characters: [:alnum:] and [:punct:].
#
# [:blank:] - Blank characters: space and tab, and possibly other
# locale-dependent characters such as non-breaking space.
#
# [:space:] - Space characters: tab, newline, vertical tab, form feed, carriage
# return, space and possibly other locale-dependent characters.
#
# [:print:] - Printable characters: [:alnum:], [:punct:] and space.
# Note that the brackets in these class names are part of the symbolic names,
# and must be included in addition to the brackets delimiting the bracket list!
# More regex codes! (Yay! More stuff!) Be sure to escape that backslash!
# \b - Word boundary
# \d - any decimal digit
# \w - any word character
# \s - any white-space character
# \n - a New line
# see ?regex for an indepth overview of regular expressions.
# RegEx examples ----------------------------------------------------------
# Let's create some sample text to demonstrate regular expressions:
someText <- c(" here's a sentence",
              "This is me typing at 2:02 in the morning",
              "Back in 1995 I was only 22.",
              "You saw 4 monkeys?",
              "There are 10 kinds of people, those that understand binary
              and the other 9 that don't care",
              "Who likes pancakes? I do. I really really like pancakes!",
              " <strong>Bolded text is bold and daring</strong>",
              "figure1.jpg", "cover.jpg", "report.pdf", "log.txt",
              "I'm a one man wolfpack and I weigh 222",
              "OMG, a 3-eyed cyclops!!!",
              "2112 is an awesome album.",
              "2222 is my PIN")
someText

# Examples of SUPER BASIC regex patterns:
# (value=T returns the matching elements themselves instead of their indices)

# find elements in vector beginning with 1 or more spaces
grep("^ +", someText, value=T)
grep("^[[:blank:]]+", someText, value=T)
# find elements containing a question mark; need to "escape" the "?"
grep("\\?", someText, value=T)
# find elements ending with a question mark
grep("\\?$", someText, value=T)
# find elements containing one or more numbers
grep("[0-9]+", someText, value=T)
grep("[[:digit:]]+", someText, value=T)
# find elements containing numbers with 2 digits
grep("[0-9]{2}", someText, value=T)
grep("[[:digit:]]{2}", someText, value=T)
# text ending with .jpg; need to escape the "."
grep("\\.jpg$", someText, value=T)
# text ending with a 3-character file extension
grep("\\.[[:alpha:]]{3}$", someText, value=T)
grep("\\.\\w{3}$", someText, value=T)
# text beginning with only letters, and containing only letters, ending in .jpg
grep("^[a-zA-Z]+\\.jpg", someText, value=T)
grep("^[[:alpha:]]+\\.jpg", someText, value=T)
# text containing two consecutive "really "
grep("(really ){2}",someText, value=T)
# text containing two or more !
grep("!{2,}",someText, value=T)
# Contraction beginning with 3 letters
# (the \\b word-boundary version is more robust than a literal space)
grep(" [[:alpha:]]{3}'", someText, value = T)
grep("\\b[[:alpha:]]{3}'", someText, value = T)
# text with 3-character words
grep("\\b\\w{3}\\b", someText, value = T)
# text with 3-character words but no file names
# ([^[:punct:]] requires a non-punctuation character after the word,
# so "log.txt"-style names are excluded)
grep("\\b\\w{3}\\b[^[:punct:]]", someText, value = T)
# text with ALL CAPS (two or more CAPS)
grep("\\b[[:upper:]]{2,}\\b", someText, value = T)
# text with a new line
grep("\\n", someText, value = T)
# matching 0 or more times
# ("2*2" = zero or more 2s followed by a 2, i.e. any string containing a 2)
grep("2*2", someText, value = T)
# matching 1 or more times
# ("2+2" = one or more 2s followed by a 2, i.e. at least "22")
grep("2+2", someText, value = T)

# Search/Replace with RegEx -----------------------------------------------

# Recall sub() and gsub() functions. These perform replacement of the first and
# all matches respectively. In a previous lecture we used them to search/replace
# literal strings. Now let's use them with regular expressions. A few examples:

# Replace Repeated Whitespace with a Single Space
gsub(" +"," ", someText)
gsub("\\s+"," ",someText) # removes \n!

# Trim Leading and Trailing Whitespace:
gsub("^ +| +$","", someText)
# Or better yet, just use the built-in function
trimws(someText)

# Replace a new line with a space
gsub("\\n"," ",someText)

# Remove HTML/XML tags (basic)
# "<" followed by anything but ">" and ending with ">"
gsub("<[^>]*>","",someText)
# Or better yet, just use the qdapRegex function rm_angle()
rm_angle(someText)

# Extract with RegEx ------------------------------------------------------

# The base R functions regexpr() and gregexpr() along with regmatches() can be
# used to extract character matches, but I find the str_extract() and
# str_extract_all() in the stringr package to be easier and faster to use.
# str_extract() extracts first piece of a string that matches a pattern while
# str_extract_all() extracts all matches. A few examples:

# Extract one- or two-digit numbers:
# first match
str_extract(someText, "[0-9]{1,2}")
# all matches; returns a list
str_extract_all(someText, "[0-9]{1,2}")
# can use the base R unlist() function to get just the numbers in a
# vector:
unlist(str_extract_all(someText, "[0-9]{1,2}"))

# Extract a string that contains a . followed by 3 lower-case letters (file
# extensions)
str_extract(someText,"\\.[a-z]{3}")
# just the file extensions without a period (not very elegant but works)
str_extract(someText,"(jpg|tif|pdf|txt)$")

# Extract text beginning with only letters, and containing only letters,
# ending in .jpg
str_extract(someText, "^[a-z]+\\.jpg")
# to get just the text (drop non-matching NA elements)
tmp <- str_extract(someText, "^[a-z]+\\.jpg")
tmp[!is.na(tmp)]
# Web scraping ------------------------------------------------------------
# Regular Expressions can be very helpful when doing web scraping. Let's scrape
# some data and demonstrate. Simply give a URL as an argument to the readLines()
# function. The readLines() function reads in text lines. The following reads in
# the HTML code of a web page into a single vector.

# 113th Congress Senate Bills: first 100 results.
# NOTE(review): thomas.loc.gov has since been retired in favour of
# congress.gov -- this URL presumably no longer resolves; verify before reuse.
senate_bills <- readLines("http://thomas.loc.gov/cgi-bin/bdquery/d?d113:0:./list/bss/d113SN.lst:")

# Notice senate_bills is a vector, not a data frame. Each element of text
# corresponds to one line of HTML code:
senate_bills[1:10]

# We'd like to create a data frame that includes bill number, bill title,
# sponsor, and number of co-sponsors.

# In the HTML we see that bill number, title, and sponsor are in lines that
# begin like this: "<p><b>15.</b>". We can use regular expressions to find all
# 1-3 digit numbers followed by a period and </b>.

# grep() can find the indices of such patterns,
k <- grep("[0-9]{1,3}\\.</b>", senate_bills)
k[1:4]
# Use k to subset the data
temp <- senate_bills[k]
head(temp)
tail(temp)

# Now replace the HTML tags with space
temp <- gsub("<[^>]*>", " ",temp)
head(temp)
tail(temp)

# break vector elements by ":"; strsplit() returns a list with one
# character vector per element
temp <- strsplit(temp,":")
# Let's see what we have so far:
head(temp)

# To get the bill numbers we can pull out the first element of each list
# component as follows:
bill <- sapply(temp,function(x)x[1])
head(bill)

# Now we can use str_extract() to pull out the bill numbers. I've decided to
# keep the "S":
bill <-str_extract(bill, "S\\.[0-9]{1,3}")
head(bill)

# Now let's get the bill title. It's in the second element.
temp[[1]]
# pull out second element of each list component
title <- sapply(temp,function(x)x[2])
title[1:4]
# get rid of " Sponsor" at end
title <- gsub(" Sponsor$","",title)
# get rid of leading and trailing spaces
title <- trimws(title)
head(title)

# Now get the bill sponsor. It's in the third element.
temp[[1]]
sponsor <- sapply(temp,function(x)x[3])
sponsor <- trimws(sponsor) # get rid of leading spaces
head(sponsor)

# Get number of cosponsors by first finding those vector elements that contain
# the string "Cosponsors". Have to be careful; not all bills have Cosponsors
# (ie, Cosponsors (None) ) but all have the word "Cosponsors".
k <- grep("Cosponsors",senate_bills)
# subset vector to contain only those matching elements
temp <- senate_bills[k]
head(temp)

# Now extract number of cosponsors; either None or a 1-2 digit number.
# ([[:alnum:]]{1,4} in parentheses covers both "None" and the digits)
cosponsors <- str_extract(temp, pattern = "\\([[:alnum:]]{1,4}\\)")
# Get rid of parentheses
cosponsors <- gsub(pattern = "[\\(|\\)]", replacement = "", cosponsors)
# Replace "None" with 0 and convert to numeric
cosponsors <- as.numeric(gsub("None",0,cosponsors))
summary(cosponsors)

# And finally create data frame
senate_bills <- data.frame(bill, title, sponsor, cosponsors,
                           stringsAsFactors = FALSE)
head(senate_bills)
# What if we wanted to do this for all results? We have to iterate through the URLs.

# http://thomas.loc.gov/cgi-bin/bdquery/d?d113:0:./list/bss/d113SN.lst:[[o]]&items=100&
# http://thomas.loc.gov/cgi-bin/bdquery/d?d113:100:./list/bss/d113SN.lst:[[o]]&items=100&
# http://thomas.loc.gov/cgi-bin/bdquery/d?d113:200:./list/bss/d113SN.lst:[[o]]&items=100&
# ...
# http://thomas.loc.gov/cgi-bin/bdquery/d?d113:3000:./list/bss/d113SN.lst:[[o]]&items=100&

# We also may want to create a data frame in advance to store the data
# (preallocating 3020 rows = 30 pages of 100 plus a final page of 20)
SenateBills <- data.frame(bill=character(3020), title=character(3020),
                          sponsor=character(3020),
                          cosponsors=numeric(3020),
                          stringsAsFactors = FALSE)

# Now cycle through the URLS using the code from above. I suppose ideally I
# would determine the upper bound of my sequence (3000) programmatically, but
# this is a one-off for the 113th congress so I'm cutting myself some slack.
# Each iteration repeats the single-page scraping steps demonstrated above.
for(i in seq(0,3000,100)){
  senate_bills <- readLines(paste0("http://thomas.loc.gov/cgi-bin/bdquery/d?d113:",i,":./list/bss/d113SN.lst:"))
  # bill number
  k <- grep("[0-9]{1,3}\\.</b>", senate_bills)
  temp <- senate_bills[k]
  temp <- gsub("<[^>]*>", " ",temp)
  temp <- strsplit(temp,":")
  bill <- sapply(temp,function(x)x[1])
  bill <- str_extract(bill, "S\\.[0-9]{1,4}") # need to increase to 4 digits
  # title
  title <- sapply(temp,function(x)x[2])
  title <- gsub(" Sponsor$","",title)
  title <- trimws(title)
  # sponsor
  sponsor <- sapply(temp,function(x)x[3])
  sponsor <- trimws(sponsor)
  # cosponsors
  k <- grep("Cosponsors",senate_bills)
  temp <- senate_bills[k]
  cosponsors <- str_extract(temp, pattern = "\\([[:alnum:]]{1,4}\\)")
  cosponsors <- gsub(pattern = "[\\(|\\)]", replacement = "", cosponsors)
  cosponsors <- as.numeric(gsub("None",0,cosponsors))
  # add to data frame; row offsets are computed from i, so each page of
  # results fills its own 100-row slice (20 rows on the final page)
  rows <- (i+1):(i+length(k))
  SenateBills[rows,] <- data.frame(bill, title, sponsor, cosponsors, stringsAsFactors = FALSE)
}
# For another web scraping tutorial of mine, see:
# https://github.com/UVa-R-Users-Group/meetup/tree/master/2014-10-07-web-scraping
# The rvest package by Hadley Wickham allows you to "Easily Harvest (Scrape) Web
# Pages":
# http://blog.rstudio.org/2014/11/24/rvest-easy-web-scraping-with-r/
# The XML package also has some functions for converting HTML tables to data
# frames.
# RegEx within data frames ------------------------------------------------
# Recall our allStocks data. We wanted to add a column indicating which stock
# each row belongs to. We can use gsub() and regular expressions to easily
# do this.
head(allStocks)

# Notice the row name contains the name of the stock. We can extract the row
# names and formally add them to the data frame using the rownames() function.

# first three row names:
rownames(allStocks)[1:3]

# extract all row names and add to data frame:
allStocks$Stock <- rownames(allStocks)
head(allStocks)

# Let's reset the row names:
rownames(allStocks) <- NULL
head(allStocks)

# Now we find the pattern "\\.csv\\.[0-9]{1,3}" and replace with nothing. Recall
# that "." is metacharacter that has to be escaped. [0-9]{1,3} translates to all
# numbers ranging from 0 - 999.
allStocks$Stock <- gsub(pattern = "\\.csv\\.[0-9]{1,3}",
                        replacement = "",
                        allStocks$Stock)

# and let's make our new variable a factor:
allStocks$Stock <- factor(allStocks$Stock)
head(allStocks)
tail(allStocks)
summary(allStocks$Stock)

# While we're at it, let's fix the Date. (Currently a factor.)
# %d-%b-%y = day, abbreviated month name, 2-digit year (e.g. "03-Jan-14")
allStocks$Date <- as.Date(allStocks$Date, format="%d-%b-%y")

# And just for fun, graph closing price over time for all stocks one on graph:
library(ggplot2)
ggplot(allStocks, aes(x=Date, y = Close, color=Stock)) + geom_line()
# Now let's finish cleaning up the 2012 election data!
names(electionData)

# I want to drop everything to the right of the "NA.34 NA" column. Frankly I'm
# not sure what those columns contain.

# Get the column number of the column with header "NA.34 NA"
# (the unescaped "." matches any character here, which is harmless because
# only the intended header matches)
fir <- grep("NA.34 NA", names(electionData))
fir
# get the column number of the last column in the data frame
las <- ncol(electionData)
las
# Now subset the data frame; keep all columns except 72-82
electionData <- electionData[,-c(fir:las)]

# drop columns with names of "NA.1, NA.2, etc"; these are proportions. I can
# always derive them later if I want them.
ind <- grep("NA\\.[0-9]{1,2}", names(electionData))
ind
electionData <- electionData[,-ind]

# and some final clean up
names(electionData)[3] <- "Total.Popular.Vote"
names(electionData)[5] <- "Elec Vote R"
electionData$"Pop Vote D" <- NULL
rownames(electionData) <- NULL

# still some lingering character columns
which(sapply(electionData, is.character))
# convert to numeric (sapply over the columns returns a matrix, which is
# assigned back into the same column positions)
electionData[,2:6] <- sapply(electionData[,2:6], as.numeric)

# Now our election data contains only number of votes.
# Another extended example ------------------------------------------------
# Let's add Occupation names to the arrests data. Recall that the Occup columns
# contains numeric codes for Occupations. I'd like to make that column a factor
# where the codes are associated with levels that define the code number.
arrests$Occup[1:5]

# First we read in a document that contains Occupation code numbers and the
# occupation name. I created this from the codebook that accompanied this data.
oc <- readLines("../data/00049-Occupation-codes.txt", warn=FALSE)

# trim whitespace
oc <- trimws(oc)

# Notice all code numbers are in the first three positions. Let's use stringr
# for the str_extract() function. Notice we need to convert to integer to match
# the integer codes in the arrests data frame.
codeNums <- as.integer(str_extract(string = oc, pattern = "^[0-9]{3}"))

# Getting the code names is a little harder. There are probably a dozen
# different ways to proceed from this point on, but here's how I decided to do
# it. Basically extract everything except numbers.
codeNames <- trimws(str_extract(string = oc, pattern = "[^[:digit:]]+"))
head(codeNames)
tail(codeNames)

# Now I can make Occup a factor with levels equal to codeNums and labels equal
# to codeNames. I'm going to make a new column so we can compare to the original
# column. (Codes without a matching level become NA.)
arrests$Occup2 <- factor(arrests$Occup, levels = codeNums, labels = codeNames)

# some quick counts; they seem to match our source file
head(summary(arrests$Occup2))
tail(summary(arrests$Occup2))

# Apparently there are no codes in the data for Cannot Read Film (997) or None
# listed (998) despite them being listed in the code book.
nrow(subset(arrests, Occup %in% c(997,998)))

# Which codes are we using that don't have matches in the data?
setdiff(codeNums,arrests$Occup)
# 174 = secret society; codebook reports 0 so that makes sense.

# Which codes are in the data that we don't have matches for in codeNums?
setdiff(arrests$Occup, codeNums)
# 1, 2, and 178 are not listed in the codebook!
k <- setdiff(arrests$Occup, codeNums)
head(subset(arrests, Occup %in% k, select = c("Occup","Occup2")))
nrow(subset(arrests, Occup %in% k))
# 403 records with Occup code that doesn't match codebook

# Bottom line: this data, as provided by ICPSR, is a bit dirty.
# qdapRegex package -------------------------------------------------------
# The qdapRegex package has some pre-defined functions for Regular Expression
# Removal, Extraction, and Replacement. Let's explore some of them by way of an
# example.

# I have a text file of studio albums by the Canadian rock band, Rush. Read it
# in:
rushSA <- readLines("rush_albums.txt")
rushSA

# I'd like to make a data frame with two columns: album title and year released.
# We'll use qdapRegex functions to do this.

# First let's trim the white space:
rushSA <- trimws(rushSA)

# The qdapRegex package has a function called rm_between() that will
# Remove/Replace/Extract Strings Between 2 Markers. I want to use it to extract
# album release year between parentheses. Note I have to use the extract=TRUE
# argument:
year <- rm_between(rushSA, left = "(", right=")", extract=TRUE)
# That returns a list; I can use unlist() to make it a vector:
year <- unlist(year)
year

# I need to remove the string ", cover album". Could use gsub() to find and
# replace with nothing, but a more general approach would be to extract all
# numbers. Remember the tidyr helper function, extract_numeric?
# NOTE(review): tidyr::extract_numeric() has since been deprecated in favour
# of readr::parse_number() -- confirm against the tidyr version in use.
year <- tidyr::extract_numeric(year)
year

# Now get the album titles; this time use the rm_between() function without the
# extract=TRUE argument. This removes everything between the (), including the
# parentheses.
album <- rm_between(rushSA, left = "(", right=")")
album

# And now our data frame:
rushStudioAlbums <- data.frame(year, album)
head(rushStudioAlbums)

# There is also a package called stringi that bills itself as "THE string
# processing package for R". As I understand it, stringr is wrapper for stringi.
# Learn more :http://www.rexamine.com/resources/stringi/

# save data for next set of lecture notes
save(list=c("electionData", "weather", "arrests", "allStocks", "popVa","airplane",
            "SenateBills"), file="../data/datasets_L07.Rda")
|
d78409f514e50a3e7d2dd69a68116640402b26cd | 29542f41da364e5be61af302be09c004164a5f5a | /Rscript/02_Total_predation.R | b3982182daec8b2a72c5b69561458b36ef7bfee8 | [] | no_license | KatkaSam/CATEX_BABE | 4213d7414e8074a38b530693acc2255e802d643f | 50d9cac9274595923bb4c1ddda2d9332f22a70e9 | refs/heads/master | 2023-07-05T14:35:13.470705 | 2021-08-18T14:01:13 | 2021-08-18T14:01:13 | 339,658,149 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,995 | r | 02_Total_predation.R | #----------------------------------------------------------#
#
#
# CATEX BABE experiment
#
# Total predation analyses
#
# Katerina Sam 2021
#
#----------------------------------------------------------#
#----------------------------------------------------------#
# 3. Exploratory graphs -----
#----------------------------------------------------------#
# see data
summary(dataset_catex)
# prepare proportional data for graphs:
# proportion predated = predated within 72 h / caterpillars not lost at 72 h
dataset_catex$PropTotPred<-dataset_catex$TotalPred72H/dataset_catex$NonLost72H
summary(dataset_catex)
# sanity check: the new proportion column is numeric
is.numeric(dataset_catex$PropTotPred)
# Raincloud-style plot of predation proportion per site: half-violin
# (geom_flat_violin, presumably from a raincloud-plot helper -- confirm
# source), boxplot, and jittered raw points. Outer parentheses both assign
# and print the plot. text_size, PDF_width and PDF_height are set elsewhere.
(expl_plot1<-
  dataset_catex%>%
  ggplot(
    aes(
      x = Site,
      y = PropTotPred)) +
  geom_flat_violin(
    col = "gray30",
    alpha = 1/2,
    trim = TRUE,
    position = position_nudge(
      x = 0.2,
      y = 0)) +
  geom_boxplot(
    width=0.2,
    outlier.shape = NA,
    col = "gray30",
    alpha = 0.5) +
  geom_point(
    position = position_jitter(width = 0.15),
    alpha = 1,
    size = 1) +
  labs(
    x = "Site",
    y = expression(paste("Total proportion of predated caterpillars"))) +
  theme(
    text = element_text(size = text_size),
    legend.position = "none"))

# save the figure as PDF
ggsave(
  "figures/explor_plot_01_sites.pdf",
  expl_plot1,
  width = PDF_width,
  height = PDF_height,
  units = "in")
# Same raincloud-style plot as above, but coloured/filled by Strata
# (understory vs canopy) so strata can be compared within each site.
(expl_plot2<-
  dataset_catex%>%
  ggplot(
    aes(
      x = Site,
      y = PropTotPred,
      col=Strata,
      fill=Strata)) +
  geom_flat_violin(
    col = "gray30",
    alpha = 1/2,
    trim = TRUE,
    position = position_nudge(
      x = 0.2,
      y = 0)) +
  geom_boxplot(
    width=0.2,
    outlier.shape = NA,
    col = "gray30",
    alpha = 0.5) +
  geom_point(
    position = position_jitter(width = 0.15),
    alpha = 1,
    size = 1) +
  labs(
    x = "Site",
    y = expression(paste("Total proportion of predated caterpillar"))) +
  theme(
    text = element_text(size = text_size),
    legend.position = "top"))

# save the figure as PDF
ggsave(
  "figures/explor_plot_02_sites_Strata.pdf",
  expl_plot2,
  width = PDF_width,
  height = PDF_height,
  units = "in")
### Models TOTAL PREDATION
### The full model which considers treatment in interaction with plant species, distance of the neighbouring
### trees from the central tree and the directions + it takes the cluster of the trees as random effect
### cbind(TotalPred, OK) is used in the binomial glmers - generally we need to use the number of predated caterpillars and number of those that survived,
### column OK = exposed - (predated by birds + predated by arthropods + lost)
#----------------------------------------------------------#
# 3.1 Model build -----
#----------------------------------------------------------#
# Candidate binomial GLMMs for 72-h total predation. Response is
# cbind(predated, survived); Species is a random intercept. The candidate
# set varies: signed Lat vs abs(Lat), quadratic vs linear latitude terms,
# and Strata as interaction / additive term / alone / absent.
glmm_total_predation_full <- glmer(cbind(TotalPred72H, Survived72H)~poly(Lat,2)*Strata + (1|Species),
                                   data = dataset_catex, family = "binomial")
glmm_total_predation_module <- glmer(cbind(TotalPred72H, Survived72H)~poly(abs(Lat),2)*Strata + (1|Species),
                                     data = dataset_catex, family = "binomial")
glmm_total_predation_noStrata <- glmer(cbind(TotalPred72H, Survived72H)~poly(Lat,2) + (1|Species),
                                       data = dataset_catex, family = "binomial")
glmm_total_predation_linear <- glmer(cbind(TotalPred72H, Survived72H)~poly(abs(Lat),1)*Strata + (1|Species),
                                     data = dataset_catex, family = "binomial")
glmm_total_predation_full_add <- glmer(cbind(TotalPred72H, Survived72H)~poly(Lat,2)+Strata + (1|Species),
                                       data = dataset_catex, family = "binomial")
glmm_total_predation_linear_add <- glmer(cbind(TotalPred72H, Survived72H)~poly(abs(Lat),1)+Strata + (1|Species),
                                         data = dataset_catex, family = "binomial")
glmm_total_predation_Strata <- glmer(cbind(TotalPred72H, Survived72H)~Strata + (1|Species),
                                     data = dataset_catex, family = "binomial")
glmm_total_predation_null <- glmer(cbind(TotalPred72H, Survived72H)~1 + (1|Species),
                                   data = dataset_catex, family = "binomial")
# Rank all candidates by AICc (printed, not stored)
AICctab(glmm_total_predation_full, glmm_total_predation_module, glmm_total_predation_noStrata, glmm_total_predation_linear,
        glmm_total_predation_full_add, glmm_total_predation_linear_add, glmm_total_predation_Strata, glmm_total_predation_null)
# build the best model
# NOTE(review): the "best" model is hard-coded to the full model rather than
# taken from the AICctab ranking above — confirm this matches the table.
glm_predation_select<-glmm_total_predation_full
## Predict the values
# Population-level (re.form = NA) predicted predation on a latitude grid,
# duplicated for the two strata labels "U" and "C"
newData <- data.frame(Lat = rep(seq(from = -40, to = 55, length.out = 500),2),
                      Strata = rep(c("U", "C"), each = 500))
newData$Predation <- predict(glm_predation_select, newdata = newData, re.form = NA, type = "response")
# Base-graphics sanity check of observed proportions; plot() returns NULL,
# so model_plot_01 is only a placeholder here (rebuilt as a ggplot in 3.2)
model_plot_01 <-plot(dataset_catex$TotalPred72H/(dataset_catex$TotalPred72H + dataset_catex$Survived72H) ~
                       jitter(dataset_catex$Lat), col = c("deepskyblue3", "goldenrod3")[as.numeric(as.factor(dataset_catex$Strata))])
lines(newData$Lat[newData$Strata == "U"],
      newData$Predation[newData$Strata == "U"], col = "goldenrod3")
lines(newData$Lat[newData$Strata == "C"],
      newData$Predation[newData$Strata == "C"], col = "deepskyblue3")
# Export the prediction grid
newData %>%
  as_tibble() %>%
  write_csv("data/output/OK_prediction_total_predation.csv")
#----------------------------------------------------------#
# 3.2 Figure from model draw -----
#----------------------------------------------------------#
# Observed proportions (jittered points) with the fitted predation curves
# (lines) from glm_predation_select, one colour per stratum.
# FIX: the two axis theme() calls previously sat *after* the closing
# parenthesis of the assignment, so they were applied only to the printed
# copy and were missing from the object saved by ggsave(); they are now
# part of model_plot_01 itself.
# NOTE(review): `size = 3` inside aes() maps a constant to the size scale
# and mainly creates a spurious legend entry (both geoms set size
# explicitly); kept to preserve the original appearance — confirm intent.
(model_plot_01 <- ggplot(dataset_catex,
                         aes(
                           x = Lat,
                           y = PropTotPred,
                           col = Strata,
                           fill = Strata,
                           size = 3)) +
   geom_point(
     data = dataset_catex,
     aes(y = PropTotPred),
     size = 3,
     position = position_jitterdodge(
       dodge.width = 2,
       jitter.width = 2)) +
   geom_line(data = newData, aes(y = Predation), size = 2) +
   coord_flip() +
   labs(
     x = "Latitude",
     y = expression(paste("Proportion attacked"))) +
   scale_fill_manual(values = c("#42adc7", "#ffb902")) +
   scale_color_manual(values = c("#42adc7", "#ffb902")) +
   theme(
     text = element_text(size = text_size),
     legend.position = "right") +
   theme(axis.line = element_line(colour = "black", size = 1, linetype = "solid")) +
   theme(axis.ticks = element_line(colour = "black", size = 1, linetype = "solid")))
# Save the (now fully themed) figure.
ggsave(
  "figures/OK_model_plot_01_TotalPredations.pdf",
  model_plot_01,
  width = PDF_width,
  height = PDF_height,
  units = "in")
#----------------------------------------------------------#
# 3.3 Model and figure from absolute latitude FUN TEST -----
#----------------------------------------------------------#
# Re-read the raw data and attach per-site latitudes; the values below are
# unsigned, so both hemispheres share one latitude axis.
dataset_catex2 <-
  readxl::read_xlsx("data/input/CatexBABE_Complete.xlsx")
Sites <- data.frame(Site = c("TOM", "LAK", "BUB", "KAK", "DRO", "EUC"),
                    Lat = c(42.68, 51.2, 21.6, 5.13, 16.1, 33.62))
dataset_catex2$Lat <- Sites$Lat[match(dataset_catex2$Site, Sites$Site)]
# Candidate models mirroring section 3.1, fitted to dataset_catex2.
# NOTE: these reuse (and overwrite) the object names from section 3.1.
glmm_total_predation_full <- glmer(cbind(TotalPred72H, Survived72H)~poly(Lat,2)*Strata + (1|Species),
                                   data = dataset_catex2, family = "binomial")
# FIX: this candidate was previously fitted to dataset_catex, which made its
# AICc incomparable with the rest of the candidate set.
glmm_total_predation_module <- glmer(cbind(TotalPred72H, Survived72H)~poly(abs(Lat),2)*Strata + (1|Species),
                                     data = dataset_catex2, family = "binomial")
glmm_total_predation_noStrata <- glmer(cbind(TotalPred72H, Survived72H)~poly(Lat,2) + (1|Species),
                                       data = dataset_catex2, family = "binomial")
glmm_total_predation_linear <- glmer(cbind(TotalPred72H, Survived72H)~poly(abs(Lat),1)*Strata + (1|Species),
                                     data = dataset_catex2, family = "binomial")
glmm_total_predation_full_add <- glmer(cbind(TotalPred72H, Survived72H)~poly(Lat,2)+Strata + (1|Species),
                                       data = dataset_catex2, family = "binomial")
glmm_total_predation_linear_add <- glmer(cbind(TotalPred72H, Survived72H)~poly(abs(Lat),1)+Strata + (1|Species),
                                         data = dataset_catex2, family = "binomial")
glmm_total_predation_Strata <- glmer(cbind(TotalPred72H, Survived72H)~Strata + (1|Species),
                                     data = dataset_catex2, family = "binomial")
glmm_total_predation_null <- glmer(cbind(TotalPred72H, Survived72H)~1 + (1|Species),
                                   data = dataset_catex2, family = "binomial")
AICctab(glmm_total_predation_full, glmm_total_predation_module, glmm_total_predation_noStrata, glmm_total_predation_linear,
        glmm_total_predation_full_add, glmm_total_predation_linear_add, glmm_total_predation_Strata, glmm_total_predation_null)
# build the best model
glm_predation_select2 <- glmm_total_predation_full
## Predict the values over a 0-55 degree latitude grid for both strata
newData2 <- data.frame(Lat = rep(seq(from = 0, to = 55, length.out = 500),2),
                       Strata = rep(c("U", "C"), each = 500))
# FIX: predictions previously came from glm_predation_select (the section
# 3.1 model); use the model fitted to the re-read data instead.
newData2$Predation <- predict(glm_predation_select2, newdata = newData2, re.form = NA, type = "response")
# Base-graphics check: observed proportions with the fitted curves.
# FIX: observed values and point colours previously mixed dataset_catex with
# dataset_catex2; they now consistently use dataset_catex2.
# (plot() returns NULL, so the assignment is only a placeholder.)
model_plot_02absol <- plot(dataset_catex2$TotalPred72H/(dataset_catex2$TotalPred72H + dataset_catex2$Survived72H) ~
                             jitter(dataset_catex2$Lat), col = c("deepskyblue3", "goldenrod3")[as.numeric(as.factor(dataset_catex2$Strata))])
lines(newData2$Lat[newData2$Strata == "U"],
      newData2$Predation[newData2$Strata == "U"], col = "goldenrod3")
lines(newData2$Lat[newData2$Strata == "C"],
      newData2$Predation[newData2$Strata == "C"], col = "deepskyblue3")
# Export the prediction grid
newData2 %>%
  as_tibble() %>%
  write_csv("data/output/xx_prediction_latitude_absolute.csv")
#----------------------------------------------------------#
# 3.4 Model build for DISCRETE SITES NOT USED ANYMORE -----
#----------------------------------------------------------#
# Fixed-effects-only binomial GLM with Site as a discrete factor.
# na.action = "na.fail" is required by MuMIn::dredge().
glm_total_predation_full <- glm(cbind(TotalPred72H, Survived72H)~Site*Strata,
                                data = dataset_catex, family = "binomial",
                                na.action = "na.fail")
# compute all possible submodel combinations
glm_total_predation_dd <-
  MuMIn::dredge(
    glm_total_predation_full,
    trace = TRUE)
# save result table
glm_total_predation_dd %>%
  as_tibble() %>%
  write_csv("data/output/xx_total_predation_model_result_discrete.csv")
# observe the best model(s) within 2 AICc units
glm_total_predation_dd %>%
  as_tibble() %>%
  filter(delta < 2 ) %>%
  View()
# build the best model
# FIX: this previously assigned glmm_total_predation_full (the mixed model
# with a continuous Lat term), which has no Site term, so the emmeans call
# below (~ Strata*Site) could not have worked. The discrete-site GLM fitted
# above is what this section is about.
glm_predation_select <- glm_total_predation_full
summary(glm_predation_select)
check_model(glm_predation_select, binwidth = 10)
model_performance(glm_predation_select)
check_heteroscedasticity(glm_predation_select)
qplot(residuals(glm_predation_select))
# calculate estimated marginal means on the response (probability) scale
glm_predation_emmeans <-
  emmeans(
    glm_predation_select,
    pairwise ~ Strata*Site,
    type = "response")
plot(glm_predation_emmeans)
#----------------------------------------------------------#
# 3.2 Figure from DISCRETE MODEL -----
#----------------------------------------------------------#
# Estimated marginal probabilities (open squares + CI bars) on top of the
# raw per-branch proportions (jittered points), coloured by stratum.
# FIX: the two axis theme() calls used to sit *after* the closing
# parenthesis of the assignment, so they were dropped from the saved object
# (and lost entirely when model_plot_01 was rebuilt below); they now live
# inside the assignment.
(model_plot_01 <-
   glm_predation_emmeans$emmeans %>%
   as_tibble() %>%
   ggplot(
     aes(
       x = Site,
       y = prob,
       col = Strata,
       fill = Strata)) +
   scale_x_discrete(limits = c("LAK", "TOM", "BUB", "DRO", "KAK", "EUC")) +
   geom_point(
     data = dataset_catex,
     aes(y = PropTotPred),
     alpha = 0.5,
     size = 2,
     position = position_jitterdodge(
       dodge.width = 0.5,
       jitter.width = 0.15)) +
   geom_errorbar(
     aes(
       ymin = asymp.LCL,
       ymax = asymp.UCL),
     width = 0.3,
     position = position_dodge(width = 0.5, preserve = "single"),
     size = 2) +
   geom_point(
     shape = 0,
     position = position_dodge(width = 0.5),
     size = 4) +
   labs(
     x = "Site",
     y = expression(paste("Total proportion of attacked caterpillars"))) +
   scale_fill_manual(values = c("deepskyblue3", "goldenrod3")) +
   scale_color_manual(values = c("deepskyblue3", "goldenrod3")) +
   theme(
     text = element_text(size = text_size),
     legend.position = "right") +
   theme(axis.line = element_line(colour = "black", size = 1, linetype = "solid")) +
   theme(axis.ticks = element_line(colour = "black", size = 1, linetype = "solid")))
# to turn and rescale the figure
# NOTE: this second scale_x_discrete() replaces the one above (ggplot2 keeps
# only the latest x scale); that is what reverses the site order.
model_plot_01 <- model_plot_01 + coord_flip() +
  scale_x_discrete(limits = c("EUC", "DRO", "KAK", "BUB", "LAK", "TOM"))
# save pdf
ggsave(
  "figures/xx_model_plot_DISCRETE_total_predation.pdf",
  model_plot_01,
  width = PDF_width,
  height = PDF_height,
  units = "in")
# save the pairwise contrasts (sorted by p-value) and the marginal means
glm_predation_emmeans$contrasts %>%
  as_tibble() %>%
  arrange(p.value) %>%
  write_csv("data/output/xx_total_predation_pairwise_contrast.csv")
glm_predation_emmeans$emmeans %>%
  as_tibble() %>%
  write_csv("data/output/xx_total_predation_pairwise_emmeans.csv")
|
6bd0b2fbae833fbcf0953cf2b289443cacd5cfbf | 7da718dc45c69be0dbf0409fe423f32f28151dff | /inst/shiny/server_5_prettyPlot/server_5_prettyPlot_prep.R | ca7e396926222d003e9b543dac545941160c63d7 | [] | no_license | cran/eSDM | ac865dd1a35268c31a17be3e20d964b1882bb850 | 35c58df0a1d89e5c501ecd55cb3608c5ebda5101 | refs/heads/master | 2021-06-16T10:59:20.678147 | 2021-05-04T03:50:08 | 2021-05-04T03:50:08 | 199,010,992 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,962 | r | server_5_prettyPlot_prep.R | ### Functions that return data used in server_5_prettyPlot_plot.R
### Get model(s) to plot, generate lists with plotting information
### NOTE ###
# numericInput()'s convert everything to numeric, so entries or symbols are
# coerced to numeric NA
###############################################################################
# Get set(s) of predictions to plot
### Process selected rows from tables of predictions
# Return list of [which tables have selected rows, a 3 element list of the...
# ...rows selected in those tables, a 3 element list of selected spdfs]
# '3 element lists' correspond to the 3 tables
# Return the single prediction set chosen by the user: an original model, or
# an overlaid/ensemble model re-attached to the shared overlay geometry.
pretty_model_selected <- reactive({
  req(pretty_models_idx_count() == 1)
  idx <- pretty_models_idx_list()
  if (isTruthy(idx[[1]])) {
    # original predictions already carry their own geometry
    vals$models.orig[[idx[[1]]]]
  } else if (isTruthy(idx[[2]])) {
    st_sf(vals$overlaid.models[[idx[[2]]]],
          geometry = vals$overlay.base.sfc, agr = "constant")
  } else if (isTruthy(idx[[3]])) {
    st_sf(vals$ensemble.models[[idx[[3]]]],
          geometry = vals$overlay.base.sfc, agr = "constant")
  } else {
    validate("High quality map error; please report this as an issue")
  }
})
### Get desired projection (crs object) for map
# Four possibilities, driven by the UI:
#   - pretty_proj_ll checked: WGS84 lat/long (crs.ll, defined elsewhere)
#   - method 1: reuse the CRS of the single selected prediction set
#   - method 2: reuse the CRS of a specific original model
#   - otherwise: build a CRS from a user-supplied EPSG code
pretty_crs_selected <- reactive({
  if (input$pretty_proj_ll) {
    crs.ll
  } else {
    if (input$pretty_proj_method == 1) {
      req(pretty_models_idx_count() == 1)
      model.idx.list <- pretty_models_idx_list()
      validate(
        need(length(model.idx.list) == 3,
             "High quality map crs error; please report this as an issue")
      )
      if (isTruthy(model.idx.list[[1]])) {
        # original model: take its own CRS
        st_crs(vals$models.orig[[model.idx.list[[1]]]])
      } else {
        # overlaid/ensemble predictions share the overlay CRS
        vals$overlay.crs #st_crs(vals$overlay.base.sfc)
      }
    } else if (input$pretty_proj_method == 2) {
      # CRS of a specific original model, chosen by index in the UI
      st_crs(vals$models.orig[[req(as.numeric(input$pretty_proj_idx))]])
    } else {
      # user-supplied EPSG code; an unrecognized code yields a crs object
      # whose second element is NA, which is how invalid input is detected
      x <- st_crs(input$pretty_proj_epsg)
      validate(
        need(x[[2]],
             paste("Error: The provided EPSG code was not recognized;",
                   "please provide a valid code"))
      )
      x
    }
  }
})
### Selected predictions, projected to the CRS chosen for the map
pretty_model_toplot <- reactive({
  target.crs <- pretty_crs_selected()
  st_transform(pretty_model_selected(), target.crs)
})
### TRUE when the map must be drawn in the [0, 360] longitude range
pretty_range_360 <- reactive({
  req(pretty_models_idx_count() == 1)
  model.sf <- pretty_model_toplot()
  check_360(model.sf)
})
### Selected predictions shifted into the 0-360 longitude range
# Cached as a reactive because both the plotting code and the color-scheme
# code need it. Callers are expected to have already run the 360 check, so
# check_preview360() is intentionally not called here.
pretty_model_toplot360 <- reactive({
  model.sf <- pretty_model_toplot()
  preview360_split(model.sf)
})
###############################################################################
### Map extent as c(xmin, xmax, ymin, ymax); req() both validates the
### combined inputs and returns the vector as the reactive's value
pretty_map_range <- reactive({
  map.lims <- c(
    input$pretty_range_xmin, input$pretty_range_xmax,
    input$pretty_range_ymin, input$pretty_range_ymax
  )
  req(map.lims)
})
###############################################################################
# Color scheme of predictions
#------------------------------------------------------------------------------
### Process inputs and return list with num of colors and color palette to use
# Returns list(color.palette, color.num). Percentile mode always uses 10
# bins; otherwise the user-entered count is used (validated per palette).
# Palettes 1 (eSDM) and 6 (DarkRedtoBlue) have fixed sizes, overriding the
# user's count. NOTE: selectInput values are character, so comparisons like
# `color.palette.idx == 1` rely on R's character/numeric coercion.
pretty_colorscheme_palette_num <- reactive({
  req(input$pretty_color_palette)
  perc <- input$pretty_color_perc == 1
  color.palette.idx <- input$pretty_color_palette
  if (perc) {
    # percentile binning is fixed at 10 classes
    color.num <- 10
  } else {
    color.num <- val.pretty.color.num()
    validate(
      need(color.num, "Error: The 'Number of colors' entry must be a number")
    )
  }
  ### Set number of colors and color palette
  if (color.palette.idx == 1) {
    color.palette <- pal.esdm
    color.num <- 10
  } else if (color.palette.idx == 2) {
    # RColorBrewer palettes have hard upper limits on class count
    validate(
      need(color.num <= 11,
           "Error: The 'RColorBrewer: Spectral' palette has a max of 11 colors")
    )
    # reversed so that high prediction values map to warm colors
    color.palette <- rev(RColorBrewer::brewer.pal(color.num, "Spectral"))
  } else if (color.palette.idx == 3) {
    validate(
      need(color.num <= 9,
           "Error: The 'RColorBrewer: YlGnBu' palette has a max of 9 colors")
    )
    color.palette <- rev(RColorBrewer::brewer.pal(color.num, "YlGnBu"))
  } else if (color.palette.idx == 4) {
    color.palette <- viridis::viridis(color.num)
  } else if (color.palette.idx == 5) {
    color.palette <- viridis::inferno(color.num)
  } else if (color.palette.idx == 6) {
    # dichromat scheme comes only in a 12-color version
    color.num <- 12
    color.palette <- dichromat::colorschemes$"DarkRedtoBlue.12"
  } else {
    validate("Error: Error in Color Scheme processing")
  }
  list(color.palette, color.num)
})
#------------------------------------------------------------------------------
### Generate list that specifies color scheme things
# Combines the palette info with data break points and legend labels for the
# selected predictions, honoring the [0, 360] longitude shift when needed.
pretty_colorscheme_list <- reactive({
  #----------------------------------------------------------
  ### NA color (NULL means NA cells are not drawn at all)
  if (input$pretty_na_color_check) {
    col.na <- NULL
  } else {
    col.na <- input$pretty_na_color
  }
  #----------------------------------------------------------
  ### Get reactive elements
  perc <- input$pretty_color_perc == 1
  color.palette <- pretty_colorscheme_palette_num()[[1]]
  color.num <- pretty_colorscheme_palette_num()[[2]]
  #----------------------------------------------------------
  ### Determine data break points and legend labels
  # Prep: use the 360-shifted copy of the model when the map crosses the
  # antimeridian
  if (pretty_range_360()) {
    x <- pretty_model_toplot360()
  } else {
    x <- pretty_model_toplot()
  }
  # column holding the prediction values; ensembles use "Pred_ens"
  data.name <- switch(pretty_table_row_idx()[1], "Pred", "Pred", "Pred_ens")
  # Call function (returns list(breaks, legend labels))
  temp <- pretty_colorscheme_func(
    x, data.name, pretty_map_range(), perc, color.num,
    leg.perc.esdm, input$pretty_legend_round
  )
  #----------------------------------------------------------
  ### Return list
  list(
    data.name = data.name, data.breaks = temp[[1]], col.pal = color.palette,
    col.na = col.na, leg.labs = temp[[2]],
    perc = perc, leg.round = input$pretty_legend_round #incldued for update
  )
})
###############################################################################
### Generate list of legend arguments
# Returns a uniform list of tmap-style legend parameters whether the legend
# is drawn inside or outside the map frame; when the legend is disabled, the
# remaining fields are filled with defaults so downstream update code always
# finds the same structure.
pretty_legend_list <- reactive({
  validate(
    need(!is.na(input$pretty_legend_size),
         "Error: The legend text size entry must be a number")
  )
  if (input$pretty_legend) {
    if (input$pretty_legend_inout == 1) {
      # legend inside the map: position comes from a preset lookup table
      leg.out <- FALSE
      leg.pos <- list.pos.vals[[as.numeric(input$pretty_legend_pos)]]
      leg.outside.pos <- NULL
      leg.width <- 1
    } else { #input$pretty_legend_inout == 2
      # legend outside the map: width is user-controlled but bounded
      leg.out <- TRUE
      leg.pos <- NULL
      leg.outside.pos <- input$pretty_legend_pos
      leg.width <- input$pretty_legend_width
      validate(
        need(dplyr::between(leg.width, 0.1, 0.5),
             "The 'Legend width' entry must be between 0.1 and 0.5")
      )
    }
    leg.text.size <- input$pretty_legend_size
    leg.border <- ifelse(input$pretty_legend_frame, "black", FALSE)
    list(
      inc = TRUE, out = leg.out, pos = leg.pos, out.pos = leg.outside.pos,
      text.size = leg.text.size, width = leg.width, border = leg.border
    )
  } else {
    # defaults for others params included for sake of update
    list(
      inc = FALSE, out = FALSE, pos = list.pos.vals[[3]], out.pos = NULL,
      text.size = 1, width = 1, border = "black"
    )
  }
})
###############################################################################
# Section 2
###############################################################################
### Title and axis labels (text plus cex sizes) for the map
# numericInput() coerces non-numeric entries to NA, which is what the
# validation below catches.
pretty_titlelab_list <- reactive({
  sizes.ok <- !is.na(input$pretty_title_cex) && !is.na(input$pretty_lab_cex)
  validate(
    need(sizes.ok, "Error: The title and axis size entries must be numbers")
  )
  list(
    title = input$pretty_title,
    xlab = input$pretty_xlab,
    ylab = input$pretty_ylab,
    titlecex = input$pretty_title_cex,
    labcex = input$pretty_lab_cex
  )
})
###############################################################################
### Margin info: four inner margins plus the outer margin, as an unnamed list
pretty_margin_list <- reactive({
  margin.vals <- c(
    input$pretty_margin_in1, input$pretty_margin_in2,
    input$pretty_margin_in3, input$pretty_margin_in4,
    input$pretty_margin_out
  )
  # c() silently drops NULLs, so the length check also catches missing inputs
  validate(
    need(!anyNA(margin.vals) && length(margin.vals) == 5,
         "Error: All margin values must be numbers")
  )
  as.list(margin.vals)
})
###############################################################################
### Generate list of coordinate grid mark and label info
# Builds the sequences of grid-line positions from a user-chosen start and
# interval, clipped at the map maxima, plus styling parameters. NOTE:
# need(<numeric input>) checks shiny truthiness, so an NA (non-numeric
# entry) fails while 0 passes.
pretty_tick_list <- reactive({
  validate(
    need(input$pretty_tick_lon_start,
         "Error: The 'Longitude grid mark start' entry must be a number"),
    need(input$pretty_tick_lon_interval,
         "Error: The 'Longitde grid mark interval' entry must be a number"),
    need(input$pretty_tick_lat_start,
         "Error: The 'Latitude grid mark start' entry must be a number"),
    need(input$pretty_tick_lat_interval,
         "Error: The 'Latitude grid mark interval' entry must be a number"),
    need(input$pretty_tick_lw,
         "Error: The 'Grid mark width' entry must be a number"),
    need(input$pretty_tick_alpha,
         "Error: The 'Grid mark transparency' entry must be a number"),
    need(input$pretty_tick_label_size,
         "Error: The 'Coordinate label size' entry must be a number")
  )
  # seq() below would error if start were past the map maximum
  validate(
    need(input$pretty_tick_lon_start < input$pretty_range_xmax,
         paste("Error: The 'Longitude grid mark start' must be less than the",
               "'Longitude maximum'")),
    need(input$pretty_tick_lat_start < input$pretty_range_ymax,
         paste("Error: The 'Latitude grid mark start' must be less than the",
               "'Latitude maximum'")),
    need(input$pretty_tick_label_size > 0,
         "Error: The 'Coordinate label size' entry must be greater than zero")
  )
  # tick marks are only drawn when labels are outside AND ticks were selected
  grid.ticks <- ifelse(
    input$pretty_tick_label_inout == 1, FALSE, 2 %in% input$pretty_tick_which
  )
  lon.grid.vals <- seq(
    from = input$pretty_tick_lon_start, to = input$pretty_range_xmax,
    by = input$pretty_tick_lon_interval
  )
  lat.grid.vals <- seq(
    from = input$pretty_tick_lat_start, to = input$pretty_range_ymax,
    by = input$pretty_tick_lat_interval
  )
  list(
    inc = input$pretty_tick,
    grid.lines = 1 %in% input$pretty_tick_which, grid.ticks = grid.ticks,
    x.vals = lon.grid.vals, y.vals = lat.grid.vals,
    grid.lw = input$pretty_tick_lw,
    grid.alpha = input$pretty_tick_alpha,
    grid.col = input$pretty_tick_color,
    grid.labs.size = input$pretty_tick_label_size,
    grid.labs.in = input$pretty_tick_label_inout == 1
  )
})
###############################################################################
### Generate lists of additional objects to plot
# Projects each user-added object to the map CRS and, when the map uses the
# [0, 360] longitude range, shifts it across the antimeridian. Each element
# of vals$pretty.addobj is a list carrying the geometry (obj) plus styling
# fields; only obj is modified here.
pretty_addobj_list <- reactive({
  lapply(req(vals$pretty.addobj), function(i) {
    i$obj <- st_transform(i$obj, pretty_crs_selected())
    if (pretty_range_360()) {
      if (length(i$obj) > 6000) {
        # large geometries: split-based shift (avoids an expensive st_union)
        i$obj <- check_preview360_split(i$obj, force.360 = TRUE)
      } else {
        i$obj <- st_union(
          check_preview360_mod(i$obj, force.360 = TRUE),
          by_feature = TRUE
        )
      }
    }
    range.poly <- pretty_range_poly_func(
      pretty_map_range(), pretty_crs_selected()
    )
    # called for its side effect (validates the object against the map
    # extent); the return value is intentionally discarded
    pretty_int_func(i$obj, range.poly, tolower(i$obj.text))
    i
  })
})
###############################################################################
|
b24917ff4f2c9098d4f584bfbca8372caf830fbd | f48e25ade098aef7aa6f9fde4927bbf2b2092d14 | /man/dasl.yeast.Rd | 51efc163a3185c351053b55e1e815c0525cd8ee6 | [] | no_license | sigbertklinke/mmstat.data | 23fa7000d5a3f776daec8b96e54010d85515dc7d | 90f698e09b4aac87329b0254db28d835014c5ecb | refs/heads/master | 2020-08-18T04:29:05.613265 | 2019-10-17T11:44:57 | 2019-10-17T11:44:57 | 215,747,280 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 486 | rd | dasl.yeast.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dasl.R
\docType{data}
\name{dasl.yeast}
\alias{dasl.yeast}
\title{Yeast}
\format{16 observations}
\source{
DASL – The Data And Story Library: \href{https://dasl.datadescription.com/datafile/yeast/?sf_paged=43}{Yeast}
}
\description{
Yeast
}
\details{
\url{https://github.com/sigbertklinke/wwwdata/tree/master/wwwdata/dasl}
}
\references{
From student experiment (De Veaux)
}
\concept{Analysis of Variance}
|
f5d11c86090c7efcda6cc00f4230033ef80865fd | f53e353c54541c9282a9822e1fa23698bf533bd7 | /test/test6.R | 989b43ff02d95b2c2798a889af0dbc936c31cb2c | [] | no_license | sakur0zxy/R-practice | acee9b2335077365e70e94fdf4734ed6dee58485 | b8ec1f0a0feddcb16f988e0ca45c9b45b908440b | refs/heads/master | 2020-03-26T05:58:17.014993 | 2018-09-27T08:04:53 | 2018-09-27T08:04:53 | 144,583,426 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 496 | r | test6.R | Newtons<-function(fun,x,ep=1e-5,it_max=100){
index<-0;k<-1
while(k<=it_max){
x1<-x;obj<-fun(x);
x<-x-solve(obj$j,obj$f);
norm<-sqrt((x-x1)%*%(x-x1))
if(norm<ep){
index<-1;break
}
k<-k+1
}
obj<-fun(x)
list(root=x,it=k,index=index,funVal=obj$f)
}
# Test system for Newtons(): f(x) = 0 where
#   f1 = x1^2 + x2^2 - 5
#   f2 = (x1 + 1) * x2 - (3 * x1 + 1)
# Returns the function value f and its Jacobian j at x; one known root is
# x = c(1, 2).
funs <- function(x) {
  f <- c(x[1]^2 + x[2]^2 - 5, (x[1] + 1) * x[2] - (3 * x[1] + 1))
  # FIX: spell out TRUE rather than the reassignable alias T
  j <- matrix(c(2 * x[1], 2 * x[2], x[2] - 3, x[1] + 1),
              nrow = 2, byrow = TRUE)
  list(f = f, j = j)
}
Newtons(funs,c(0,1)) |
fcbe618d35b20e4d463484c534e72f76539b27ce | efc94aaf5d6679ed4633ba634727857bb7933ebb | /man/tidyverse_update.Rd | af37120409c8398db5f7c31bd15165b6753f5b38 | [
"MIT"
] | permissive | tidyverse/tidyverse | 035ac53292744d06100ce8050e287bb375121740 | 8ec2e1ffb739da925952b779925bb806bba8ff99 | refs/heads/main | 2023-09-02T21:30:17.203856 | 2023-03-08T21:31:34 | 2023-03-08T21:31:42 | 67,510,715 | 1,411 | 375 | NOASSERTION | 2023-03-07T05:15:08 | 2016-09-06T13:29:39 | R | UTF-8 | R | false | true | 661 | rd | tidyverse_update.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/update.R
\name{tidyverse_update}
\alias{tidyverse_update}
\title{Update tidyverse packages}
\usage{
tidyverse_update(recursive = FALSE, repos = getOption("repos"))
}
\arguments{
\item{recursive}{If \code{TRUE}, will also list all dependencies of
tidyverse packages.}
\item{repos}{The repositories to use to check for updates.
Defaults to \code{getOption("repos")}.}
}
\description{
This will check to see if all tidyverse packages (and optionally, their
dependencies) are up-to-date, and will install after an interactive
confirmation.
}
\examples{
\dontrun{
tidyverse_update()
}
}
|
056e29c49c76dadb33bb4457f125f55bdcdaec9a | b8ec16b847fdf007365acd49fee80f23145f5db6 | /C11_Modified_Variable_Class.R | 14099d95361eef673a9a89492699aa734dd96e5e | [] | no_license | SnowMinxin12/HIVST2 | dac1c98d59168c6714ec016e2cfa34ce72627251 | b74890d937791e2a91659c2cca099249626fb81c | refs/heads/main | 2023-04-29T22:36:05.211776 | 2021-05-10T21:44:48 | 2021-05-10T21:44:48 | 345,498,592 | 0 | 2 | null | 2021-04-27T21:00:16 | 2021-03-08T01:40:21 | R | UTF-8 | R | false | false | 5,258 | r | C11_Modified_Variable_Class.R | ### C11_Check_Variable_Class.R
### add 1 to all factor and binary variables
### Check factors as factors, numbers as numbers
### author: Zhentao Yu, Minxin Lu
### date: 2021-3-31
### input: C01_ReadData.R
### output: a new dataset with variables in the correct class
### DataB for baseline 307 data
### DataS for Survey 207 data
### DataA for Alter 269 data
library(dplyr)
source("C01_ReadData.R")
##### Baseline 309 data #####
# Column letters from the baseline questionnaire, grouped by intended
# measurement class; any column not listed below is treated as continuous.
# NOTE(review): the file header says "add 1 to all factor and binary
# variables", but no such recoding happens anywhere in this script, and the
# binary/date columns are never converted at all (they are only excluded
# from cont_varB) — confirm whether that is intentional.
# NOTE(review): the header also says "baseline 307 data" while this section
# says 309 — verify the correct count.
# date variables
date_varB <-"B"
# binary variables
# NOTE(review): "l" (lower-case L) appears among the column letters below —
# verify it is not a typo for "L".
binary_varB <- c("D","E","G","l","M.A","M.B","M.C","M.D","M.E",
                 "M.F","M.N","R","W","Z","AC","AE.A","AE.B",
                 "AE.C","AE.D","AE.E","AE.F","AE.G",
                 "AJ.A","AJ.B","AJ.C","AJ.D","AJ.E","AJ.F","AJ.G",
                 "AP","AQ.A","AQ.B","AQ.C","AQ.D","AQ.E","AQ.F","AT",
                 "AU","BA","BB.A","BB.B","BB.C","BB.D","BB.E","BB.F","BE",
                 "BF","BL","BM.A","BM.B","BM.C","BM.D","BM.E","BM.F","BP",
                 "BQ","BR","BW.A","BW.B","BW.C","BW.D","BW.E","BW.F","BZ",
                 "CA","CF.A","CF.B","CF.C","CF.D","CF.E","CF.F","CI",
                 "CJ","CO.A","CO.B","CO.C","CO.D","CO.E","CO.F","CR",
                 "CS","CY","CZ")
# factor variables
factor_varB <- c("F","H","K","P","AH","AL","AM","AN",
                 "AW","AX","AY", "BH","BI","BJ", "BT","BU","BV",
                 "CC","CD","CE","CL","CM","CN",
                 "CT","CU","CW","DA","DB")
# ordinal variables (converted to plain factors below, ordering not encoded)
ordinal_varB <- c("I","J","Q","T","V","X","Y","AB","AD","AF","AG","AI",
                  "AO","AR","AS","AZ","BC","BD","BK","BN","BO","BX","BY","CG","CH",
                  "CP","CQ","CV")
# unrelated variables: initial name of people who index usually hangs out
# with; excluded from the continuous set but (see commented line below) no
# longer dropped from the data
tobedeleted_varB <- c("AK","AV","BG","BS","CB","CK")
# continuous variables = everything not classified above
cont_varB <- setdiff(colnames(DataB),c(date_varB,binary_varB,factor_varB,ordinal_varB,tobedeleted_varB))
# factors and ordinals as factors; continuous columns as numeric
dataB.c11 <-DataB
dataB.c11[factor_varB] = lapply(dataB.c11[factor_varB],factor)
dataB.c11[ordinal_varB] = lapply(dataB.c11[ordinal_varB],factor)
dataB.c11[cont_varB] = lapply(dataB.c11[cont_varB],as.numeric)
# remove the initial names of closest people (disabled)
# dataB.c11 <- dataB.c11[ , -which(names(dataB.c11) %in% tobedeleted_varB)]
DataB <- dataB.c11
##### Survey 207 data #####
# Same classification scheme as the baseline block: group survey columns by
# measurement class, then convert factors/ordinals to factor and the
# remaining (continuous) columns to numeric. Date and binary columns are
# listed but not converted.
# date variables
date_varS <- c('A')
# binary variables
binary_varS <- c('E','F','G','H','I','O','P','Q','R','S','Y','Z',
                 'AA','AB','AC','AD','AE','AF','AG','AH','AI','AJ','AK','AL','AM',
                 'AN','AO','AP','AQ','AR','AS',
                 "AT.A","AT.B","AT.C","AT.D","AT.E","AT.F","AT.G","AT.H",
                 'AV','AZ','BD','BI')
# factor variables
factor_varS <- c('J','K','L','M','N','T','U','V','W','X','AW','AX','AY',
                 'BB','BN')
# ordinal variables (converted to plain factors, ordering not encoded)
ordinal_varS <- c('BC','BF','BH','BJ','BK','BM')
# unrelated variables (none for the survey data)
unrelated_varS <- c()
# continuous variables = everything not classified above
cont_varS <- setdiff(colnames(DataS),c(date_varS,binary_varS,factor_varS,ordinal_varS,unrelated_varS))
# factors and ordinals as factors; continuous variables as numerical
dataS.c11 <- DataS
dataS.c11[factor_varS] = lapply(dataS.c11[factor_varS],factor)
dataS.c11[ordinal_varS] = lapply(dataS.c11[ordinal_varS],factor)
dataS.c11[cont_varS] = lapply(dataS.c11[cont_varS],as.numeric)
DataS <- dataS.c11
##### Alter 269 data #####
# Same classification scheme for the alter questionnaire.
# NOTE(review): 'AJ.H' appears both in binary_varA and in unrelated_varA
# ("none of the above happened"); since both lists are only used inside the
# setdiff() below, the overlap is harmless but possibly unintended.
# date variables
date_varA <- c('C')
# binary variables
binary_varA <- c('L','O','Q','U','Z',
                 'AC','AD','AE','AF','AG','AH','AI',
                 'AJ.A','AJ.B','AJ.C','AJ.D','AJ.E','AJ.F','AJ.G','AJ.H',
                 'AM','AP','AQ','AR','AS',
                 'BH','BI.A','BI.B','BI.C','BI.D','BI.E','BI.F','BL','BM','BS',
                 'BT.A','BT.B','BT.C','BT.D','BT.E','BT.F','BW','BX',
                 'CD','CE.A','CE.B','CE.C','CE.D','CE.E','CE.F',
                 'CH','CI','CJ','CO.A','CO.B','CO.C','CO.D','CO.E','CO.F',
                 'CR','CS','CX.A','CX.B','CX.C','CX.D','CX.E','CX.F',
                 'DA','DB','DG.A','DG.B','DG.C','DG.D','DG.E','DG.F',
                 'DJ')
# factor variables
factor_varA <- c('B','D','J','M','N','S',
                 'AL','AN','AO','AT','AU','AV','AW',
                 'BB','BD','BE','BF','BO','BP','BQ',
                 'BZ','CA','CB','CL','CM','CN','CU','CV','CW',
                 'DD','DE','DF','DK','DL')
# ordinal variables (converted to plain factors, ordering not encoded)
ordinal_varA <- c('W','T','Y',
                  'AB','AK','AY',
                  'BA','BG','BJ','BK','BR','BU','BV',
                  'CC','CF','CG','CP','CQ','CY','CZ',
                  'DH','DI','DM','DN')
# unrelated variables, excluded from every conversion
unrelated_varA <- c('AJ.H', # none of above happened
                    'P',
                    'BC','BN','BY',
                    'CK','CT',
                    'DC')
# continuous variables = everything not classified above
cont_varA <- setdiff(colnames(DataA),c(date_varA,binary_varA,factor_varA,ordinal_varA,unrelated_varA))
# factors and ordinals as factors; continuous variables as numerical
dataA.c11 <- DataA
dataA.c11[factor_varA] = lapply(dataA.c11[factor_varA],factor)
dataA.c11[ordinal_varA] = lapply(dataA.c11[ordinal_varA],factor)
dataA.c11[cont_varA] = lapply(dataA.c11[cont_varA],as.numeric)
DataA <- dataA.c11
614cba743e1d7c969cf1c61abdfde24f3d12f120 | 5874ae0a22213c3a692763f285a64433bd512f94 | /R/ggmap.R | 79de3ea6d181129a12d7ae601679ad361916b2d9 | [] | no_license | d8aninja/code | 8356486291a2db9f419549edaa525d9bbe39abfc | 80a836db49a31ecd6d0e62aaf8b6e4417c49df68 | refs/heads/master | 2021-09-06T11:35:13.956195 | 2018-02-06T04:11:33 | 2018-02-06T04:11:33 | 80,898,202 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 337 | r | ggmap.R | library(ggmap)
# Geocode the city centre; geocode() returns a one-row lon/lat data frame.
gc <- geocode("denver, colorado")
# Flatten to the c(lon, lat) numeric vector that get_googlemap() expects.
center <- as.numeric(gc)
# Fetch and draw a Google basemap centred on Denver; extent = "device"
# makes the map fill the whole plotting device.
ggmap(get_googlemap(center = center, scale = 1), extent = "device")
# ggmap(get_googlemap(center = center, color = "bw", scale = 2), extent = "device")
# Basemap with all labels hidden via a Google Static Maps style spec.
# NOTE(review): no `center` is passed here, so get_googlemap() falls back to
# its default location -- presumably `center` was intended; confirm.
map <- get_googlemap(style = c(feature = "all", element = "labels", visibility = "off"))
ggmap(map)
|
8de933c3b2ca2bf2801a7526f7d76734b8b81f4f | db1ea206b2ae975ddb0d74af9f6df9a05e994e03 | /R_grambank/dists/fixation_scores_muthukrishna_run.R | 7d22da4a8546a00e440fd737686fac281c604ca9 | [] | no_license | grambank/grambank-analysed | 1b859b5b25abb2e7755421b65a63bef96dfc8114 | 47d54c9fe82c2380d3c89042fd9f477aa117e044 | refs/heads/main | 2023-06-28T20:00:59.261977 | 2023-06-07T17:04:28 | 2023-06-07T17:04:28 | 397,491,052 | 3 | 0 | null | 2023-04-21T13:32:55 | 2021-08-18T06:04:03 | R | UTF-8 | R | false | false | 193 | r | fixation_scores_muthukrishna_run.R | source("dists/fixation_scores_muthukrishna_GB_fun.R")
# Compute fixation scores per AUTOTYP area and per macroarea; cut_off = 0
# applies no minimum-count filter.  fun_cfx() comes from the sourced helper
# file (fixation_scores_muthukrishna_GB_fun.R).
fun_cfx(df = Language_meta_data, group = "AUTOTYP_area", cut_off = 0)
fun_cfx(df = Language_meta_data, group = "Macroarea", cut_off = 0)
|
806c8ed08cfef99d0a9dd4c725086a4a13d68fd3 | 87472097e88f2e3aef1e9f003add2aa149c50233 | /man/filterImmdataByAge.Rd | 1d011a8798d49a5cf795f9ab451ad831ba4fc768 | [] | no_license | RGLab/ImmuneSignatures2 | f1feca1e5f05f99419a8aca00b0d68928e1b8e82 | 15fc078c4475ae421142aa4b6271c9143db04eda | refs/heads/main | 2023-04-18T07:08:56.765734 | 2022-12-05T22:52:42 | 2022-12-05T22:52:42 | 252,603,828 | 1 | 1 | null | 2022-07-28T23:05:21 | 2020-04-03T01:27:26 | R | UTF-8 | R | false | true | 423 | rd | filterImmdataByAge.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/immuneResponsePreProcessing.R
\name{filterImmdataByAge}
\alias{filterImmdataByAge}
\title{Filter immdata list elements by age filter}
\usage{
filterImmdataByAge(immdata, ages)
}
\arguments{
\item{immdata}{list of assay data.table(s)}
\item{ages}{age cutoffs, either one or two sets}
}
\description{
Filter immdata list elements by age filter
}
|
620ab089fdd605e51374e84790e27b7e527fbde4 | bbe446615c097c400c0adb27ad0850565eb57c0b | /inst/doc/intro_htmlwidgets.R | fb166a5963f2cab52398ac05cfd7f18af55ce397 | [] | no_license | cran/reactR | 83dd22bef80c144564d5488452b310baa5de6841 | 99ba22d9f4a66d85679321ad99550b9ece46b913 | refs/heads/master | 2021-07-18T04:35:53.296417 | 2021-02-22T13:50:06 | 2021-02-22T13:50:06 | 72,949,686 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,234 | r | intro_htmlwidgets.R | ## ---- echo=FALSE, include=FALSE-----------------------------------------------
# Purled vignette script: evaluation is disabled for all chunks, so the
# commented example blocks below are never executed when this file is run.
knitr::opts_chunk$set(eval = FALSE)
## -----------------------------------------------------------------------------
# install.packages(c("shiny", "devtools", "usethis", "htmlwidgets", "reactR"))
## -----------------------------------------------------------------------------
# # Create the R package
# usethis::create_package("~/sparklines")
# # Inject the widget templating
# withr::with_dir(
# "~/sparklines",
# reactR::scaffoldReactWidget("sparklines", list("react-sparklines" = "^1.7.0"), edit = FALSE)
# )
## -----------------------------------------------------------------------------
# system("yarn install")
# system("yarn run webpack")
## -----------------------------------------------------------------------------
# devtools::document()
# devtools::install(quick = TRUE)
## -----------------------------------------------------------------------------
# shiny::runApp()
## -----------------------------------------------------------------------------
# sparklines <- function(message, width = NULL, height = NULL, elementId = NULL) {
#
# # describe a React component to send to the browser for rendering.
# content <- htmltools::tag("div", list(message))
#
# # create widget
# htmlwidgets::createWidget(
# name = 'sparklines',
# reactR::reactMarkup(content),
# width = width,
# height = height,
# package = 'sparklines',
# elementId = elementId
# )
# }
## -----------------------------------------------------------------------------
# sparklines <- function(data, ..., width = NULL, height = NULL) {
#
# # describe a React component to send to the browser for rendering.
# content <- reactR::component(
# "Sparklines",
# list(data = data, ...)
# )
#
# # create widget
# htmlwidgets::createWidget(
# name = 'sparklines',
# reactR::reactMarkup(content),
# width = width,
# height = height,
# package = 'sparklines'
# )
# }
## -----------------------------------------------------------------------------
# #' @export
# sparklinesLine <- function(...) {
# reactR::React$SparklinesLine(...)
# }
#
# #' @export
# sparklinesSpots <- function(...) {
# reactR::React$SparklinesSpots(...)
# }
## -----------------------------------------------------------------------------
# system("yarn install")
# system("yarn run webpack")
# devtools::document()
# devtools::install()
# library(sparklines)
# sparklines(rnorm(10), sparklinesLine())
## -----------------------------------------------------------------------------
# library(shiny)
# library(sparklines)
#
# ui <- fluidPage(
# titlePanel("Sparklines library"),
# sliderInput("n", label = "Number of samples", min = 2, max = 1000, value = 100),
# sparklinesOutput("myWidget")
# )
#
# server <- function(input, output, session) {
# output$myWidget <- renderSparklines({
# sparklines(
# rnorm(input$n),
# sparklinesLine()
# )
# })
# }
#
# shinyApp(ui, server)
|
65a202c4d609226d2a041293658c9be3a4d18297 | a2dabe75febdfeb630cf77c370e53e4ca05035fc | /outputplot_valley.R | 3e99040bbc34d16c775f1e59b96f2ddb8eae3bda | [] | no_license | leeyaowen/treemap | e5c5b3574b5320f2cf1035878a62d76ecafbf54f | 4e69db87df59e3403743e279ae24c785c992c569 | refs/heads/master | 2021-01-24T12:36:42.466149 | 2019-07-14T00:19:25 | 2019-07-14T00:19:25 | 123,140,528 | 0 | 0 | null | null | null | null | BIG5 | R | false | false | 6,629 | r | outputplot_valley.R | library(dplyr)
library(ggplot2)
library(ggthemes)
library(ggrepel)
# Tree survey data chosen interactively; plotmap() below expects columns
# x1, y1 (large-quadrat indices), x2, y2 (sub-quadrat indices), x3, y3
# (local coordinates in cm), tag, sp and dbh.
dt<-read.csv(file.choose(),stringsAsFactors = FALSE)
#dt<-read.csv("./valley1output_20190629.csv",stringsAsFactors = FALSE)
#X1=x1,Y1=y1,xbase=x2,ybase=y2
# Draw the map of one sub-quadrat of the valley plot as an A4 PDF.
#
# The focal sub-quadrat (X1, Y1, xbase, ybase) is drawn as a 500 x 500 cm
# square; trees from the eight surrounding sub-quadrats are added after
# shifting their local coordinates (x3, y3) by +/-500 cm, and everything is
# clipped to a 100 cm buffer around the square.  Trees are open circles
# scaled by dbh (grey when dbh <= 0) and labelled with their tag.
#
# Depends on the global data frame `dt` (columns x1, y1, x2, y2, x3, y3,
# tag, sp, dbh) and on dplyr / ggplot2 / ggrepel.
# Side effect: writes "Line<X1>_plot(<X1>,<Y1>)(<xbase>,<ybase>).pdf".
plotmap <- function(X1, Y1, xbase, ybase){
  # Guard clauses ("fool-proofing"): the requested quadrat must lie inside
  # the surveyed range.
  if(X1 > max(dt$x1, na.rm = TRUE) | X1 < min(dt$x1, na.rm = TRUE)){
    stop("X1 not in range!")
  }
  if(Y1 > max(dt$y1, na.rm = TRUE) | Y1 < min(dt$y1, na.rm = TRUE)){
    stop("Y1 not in range!")
  }
  # Select the focal sub-quadrat and its eight neighbours, shifting each
  # neighbour's local coordinates so it lands on the proper side.
  if(xbase == 1 & ybase == 1){
    plot_center <- filter(dt, x1 == X1, y1 == Y1, x2 == 1, y2 == 1)
    plot_NW <- filter(dt, x1 == X1 - 1, y1 == Y1, x2 == 2, y2 == 2)
    plot_NW <- mutate(plot_NW, x3 = x3 - 500, y3 = y3 + 500)
    plot_N <- filter(dt, x1 == X1, y1 == Y1, x2 == 1, y2 == 2)
    plot_N <- mutate(plot_N, y3 = y3 + 500)
    plot_NE <- filter(dt, x1 == X1, y1 == Y1, x2 == 2, y2 == 2)
    plot_NE <- mutate(plot_NE, x3 = x3 + 500, y3 = y3 + 500)
    plot_E <- filter(dt, x1 == X1, y1 == Y1, x2 == 2, y2 == 1)
    plot_E <- mutate(plot_E, x3 = x3 + 500)
    plot_SE <- filter(dt, x1 == X1, y1 == Y1 - 1, x2 == 2, y2 == 2)
    plot_SE <- mutate(plot_SE, x3 = x3 + 500, y3 = y3 - 500)
    plot_S <- filter(dt, x1 == X1, y1 == Y1 - 1, x2 == 1, y2 == 2)
    plot_S <- mutate(plot_S, y3 = y3 - 500)
    plot_SW <- filter(dt, x1 == X1 - 1, y1 == Y1 - 1, x2 == 2, y2 == 2)
    plot_SW <- mutate(plot_SW, x3 = x3 - 500, y3 = y3 - 500)
    plot_W <- filter(dt, x1 == X1 - 1, y1 == Y1, x2 == 2, y2 == 1)
    plot_W <- mutate(plot_W, x3 = x3 - 500)
  }else if(xbase == 1 & ybase == 2){
    plot_center <- filter(dt, x1 == X1, y1 == Y1, x2 == 1, y2 == 2)
    plot_NW <- filter(dt, x1 == X1 - 1, y1 == Y1 + 1, x2 == 2, y2 == 1)
    plot_NW <- mutate(plot_NW, x3 = x3 - 500, y3 = y3 + 500)
    plot_N <- filter(dt, x1 == X1, y1 == Y1 + 1, x2 == 1, y2 == 1)
    plot_N <- mutate(plot_N, y3 = y3 + 500)
    # BUGFIX: the NE neighbour of sub-quadrat (x2 = 1, y2 = 2) is
    # (x2 = 2, y2 = 1) of quadrat (X1, Y1 + 1).  The original filtered
    # (X1 + 1, Y1 + 1, x2 = 1, y2 = 1), which is the NE neighbour of the
    # (2, 2) sub-quadrat -- a copy-paste slip from that branch.
    plot_NE <- filter(dt, x1 == X1, y1 == Y1 + 1, x2 == 2, y2 == 1)
    plot_NE <- mutate(plot_NE, x3 = x3 + 500, y3 = y3 + 500)
    plot_E <- filter(dt, x1 == X1, y1 == Y1, x2 == 2, y2 == 2)
    plot_E <- mutate(plot_E, x3 = x3 + 500)
    plot_SE <- filter(dt, x1 == X1, y1 == Y1, x2 == 2, y2 == 1)
    plot_SE <- mutate(plot_SE, x3 = x3 + 500, y3 = y3 - 500)
    plot_S <- filter(dt, x1 == X1, y1 == Y1, x2 == 1, y2 == 1)
    plot_S <- mutate(plot_S, y3 = y3 - 500)
    plot_SW <- filter(dt, x1 == X1 - 1, y1 == Y1, x2 == 2, y2 == 1)
    plot_SW <- mutate(plot_SW, x3 = x3 - 500, y3 = y3 - 500)
    plot_W <- filter(dt, x1 == X1 - 1, y1 == Y1, x2 == 2, y2 == 2)
    plot_W <- mutate(plot_W, x3 = x3 - 500)
  }else if(xbase == 2 & ybase == 2){
    plot_center <- filter(dt, x1 == X1, y1 == Y1, x2 == 2, y2 == 2)
    plot_NW <- filter(dt, x1 == X1, y1 == Y1 + 1, x2 == 1, y2 == 1)
    plot_NW <- mutate(plot_NW, x3 = x3 - 500, y3 = y3 + 500)
    plot_N <- filter(dt, x1 == X1, y1 == Y1 + 1, x2 == 2, y2 == 1)
    plot_N <- mutate(plot_N, y3 = y3 + 500)
    plot_NE <- filter(dt, x1 == X1 + 1, y1 == Y1 + 1, x2 == 1, y2 == 1)
    plot_NE <- mutate(plot_NE, x3 = x3 + 500, y3 = y3 + 500)
    plot_E <- filter(dt, x1 == X1 + 1, y1 == Y1, x2 == 1, y2 == 2)
    plot_E <- mutate(plot_E, x3 = x3 + 500)
    plot_SE <- filter(dt, x1 == X1 + 1, y1 == Y1, x2 == 1, y2 == 1)
    plot_SE <- mutate(plot_SE, x3 = x3 + 500, y3 = y3 - 500)
    plot_S <- filter(dt, x1 == X1, y1 == Y1, x2 == 2, y2 == 1)
    plot_S <- mutate(plot_S, y3 = y3 - 500)
    plot_SW <- filter(dt, x1 == X1, y1 == Y1, x2 == 1, y2 == 1)
    plot_SW <- mutate(plot_SW, x3 = x3 - 500, y3 = y3 - 500)
    plot_W <- filter(dt, x1 == X1, y1 == Y1, x2 == 1, y2 == 2)
    plot_W <- mutate(plot_W, x3 = x3 - 500)
  }else if(xbase == 2 & ybase == 1){
    plot_center <- filter(dt, x1 == X1, y1 == Y1, x2 == 2, y2 == 1)
    plot_NW <- filter(dt, x1 == X1, y1 == Y1, x2 == 1, y2 == 2)
    plot_NW <- mutate(plot_NW, x3 = x3 - 500, y3 = y3 + 500)
    plot_N <- filter(dt, x1 == X1, y1 == Y1, x2 == 2, y2 == 2)
    plot_N <- mutate(plot_N, y3 = y3 + 500)
    plot_NE <- filter(dt, x1 == X1 + 1, y1 == Y1, x2 == 1, y2 == 2)
    plot_NE <- mutate(plot_NE, x3 = x3 + 500, y3 = y3 + 500)
    plot_E <- filter(dt, x1 == X1 + 1, y1 == Y1, x2 == 1, y2 == 1)
    plot_E <- mutate(plot_E, x3 = x3 + 500)
    plot_SE <- filter(dt, x1 == X1 + 1, y1 == Y1 - 1, x2 == 1, y2 == 2)
    plot_SE <- mutate(plot_SE, x3 = x3 + 500, y3 = y3 - 500)
    plot_S <- filter(dt, x1 == X1, y1 == Y1 - 1, x2 == 2, y2 == 2)
    plot_S <- mutate(plot_S, y3 = y3 - 500)
    plot_SW <- filter(dt, x1 == X1, y1 == Y1 - 1, x2 == 1, y2 == 2)
    plot_SW <- mutate(plot_SW, x3 = x3 - 500, y3 = y3 - 500)
    plot_W <- filter(dt, x1 == X1, y1 == Y1, x2 == 1, y2 == 1)
    plot_W <- mutate(plot_W, x3 = x3 - 500)
  }else{
    # The original called stop() with no message here.
    stop("xbase and ybase must each be 1 or 2")
  }
  # Combine the nine pieces and keep only trees within a 100 cm buffer
  # around the 500 x 500 cm focal square.
  plotall <- bind_rows(plot_center, plot_NW, plot_N, plot_NE, plot_E,
                       plot_SE, plot_S, plot_SW, plot_W)
  plotall <- filter(plotall, x3 >= -100 & x3 <= 600 & y3 >= -100 & y3 <= 600)
  # Empty quadrat: insert one placeholder row (sp/tag "abc", NA coordinates)
  # so that an empty frame can still be rendered below.
  if(nrow(plotall) == 0){
    plotfake <- data.frame(x1 = X1, y1 = Y1, x2 = xbase, y2 = ybase,
                           tag = "abc", sp = "abc", dbh = 0, x3 = NA, y3 = NA,
                           stringsAsFactors = FALSE)
    plotall <- bind_rows(plotall, plotfake)
  }
  # Progress message (kept verbatim; the Chinese text means "generating").
  print(paste("正在產生(",X1,",",Y1,")","(",xbase,",",ybase,")",sep=""))
  # Positions of the 50 cm minor grid lines.
  xyline <- seq(50, 450, 50)
  # Base plot: blank theme, fixed aspect ratio, generous limits so shifted
  # neighbour trees remain visible on the margins.
  p <- ggplot(plotall, aes(x = x3, y = y3)) +
    theme(panel.background = element_blank(), axis.ticks = element_blank(),
          axis.title = element_blank(), axis.text = element_blank(),
          plot.title = element_text(face = "bold", hjust = 0.3),
          plot.caption = element_text(hjust = 0.8, size = 18)) +
    ggtitle(paste("Quadrat No.(",X1,",",Y1,")","(",xbase,",",ybase,")", sep="")) +
    scale_x_continuous(limits = c(-150,650)) +
    scale_y_continuous(limits = c(-150,650)) +
    coord_fixed()
  # Minor grid every 50 cm.
  for (i in seq_along(xyline)) {
    p <- p + geom_segment(x = xyline[i], y = 0, xend = xyline[i], yend = 500,
                          colour = "gray80", size = 0.2) +
      geom_segment(x = 0, y = xyline[i], xend = 500, yend = xyline[i],
                   colour = "gray80", size = 0.2)
  }
  # Centre cross, outer frame and (fill-in-by-hand) caption.
  p <- p + geom_segment(x = 250, y = 0, xend = 250, yend = 500, colour = "black", size = 0.5) +
    geom_segment(x = 0, y = 250, xend = 500, yend = 250, colour = "black", size = 0.5) +
    geom_rect(aes(ymax = 500, ymin = 0, xmax = 500, xmin = 0), alpha = 0, size = 0.7, colour = "black") +
    labs(caption = "( / )")
  # Real data: dbh-scaled open circles plus repelled tag labels; stems with
  # dbh <= 0 are drawn grey.  The "abc" placeholder quadrat stays empty.
  if(plotall[1, "sp"] != "abc"){
    p <- p + geom_point(aes(size = dbh), shape = 1, stroke = 0.4, show.legend = FALSE,
                        colour = ifelse(plotall$dbh > 0, "black", "gray50")) +
      scale_size_continuous(range = c(0, 20), limits = c(0, 105)) +
      geom_text_repel(aes(label = tag), hjust = -0.1, vjust = 1.2, size = 2.5,
                      colour = ifelse(plotall$dbh > 0, "black", "gray50"))
  }
  # One A4 page per sub-quadrat.  Both branches of the original saved the
  # same file, so the call is hoisted out of the if/else; `plot = p` makes
  # the saved plot explicit instead of relying on last_plot().
  ggsave(filename = paste("Line", X1, "_", "plot(", X1, ",", Y1, ")", "(",
                          xbase, ",", ybase, ").pdf", sep = ""),
         plot = p, width = 210, height = 297, units = "mm")
}
# ----- sub-quadrat loop -----
# xbase/ybase each take the values 1 and 2.  Note that only the *lengths*
# of these two objects drive the loops: the loop indices i and j themselves
# are passed to plotmap() as the sub-quadrat coordinates.
outputx2 <- matrix(c(1, 2))
outputy2 <- matrix(c(1, 2))
# Render all four sub-quadrat maps of large quadrat (X1, Y1).
outquadrat <- function(X1, Y1){
  for (i in seq_along(outputx2)) {       # was 1:length(outputx2)
    for (j in seq_along(outputy2)) {     # was 1:length(outputy2)
      plotmap(X1, Y1, i, j)
    }
  }
}
# ----- whole-plot loop -----
# Render every large quadrat of the survey grid: lines 0..14 (x) by
# rows 7..20 (y); each call produces four sub-quadrat PDFs.
outputNX1 <- matrix(c(0:14))
outputNY1 <- matrix(c(7:20))
for (i in seq_along(outputNX1)) {        # was 1:length(outputNX1)
  for (j in seq_along(outputNY1)) {      # was 1:length(outputNY1)
    outquadrat(outputNX1[i], outputNY1[j])
  }
}
|
38838b9943f1324c747df211f97856b5124bbf96 | ea524efd69aaa01a698112d4eb3ee4bf0db35988 | /tests/testthat/test-expect-inheritance.R | 0129dc090baa013d7fabac4d04fedfeee101b274 | [
"MIT"
] | permissive | r-lib/testthat | 92f317432e9e8097a5e5c21455f67563c923765f | 29018e067f87b07805e55178f387d2a04ff8311f | refs/heads/main | 2023-08-31T02:50:55.045661 | 2023-08-08T12:17:23 | 2023-08-08T12:17:23 | 295,311 | 452 | 217 | NOASSERTION | 2023-08-29T10:51:30 | 2009-09-02T12:51:44 | R | UTF-8 | R | false | false | 2,071 | r | test-expect-inheritance.R | test_that("expect_type checks typeof", {
expect_success(expect_type(factor("a"), "integer"))
expect_failure(expect_type(factor("a"), "double"))
})
# expect_is() is a 2nd-edition expectation, so the test opts in to
# edition 2 locally before exercising it.
test_that("expect_is checks class", {
  local_edition(2)
  expect_success(expect_is(factor("a"), "factor"))
  expect_failure(expect_is(factor("a"), "integer"))
})
# Inputs of the wrong object system should fail with the informative
# "not an S3/S4 object" message rather than a plain class mismatch.
test_that("expect_s3/s4_class fails if appropriate type", {
  A <- methods::setClass("A", contains = "list")
  expect_failure(expect_s3_class(1, "double"), "not an S3 object")
  expect_failure(expect_s3_class(A(), "double"), "not an S3 object")
  expect_failure(expect_s4_class(factor(), "double"), "not an S4 object")
})
# Passing NA as the class asserts that the object is *not* an S3 (resp. S4)
# object at all.
test_that("expect_s[34]_class can check not S3/S4", {
  expect_success(expect_s3_class(1, NA))
  expect_snapshot_failure(expect_s3_class(factor(), NA))
  A <- methods::setClass("A", contains = "list")
  expect_success(expect_s4_class(1, NA))
  expect_snapshot_failure(expect_s4_class(A(), NA))
})
# An instance of C (which contains A and B) must satisfy expectations on
# either parent class; the S4 classes are removed again on exit so the
# global class registry is left clean.
test_that("test_s4_class respects class hierarchy", {
  A <- methods::setClass("A", contains = "list")
  B <- methods::setClass("B", contains = "list")
  C <- methods::setClass("C", contains = c("A", "B"))
  on.exit({
    methods::removeClass("A")
    methods::removeClass("B")
    methods::removeClass("C")
  })
  expect_success(expect_s4_class(C(), "A"))
  expect_success(expect_s4_class(C(), "B"))
  expect_snapshot_failure(expect_s4_class(C(), "D"))
})
# By default any element of the S3 class vector may match; unknown classes
# fail (snapshots capture the failure message).
test_that("test_s3_class respects class hierarchy", {
  x <- structure(list(), class = c("a", "b"))
  expect_success(expect_s3_class(x, "a"))
  expect_success(expect_s3_class(x, "b"))
  expect_snapshot_failure(expect_s3_class(x, "c"))
  expect_snapshot_failure(expect_s3_class(x, c("c", "d")))
})
# With exact = TRUE the full class vector must match, not just one element.
test_that("test_s3_class can request exact match", {
  x <- structure(list(), class = c("a", "b"))
  expect_failure(expect_s3_class(x, "a", exact = TRUE))
  expect_success(expect_s3_class(x, c("a", "b"), exact = TRUE))
})
# The first argument is quasiquotation-aware, so a quosure can be unquoted.
test_that("expect_s3_class allows unquoting of first argument", {
  f <- factor("a")
  expect_success(expect_s3_class(!! rlang::quo(f), "factor"))
})
|
b361b9d7c2aae6e19a6d7298615127131f49cb3e | c750c1991c8d0ed18b174dc72f3014fd35e5bd8c | /pkgs/MMST/man/psych24r.Rd | b656c4ebc3f58e1468d29324a500ac067c220ff7 | [] | no_license | vaguiar/EDAV_Project_2017 | 4b190e66fe7a6b4078cfe1b875bccd9b5a594b25 | 288ffaeec1cfdd873fe7439c0fa0c46a90a16a4f | refs/heads/base | 2021-01-23T02:39:36.272851 | 2017-05-01T23:21:03 | 2017-05-01T23:21:03 | 86,010,131 | 1 | 0 | null | 2017-05-01T23:43:04 | 2017-03-24T00:21:20 | HTML | UTF-8 | R | false | false | 1,822 | rd | psych24r.Rd | \name{psych24r}
\alias{psych24r}
\docType{data}
\title{
MMST PSYCH24R DATA
}
\description{
Scores on a battery of 26 psychological tests (variables V1-V26); discussed on pages 587, 588 and 595 of the reference.
}
\usage{data(psych24r)}
\format{
A data frame with 301 observations on the following 31 variables.
\describe{
\item{\code{Case}}{a numeric vector}
\item{\code{Sex}}{a factor with levels \code{F} \code{M}}
\item{\code{Age}}{a numeric vector}
\item{\code{Grp}}{a numeric vector}
\item{\code{V1}}{a numeric vector}
\item{\code{V2}}{a numeric vector}
\item{\code{V3}}{a numeric vector}
\item{\code{V4}}{a numeric vector}
\item{\code{V5}}{a numeric vector}
\item{\code{V6}}{a numeric vector}
\item{\code{V7}}{a numeric vector}
\item{\code{V8}}{a numeric vector}
\item{\code{V9}}{a numeric vector}
\item{\code{V10}}{a numeric vector}
\item{\code{V11}}{a numeric vector}
\item{\code{V12}}{a numeric vector}
\item{\code{V13}}{a numeric vector}
\item{\code{V14}}{a numeric vector}
\item{\code{V15}}{a numeric vector}
\item{\code{V16}}{a numeric vector}
\item{\code{V17}}{a numeric vector}
\item{\code{V18}}{a numeric vector}
\item{\code{V19}}{a numeric vector}
\item{\code{V20}}{a numeric vector}
\item{\code{V21}}{a numeric vector}
\item{\code{V22}}{a numeric vector}
\item{\code{V23}}{a numeric vector}
\item{\code{V24}}{a numeric vector}
\item{\code{V25}}{a numeric vector}
\item{\code{V26}}{a numeric vector}
\item{\code{group}}{a factor with levels \code{GRANT} \code{PASTEUR}}
}
}
\details{
}
\source{
\url{http://www.psych.yorku.ca/friendly/lab/files/psy6140/data/psych24r.sas}
}
\references{
A. Izenman (2008), \emph{Modern Multivariate Statistical Techniques}, Springer
}
\examples{
}
\keyword{datasets}
|
cdeef09b756bd81788df440a436a17d4f79ffdc2 | 4c37705204ff56dd2681a4e4004ea66216a3155c | /plot4.R | 224cf7c5cda6ea9303fde6139ab4ba0b7b4dce0a | [] | no_license | TMcIntyreZA/ExData_Plotting1 | e99fc8f5d0fff59d9f64692be33a1380d1ac7022 | b951d37ee0a77a8923efa7b8687c4a24aeff223e | refs/heads/master | 2021-01-22T19:22:22.116632 | 2017-03-17T08:05:07 | 2017-03-17T08:05:07 | 85,194,864 | 0 | 0 | null | 2017-03-16T12:45:21 | 2017-03-16T12:45:21 | null | UTF-8 | R | false | false | 1,312 | r | plot4.R | d <- read.table("household_power_consumption.txt", header = TRUE, sep= ";", dec = ".", stringsAsFactors = FALSE,)
# Quick sanity checks on the full household power data set.
dim(d)
d[1,]
# Keep only the two days of interest (Date is stored as d/m/yyyy text).
d2 <- d[d$Date %in% c("1/2/2007","2/2/2007") ,]
dim(d2)
##plot 4
# Parse Date + Time into timestamps for the x axis.
dtime <- strptime(paste(d2$Date, d2$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Measurement columns were read as character; convert to numeric vectors.
gap <- as.numeric(d2$Global_active_power)
grp <- as.numeric(d2$Global_reactive_power)
submet1 <- as.numeric(d2$Sub_metering_1)
submet2 <- as.numeric(d2$Sub_metering_2)
submet3 <- as.numeric(d2$Sub_metering_3)
volt <- as.numeric(d2$Voltage)
# 2 x 2 panel of line charts written to plot4.png.
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
##subplot1: global active power over time
plot(dtime, gap, type = "l", xlab = "", ylab = "Global Active Power", cex.lab = 0.75, cex.axis = 0.75, cex = 0.2)
##subplot2: voltage over time
plot(dtime, volt, type = "l", xlab = "datetime", ylab = "Voltage", cex.lab = 0.75, cex.axis = 0.75)
##subplot3: the three sub-metering series overlaid
plot(dtime, submet1, type = "l", ylab = "Energy sub metering", col = "black", xlab = "", cex.lab = 0.75, cex.axis = 0.75)
lines(dtime, submet2, type = "l", col = "red")
lines(dtime, submet3, type = "l", col = "blue")
# NOTE(review): the stray empty argument after `lty = 1,` is passed to
# legend() as a missing positional argument; harmless but worth removing.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, , cex = 0.7, col = c("black", "red", "blue"), bty = "n")
##subplot4: global reactive power over time
plot(dtime, grp, type = "l", xlab = "datetime", ylab = "Global_reactive_power", cex.lab = 0.75, cex.axis = 0.75)
dev.off()
94070bc7c26d07a6b38ffc288cf21cd8c1bcd5fd | c9f23137dbb44f38b4aafbd5391b2ff179090515 | /man/SimpleBondPrice.Rd | 76464b1233986403cce2a7f8543e18997dc37a60 | [] | no_license | phenaff/R-Package-empfin | d7bc1cef3698ee0d4d1a8bce6bcb8831c86a907c | 6bf91c651d92dff6c28de47a86c84ce62310f42b | refs/heads/master | 2023-01-12T08:23:50.503245 | 2023-01-12T02:05:28 | 2023-01-12T02:05:28 | 10,160,673 | 1 | 3 | null | null | null | null | UTF-8 | R | false | true | 618 | rd | SimpleBondPrice.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BondUtils.R
\name{SimpleBondPrice}
\alias{SimpleBondPrice}
\title{Bond price}
\usage{
SimpleBondPrice(coupon, n, yield)
}
\arguments{
\item{coupon}{(real) coupon rate (.05: 5\%)}
\item{n}{(integer) number of years to expiry}
\item{yield}{(real) yield to maturity}
}
\value{
price of $1 nominal
}
\description{
Simplified bond price calculation
}
\details{
Price of a bond with annual coupon, computed
on coupon payment date
\deqn{
P = \sum_{i=1}^n \frac{c}{(1+y)^i} + \frac{1}{(1+y)^n}
}
}
\examples{
p <- SimpleBondPrice(.05, 10, .05)
}
|
c6bfec11f393315701f11ddfdb87648d143d7a0a | f7872cf7882aed12825c2f118669875cd17224c9 | /QC_plus_doubletsdet.R | 06d15b49f80566d1e95f7e7703d6363977365cfe | [] | no_license | LeGrand-Lab/QC_single_cell | e9e718ee77eee7c5d5cb10cca661a41c286d544f | 85fcb1237f1507d048244dae1b7206339dd6887a | refs/heads/master | 2022-11-13T20:31:41.850076 | 2020-06-25T11:58:13 | 2020-06-25T11:58:13 | 274,629,969 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,793 | r | QC_plus_doubletsdet.R | ###
# QUALITY CONTROL AND PREPROCESSING
# This workflow is valid for 10X-format raw counts.
# Third-party protocol yields matrices where GENE SYMBOLS are rownames.
# ATTENTION: Doublet detection procedures should only be applied to libraries
# generated in the same experimental batch.
# *-* many thanks to Dr. L Modolo for most of this code *-*
# --
# input : data/MYEXPERMNT : barcodes.tsv.gz features.tsv.gz matrix.mtx.gz
# output : results/*.pdf and 'rdatas/MYEXPERMT_END.RData' for downstream analysis
# Joha GL 2020
##
# ============ USER DEFINED
prloc = "~/QC_single_cell" #<<<< check working directory!!
exper="dorsowt2" # TODO change in coherence to folder input
# ============ end user defined
exper = gsub("/",'',exper)
listpackages <- c( "ggplot2", "dplyr", "stringr", "tidyverse",
"BSgenome", "GenomeInfoDb", "Seurat",
"lubridate", # right color
"simpleSingleCell", # for scRNA storage & manipulation
"scater", # for QC control
"scran", # analysis pipeline
"uwot", # UMAP dim-red
"DropletUtils", #utility functions for handling single-cell (RNA-seq)
"AnnotationHub", # ensbl query
"AnnotationDbi", # ensbl query
"sctransform", "SingleCellExperiment", "Matrix" )
lapply(listpackages,require,character.only=TRUE)
# ================== SETTING PATHS
setwd(prloc)
resdir="results/"
system(paste("mkdir",resdir)) # creates if not exists
system("mkdir rdatas") #creates if not exists
sink(paste0(resdir,"outputsfile.txt"), append=TRUE)
sink(paste0(resdir,"outputsfile.txt"), append=TRUE, type="message")
# read 10X
# ================================================================================
# Load the 10X matrix into a SingleCellExperiment, preferring DropletUtils
# and falling back to Seurat::Read10X if that fails.
#
# BUGFIX: the original body did `sce <- read10xCounts(...); return(sce)`.
# A tryCatch expression is not a function, so return() itself raised
# "no function to return from", which the error handler then caught --
# meaning the DropletUtils path could never succeed.  The handlers also
# used return()/print() so that, on a warning, the warning handler's
# print() value (a character string) was assigned to `sce`.  The fix lets
# each branch's last expression be its value, and the warning branch now
# retries the read with warnings suppressed so an SCE is always returned.
sce <- tryCatch({
  read10xCounts(paste0("data/", exper), type = "sparse")
}, error = function(e) {
  print("failed DropletUtils::read10xCounts, using Seurat+SCE steps")
  matdat <- Seurat::Read10X(paste0("data/", exper))
  SingleCellExperiment(assays = list(counts = matdat))
}, warning = function(w) {
  print("10x to sce done but check warnings")
  suppressWarnings(read10xCounts(paste0("data/", exper), type = "sparse"))
})
print("initial matrix dimensions")
dim(sce)#27998 2432
print("starting analysis")
print("loading data and adding annotations")
head(rowData(sce))
#DataFrame with 6 rows and 0 column, fix:
# The loader leaves rowData empty, so store the gene symbols (the matrix
# rownames) explicitly as a rowData column.
rowData(sce) <- DataFrame(
  genes_names = rownames(sce)
)
## ** DATA ANNOTATIONS **
# Query AnnotationHub for the Ensembl revision-99 EnsDb databases of human
# (GRCh38) and mouse (GRCm38); `hub_ids` maps each hub record id to its
# species and is consumed by the pull_*/merge_* helpers below.
hub_infos <- AnnotationHub() # this goes into .cache/AnnotationHub
hub_ids <- mcols(hub_infos) %>%
  data.frame() %>%
  rownames_to_column(var = "id") %>% # we keep the rownames
  as_tibble() %>%
  dplyr::filter(
    dataprovider %in% "Ensembl" & # Ensembl annotation
      species %in% c("Homo sapiens", "Mus musculus"), # for the two species we want
    genome %in% c("GRCh38", "GRCm38"), # on the right genome
    str_detect(title, "99"), # on the right revision
    rdataclass %in% "EnsDb",
  ) %>%
  dplyr::select(id, species) # id is species code in .db
# pull_ensemble id (dataset is in gene symbols instead,as we know).
# exemple: "Gsn" (symbol) --> "ENSG00000183765" (ensembl geneid)
# Map the gene symbols of the global `sce` (rowData(sce)$genes_names, with
# any ".version" suffix stripped) to Ensembl gene ids via the EnsDb behind
# one AnnotationHub record.
#
# id        AnnotationHub record id (e.g. "AH78783").
# hub_infos AnnotationHub object indexed with `id`.
# keytype   key type of the supplied identifiers; generalized into a
#           parameter (resolving the in-code TODO) with the old hard-coded
#           "SYMBOL" as default, so existing calls are unchanged.
# Returns a named character vector of GENEIDs (NA where unmapped).
pull_ensembl.id <- function(id, hub_infos, keytype = "SYMBOL"){
  mapIds(
    hub_infos[[id]],
    keys = str_replace(rowData(sce)$genes_names, "(.*)\\..*", "\\1"),
    keytype = keytype,
    column = "GENEID")
}
# Run pull_ensembl.id() for every hub record in `id` (one per species) and
# collapse the per-species columns into a single ensembl.id string per gene;
# tidyr::unite(na.rm = TRUE) drops the species that did not match, and genes
# mapped in both species end up as "ENSG..._ENSMUSG..." joined ids.
merge_ensembl.id <- function(id, hub_infos){
  sapply(id %>% pull(id), pull_ensembl.id, hub_infos) %>%
    as_tibble() %>%
    unite(col = "ensembl.id", na.rm = TRUE) %>%
    pull(ensembl.id)
}
# Look up the chromosome (SEQNAME) of each Ensembl gene id stored in the
# global `sce` (rowData(sce)$ensembl.id, version suffix stripped) using the
# EnsDb behind one AnnotationHub record.
#
# id        AnnotationHub record id.
# hub_infos AnnotationHub object indexed with `id`.
# keytype   key type of the supplied identifiers; generalized into a
#           parameter (resolving the in-code TODO) with the old hard-coded
#           "GENEID" as default, so existing calls are unchanged.
# Returns a named character vector of seqnames (NA where unmapped).
pull_loc <- function(id, hub_infos, keytype = "GENEID"){
  mapIds(
    hub_infos[[id]],
    keys = str_replace(rowData(sce)$ensembl.id, "(.*)\\..*", "\\1"),
    keytype = keytype,
    column = "SEQNAME")
}
# Run pull_loc() for every hub record in `id` and collapse the per-species
# chromosome columns into one chr_pos string per gene (mirrors
# merge_ensembl.id() above).
merge_loc <- function(id, hub_infos){
  sapply(id %>% pull(id), pull_loc, hub_infos) %>%
    as_tibble() %>%
    unite(col = "chr_pos", na.rm = TRUE) %>%
    pull(chr_pos)
}
# Annotate every gene with its Ensembl id and chromosome, flag genes that
# sit on a standard chromosome, and infer the species from the id prefix.
rowData(sce)$ensembl.id <- merge_ensembl.id(hub_ids,hub_infos)
rowData(sce)$chr_pos = merge_loc(hub_ids, hub_infos)
# "Genomic" = located on a numbered autosome or X/Y.  The 1:22 range covers
# human; mouse chromosomes (1-19) are a subset of the same strings.
rowData(sce)$is_genomic <- rowData(sce)$chr_pos %in% c(as.character(1:22), "X", "Y")
# ENSMUSG* -> mouse, ENSG* -> human, anything unmapped -> placeholder label.
rowData(sce)$species = ifelse(str_detect(rowData(sce)$ensembl.id, "^ENSMUSG"),
                              "Mus musculus", ifelse(str_detect(rowData(sce)$ensembl.id, "^ENSG"),
                                                     "Homo sapiens", "exoticGeneSymbol"))
rowData(sce)
print("species detected by gene symbol, before correction")
table(rowData(sce)$species)
# exoticGeneSymbol Homo sapiens Mus musculus
# 1829 20 26149
# and after verification, exoticGeneSymbols belong to M musculus:
# (symbols that matched no Ensembl id were checked and are mouse genes)
rowData(sce)$species[rowData(sce)$species=="exoticGeneSymbol"] <- "Mus musculus"
tail(rowData(sce)[rowData(sce)$species %in% "Homo sapiens",])
# WDR97 WDR97 ENSG00000179698 8 TRUE Homo sapiens
# C2 C2 ENSG00000166278_ENSMUSG00000024371 FALSE Homo sapiens
# C3 C3 ENSG00000125730_ENSMUSG00000024164 FALSE Homo sapiens
# PISD PISD ENSG00000241878 22 TRUE Homo sapiens
# DHRSX DHRSX ENSG00000169084 X TRUE Homo sapiens
# ==============
# QC first steps
# ==============
print("detecting contamination and inferior outliers")
# Per-cell UMI totals, split by the species assigned to each gene above.
colData(sce) <- DataFrame(
  n_mm_umi = colSums(counts(sce)[rowData(sce)$species %in% "Mus musculus", ]),
  n_hg_umi = colSums(counts(sce)[rowData(sce)$species %in% "Homo sapiens", ]),
  n_umi = colSums(counts(sce))
)
# Call a cell's species when >90% of its UMIs come from one species;
# everything else is labelled "mixed" (likely contamination/doublets).
colData(sce)$species <- colData(sce) %>%
  as_tibble() %>%
  mutate(
    prop_mm = n_mm_umi / n_umi,
    prop_hg = n_hg_umi / n_umi,
    species = case_when(
      prop_mm > 0.9 ~ "Mus musculus",
      prop_hg > 0.9 ~ "Homo sapiens",
      TRUE ~ "mixed"
    )
  ) %>% pull(species)
colData(sce)
colData(sce) %>% as_tibble() %>% summary()
print("filtering out not expressed features")
# A gene is "expressed" when detected in at least one cell.
rowData(sce)$expressed <- scater::nexprs(sce,byrow=TRUE)>0
# Per-cell QC metrics on expressed genes only, with the fraction of counts
# on non-standard chromosomes tracked as the "non_genomic" subset.
per_cell <- perCellQCMetrics(sce[rowData(sce)$expressed, ], subset = list(non_genomic = !rowData(sce[rowData(sce)$expressed, ])$is_genomic))
summary(per_cell$sum) # UMI counts !
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 567 5444 9548 9702 12948 41029
summary(per_cell$detected)
summary(per_cell$subsets_non_genomic_percent)
colData(sce) <- cbind(colData(sce),per_cell)
plotdet <- scater::plotColData(sce,x="sum",y="detected", colour_by="species")
pdf(paste0(resdir,exper,"_plotColData_sumVSdetected.pdf"))
# NOTE(review): a bare object relies on auto-printing; if this script is
# ever source()d, wrap in print() so the plot actually reaches the PDF.
plotdet
dev.off()
# inferior outliers
# NOTE(review): despite the name, keep_total is TRUE for *outliers*
# (isOutlier flags cells whose log-total is too low), not for kept cells.
colData(sce)$keep_total <- scater::isOutlier(colData(sce)$sum,type = "lower", log=TRUE)
table(colData(sce)$keep_total) # TRUE are OUTLIERS
sce <- scater::addPerFeatureQC(sce)
head(rowData(sce))
summary(rowData(sce)$detected)
outliplot <- scater::plotColData(sce, x="sum",y="detected",colour_by="keep_total")
outliplot <-outliplot + scale_fill_discrete(name="is.Outlier") + ggtitle("Cells under lower addPerFeatureQC metrics ('inferior' outliers)")
pdf(paste0(resdir,exper,"plotColData_outlier.pdf"))
outliplot
dev.off()
# save(sce, file = paste0("rdatas/",exper,".RData")) # if problems, save here and debug
# ==============
# QC continued
# ==============
print("post perfeatureQC matrix dimensions")
dim(sce)
print("colnames(colData(sce))")
colnames(colData(sce))
dim(sce)
print("substracting empty drops, kneeplot")
# Knee plot of total UMI count versus barcode rank for a barcodeRanks()
# result, with the inflection point marked by dashed cut-off lines.
knee_plot <- function(bc_rank) {
  infl <- metadata(bc_rank)[["inflection"]]
  # Unique (rank, total) pairs with at least one UMI.
  pts <- tibble(rank = bc_rank[["rank"]], total = bc_rank[["total"]]) %>%
    distinct() %>%
    dplyr::filter(total > 0)
  # One-row frame holding the inflection total and its rank cut-off.
  cutoffs <- tibble(
    inflection = infl,
    rank_cutoff = max(bc_rank$rank[bc_rank$total > infl])
  )
  ggplot(pts, aes(total, rank)) +
    geom_line() +
    geom_hline(aes(yintercept = rank_cutoff), data = cutoffs, linetype = 2) +
    geom_vline(aes(xintercept = inflection), data = cutoffs, linetype = 2) +
    scale_x_log10() +
    scale_y_log10() +
    annotation_logticks() +
    labs(y = "Rank", x = "Total UMIs")
}
# Rank barcodes by total count (expressed genes only) to locate the knee.
bcrank <- DropletUtils::barcodeRanks(
  SingleCellExperiment::counts(
    sce[rowData(sce)$expressed, ]))
pdf(paste0(resdir,exper,"_knee_plot.pdf"))
knee_plot(bcrank)
dev.off()
# A barcode is called a real cell when its UMI total exceeds the knee-plot
# inflection point (crude empty-droplet removal).
colData(sce)$is_cell <- colData(sce)$n_umi > metadata(bcrank)$inflection
summary(colData(sce)$is_cell) # 14 barcodes are not real cells
print("post eval empty drops, matrix dimensions")
dim(sce)
print("Finding doublets")
# Size-factor normalization on real cells only, clustered by species.
# NOTE(review): this reassignment *subsets* sce to expressed genes and
# is_cell barcodes, so from here on sce$is_cell is all TRUE.
sce <- computeSumFactors(
  sce[rowData(sce)$expressed, sce$is_cell],
  clusters = sce$species[sce$is_cell]
)
sce <- logNormCounts(sce)
# Per-cell doublet density; stored as log10(density + 1) in doublet_score.
dbl_dens <- doubletCells(sce[rowData(sce)$expressed, sce$is_cell])
sce$doublet_score <- 0
sce$doublet_score[sce$is_cell] <- log10(dbl_dens + 1)
# Persist the processed object for downstream analysis.
save(sce, dbl_dens, file = paste0("rdatas/",exper,"_END.RData"))
# NOTE(review): plotTSNE() needs a TSNE reducedDim; presumably an older
# scater computed it on the fly -- confirm runTSNE() is not required first.
tsnepl <- plotTSNE(sce[rowData(sce)$expressed,sce$is_cell], colour_by="doublet_score")
detfeat <- scater::plotColData(sce, x="sum",y="detected",colour_by="doublet_score")
pdf(paste0(resdir,exper,"_doublets.pdf"),width=13)
tsnepl + detfeat
dev.off()
pdf(paste0(resdir,exper,"_histogram.pdf"))
qplot(sce$doublet_score, geom="histogram")
hist(colData(sce)$doublet_score)
dev.off()
print("END")
print("FINAL (post doublets detection) matrix dimensions")
dim(sce)
# Close both sink() redirections opened at the top of the script.
sink()
sink(type="message")
# END
# ================================================================================
## NOTES:
# note that exemple query:
# mapIds(org.Hs.eg.db, keys=MYVECTOR, column="SYMBOL", keytype="ENTREZID")
# is the same as : mapIds(hub_infos[["AH78783"]], keys= ...)
# because "AH78783" is the id accession for H sapiens database
# END
ab4b45b80c34d1e4252fdd88fe48dae12d70d573 | 54b32e330e8386506fd061ee26ebd5a18534f77d | /man/sampleCore.Rd | 3916d5327bcf37d13279b3ef687a97cf36903076 | [] | no_license | cran/corehunter | cc37fdf35417a256f6f62dbf9b802ebde6c0423f | b94caba741ba9420e828d82701a9b8f6b539064a | refs/heads/master | 2023-05-30T06:49:01.070982 | 2023-05-11T14:50:02 | 2023-05-11T14:50:02 | 70,241,028 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 7,024 | rd | sampleCore.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/execution.R
\name{sampleCore}
\alias{sampleCore}
\title{Sample a core collection.}
\usage{
sampleCore(
data,
obj,
size = 0.2,
always.selected = integer(0),
never.selected = integer(0),
mode = c("default", "fast"),
normalize = TRUE,
time = NA,
impr.time = NA,
steps = NA,
impr.steps = NA,
indices = FALSE,
verbose = FALSE
)
}
\arguments{
\item{data}{Core Hunter data (\code{chdata}) containing genotypes,
phenotypes and/or a precomputed distance matrix. Typically the
data is obtained with \code{\link{coreHunterData}}. Can also be
an object of class \code{chdist}, \code{chgeno} or \code{chpheno}
if only one type of data is provided.}
\item{obj}{Objective or list of objectives (\code{chobj}).
If no objectives are specified Core Hunter maximizes a weighted
index including the default entry-to-nearest-entry distance
(\code{EN}) for each available data type, with equal weight.
For genotypes, the Modified Roger's distance (\code{MR}) is
used. For phenotypes, Gower's distance (\code{GD}) is applied.}
\item{size}{Desired core subset size (numeric). If larger than one the value
is used as the absolute core size after rounding. Else it is used as the
sampling rate and multiplied with the dataset size to determine the size of
the core. The default sampling rate is 0.2.}
\item{always.selected}{vector with indices (integer) or ids (character) of
items that should always be selected in the core collection}
\item{never.selected}{vector with indices (integer) or ids (character) of
items that should never be selected in the core collection}
\item{mode}{Execution mode (\code{default} or \code{fast}). In default mode,
Core Hunter uses an advanced parallel tempering search algorithm and terminates
when no improvement is found for ten seconds. In fast mode, a simple stochastic
hill-climbing algorithm is applied and Core Hunter terminates as soon as no
improvement is made for two seconds. Stop conditions can be overridden with
arguments \code{time} and \code{impr.time}.}
\item{normalize}{If \code{TRUE} (default), the applied objectives in a multi-objective
configuration (two or more objectives) are automatically normalized prior to execution.
For single-objective configurations, this argument is ignored.
Normalization requires an independent preliminary search per objective (fast stochastic
hill-climber, executed in parallel for all objectives). The same stop conditions, as
specified for the main search, are also applied to each normalization search. In
\code{default} execution mode, however, any step-based stop conditions are multiplied
by 500 for the normalization searches, because in that case the main search (parallel
tempering) executes 500 stochastic hill-climbing steps per replica, in a single step
of the main search.
Normalization ranges can also be precomputed (see \code{\link{getNormalizationRanges}})
or manually specified in the objectives to save computation time when sampling core
collections. This is especially useful when multiple cores are sampled for the same
objectives, with possibly varying weights.}
\item{time}{Absolute runtime limit in seconds. Not used by default (\code{NA}).
If used, it should be a strictly positive value, which is rounded to the
nearest integer.}
\item{impr.time}{Maximum time without improvement in seconds. If no explicit
stop conditions are specified, the maximum time without improvement defaults
to ten or two seconds, when executing Core Hunter in \code{default} or
\code{fast} mode, respectively. If a custom improvement time is specified,
it should be strictly positive and is rounded to the nearest integer.}
\item{steps}{Maximum number of search steps. Not used by default (\code{NA}).
If used, it should be a strictly positive value, which is rounded
to the nearest integer. The number of steps applies to the main
search. Details of how this stop condition is transferred to
normalization searches, in a multi-objective configuration, are
provided in the description of the argument \code{normalize}.}
\item{impr.steps}{Maximum number of steps without improvement. Not used by
default (\code{NA}). If used, it should be a strictly
positive value, which is rounded to the nearest integer.
The maximum number of steps without improvement applies
to the main search. Details of how this stop condition is
transferred to normalization searches, in a multi-objective
configuration, are provided in the description of the argument
\code{normalize}.}
\item{indices}{If \code{TRUE}, the result contains the indices instead of ids
(default) of the selected individuals.}
\item{verbose}{If \code{TRUE}, search progress messages are printed to the console.
Defaults to \code{FALSE}.}
}
\value{
Core subset (\code{chcore}). It has an element \code{sel}
which is a character or numeric vector containing the sorted ids or indices,
respectively, of the selected individuals (see argument \code{indices}).
In addition the result has one or more elements that indicate the value
of each objective function that was included in the optimization.
}
\description{
Sample a core collection from the given data.
}
\details{
Because Core Hunter uses stochastic algorithms, repeated runs may produce different
results. To eliminate randomness, you may set a random number generation seed using
\code{\link{set.seed}} prior to executing Core Hunter. In addition, when reproducible
results are desired, it is advised to use step-based stop conditions instead of the
(default) time-based criteria, because runtimes may be affected by external factors,
and, therefore, a different number of steps may have been performed in repeated runs
when using time-based stop conditions.
}
\examples{
\donttest{
data <- exampleData()
# default size, maximize entry-to-nearest-entry Modified Rogers distance
obj <- objective("EN", "MR")
core <- sampleCore(data, obj)
# fast mode
core <- sampleCore(data, obj, mode = "f")
# absolute size
core <- sampleCore(data, obj, size = 25)
# relative size
core <- sampleCore(data, obj, size = 0.1)
# other objective: minimize accession-to-nearest-entry precomputed distance
core <- sampleCore(data, obj = objective(type = "AN", measure = "PD"))
# multiple objectives (equal weight)
core <- sampleCore(data, obj = list(
objective("EN", "PD"),
objective("AN", "GD")
))
# multiple objectives (custom weight)
core <- sampleCore(data, obj = list(
objective("EN", "PD", weight = 0.3),
objective("AN", "GD", weight = 0.7)
))
# custom stop conditions
core <- sampleCore(data, obj, time = 5, impr.time = 2)
core <- sampleCore(data, obj, steps = 300)
# print progress messages
core <- sampleCore(data, obj, verbose = TRUE)
}
}
\seealso{
\code{\link{coreHunterData}}, \code{\link{objective}}, \code{\link{getNormalizationRanges}}
}
|
788728e906332e4a684a4abddb18b8f8e0c25220 | 9a190c31dcd2e5c1208d3701aedd95ee40cd3a52 | /Scripts/tracksPlot.R | dd9dd6673a66cc28fdf6fae4916bb1840cb0633a | [] | no_license | squatrim/marques2020 | d5d44f19da835e0a371b432aa30cf3dedc117fa1 | 45e31e7d17f006d2d3a17e66a63449f758bf5998 | refs/heads/master | 2023-08-19T07:41:27.915357 | 2021-08-02T14:13:14 | 2021-08-02T14:13:14 | 211,262,384 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,499 | r | tracksPlot.R | tracksPlot <- function(data, gene, bigwig.ymax = 25,
region.min = 1000, region.max = 1000) {
# Plot parameters, only to look better
pp <- getDefaultPlotParams(plot.type = 1)
pp$leftmargin <- 0.15
pp$topmargin <- 15
pp$bottommargin <- 15
pp$ideogramheight <- 5
pp$data1inmargin <- 10
pp$data1outmargin <- 0
# Get coordinate from DE_probes
gene.region <- dplyr::filter(data, Feature == gene)
min <- min(gene.region$Start) - region.min
max <- max(gene.region$End) + region.max
chr <- paste0("chr", unique(gene.region$Chromosome))
zoom.region <- toGRanges(paste0(chr,":",min,"-",max))
# Start by plotting gene tracks
kp <- plotKaryotype(zoom = zoom.region,
genome = "mm10",
cex = 0.5,
plot.params = pp)
genes.data <- makeGenesDataFromTxDb(TxDb.Mmusculus.UCSC.mm10.knownGene,
karyoplot = kp,
plot.transcripts = TRUE,
plot.transcripts.structure = TRUE)
genes.data <- addGeneNames(genes.data)
genes.data <- mergeTranscripts(genes.data)
kpAddBaseNumbers(kp, tick.dist = 20000, minor.tick.dist = 5000,
add.units = TRUE, cex = 0.4, tick.len = 3)
kpPlotGenes(kp, data = genes.data, r0 = 0, r1 = 0.1,
gene.name.cex = 0.5)
# Start to plot bigwig files
big.wig.files <- dir(path = "Data/bw_files/",
pattern = ".bw",
all.files = T,
full.names = T)
# big.wig.files
# Reserve area to plot the bigwig files
out.at <- autotrack(1:length(big.wig.files),
length(big.wig.files),
margin = 0.15,
r0 = 0.15,
r1 = 1)
for(i in seq_len(length(big.wig.files))) {
bigwig.file <- big.wig.files[i]
# Define where the track will be ploted
# autotrack will simple get the reserved space (from out.at$r0 up to out.at$r1)
# and split in equal sizes for each bigwifile, i the index, will control which
# one is being plotted
at <- autotrack(i, length(big.wig.files),
r0 = out.at$r0,
r1 = out.at$r1,
margin = 0.2)
# Plot bigwig
kp <- kpPlotBigWig(kp,
data = bigwig.file,
# ymax = "visible.region",
ymax = bigwig.ymax,
r0 = at$r0,
col = ifelse(grepl("sgCTRL",bigwig.file),
"#000000",ifelse(grepl("sgFosl1",bigwig.file),
"#E41A1C","#4DAF4A")),
r1 = at$r1)
computed.ymax <- ceiling(kp$latest.plot$computed.values$ymax)
# Add track axis
kpAxis(kp,
ymin = 0,
ymax = computed.ymax,
numticks = 2,
r0 = at$r0,
r1 = at$r1,
cex = 0.5)
# Add track label
kpAddLabels(kp,
labels = ifelse(grepl("sgCTRL",bigwig.file),
"sgCtrl",ifelse(grepl("sgFosl1",bigwig.file),
"sgFosl1_1","sgFosl1_3")),
r0 = at$r0,
r1 = at$r1,
cex = 0.5,
label.margin = 0.01)
}
# print(zoom.region)
}
|
26e50861e865d78aa0c196bb401797f93ce74fe4 | 115c6a6a98825f87940ec373558155e921042fd8 | /plot_delly_vaf/plot_delly_vaf.R | f8f41c56678933d67878d0f46cb8c8ff17ae4858 | [] | no_license | morinlab/lab_scripts | a29e879175dd30a98cbde703496a4319f61afb2f | 77639a138e520506e4395cade8ca27b4e6a377c6 | refs/heads/master | 2022-04-30T14:56:23.825628 | 2022-03-14T19:18:56 | 2022-03-14T19:18:56 | 26,938,506 | 6 | 8 | null | 2016-09-14T23:04:39 | 2014-11-21T00:31:04 | R | UTF-8 | R | false | false | 1,539 | r | plot_delly_vaf.R |
# Import libraries --------------------------------------------------------
library("tidyr")
library("magrittr")
library("dplyr")
library("ggplot2")
# Parse command-line arguments --------------------------------------------
options(echo=TRUE)
args <- commandArgs(trailingOnly=TRUE)
input_file <- args[1]
# Create data frame -------------------------------------------------------
df <- read.table(input_file, sep="\t", header=TRUE)
tumour_names <- colnames(df)[8:9]
cols <- colnames(df)
cols[8:9] <- c("tumour_1", "tumour_2")
colnames(df) <- cols
head(df)
# Split adta frame into three based on zero values ------------------------
df_t1_zero <- filter(df, tumour_1 == 0)
df_t1_zero <- mutate(df_t1_zero, tumour_1 = -0.05)
df_t2_zero <- filter(df, tumour_2 == 0)
df_t2_zero <- mutate(df_t2_zero, tumour_2 = -0.05)
df_no_zero <- filter(df, tumour_1 != 0, tumour_2 != 0)
# Plot VAFs ---------------------------------------------------------------
plot <- ggplot() +
geom_point(data = df_no_zero, aes(tumour_1, tumour_2, colour = factor(type)), alpha = 0.5, size = 1) +
geom_point(data = df_t1_zero, aes(tumour_1, tumour_2, colour = factor(type)), alpha = 0.5, size = 1,
position = position_jitter(w = 0.02, h = 0)) +
geom_point(data = df_t2_zero, aes(tumour_1, tumour_2, colour = factor(type)), alpha = 0.5, size = 1,
position = position_jitter(w = 0, h = 0.02)) +
coord_cartesian(xlim = c(-0.1, 1.1), ylim = c(-0.1, 1.1))
ggsave(file="delly_vaf_t1_vs_t2.pdf", width=6.5, height=5)
|
5014425a543d3b615949f6fd1a550b5b3be0b4b1 | c32c54f47c35737ea4ba3a026c81b594fd02b1cf | /man/unregister.Rd | 6902769cd7b0f9196f3f9aaa3bcf6af3e1208dbc | [] | no_license | quinnpertuit/rDailyFantasy | cd46596122d979b5c389d67b19bc354109fa0722 | fb00d802573c855f58d5b7b4d84f96d6724a66a6 | refs/heads/master | 2022-10-24T06:04:42.025973 | 2020-01-11T15:12:36 | 2020-01-11T15:12:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 277 | rd | unregister.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unregisterParallelFunction.R
\name{unregister}
\alias{unregister}
\title{Unregister doParallel Cluster}
\usage{
unregister()
}
\description{
Unregister doParallel Cluster
}
\examples{
unregister()
}
|
2bdaeb91a3e5ed281e63e11c7f5e47379ceadf2f | 2415aff5aa0882a76a1fa6d3b2c5dbe86ee0fc8e | /Analysis/update_data/counties_merged_data.R | 2f6a4b4641efb801237f895d1035e234ac1a6bae | [] | no_license | blind-contours/Getz_Hubbard_Covid_Ensemble_ML_Public | 3e4456b74dd9d4638e513a7889e1341eeda4c2c6 | b6c609a178af5761ec98454dcb802fe56906a962 | refs/heads/master | 2022-12-05T21:19:22.547100 | 2020-08-18T18:39:44 | 2020-08-18T18:39:44 | 290,294,958 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,270 | r | counties_merged_data.R | # # Clear Workspace
# rm(list=ls())
# Download USFacts data
usf<-data.frame(
read.csv("https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_confirmed_usafacts.csv"),
read.csv("https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_deaths_usafacts.csv")
)
# Parse FIPS as integers
usf$fips=as.integer(usf$?..countyFIPS)
# Remove counties in Alaska and Hawaii
usf<-usf[!((usf$State %in% c("AK","HI")) | (usf$fips==0)),]
# Read airports data
airports=read.csv("data/processed/counties_airports.csv")
# Adding polygon info for counties to get the centroid
library(tigris) # using the counties() command
library(sf)
# Read counties polygons
# When I tried to pushed the changes to git
# there was an error because tl_2019_us_county.shp
# was too large
# polygons=sf::st_read("data/shape/tl_2019_us_county.shp")
polygons = counties(cb = F, year = 2019, class = "sf")
# Parse FIPS as integers
polygons$fips=as.integer(as.character(polygons$GEOID))
# Keep only counties with data from US Facts
polygons=polygons[polygons$fips %in% usf$fips,]
# Order polygons by FIPS
polygons=polygons[order(polygons$fips),]
# Calculate counties centroids
centroids=sf::st_coordinates(sf::st_centroid(polygons))
# Initialize counties
counties=data.frame(
"FIPS"=polygons$fips,
"Name"=polygons$NAME,
"FirstCaseDay"=NA,
"CountyRelativeDay25Cases"=NA,
"TotalCasesUpToDate"=0,
"USRelativeDay100Deaths"=0,
"TotalDeathsUpToDate"=0,
"CentroidLat"=centroids[,2],
"CentroidLon"=centroids[,1],
"NearestAirportName"=NA,
"NearestAirportDistance"=NA,
"NearestAirportEnplanements"=NA,
"NearestAirportOver5000000Name"=NA,
"NearestAirportOver5000000Distance"=NA,
"NearestAirportOver5000000Enplanements"=NA,
"Population"=NA,
"PublicTransportation"=NA,
"GDP"=NA,
"AreaLand"=polygons$ALAND,
"AreaWater"=polygons$AWATER
)
# Convert cases and deaths data into matrix
ndays=ncol(usf)/2-5
usf=usf[match(counties$FIPS,usf$fips),]
mcases<-data.matrix(usf[,6:(5+ndays)])
mdeaths<-data.matrix(usf[,(ndays+10):(2*ndays+9)])
# Calculate counties cases and deaths statistics
for (i in 1:nrow(counties)) {
if (any(mcases[i,]>0)){
counties$TotalCasesUpToDate[i]=mcases[i,ndays]
counties$TotalDeathsUpToDate[i]=mdeaths[i,ndays]
fc=min(which(mcases[i,]>0))
counties$FirstCaseDay[i]=fc
counties$USRelativeDay100Deaths[i]=mdeaths[i,100]
if (ndays-fc>=24) {counties$CountyRelativeDay25Cases[i]=mcases[i,fc+24]}
}
}
# Store a vector of indices of airports with enplanements of at least
# 5,000,000 (CY18).  Fixes a stray empty second argument in the original
# call, `which(..., )`, and switches to `<-` / seq_len() idioms.
f <- which(airports$CY.18.Enplanements >= 5000000)
for (i in seq_len(nrow(counties))) {
  # Distance (km) from this county's centroid to every airport
  dists <- gmt::geodist(counties$CentroidLat[i], counties$CentroidLon[i],
                        airports$Latitude, airports$Longitude, units = "km")
  # Nearest airport overall
  m <- which.min(dists)
  counties$NearestAirportName[i] <- as.character(airports$Name[m])
  counties$NearestAirportDistance[i] <- dists[m]
  counties$NearestAirportEnplanements[i] <- airports$CY.18.Enplanements[m]
  # Nearest large airport (enplanements >= 5,000,000)
  m <- which.min(dists[f])
  counties$NearestAirportOver5000000Name[i] <- as.character(airports$Name[f[m]])
  counties$NearestAirportOver5000000Distance[i] <- dists[f[m]]
  counties$NearestAirportOver5000000Enplanements[i] <- airports$CY.18.Enplanements[f[m]]
}
# Add population data
a=read.csv('data/processed/nir_covid_county_population_usafacts.csv')
counties$Population=a$population[match(counties$FIPS,as.integer(a$countyFIPS))]
# Add public transportation data
a=read.csv('data/raw/ACSST5Y2018.S0802_data_with_overlays_2020-04-11T224619.csv')
a=a[2:nrow(a),]
counties$PublicTransportation=as.numeric(a$S0802_C04_001E[match(counties$FIPS,as.integer(substr(a$GEO_ID,10,15)))])
# Add county GDP data
# I (whitney) changed the countyGDP to lagdp1219
# we may need to change this back to what it was before
# switched back to CountyGDP 5/23/20
a=readxl::read_excel("data/raw/CountyGDP.xlsx")
counties$GDP=a$X2018[match(counties$FIPS,as.integer(a$GeoFips))]
# Add single value variables from census
for (name in c("air_quality","all_heartdisease_deathrate","all_stroke_deathrate","num_hospitals","percent_park_access","urban_rural_status")) {
a=read.csv(paste("data/",name,".csv",sep=""))
a$Value[a$Value==-1]=NA
counties[name]=a$Value[match(counties$FIPS,a$cnty_fips)]
}
# Add analytic_data2020
a=read.csv("data/raw/analytic_data2020.csv")
a=a[2:nrow(a),]
counties=cbind(counties,a[match(counties$FIPS,as.integer(as.character(a$X5.digit.FIPS.Code))),8:ncol(a)])
# Add County_Table_Chronic_Conditions_Prevalence_by_Age_2017.xlsx
for (i in 2:4) {
a=readxl::read_excel("data/County_Table_Chronic_Conditions_Prevalence_by_Age_2017.xlsx",sheet = i,skip = 4)
a=a[2:nrow(a),]
counties=cbind(counties,a[match(counties$FIPS, as.integer(a$...3)),4:ncol(a)])
}
# Add County_Table_Chronic_Conditions_Spending_2017.xlsx
for (i in 2:3) {
a=readxl::read_excel("data/raw/County_Table_Chronic_Conditions_Spending_2017.xlsx",sheet = i,skip = 4)
a=a[2:nrow(a),]
counties=cbind(counties,a[match(counties$FIPS, as.integer(a$...3)),4:ncol(a)])
}
# Add DiabetesAtlasCountyData.csv
a=read.csv("data/raw/DiabetesAtlasCountyData.csv",skip = 2)
counties$diabetesAtlas=a[match(counties$FIPS,a$CountyFIPS),4]
# Add Education.xls
a=readxl::read_excel("data/raw/Education.xls",skip=4)
counties=cbind(counties,a[match(counties$FIPS,as.integer(a$`FIPS Code`)),4:ncol(a)])
# Add IHME_USA_COUNTY_RESP_DISEASE_MORTALITY_1980_2014_NATIONAL_Y2017M09D26.XLSX
a=readxl::read_excel("data/raw/IHME_USA_COUNTY_RESP_DISEASE_MORTALITY_1980_2014_NATIONAL_Y2017M09D26.XLSX",skip = 1)
data=a[match(counties$FIPS,a$FIPS),3:ncol(a)]
for (i in 1:nrow(data)){
for (cn in colnames(data)){
counties[i,cn]=as.numeric(strsplit(as.character(data[i,cn]),' ')[[1]][1])
}
}
# Add SVI2018_US_COUNTY.csv
a=read.csv("data/raw/SVI2018_US_COUNTY.csv")
counties=cbind(counties,a[match(counties$FIPS,a$FIPS),7:ncol(a)])
# Add Unemployment.xls
a=readxl::read_excel("data/raw/Unemployment.xls",sheet = 1,skip = 7)
counties=cbind(counties,a[match(counties$FIPS,as.integer(a$FIPStxt)),4:ncol(a)])
# Add tester2.csv
a=read.csv("data/raw/5yearACSdata.csv")
counties=cbind(counties,a[match(counties$FIPS,a$GEOID),3:ncol(a)])
# Prepare states to match census state fips to NOAA state fips
states=as.character(unique(usf$State))
states_fips=purrr::map(states,function(state) usf$stateFIPS[which(usf$State==state)[1]])
# Download average, min and max temperature and precipitation from NOAA
for (p in c("tavg","tmin","tmax","pcp")) {
# Run over months
for (m in 1:4) {
cn=sprintf("%s_m%d",p,m)
print(cn)
counties[cn]=NA
# Run over states
for (n in 1:49) {
url=sprintf("https://www.ncdc.noaa.gov/cag/county/mapping/1-%s-20200%d-1.csv",p,m)
noaa=read.csv(url,skip=3)
sfips=states_fips[states==substr(as.character(noaa$Location.ID[1]),1,2)][[1]]
fips=as.integer(substr(as.character(noaa$Location.ID),4,6))+sfips*1000
f=floor(counties$FIPS/1000)==sfips
counties[cn][f,1]=noaa$Value[match(counties$FIPS[f],fips)]
}
}
}
# Results #
# We may need to take some columns out first
# View(CountiesMergedData20200517)
# Write results to a file
write.csv(counties,"data/processed/CountiesMergedData20200517.csv")
|
6fc6463924c77951c07cec8513ef36ff541a321a | 60ddf6ce5476c45658f88101c9a935f5d64197b1 | /global.R | a018b2e653baf6a20b126d6724e9f307ad45f820 | [] | no_license | amirms/ITMViz | 5716ae8de6d2c4f44e65061ad414ceccca804db1 | 647dd00bb5eb24ccd7e6da6be4080f6a4db2c02b | refs/heads/master | 2021-01-17T07:43:25.274067 | 2017-05-23T18:52:27 | 2017-05-23T18:52:27 | 35,730,686 | 4 | 4 | null | null | null | null | UTF-8 | R | false | false | 11,831 | r | global.R | library(inline)
library(Rcpp)
library(inline)
library(ITM)
ldaState <- function(alpha, beta, eta, K=1, W=0, D=0, vocab=NULL, doc.names=NULL, session=NULL) {
state = list (
dt = Module("dt", getDynLib("ITM")),
#Current Project in benchmark folder
prname= NULL,
min.tfidf= 3, #the minimum tf-idf score - default value = 3
K = K, #User-selected number of topics
docs = NULL, #User-selected bag-of-words
labels = NULL, #User-selected labels for topics
vocab = vocab,
labels = paste0("Topic", 1:K), #instantiated with arbitrary topic names
doc.names = doc.names,
W = W, #Number of words
D = D,
N=0, # total number of tokens in the data
#User-provided parameters of LDA
alpha = alpha,
beta = beta,
eta = eta,
#Create the corresponding alpha and beta for performing LDA
# ldaalpha = matrix(alpha, nrow = 1, ncol=K),
#
# #ldabeta = self.beta * ones((self.T,self.W))
# ldabeta = matrix(beta, nrow=K, ncol=W),
numsamp = 50,
randseed = 821945,
#TODO figure out what this is for
f=NULL,
#State of Interactive LDA
curIterations = 0, #LDA iterations run so far (gets reset when 'Reset Topics' is clicked)
nextIterations = 0, #LDA iterations to run in the next LDA command
append=FALSE, #True if we're updating an existing LDA run, false if we're starting over.
randomSeed = -1, #a negative random seed means that we should use the default
dirty = FALSE, #Dirty flag: true if we've changed the constraints in any way
#without running LDA to refine the topics.
session = session, #Current Session
#Constraints
constraints = list( #User-selected constraints
mlinks = list(), #Must-Link Constraints
clinks = list(), #Cannot-Link Constraints
ilinks = list(), #Isolate Constraints
conflicts = NULL
),
#Visualization information
doc.term = NULL,
term.frequency = NULL,
topic.proportion = NULL,
doc.proportion = NULL,
#Results of constraints compilation
root = NULL,
leafmap = NULL,
# Outputs of inference
zsamp = NULL,
qsamp = NULL,
#phi = matrix(0, nrow=T, ncol=W) #a T x W matrix
phi = NULL,
#theta = matrix(0, nrow=W, ncol=D) #a W x D matrix
theta = NULL,
phi.freq = 0, #the token-topic occurrence table
theta.freq = 0, #the topic-document occurrence table
rel.freq = 0 #the doc-topic frequency
)
#Reset Constraints
state$resetConstraints <- function(){
state$constraints = NULL
}
#Add a constraint
state$addConstraint <- function(words, isCannotLink)
{
constr = list(words = words, isCannotLink = isCannotLink)
state$constraints <-append(state$constraints, constr)
state$dirty = TRUE
}
#Replace a constraint with a new set of words
state$replaceConstraint <- function(index, words, isCannotLink)
{
constr = list(words = words, isCannotLink = isCannotLink)
state$constraints[[index]] <- constr
state$dirty = TRUE
}
#Delete a constraint
state$deleteConstraint <- function(index)
{
if (index <= length(state$constraints))
state$constraints <- stateconstraints[-index]
state$dirty = TRUE
}
state <- list2env(state)
class(state) <- "LDAState"
return(state)
}
fit.LDA <- function(mystate) {
require(Rcpp)
require(inline)
require(ITM)
if (is.null(mystate$prname))
return()
if(mystate$K <= 1)
return()
#if this is the first iteration
if (mystate$curIterations == 0) {
#the min.tfidf must have been set in the constructor of mystate
r <- read.dt(mystate$prname, mystate$min.tfidf)
# list(doclist=doclist, vocab = colnames(bow), docs=rownames(bow),
# term.frequency = colSums(bow), doc.term = rowSums(bow)))
mystate$docs = r$docs
mystate$vocab = r$vocab
mystate$labels <- unlist(lapply(seq(mystate$K), function(c) paste("Topic", c, sep="")))
mystate$term.frequency = r$term.frequency
mystate$doc.term = r$doc.term
mystate$doc.names = r$doc.names
mystate$N <- sum(mystate$term.frequency)
mystate$rel.freq <- mystate$term.frequency/mystate$N
mystate$W = length(r$vocab)
mystate$D = length(r$doc.names)
}
#check if the dt is already set
if (is.null(mystate$dt))
mystate$dt = Module("dt", getDynLib("ITM"))
# Compile preferences, if we haven't already
if(is.null(mystate$root) || isTRUE(mystate$dirty)) {
mystate$f = rep(0, mystate$D)
mystate$qsamp = NULL
mystate$zsamp = NULL
# print("working")
isolate.constraints <- propagate.isolatelinks(mystate$constraints$ilinks, mystate$W, mystate$vocab)
expanded.constraints <- list(mlinks = append(isolate.constraints$mlinks, mystate$constraints$mlinks),
clinks = append(isolate.constraints$clinks, mystate$constraints$clinks))
# print(mystate$constraints$mlinks)
# if (length(mystate$constraints$mlinks) > 0) {
# print(expanded.constraints)
#
# stop("aasas")
# }
#
# return(expanded.constraints)
# Compile constraints into Dirichlet Forest data structure
pc = process.pairwise(expanded.constraints, mystate$W, mystate$vocab)
if (!is.null(pc$conflicts)){
mystate$constraints$conflicts <- pc$conflicts
return()
}
# tree = list(root,leafmap)
tree = buildTree(pc$mlcc, pc$clcc, pc$allowable,
mystate$W, mystate$beta, mystate$eta, mystate$dt)
# return(tree)
mystate$root = tree$root
mystate$leafmap = tree$leafmap
mystate$dirty = FALSE
}
ldaalpha = matrix(mystate$alpha, nrow = 1, ncol=mystate$K)
lda = intLDA(mystate$docs, ldaalpha, mystate$root, mystate$leafmap,
mystate$numsamp, mystate$randseed,
mystate$zsamp, mystate$qsamp, mystate$f)
#Update global state with the results
mystate$phi = t(lda$phi)
#Name rows and cols of phi
rownames(mystate$phi) <- mystate$vocab
colnames(mystate$phi) <- mystate$labels
#Name rows and cols of theta
mystate$theta = lda$theta
rownames(mystate$theta) <- mystate$doc.names
colnames(mystate$theta) <- mystate$labels
mystate$zsamp = lda$zsamp
mystate$qsamp = lda$qsamp
#Compute topic proportion
total.topic <- sum(colSums(mystate$theta))
mystate$topic.proportion = colSums(mystate$theta) / total.topic
#Compute document proportion
#total.terms <- sum(mystate$doc.term)
#add doc.proportion to ldastate
mystate$doc.proportion <- mystate$doc.term / mystate$N
# This is necessary for subsetting data upon selection in topicz.js
# compute the token-topic occurrence table:
mystate$phi.freq <- (mystate$phi / rowSums(mystate$phi)) * mystate$term.frequency
#mystate$theta.freq <- t(mystate$theta * mystate$doc.proportion * mystate$K)
mystate$theta.freq <- t(mystate$theta / mystate$D) * 100
return(mystate)
}
read.dt <- function(prname , t=10) {
require(lsa)
# setwd("~/workspace")
# prname <- "jedit-5.1.0"
print(getwd())
# bow <- read.table(paste("data", prname , "mydata-BoW-matrix.csv", sep="/"), sep=",", row.names = 1, header = TRUE, check.names = FALSE)
bow <- read.table(paste("data", prname , "mydata-BoW-matrix.csv", sep="/"), sep=",", row.names = 1, header = TRUE, check.names = FALSE)
bow <- as.matrix(bow)
# bow <- bow[1:30,]
print(dim(bow))
names <- colnames(bow)
bow <- bow[,which(nchar(names) > 3)]
nbow <- idf.weight(bow)
y = apply(nbow, 2, function(x) (length(x[which(x >= t)]) > 0) )
bow <- bow[,which(y)]
# return(bow)
doclist <- convertbow2list(bow)
return(list(docs=doclist, vocab = colnames(bow), doc.names=rownames(bow),
term.frequency = colSums(bow), doc.term = rowSums(bow)))
}
# Compute inverse document frequency weights and rescale a data frame
# Input: data frame
# Calls: scale.cols
# Output: scaled data-frame
idf.weight <- function(x) {
# IDF weighting
doc.freq <- colSums(x>0)
doc.freq[doc.freq == 0] <- 1
w <- log(nrow(x)/doc.freq)
return(scale.cols(x,w))
}
# Rescale the columns of a data frame or array by a given weight vector
# Input: arrray, weight vector
# Output: scaled array
scale.cols <- function(x,s) {
return(t(apply(x,1,function(x){x*s})))
}
# Rescale rows of an array or data frame by a given weight vector
# Input: array, weight vector
# Output: scaled array
scale.rows <- function(x,s) {
return(apply(x,2,function(x){x*s}))
}
#Input: a document-term matrix(bow)
convertbow2list <- function(bow) {
doclist = list()
for(i in 1:dim(bow)[1]) {
doc = c()
for (j in 1:dim(bow)[2])
doc <- c(doc, rep(j, bow[i,j]))
doclist[[length(doclist)+1]] <- doc
}
return(doclist)
}
#Input:
#bow: An unnormalized bag-of-words
#K: no of topics
fit.classic.LDA <- function(bow, K) {
require(topicmodels)
require(tm)
results = list(phi=NULL, term.frequency=NULL, vocab=NULL, topic.proportion=NULL, doc.names=NULL, theta=NULL)
results$term.frequency = colSums(bow)
results$doc.term = rowSums(bow)
results$vocab = colnames(bow)
results$doc.names = rownames(bow)
dtm <- as.DocumentTermMatrix(bow, weighting = function(x) weightTf(x))
lda <- LDA(dtm, control = list(alpha = 0.1), k = K)
lda_inf <- posterior(lda)
results$phi = t(lda_inf$terms)
total.topic = sum(colSums(lda_inf$topics))
results$topic.proportion = colSums(lda_inf$topics) / total.topic
results$theta = lda_inf$topics
return(results)
}
prepare.global <- function(prname, K) {
alpha = 0.1
beta = .1
eta = 10000
mystate <- ldaState(alpha = alpha, beta = beta, eta = eta)
mystate$prname= "jedit-5.1.0"
mystate$K= 20
bow <- read.dt(mystate$prname, 15)
results = fit.classic.LDA(bow, mystate$K)
mystate$phi = results$phi
mystate$term.frequency = results$term.frequency
mystate$topic.proportion = results$topic.proportion
mystate$vocab = results$vocab
mystate$docs = results$docs
mystate$theta = results$theta
mystate$doc.term = results$doc.term
colnames(mystate$phi) <- paste0("Topic", 1:mystate$K)
#Compute topic proportion
total.topic <- sum(colSums(mystate$theta))
mystate$topic.proportion = colSums(mystate$theta) / total.topic
mystate$N <- sum(mystate$term.frequency)
mystate$rel.freq <- mystate$term.frequency/mystate$N
mystate$phi.freq <- t(t(mystate$phi) * mystate$topic.proportion * mystate$N)
}
# compute.MoJoSim <- function(prname, theta, k) {
#
# d <- proxy::dist(theta)
#
# c <- kmeans(d, centers = k, nstart = 300, iter.max = 200)
#
# compare.MoJo(prname, c$cluster)
# }
# compare.MoJo <- function(prname, clusters) {
# require(gelato)
#
# # setwd("~/workspace")
#
# #Load the priori decomposition
# decomposition <- read.csv(paste("benchmark", prname ,"decomposition.csv", sep="/"), sep=",", header = TRUE)
# priori.decomp <- decomposition$x
# names(priori.decomp) <- decomposition$X
#
#
# # priori.decomp <- normalizeVector(priori.decomp)
# priori.decomp <- find.intersection(priori.decomp, clusters)
# clusters <- find.intersection(clusters, priori.decomp)
#
#
# priori.decomp <- normalizeVector(priori.decomp)
#
# N <- length(clusters)
#
# mojo <- compute.MoJo(clusters, priori.decomp)
#
# mojosim <- sapply(mojo, function(m) 1 - (m/N))
#
# nmis <- compute.NMI(clusters, priori.decomp)
#
# purities <- compute.cluster.purity(clusters, priori.decomp)
#
#
# return(list(mojosim=mojosim, nmis=nmis, purities=purities))
#
# }
|
4cbf53c5f847d624569e1edc464c9479517364bb | 551a0f7f4dae3b16deb08b4e2ce1bc369b399c61 | /EOD_2_transportes_graphs.R | 20fe5a25a6ba1494ebf56b4acdecf2aa5f20ec45 | [] | no_license | eduardon6/EOD | 2170506dd38ca7bbae26873007ef729c346b6a59 | 0ed3277b11faf96e69d3210304c2d35077e875ce | refs/heads/master | 2020-04-10T19:06:46.179101 | 2018-12-10T19:16:03 | 2018-12-10T19:16:03 | 161,223,404 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,213 | r | EOD_2_transportes_graphs.R | library(survey)
library(dplyr)
library(lubridate)
library(ggplot2)
library(foreign)
library(stringr)
rm(list=ls())
d <- read.csv(file = "transportes.csv")
names(d)
d$SEXO[d$SEXO == 1] <- "hombre"
d$SEXO[d$SEXO == 2] <- "mujer"
d$EDAD2 <- d$EDAD
d$EDAD2[d$EDAD2 %in% 6:16] <- "06a16"
d$EDAD2[d$EDAD2 %in% 17:29] <- "17a29"
d$EDAD2[d$EDAD2 %in% 30:39] <- "30a39"
d$EDAD2[d$EDAD2 %in% 40:49] <- "40a49"
d$EDAD2[d$EDAD2 %in% 50:60] <- "50a59"
d$EDAD2[d$EDAD2 %in% 61:97] <- "61a97"
#GRAPH CANTIDAD DE PERSONAS EN TRÁNSITO POR DIA
design_d <- svydesign(ids = ~UPM_DIS, strata = ~EST_DIS, weights = ~FACTOR,
data = d)
options(survey.lonely.psu="remove")
varstime <- names(d)[29:172]
table7 <- lapply(varstime, function(x){
form <- make.formula(c(x, "P5_3"))
d <- data.frame(x, svytable(formula = form, design = design_d))
names(d) <- c("tiempo", "en_transito", "dia", "freq")
return(d)
}) ; names(table7) <- varstime
table7_2 <- do.call(what = "rbind", args = table7)
table7_2$tiempo2 <- ymd_hms(
paste(
today(),
do.call(what = "rbind",
args = strsplit(x = as.character(table7_2$tiempo), split = "tviaje_"))[,2],
sep = "_"),
tz = "Mexico/General")
table7_2$dia <- as.character(table7_2$dia)
table7_2$dia[table7_2$dia == "entre_semana"] <- "Entre semana"
table7_2$dia[table7_2$dia == "sabado"] <- "Sabado"
graph_transito_pordia <- table7_2 %>%
filter(en_transito == TRUE) %>%
ggplot() +
geom_bar(mapping = aes(x = tiempo2, y = freq), stat = "identity", width=120) +
scale_x_datetime(date_breaks = "hours" , date_labels = "%H-%M") +
theme(axis.text.x = element_text(angle=30, hjust=1, vjust=1, size = 7)) +
facet_wrap(facets = ~dia, nrow = 1) +
xlab(label = "Lapsos de 10 minutos") +
ylab(label = "Cantidad de personas en tránsito") +
ggtitle(label = "Cantidad de personas en tránisto por lapsos de 10 minutos en la ZMVM",
subtitle = "Segun reportado por los entrevistados en la EOD 2017")
#GRAPH CANTIDAD DE PERSONAS EN TRÁNSITO POR TRANSPORTE
design_d <- svydesign(ids = ~UPM_DIS, strata = ~EST_DIS, weights = ~FACTOR,
data = d)
options(survey.lonely.psu="remove")
varstime <- names(d)[325:468]
table1 <- lapply(varstime, function(x){
form <- make.formula(c(x, "P5_3", "SEXO", "EDAD2"))
d <- data.frame(x, svytable(formula = form, design = design_d))
names(d) <- c("tiempo", "en_transito", "dia", "sexo", "edad2", "freq")
return(d)
}) ; names(table1) <- varstime
table2 <- do.call(what = "rbind", args = table1)
table2$tiempo2 <- ymd_hms(paste(today(), (do.call("rbind", strsplit(x = as.character(table2$tiempo), split = "ttransp_medio_"))[,2]), sep = " "), tz = "Mexico/General")
table2$dia <- as.character(table2$dia)
table2$dia[table2$dia == "entre_semana"] <- "Entre semana"
table2$dia[table2$dia == "sabado"] <- "Sabado"
transportes <- c("Automovil ", "Caminar",
"Colectivo_Micro ", "Taxi_App_internet", "Taxi_sitio_calle_otro",
"Metrobus_o_Mexibus ", "Transporte_de_personal ", "Moto", "Bicicleta", "Metro", "Autobus", "Autobus_RTP_ o_ M1")
graph3_semana <- table2 %>%
filter(en_transito != "FALSE") %>%
filter(dia == "Entre semana") %>%
filter(en_transito %in% transportes) %>%
ggplot() +
geom_bar(mapping = aes(x = tiempo2, y = freq), stat = "identity", width=250) +
scale_x_datetime(date_breaks = "hours" , date_labels = "%H-%M") +
theme(axis.text.x = element_text(angle=90, hjust=0, vjust=0, size = 5)) +
theme(axis.text.y = element_text(size = 7)) +
facet_wrap(facets = ~en_transito, scales = "free") +
xlab(label = "Lapsos de 10 minutos") +
ylab(label = "Cantidad de personas en tránsito") +
ggtitle(label = "Cantidad de personas en tránisto por lapsos de 10 minutos en la ZMVM que utilizan el medio ENTRE SEMANA",
subtitle = "Segun reportado por los entrevistados en la EOD 2017")
graph3_sabado <- table2 %>%
filter(en_transito != "FALSE") %>%
filter(dia == "Sabado") %>%
filter(en_transito %in% transportes) %>%
ggplot() +
geom_bar(mapping = aes(x = tiempo2, y = freq), stat = "identity", width=250) +
scale_x_datetime(date_breaks = "hours" , date_labels = "%H-%M") +
theme(axis.text.x = element_text(angle=90, hjust=0, vjust=0, size = 5)) +
theme(axis.text.y = element_text(size = 7)) +
facet_wrap(facets = ~en_transito, scales = "free") +
xlab(label = "Lapsos de 10 minutos") +
ylab(label = "Cantidad de personas en tránsito") +
ggtitle(label = "Cantidad de personas en tránisto por lapsos de 10 minutos en la ZMVM que utilizan el medio SABADOS",
subtitle = "Segun reportado por los entrevistados en la EOD 2017")
#GRAPH TIEMPOS DE TRASLADO POR TRANSPORTE
# Trip length is stored in seconds (transp_length2); convert to minutes and
# bin: 10-minute bins up to 2h, then 2-3h, 3-4h, 4-5h.
d$transp_length2_min <- d$transp_length2/60
d$transp_length2_min_intervals <- cut(x = d$transp_length2_min, breaks = c(seq(from = 0, to = 120, by = 10), 180, 240, 300))
design_d <- svydesign(ids = ~UPM_DIS, strata = ~EST_DIS, weights = ~FACTOR,
data = d)
options(survey.lonely.psu="remove")
table3 <- data.frame(svytable(formula = ~transp_length2_min+P5_14+P5_3, design = design_d) )
names(table3) <- c("mins", "transporte", "dia", "freq")
# NOTE(review): `mins` is a factor here, so as.numeric() yields level CODES,
# not minute values -- as.numeric(as.character(table3$mins)) was probably
# intended (cf. the TRAS conversion below). Codes are monotone in value, so
# the bin ordering is right, but the interval labels are not minutes.
table3$mins_intervals <- cut(x = as.numeric(table3$mins), breaks = 20)
graph_tiempo_traslados <- table3 %>%
filter(dia == "entre_semana") %>%
filter(transporte %in% transportes) %>%
ggplot() +
geom_bar(mapping = aes(x = mins_intervals, y = freq), stat = "identity", width = .7) +
#scale_x_datetime(date_breaks = "hours" , date_labels = "%H-%M") +
theme(axis.text.x = element_text(angle=90, hjust=0, vjust=0, size = 6)) +
theme(axis.text.y = element_text(size = 7)) +
facet_wrap(facets = ~transporte, scales = "free") +
xlab(label = "Tiempo de traslado (intervalos de minutos)") +
ylab(label = "Cantidad de viajes") +
ggtitle(label = "Tiempos de traslados en la ZMVM por transporte ENTRE SEMANA",
subtitle = "Segun reportado por los entrevistados en la EOD 2017")
path_graphs <- here::here("GRAPHS")
#ggsave(filename = "graph3_semana.jpeg", plot = graph3_semana, device = "jpeg", path = path_graphs,width = 12, height = 6)
#ggsave(filename = "graph3_sabado.jpeg", plot = graph3_sabado, device = "jpeg", path = path_graphs,width = 12, height = 6)
#ggsave(filename = "graph_tiempo_traslados.jpeg", plot = graph_tiempo_traslados, device = "jpeg", path = path_graphs,width = 12, height = 6)
#TRAYECTOS MAS FRECUENTES
# Station catalogue (DBF); TRAS holds the transport-mode code 1:20, relabelled
# below. Several labels carry a trailing space -- they must match the data.
catest <- read.dbf(file = "TCAT_ESTACIONES.dbf")
catest$TRAS <- as.numeric(as.character(catest$TRAS))
catest$TRAS <- factor(x = catest$TRAS, levels = 1:20,
labels = c("Automovil ", "Colectivo_Micro ", "Taxi_App_internet", "Taxi_sitio_calle_otro", "Metro", "Autobus_RTP_ o_ M1", "Bicicleta", "Autobus", "Moto", "Trolebus ", "Metrobus_o_Mexibus ", "Tren_ligero ", "Tren_suburbano ", "Caminar", "Mexicable", "Bicitaxi ", "Mototaxi ", "Transporte_escolar", "Transporte_de_personal ", "Otro "))
catest$TRAS <- droplevels(catest$TRAS)
# NOTE(review): if EST is a factor this returns level codes; confirm EST is
# read as numeric/character from the DBF.
catest$EST <- as.numeric(catest$EST)
catest$DESC <- str_to_title(catest$DESC)
catest$SISTEMA <- str_to_title(catest$SISTEMA)
# Rename to the trip-table key names so merge() joins on them implicitly.
names(catest) <- c("P5_14", "P5_17", "estacion", "linea", "sistema")
catest$linea <- as.character(catest$linea)
# Two copies of the catalogue: one keyed on the origin station (P5_17_1C)
# and one on the destination station (P5_17_2C).
catest_ini <- catest
names(catest_ini) <- c("P5_14", "P5_17_1C", "estacion_ini", "linea_ini", "sistema_ini")
catest_fin <- catest
names(catest_fin) <- c("P5_14", "P5_17_2C", "estacion_fin", "linea_fin", "sistema_fin")
d$P5_17_1C <- as.numeric(d$P5_17_1C)
d$P5_17_2C <- as.numeric(d$P5_17_2C)
# Attach origin/destination lookups; dim() calls act as row-count sanity
# checks after each merge.
d <- merge(x = d, y = catest_ini, all.x = TRUE) ; dim(d)
d <- merge(x = d, y = catest_fin, all.x = TRUE) ; dim(d)
########################################################
table(d$sistema_fin, d$sistema_ini) #agruparon metrobus y mexibus en p5_14
########################################################
d$linea_misma <- d$linea_ini == d$linea_fin
design_d <- svydesign(ids = ~UPM_DIS, strata = ~EST_DIS, weights = ~FACTOR,
data = d)
options(survey.lonely.psu="remove")
table4 <- data.frame(svytable(formula = ~sistema_ini+sistema_fin+estacion_ini+estacion_fin, design = design_d) )
table4$ini_fin <- paste(table4$estacion_ini, table4$estacion_fin, sep = " - ")
# Top-15 within-system trips (origin and destination on the same system),
# ranked by weighted frequency.
table5 <- lapply(levels(table4$sistema_ini), function(x){
table4 %>%
filter(sistema_ini == x) %>%
filter(sistema_fin == x) %>%
arrange(desc(Freq)) %>%
head(., 15) %>%
mutate(rank = 1:15)
})
table5 <- do.call("rbind", table5)
# NOTE(review): factor() with levels = the full vector preserves the ranked
# order for plotting, but errors if any ini_fin label repeats across systems
# (duplicated levels are not allowed); confirm labels are unique.
table5$ini_fin <- factor(x = table5$ini_fin, levels = table5$ini_fin, labels = table5$ini_fin)
graph_trayectos <- ggplot(table5) +
geom_bar(mapping = aes(x = ini_fin, y = Freq), stat = "identity") +
facet_wrap(facets = ~sistema_ini, scales = "free") +
theme(axis.text.x = element_text(angle=30, hjust=.85, vjust=.85, size = 6.5),
axis.text.y = element_text(size = 7,
margin = margin(l = 15, r = 4, unit = "pt"))) +
xlab(label = "Trayecto (inicio - fin)") +
ylab(label = "Frecuencia") +
ggtitle(label = "Trayectos más frecuentes en la ZMVM por transporte (entre semanana y sábados)", subtitle = "Segun reportado por los entrevistados en la EOD 2017")
path_graphs <- here::here("GRAPHS")
#ggsave(filename = "graph_transito_pordia.jpeg", plot = graph_transito_pordia, device = "jpeg", path = path_graphs,width = 12, height = 6)
#ggsave(filename = "graph3_transito_semana.jpeg", plot = graph3_semana, device = "jpeg", path = path_graphs,width = 12, height = 6)
#ggsave(filename = "graph3_transito_sabado.jpeg", plot = graph3_sabado, device = "jpeg", path = path_graphs,width = 12, height = 6)
#ggsave(filename = "graph_tiempo_traslados.jpeg", plot = graph_tiempo_traslados, device = "jpeg", path = path_graphs,width = 12, height = 6)
#ggsave(filename = "graph_trayectos.jpeg", plot = graph_trayectos, device = "jpeg", path = path_graphs,width = 12, height = 6) |
aa24323a956f9e2c51b22135d589177dffc4b5d5 | e54c158a91574d4379603d2694b0e431818ec965 | /QC-Scripts/Sample-QC/Relatedness/postking/plot-duplicate-concordance.R | 6d9e7461664a41d199973670baed7a1f0addcc1a | [] | no_license | cgbycroft/UK_biobank | 3e9de2a2cfb6e81dae0db3f0c5c20995ca9613e6 | f1bb23cdd8881bebacfedccc289922278de180bf | refs/heads/master | 2023-06-20T03:38:10.405339 | 2021-07-19T12:09:55 | 2021-07-19T12:09:55 | 157,422,053 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,343 | r | plot-duplicate-concordance.R | # This script takes the output from ./postking/duplicate-concordance.sh and plots summaries of the concordance rates
# ---------------------------------------------------------------------------
# Command-line handling: all bare arguments are helper scripts to source;
# the analysis prefix follows the "-in" flag.
# ---------------------------------------------------------------------------
args = commandArgs(TRUE)
#args = c("/well/ukbiobank/expt/V2_QCed.SNP-QC/src/V2_QCed.snpqc-tests.R","/well/ukbiobank/expt/V2_QCed.SNP-QC/src/V2_QCed.bin2clusterplots.R","/well/ukbiobank/qcoutput.V2_QCed.sample-QC/QC-Scripts/R/scripts/readPSperformance.R","/well/ukbiobank/qcoutput.V2_QCed.sample-QC/QC-Scripts/R/scripts/auxFunctions.R","-in","b1__b11-b001__b095-pair_batches.filtered-samples-pruned-200")
print(args)
# Drop "-in" and its value; everything left is a helper script path.
h = args[-c(which(args%in%c("-in")),1+which(args%in%c("-in")))]
for(helperScript in h){
source(helperScript)
}
# baseSampleQCDir, read.multiple.batch.info and batchInfoFields are provided
# by the sourced helper scripts above.
setwd(paste0(baseSampleQCDir,"/QC-Scripts/Sample-QC/Relatedness"))
inPrefix = args[which(args=="-in")+1]
# raw kinship data
#kinRaw = read.table('../../../data/Relatedness/b1__b11-b001__b095-pair_batches.filtered.kin0',header=TRUE,stringsAsFactors=FALSE)
# Raw sample tables (before Oxford QC)
otherInfo = read.multiple.batch.info(batchInfoFields)
# All the duplicates and twins
twinDupeFile = paste0(inPrefix,"-duplicates-twins.txt")
twinDupe = read.table(twinDupeFile,header=TRUE,stringsAsFactors=FALSE)
# Annotated duplicates list. This was based on the 894 pairs detected in the pre-pruned kinship table (as in kinRaw).
dupeAnnotFile = "160513-WCSG-duplicated-samples.txt"
dupeAnnot = read.delim(dupeAnnotFile,header=TRUE,stringsAsFactors=FALSE,sep="\t")
# Master blind-spiked duplicates list from Sam Welsh (nee Murphy)
dupeBlindFile = "170221-BSD-pairs-DISTRIBUTED-v2.csv"
dupeBlind = read.delim(dupeBlindFile,header=TRUE,stringsAsFactors=FALSE,sep=",")
# Upper-cased array IDs, used below when matching against the sample tables.
dupeBlind$Best.Array.caps = toupper(dupeBlind$Best.Array)
dupeBlind$Sibling.Best.Array.caps = toupper(dupeBlind$Sibling.Best.Array)
# Split the pair table into the two members of each pair.
sib1 = dupeBlind[,!grepl("Sibling",colnames(dupeBlind))][,-c(5,8)]
sib2 = dupeBlind[,grepl("Sibling",colnames(dupeBlind))]
# Exclude any pairs with a sample that isn't in our sample tables.
dupeBlind2 = dupeBlind[(sib1[,7]%in%otherInfo$PIID)&(sib2[,7]%in%otherInfo$PIID),]
print("Pairs of intended duplicates where both best.array ids are actually in our raw sample tables:")
print(dim(dupeBlind2)[1])
# the reverse (what's left?)
dupeBlind3 = dupeBlind[!((sib1[,7]%in%otherInfo$PIID)&(sib2[,7]%in%otherInfo$PIID)),]
# NOTE(review): `intended` is not defined until further down in this script
# (the Status..SM. == "Intended" mask); run top-to-bottom this line errors,
# and `j` is never used afterwards. Looks like an interactive-session relic.
j = dupeAnnot[intended,]
# Pairs from dupeBlind3 (i.e. not fully present in the sample tables) that
# nonetheless appear in the annotated duplicates list.
inAnnot = dupeBlind3[(dupeBlind3$Sibling.Best.Array.caps%in%c(dupeAnnot$Best_Array.x,dupeAnnot$Best_Array.y))|(dupeBlind3$Best.Array.caps%in%c(dupeAnnot$Best_Array.x,dupeAnnot$Best_Array.y)),]
# Look the detected partner up in twinDupe, trying each ID column and
# orientation in turn, filling only entries still missing after each attempt.
inAnnot$myDuplicate = twinDupe$ID2[match(inAnnot$Sibling.Best.Array.caps,twinDupe$ID1)]
inAnnot$myDuplicate[is.na(inAnnot$myDuplicate)] = twinDupe$ID1[match(inAnnot$Sibling.Best.Array.caps[is.na(inAnnot$myDuplicate)],twinDupe$ID2)]
inAnnot$myDuplicate[is.na(inAnnot$myDuplicate)] = twinDupe$ID2[match(inAnnot$Best.Array.caps[is.na(inAnnot$myDuplicate)],twinDupe$ID1)]
inAnnot$myDuplicate[is.na(inAnnot$myDuplicate)] = twinDupe$ID1[match(inAnnot$Best.Array.caps[is.na(inAnnot$myDuplicate)],twinDupe$ID2)]
# are there duplicates intended, but
# Did we find all of them???
# Order-insensitive pair keys: concatenate IDs in both orders.
dupeBlind2Pairs = c(paste0(dupeBlind2$Best.Array.caps,dupeBlind2$Sibling.Best.Array.caps),paste0(dupeBlind2$Sibling.Best.Array.caps,dupeBlind2$Best.Array.caps))
twinDupeP = c(paste0(twinDupe$ID2,twinDupe$ID1),paste0(twinDupe$ID1,twinDupe$ID2))
notFound = !paste0(dupeBlind2$Best.Array.caps,dupeBlind2$Sibling.Best.Array.caps)%in%twinDupeP
missed = dupeBlind2[notFound,]
missedUnique = unique(c(missed$Sibling.Best.Array.caps,missed$Best.Array.caps))
sum(!missedUnique%in%otherInfo$PIID)
########
# Check set of pairs by Sample ID
########
sampleNameTab = table(otherInfo$CollectionID)
dupSampleNames = names(sampleNameTab)[sampleNameTab>1]
dupSampleInfo = otherInfo[otherInfo$CollectionID%in%dupSampleNames,]
print(length(dupSampleNames)) # 585 unique names
print(dim(dupSampleInfo)[1]) # 1179 pairs
# For each duplicated collection ID, count pairs fully found in twinDupe (ps)
# and pairs with at least one member found (any; shadows base::any locally).
pairsFound = sapply(unique(dupSampleInfo$CollectionID),function(s){
inds = dupSampleInfo$Best.Array[dupSampleInfo$CollectionID==s]
ps = sum((twinDupe$ID1%in%inds)&(twinDupe$ID2%in%inds))
any = sum((twinDupe$ID1%in%inds)|(twinDupe$ID2%in%inds))
c(ps,any)
})
which(pairsFound[2,]==0)
# 2 duplicated sample names with no individuals in our duplicates list
# 5 duplicated sample names with only ONE individual in our duplicates list
# UKBL__2279896
# UKBL__4182533
# Have we found extras?
# NOTE(review): `intended` is again used before its definition below.
dupeAnnotIntended = dupeAnnot[intended,]
# NOTE(review): unfinished fragment -- the result is discarded, and
# `dupeAnnotIntended$Best.Array.x` does not match the Best_Array.x column
# naming used elsewhere ($ partial matching cannot bridge "." vs "_");
# confirm what this check was meant to compute.
dupeAnnotIntended$Best.Array.x%in%dupeBlind2$Best.Array.
# Colin's duplicates exclusion list (these are excluded from the release output)
dupeExclFile = "/well/ukbiobank/expt/V2_QCed.identical_samples/data/V2_QCed.duplicates_exclude.txt"
dupeExl = read.delim(dupeExclFile,header=FALSE,stringsAsFactors=FALSE,sep="\t")[,1]
#dupeAnnot[(dupeAnnot$Best_Array.x%in%dupeExl)&(dupeAnnot$Best_Array.y%in%dupeExl),"Status..SM."]
# Logical masks over the annotated duplicates list by curation status, plus
# order-insensitive pair keys (both concatenation orders) per status.
unintended = dupeAnnot$Status..SM.=="Unintended"
intended = dupeAnnot$Status..SM.=="Intended"
unintendedPairs = c(paste0(dupeAnnot$Best_Array.x[unintended],dupeAnnot$Best_Array.y[unintended]),paste0(dupeAnnot$Best_Array.y[unintended],dupeAnnot$Best_Array.x[unintended]))
intendedPairs = c( paste0(dupeAnnot$Best_Array.x[intended],dupeAnnot$Best_Array.y[intended]),paste0(dupeAnnot$Best_Array.y[intended],dupeAnnot$Best_Array.x[intended]))
twn = dupeAnnot$Status..SM.=="Twins"
confirmedTwinPairs = c( paste0(dupeAnnot$Best_Array.x[twn],dupeAnnot$Best_Array.y[twn]),paste0(dupeAnnot$Best_Array.y[twn],dupeAnnot$Best_Array.x[twn]))
#missingDupes = dupeAnnot[(!dupeAnnot$Best_Array.y%in%twinDupe$ID1)&(!dupeAnnot$Best_Array.y%in%twinDupe$ID2),]
#nKin = table(c(kinRaw$ID1,kinRaw$ID1))
#nKin[missingDupes$Best_Array.x]
#nKin[missingDupes$Best_Array.y]
# NOTE: There are four pairs of duplicates which end up being excluded after pruning the kinship table.
# The following two are in the released data, but filtered out of kinship:
# A550484-4254432-072416-927_B09 A550484-4239209-122815-940_D04 <= twins. But *D04 was in the list of hetmiss outliers. Both individuals are in the release output data.
# A550465-4276624-031217-258_G03 A550465-4196233-091014-716_H06 <== Not twins, but A550465-4196233-091014-716_H06 has > 200 other 'relatives'.
# The other two pairs involve one individual that's excluded from the release output data. They are unintended duplicates, and have one in common: A550465-4195511-082814-525_E05. This is in Colin's duplicates exclusion list. Its 'partner' duplicates are themselves duplicates.
# Furthermore, A550465-4195511-082814-525_E05 has > 200 other 'relatives'.
# Who are genuine twins? This has to be generated by find-families.R Might not exist...
#realTwinsFile = paste0(inPrefix,"-nodupes-duplicates-twins.txt")
#realTwins = read.table(realTwinsFile,header=TRUE,stringsAsFactors=FALSE)
realTwins = twinDupe[(!twinDupe$ID1 %in% dupeExl)&(!twinDupe$ID2 %in% dupeExl),] # exclude any pair with one individual that's in the exclusion file.
# Find the duplicate pairs
# Pairs surviving the exclusion filter are treated as twins; the rest as
# duplicates.
twins = paste0(twinDupe$ID1,twinDupe$ID2)%in%paste0(realTwins$ID1,realTwins$ID2)
dupes = !twins
# Sanity check: should print TRUE.
dim(realTwins)[1]==sum(twins)
# Read in the results from plink --genome output
ibdFile = paste0(baseSampleQCDir,"/data/Relatedness/",inPrefix,"-duplicates-genetic-distances-release.genome.dupes")
ibd = read.table(ibdFile,header=TRUE,stringsAsFactors=FALSE)
# count snps that are exactly the same genotype (IBS2)
# NOTE(review): both loops below rescan the whole ibd table once per pair
# (O(pairs x rows)) and assume every pair appears exactly once in the
# .genome file; a missing pair yields a zero-length element, in which case
# sapply would return a list rather than a numeric vector.
IBS2 = sapply(1:nrow(twinDupe),function(i){
s1 = twinDupe[i,"ID1"]
s2 = twinDupe[i,"ID2"]
S = which(( (ibd[,"IID1"]==s1)&(ibd[,"IID2"]==s2) ) | ( (ibd[,"IID2"]==s1)&(ibd[,"IID1"]==s2) ))
d = ibd[S,"IBS2"]
#print(d)
return(d)
})
# count snps that are non-missing in both
NSNPs = sapply(1:nrow(twinDupe),function(i){
s1 = twinDupe[i,"ID1"]
s2 = twinDupe[i,"ID2"]
S = which(( (ibd[,"IID1"]==s1)&(ibd[,"IID2"]==s2) ) | ( (ibd[,"IID2"]==s1)&(ibd[,"IID1"]==s2) ))
d = sum(ibd[S,c("IBS2","IBS1","IBS0")])
#print(d)
return(d)
})
# Fraction of mutually non-missing SNPs with identical genotypes.
fracs = IBS2/NSNPs
twinDupe$IBS2 = IBS2
twinDupe$NMsnps = NSNPs
twinDupe$fracConcordance = fracs
twinDupe$fracDiscordance = 1-fracs
# Subset to just the duplicates
dupePairs = twinDupe[dupes,]
twinPairs = twinDupe[!dupes,]
# write out the results
write.table(twinDupe,file= paste0(inPrefix,"-duplicates-twins-with-discordance.txt"),quote=FALSE,col.names=TRUE,row.names=FALSE)
write.table(dupePairs,file= paste0(inPrefix,"-duplicates-with-discordance.txt"),quote=FALSE,col.names=TRUE,row.names=FALSE)
#########
save(twinDupe,dupePairs,realTwins,dupeAnnot,file=paste0(inPrefix,"-duplicates-twins-with-discordance.RData"))
#########
# plot the results
# Summary statistics printed to the log.
print(paste0(dim(dupePairs)[1]," duplicated pairs in kinship table."))
print(paste0(sum(twins)," genuine twin pairs in kinship table."))
print(paste0(length(unique(c(dupePairs$ID1,dupePairs$ID2)))," unique samples among duplicate pairs."))
print(paste0("Mean fraction discordance: ",mean(dupePairs$fracDiscordance,na.rm=TRUE)))
print(paste0("SD fraction discordance: ",sd(dupePairs$fracDiscordance,na.rm=TRUE)))
print(paste0("Range fraction discordance: ",paste(range(dupePairs$fracDiscordance,na.rm=TRUE),collapse=" to ")))
# Concordance/discordance histograms saved under plots/. All panels share
# x-limits derived from the full twinDupe table.
# Duplicates (i.e. not in twins after applying exclusion list)
png(paste0("plots/",inPrefix,"-duplicates-concordance.png"),width=1000,height=1000,res=150)
hist(100*dupePairs$fracConcordance,breaks=30,xlab="% of non-missing genotypes identical",col="darkgray",main=paste0("Concordance rates for ",dim(dupePairs)[1]," pairs of duplicated samples"),
cex=2,xlim=100*c(min(twinDupe$fracConcordance),1))
dev.off()
png(paste0("plots/",inPrefix,"-duplicates-discordance.png"),width=1000,height=1000,res=150)
hist(100*dupePairs$fracDiscordance,breaks=30,xlab="% of non-missing genotypes discordant",col="darkgray",main=paste0("Discordance rates for ",dim(dupePairs)[1]," pairs of duplicated samples"),
cex=2,xlim=100*c(0,max(twinDupe$fracDiscordance)))
dev.off()
# Unintended duplicates based on the annotated duplicates file
these=paste0(twinDupe$ID1,twinDupe$ID2)%in%unintendedPairs
png(paste0("plots/",inPrefix,"-duplicates-unintended-concordance.png"),width=1000,height=1000,res=150)
hist(100*twinDupe$fracConcordance[these],breaks=30,xlab="% of non-missing genotypes identical",col="darkgray",main=paste0("Concordance rates for ",dim(twinDupe[these,])[1]," pairs of unintended duplicated samples"),
cex=2,xlim=100*c(min(twinDupe$fracConcordance),1))
dev.off()
png(paste0("plots/",inPrefix,"-duplicates-unintended-discordance.png"),width=1000,height=1000,res=150)
hist(100*twinDupe$fracDiscordance[these],breaks=30,xlab="% of non-missing genotypes discordant",col="darkgray",main=paste0("Discordance rates for ",dim(twinDupe[these,])[1]," pairs of unintended duplicated samples"),
cex=2,xlim=100*c(0,max(twinDupe$fracDiscordance)))
dev.off()
# Intended duplicates based on the annotated duplicates file
these=paste0(twinDupe$ID1,twinDupe$ID2)%in%intendedPairs
png(paste0("plots/",inPrefix,"-duplicates-intended-concordance.png"),width=1000,height=1000,res=150)
hist(100*twinDupe$fracConcordance[these],breaks=30,xlab="% of non-missing genotypes identical",col="darkgray",main=paste0("Concordance rates for ",dim(twinDupe[these,])[1]," pairs of intended duplicated samples"),
cex=2,xlim=100*c(min(twinDupe$fracConcordance),1))
dev.off()
png(paste0("plots/",inPrefix,"-duplicates-intended-discordance.png"),width=1000,height=1000,res=150)
hist(100*twinDupe$fracDiscordance[these],breaks=30,xlab="% of non-missing genotypes discordant",col="darkgray",main=paste0("Discordance rates for ",dim(twinDupe[these,])[1]," pairs of intended duplicated samples"),
cex=2,xlim=100*c(0,max(twinDupe$fracDiscordance)))
dev.off()
# Twins
png(paste0("plots/",inPrefix,"-twins-concordance.png"),width=1000,height=1000,res=150)
hist(100*twinPairs$fracConcordance,breaks=30,xlab="% of non-missing genotypes identical",col="darkgray",main=paste0("Concordance rates for ",dim(twinPairs)[1]," pairs of twins"),
cex=2,xlim=100*c(min(twinDupe$fracConcordance),1))
dev.off()
png(paste0("plots/",inPrefix,"-twins-discordance.png"),width=1000,height=1000,res=150)
hist(100*twinPairs$fracDiscordance,breaks=30,xlab="% of non-missing genotypes discordant",col="darkgray",main=paste0("Discordance rates for ",dim(twinPairs)[1]," pairs of twins"),
cex=2,xlim=100*c(0,max(twinDupe$fracDiscordance)))
dev.off()
|
f28c0c1e79f1773e698fdaea567fdcfca13bfe71 | 8e54e5a1c8cb0c89a6751e6c6cb17215eb5ff586 | /R/gutenberg_article_extraction.R | c51d5a77efa155130ec7901e8b7a11ae461e4297 | [] | no_license | jandziak/Integration-of-Text-Mining-and-Topic-Modeling-Tools | 15ed73aac5c03ced6deccd6ae69f24d15fb39de7 | 9f2fe01bd80ca1b881a381dadfde300c41c37232 | refs/heads/master | 2021-01-10T10:32:10.136673 | 2016-03-21T12:16:42 | 2016-03-21T12:16:42 | 53,896,364 | 2 | 0 | null | 2016-03-20T10:38:06 | 2016-03-14T22:26:56 | R | UTF-8 | R | false | false | 976 | r | gutenberg_article_extraction.R | #' Function to extract the article from the Project Gutenberg page
#'
#' @param url A complete path to the text file of the book selected for download.
#' @param full_text Logical parameter determining whether to keep the Project Gutenberg disclaimer (TRUE) or exclude it (FALSE).
#'
#' @return book_string The string of the book content. (With or without disclaimer)
#' @examples
#' extract_article("http://www.gutenberg.org/cache/epub/11503/pg11503.txt", TRUE)
#' extract_article("http://www.gutenberg.org/files/51428/51428-0.txt", FALSE)
extract_article <- function(url, full_text = TRUE){
  # Read the raw book text, one character string per line.
  book <- readLines(url, encoding = "UTF-8")
  if (!full_text) {
    # Project Gutenberg wraps the body between "*** START OF THIS PROJECT
    # GUTENBERG ..." and "*** END OF THIS PROJECT GUTENBERG ..." marker
    # lines; keep only the span between them (marker lines included, as in
    # the original behaviour).
    disclaimer <- c("\\*\\*\\* START OF THIS PROJECT GUTENBERG",
                    "\\*\\*\\* END OF THIS PROJECT GUTENBERG")
    matches_index <- grep(paste(disclaimer, collapse = "|"), book)
    if (length(matches_index) > 0) {
      book <- book[min(matches_index):max(matches_index)]
    } else {
      # Previously min()/max() on an empty match set produced an invalid
      # subscript and an error; fall back to the full text with a warning.
      warning("Project Gutenberg start/end markers not found; returning full text.",
              call. = FALSE)
    }
  }
  # Collapse all lines into a single space-separated string.
  paste(book, collapse = " ")
}
|
6a08847e07f5e8075ae3ea8135877fd10e3f2ae3 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/estudy2/examples/apply_market_model.Rd.R | d642527a417bc4a31f3f315e0419ff3b7fe7db85 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,005 | r | apply_market_model.Rd.R | library(estudy2)
# Auto-extracted example code from the Rd help page of
# estudy2::apply_market_model. The "### " headers and "## Not run:" /
# "##D" lines are produced by R's example extractor; "##D" lines are the
# portions not executed by R CMD check (they fetch prices from tickers).
### Name: apply_market_model
### Title: Apply a market model and return a list of 'returns' objects.
### Aliases: apply_market_model
### ** Examples
## 1. Mean-adjusted-returns model
## Not run: 
##D library("magrittr")
##D tickers <- c("ALV.DE", "CS.PA", "G.MI", "HNR1.HA", "HSX.L", "MUV2.DE",
##D "RSA.L", "TOP.CO")
##D securities_returns <- get_prices_from_tickers(tickers,
##D start = as.Date("2000-01-01"),
##D end = as.Date("2002-01-01"),
##D quote = "Close",
##D retclass = "zoo") %>%
##D get_rates_from_prices(quote = "Close",
##D multi_day = TRUE,
##D compounding = "continuous") %>%
##D apply_market_model(market_model = "mean_adj",
##D estimation_start = as.Date("2001-03-26"),
##D estimation_end = as.Date("2001-09-10"))
## End(Not run)
## The result of the code above is equivalent to:
data(rates)
securities_returns <- apply_market_model(
rates,
market_model = "mean_adj",
estimation_start = as.Date("2001-03-26"),
estimation_end = as.Date("2001-09-10")
)
## 2. Market-adjusted-returns model
## Not run: 
##D library("magrittr")
##D rates_indx <- get_prices_from_tickers("^STOXX50E",
##D start = as.Date("2000-01-01"),
##D end = as.Date("2002-01-01"),
##D quote = "Close",
##D retclass = "zoo") %>%
##D get_rates_from_prices(quote = "Close",
##D multi_day = TRUE,
##D compounding = "continuous")
##D tickers <- c("ALV.DE", "CS.PA", "G.MI", "HNR1.HA", "HSX.L", "MUV2.DE",
##D "RSA.L", "TOP.CO")
##D securities_returns <- get_prices_from_tickers(tickers,
##D start = as.Date("2000-01-01"),
##D end = as.Date("2002-01-01"),
##D quote = "Close",
##D retclass = "zoo") %>%
##D get_rates_from_prices(quote = "Close",
##D multi_day = TRUE,
##D compounding = "continuous") %>%
##D apply_market_model(regressor = rates_indx,
##D same_regressor_for_all = TRUE,
##D market_model = "mrkt_adj",
##D estimation_start = as.Date("2001-03-26"),
##D estimation_end = as.Date("2001-09-10"))
## End(Not run)
## The result of the code above is equivalent to:
data(rates, rates_indx)
securities_returns <- apply_market_model(
rates = rates,
regressor = rates_indx,
same_regressor_for_all = TRUE,
market_model = "mrkt_adj",
estimation_start = as.Date("2001-03-26"),
estimation_end = as.Date("2001-09-10")
)
## 3. Single-index market model
## Not run: 
##D library("magrittr")
##D rates_indx <- get_prices_from_tickers("^STOXX50E",
##D start = as.Date("2000-01-01"),
##D end = as.Date("2002-01-01"),
##D quote = "Close",
##D retclass = "zoo") %>%
##D get_rates_from_prices(quote = "Close",
##D multi_day = TRUE,
##D compounding = "continuous")
##D tickers <- c("ALV.DE", "CS.PA", "G.MI", "HNR1.HA", "HSX.L", "MUV2.DE",
##D "RSA.L", "TOP.CO")
##D securities_returns <- get_prices_from_tickers(tickers,
##D start = as.Date("2000-01-01"),
##D end = as.Date("2002-01-01"),
##D quote = "Close",
##D retclass = "zoo") %>%
##D get_rates_from_prices(quote = "Close",
##D multi_day = TRUE,
##D compounding = "continuous") %>%
##D apply_market_model(regressor = rates_indx,
##D same_regressor_for_all = TRUE,
##D market_model = "sim",
##D estimation_method = "ols",
##D estimation_start = as.Date("2001-03-26"),
##D estimation_end = as.Date("2001-09-10"))
## End(Not run)
## The result of the code above is equivalent to:
data(rates, rates_indx)
securities_returns <- apply_market_model(
rates = rates,
regressor = rates_indx,
same_regressor_for_all = TRUE,
market_model = "sim",
estimation_method = "ols",
estimation_start = as.Date("2001-03-26"),
estimation_end = as.Date("2001-09-10")
)
|
3c5079ac82f39efe40aa11c559252421c7ef8029 | 422a9321879f597bf9031f6f547b75377fe16c38 | /tests/testthat.R | 90e561168ad9935eb57890c714eaee8062e96a67 | [
"MIT"
] | permissive | KMurray12/mmrefpoints | 3b307eab3079d0328041abd1ee150fc3363c16bb | 05b3997ff7a7a02ec886ca32cfb0d625e49c50a1 | refs/heads/master | 2023-05-29T01:14:20.759907 | 2021-05-28T01:13:20 | 2021-05-28T01:13:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 66 | r | testthat.R | library(testthat)
# Test driver: attach the package under test and run its full testthat suite.
library(mmrefpoints)
test_check("mmrefpoints")
|
0dcedc4e60ddbb5a6ffb7372b928200cc6ac54cb | 754be5ab881b61f346986bea05dc03ab5b49094c | /man/translateCOMPASStoflowReMixNames.Rd | cac0d51ac4f07a9ca7af4cafce87bdd7e376131e | [
"MIT"
] | permissive | RGLab/flowReMix | 1a2f791a775be25f6910307129d5846910db4c11 | 732fe42e97474cf4ac2b0ee26b65f75e9949cd13 | refs/heads/master | 2021-01-12T11:26:05.593944 | 2019-02-27T04:50:55 | 2019-02-27T04:50:55 | 72,910,585 | 2 | 1 | MIT | 2019-02-27T04:50:56 | 2016-11-05T07:01:12 | HTML | UTF-8 | R | false | true | 585 | rd | translateCOMPASStoflowReMixNames.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{translateCOMPASStoflowReMixNames}
\alias{translateCOMPASStoflowReMixNames}
\title{Translate COMPASS marker names to FlowReMix format}
\usage{
translateCOMPASStoflowReMixNames(x)
}
\arguments{
\item{x}{\code{matrix} with column names in COMPASS format}
}
\value{
\code{matrix} with column names in FlowReMix format.
}
\description{
Translate COMPASS marker names to FlowReMix format
}
\examples{
\dontrun{
#x is a matrix with columns named by COMPASS.
translateCOMPASStoflowReMixNames(x)
}
}
|
b49ebbbc613eee5ac970b3baeb019055b69bc2da | 22f761644fa84c4fe0086e3a013fd1f636e2ae0c | /man/calendar-register.Rd | e86e7de10694ae30487c6765f52f58d6285b214a | [] | no_license | cran/bizdays | a8fe606fd516f02231f0e6e42377f32747a76892 | fc0512ebbae7cbb9d8b26829ca35004fc3cf9f3d | refs/heads/master | 2023-01-25T04:01:52.148067 | 2023-01-20T16:40:06 | 2023-01-20T16:40:06 | 17,694,808 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,153 | rd | calendar-register.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calendar.R
\name{calendar-register}
\alias{calendar-register}
\alias{calendars}
\alias{remove_calendars}
\alias{has_calendars}
\title{Calendars register}
\usage{
calendars()
remove_calendars(cals)
has_calendars(cals)
}
\arguments{
\item{cals}{character vector of calendars names}
}
\description{
Every calendar created with \code{create.calendar} is stored in the
calendar register.
The idea behind this register is allowing calendars to be accessed
by its names.
}
\details{
\code{calendars} returns the object which represents the calendars register.
Since the register inherits from \code{environment}, the calendars are
retrieved with the \code{[[} operator.
But the register object has its own \code{print} generic which helps listing
all registered calendars.
\code{remove_calendars} remove calendars from the register.
}
\examples{
# ACTUAL calendar
cal <- create.calendar("Actual")
cal <- calendars()[["Actual"]]
remove_calendars("Actual")
# lists registered calendars
calendars()
has_calendars(c("actual", "weekends"))
}
|
c607e3ddaefac0478b7082de029865666adabfd5 | 8ef27de17d0110828d77ca91b4f4e71af73fc12f | /R/modelturnover.R | e595058b711d445c2858b332459fd837abf0f283 | [] | no_license | marcpaga/pulsedSilac | 95537ce75dc65a9573186708b2917ac700c7cbe6 | 23e5e48083b5edfc99c5dbc42bef487610bec5af | refs/heads/master | 2020-05-17T09:29:16.236700 | 2020-03-07T12:58:50 | 2020-03-07T12:58:50 | 183,634,007 | 2 | 0 | null | 2019-12-05T09:54:02 | 2019-04-26T13:31:31 | R | UTF-8 | R | false | false | 23,075 | r | modelturnover.R | #' @rdname modelTurnover
#' @name modelTurnover
#' @title Estimate protein/peptide turnover
#'
#' @description Method to apply turnover models on protein/peptide data
#'
#' @details The nls and nlrob functions have many arguments that can be tuned
#' for parameter fitting. Unfortunately, not all the possible argument
#' combinations have been tested. It is recommended to first test one model
#' with the desired parameters with silent = FALSE to see that it runs smoothly
#' and then run the whole proteome with silent = TRUE to supress failed
#' convergence errors. For example, some methods for nlrob use upper and lower
#' bounds instead of start.
#'
#' Please open an issue on github if the function is having trouble with a
#' particular argument.
#'
#' For robust modelling the method 'CM' and 'mtl' are not yet supported.
#'
#' @param x A \code{SilacProteinExperiment}, \code{SilacPeptideExperiment} or
#' \code{SilacProteomicsExperiment} object.
#' @param assayName \code{character} indicating which assay to use as data
#' input for the model.
#' @param formula \code{formula} to be used. Time must always be named "t" and
#' the data must be named "fraction".
#' @param start named \code{list} with the initial values for the parameters
#' in formula.
#' @param robust \code{logical} indicating if robust modelling from the
#' \code{robustbase} package should be used.
#' @param mode \code{character} indicating which type of data should be used.
#' Can be "protein": one model per protein; "grouped": one model per protein
#' using peptide data; "peptide" one model per peptide.
#' @param verbose \code{logical} indicating if a progress bar should be
#' printed.
#' @param returnModel \code{logical} indicating if the model objects should
#' be returned also in the output.
#' @param conditionCol \code{character} indicating which column of colData(x)
#' describes the conditions.
#' @param timeCol \code{character} indicating which column of colData(x)
#' describes time.
#' @param proteinCol \code{character} indicating which column of rowData(x)
#' describes the assigned protein to a peptide. (Only for peptide data)
#' @param silent \code{logical} indicating if the errors given by nls/nlrob
#' should be printed.
#' @param ... further parameters passed into \code{nls} or \code{nlrob}.
#'
#' @return A named \code{list} with either model metrics in matrices or the
#' model objects.
#'
#' @examples
#' data('wormsPE')
#' wormsPE <- calculateIsotopeFraction(wormsPE, ratioAssay = 'ratio')
#'
#' modelList <- modelTurnover(x = wormsPE[1:10],
#' assayName = 'fraction',
#' formula = 'fraction ~ 1 - exp(-k*t)',
#' start = list(k = 0.02),
#' mode = 'protein',
#' robust = FALSE,
#' returnModel = TRUE,
#' silent = TRUE)
#'
#' @importFrom robustbase nlrob
#' @importFrom R.utils insert
#' @importFrom utils setTxtProgressBar txtProgressBar
#' @importFrom stats nls as.formula
#' @import methods
#' @export
# S4 generic for turnover modelling; the concrete behaviour lives in the
# methods defined below for each experiment class (protein, peptide,
# proteomics). Extra arguments are forwarded through `...`.
setGeneric('modelTurnover', function(x, ...) {
  standardGeneric('modelTurnover')
})
#' @rdname modelTurnover
#' @export
# Protein-level method: fits one turnover model per protein row and per
# condition, using the chosen assay as input. Returns a named list of metric
# matrices (residuals, standard errors, per-parameter estimates/t/p values),
# plus robustness weights and/or the fitted model objects when requested.
setMethod('modelTurnover',
          'SilacProteinExperiment',
          function(x,
                   assayName = 'fraction',
                   formula = 'fraction ~ 1-exp(-k*t)',
                   start = list(k = 0.02),
                   robust = FALSE,
                   mode = 'protein',
                   verbose = FALSE,
                   returnModel = FALSE,
                   conditionCol,
                   timeCol,
                   silent = TRUE,
                   ...){

  ## argument checker ----------------------------------------------------------
  if (!assayName %in% names(assays(x))) {
    txt <- sprintf('%s not found in assay names', assayName)
    stop(txt)
  }
  ## When given, conditionCol/timeCol are stored in metadata so that the
  ## .loopWrapper/.giveMetaoption helpers (defined elsewhere in the package)
  ## can retrieve them below.
  if (!missing(conditionCol)) {
    metadata(x)[['conditionCol']] <- conditionCol
  }
  if (!missing(timeCol)) {
    metadata(x)[['timeCol']] <- timeCol
  }

  ## data processing and configuration -----------------------------------------
  mat <- assays(x)[[assayName]]
  ## columns of each condition; empty conditions are dropped
  loopCols <- .loopWrapper(x, 'conditionCol')
  if (any(vapply(loopCols, length, integer(1)) == 0)) {
    loopCols <- loopCols[which(vapply(loopCols, length, integer(1)) != 0)]
  }
  ## get the condition and time columns to get the vectors from colData
  conditionCol <- .giveMetaoption(x, 'conditionCol')
  timeCol <- .giveMetaoption(x, 'timeCol')
  timeAttr <- colData(x)[, timeCol]
  condAttr <- colData(x)[, conditionCol]
  ## rownames, colnames and conditionnames for dimension naming of output
  ## matrices
  r_names <- rownames(x)
  c_names <- colnames(x)
  cond_names <- names(loopCols)
  ## if models are returned, then we need a list of lists
  ## else then we need lists and matrices
  if (returnModel) {
    modelList <- list()
  }

  ## initialize all the output matrices ----------------------------------------
  ## residuals: one value per protein x sample
  residual_matrix <- matrix(data = NA,
                            nrow = nrow(x),
                            ncol = ncol(x))
  if (!is.null(r_names)) rownames(residual_matrix) <- r_names
  if (!is.null(c_names)) colnames(residual_matrix) <- c_names

  ## residual standard error: one value per protein x condition
  stderror_matrix <- matrix(data = NA,
                            nrow = nrow(x),
                            ncol = length(loopCols))
  if (!is.null(r_names)) rownames(stderror_matrix) <- r_names
  if (!is.null(cond_names)) colnames(stderror_matrix) <- cond_names

  ## there will be a value per condition and parameter, therefore they
  ## are multiplicative
  param_values <- list()
  param_stderror <- list()
  param_tval <- list()
  param_pval <- list()
  for (i in seq_len(length(start))) {
    temp_mat <- matrix(data = NA,
                       nrow = nrow(x),
                       ncol = length(loopCols))
    if (!is.null(r_names)) rownames(temp_mat) <- r_names
    if (!is.null(cond_names)) colnames(temp_mat) <- cond_names
    param_values[[i]] <- temp_mat
    param_stderror[[i]] <- temp_mat
    param_tval[[i]] <- temp_mat
    param_pval[[i]] <- temp_mat
  }
  names(param_values) <- names(start)
  names(param_stderror) <- names(start)
  names(param_tval) <- names(start)
  names(param_pval) <- names(start)

  ## weights are only with robust modelling
  if (robust) {
    weight_matrix <- matrix(data = NA, nrow = nrow(x), ncol = ncol(x))
    if (!is.null(r_names)) rownames(weight_matrix) <- r_names
    if (!is.null(c_names)) colnames(weight_matrix) <- c_names
  }

  ## protein turnover modelling ------------------------------------------------
  ## for each condition and for each protein model protein turnover
  for (i in seq_along(loopCols)) {

    if (returnModel) {
      modelList[[i]] <- list()
    }
    if (verbose) {
      cat('Modelling a condition\n')
    }
    for (j in seq_len(nrow(x))) {

      ## progress bar
      if (verbose) {
        if (j == 1){
          pb <- txtProgressBar(min = 1, max = nrow(x), initial = 0, style = 3)
        }
        setTxtProgressBar(pb = pb, value = j)
        if (j == nrow(x)) {
          cat('\n')
        }
      }

      ## modelDf contains the data to do the model on; the time column is
      ## built once (j == 1) and only the fraction column is refreshed for
      ## each protein row
      if (j == 1) {
        modelDf <- data.frame(t = colData(x)[loopCols[[i]], timeCol],
                              fraction = NA)
      }
      modelDf[, 'fraction'] <- mat[j, loopCols[[i]]]

      ## .modelTurnover returns NULL when the fit fails to converge
      modeldata <- .modelTurnover(data = modelDf,
                                  formula = formula,
                                  start = start,
                                  robust = robust,
                                  returnModel = returnModel,
                                  silent = silent,
                                  ...)
      if (returnModel) {
        if (is.null(modeldata)) {
          modelList[[i]][[j]] <- NA
        } else {
          modelList[[i]][[j]] <- modeldata[['model']]
        }
      }

      ## skip metric extraction for failed fits (matrices keep NA)
      if (is.null(modeldata)) {
        next
      }

      ## extract the data from the model and put it in the output matrices
      residual_matrix[j, loopCols[[i]]] <- modeldata[['residuals']]
      stderror_matrix[j, i] <- modeldata[['stderror']]

      for (param in seq_len(length(start))) {
        param_values[[param]][j, i] <- modeldata[['params.vals']][param]
        param_tval[[param]][j, i] <- modeldata[['params.tval']][param]
        param_pval[[param]][j, i] <- modeldata[['params.pval']][param]
        param_stderror[[param]][j, i] <- modeldata[['params.stderror']][param]
      }

      if (robust) {
        weight_matrix[j, loopCols[[i]]] <- modeldata[['weights']]
      }

    } ## end of row loop

    ##residuals and weights as assays with model name
    ## rest in a matrix
  } ## end of loopCols loop

  ## all the output matrices in a list
  outList <- list(residuals = residual_matrix,
                  stderror = stderror_matrix,
                  param_values = param_values,
                  param_pval = param_pval,
                  param_tval = param_tval,
                  param_stderror = param_stderror)
  if (robust) {
    outList[['weights']] <- weight_matrix
  }
  if (returnModel) {
    outList[['models']] <- modelList
  }

  ## add the configuration as attributes that are using in the plotting
  ## functions; note that `mode` is not used by this method's logic, it is
  ## only recorded here for downstream consumers
  attributes(outList)[['loopCols']] <- loopCols
  attributes(outList)[['time']] <- timeAttr
  attributes(outList)[['cond']] <- condAttr
  attributes(outList)[['assayName']] <- assayName
  attributes(outList)[['mode']] <- mode

  return(outList)

})
#' @rdname modelTurnover
#' @export
# Peptide-level method. mode = 'peptide' delegates to the protein-level
# method (one model per peptide row); mode = 'grouped' fits one model per
# protein using all of its peptides' fractions pooled together.
#
# Fixes relative to the previous version:
#  * mode is resolved with match.arg(): the old `!mode %in% ...` test
#    produced a length-2 condition (an error on R >= 4.2) when mode was
#    left at its vector default.
#  * r_names_prot is now a real formal (default NULL). Previously it was
#    expected to arrive via `...` and recovered with exists(), which never
#    finds names inside `...` — protein row names passed by the
#    ProteomicsExperiment method were silently ignored.
setMethod('modelTurnover',
          'SilacPeptideExperiment',
          function(x,
                   assayName = 'fraction',
                   formula = 'fraction ~ 1-exp(-k*t)',
                   start = list(k = 0.02),
                   robust = FALSE,
                   mode = c('grouped', 'peptide'),
                   verbose = FALSE,
                   returnModel = FALSE,
                   conditionCol,
                   timeCol,
                   proteinCol,
                   r_names_prot = NULL,
                   silent = TRUE,
                   ...){

  ## argument checker ----------------------------------------------------------
  ## validate mode and resolve the default vector to its first element
  mode <- match.arg(mode)

  ## a model for each peptide: every row is independent, so the protein-level
  ## method can do the work
  if (mode == 'peptide') {
    message('Modelling each peptide individually')
    outList <- callNextMethod()
    return(outList)
  }
  message('Modelling peptides grouped by protein')

  if (!assayName %in% names(assays(x))) {
    txt <- sprintf('%s not found in assay names', assayName)
    stop(txt)
  }

  ## metaoptions part: store the column choices so that the
  ## .loopWrapper/.giveMetaoption helpers can retrieve them below
  if (!missing(conditionCol)) {
    metadata(x)[['conditionCol']] <- conditionCol
  }
  if (!missing(timeCol)) {
    metadata(x)[['timeCol']] <- timeCol
  }
  if (!missing(proteinCol)) {
    metadata(x)[['proteinCol']] <- proteinCol
  }

  ## data processing and configuration -----------------------------------------
  mat <- assays(x)[[assayName]]
  ## columns of each condition; empty conditions are dropped
  loopCols <- .loopWrapper(x, 'conditionCol')
  if (any(vapply(loopCols, length, integer(1)) == 0)) {
    loopCols <- loopCols[which(vapply(loopCols, length, integer(1)) != 0)]
  }
  ## get the condition, time and protein columns from col/rowData
  conditionCol <- .giveMetaoption(x, 'conditionCol')
  timeCol <- .giveMetaoption(x, 'timeCol')
  proteinCol <- .giveMetaoption(x, 'proteinCol')
  timeAttr <- colData(x)[, timeCol]
  condAttr <- colData(x)[, conditionCol]
  protAttr <- as.character(rowData(x)[, proteinCol])
  proteinIds <- unique(rowData(x)[, proteinCol])

  ## rownames, colnames and conditionnames for dimension naming of output
  ## matrices; protein-level row names come from the caller (the
  ## ProteomicsExperiment method) when available, otherwise from the
  ## protein ids found in rowData
  if (!is.null(r_names_prot)) {
    r_names <- r_names_prot
  } else {
    r_names <- proteinIds
  }
  r_names_pept <- rownames(x)
  c_names <- colnames(x)
  cond_names <- names(loopCols)

  ## if models are returned, then we need a list of lists
  if (returnModel) {
    modelList <- list()
  }

  ## initialize all the output matrices ----------------------------------------
  ## residuals/weights are per peptide x sample; the remaining metrics are
  ## per protein x condition
  residual_matrix <- matrix(data = NA,
                            nrow = nrow(x),
                            ncol = ncol(x))
  if (!is.null(r_names_pept)) rownames(residual_matrix) <- r_names_pept
  if (!is.null(c_names)) colnames(residual_matrix) <- c_names

  stderror_matrix <- matrix(data = NA,
                            nrow = length(proteinIds),
                            ncol = length(loopCols))
  if (!is.null(r_names)) rownames(stderror_matrix) <- r_names
  if (!is.null(cond_names)) colnames(stderror_matrix) <- cond_names

  ## one matrix per model parameter and metric
  param_values <- list()
  param_stderror <- list()
  param_tval <- list()
  param_pval <- list()
  for (i in seq_len(length(start))) {
    temp_mat <- matrix(data = NA,
                       nrow = length(proteinIds),
                       ncol = length(loopCols))
    if (!is.null(r_names)) rownames(temp_mat) <- r_names
    if (!is.null(cond_names)) colnames(temp_mat) <- cond_names
    param_values[[i]] <- temp_mat
    param_stderror[[i]] <- temp_mat
    param_tval[[i]] <- temp_mat
    param_pval[[i]] <- temp_mat
  }
  names(param_values) <- names(start)
  names(param_stderror) <- names(start)
  names(param_tval) <- names(start)
  names(param_pval) <- names(start)

  ## weights are only with robust modelling
  if (robust) {
    weight_matrix <- matrix(data = NA, nrow = nrow(x), ncol = ncol(x))
    if (!is.null(r_names_pept)) rownames(weight_matrix) <- r_names_pept
    if (!is.null(c_names)) colnames(weight_matrix) <- c_names
  }

  ## protein/peptide turnover modelling ----------------------------------------
  ## for each condition and for each protein, pool that protein's peptides
  ## and fit a single model
  for (i in seq_along(loopCols)) {

    if (returnModel) {
      modelList[[i]] <- list()
    }
    if (verbose) {
      cat('Modelling a condition\n')
    }
    for (j in seq_along(proteinIds)) {

      ## progress bar
      if (verbose) {
        if (j == 1){
          pb <- txtProgressBar(min = 1, max = length(proteinIds),
                               initial = 0, style = 3)
        }
        setTxtProgressBar(pb = pb, value = j)
        if (j == length(proteinIds)) {
          cat('\n')
        }
      }

      id <- proteinIds[j]
      ## cant use subset because proteinCol is an object
      fracs <- mat[which(rowData(x)[, proteinCol] == id), loopCols[[i]], drop = FALSE]

      ## modelDf contains the data to do the model on; each time point is
      ## repeated once per peptide of this protein
      modelDf <- data.frame(t = rep(colData(x)[loopCols[[i]], timeCol],
                                    each = nrow(fracs)),
                            fraction = as.vector(fracs))

      ## .modelTurnover returns NULL when the fit fails to converge
      modeldata <- .modelTurnover(data = modelDf,
                                  formula = formula,
                                  start = start,
                                  robust = robust,
                                  returnModel = returnModel,
                                  silent = silent,
                                  ...)
      if (returnModel) {
        if (is.null(modeldata)) {
          modelList[[i]][[j]] <- NA
        } else {
          modelList[[i]][[j]] <- modeldata[['model']]
        }
      }

      if (is.null(modeldata)) {
        next
      }

      ## reshape the pooled residual vector back into peptide x sample form
      res <- matrix(modeldata[['residuals']],
                    ncol = length(loopCols[[i]]),
                    nrow = nrow(fracs))
      residual_matrix[which(rowData(x)[,proteinCol] == id), loopCols[[i]]] <- res
      stderror_matrix[j, i] <- modeldata[['stderror']]

      for (param in seq_len(length(start))) {
        param_values[[param]][j, i] <- modeldata[['params.vals']][param]
        param_tval[[param]][j, i] <- modeldata[['params.tval']][param]
        param_pval[[param]][j, i] <- modeldata[['params.pval']][param]
        param_stderror[[param]][j, i] <- modeldata[['params.stderror']][param]
      }

      if (robust) {
        wei <- matrix(modeldata[['weights']],
                      ncol = length(loopCols[[i]]),
                      nrow = nrow(fracs))
        weight_matrix[which(rowData(x)[, proteinCol] == id), loopCols[[i]]] <- wei
      }

    } ## end of row loop
  } ## end of loopCols loop

  ## all the output matrices in a list
  outList <- list(residuals = residual_matrix,
                  stderror = stderror_matrix,
                  param_values = param_values,
                  param_pval = param_pval,
                  param_tval = param_tval,
                  param_stderror = param_stderror)
  if (robust) {
    outList[['weights']] <- weight_matrix
  }
  if (returnModel) {
    outList[['models']] <- modelList
  }

  ## configuration attributes for plotting functions
  attributes(outList)[['loopCols']] <- loopCols
  attributes(outList)[['time']] <- timeAttr
  attributes(outList)[['cond']] <- condAttr
  attributes(outList)[['prot']] <- protAttr
  attributes(outList)[['assayName']] <- assayName
  attributes(outList)[['mode']] <- mode

  return(outList)

})
#' @rdname modelTurnover
#' @export
# ProteomicsExperiment method: a thin dispatcher that forwards to the
# protein- or peptide-level method according to `mode`.
#
# Fix: mode is resolved with match.arg(). The previous
# `if (!mode %in% c(...))` test produced a length-3 condition (an error on
# R >= 4.2) whenever mode was left at its vector default; match.arg() both
# validates the value and resolves the default to 'protein'.
setMethod('modelTurnover',
          'SilacProteomicsExperiment',
          function(x,
                   assayName = 'fraction',
                   formula = 'fraction ~ 1-exp(-k*t)',
                   start = list(k = 0.02),
                   robust = FALSE,
                   mode = c('protein', 'grouped', 'peptide'),
                   verbose = FALSE,
                   returnModel = FALSE,
                   conditionCol,
                   timeCol,
                   proteinCol,
                   silent = TRUE,
                   ...){

  mode <- match.arg(mode)

  ## The three calls are deliberately spelled out (rather than built with
  ## do.call) so that conditionCol/timeCol/proteinCol keep their missingness
  ## when forwarded: the inner methods test them with missing().
  if (mode == 'protein') {
    outList <- modelTurnover(x = x@SilacProteinExperiment,
                             assayName = assayName,
                             formula = formula,
                             start = start,
                             robust = robust,
                             verbose = verbose,
                             returnModel = returnModel,
                             conditionCol = conditionCol,
                             timeCol = timeCol,
                             silent = silent,
                             ...)
  } else if (mode == 'peptide') {
    outList <- modelTurnover(x = x@SilacPeptideExperiment,
                             assayName = assayName,
                             formula = formula,
                             start = start,
                             robust = robust,
                             mode = mode,
                             verbose = verbose,
                             returnModel = returnModel,
                             conditionCol = conditionCol,
                             timeCol = timeCol,
                             silent = silent,
                             ...)
  } else if (mode == 'grouped') {
    ## grouped mode additionally needs the protein column and passes the
    ## protein-level row names so output matrices are named consistently
    outList <- modelTurnover(x = x@SilacPeptideExperiment,
                             assayName = assayName,
                             formula = formula,
                             start = start,
                             robust = robust,
                             mode = mode,
                             verbose = verbose,
                             returnModel = returnModel,
                             conditionCol = conditionCol,
                             timeCol = timeCol,
                             proteinCol = proteinCol,
                             r_names_prot = rownames(x@SilacProteinExperiment),
                             silent = silent,
                             ...)
  }

  return(outList)

})
#' @importFrom stats coefficients sigma
#' @keywords internal
.modelTurnover <- function(data, formula, start, robust, returnModel,
silent = TRUE, ...) {
## internal function that does the actual modelling, robust or not,
## and takes care of NAs
originalnrow <- nrow(data)
if (sum(is.na(data[,2])) > 0) {
isna <- which(!is.na(data[,2]))
data <- data[isna, ]
} else {
isna <- NULL
}
if (robust) {
model <- try(nlrob(formula = as.formula(formula),
data = data,
start = start, ...), silent = silent)
if (is(model, 'try-error')) {
return(NULL)
}
summ <- summary(model)
residuals2 <- residuals(model)
stderror <- sigma(model)
weights2 <- summ[['rweights']]
params.vals <- coefficients(summ)[,1]
params.stderror <- coefficients(summ)[,2]
params.tval <- coefficients(summ)[,3]
params.pval <- coefficients(summ)[,4]
if (!is.null(isna)) {
residuals <- rep(NA, originalnrow)
weights <- rep(NA, originalnrow)
residuals[isna] <- residuals2
weights[isna] <- weights2
} else {
residuals <- residuals2
weights <- weights2
}
outList <- list(residuals = residuals,
stderror = stderror,
weights = weights,
params.vals = params.vals,
params.stderror = params.stderror,
params.tval = params.tval,
params.pval = params.pval)
if (returnModel) {
outList[['model']] <- model
}
return(outList)
} else {
model <- try(nls(formula = as.formula(formula),
data = data,
start = start, ...), silent = silent)
if (is(model, 'try-error')) {
return(NULL)
}
summ <- summary(model)
residuals2 <- residuals(model)
stderror <- sigma(model)
params.vals <- coefficients(summ)[seq_along(start), 1]
params.stderror <- coefficients(summ)[seq_along(start), 2]
params.tval <- coefficients(summ)[seq_along(start), 3]
params.pval <- coefficients(summ)[seq_along(start), 4]
if (!is.null(isna)) {
residuals <- rep(NA, originalnrow)
residuals[isna] <- residuals2
} else {
residuals <- residuals2
}
outList <- list(residuals = residuals,
stderror = stderror,
params.vals = params.vals,
params.stderror = params.stderror,
params.tval = params.tval,
params.pval = params.pval)
if (returnModel) {
outList[['model']] <- model
}
return(outList)
}
}
|
5bf3f35bcc813a5376fd1af98b572bde562df0c7 | ce58874f810dc6c63451be5496d1704eaa4e2e7b | /Lab 7_Cross-Classified Models/MLM_Lab 7 _ Cross Classified Model (New Slab's conflicted copy 2014-11-19).R | 5e045a9ab5287fb788c3864970537b4ae805e21d | [] | no_license | nmldesjardins/MLM | 6c17135be8962fe2d3f8e82a486e7ec561625600 | be7374922602709798858b4057526dfa3647a37a | refs/heads/master | 2020-05-18T01:09:44.182667 | 2015-04-23T05:30:27 | 2015-04-23T05:30:27 | 34,423,295 | 2 | 3 | null | null | null | null | UTF-8 | R | false | false | 4,770 | r | MLM_Lab 7 _ Cross Classified Model (New Slab's conflicted copy 2014-11-19).R | ### This lab goes through cross-classified models and
### basic diagnostics.
###--------------------------------------------------------------------###
### CROSS-CLASSIFIED MODELS ###
###--------------------------------------------------------------------###
#### This part of lab goes through a basic cross-classified model.
#### The dataset is mostly fake, and is not NELS or ATLAS!
#### In it, perceivers (the participants) rated the emotions of a bunch of targets.
#### There are a total of 24 targets and 100 perceivers.
#### Every perceiver rated 1 of 4 sets of 6 targets.
#### The sets of targets are consistent across perceivers; each set
#### was rated by 25 perceivers.
#### We want to see how much of the variance in the emotion perceptions
#### is attributable to perceivers vs. targets.
#### In other words, we're answering two questions:
#### (1) Do people show a lot of bias or idiosyncrancies when they rate targets?
#### (i.e., is perceiver variance high?)
#### (2) Do people agree about their ratings of targets? (i.e., is target variance high?)
#### We can then try to explain the perceiver and target variance with L2 variables.
##### LOAD PACKAGES. #####
library(foreign)
library(lme4)
##### GET DATA. #####
data<-read.spss("perception data.sav", to.data.frame=T, use.value.labels = F)
head(data)
summary(data)
## about the variables:
# percid: perceiver (participant) id
# targid: target id
# perc_emo: the perceiver's rating of the target's emotion
# actual_emo: the target's actual emotion
# targ_gender: target gender; 0 = male; 1 = female
# arousal/dominance/valence: perceiver emotions
## about the data:
# each row is a perception of one target from one perceiver
# perception is the L1 variable
# there are NO L1 predictors
# perceptions are nested in perceivers crossed with targets
## full vs partial crossing:
# Because each perceiver only saw a subset of the targets,
# these data are partially crossed. If each perceiver saw
# all 24 targets, it would be fully crossed. Both types
# of models are specified in the same way.
# We can see the crossing structure here (we can
# also see that we have missing data):
xtabs(~percid + targid, data=data)
##### CROSS-CLASSIFIED MODEL #####
## Again, we want to know how much of the variance in the
## perceptions is attributable to perceivers vs. targets.
## We have no L1 predictors, so we can only have an intercept + random effects.
model1<- lmer(perc_emo~1 + (1|percid) + (1|targid), data=data)
summary(model1)
### Get ICCs:
as.data.frame(VarCorr(model1))
p_var<-as.data.frame(VarCorr(model1))[1,4]
t_var<-as.data.frame(VarCorr(model1))[2,4]
err_var<-as.data.frame(VarCorr(model1))[3,4]
tot_var<-p_var+t_var+err_var
p_ICC<-p_var/tot_var
t_ICC<-t_var/tot_var
p_ICC*100
t_ICC*100
### Add a perceiver predictor:
## Does the perceiver's emotion explain some of the variance in their ratings?
model2<- lmer(perc_emo ~ valence + (1|percid) + (1|targid), data=data)
summary(model2)
### Add a target predictor:
## Does the target's gender influence perceptions of their emotions?
model3<- lmer(perc_emo ~ actual_emo + (1|percid) + (1|targid), data=data)
summary(model3)
###--------------------------------------------------------------------###
### DIAGNOSTICS ###
###--------------------------------------------------------------------###
## This goes through the Lecture 16 diagnostics (starting at p. 11).
##### LOAD PACKAGES. #####
library(ggplot2)
library(lme4)
##### LOAD DATA. #####
## For these plots, we'll be using the NELS88 dataset.
nels<-read.spss("NELS88.sav", to.data.frame=T)
head(nels)
summary(nels)
##### FIT THE MODEL. #####
## compute mean SES by school
nels$meanSES<-ave(nels$ses,nels$Schoolid)
mod1<- lmer(mathscore ~ timeonmath + meanSES + (timeonmath|Schoolid), data=nels)
mod2<- lme(mathscore ~ timeonmath + meanSES, ~timeonmath|Schoolid, data=nels, na.action = na.exclude)
##### RESIDUAL PLOTS. #####
### L1 PLOTS.
## boxplot of residuals by school (p. 14)
boxplot(resid(mod1)~nels$Schoolid)
## plot of residuals vs predicted (fitted()) values
# GET LINE AT 0?
plot(resid(mod1, scaled=T)~fitted(mod1))
## histogram of standardized (scale()) residuals -- error distribution
hist(scale(resid(mod1)))
# OR
hist(resid(mod1, scaled=T))
## qq plot
qqnorm(resid(mod1))
## by predictor
# time on math (in the model) (p. 18)
plot(resid(mod1)~nels$timeonmath)
# parent education (not in the model) (p. 19)
plot(resid(mod1)~nels$parented)
# L2 -- should have single value for each group?
l2eb<-coef(mod2)
grpm<-ave(nels$ses, nels$Schoolid)
l2eb<-cbind(l2eb,unique(grpm))
# intercept
plot(l2eb$"(Intercept)"~l2eb$"unique(grpm)")
# slope
plot(l2eb$timeonmath~l2eb$"unique(grpm)") |
d0d23ebd6ab15ce0fd32ea9ecd260d0211314ee3 | b0abc4e1a9593f7bfbc01e4c42687d3d702fa1f7 | /lp.tools/man/Logger.Rd | 056766684ab8bd1e4926b0fcf43ab248d8653a7c | [] | no_license | geohof/R-Library-Public | 6f8abaf3656b4ba2ce572f14d4fd82de38eccd53 | 0424b26664e4bbdd2fb92a18f78e981b68dc2cbf | refs/heads/master | 2020-04-05T08:29:56.394563 | 2019-01-28T14:25:47 | 2019-01-28T14:25:47 | 25,771,442 | 0 | 0 | null | 2018-12-18T15:57:36 | 2014-10-26T12:32:47 | R | UTF-8 | R | false | true | 386 | rd | Logger.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lp.tools.R
\docType{class}
\name{Logger}
\alias{Logger}
\title{Logging via R6 object}
\format{\code{\link{R6Class}} object.}
\usage{
Logger
}
\value{
Object of \code{\link{R6Class}} Logger
}
\description{
Log to screen, file and to a data.frame.
}
\details{
Details: TODO.
}
\keyword{Log}
\keyword{Logging}
|
98585e543246c8f2cc1a091877c58b001422e069 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/TPmsm/examples/image.TPCmsm.Rd.R | 88903814a643a345156974bf34820769262cd5e8 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 837 | r | image.TPCmsm.Rd.R | library(TPmsm)
### Name: image.TPCmsm
### Title: image method for a TPCmsm object
### Aliases: image.TPCmsm
### Keywords: hplot methods multivariate survival

### ** Examples

# Set the number of threads
nth <- setThreadsTP(2)
# Create survTP object
data(heartTP)
heartTP_obj <- with( heartTP, survTP(time1, event1, Stime, event, age=age) )
# Compute LIN conditional transition probabilities with confidence band
# (bootstrap with 100 replicates; conditioning covariate is age)
TPCmsm_obj <- transLIN(heartTP_obj, s=57, t=310, x=0, conf=TRUE, n.boot=100,
method.boot="basic")
# Plot image with Time in the x-axis (tr.choice selects which transitions)
image(TPCmsm_obj, image.type="tc", tr.choice=c("1 1", "1 2", "2 2"), conf.int=TRUE,
ylab="Age")
# Plot image with Time in the y-axis
image(TPCmsm_obj, image.type="ct", tr.choice=c("1 1", "1 2", "1 3"), conf.int=TRUE,
xlab="Age")
# Restore the number of threads
setThreadsTP(nth)
|
16da541401a0fefda9d14761846eeed999cc5fb5 | cf6dd5e431c3bf01df4d4b181c2e116eb8617bf0 | /Aigues de Barcelona/Code/Analysis_1.R | e6f145a498f3bac386af5caf7486a70b01dbd2c0 | [] | no_license | asinga1982/Competitions | e1503bc72133241b27ea1ea3bc2b8e41964c04d1 | e48464a9439cbc54c8a34dc5b39bc9b29e582753 | refs/heads/master | 2020-12-25T06:46:51.273564 | 2017-06-22T21:01:35 | 2017-06-22T21:01:35 | 64,934,215 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,253 | r | Analysis_1.R | #ANALYSIS FOR PREDICTING CONTACTS
# New,ended contracts and pre contacts consolidated by date
nc_byDate <- aggregate(newCont1$NEW_CONTRACTS, by=list(newCont1$Date), FUN=sum)
colnames(nc_byDate) <- c("Date", "NewContracts")
nc_byDate <- nc_byDate[with(nc_byDate, order(Date)),]
ec_byDate <- aggregate(endCont1$ENDED_CONTRACTS, by=list(endCont1$Date), FUN=sum)
colnames(ec_byDate) <- c("Date", "EndedContracts")
ec_byDate <- ec_byDate[with(ec_byDate, order(Date)),]
preCont$Date <- as.Date(preCont$START.DATE, format="%Y-%m-%d")
precont_byDay <- aggregate(preCont$Contacts, by=list(preCont$Date), FUN=sum)
colnames(precont_byDay) <- c("Date", "Contacts")
precont_byDay <- precont_byDay[with(precont_byDay, order(Date)),]
precont_byDayType <- aggregate(preCont$Contacts, by=list(preCont$Date, preCont$CONTACT.TYPE),
FUN=sum)
colnames(precont_byDayType) <- c("Date", "Type","Contacts")
precont_byDayType <- precont_byDayType[with(precont_byDayType, order(Date)),]
#Combine the 2 datasets - new and ended
comb_byDate <- join(nc_byDate, ec_byDate, type="full")
comb_byDate <- comb_byDate[-1,]
#Remove nulls
comb_byDate$NewContracts[is.na(comb_byDate$NewContracts)] <- 0
comb_byDate$EndedContracts[is.na(comb_byDate$EndedContracts)] <- 0
#Add additonal features to combined data
comb_byDate$weekday <- weekdays(comb_byDate$Date)
comb_byDate$total <- comb_byDate$NewContracts + comb_byDate$EndedContracts
precont_byDayType$weekday <- weekdays(precont_byDayType$Date)
precont_byDayType$weekend <- "N"
precont_byDayType$weekend <- ifelse(precont_byDayType$weekday %in%
c('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'), "N", "Y" )
# Visualization
ggplot(comb_byDate) + geom_point(aes(Date,total, group=weekday ,color=weekday) )
hist(log(comb_byDate$NewContracts+1), breaks=50)
#Combine this with contacts
allcomb <- join(comb_byDate, precont_byDay, type="inner")
allcomb$Mth <- as.numeric(substr(allcomb$Date,6,7))
allcomb$Qtr <- as.factor(quarters(allcomb$Date))
allcomb$year <- as.numeric(substr(allcomb$Date,1,4))
#Visuals
ggplot(allcomb) + geom_point(aes(total,Contacts, group=weekday ,color=weekday)) +
facet_grid(year~Qtr)
ggplot(precont_byDayType[precont_byDayType$Type!="Call - Input" &
precont_byDayType$Type!="Visit" &
precont_byDayType$Type!="Web - Input" &
precont_byDayType$Type!= "Mail - Recieved" &
precont_byDayType$Type!= "Internal Management",]) +
geom_point(aes(Date,Contacts, group=weekend ,color=weekend)) +
facet_grid(Type~.)
ggplot(precont_byDayType) +
geom_point(aes(Date,Contacts, group=weekend ,color=weekend))
# Consolidate by year and quarter
precont_byDayType$Qtr <- quarters(precont_byDayType$Date)
precont_byDayType$year <- as.numeric(substr(precont_byDayType$Date,1,4))
precont_byDay$Qtr <- quarters(precont_byDay$Date)
precont_byDay$year <- as.numeric(substr(precont_byDay$Date,1,4))
precont_byYearQtr <- aggregate(precont_byDay$Contacts, by=list(precont_byDay$Qtr,
precont_byDay$year),
FUN=sum)
colnames(precont_byYearQtr) <- c("Qtr", "Year", "Contacts")
|
230e99f3f13f2bfe0074c13fa47a4429c00c6ef7 | 7505da6d4b338f172cac1af24d692302d42be6bc | /man/f0.rosenbrock4.Rd | 5297cf68d57b9eb49296837bb9d44079826465b4 | [
"MIT"
] | permissive | antonio-pgarcia/evoper | 367da295fd704bbde96370c990b8be56d70879b5 | 5337eb8917ed851ffb5f916023d08de12bf281d1 | refs/heads/master | 2021-01-19T04:18:37.948801 | 2020-08-30T10:25:53 | 2020-08-30T10:25:53 | 61,146,979 | 6 | 1 | null | null | null | null | UTF-8 | R | false | true | 582 | rd | f0.rosenbrock4.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test-functions.R
\name{f0.rosenbrock4}
\alias{f0.rosenbrock4}
\title{f0.rosenbrock4}
\usage{
f0.rosenbrock4(x1, x2, x3, x4)
}
\arguments{
\item{x1}{The first function variable}
\item{x2}{The second function variable}
\item{x3}{The third function variable}
\item{x4}{The fourth function variable}
}
\value{
The function value
}
\description{
The Rosenbrock function of 4 variables for testing
optimization methods. The global optimum of the function is attained at
x_i = 1 for all i in {1, ..., N}, where f(x) = 0.
}
|
87711cf453ad669ef021301e8b6f39dc90d74c26 | fd03276234b78a4ed9cac26b9c4b398765de5ae1 | /plot1.R | 7eb2a627c899a350653a4591e5c790df1eedf6ed | [] | no_license | swamypv/ExData_Plotting1 | a41654ebe92528d14bdebc526396de1519f8e981 | dfd62eb49f8949a26968779bfca0c1864aa05ddf | refs/heads/master | 2021-01-18T07:43:14.837071 | 2014-05-11T10:52:23 | 2014-05-11T10:52:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 832 | r | plot1.R | plot1 <- function(){
#Get line numbers of the rows for the two target dates, 1/2/2007 and 2/2/2007
#(dates in the file are d/m/yyyy, so "^[1-2]/2/2007" matches both days)
lines <- grep('^[1-2]/2/2007', readLines("household_power_consumption.txt"))
#Read exactly those rows.
#Bug fix: the original used header=TRUE with nrows=length(lines)-1, which made
#read.table consume the FIRST matching observation as column names and silently
#drop it from the data. Read all matched rows with header=FALSE instead; real
#column names are assigned from the file's actual header row below.
energy.data <- read.table(file="./household_power_consumption.txt", sep=";", stringsAsFactors=F, header=FALSE, skip=lines[1]-1, nrows=length(lines))
#Read the real header row (first line of the file)
header_data <- read.table(file="./household_power_consumption.txt", sep=";", stringsAsFactors=F, header=FALSE, nrows=1)
#Assign header to dataset
names(energy.data) <- header_data
#Create histogram with color, heading and axis label
#(assumes Global_active_power parses as numeric; true for these two days,
#which contain no "?" missing-value codes -- verify before reusing elsewhere)
hist(energy.data$Global_active_power, xlab="Global Active Power (Kilowatts)", col="RED", main="Global Active Power")
#Copy the on-screen histogram to a 480x480 png file
dev.copy(device=png, file="./ExData_Plotting1/plot1.png", width=480, height=480)
dev.off()
} |
59b4009ac5c7d3de2cec69bf436707c5f477ec55 | 05e196e8e28339978921846cbec10d8b6c04d46b | /R Scripts/Merge_data_frames.R | 0bf81e2daf028535d4f45366b1b94724f486a76c | [] | no_license | Joey-Herrera/Texas-Metro-Housing-Prices | 998eb27ada9e4e953e7f7578cadd39b3f64cfaca | 58c23a3bded65d215ab4aaeecb8297a672e709e0 | refs/heads/main | 2023-06-05T22:16:38.582276 | 2021-07-02T18:00:17 | 2021-07-02T18:00:17 | 349,486,473 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,300 | r | Merge_data_frames.R | #Merge data frames
###For Zillow data
#Add a YEAR variable (character, e.g. "2013") to the Zillow data frame so it
#can eventually be joined with the ACS/IPUMS data.
Zillow_long <- mutate(Zillow_long, YEAR = format(as.Date(Zillow_long$date, format="%Y-%m-%d"),"%Y"))

#Texas metro areas to keep.
#Bug fix: "Corpus Christi" and "College Station" were listed without the
#", TX" suffix used by every other entry, so the exact %in% match below
#silently dropped those two metros. (Verify the exact spellings against
#unique(Zillow_long$RegionName) if results look off.)
region_list_zillow <- c("Amarillo, TX", "Austin, TX",
                        "Beaumont, TX", "Brownsville, TX",
                        "Corpus Christi, TX", "College Station, TX",
                        "Dallas-Fort Worth, TX", "El Paso, TX",
                        "Houston, TX", "Laredo, TX",
                        "Lubbock, TX",
                        "Midland, TX", "Odessa, TX", "San Angelo, TX",
                        "San Antonio, TX",
                        "Tyler, TX", "Waco, TX", "Wichita Falls, TX")

#Condense to one average price per metro per year to lower the number of
#observations.
Zillow_long_condensed = Zillow_long %>%
  filter(RegionName %in% region_list_zillow) %>%
  group_by(RegionName, YEAR) %>%
  summarise( avg_price = mean(price))

#Keep only 2006-2019 (i.e. take out observations in 2020 and 2021)
extra_years = c( '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013',
                 '2014', '2015', '2016', '2017', '2018', '2019')
Zillow_long_condensed = Zillow_long_condensed %>%
  filter(YEAR %in% extra_years)
###For IPUMS data
#Create the RegionName variable in the IPUMS data by recoding MET2013
#metro-area codes to the Zillow-style metro names used as the merge key.
#Bug fix: the labels for Waco and Wichita Falls previously carried a leading
#space (" Waco, TX", " Wichita Falls, TX"), which could never match the
#Zillow RegionName values, so those metros were silently lost in the merge.
IPUMS_Texas_filtered_2013 = IPUMS_Texas_filtered_2013 %>%
  mutate(RegionName = factor(MET2013, levels = as.character(c("12420", "13140", "15180", "17780", "18580", "19100",
                                                              "26420", "29700", "31180", "33260", "36220", "41660",
                                                              "41700", "46340", "47380", "48660")),
                             labels = c("Amarillo, TX", "Austin, TX", "Beaumont, TX", "Brownsville, TX", "Dallas-Fort Worth, TX",
                                        "El Paso, TX", "Houston, TX", "Laredo, TX", "Lubbock, TX", "Midland, TX", "Odessa, TX", "San Angelo, TX",
                                        "San Antonio, TX", "Tyler, TX", "Waco, TX", "Wichita Falls, TX")))
###Code for joining the ACS data and the Zillow data into a single dataset using year as the common variable
# NOTE(review): Zillow_long_condensed$YEAR is character (produced by format()),
# while the IPUMS YEAR is typically numeric; merge() coerces the key columns to
# a common type when matching, but confirm both keys line up -- and that the
# RegionName labels agree exactly -- or rows will be silently dropped.
TX_housing <- merge(IPUMS_Texas_filtered_2013 , Zillow_long_condensed, by= c("YEAR", "RegionName"))
|
a893dfb18177d28f50112f7be51194331ae636f8 | 5cfa9463ce68e472c7ddad4fbd4bc545996fc626 | /man/brf_summary_cat.Rd | e1ae9b256c4c475fa359ea8c2de9dc4b55e5f176 | [
"MIT"
] | permissive | AdrienLeGuillou/briefr | 277a55cc7758b7fa54919b040c3ea4299fed7194 | d406da1f7ab14886c7229d761c78f665fa78d762 | refs/heads/master | 2022-08-29T17:00:40.695409 | 2022-07-20T13:24:08 | 2022-07-20T13:24:08 | 179,670,547 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 596 | rd | brf_summary_cat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summaries.R
\name{brf_summary_cat}
\alias{brf_summary_cat}
\title{Summarise a categorical column in a data frame with a grouping option}
\usage{
brf_summary_cat(df, data_col, grouping_col = NULL)
}
\arguments{
\item{df}{a data frame}
\item{data_col}{the unquoted name of the column to summarise}
\item{grouping_col}{the unquoted name of the column to use for groupings}
}
\value{
a data frame containing the summary information
}
\description{
Summarise a categorical column in a data frame with a grouping option
}
|
298003f9a6705114dd90906e5169fbef1237be32 | a06d0f71cc50f366f2531d4054b17647ae777566 | /man/promoters_mm9.Rd | 5edd9f20a147c43ceeb1b852cc3a8cdb22070b0e | [
"Artistic-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | CharlesJB/metagene | c50afe14bee3882b774e47de83fd41d974038fac | b01ccc58d07bd644c77aaf649684cb0609c6461c | refs/heads/master | 2021-08-08T10:20:13.092965 | 2021-07-08T04:13:53 | 2021-07-08T04:13:53 | 14,694,458 | 11 | 10 | Artistic-2.0 | 2019-02-13T13:14:17 | 2013-11-25T18:35:41 | R | UTF-8 | R | false | true | 496 | rd | promoters_mm9.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{promoters_mm9}
\alias{promoters_mm9}
\title{Promoters regions of mm9 Entrez genes.}
\format{A \code{GRanges} object with 21677 ranges.}
\usage{
promoters_mm9
}
\value{
A \code{GRanges}.
}
\description{
Each region has a width of 2000 nucleotides, centered at the
transcription start site.
}
\examples{
data(promoters_mm9)
}
\seealso{
\code{\link{get_promoters_txdb}}
}
\keyword{datasets}
|
cb2e498852e04f3aee3c8e694dff005bc3c17337 | c162d4146e41345c2a1a5bd3db425951f967ff64 | /man/pISVMpeptide.Rd | df82085e05913ef23f5b0bf5beea53401386ea19 | [] | no_license | qwwz/pIR | 494c7ec043783e3f8f305b0ef4be7c062a38189f | b35c644bbd1ee7f25fcd62c9948fea78a78ea556 | refs/heads/master | 2021-06-08T00:39:23.625948 | 2016-09-10T23:54:32 | 2016-09-10T23:54:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 433 | rd | pISVMpeptide.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pISVM.R
\name{pISVMpeptide}
\alias{pISVMpeptide}
\title{pISVMpeptide}
\usage{
pISVMpeptide(sequence, model = "default")
}
\arguments{
\item{sequence}{The sequence to be used}
\item{model}{The SVM-based model to be used in the prediction (use "default", "heller" or "branca" options)}
}
\description{
This function predicts the isoelectric point (pI) of a single peptide sequence.
}
|
584481e6b06ebd90c35e8b17d0f5d6d917914ad5 | 403d8ecc10bd3257f104b3d51502731505009087 | /R/bananafy_as_in_bash_scripts.R | d47b52413dfee4f32a0c6afc07dbd0911990180b | [
"MIT"
] | permissive | RMHogervorst/bananafy | 2708874c2f972e8ca68064e342ed1cd348750cc3 | 99685e8fc5fdf5920ab9bf600eb10dc0eebfa483 | refs/heads/master | 2021-09-02T00:15:41.317376 | 2017-12-29T10:42:17 | 2017-12-29T10:42:17 | 115,713,736 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,480 | r | bananafy_as_in_bash_scripts.R | #!/usr/bin/Rscript --vanilla
# Command-line bananafier: overlay an animated banana gif in the lower-right
# corner of an input image.  Usage: bananafy.R <input image> <output file>
args <- commandArgs(trailingOnly = TRUE)
# Bug fix: the script needs both args[[1]] (input) and args[[2]] (output), so
# require two arguments; the original "< 1" check let a single-argument call
# crash later with an opaque subscript-out-of-bounds error.
if (length(args) < 2){
  stop("I think you forgot to input an image and output name? \n")
}
offset <- NULL  # optional "+x+y" geometry override; NULL = auto lower-right corner
debug <- FALSE
suppressPackageStartupMessages(library(magick))
## Commandline version of add banana
#banana <- image_read("images/banana.gif") # this assumes you have a project with the folder /images/ inside.
#add_banana <- function(, offset = NULL, debug = FALSE){
image_in <- magick::image_read(args[[1]])
banana <- image_read("~/scripts/images/banana.gif") # 365w 360 h
image_info <- image_info(image_in)
# gif inputs would multiply frames against the banana's frames; refuse them
if("gif" %in% image_info$format ){stop("gifs are to difficult for me now")}
stopifnot(nrow(image_info)==1)
# scale banana to correct size:
# take the smallest dimension of the input image,
target_height <- min(image_info$width, image_info$height)
# then scale the banana to 1/3 of that size
scaling <- (target_height /3)
front <- image_scale(banana, scaling)
# place in lower right corner:
# offset is the input image's width and height minus the scaled banana's dimensions
scaled_dims <- image_info(front)
x_c <- image_info$width - scaled_dims$width
y_c <- image_info$height - scaled_dims$height
offset_value <- ifelse(is.null(offset), paste0("+",x_c,"+",y_c), offset)
if(debug) print(offset_value)
# composite the banana onto the image once per banana frame, then animate
frames <- lapply(as.list(front), function(x) image_composite(image_in, x, offset = offset_value))
result <- image_animate(image_join(frames), fps = 10)
message("writing bananafied image to ", args[[2]])
image_write(image = result, path = args[[2]])
|
7496294d6a0b9057999d83f5717383c927fb976c | 3863aff88126d9754977927390a367ddf3ae9b61 | /lib/backtest.R | 23f9c397c1c9886869c3286408b03564aaa8bde4 | [] | no_license | ZhengyangXu/FinalProject-5261 | c3ee9164f193f81d52b59145c2167a415de4335e | 0153a38ec18ab04ec43d2bbaa967123ce3fbe5e4 | refs/heads/master | 2020-03-11T11:51:36.281593 | 2018-04-13T00:15:22 | 2018-04-13T00:15:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 429 | r | backtest.R |
backtest <- function(wP,data = data_final[,-1]){
n = dim(data)[1]
return = data[2:n,]/data[1:(n-1),]
weight <- matrix(rep(0,250*482),ncol=482,nrow=250)
weight_mat_nrow <- floor((nrow(data)-250)/60)
for(i in 1:weight_mat_nrow){
weight <- rbind(weight, matrix(rep(wP[i,],60),ncol=ncol(data),nrow = 60,byrow = T))
}
test_daily_return <- diag(weight %*% t(return[1:nrow(weight),]))
return(test_daily_return)
}
|
eb5a297b2d34af8891d0296fffd19806e11e0348 | 7466dbb3f016774d6cb1ddeb142de1edae496378 | /man/V.Rd | 55ca37415712841c33f094a9117c539396ca07ef | [] | no_license | cran/chinese.misc | 0dc04d6470cff7172c76f3a735986ef7128c74da | 369fd6b193e5d969354a31e568fabe53cb596c8c | refs/heads/master | 2021-01-19T09:55:21.948813 | 2020-09-11T20:50:03 | 2020-09-11T20:50:03 | 82,150,007 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,612 | rd | V.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/V.R
\name{V}
\alias{V}
\title{Copy and Paste from Excel-Like Files}
\usage{
V(tofactor = 0, keepblank = 0, sep = "\\t")
}
\arguments{
\item{tofactor}{if this is equal to numeric 1 or \code{TRUE}, characters will be converted to factors. Otherwise no
conversion will be done. The default is not to convert.}
\item{keepblank}{if characters are not to be converted to factors, this argument decides how to deal with
blank cells in character columns. If it is numeric 1 or \code{TRUE}, a blank cell will be converted
to "" (size 0 string). Otherwise it is viewed as \code{NA} (default).}
\item{sep}{a single character to differentiate cells of a table. The default value should be used when
your data is from Excel.}
}
\description{
These functions make it easy to copy and paste data from Excel-like files, especially when there are
blank cells or when different columns have different lengths. All of the functions take the same arguments.
\itemize{
\item \code{V}, when you do not copy rownames or colnames
\item \code{VR}, when the 1st column is for rownames and there are no colnames in what you copy
\item \code{VC}, when there are colnames but no rownames
\item \code{VRC} and the same: \code{VCR}, when there are both rownames and colnames
}
If you copy something from a text document (e.g., Windows Notepad), the function may warn
"incomplete final line found by readTableHeader...". This is because your content does not end with an end of
line sign. You can simply ignore this warning!
}
|
9151fefd7287d64cff14b4be910c1cde65942f1c | 878960a141b8a52b1606a7ccdde801c6dcac4768 | /plot4.R | c87c7080d58f5dcc4ad8a04fd66d455a8a901dea | [] | no_license | patternplot/airpollution | 63700ecd41f22fc80c012a6659d908c6c8c4e197 | 6add5238924df815d94d21d3180ec359339fc840 | refs/heads/master | 2021-01-10T05:27:00.002782 | 2016-03-12T02:21:50 | 2016-03-12T02:21:50 | 53,708,967 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,056 | r | plot4.R | require(ggplot2)
if(!exists("NEI")){
NEI <- readRDS("summarySCC_PM25.rds")
}
if(!exists("SCC")){
SCC <- readRDS("Source_Classification_Code.rds")
}
## Across the United States, how have emissions from coal combustion-related sources changed from 1999-2008?
# First get the SCCs for Coal Sources
coalSCC <- SCC[grep("*coal*",SCC$Short.Name, ignore.case=TRUE),"SCC"]
# Subset to "Baltimore" data
NEICoal <- subset(NEI, NEI$SCC %in% coalSCC)
NEICoalAggr <- aggregate(NEICoal[, 'Emissions'], by=list(NEICoal$year), sum)
colnames(NEICoalAggr) <- c('Year', 'Emissions')
png(filename="plot4.png",width=800,height=600)
plotpng <- ggplot(data=NEICoalAggr, aes(x=Year, y=Emissions/1000)) +
geom_line(aes(group=1, col=Emissions)) + geom_point(aes(size=2, col=Emissions)) +
ggtitle(expression('Total Emissions of PM'[2.5])) +
ylab(expression(paste('PM', ''[2.5], ' in kilotons'))) +
geom_text(aes(label=round(Emissions/1000,digits=2), size=2, hjust=1.5, vjust=1.5)) +
theme(legend.position='none')
print(plotpng)
dev.off() |
9f9d7e727cfe4cc65f9c92ed84d645be0853b554 | dd8d6d76c90a658682dcb7889edfc79adfb81a70 | /PrepScripts/prep_TLE_Pos.R | 872e7a3646fe4f5a4a89df86e46949482f131e4a | [] | no_license | wkumler/EdwardsLipids | 37b20d7a7936dd716cff9593b2c7fe2fcefa9bb5 | 4decfddba8b310e72b3be2702e10fb651639815d | refs/heads/master | 2020-03-28T09:52:28.593100 | 2018-11-20T00:28:02 | 2018-11-20T00:28:02 | 148,065,885 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,522 | r | prep_TLE_Pos.R | # prep_TLE_pos.R
#
################ Caveats and prerequisites #############
#
# Presumes user has installed the R packages "xcms", "CAMERA", "tools", "IPO", with all required dependencies
#
# If multicore tasking is desired, "snowfall" also required; Rmpi doesn't seem to be necessary
#
# This script the following inputs:
#
# 1. A series of .mzXML files from the same dataset, containing centroided ms1 data
# of a single ion mode. File conversion from the Thermo .raw format, centroiding of data,
# and extraction of + and - mode scans into separate files can be accomplished in batch
# using the script "Exactive_full_scan_process_ms1+.r", available from
# https://github.com/vanmooylipidomics/LipidomicsToolbox. The .mzXML files should be placed
# together in a single directory, which can be specified by the user below.
#
# 2. If the package IPO was previously used to optimize xcms peak-picking or group/retcor
# parameters AND automatic import of the optimized settings from an existing .csv file
# is desired, specification of the path to file "IPO_xcmsparamfits_ ... .csv," where ...
# is an ISO 8601 timestamp. A suitable .csv file will be generated if the user elects IPO
# at two user-input points in this script, or such a file can be generated from IPO using
# the helper script optim_centWaveParams_standalone.R, latest version at
# https://github.com/vanmooylipidomics/LipidomicsToolbox/blob/master/optim_centWaveParams_standalone.R
################ Initial setup and variable definition #############
# load required packages
library(tools)
library(xcms)
library(CAMERA)
library(rsm)
library(parallel)
library(IPO)
library(snow) # if multicore tasking is desired
library(BiocParallel)
register(bpstart(MulticoreParam(1))) # register a single-worker BiocParallel backend used by the xcms steps below
library(LOBSTAHS)
# ******************************************************************
################ Basic user begin editing here #############
# ******************************************************************
################ User: define locations of data files and database(s) #############
working_dir <- "/media/windows/Users/willi/Documents/Berkeley/Elab/SURFIN"
setwd(working_dir) # NOTE(review): setwd() makes all relative paths below machine-specific; consider project-relative paths
data_source <- "/media/wkumler/TheVault/6a_TLE_ESI" #Specify working directory for Ubuntu
mzXMLdirs <- c("/mzXML_pos", "/mzXML_neg")
# specify which of the directories above you wish to analyze this time through
chosenFileSubset = paste0(data_source, "/mzXML_pos/")
# specify the ID numbers (i.e., Orbi_xxxx.mzXML) of any files you don't want to push through xcms
excluded.mzXMLfiles = NULL #leaving this blank (i.e. c() ) excludes all of them?
# NOTE(review): NULL/c() means "exclude nothing" -- the exclusion branch further
# down only runs when length(excluded.mzXMLfiles) > 0
retcor.meth = "loess"
# retcor.meth = "obiwarp"
# ******************************************************************
################ Basic user stop editing here #############
# ******************************************************************
################# Define functions; run me first #############
# readinteger: for a given prompt, allows capture of user input as an integer; rejects non-integer input
# readinteger: prompt repeatedly until the user supplies a non-negative
# integer (digits only), then return it as an integer.
readinteger <- function(prompttext) {
  repeat {
    answer <- readline(prompt = prompttext)
    if (grepl("^[0-9]+$", answer)) {
      return(as.integer(answer))
    }
    # invalid input: fall through and ask again
  }
}
# readyesno: for a given prompt, allows capture of user input as y or n; rejects other input
# readyesno: prompt until the user answers exactly "y" or "n"; rejects all
# other input.
#
# Bug fix: the original test grepl("y|n", n) accepted ANY input containing a
# "y" or an "n" anywhere (e.g. "banana", "yes") and returned that full string.
# Require an exact one-character "y"/"n" answer instead.
readyesno = function(prompttext) {
  n = readline(prompt=prompttext)
  if (!n %in% c("y", "n")) {
    return(readyesno(prompttext))
  }
  as.character(n)
}
# verifyFileIonMode: return the ion mode of data in a particular mzXML file,
# by examining "polarity" attribute of each scan in the file
# verifyFileIonMode: return the ion mode of the data in a single mzXML file
# (+1 = all scans positive, -1 = all scans negative) by examining the
# "polarity" attribute of every scan; stops on mixed-mode or undeterminable files.
verifyFileIonMode = function(mzXMLfile) {
  rawfile = xcmsRaw(mzXMLfile) # create an xcmsRaw object out of the file
  # Count scans of each polarity directly.
  # Bug fix: the original indexed table(rawfile@polarity)["negative"]; when a
  # polarity level was entirely absent from the table that lookup returned NA
  # and the if() condition errored. sum() of a comparison is always a count.
  num.scans = length(rawfile@scanindex)
  num.pos = sum(rawfile@polarity == "positive")
  num.neg = sum(rawfile@polarity == "negative")
  if (num.neg == 0 && num.pos == num.scans) {
    filepolarity = 1 # positive
  } else if (num.pos == 0 && num.neg == num.scans) {
    filepolarity = -1 # negative
  } else if (num.pos >= 1 && num.neg >= 1) {
    stop("At least one file in the current dataset contains scans of more than one ion mode.
         Please ensure data for different ion modes have been extracted into separate files. Stopping...")
  } else if (num.pos == 0 && num.neg == 0) {
    stop("Can't determine ion mode of data in the first file. Check manner in which files were converted. Stopping...")
  }
  filepolarity
}
# getSubsetIonMode: return the ion mode of a subset of files, using sapply of verifyFileIonMode
# getSubsetIonMode: return the ion mode ("positive"/"negative") shared by ALL
# files in a subset, using vapply of verifyFileIonMode.
getSubsetIonMode = function(mzXMLfilelist) {
  # +1 per positive-mode file, -1 per negative-mode file
  ionmodecount = sum(vapply(mzXMLfilelist, verifyFileIonMode, numeric(1)))
  if (ionmodecount == length(mzXMLfilelist)) { # can conclude that all files contain positive mode data
    subset.polarity = "positive"
  } else if (ionmodecount == -length(mzXMLfilelist)) { # can conclude that all files contain negative mode data
    subset.polarity = "negative"
  } else {
    # Robustness fix: the original fell through here and later failed with an
    # opaque "object 'subset.polarity' not found"; fail with a clear message.
    stop("Files in this subset do not all share the same ion mode. Stopping...")
  }
  subset.polarity
}
# selectXMLSubDir: allows user to choose which subset of files to process
# selectXMLSubDir: list the directories that actually contain .mzXML files,
# then let the user pick one by its integer index; returns the chosen path.
selectXMLSubDir <- function(mzXMLdirList) {
  print(paste0("mzXML files exist in the following directories:"))
  for (dir.index in seq_along(mzXMLdirList)) {
    # count the .mzXML/.mzxml files below this directory
    file.count <- length(list.files(mzXMLdirList[dir.index], recursive = TRUE,
                                    full.names = TRUE, pattern = "*(.mzXML|.mzxml)"))
    if (file.count > 0) {
      print(paste0(dir.index, ". ", file.count, " .mzXML files in directory '",
                   mzXMLdirList[dir.index], "'"))
    }
  }
  choice <- readinteger("Specify which subset you'd like to process, using integer input: ")
  mzXMLdirList[choice]
}
# getFNmatches: returns index(es) of file names in a given file list containing the ID numbers in a match list
# getFNmatches: indices of the file names in filelist that contain any of the
# ID numbers in IDnumlist (IDs are OR-ed into a single regular expression).
getFNmatches <- function(filelist, IDnumlist) {
  which(grepl(paste(IDnumlist, collapse = "|"), filelist))
}
# genTimeStamp: generates a timestamp string based on the current system time
# genTimeStamp: build a filename-safe timestamp from the current system time
# (ISO-8601-like; spaces become "_" and colons become "-" for Mac compatibility).
genTimeStamp <- function() {
  stamp <- format(Sys.time(), "%Y-%m-%dT%X%z")
  stamp <- gsub(" ", "_", stamp, fixed = TRUE)
  invisible(gsub(":", "-", stamp, fixed = TRUE))
}
################# Load in mzXML files, get xcms settings from IPO or user input #############
# check to make sure user has specified at least something in mzXMLdirs
if (!exists("mzXMLdirs")) {
  stop("User has not specified any directories containing mzXML files. Specify a value for mzXMLdirs.")
}
# load selected subset for processing
mzXMLfiles.raw = list.files(chosenFileSubset, recursive = TRUE, full.names = TRUE)
# verify the ion mode of the data in these files
# NOTE: the author deliberately hard-codes the polarity here instead of
# scanning every file with getSubsetIonMode() (slow); restore the call below
# when running on a new dataset.
#WILLIAM IS CHEATING HERE, you should actually run the full thing on a new data set
#subset.polarity = getSubsetIonMode(mzXMLfiles.raw)
subset.polarity = "positive"
# provide some feedback to user
print(paste0("Loaded ",length(mzXMLfiles.raw)," mzXML files. These files contain ",
             subset.polarity," ion mode data. Raw dataset consists of:"))
print(mzXMLfiles.raw)
# check whether user has elected to exclude any files, and exclude them if they happen to be in this subset
# Bug fix: use the short-circuiting "&&" so length() is never evaluated when
# excluded.mzXMLfiles does not exist; the non-short-circuiting "&" evaluated
# both sides and errored on the missing object.
if (exists("excluded.mzXMLfiles") && length(excluded.mzXMLfiles) > 0) {
  excludedfiles = getFNmatches(IDnumlist = excluded.mzXMLfiles, filelist = mzXMLfiles.raw) # index files to be excluded
  print(paste0("The following files will be excluded from processing based on user's input:"))
  print(mzXMLfiles.raw[excludedfiles])
  mzXMLfiles = mzXMLfiles.raw[-excludedfiles] # exclude the files from mzXMLfiles
} else {
  mzXMLfiles = mzXMLfiles.raw
}
#####################################################################################
######## Peak-picking & creation of xcmsSet using xcms (and IPO, if desired) ########
#####################################################################################
print(paste0("Using values of centWave parameters specified in the script by user..."))
# "non-optimized" settings listed here are based on recommended "HPLC/Orbitrap settings"
# from Table 1 of Patti et al., 2012, "Meta-analysis of untargeted metabolomic data from
# multiple profiling experiment," Nature Protocols 7: 508-516
centW.min_peakwidth = 10
centW.max_peakwidth = 45 # lowered from Patti et al. recommended HPLC setting of 60 based
#on visual inspection of a single sample with plotPeaks
centW.ppm = 2.5
centW.mzdiff = 0.005
centW.snthresh = 10
centW.prefilter = c(3,7500) # 3.5k recommended by Patti et al. appears to be too low
centW.noise = 500
# specify some additional settings we wish to keep constant, regardless of where the parameters above were obtained
centW.fitgauss = TRUE
centW.sleep = 1
# NOTE(review): centW.sleep is defined but the matching "sleep =" argument is
# commented out in the xcmsSet() call below, so it currently has no effect
centW.mzCenterFun = c("wMean")
centW.verbose.columns = TRUE
centW.integrate = 1
centW.profparam = list(step=0.01) # setting this very low, per Jan Stanstrup; low setting uses more memory but helps
# avoid the situation where mass accuracy eclipses the actual width of the m/z
#windows used to define each peak
#(a real possibility with Orbitrap data; see
#http://metabolomics-forum.com/viewtopic.php?f=8&t=598#p1853)
centW.nSlaves = 4 # if you have r package "snow" installed, can set to number of cores you wish to make use of
# NOTE(review): centW.nSlaves is never passed to xcmsSet() below; parallelism
# is instead controlled by the BiocParallel backend registered at the top
################# Create xcmsSet using selected settings #############
print(paste0("Creating xcmsSet object from ",length(mzXMLfiles),
             " mzXML files remaining in dataset using specified settings..."))
# create xcms xset object; runs WAY faster with multicore tasking enabled;
xset_centWave = xcmsSet(mzXMLfiles,
                        method = "centWave",
                        profparam = centW.profparam,
                        ppm = centW.ppm,
                        peakwidth = c(centW.min_peakwidth,centW.max_peakwidth),
                        fitgauss = centW.fitgauss,
                        noise = centW.noise,
                        mzdiff = centW.mzdiff,
                        verbose.columns = centW.verbose.columns,
                        snthresh = centW.snthresh,
                        integrate = centW.integrate,
                        prefilter = centW.prefilter,
                        mzCenterFun = centW.mzCenterFun,
                        # sleep = centW.sleep
                        BPPARAM = bpparam()
)
print(paste0("xcmsSet object xset_centWave created:"))
print(xset_centWave)
save(xset_centWave, file = "xset_CentWave")
#Conclude xcmsSet object creation, saved as xset_CentWave
#Begin retention time and grouping with xset_CentWave
load("xset_CentWave") # resume from the checkpoint saved above
# Some notes:
#
# 1. If using massifquant or centWave and you are sure your input data are centroided, can ignore warning message
# "It looks like this file is in profile mode. [method] can process only centroid mode data !" since this is
# just based on a heuristic. That is, you can ignore the message if you are certain data are in centroid mode.
# You can verify this by opening one of your converted .mzXML files in a text reader. You should see:
# <dataProcessing centroided="1"></dataProcessing> (a "0" is bad)
#
# For more on this error, see http://metabolomics-forum.com/viewtopic.php?f=8&t=267 or
# https://groups.google.com/forum/#!topic/xcms/xybDDQTaQiY
#
# 2. So long as the number of peak data insertion problems is relatively low (i.e., < 100),
# you can safely ignore the error. Otherwise, might try lowering the ppm
#
# 3. On-the-fly plotting features (i.e., with sleep ≥ 0.001 enabled) don't appear to
# function properly in Mac RStudio
#####################################################################################
##### Grouping and retention time correction using xcms (and IPO, if desired) #######
#####################################################################################
print(paste0("Using values of group and retcor parameters specified in the script by user..."))
# retcor.loess settings below are the function defaults
loess.missing = 1
loess.extra = 1
loess.smoothing = "loess"
loess.span = c(0.2)
loess.family = "gaussian" # want to leave outliers in for the time being
# retcor.obiwarp settings below are the function defaults
obiwarp.center = NULL
obiwarp.profStep = 1
obiwarp.response = 1
obiwarp.distFunc = "cor_opt"
obiwarp.gapInit = NULL
obiwarp.gapExtend = NULL
obiwarp.factorDiag = 2
obiwarp.factorGap = 1
obiwarp.localAlignment = 0
# settings for group.density below are based on the recommended HPLC/Orbitrap settings
# from Table 1 of Patti et al., 2012, "Meta-analysis of untargeted metabolomic data from
# multiple profiling experiment," Nature Protocols 7: 508-516
density.bw = 5 # 15?
density.max = 50
density.minfrac = 0.25
density.minsamp = 2
density.mzwid = 0.015 # 0.001?
# specify some additional settings we wish to keep constant, regardless of where the parameters above were obtained
obiwarp.center = NULL
# NOTE(review): obiwarp.center was already set to NULL above; this re-assignment is redundant
obiwarp.plottype = "deviation" # "none"
density.sleep = 0
loess.plottype = "mdevden" # none
################# Perform grouping and retention time correction on dataset #############
print(paste0("Performing grouping and retention time correction on dataset"))
print(paste0("Using group.density and retcor.",retcor.meth))
# initial grouping
# using method = "density" with settings from above; this establishes the
# across-sample peak groups that retention-time correction needs as anchors
xset_gr = group(xset_centWave,
                method = "density",
                bw = density.bw,
                minfrac = density.minfrac,
                minsamp = density.minsamp,
                mzwid = density.mzwid,
                max = density.max,
                sleep = density.sleep
)
rm(xset_centWave) # free the large pre-grouping object
# chromatographic alignment (retention time correction), using the method
# selected via retcor.meth in the user-editable section above
if (retcor.meth=="loess") {
  xset_gr.ret = retcor(xset_gr,
                       # method = "loess", # this appears unnecessary
                       missing = loess.missing,
                       extra = loess.extra,
                       smooth = "loess",
                       span = loess.span,
                       family = loess.family,
                       plottype = loess.plottype,
                       col = NULL,
                       ty = NULL
  )
} else if (retcor.meth=="obiwarp") {
  # Bug fixes in this branch:
  #  1. gapExtend was being fed obiwarp.gapInit (copy-paste error); it now
  #     receives obiwarp.gapExtend. Both default to NULL, so current output is
  #     unchanged, but user-set values are now honored.
  #  2. Call the retcor() generic with method = "obiwarp" rather than
  #     retcor.peakgroups(), which does not accept the obiwarp-specific
  #     arguments used here (profStep, distFunc, gapInit, ...).
  xset_gr.ret = retcor(xset_gr,
                       method = "obiwarp",
                       plottype = obiwarp.plottype,
                       profStep = obiwarp.profStep,
                       center = obiwarp.center,
                       response = obiwarp.response,
                       distFunc = obiwarp.distFunc,
                       gapInit = obiwarp.gapInit,
                       gapExtend = obiwarp.gapExtend,
                       factorDiag = obiwarp.factorDiag,
                       factorGap = obiwarp.factorGap,
                       localAlignment = obiwarp.localAlignment,
                       initPenalty = 0
  )
}
# perform grouping again (peak groups must be re-derived now that retention
# times have been corrected)
rm(xset_gr)
print(paste0("Performing second peak grouping after application of retcor..."))
# using method = "density" with settings from above
xset_gr.ret.rg = group(xset_gr.ret,
                       method = "density",
                       bw = density.bw,
                       minfrac = density.minfrac,
                       minsamp = density.minsamp,
                       mzwid = density.mzwid,
                       max = density.max,
                       sleep = density.sleep
)
# fill missing peaks
rm("xset_gr.ret")
print(paste0("Filling missing peaks..."))
save(xset_gr.ret.rg, file="xset_gr.ret.rg") # checkpoint before the slow fill step
#Conclude retention time and grouping and save to xset_gr.ret.rg
#Begin peak filling
load("xset_gr.ret.rg")
register(bpstart(MulticoreParam(1))) # re-register backend in case this section is run in a fresh session
xset_gr.ret.rg.fill = fillPeaks.chrom(xset_gr.ret.rg, BPPARAM = bpparam())
rm(xset_gr.ret.rg)
save(xset_gr.ret.rg.fill, file = "xset_gr.ret.rg.fill")
#####################################################################################
##### Isotope peak identification, creation of xsAnnotate object using CAMERA #######
#####################################################################################
load("xset_gr.ret.rg.fill")
print(paste0("Applying CAMERA to identify isotopic peaks, create xsAnnotate object, and
             create CAMERA pseudospectra using correlation of xcms peak groups between and
             within samples. These pseudospectra are the groups within which the adduct
             hierarchy and retention time screening criteria will be applied using LOBSTAHS"))
# first, a necessary workaround to avoid a import error; see https://support.bioconductor.org/p/69414/
imports = parent.env(getNamespace("CAMERA"))
unlockBinding("groups", imports)
imports[["groups"]] = xcms::groups
lockBinding("groups", imports)
# create annotated xset using wrapper annotate(), allowing us to perform all CAMERA tasks at once
xset_a = annotate(xset_gr.ret.rg.fill,
                  quick=FALSE,
                  sample=NA, # use all samples
                  nSlaves=1, # use 4 sockets
                  # NOTE(review): nSlaves=1 contradicts the "use 4 sockets"
                  # comment above -- confirm which was intended
                  # group FWHM settings
                  # using defaults for now
                  sigma=6,
                  perfwhm=0.6,
                  # groupCorr settings
                  # using defaults for now
                  cor_eic_th=0.75,
                  graphMethod="hcs",
                  pval=0.05,
                  calcCiS=TRUE,
                  calcIso=TRUE,
                  calcCaS=FALSE, # weird results with this set to TRUE
                  # findIsotopes settings
                  maxcharge=4,
                  maxiso=4,
                  minfrac=0.5, # 0.25?
                  # adduct annotation settings
                  psg_list=NULL,
                  rules=NULL,
                  polarity=subset.polarity,
                  multiplier=3,
                  max_peaks=100,
                  # common to multiple tasks
                  intval="into",
                  ppm=2.5,
                  mzabs=0.0015
)
# at this point, should have an xsAnnotate object called "xset_a"
# in hand, which will serve as the primary input to the main screening and annotation function
# "doLOBscreen" in LOBSTAHS
rm(xset_gr.ret.rg.fill)
print(paste0("xsAnnotate object 'xset_a' has been created. User can now use LOBSTAHS to perform screening..."))
print(xset_a)
save(xset_a, file = "prepCompletedxsA")
load("prepCompletedxsA")
########################################################################
#LOBSTAHS PART
#######################################################################
data(default.LOBdbase)
# NOTE(review): polarity is hard-coded "positive" here while annotate() above
# used subset.polarity -- fine for this positive-mode script, but keep them in
# sync if the negative-mode subset is ever processed. Also note F/T shorthand
# below; TRUE/FALSE is safer since T and F are reassignable.
LOB <- doLOBscreen(xsA=xset_a, polarity = "positive", match.ppm = 2.5,
                   retain.unidentified = F, rt.restrict = T)
LOBscreen_diagnostics(LOB)
LOBdata <- getLOBpeaklist(LOB)
write.csv(LOBdata, file = "LOB_Peaklist_Pos.csv")
rm(xset_a, LOB, default.LOBdbase)
|
697a12e62da3856370903f2b0781d11867b1ef4e | 7f72ac13d08fa64bfd8ac00f44784fef6060fec3 | /RGtk2/man/gtkLabelSetText.Rd | 40a4612ad075c253187834b77542f5ec30fb10e8 | [] | no_license | lawremi/RGtk2 | d2412ccedf2d2bc12888618b42486f7e9cceee43 | eb315232f75c3bed73bae9584510018293ba6b83 | refs/heads/master | 2023-03-05T01:13:14.484107 | 2023-02-25T15:19:06 | 2023-02-25T15:20:41 | 2,554,865 | 14 | 9 | null | 2023-02-06T21:28:56 | 2011-10-11T11:50:22 | R | UTF-8 | R | false | false | 475 | rd | gtkLabelSetText.Rd | \alias{gtkLabelSetText}
\name{gtkLabelSetText}
\title{gtkLabelSetText}
\description{Sets the text within the \code{\link{GtkLabel}} widget. It overwrites any text that
was there before. }
\usage{gtkLabelSetText(object, str)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkLabel}}}
\item{\verb{str}}{The text you want to set}
}
\details{This will also clear any previously set mnemonic accelerators.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
55c4b793712ae320422cf30f597b8575e0362782 | d62d9ea2f6aa749fa48455bddbd3208279ce6449 | /man/flip_layers.Rd | 0955a75abce20504865d378a5b2dd4e13253bb62 | [] | no_license | jporobicg/atlantistools | 3bffee764cca1c3d8c7a298fd3a0b8b486b7957e | 75ea349fe21435e9d15e8d12ac8060f7ceef31a2 | refs/heads/master | 2021-01-12T03:06:55.821723 | 2017-05-26T04:03:33 | 2017-05-26T04:03:33 | 78,160,576 | 1 | 0 | null | 2017-05-25T23:35:23 | 2017-01-06T00:51:21 | R | UTF-8 | R | false | true | 1,108 | rd | flip_layers.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flip-layers.R
\name{flip_layers}
\alias{flip_layers}
\title{Flip layers for visualization.}
\usage{
flip_layers(data)
}
\arguments{
\item{data}{dataframe with columns polygon and layer. layer id is based on
atlantis output (0 = layer closest to the sediment)}
}
\value{
dataframe with flipped layerids. 1 = surface.
}
\description{
Within Atlantis the water column id 0 is the water column closest to the sediment.
In order to simplify graphical interpretation of vertical plots this order
is reversed. The surface layer is 1 by default. The sediment layer id is equal
to the number of total layers. Please note that this is only used for graphical
display.
}
\examples{
data <- rbind(expand.grid(species = "sp1", polygon = 0, layer = 0:7),
expand.grid(species = "sp1", polygon = 1, layer = 0:4),
expand.grid(species = "sp1", polygon = 2, layer = 0:2),
expand.grid(species = "sp1", polygon = 3, layer = c(0:3, 7)))
data$atoutput <- runif(nrow(data), min = 0, max = 2)
flip_layers(data)
}
|
77db129a59b857c40952cbada7435f7bd32fa340 | 2972e393d2d5d38b018fe48e85f8991552a2ccf8 | /ED2/transect_runs/NC/run/workflow.R | 65fae3c1080af4137f36a8d40962d28230ef56df | [] | no_license | KristinaRiemer/model-vignettes | 8ba73d33b1bd30f01077daf87b50ec937a658e08 | 91c7d27a948f9b1e0e0f83b85170be0758e72c27 | refs/heads/master | 2022-09-15T18:51:15.139122 | 2022-08-19T21:48:16 | 2022-08-19T21:48:16 | 182,847,131 | 0 | 0 | null | 2020-06-23T17:33:18 | 2019-04-22T18:43:03 | null | UTF-8 | R | false | false | 3,587 | r | workflow.R | # This may take a long time to run. Run as a background job if you don't want
# to tie up your R session. In RStudio click the "Source" drop-down and choose
# "Source as Local Job"
# Load packages -----------------------------------------------------------
# PEcAn.all supplies the PEcAn workflow functions used throughout; furrr and
# progressr drive the parallel .h5 -> .nc conversion further down.
library(PEcAn.all)
library(furrr)
library(progressr)
# Read in settings --------------------------------------------------------
#edit this path
inputfile <- "ED2/transect_runs/NC/run/pecan.xml"
#check that inputfile exists, because read.settings() doesn't do that!
if (file.exists(inputfile)) {
  settings <- PEcAn.settings::read.settings(inputfile)
} else {
  stop(inputfile, " doesn't exist")
}
#check outdir
# (bare expression: the parsed output directory is shown when this line is
# run interactively/echoed; it has no effect otherwise)
settings$outdir
# Prepare settings --------------------------------------------------------
#TODO: check that dates are sensible?
settings <- prepare.settings(settings, force = FALSE)
# Snapshot the checked settings next to the workflow for reproducibility.
write.settings(settings, outputfile = paste0("pecan_checked_", Sys.Date(), ".xml"))
settings <- do_conversions(settings)
# Query trait database ----------------------------------------------------
settings <- runModule.get.trait.data(settings)
# Meta analysis -----------------------------------------------------------
runModule.run.meta.analysis(settings)
# Write model run configs -----------------------------------------------------
# This will write config files locally and attempt to copy them to your HPC. In
# my experience, this copying fails, but it doesn't matter because the next step
# ALSO attempts to copy the config files to the HPC.
runModule.run.write.configs(settings)
# Start model runs --------------------------------------------------------
runModule_start_model_runs(settings, stop.on.error = FALSE)
## If for some reason the above function tries to copy files back from HPC before
## runs are finished, this code will manually copy it back.
#
# cmd <-
#   paste0(
#     "rsync -az -q ",
#     "'", settings$host$name, ":", settings$host$outdir, "' ",
#     "'", settings$outdir, "'"
#   )
#
# system(cmd)
# Results post-processing -------------------------------------------------
## Convert and consolidate ED2 .h5 files to .nc files
## NOTE: this is supposed to get run by runModule_start_model_runs() but is
## currently broken and needs to be run manually. Might get fixed once PEcAn
## container on HPC is updated so check for .nc files in outdir before running
## this
# TODO: Check how many ensembles failed (and why?) by looking for empty dirs
# This "works" but the .nc files produced are not useable, I think, because they
# don't indicate which values come from which PFT. There is a workaround in
# plot.R
## use 2 cores to speed up
plan(multisession, workers = 2)
# Subdirectories of <outdir>/out hold the raw output of each model run; each
# one is converted below by model2netcdf.ED2().
dirs <- list.dirs(file.path(settings$outdir, "out"), recursive = FALSE)
with_progress({
  p <- progressor(steps = length(dirs))
  future_walk(dirs, ~{
    p() #progress bar
    model2netcdf.ED2(
      .x,
      settings$run$site$lat,
      settings$run$site$lon,
      settings$run$start.date,
      settings$run$end.date,
      settings$pfts
    )
  })
})
### DON'T remove .h5 files. The .nc files are currently malformed and you need
### the raw output for plotting.
# Model analyses ----------------------------------------------------------
## Get results of model runs
get.results(settings)
## Run ensemble analysis on model output
runModule.run.ensemble.analysis(settings)
#The run.ensemble.analysis() step fails because whatever output the
#ensemble.output...Rdata file didn't grab the ensemble ID correctly
# run manually:
run.ensemble.analysis(settings, ensemble.id = "NOENSEMBLEID")
|
e81d4ffcc863bc31c3994f8541165665eabf6365 | 19e4ab785434b096af3059dce4c533d0ad424b90 | /HMDLexis/HMDLexis/R/d_s1x1.R | 975ba5bd491d092f3693adae8e4a9e15d719d400 | [] | no_license | timriffe/HMDLexis | feaae9eef42d71783e836116b8b0a5d174f838d6 | 9c1c9aa9dc39e27e185320e3b3362f473437af62 | refs/heads/master | 2021-03-26T10:25:57.264746 | 2018-11-10T10:50:38 | 2018-11-10T10:50:38 | 28,879,743 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,242 | r | d_s1x1.R |
#'
#' @title d_s1x1, a function to split RR death counts into Lexis triangles based on regression coefficients.
#'
#' @description The HMD Methods Protocol gives a formula to split Lexis 1x1 death counts (squares) into triangles based on the results of a regression. Function can be run innocuously on any Deaths data, even if no 1x1 RR is present.
#'
#' @details This function can optionally deal with territorial adjustments. If a Tadj file is given, it is handled appropriately. Since Tadj files are created by default by \code{readInputDB()}, there is no reason not to specify them always, even when not relevant. It doesn't matter. If you have a Tadj, though, you better know it.
#'
#' @param Deaths LexisDB internal Deaths \code{data.frame}, long format, all columns. Format as given by \code{readInputDB()}.
#' @param Births LexisDB internal Births \code{data.frame}, as given by \code{readInputDB()}.
#' @param Tadj LexisDB internal territorial (or universe) adjustment \code{data.frame}, as given by \code{readInputDB()}. This is optional.
#'
#' @return The \code{Deaths} \code{data.frame}, returned invisibly, in which eligible 1x1 RR counts have been replaced by TL and TU Lexis triangles; all other records pass through untouched.
#'
#' @importFrom reshape2 acast
#' @importFrom reshape2 melt
#' @importFrom compiler cmpfun
#'
#' @export
#'
d_s1x1 <- function(Deaths, Births, Tadj = NULL){
  # check if function needs to be run, if not, return deaths
  # TR: 1 June, 2016: use new %==%, more efficient
  if (!any(Deaths$Lexis[with(Deaths, AgeIntervali %==% 1 &
                          YearInterval == 1)] == "RR")){
    cat("d_s1x1() not necessary; no 1x1 RR deaths to split into triangles at this time.")
    return(Deaths)
  }
  # some prelim data procedures, pretty run-of-the-mill
  #--------------------------------------------------
  # TOT is never necessary:
  Deaths <- Deaths[Deaths$Age != "TOT", ]
  # slice off UNK, rbind back on later:
  UNKi <- Deaths$Age == "UNK"
  UNKTF <- any(UNKi)
  if (UNKTF){
    UNK <- Deaths[UNKi, ]
    Deaths <- Deaths[!UNKi, ]
  }
  # slice off OP, to be slapped back on later
  OPi <- Deaths$AgeInterval == "+"
  OPTF <- any(OPi)
  if (OPTF){
    OP <- Deaths[OPi, ]
    Deaths <- Deaths[!OPi, ]
  }
  # TR: added Aug 22, 2016
  # NB: TadjTF is TRUE when *no* Tadj was supplied (i.e. Tadj is NULL).
  TadjTF <- is.null(Tadj)
  #--------------------------------------------------
  # now start the sex-loop (this can't be done in a big Year-Sex apply scheme
  # because consecutive birth cohort sizes are needed
  Areas <- unlist(tapply(Deaths$Area, Deaths$Year,unique))
  Dout <- list()
  # Sex <- "f"
  for (Sex in c("f","m")){
    Dsex <- Deaths[Deaths$Sex == Sex, ]
    RRi <- with(Dsex, Lexis == "RR" & AgeInterval == "1" & YearInterval == 1)
    RR <- Dsex[RRi, ]
    DO <- Dsex[!RRi, ] # Save other Lexis shapes for downstream treatment.
    maxAges <- unlist(lapply(split(RR, RR$Year), function(RRyr){
      max(RRyr$Agei)
    }))
    # note that this does the job of d_ma0() for a large block of ages
    RRwide <- acast(RR, Agei ~ Year, sum, value.var = "Deaths", fill = 0)
    # TR: Aug 23, 2016. Note that RRwide needn't be a continuous block of years.
    # years can have gaps, since it only picks up years with RR to split...
    # TR: 1 June, 2016
    # ensure that RRwide goes to age 0
    AgesBox <- 0:as.integer(rownames(RRwide)[nrow(RRwide)])
    yrs <- as.integer(colnames(RRwide))
    RRwideBox <- matrix(0,
                        ncol=length(yrs),
                        nrow=length(AgesBox),
                        dimnames=list(AgesBox, yrs))
    RRwideBox[rownames(RRwide),colnames(RRwide)] <- RRwide
    RRwide <- RRwideBox
    # years are in order, but might not be continuous chunks..........
    yrsc <- colnames(RRwide)
    yrs <- as.integer(yrsc)
    # TR: modified Mon Aug 22, 2016 to account for Tadj.
    # begin modify here.
    Ball <- Births$Births[Births$Sex == Sex]
    names(Ball) <- Births$Year[Births$Sex == Sex]
    Ball <- Ball[sort(names(Ball))]
    BT_1 <- Ball[as.character(yrs - 1)]
    BT <- Ball[yrsc]
    if (!TadjTF){
      # rescale previous-year births by the Rb (birth) adjustment factor
      ind <- Tadj$Type == "Rb" & Tadj$Sex == Sex
      RB <- Tadj$Value[ind]
      names(RB) <- Tadj$Year[ind]
      # need names BT...
      BT_1 <- BT_1 * RB[names(BT)]
    }
    # infant exposure approximation: 1/3 of previous-year births plus
    # 2/3 of current-year births
    IMRdenom <- (1 / 3 * BT_1) + (2 / 3 * BT)
    # names can be shifted depending on order of BT_1 and BT. Might be relevant for alignment
    names(IMRdenom) <- names(BT)
    # end modify here
    # this would only kick in if births from year t-1 are missing. Then assume same as present year
    NAind <- is.na(IMRdenom)
    IMRdenom[NAind] <- Ball[yrsc[NAind]]
    # this is trickier than first glance, since it's conceivable to have infant mort
    # in TL TU while higher ages are in RR, which need to be split. We therefore don't
    # necessarily take the first row of RRwide as infant mort, but rather sum it independently
    # VH, VV, or RV mort is ignored here.
    # TR: Aug 22, 2016. Noted, numerator does not need Tadj adjust, only denom.
    IMRnum <- acast(Dsex[Dsex$Lexis %in% c("TL","TU","RR") & Dsex$Agei == 0, ],
                    Age ~ Year,
                    sum,
                    value.var = "Deaths",
                    fill = 0)[ , , drop = TRUE] # drop = TRUE turns it into a vector (named still)
    IMR <- IMRnum[yrsc] / IMRdenom # ensures we get same years as need for RR
    # get these in a matrix conformable with formulas
    IMRT <- IMR[col(RRwide)]
    dim(IMRT) <- dim(RRwide)
    # now robust to non-consecutive yrs
    # account for fluctuations in cohort size (needs tadjification)
    # this takes ALL available cohorts, not just those constrained by RR needs
    # TR: changed Mon Aug 22, 2016 to use Tadj where necessary
    # begin modify here
    BT_1 <- Ball[-length(Ball)]
    BT <- Ball[-1]
    if (!TadjTF){
      # similar lines, but possibly different yrIn
      #yrIn <- as.character(as.integer(names(BT_1)) + 1)
      BT_1 <- BT_1 * RB[names(BT)]
    }
    # pib = share of the two adjacent birth cohorts belonging to the current year
    pib <- BT / (BT + BT_1)
    # pib <- Ball[2:length(Ball)] /
    #   (Ball[2:length(Ball)] + Ball[1:(length(Ball) - 1)])
    ## end modify here
    # now we determine the cohort for each cell in RRwide :-)
    yrsM <- yrs[col(RRwide)]
    ageM <- as.integer(rownames(RRwide))[row(RRwide)]
    TLCoh <- as.character(yrsM - ageM)
    dim(TLCoh) <- dim(RRwide)
    # now distribute these over cohorts
    PIB <- pib[TLCoh]
    dim(PIB) <- dim(RRwide)
    dimnames(PIB) <- dimnames(RRwide)
    # assume constant sizes for cohorts not available
    # plot(density(PIB, na.rm=TRUE))
    PIB[is.na(PIB)] <- 0.5
    # [[ NOTE: if births prior to first year of deaths are available, we could use these too ]]
    # some indicator matrices
    # (1918/1919 flag the influenza-era dummy years used by the regression below;
    #  Ix0/Ix1 flag ages 0 and 1)
    Ix0 <- Ix1 <- I1919 <- I1918 <- PIB * 0
    if (1918 %in% yrs){
      I1918[,"1918"] <- 1
    }
    if (1919 %in% yrs){
      I1919[,"1919"] <- 1
    }
    Ix0["0", ] <- 1
    Ix1["1", ] <- 1
    if (Sex == "f"){
      # from Table A1, ages 0-130
      alpha <- c(0.0392, 0.1365, rep(0.0130,3),rep(c(0.0018,-0.0140,-0.0135,-0.0061,-0.0046,-0.0041,-0.0072,-0.0070,
                                                     -0.0071,-0.0084,-0.0091,-0.0134,-0.0175,-0.0201,-0.0230,-0.0231,-0.0187,-0.0112,-0.0014), each = 5),
                 rep(0.0190,31))
      names(alpha) <- 0:130
      alpha <- alpha[rownames(RRwide)]
      ALPHA <- (PIB * 0 + 1) * alpha
      # the full formula
      # pTL = estimated proportion of each 1x1 cell belonging to the lower triangle (TL)
      pTL <- 0.4710 + ALPHA + 0.7372 * (PIB - 0.5) +
        0.1025 * I1918 -0.0237 * I1919 +
        -0.0112 * log(IMRT) +
        -0.0688 * log(IMRT) * Ix0 +
        0.0268 * log(IMRT) * Ix1 +
        0.1526 * (log(IMRT) - log(0.01)) * Ix0 * (IMRT < 0.01)
    }
    if (Sex == "m"){
      # from Table A1, ages 0-130
      alpha <- c(0.0230, 0.1249, rep(0.0086, 3), rep(c(0.0031, -0.0086, -0.0175, 0.0035, 0.0081, 0.0031,
                                                       -0.0065, -0.0117, -0.0148, -0.0145, -0.0142, -0.0157, -0.0179, -0.0198, -0.0223, -0.0216,
                                                       -0.0160, -0.0083, 0.0039), each = 5), rep(0.0313, 31))
      names(alpha) <- 0:130
      alpha <- alpha[rownames(RRwide)]
      ALPHA <- (PIB * 0 + 1) * alpha
      # the full formula
      pTL <- 0.4838 + ALPHA + 0.6992 * (PIB - 0.5) +
        0.0728 * I1918 -0.0352 * I1919 +
        -0.0088 * log(IMRT) +
        -0.0745 * log(IMRT) * Ix0 +
        0.0259 * log(IMRT) * Ix1 +
        0.1673 * (log(IMRT) - log(0.01)) * Ix0 * (IMRT < 0.01)
    }
    # get counts in triangles
    TL <- RRwide * pTL
    TU <- RRwide - TL
    # put in long format
    TL <- melt(TL, varnames = c("Age","Year"), value.name = "Deaths")
    TU <- melt(TU, varnames = c("Age","Year"), value.name = "Deaths")
    # select only ages up until original max age of RR in given year
    TL <- do.call(rbind,
                  lapply(
                    split(TL, TL$Year), function(YR, maxAges){
                      yr <- unique(YR$Year)
                      YR[YR$Age <= maxAges[as.character(yr)],]
                    },maxAges=maxAges)
    )
    TU <- do.call(rbind,
                  lapply(
                    split(TU, TU$Year), function(YR, maxAges){
                      yr <- unique(YR$Year)
                      YR[YR$Age <= maxAges[as.character(yr)],]
                    },maxAges=maxAges)
    )
    # add Lexis shape
    TL$Lexis <- "TL"
    TU$Lexis <- "TU"
    DeathsTLTU <- rbind(TL, TU)
    # rebuild a LexisDB-shaped data.frame for the new triangle records
    DN <- as.data.frame(
      matrix(ncol = ncol(Deaths),
             nrow = nrow(DeathsTLTU),
             dimnames = list(NULL, colnames(Deaths)))
    )
    DN$PopName <- unique(Deaths$PopName)
    DN$Year <- as.integer(DeathsTLTU$Year)
    DN$Area <- Areas[as.character(DN$Year)]
    DN$YearInterval <- 1
    DN$Sex <- Sex
    DN$Age <- as.character(DeathsTLTU$Age)
    DN$AgeInterval <- "1"
    DN$Lexis <- DeathsTLTU$Lexis
    DN$Deaths <- DeathsTLTU$Deaths
    DN$Agei <- DeathsTLTU$Age
    DN$AgeIntervali <- 1
    DN$LDB <- 1
    DN <- assignNoteCode(DN, "d_s1x1()")
    DN$Access <- "O" # this maybe need to be sensitive? apply general rule later?
    Dout[[Sex]] <- rbind(DO, DN)
  }
  # tack on parts we sliced off
  if (UNKTF){
    Dout[["UNK"]] <- UNK
  }
  if (OPTF){
    Dout[["OP"]] <- OP
  }
  Deaths <- resortDeaths(do.call(rbind, Dout))
  # TR: 1 June, 2016: add d_agg() step in case RR overlapped with TL,TU
  Deaths <- d_agg(Deaths)
  rownames(Deaths) <- NULL
  invisible(Deaths)
}
|
0594dae34699652161a9c0e3090d6d7bc7153575 | befbf39f0718b1444f911dbb7c98d1c4a7dcf7b2 | /man/goblets.Rd | b5f5d1c1c9c9061993f4bc1cc8aafdd29b43b53f | [] | no_license | cran/calibrate | fe5b71eb8b0dc404e773c28097db86f4a05110bc | d41eeeb2509038b18dcc28a6c7bb256ef3b56209 | refs/heads/master | 2021-01-22T14:38:35.100617 | 2020-06-19T04:40:14 | 2020-06-19T04:40:14 | 17,694,921 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 440 | rd | goblets.Rd | \name{goblets}
\docType{data}
\alias{goblets}
\title{Size measurements of archeological goblets}
\description{
This data set gives 6 different size measurements for each of 25 goblets.
}
\usage{data(goblets)}
\format{A data frame containing 25 observations.}
\source{Manly, 1989}
\references{
Manly, B. F. J. (1989) \emph{Multivariate statistical methods: a primer}.
London: Chapman and Hall.
}
\keyword{datasets}
|
c3ec00eca5250a52dbb4485cdd9f38ec8fe84371 | 5d07aa560356e77c20833bdbb493c554625a455d | /man/LSRPrisma_flow.Rd | 87d86d38a54831653d163ad270c6bc4bd59686de | [] | no_license | nealhaddaway/livingPRISMAflow | f0403cb63c5d3f2b392efbc8f4569d1c40e0b2ee | 60de662cf3698a3932ffc32592195a60a7155475 | refs/heads/master | 2023-04-13T10:11:41.268084 | 2021-07-01T10:25:48 | 2021-07-01T10:25:48 | 331,282,349 | 6 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,077 | rd | LSRPrisma_flow.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LSRPrisma_flow.R
\name{LSRPrisma_flow}
\alias{LSRPrisma_flow}
\title{Living systematic review flow diagrams}
\usage{
LSRPrisma_flow(data, previous = TRUE, other = TRUE)
}
\arguments{
\item{data}{A dataset ready for plotting, produced as a list
of items using \code{LSRPrisma_data()}.}
\item{previous}{A logical argument specifying whether the 'previous
studies/report' arm should be included in the flow diagram (TRUE) or
not (FALSE).}
\item{other}{A logical argument specifying whether the 'other
sources' arm should be included in the flow diagram (TRUE) or
not (FALSE).}
}
\value{
An LSR PRISMA plot of the data rendered as a flow
diagram.
}
\description{
Produce various formats of PRISMA 2020 compliant living
systematic review flow diagrams with options for showing inputs from
previous reviews and other sources.
}
\examples{
\dontrun{
data <- read.csv('inst/extdata/approach1.csv')
flowdata <- LSRPrisma_data(data)
plot <- LSRPrisma_flow(flowdata, previous = FALSE, other = TRUE)
plot
}
}
|
b86639c3fb0f01470de3a87156c74beace71f036 | bca43a8c0fe050a69eaa45adef82ca459b6c58f4 | /R/0onload.R | 5d482b21c4768215c20debaf6875d9f9145263ba | [] | no_license | kkholst/lavaSearch2 | 901fc73a65e1020151df113f049cde34857110f1 | c120b2d7ce0f53c2495d8bb316b49ad18f3a6471 | refs/heads/master | 2020-04-07T07:30:43.989775 | 2018-10-04T16:18:42 | 2018-10-04T16:18:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 801 | r | 0onload.R | ## * .onLoad
.onLoad <- function(lib, pkg="lavaSearch2") {
  ## Register this package's defaults with lava's option system.
  ## search.calcMaxDist: available methods to compute the distribution of
  ## the max statistic; search.p.adjust: available p-value adjustments.
  maxdist_methods <- c("integration", "boot-residual", "boot-wild")
  adjust_methods <- c("fastmax", "max", "holm", "hochberg", "hommel",
                      "bonferroni", "BH", "BY", "fdr", "none")
  lava::lava.options(search.calcMaxDist = maxdist_methods,
                     search.p.adjust = adjust_methods,
                     search.calc.quantile.int = FALSE)
}
## * .onAttach
.onAttach <- function(lib, pkg="lavaSearch2") {
  ## Emit "<Package> version <Version>" on attach, read from the installed
  ## package's DESCRIPTION metadata.
  pkg_info <- utils::packageDescription(pkg)
  packageStartupMessage(pkg_info$Package, " version ", pkg_info$Version)
}
# Cache internal helpers from the lava namespace at load time so they can be
# called directly; get() with inherits = FALSE errors immediately if lava
# ever renames or removes these functions, rather than failing later.
lava_categorical2dummy <- get("categorical2dummy", envir = asNamespace("lava"), inherits = FALSE)
lava_estimate.lvm <- get("estimate.lvm", envir = asNamespace("lava"), inherits = FALSE)
|
0b848f81172dbd78cffd2307a75635064fa4607e | 3468f9559e492713fb0461ebae027e8b8bcb85c9 | /R/summarize_concentrations.R | 9f4873e3ca95e6cd5da7a57b277676045bf475ef | [] | no_license | cleberecht/singaEvaluate | 40702f35e8fd75641fe4e362e5138aee484afa49 | c8dd26ae325593e445d0e71f9477724607faa0ea | refs/heads/master | 2020-08-30T07:49:18.919915 | 2020-01-06T12:16:58 | 2020-01-06T12:16:58 | 218,310,239 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,218 | r | summarize_concentrations.R | #' Parses concentrations from a json trajectory.
#'
#' Concentrations of all entities mapped to the same collector are summed per
#' time step, node and section, and reported under the collector's name.
#'
#' @param trajectory The parsed trajectory
#' @param entity_mapping The entity to collector mapping created by \code{\link{map_collector}} or manually.
#' @return A data frame with columns \code{time}, \code{node}, \code{section},
#'   \code{entity} (the collector name) and \code{concentration} (summed over
#'   the collector's entities), ordered by \code{time}.
#' @import dplyr
#' @export
summarize_concentrations <- function(trajectory, entity_mapping) {
  # A single join + grouped aggregation replaces the previous per-collector
  # loop, which re-filtered the whole trajectory once per collector and grew
  # a list incrementally.
  trajectory %>%
    # keep only rows whose entity is mapped to a collector; de-duplicate the
    # mapping first so a repeated (entity, collector) pair cannot double-count
    inner_join(distinct(entity_mapping, entity, collector), by = "entity") %>%
    # sum equivalent entities within each time/node/section cell per collector
    group_by(time, node, section, collector) %>%
    summarise(concentration = sum(concentration)) %>%
    ungroup() %>%
    # report the collector name in the entity column
    rename(entity = collector) %>%
    # cosmetics and return
    arrange(time) %>%
    select(time, node, section, entity, concentration)
}
|
0cf6343d045e880272d78294dc561913ec08e9aa | 6c76ad5b8fd4b120363f269fb49a8a41088818e5 | /man/roundSim.Rd | 9110318f05bef87b5d22dde7c719863e62b03eef | [] | no_license | mattsigal/SimDisplay | 4a2d91a688178cf566cd3097276d8532622e1dd0 | ecc3a7568fe6a9de1be7e7a1212f4ca66cafb68b | refs/heads/master | 2021-01-10T07:29:03.573726 | 2017-09-15T15:05:37 | 2017-09-15T15:05:37 | 52,392,430 | 4 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,027 | rd | roundSim.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/roundSim.R
\name{roundSim}
\alias{roundSim}
\title{Rounding values for presentation}
\usage{
roundSim(x, digits = 3, percent = FALSE)
}
\arguments{
\item{x}{A \code{vector} or \code{data.frame} object.}
\item{digits}{An \code{integer} value indicating the number of digits to round to.}
\item{percent}{A \code{boolean} value indicating whether column elements should be multiplied by 100.}
}
\value{
A \code{vector} or \code{data.frame} object.
}
\description{
\code{roundSim} takes a vector or data.frame object and returns the same
type of object, but rounded.
}
\examples{
\dontrun{
data(Brown1974)
roundSim(Brown1974) # returns error
roundSim(Brown1974[,4:9]) # dataframe input, dataframe output
str(roundSim(Brown1974[,4:9])) # vectors are now character-type
roundSim(Brown1974[,4:9], 2)
set.seed(10)
dat <- rnorm(n = 5, mean = 0, sd = 150) # Wider range, vector input
roundSim(dat) # vector output
}
}
\seealso{
\code{\link{SimDisplay}}
}
|
fd98e273049597a4e37202280e6bf5d5a264475b | c8c40624cd94035c905479e2b213b05c9e0f9463 | /demos/data_import/pdf_scraping/pdf_scraping_wa_covid_reports_exercises.R | 83640eeaece85d658df13193dbe8dd4329c64813 | [
"MIT"
] | permissive | deohs/coders | 3fba6aeba4fe9a1a246917b4eb7b6b83f78cb857 | e33234625e3910b133894fb9a9cd260fef815c79 | refs/heads/main | 2022-10-28T23:04:06.049888 | 2022-10-19T20:02:41 | 2022-10-19T20:02:41 | 191,814,394 | 9 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,453 | r | pdf_scraping_wa_covid_reports_exercises.R | # PDF "scraping" exercises: WA DOH Covid-19 data
# See: https://www.doh.wa.gov/Emergencies/COVID19/DataDashboard#downloads
# NOTE: this is an exercise template — lines containing '...' or a bare
# '# ...' are placeholders to be completed by the learner before running.
# Load packages
if (!require(pacman)) install.packages("pacman")
pacman::p_load(readr, dplyr, tidyr, purrr, tabulizer, ggplot2, RColorBrewer)
# Prepare data folder
data_dir <- "data"
if (!dir.exists(data_dir)) {
  dir.create(data_dir, showWarnings = FALSE, recursive = TRUE)
}
# ----------
# Example 1
# ----------
# Extract tables as a list of data.frames
# Download file
filename <- "Weekly-COVID-19-Long-Term-Care-Report.pdf"
filepath <- file.path(data_dir, filename)
if (!file.exists(filepath)) {
  url <- paste0('https://www.doh.wa.gov/Portals/1/Documents/1600/coronavirus/',
                'data-tables/', filename)
  download.file(url, filepath)
}
# Extract table from pages 4-5 and clean up so it's all in one data frame
# (exercise: replace '...' with the correct extract_tables() arguments)
df_list <- extract_tables(...)
# Cleanup data
# (exercise: combine/clean df_list into a single data frame named df)
df <- # ...
# Expected result, for self-checking:
# names(df)
# [1] "County"
# [2] "Total.LTC.Associated.and.Likely.Associated.Deaths"
# [3] "Total.LTC.Associated.and.Likely.Associated.Cases"
# dim(df)
# [1] 36  3
# sapply(df, class)
# County
# "character"
# Total.LTC.Associated.and.Likely.Associated.Deaths
# "numeric"
# Total.LTC.Associated.and.Likely.Associated.Cases
# "numeric"
# ----------
# Example 2
# ----------
# Extract table as text then convert to data.frames
# Download file
filename <- "348-791-COVID19VaccinationCoverageRaceEthnicityAgeWAState.pdf"
filepath <- file.path(data_dir, filename)
if (!file.exists(filepath)) {
  url <- paste0('https://www.doh.wa.gov/Portals/1/Documents/1600/coronavirus/',
                'data-tables/', filename)
  download.file(url, filepath)
}
# Extract text from page 12 as a string
# txt <- extract_text(...)
# Parse text into data frames and clean up
# (requires 'txt' from the completed extract_text() exercise above)
lines <- read_lines(txt)
init_vac <- # ...
full_vac <- # ...
# Expected results, for self-checking:
# init_vac
# A tibble: 5 x 2
#   age   pct_init
#   <chr>    <dbl>
# 1 0-19       0.2
# 2 20-34      6
# 3 35-49      8.4
# 4 50-64     10.7
# 5 65+       30
# full_vac
# A tibble: 5 x 2
#   age   pct_full
#   <chr>    <dbl>
# 1 0-19       0.1
# 2 20-34      3
# 3 35-49      4.4
# 4 50-64      3.9
# 5 65+        2.3
# Merge data frames and reshape
df <- inner_join(init_vac, full_vac, by = "age") %>%
  pivot_longer(cols = where(is.numeric), names_prefix = "pct_",
               names_to = "type", values_to = "pct") %>%
  mutate(type = factor(type, levels = c('init', 'full'),
                       labels = c('Percent Initiating Within Age Group',
                                  'Percent Fully Vaccinated Within Age Group')))
# Create custom color palette (lavender, purple, light gray)
my_pal <- c("#C8C8FF", "#7D00AF", "#E1E1E1")
# Plot data
ggplot(df, aes(x = age, y = pct, fill = type)) +
  geom_bar(width = 0.6, stat = "identity",
           position = position_dodge(width = 0.7, preserve = "total")) +
  geom_text(aes(label = sprintf("%0.1f%s", round(pct, digits = 1), "%")),
            vjust = -0.5, position = position_dodge(width = 0.7), size = 3.2) +
  scale_fill_manual(values = my_pal) +
  labs(title = "Figure 3: Percent Vaccinated, By Age",
       x = "Age Group", y = "Percent") +
  theme_void() +
  theme(legend.title = element_blank(), legend.position = c(0.23, 0.6),
        plot.title = element_text(hjust = 0.5), axis.text.x = element_text())
# Prepare images folder
images_dir <- "images"
if (!dir.exists(images_dir)) {
  dir.create(images_dir, showWarnings = FALSE, recursive = TRUE)
}
# Save plot
ggsave(file.path(images_dir, "percent_vaccinated.png"), height = 4, width = 6)
# ----------
# Example 3
# ----------
# Extract a table by area (Non-reproducible: requires user interaction)
# Download file
filename <- "MultisystemInflammatorySyndromeChildrenCOVID19WA2020.pdf"
filepath <- file.path(data_dir, filename)
if (!file.exists(filepath)) {
  url <- paste0('https://www.doh.wa.gov/Portals/1/Documents/1600/coronavirus/',
                filename)
  download.file(url, filepath)
}
# locate_area() will present an interactive selection tool to draw a box
# around the table you wish to extract and return a list containing a vector.
# area <- locate_areas(file = filepath, pages = 6)
# area
# [[1]]
#      top     left   bottom    right
# 223.22188 68.71747 548.20669 415.60767
# The vector contains the coordinates for the box corners. This output can be
# used with extract_tables().
# The same interactive tool can be used to extract a table by area.
# Extract table on page 6 into a data.frame by area using extract_areas()
# df <- extract_areas(...)
# df
#    County.in.Washington Number.of.reported.cases.of.MIS.C
# 1                Chelan                                 1
# 2               Douglas                                 1
# 3              Franklin                                 2
# 4                  King                                12
# 5                Kitsap                                 2
# 6                 Lewis                                 2
# 7                 Mason                                 1
# 8                Pierce                                 4
# 9                Skagit                                 2
# 10            Snohomish                                 5
# 11              Spokane                                 1
# 12               Yakima                                 6
# 13                Total                                39
|
8a031cbeb43c1175271b57ea08bf3e72e99f93e1 | 5c9a3e5a02f21cfaed27e5934805cb768479e124 | /src/C50_real_data.R | 4711c50b1be8ed88916ef42c45a3641f90306258 | [] | no_license | jasontgalvin/DM_Project | e8aefd8930cc5b7eab17d4bf805a306764178e3f | 0540e159fc6e1f205f2a1873e424314e9c660eae | refs/heads/master | 2020-12-31T06:31:34.278718 | 2017-04-11T20:03:25 | 2017-04-11T20:03:25 | 86,612,414 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 570 | r | C50_real_data.R | #import packages for C50
# Import the C5.0 decision-tree implementation.
library("C50")
# Read in the tab-separated data set; the first row holds column names and a
# 'label' column holds the class to be predicted.
data <- read.table("pdata", header = TRUE, sep = "\t")
# Split the data by row parity (not by column, as the old comment claimed):
# even-numbered rows become the training set, odd-numbered rows the test set.
# (The original seq(0, L, +2) relied on R silently dropping the row index 0.)
L <- nrow(data)
even_data <- data[seq_len(L) %% 2 == 0, ]
odd_data <- data[seq_len(L) %% 2 == 1, ]
# Fit a C5.0 classification tree on the training half, recording how long
# the fit takes.
t <- system.time(data.tree <- C5.0(formula = label ~ ., data = even_data))
# Predict classes for the held-out rows; accuracy is the share of correct
# predictions, i.e. the diagonal of the confusion matrix over its total.
p <- predict(data.tree, odd_data, type = "class")
confMat <- table(odd_data$label, p)
accuracy <- sum(diag(confMat)) / sum(confMat)
# Print out the fitted tree summary, the test accuracy, and the timing.
print(summary(data.tree))
print("Accuracy")
print(accuracy)
print(t)
|
029200814ac05485ab924fad511f58fdd9aede4f | 8460f01682430d81affb89d2e7d48f2deea5fa41 | /run_analysis.R | 4471631da80aca4fb79df74c10bfd4392e75255a | [] | no_license | blue3sky/CleaningDataProject | 9e94738824466a2e285a3e040ef88e84ae5256b5 | 55d686091e61042258b13e642b7c4221737603ee | refs/heads/master | 2021-01-24T22:59:57.097001 | 2014-10-26T13:37:08 | 2014-10-26T13:37:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,977 | r | run_analysis.R | # First please set the working directory as the directory where the Samsung data is located.
# This run_analysis.R file does the following:
# *********************************************************************************************************************
# 1.Merges the training and the test sets to create one data set.
# *********************************************************************************************************************
# First we deal with the training set
# We start with the X_train data
Xtrain=read.table("./train/X_train.txt")
dim(Xtrain) # we have a look and get familiar with the data
names(Xtrain)
Xtrain[1:10,1:10]
# We add the y_train data...
Ytrain=read.table("./train/y_train.txt")
dim(Ytrain)
names(Ytrain)
Ytrain[1:10,]
table(Ytrain$V1)
# ... and the subject_train data
Subtrain=read.table("./train/subject_train.txt")
dim(Subtrain)
names(Subtrain)
Subtrain[1:10,]
table(Subtrain$V1)
# We now perform 2 first "merging operations".
# I.e. simply putting together the x_train data with the the y_train and the sub_train data
firstMerge=cbind(Xtrain,Ytrain)
dim(firstMerge)
secondMerge=cbind(firstMerge,Subtrain)
dim(secondMerge)
# Now we start again and perform basically the same thing for the test data.
Xtest=read.table("./test/x_test.txt")
dim(Xtest)
names(Xtest)
Xtest[1:10,1:10]
# We add the y_test data...
Ytest=read.table("./test/y_test.txt")
dim(Ytest)
names(Ytest)
Ytest[1:10,]
table(Ytest$V1)
# ... and the subject_test data
Subtest=read.table("./test/subject_test.txt")
dim(Subtest)
names(Subtest)
Subtest[1:10,]
table(Subtest$V1)
# We now perform the 2 same first "merging operations".
# I.e. simply putting together the x_test data with the the y_test and the sub_test data
firstMerge2=cbind(Xtest,Ytest)
dim(firstMerge2)
secondMerge2=cbind(firstMerge2,Subtest)
dim(secondMerge2)
# Now let's simply append the test data frame below the train data frame
combDF=rbind(secondMerge,secondMerge2)
dim(combDF)
# *********************************************************************************************************************
# 2.Extracts only the measurements on the mean and standard deviation for each measurement.
# *********************************************************************************************************************
# We read-in the variable names
myFeatures=read.table("./features.txt")
dim(myFeatures)
myFeatures[1:10,]
varNames=as.character(myFeatures[,2]) # We transform those names as characters instead of factors
# NOTE: in the pattern below "()" are empty regex groups, so it matches the
# substrings "std"/"mean" anywhere — this also keeps meanFreq() columns.
a=grepl("std()|mean()",varNames) # this is a logical vector telling which name contains either std() or mean()
sum(a)
b=c(a,c(TRUE,TRUE)) # we want to keep the last 2 columns as well (Activity and Individual)
sum(b) # This is going to be our number of columns after subsetting
length(b)
dim(combDF) # we check that the dimensions match
# Now we do the actual subsetting operation
combDF2=combDF[,b] # We only keep the colums whose names contain either std() or mean()
dim(combDF2)
# *********************************************************************************************************************
# 3.Uses descriptive activity names to name the activities in the data set
# *********************************************************************************************************************
tableActivity=read.table("./activity_labels.txt")
tableActivity
tableActivity2=tableActivity[,2]
tableActivity2
class(tableActivity2)
tableActivity3=as.character(tableActivity2)
tableActivity3
class(tableActivity3)
length(tableActivity3)
# We now replace the integers from 1 to 6 which used to describe activity names by their more explicit names
# (after the cbind steps above, column 80 holds the activity code and
# column 81 the subject id)
activityNames=sapply(combDF2[,80],function (x) tableActivity3[x])
head(activityNames)
tail(activityNames)
combDF2[1:10,79:81]
combDF2[,80] = activityNames
combDF2[1:10,79:81]
# *********************************************************************************************************************
# 4.Appropriately labels the data set with descriptive variable names
# *********************************************************************************************************************
names(combDF2)
c=grep("std()|mean()",varNames,value=TRUE)
length(c)
head(c,50)
d=gsub("-","",c)
e=gsub("\\()","",d)
e
f=tolower(e)
f
length(f)
g=c(f,c("activity","individual"))
names(combDF2)=g
combDF2[1:10,77:81]
# *********************************************************************************************************************
# 5.From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject
# *********************************************************************************************************************
# reshape2 supplies melt()/dcast() for the long-to-wide aggregation below
library(reshape2)
MeltDF=melt(combDF2,id=c("activity","individual"),measure.vars=f)
dim(MeltDF)
head(MeltDF)
RecastDF=dcast(MeltDF,individual+activity~variable,mean)
RecastDF[,1:5]
class(RecastDF)
dim(RecastDF)
|
38f615f8aa648ad10806ef6d184dd3271bd759e7 | ae50d81889d88e0510bd4d076c6a559848cf112a | /Shiny-Templates/Self-Contained-Apps/Plotly-map-with-timeline/ui.R | 41815487afee29e61b733a45bc57925488317bb3 | [
"MIT"
] | permissive | ToonTalk/Live-Data_Scripts-and-Templates | a0f784a0320cc9a8dac5e030b51f44aa72cec32c | 4855d56f7b6b2be212ff2f7df3c867788e22e225 | refs/heads/gh-pages | 2021-01-15T12:30:57.498318 | 2016-08-05T14:54:57 | 2016-08-05T14:54:57 | 65,015,940 | 0 | 0 | null | 2016-08-05T12:12:43 | 2016-08-05T12:12:42 | null | UTF-8 | R | false | false | 3,327 | r | ui.R | ## ==== Packages to load for server
## Client-side packages: shiny itself, plotly (interactive map), shinythemes
## (cerulean theme), knitr, and DT (renders the example table below).
library(shiny) # Some advanced functionality depends on the shiny package being loaded client-side, including plot.ly
library(plotly) # Load the plotly library
library(shinythemes) # Template uses the cerulean theme as it is pretty
library(knitr)
library(DT)
## ==== Global client-side Variables
## One-row example illustrating the .csv structure this template expects
## (shown via datatable() further down the page).
## NOTE: data.frame()'s default check.names = TRUE converts these quoted
## names to syntactic ones (e.g. "Send.Location").
example_data_frame <-
  data.frame(
    "Send Location" = c("50.97178\n 13.960129"),
    "Send City" = c("DEU, Mockethal"),
    "Receive Location" = c("50.97178\n 13.960129"),
    "Receive City" = c("DEU, Mockethal"),
    "Date" = c("1800-01-01"),
    "Category" = c("A")
  )
## ==== shinyUI function which generates the UI
shinyUI(fluidPage(
## ==== Include google analytics code
tags$head(includeScript("google-analytics.js")),
## ==== Automatically include vertical scrollbar
## ==== This prevents the app from reloading content when the window is resized which would otherwise result in the
## ==== appearance of the scrollbar and the reloading of content. Note that "click data" may still be lost during
## ==== resizing, as discussed here https://github.com/rstudio/shiny/issues/937
tags$style(type = "text/css", "body { overflow-y: scroll; }"),
theme = shinytheme("cerulean"),
HTML(
"<h2>Plot.ly Scattergeo Plot with Timeline to Filter out Data</h2>"
),
sidebarLayout(
sidebarPanel(
uiOutput("show_timeslider_UI"),
# uiOutput("legend_type_UI"),
uiOutput("time_period_of_interest_UI"),
uiOutput("show_letters_before_date_UI"),
uiOutput("show_routes_UI"),
width = 4
),
mainPanel(
uiOutput("nothing_to_display_UI"),
plotlyOutput("europe_map", height = "100%"),
width = 8
)
),
wellPanel(
HTML(
"<p>This Shiny app is a template designed by Martin Hadley in the IT Services Department of Oxford University for the Live Data Project</p>",
"<p>The template takes a .csv file with the following structure</p>"
),
datatable(example_data_frame,options = list(dom = 't', autowidth = "50%",rownames = FALSE), rownames = FALSE),
HTML(
"<br>",
"<p>The example data in this application is an anonymised subset of data collected from the 19th Century postal network, the size of each point
corresponds to the number of letters sent from that location.</p>"
),
HTML(
"<br><p>The interactive map above is provided by the <a href=http://plot.ly>plot.ly</a> R package and provides the following features:</p>",
"<ul>",
"<li>Zoom with scrollwheel/touch</li>",
"<li>Hide a location by clicking its corresponding trace in the legend</li>",
"</ul>"
),
HTML("<a rel='license' href='http://creativecommons.org/licenses/by/4.0/'><img alt='Creative Commons License' style='border-width:0'
src='https://i.creativecommons.org/l/by/4.0/88x31.png' /></a><br /><span xmlns:dct='http://purl.org/dc/terms/'
href='http://purl.org/dc/dcmitype/InteractiveResource' property='dct:title' rel='dct:type'>Plot.ly Scattergeo Plot with Timeline
to Filter out Data</span> by <span xmlns:cc='http://creativecommons.org/ns#' property='cc:attributionName'>Live Data Project</span>
is licensed under a <a rel='license' href='http://creativecommons.org/licenses/by/4.0/'>Creative Commons Attribution 4.0 International License</a>.")
)
)) |
d4f2944da3f2fc0761c4703b410bdf9c135d3fc7 | 4852053beec8d61bc1ea1f3443ee4e56ba8f4d89 | /plot2.R | 0ffdb309ef88169db2362b9b8d6a284fe4e1a8bc | [] | no_license | juste-zabarskaite/ExData_Plotting1 | 329b3206159821f4f65ea16a915784f6f4b59551 | 651df2b344f674a5720e701771b9891a4f95d264 | refs/heads/master | 2020-01-24T20:54:16.628755 | 2016-11-17T01:57:52 | 2016-11-17T01:57:52 | 73,968,413 | 0 | 0 | null | 2016-11-16T22:49:18 | 2016-11-16T22:49:18 | null | UTF-8 | R | false | false | 798 | r | plot2.R | # Exploratory Data Analysis - Week 1 Assignment
## Read data, generate plot & save as PNG file
plot2 <- function() {
## read data from text file
## install.packages("data.table")
library(data.table)
source <- "household_power_consumption.txt"
x <- read.table(source, sep=";", header=TRUE, na.strings="?")
## subset dates 2007-02-01 and 2007-02-02
x$Date <- as.Date(as.character(x$Date), format="%d/%m/%Y")
data <- subset(x, Date>="2007-02-01" & Date<="2007-02-02")
## generate plot2 figure
datetime <- strptime(paste(data$Date, data$Time, sep=" "), "%Y-%m-%d %H:%M:%S")
plot(datetime, data$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
## save as PNG file
dev.copy(png, file="plot2.png", width=480, height=480)
dev.off()
} |
9ad7270da664e064f7be52aec8a8b686d20a5726 | 93b800fb677d6c7685a2934be4a44079de170990 | /man/htable.Rd | dd3d0cdb081818f095bd1fa63651553333ba0490 | [
"MIT"
] | permissive | mkearney/lop | fa47144543f9952745c31c8aa0cd7b28ad7ee6e4 | d0a0d468454881462753f81793aa1a23dbc53c1b | refs/heads/master | 2022-04-11T01:49:57.379555 | 2020-04-06T22:02:34 | 2020-04-06T22:02:34 | 202,009,106 | 4 | 0 | null | null | null | null | UTF-8 | R | false | true | 308 | rd | htable.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-funs.R
\name{htable}
\alias{htable}
\title{htable}
\usage{
htable(x, header = NA, trim = TRUE, fill = FALSE, dec = ".")
}
\description{
See \code{rvest::\link[rvest:html_table]{html_table}} for details.
}
\keyword{internal}
|
01d5f5e72efad71482d1627f92d0aa6a9a88194c | 17d582790e37f4a1fa3cfcfc531fdf5c4f4086d4 | /packrat/lib/x86_64-apple-darwin18.2.0/3.5.2/rcmdcheck/tests/testthat.R | a822cfb24789346c77a5fdf9d21ba043ccd77d99 | [] | no_license | teyden/asthma-research | bcd02733aeb893074bb71fd58c5c99de03888640 | 09c1fb98d09e897e652620dcab1482a19743110f | refs/heads/master | 2021-01-26T08:20:58.263136 | 2020-02-27T04:12:56 | 2020-02-27T04:12:56 | 243,374,255 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 62 | r | testthat.R | library(testthat)
library(rcmdcheck)
# Discover and run every testthat test under tests/testthat/ for rcmdcheck.
test_check("rcmdcheck")
|
a1a7de642f44aec4912017244aa2fd43ae9081c8 | 343d1d087b4fe03548e585995cb403085ea15a6b | /src/analysis_for_plots.R | 91a1537f80a14e7b89984deaf97818f4841335e7 | [] | no_license | LaurelineJ/Reservoir_Optimization_Using_KNN | a418756c688be1ff151ebe9f0f9dd484bb8ddf13 | 1697cd60aace2935271b5c4cc875a76b155c617b | refs/heads/master | 2021-04-27T07:59:05.843033 | 2018-02-21T20:37:56 | 2018-02-21T20:37:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,485 | r | analysis_for_plots.R |
## --- PDF report ---------------------------------------------------------
## One multi-page PDF per scenario (with/without the Rio Sao Francisco
## transfer), summarising the KNN optimisation results over 25 years.
if(with_rioSF == TRUE){
  pdf(file=paste("../results/results from KNN Sim with RSF_",percent_init_reservoir,"initstorage for 25 years.pdf"),onefile=TRUE,width = 12,height=6)
}else{
  pdf(file=paste("../results/results from KNN Sim without RSF_",percent_init_reservoir,"initstorage for 25 years.pdf"),onefile=TRUE,width = 12,height=6)
}
#par(mfrow=c(2,2))
#par(mar=c(4,4,4,1))
## One page per reservoir r: storage trajectories for every scenario s,
## with the reservoir's capacity SCmax_rt as a red reference line.
for(r in 1:nR){
  plot(rSfinalKNN[r,,1],type="l",ylim=c(min(rSfinalKNN[r,,]),max(rSfinalKNN[r,,])),ylab="storage [mio m3]",xlab = "months")
  for(s in 2:nS){lines(rSfinalKNN[r,,s])}
  abline(h=SCmax_rt[r,1],col="red")
  #lines(Storagesimfinal[r,],type="l", legend("topleft",c("Simlated Storage", col="red"),col="red"))
  title(paste(resname[r], "reservoir"))
}
## Distribution of total withdrawal failure across scenarios, marked with
## the mean, 10/50/90th percentiles, and the simulated-rule failure total.
sumfailuresim<-sum(Failuresim)
hist(colSums(colSums(rFfinalKNN)),xlab = "Failure [mio m3]",main = " ", xlim = c(0 , 120))
abline(v=mean(colSums(colSums(rFfinalKNN))),col="red",lwd=3)
abline(v=quantile(colSums(colSums(rFfinalKNN)),probs = c(0.1,0.5,0.9)),col=c(3:nR),lwd=3,lty=2:4)
abline(v=sumfailuresim,col=6,lwd=3,lty=5)
legend("topleft",c("Mean","10th Percentile","Median","90th Percentile","Simulated Failure"), col=c(2:(nR+1)),lty=1:5,lwd=3,cex=1,bty="n")
if(with_rioSF == TRUE){
  title(paste0("Distribution failure for 25 years \nwith Rio Sao Francisco and ", percent_init_reservoir[1]*100,"% initial storage"),cex.main=1)
} else {
  title(paste0("Distribution failure for 25 years \nwithout Rio Sao Francisco and ", percent_init_reservoir[1]*100,"% initial storage"), cex.main=1)
}
## One page per reservoir: inflow trajectories I_rts for every scenario.
## NOTE(review): the y-axis label still says "storage" here -- likely a
## copy/paste leftover from the storage plots above; confirm intended.
for(r in 1:nR){
  plot(I_rts[r,,1],type="l",ylim=c(min(I_rts[r,,]),max(I_rts[r,,])),ylab="storage [mio m3]",xlab = "months")
  for(s in 2:nS){lines(I_rts[r,,s])}
  abline(h=SCmax_rt[r,1],col="red")
  title(paste("inflow reservoir", resname[r]))
}
## System-wide failure time series (summed over reservoirs), one line per
## scenario: withdrawal failures, release failures, and their total.
plot(colSums(rFfinalKNN[,,1]),type="l",xlab="months",ylab="in mio m3",ylim=c(min(colSums(rFfinalKNN[,,])),max(colSums(rFfinalKNN[,,]))))
for(s in 2:nS){lines(colSums(rFfinalKNN[,,s]))}
title("Withdrawal failure")
plot(colSums(r2FfinalKNN[,,1]),type="l",xlab="months",ylab="in mio m3",ylim=c(min(colSums(r2FfinalKNN[,,])),max(colSums(r2FfinalKNN[,,]))))
for(s in 2:nS){lines(colSums(r2FfinalKNN[,,s]))}
title("Release failure")
plot(colSums((rFfinalKNN+r2FfinalKNN)[,,1]),type="l",xlab="months",ylab="in mio m3",ylim=c(min(colSums((rFfinalKNN+r2FfinalKNN)[,,])),max(colSums((rFfinalKNN+r2FfinalKNN)[,,]))))
for(s in 2:nS){lines(colSums((rFfinalKNN+r2FfinalKNN)[,,s]))}
title("Total failure")
## Aggregate withdrawals: mLOC/mIMP collapse the time dimension of the
## local-reservoir and import withdrawal arrays to (source x scenario).
mLOC=apply(rQfinalKNN,MARGIN=c(1,3),sum)
mIMP=apply(rQIMPfinalKNN,MARGIN=c(1,3),sum)
mALLres<-colSums(mLOC)
## Stack per-source withdrawals: Jucazinho (reservoir 1), the remaining
## reservoirs, truck imports, and -- if enabled -- the Rio Sao Francisco row.
if(with_rioSF==TRUE){
  imp<-mIMP[1:nTrucks,]
  imp<-apply(imp,MARGIN=c(2),sum)
  Jucazinho<-mLOC[1,]
  otherreservoirs<-mLOC[2:nR,]
  otherreservoirs<-apply(otherreservoirs,MARGIN=c(2),sum)
  riosf<-mIMP[nIMP,]
  withdrawaloptim<-rbind(Jucazinho,otherreservoirs,imp,riosf)
}else{
  imp<-mIMP[1:nTrucks,]
  imp<-apply(imp,MARGIN=c(2),sum)
  Jucazinho<-mLOC[1,]
  otherreservoirs<-mLOC[2:nR,]
  otherreservoirs<-apply(otherreservoirs,MARGIN=c(2),sum)
  withdrawaloptim<-rbind(Jucazinho,otherreservoirs,imp)
}
## AB indexes the Rio Sao Francisco row of withdrawaloptim (4th when the
## transfer is included, otherwise the 3rd row is the last one).
if(with_rioSF==TRUE){
  AB<-4
}else{
  AB=3
}
## Median failure/withdrawal series across scenarios, plus the simulated
## (rule-based) counterparts used for comparison in the plots below.
rFmedian<-apply(rFfinalKNN,MARGIN=c(1,2),median)
rFmedianJuca<-rFmedian[1,]
rFmedianothers<-colSums(rFmedian[2:nR,])
rFmedianAll<-colSums(rFmedian[1:nR,])
rFall<-colSums(rFfinalKNN)
rFallmedian<-apply(rFall,1,median)
withdrawalsimJuca<-withdrawalKNNsimfinal[1,]
withdrawalsimothers<-colSums(withdrawalKNNsimfinal[2:nR,])
withdrawalsimALL<-colSums(withdrawalKNNsimfinal[1:nR,])
Failuresimulated<-colSums(FailureKNNsimfinal)
## Comparison plot: optimised withdrawals (net of median failure) vs the
## simulated series, with the corresponding failure series overlaid.
plot((mALLres-rFmedianAll),type="l",ylab="Withdrawal [mio m3]",xlab = "months",col=1,ylim=c(min(0),max(sum(D_mt[,1]))),lwd=3,lty=1)
if (with_rioSF == TRUE){
  lines(withdrawaloptim[AB,], col = 2,lwd=3,lty=3)}
if (with_rioSF == TRUE){
  lines(rFallmedian, col=3, lwd=3,lty=6)
} else {
  lines(rFallmedian, col=3, lwd=3,lty=6)
}
lines(withdrawalsimALL, type="l",col=4,ylim=c(min(0),max(sum(D_mt[,1]))),lwd=1,lty=5)
if (with_rioSF == TRUE){
  lines(OneFailureKNNsimfinal[1,], col=3, lwd=3,lty=6)
}else{
  lines(Failuresimulated, col=3, lwd=3,lty=6)
}
if(with_rioSF == TRUE){
  legend("topleft", c("","All Reservoirs Optim","Rio Sao Francisco Optim","Failure Optim","All Reservoirs Sim","Failure Sim"), col = c(0,1,2,3,4,5),lty=c(1,2,3,4,5),pt.cex=0.5,lwd=3, cex=0.75,bty="n")
}else{
  legend("topleft",c("","All Reservoirs Optim", "Failure Optim","All Reservoirs Sim","Failure Sim"), col = c(1,3,4,5),pt.cex=0.5,lwd=3, lty=c(0,1,3,4,5), cex=0.75,bty="n")
}
if(with_rioSF == TRUE){
  title(paste0("Withdrawals from reservoirs and imports and failure for 25 years \nwith Rio Sao Francisco and ", percent_init_reservoir[1]*100,"% initial storage"),cex.main=1)
} else {
  title(paste0("Withdrawals from reservoirs and imports and failure for 25 years \nwithout Rio Sao Francisco and ", percent_init_reservoir[1]*100,"% initial storage" ), cex.main=1)
}
## Second comparison plot: simulated withdrawals only, with the Rio Sao
## Francisco imports and failure series overlaid.
plot((withdrawalsimALL),type="l",ylab="Withdrawal [mio m3]",xlab = "months",col=1,ylim=c(min(0),max(sum(D_mt[,1]))),lwd=3,lty=1)
if (with_rioSF == TRUE){
  lines(ImportsimRioSFknn[1,], col = 2,lwd=3,lty=3)
}
if (with_rioSF == TRUE){
  lines(OneFailureKNNsimfinal[1,], col=3, lwd=3,lty=6)
}else{
  lines(Failuresimulated, col=3, lwd=3,lty=6)
}
if(with_rioSF == TRUE){
  legend("topleft", c("","All Reservoirs","Rio Sao Francisco","Failure"), col = c(0,1,2,3),lty=c(0,1,2,3),pt.cex=0.5,lwd=3, cex=0.75,bty="n")
}else{
  legend("topleft",c("","All Reservoirs Sim","Failure"), col = c(0,1,3),pt.cex=0.5,lwd=3, lty=c(0,1,3), cex=0.75,bty="n")
}
if(with_rioSF == TRUE){
  title(paste0("Withdrawals from reservoirs and imports and failure for 25 years \nwith Rio Sao Francisco and ", percent_init_reservoir[1]*100,"% initial storage" ),cex.main=1)
} else {
  title(paste0("Withdrawals from reservoirs and imports and failure for 25 years \nwithout Rio Sao Francisco and ", percent_init_reservoir[1]*100,"% initial storage" ), cex.main=1)
}
## Close the PDF device opened at the top of this section.
dev.off()
## --- On-screen versions -------------------------------------------------
## The same histogram and two comparison plots as in the PDF above, drawn
## to the current (screen) device after dev.off(); only the axis limits of
## the histogram differ from the PDF version.
hist(colSums(colSums(rFfinalKNN)),xlab = "Failure [mio m3]",main = " ",ylim = c(0,10), xlim = c(0 , max(sum(D_mt[,1])*24)))
abline(v=mean(colSums(colSums(rFfinalKNN))),col="red",lwd=3)
abline(v=quantile(colSums(colSums(rFfinalKNN)),probs = c(0.1,0.5,0.9)),col=c(3:nR),lwd=3,lty=2:4)
abline(v=sumfailuresim,col=6,lwd=3,lty=5)
legend("topleft",c("Mean","10th Percentile","Median","90th Percentile","Simulated Failure"), col=c(2:(nR+1)),lty=1:5,lwd=3,cex=1,bty="n")
if(with_rioSF == TRUE){
  title(paste0("Distribution failure for 25 years \nwith Rio Sao Francisco and ", percent_init_reservoir[1]*100,"% initial storage"),cex.main=1)
} else {
  title(paste0("Distribution failure for 25 years \nwithout Rio Sao Francisco and ", percent_init_reservoir[1]*100,"% initial storage"), cex.main=1)
}
## Optimised vs simulated withdrawals and failures (screen copy).
plot((mALLres-rFmedianAll),type="l",ylab="Withdrawal [mio m3]",xlab = "months",col=1,ylim=c(min(0),max(sum(D_mt[,1]))),lwd=3,lty=1)
if (with_rioSF == TRUE){
  lines(withdrawaloptim[AB,], col = 2,lwd=3,lty=3)}
if (with_rioSF == TRUE){
  lines(rFallmedian, col=3, lwd=3,lty=6)
} else {
  lines(rFallmedian, col=3, lwd=3,lty=6)
}
lines(withdrawalsimALL, type="l",col=4,ylim=c(min(0),max(sum(D_mt[,1]))),lwd=1,lty=5)
if (with_rioSF == TRUE){
  lines(OneFailureKNNsimfinal[1,], col=3, lwd=3,lty=6)
}else{
  lines(Failuresimulated, col=3, lwd=3,lty=6)
}
if(with_rioSF == TRUE){
  legend("topleft", c("","All Reservoirs Optim","Rio Sao Francisco Optim","Failure Optim","All Reservoirs Sim","Failure Sim"), col = c(0,1,2,3,4,5),lty=c(1,2,3,4,5),pt.cex=0.5,lwd=3, cex=0.75,bty="n")
}else{
  legend("topleft",c("","All Reservoirs Optim", "Failure Optim","All Reservoirs Sim","Failure Sim"), col = c(1,3,4,5),pt.cex=0.5,lwd=3, lty=c(0,1,3,4,5), cex=0.75,bty="n")
}
if(with_rioSF == TRUE){
  title(paste0("Withdrawals from reservoirs and imports and failure for 25 years \nwith Rio Sao Francisco and ", percent_init_reservoir[1]*100,"% initial storage"),cex.main=1)
} else {
  title(paste0("Withdrawals from reservoirs and imports and failure for 25 years \nwithout Rio Sao Francisco and ", percent_init_reservoir[1]*100,"% initial storage" ), cex.main=1)
}
## Simulated withdrawals with imports and failure overlaid (screen copy).
plot((withdrawalsimALL),type="l",ylab="Withdrawal [mio m3]",xlab = "months",col=1,ylim=c(min(0),max(sum(D_mt[,1]))),lwd=3,lty=1)
if (with_rioSF == TRUE){
  lines(ImportsimRioSFknn[1,], col = 2,lwd=3,lty=3)
}
if (with_rioSF == TRUE){
  lines(OneFailureKNNsimfinal[1,], col=3, lwd=3,lty=6)
}else{
  lines(Failuresimulated, col=3, lwd=3,lty=6)
}
if(with_rioSF == TRUE){
  legend("topleft", c("","All Reservoirs","Rio Sao Francisco","Failure"), col = c(0,1,2,3),lty=c(0,1,2,3),pt.cex=0.5,lwd=3, cex=0.75,bty="n")
}else{
  legend("topleft",c("","All Reservoirs Sim","Failure"), col = c(0,1,3),pt.cex=0.5,lwd=3, lty=c(0,1,3), cex=0.75,bty="n")
}
if(with_rioSF == TRUE){
  title(paste0("Withdrawals from reservoirs and imports and failure for 25 years \nwith Rio Sao Francisco and ", percent_init_reservoir[1]*100,"% initial storage" ),cex.main=1)
} else {
  title(paste0("Withdrawals from reservoirs and imports and failure for 25 years \nwithout Rio Sao Francisco and ", percent_init_reservoir[1]*100,"% initial storage" ), cex.main=1)
}
## --- Cost accounting ----------------------------------------------------
## Total cost over the first 12 months, split into local-reservoir
## withdrawals, truck imports, Rio Sao Francisco imports (if enabled), and
## the deficit (failure) penalty at a flat unit cost of 20 per mio m3.
## The simulated-rule equivalents are computed alongside for comparison.
if(with_rioSF==TRUE){
  rQLOCAL<-rQ[,,1:12]
  rQimport<-rQIMP[1:nTrucks,,1:12]
  rQRSF<-rQIMP[nTrucks+1,,1:12]
  # Median monthly failure across scenarios for the deficit cost.
  Failure=colSums(rF)
  Failure<-apply(Failure,1,median)
  rFfinalmedian<-apply(rFall,1,median)
  costQ<-costQ_rmt
  costImport <- costIMP_jmt[1:nTrucks,,]
  costRSF<-costIMP_jmt[nTrucks+1,,]
  costFail <- 20
  Failuresim_one<-Failurewithdrawalsim[,1:12]
  RioSFsim_one<-ImportsimRioSFknn[,1:12]
  Truckssim_one<-Truckssimknn[1:12]
  TotLOCALCost=sum(rQLOCAL*costQ[,,1:12])
  TotTRUCKCost=sum(rQimport*costImport[,,1:12])
  TotRSFCost=sum(rQRSF*costRSF[,1:12])
  TotalDEFICITCost=sum(Failure*costFail)
  #LOCALsimCost=sum(withdrawalsim_one*costQ[,1:12])
  FailuresimCost=sum(Failuresim_one*costFail)
  RioSFsimCost=sum(RioSFsim_one*costRSF[,1:12])
  TruckssimCost=sum(Truckssim_one*costImport[,,1:12])
  TOTALCOST=TotLOCALCost+TotTRUCKCost+TotRSFCost+TotalDEFICITCost
  print(TotLOCALCost)
  print(TotTRUCKCost)
  print(TotRSFCost)
  print(TotalDEFICITCost)
  print(TOTALCOST)
  COST<-rbind(TotLOCALCost,TotTRUCKCost,TotRSFCost,TotalDEFICITCost,TOTALCOST,FailuresimCost,RioSFsimCost,TruckssimCost) #LOCALsimCost,not includes
  # write.csv(COST,file = paste0("TT/Each year cost with Rio Sao Francisco and ", percent_init_reservoir[1]*100,"% initial storage in year ", yearpredictingfor,".csv"))
}else{
  # Same accounting without the Rio Sao Francisco components.
  rQLOCAL<-rQ[,,1:12]
  rQimport<-rQIMP[1:nTrucks,,1:12]
  Failure=colSums(rF)
  Failure<-apply(Failure,1,median)
  rFfinalmedian<-apply(rFall,1,median)
  costQ<-costQ_rmt
  costImport <- costIMP_jmt[1:nTrucks,,]
  costFail <- 20
  Failuresim_one<-Failurewithdrawalsim[,1:12]
  Truckssim_one<-Truckssimknn[1:12]
  TotLOCALCost=sum(rQLOCAL*costQ[,,1:12])
  TotTRUCKCost=sum(rQimport*costImport[,,1:12])
  TotalDEFICITCost=sum(Failure*costFail)
  FailuresimCost=sum(Failuresim_one*costFail)
  TruckssimCost=sum(Truckssim_one*costImport[,,1:12])
  TOTALCOST=TotLOCALCost+TotTRUCKCost+TotalDEFICITCost
  print(TotLOCALCost)
  print(TotTRUCKCost)
  print(TotalDEFICITCost)
  print(TOTALCOST)
  COST<-rbind(TotLOCALCost,TotTRUCKCost,TotalDEFICITCost,TOTALCOST,FailuresimCost,TruckssimCost) #LOCALsimCost,not includes
} ### this 4 next lines have to be changed too.
## Allocate the cost-collection matrix (rows = cost components, columns =
## the 25 simulation years) on the first pass only; the row count depends
## on whether the Rio Sao Francisco components are present (8 vs 6).
if (timeknn == 1){
  if (with_rioSF==1){
    COSTFINAL<-array(data=0,c(8,25))
  }else{
    COSTFINAL<-array(data=0,c(6,25))
  }
}
COSTFINAL[,timeknn]<-COST[,1] |
d267a7620288d2a940500e0c40f256dbf6a744ff | 2eebaf7f9e3246d2453df9289574fba9e7f8151a | /R/flood_risk_map.R | 181e0b6aa0f9b50e4e351c7781c57be1195d88d9 | [
"MIT"
] | permissive | mikejohnson51/FlowFinder | b47f2e726c452770bfa755c0da90ff7d13a1da92 | 617610cb3d53229de23a43775892223f8f854162 | refs/heads/master | 2021-06-07T20:26:34.622266 | 2021-03-15T00:05:05 | 2021-03-15T00:05:05 | 136,057,097 | 6 | 1 | MIT | 2021-03-15T00:05:05 | 2018-06-04T17:00:17 | R | UTF-8 | R | false | false | 2,886 | r | flood_risk_map.R | #' Make Flood Risk Max
#'
#' Build an interactive leaflet cluster map of flood risk: one circle
#' marker per NHD reach (COMID), sized by the modelled change at its peak.
#'
#' @param path path to an fst file holding, per COMID, the peak value
#'   (\code{change}) and the time of the peak (\code{max_date})
#' @param dir directory holding the packaged lookup data; defaults to the
#'   installed FlowFinder \code{flowfinder} folder
#'
#' @return a leaflet map
#' @export
#'
make_flood_risk_map = function(path = NULL, dir = NULL){

  if (is.null(path)) {
    stop("'path' to the per-COMID peak-value fst file must be supplied")
  }
  if (is.null(dir)) {
    dir <- system.file("flowfinder", package = "FlowFinder")
  }

  # Peak values, plus the COMID -> lat/long/timezone lookup table.
  # (The timezone column was attached to the lookup table in a one-off
  # preprocessing step against the tz_world shapefile.)
  vals = fst::read_fst(path)
  df = fst::read_fst(paste0(dir, "/data/comids_w_tz.fst"))

  data = merge(vals, df, by = "COMID")

  # Express each site's peak time in its own timezone. format()'s tz
  # argument must be a scalar, so this is done row by row; vapply avoids
  # growing a column inside a loop and handles zero rows cleanly.
  data$locTime <- vapply(
    seq_len(nrow(data)),
    function(i) format(data$max_date[i], tz = data$tz[i], usetz = TRUE),
    character(1)
  )

  # Marker popup HTML: COMID / location / timezone / peak times, plus the
  # CSS classes ('open-stream', 'flood-data', ...) the app binds clicks to.
  pop <- paste(
    paste0("<a class='open-stream'>", "<strong>NHD COMID: </strong>", data$COMID, "</a>"),
    paste0("<a class='lat_lon'>", "<strong>Location: </strong>", paste0(data$lat," / ",data$long),"</a>"),
    paste("<strong>Timezone:</strong>", data$tz),
    paste("<strong>Time of Peak (local):</strong>", data$locTime),
    paste("<strong>Time of Peak (UTC):</strong>", data$max_date),
    paste('<a class="flood-data"><i class="fa fa-line-chart"></i></a>'),
    sep = "<br/>"
  )

  # Cluster icons coloured by how many markers they aggregate.
  cluster_icon_js <- JS(
    "function (cluster) {
    var childCount = cluster.getChildCount();
    if (childCount < 100) {
    c = 'rgba(255, 92, 93, .8);'
    } else if (childCount < 1000) {
    c = 'rgba(255, 0, 0, .8);'
    } else {
    c = 'rgba(128, 0, 0, .8);'
    }
    return new L.DivIcon({ html: '<div style=\"background-color:'+c+'\"><span>' + childCount + '</span></div>', className: 'marker-cluster', iconSize: new L.Point(40, 40) });
    }")

  m = leaflet() %>% addProviderTiles("CartoDB", group = "Base") %>%
    addProviderTiles("Esri.WorldImagery", group = "Imagery") %>%
    addProviderTiles("Esri.NatGeoWorldMap", group = "Terrain") %>%
    addCircleMarkers(
      data = data,
      lat = data$lat,
      lng = data$long,
      # Cap marker radius at 20 px so extreme peaks stay readable.
      radius = ifelse(data$change /1000 > 20, 20, data$change /1000),
      color = "red",
      stroke = FALSE,
      fillOpacity = 0.5,
      clusterOptions = markerClusterOptions(iconCreateFunction = cluster_icon_js),
      popup = pop
    ) %>% addLayersControl(
      baseGroups = c("Base", "Imagery", "Terrain"),
      options = layersControlOptions(collapsed = TRUE)
    )

  return(m)
}
|
7f5a665800273d8c42f954f37930bc43b0ac80ba | ba686f2453edd95f3d310d700590cb675bc5ee7a | /R/table_definition.R | 870cc7cea0b2f1348dd332b4b2afc0feefb42345 | [
"MIT"
] | permissive | RedOakStrategic/redshiftTools | 4f6867d683473f9555d918b4d52820b31b413428 | cb3529700ed4919ec256696c46aa82fe7992ba26 | refs/heads/master | 2021-01-09T01:56:38.431219 | 2020-08-27T14:56:17 | 2020-08-27T14:56:17 | 242,209,200 | 6 | 0 | NOASSERTION | 2020-08-27T14:56:18 | 2020-02-21T18:52:53 | R | UTF-8 | R | false | false | 4,498 | r | table_definition.R | #' @importFrom "utils" "head"
# Smallest Redshift VARCHAR size (8, 16, ..., 32768, or 65535 bytes) that
# holds every value in `col` with room to spare (sizes strictly greater
# than the longest value are considered). Warns and returns 65535 when a
# value exceeds the Redshift maximum; falls back to 1024 for an all-NA or
# zero-length column.
calculateCharSize <- function(col){
  col <- as.character(col)
  maxChar <- max(nchar(col, type = 'bytes'), na.rm = TRUE)
  if (is.infinite(maxChar)) {
    # max() over zero non-NA lengths yields -Inf; default to a 1024 varchar.
    maxChar <- 1000
    warning('Empty column found, setting to 1024 length')
  }
  sizes <- c(2^c(3:15), 65535) # From 8 to 65535, max varchar size in redshift
  # Keep only candidate sizes strictly larger than the longest value.
  fsizes <- sizes[sizes > maxChar]
  if (length(fsizes) == 0) {
    warning("Character column over maximum size of 65535, set to that value but will fail if not trimmed before uploading!")
    warning(paste0('Example offending value: ', head(col[nchar(col) > 65535], 1)))
    return(max(sizes))
  } else {
    return(min(fsizes))
  }
}
# Map one data-frame column to a Redshift column type based on the column's
# (first) S3 class. Classes not handled by the switch -- character, factor,
# and anything else -- fall through to a VARCHAR sized by calculateCharSize();
# when `compression` is TRUE that VARCHAR gets a zstd encoding hint (other
# types are left for ANALYZE COMPRESSION).
colToRedshiftType <- function(col, compression) {
  # Named col_class rather than `class` to avoid shadowing base::class.
  col_class <- class(col)[[1]]
  switch(col_class,
         logical = {
           return('boolean')
         },
         numeric = {
           return('float8')
         },
         integer = {
           if (all(is.na(col))) { # Unknown column, all null
             return('int')
           }
           if (max(col, na.rm = TRUE) < 2000000000) { # Max int is 2147483647 in Redshift
             return('int')
           } else if (max(col, na.rm = TRUE) < 9200000000000000000) { # Max bigint is 9223372036854775807 in redshift, if bigger treat as numeric
             return('bigint')
           } else {
             return('numeric(38,0)')
           }
         },
         Date = {
           return('date')
         },
         POSIXct = {
           return('timestamp')
         },
         POSIXlt = {
           return('timestamp')
         }
  )

  # Fallback for character/factor/other: a sized VARCHAR.
  charSize <- calculateCharSize(col)
  if (compression == TRUE) {
    return(paste0('VARCHAR(', charSize, ') encode zstd'))
  } else {
    return(paste0('VARCHAR(', charSize, ')'))
  }
}
# One Redshift type per column of `df`, as a character vector named by
# column. vapply (rather than sapply) guarantees a character vector even
# for a zero-column data frame, where sapply would return an empty list.
getRedshiftTypesForDataFrame <- function(df, compression) {
  vapply(df, colToRedshiftType, character(1), compression)
}
#' Generate create table statement for Amazon Redshift
#'
#' This lets you easily generate a table schema from a data.frame, which allows for easily uploading to redshift afterwards.
#'
#' @param df the data.frame you want to upload to Amazon Redshift
#' @param table_name the name of the table to create, if not specified it'll use the data.frame name
#' @param sortkeys Column or columns to sort the table by
#' @param sortkey_style Sortkey style, can be compound or interleaved http://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data-compare-sort-styles.html
#' @param distkey Distkey column, can only be one, if chosen the table is distributed among clusters according to a hash of this column's value.
#' @param distkey_style Distkey style, can be even or all, for the key distribution use the distkey parameter. http://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html
#' @param compression Add encoding for columns whose compression algorithm is easy to guess, for the rest you should upload it to Redshift and run ANALYZE COMPRESSION
#' @examples
#'
#'n=1000
#'testdf = data.frame(
#'a=rep('a', n),
#'b=c(1:n),
#'c=rep(as.Date('2017-01-01'), n),
#'d=rep(as.POSIXct('2017-01-01 20:01:32'), n),
#'e=rep(as.POSIXlt('2017-01-01 20:01:32'), n),
#'f=rep(paste0(rep('a', 4000), collapse=''), n) )
#'
#'cat(rs_create_statement(testdf, table_name='dm_great_table'))
#'
#' @export
rs_create_statement <- function(
  df,
  table_name = deparse(substitute(df)),
  sortkeys,
  sortkey_style = 'compound',
  distkey,
  distkey_style = 'even',
  compression = TRUE
){
  # One "name type" definition per column, joined into the column list.
  definitions <- getRedshiftTypesForDataFrame(df, compression)
  fields <- paste(names(definitions), definitions, collapse = ',\n')

  sortkey_style <- tolower(sortkey_style)
  distkey_style <- tolower(distkey_style)

  if (ncol(df) > 1600) {
    warning("Redshift doesn't support tables of more than 1600 columns")
  }

  # Distribution clause: an explicit distkey column wins; otherwise the
  # diststyle keyword applies ('even' is Redshift's default, emits nothing).
  dkey <- ''
  if (!missing(distkey)) {
    dkey <- paste0('diststyle key distkey(', distkey, ')\n')
  } else if (distkey_style == 'all') {
    dkey <- paste0('diststyle all\n')
  } else if (distkey_style != 'even') {
    warning('Unknown distkey style ', distkey_style)
  }

  # Sort clause: multiple sortkeys honour sortkey_style; a single sortkey
  # uses the plain form.
  skey <- ''
  if (!missing(sortkeys)) {
    if (length(sortkeys) > 1) {
      skeyvals <- paste(sortkeys, collapse = ', ')
      if (!sortkey_style %in% c('interleaved', 'compound')) {
        warning('Unknown sortkey style ', sortkey_style)
      }
      skey <- paste0(sortkey_style, ' sortkey (', skeyvals, ')\n')
    } else {
      skey <- paste0('sortkey(', sortkeys, ')\n')
    }
  }

  return(paste0('CREATE TABLE ', table_name, ' (\n', fields, '\n)', dkey, skey, ';'))
}
|
d3905f2e1411ad7943b8d889f3a1d1807ca83453 | a7b4b3a28483bb2958fc454fb18affd26b3561bf | /R/geneSequence.R | ffed15079278c678665be1daf04fae10ae18869b | [] | no_license | drostlab/seqstats | 9519b99fdc51dbaaeb18da81e542e082a2267568 | b40bcbed58328d6642b3d856535f15236bde1a3c | refs/heads/master | 2022-05-12T09:07:24.444415 | 2020-04-26T13:06:23 | 2020-04-26T13:06:23 | 30,696,232 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,613 | r | geneSequence.R | #' @title Retrieve Biological Sequences of a given Set of Genes
#' @description This function takes an character vector storing gene ids of interest
#' and returns the biological sequence of the corresponding gene ids.
#' @param genes a character vector storing the gene id of a organisms of interest to be queried against the
#' acnucdb database.
#' @param db a character string specifying the database from which the sequence shall be retrieved.
#' @author Hajk-Georg Drost
#' @details Sequence information is retrieved from the acnucdb database.
#' @return A list of sequences for each corresponding input gene stored as string.
#' @examples
#' \dontrun{
#' # retrieve amino acid sequences from the 'swissprot' database
#' # for gene ids:"AT1G06090" = "Q9LND9" and "AT1G06100" = "Q9LND8"
#' seqs <- geneSequence(c("Q9LND9","Q9LND8"), db = "swissprot")
#'
#' # choose different databases available
#' seqinr::choosebank()
#' }
#'
#' @export
geneSequence <- function(genes, db){
n_genes <- length(genes)
seqList <- vector(mode = "list", length = n_genes)
# open acnucdb connection: seqinr
seqinr::choosebank(db)
# retrieve sequences for the corresponding gene list
seqList <- lapply(as.list(genes), retrieve_sequence)
# close acnucdb connection: seqinr
seqinr::closebank()
# return sequences as strings
res <- vector(mode = "list", length = n_genes)
res <- lapply(seqList, seqinr::c2s)
names(res) <- genes
return(res)
}
|
762859f11bc0c189fe069ccdbc9288321b6a1372 | 1056ff1109dedd7e36737fcb6ebd80f8fc599b8a | /02_Analisis_series_tiempo.R | df6ae93d7a180dc88fea30b2dfe72186315de793 | [] | no_license | Ajeronimo/Series-de-tiempo | c770bfa963e5e35e2a3a390f0ab83f60fb3d3564 | 7cb02c19c625dc7149a91bf0ce388b7aeb427d8d | refs/heads/master | 2023-07-22T03:56:56.400979 | 2021-09-10T12:00:58 | 2021-09-10T12:00:58 | 296,864,228 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,405 | r | 02_Analisis_series_tiempo.R | rm(list = ls())
# Clear the console
cat("\014")
# Set the working directory to this script's folder (RStudio only)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
getwd()
# Basic libraries for time-series analysis
# ------------------------------------------------------
# ------------------------------------------------------
library(TSstudio)
library(forecast)
library(lmtest)
library(rlang)
# Load and describe the data: monthly oil prices, 2010-2019
data=read.csv("petroleo.csv",header = T,sep=";",dec=".")
head(data,5)
tail(data,5)
data.ts <- ts(data = data$petroleo, start = c(2010,1),end = c(2019,12), frequency = 12 )
data.ts
plot(data.ts)
# Split the series into training/testing (last 12 months held out)
# -----------------------------------
data.ts.part = ts_split(data.ts, sample.out = 12)
train.ts = data.ts.part$train
test.ts = data.ts.part$test
# AUTO ARIMA optimiser
# -------------------------
# -------------------------
model1 <- auto.arima(train.ts)
print(model1)
# Series: train.ts
# ARIMA(1,1,0)(0,0,1)[12]
# Plot fitted values over the observed series
plot.ts(data.ts, main = 'Precio petroleo', col = 'blue')
lines(fitted(model1),col = 'red')
# Forecast
# --------
forecast_arima <- forecast(model1, h = 12)
forecast_arima
# Model performance on the held-out year
# ----------------------
test_forecast(actual = data.ts,
              forecast.obj = forecast_arima,
              test = test.ts)
# We can see the model does not fit well for the last periods.
# The model seems influenced by lags, but not by the nearby
# observations -- rather by those several lags back.
# SARIMAX
# ------------------
# ------------------
# Working with lagged regressors
library(data.table)
data_regre <- as.data.frame(train.ts)
data_regre$x1 <- shift(data_regre$x, n = 1, type = 'lag')
data_regre$x2 <- shift(data_regre$x, n = 2, type = 'lag')
# Impute the leading missing values created by the lags
data_regre$x1[1] <- 96.66
data_regre$x2[1] <- 96.66
data_regre$x2[2] <- 96.66
# NOTE(review): lowercase view() is tibble::view(); confirm View() was
# not intended here.
view(data_regre)
as.matrix(data_regre[c('x1','x2')]) # Select the variables used as regressors
model02 <- auto.arima(train.ts, xreg = as.matrix(data_regre[c('x1','x2')]))
print(model02)
# Regression with ARIMA(0,0,0)(0,0,1)[12] errors
# Forecast
# --------
data_regre_test <- as.data.frame(test.ts)
data_regre_test$x1 <- shift(data_regre_test$x, n = 1, type = 'lag')
data_regre_test$x2 <- shift(data_regre_test$x, n = 2, type = 'lag')
tail(train.ts) # to see the last value of the training data
data_regre_test$x1[1] <- 95.92 # fill with the last training value
data_regre_test$x2[1] <- 95.92
data_regre_test$x2[2] <- 95.92
# Forecast
# --------
forecast_sarima <- forecast(model02, h = 12,
                            xreg = as.matrix(data_regre_test[c('x1','x2')]))
test_forecast(actual = data.ts,
              forecast.obj = forecast_sarima,
              test = test.ts)
# Neural network models (feed-forward / perceptron-type networks)
# Inputs must be scaled so the weights are comparable
# ---------------------
# ---------------------
model02_nn <- nnetar(train.ts, scale.inputs = TRUE)
# model02_nn <- nnetar(train.ts, scale.inputs = TRUE, lambda = "auto")
print(model02_nn)
forecast_nn = forecast(model02_nn, h = 12)
plot_forecast(forecast_nn)
forecast_nn$fitted
checkresiduals(model02_nn)
|
1d15a8c4a6f90df0ac279610954fa0ee056a514e | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/beginr/examples/rpkg.Rd.R | a55055594cd9c26d45124faf3a416819676b3ca7 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 126 | r | rpkg.Rd.R | library(beginr)
### Name: rpkg
### Title: Create a new R package demo folder
### Aliases: rpkg
### ** Examples

# Scaffold a demo R package skeleton in the current working directory.
rpkg()
40c1bfa81edd4dd019471b2f0baf27b73c0548b6 | 43f3286dbec3cbb37c008cffcd75a8faaaca259f | /plot2.R | 537f18de9abfecc9ff61d3c4f2475e83e15adaeb | [] | no_license | Rpvrushank/ExData_Plotting1 | e6a1370ef70f0cb0554d5ade8cd9d5757b4ac0e8 | 5df0c98b21fd704e13ee56eb5b363c69d8a61673 | refs/heads/master | 2022-12-06T22:48:42.012389 | 2020-08-15T12:05:34 | 2020-08-15T12:05:34 | 286,724,159 | 0 | 0 | null | 2020-08-11T11:17:18 | 2020-08-11T11:17:17 | null | UTF-8 | R | false | false | 472 | r | plot2.R | Data_H <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
## Plot 2: Global Active Power over the two target days, saved as plot2.png.
## NOTE(review): `sub` shadows base::sub; assumes Data_H was read on the
## preceding line of this script.
sub <- Data_H[Data_H$Date %in% c("1/2/2007", "2/2/2007"),]
# Combine the date and time columns into POSIXlt timestamps for the x axis.
datetime <- strptime(paste(sub$Date, sub$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
GlobalActivePower <- as.numeric(sub$Global_active_power)
# Draw directly to the PNG device, then close it to flush the file.
png("plot2.png", width = 480, height = 480)
plot(datetime, GlobalActivePower,type = "l",xlab = " ", ylab = "Global Active power (kilowatts)")
dev.off()
|
6712acdb0a18dec75ac8eff8a3a90eb5593120d7 | 2e1f19f01e19a1acf2465d24fc3954263e281b52 | /man/get_recent_late_contributions_committee.Rd | 52218a62c9ab8c58b932031c246f1512246aa586 | [] | no_license | DavytJ/ProPublicaR | ebdc03ac0bc30efa6933aaa62316fa3fcbf98b00 | e9fe623ffc063665581238c3196f78bb32b08b77 | refs/heads/master | 2020-03-19T08:33:55.239980 | 2018-10-30T16:10:25 | 2018-10-30T16:17:09 | 136,215,002 | 0 | 0 | null | 2018-06-05T17:56:19 | 2018-06-05T17:56:19 | null | UTF-8 | R | false | true | 1,196 | rd | get_recent_late_contributions_committee.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_recent_late_contributions_committee.R
\name{get_recent_late_contributions_committee}
\alias{get_recent_late_contributions_committee}
\title{Get Recent Late Contributions to a Specific Committee
https://www.propublica.org/datastore/apis
HTTP Request: GET https://api.propublica.org/campaign-finance/v1/{cycle}/committees/{fec-id}/48hour}
\usage{
get_recent_late_contributions_committee(cycle = 2018, FEC_ID, myAPI_Key)
}
\arguments{
\item{cycle}{The election cycle}
\item{FEC_ID}{The FEC-assigned 9-character ID of a committee.}
\item{myAPI_Key}{To use the Campaign Finance API, you must sign up for an API key. The API key must be included in all API requests to the server, set as a header.}
}
\value{
List of returned JSON from endpoint that retrieves the most recent late contributions to a specific committee.
}
\description{
Get Recent Late Contributions to a Specific Committee
https://www.propublica.org/datastore/apis
HTTP Request: GET https://api.propublica.org/campaign-finance/v1/{cycle}/committees/{fec-id}/48hour
}
\examples{
\dontrun{
get_recent_late_contributions_committee(2016, 'C00575050')
}
}
|
9be8e305c136c0a5d39d80c7925326c22714a9da | 6c44b1d41d99af5769be7e5e3ad0584522f6d06b | /MLRegression1.R | 3cd8d12892b278e98e3d0cd5cf40d689b831dafe | [] | no_license | ameilij/PracticalMachineLearning | e74411693f84a17d478f98b7b5e69f70e24b1344 | afd11380ddb2ee5ff15145128af5b360804ccc30 | refs/heads/master | 2020-09-21T02:37:26.401715 | 2016-10-10T02:12:08 | 2016-10-10T02:12:08 | 67,361,032 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,315 | r | MLRegression1.R | # Regression Example with Machine Learning
# Regression walkthrough with the caret package on the Old Faithful data:
# split the data, fit eruptions ~ waiting by OLS, inspect the fit on the
# train/test sets, and compare against caret's train(method = "lm").
library(caret)
data(faithful)
# Fix the RNG so the train/test split is reproducible.
set.seed(333)
# 50/50 split stratified on the outcome used for partitioning (waiting time).
inTrain <- createDataPartition(y = faithful$waiting, p = 0.5, list = FALSE)
trainFaith <- faithful[inTrain, ]
testFaith <- faithful[-inTrain, ]
head(trainFaith)
# Look at relationship with a visual graph
plot(trainFaith$waiting, trainFaith$eruptions, pch = 19, col = "blue", xlab = "waiting", ylab = "duration")
# Fit linear model
lm1 <- lm(eruptions ~ waiting, data = trainFaith)
summary(lm1)
# Overlay the fitted regression line on the training scatter plot.
plot(trainFaith$waiting, trainFaith$eruptions, pch = 19, col = "blue", xlab = "Waiting", ylab = "Duration")
lines(trainFaith$waiting, lm1$fitted.values, lwd = 3)
# Let's predict a new value manually
newvalue = 80
# Intercept + slope * waiting: the same number predict() returns below.
coef(lm1)[1] + coef(lm1)[2] * newvalue
newdata = data.frame(waiting = 80)
predict(lm1, newdata)
# But how do we evaluate the test set?
# Plot training and test set
par(mfrow = c(1,2))
plot(trainFaith$waiting, trainFaith$eruptions, pch = 19, col = "blue", xlab = "Waiting", ylab = "Duration")
lines(trainFaith$waiting, predict(lm1), lwd = 3)
plot(testFaith$waiting, testFaith$eruptions, pch = 19, col = "blue", xlab = "Waiting", ylab = "Duration")
lines(testFaith$waiting, predict(lm1, newdata = testFaith), lwd = 3)
# Test errors for datasets
# Root of the summed squared residuals (RMSE up to a 1/sqrt(n) factor).
sqrt(sum((lm1$fitted.values - trainFaith$eruptions) ^ 2))
sqrt(sum((predict(lm1, newdata = testFaith) - testFaith$eruptions) ^ 2))
# My own test to detect correlation in dataset and model
cor(trainFaith$waiting, trainFaith$eruptions)
cor(trainFaith$waiting, predict(lm1, trainFaith))
cor(testFaith$waiting, testFaith$eruptions)
cor(testFaith$waiting, predict(lm1, newdata = testFaith))
myRealTestValues <- testFaith$eruptions
myPredictedTestValues <- predict(lm1, newdata = testFaith)
quantile(myRealTestValues)
quantile(myPredictedTestValues)
# Prediction intervals
pred1 <- predict(lm1, newdata = testFaith, interval = "prediction")
ord <- order(testFaith$waiting)
plot(testFaith$waiting, testFaith$eruptions, pch = 19, col = "blue")
# Fit line plus lower/upper prediction bounds, ordered by waiting time.
matlines(testFaith$waiting[ord], pred1[ord, ], type = "l", col = c(1,2,2), lty = c(1,1,1), lwd = 3)
# Same model fitted through caret's unified train() interface.
modFit <- train(eruptions ~ waiting, data = trainFaith, method = "lm")
summary(modFit$finalModel)
plot(testFaith$waiting, testFaith$eruptions, pch = 19)
lines(testFaith$waiting, predict(modFit, newdata = testFaith), lwd = 3)
|
d92d008e899b646b8846cc4805fd8f82c7660dc6 | 99ecd4dbba37c42bc06e43ac182b3384607a67e3 | /R/LinesAngles.R | 7e808cced2fa00f3a566760c34008b05548b54ac | [] | no_license | cran/LearnGeom | 0e33990d00d458e24a786a994300b1e1f62d9400 | 6aebc1d73c39d5d3776f0dd364df99307b2e4b0a | refs/heads/master | 2021-06-02T14:03:42.398531 | 2020-07-14T15:00:03 | 2020-07-14T15:00:03 | 99,415,877 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 964 | r | LinesAngles.R | #' Computes the angle that form two lines
#'
#' \code{LinesAngles} computes the angle that form two lines
#' @param Line1 Line object previously created with \code{CreateLinePoints} or \code{CreateLineAngle}
#' @param Line2 Line object previously created with \code{CreateLinePoints} or \code{CreateLineAngle}
#' @return Returns the angle that form the two lines
#' @examples
#' P1 <- c(0,0)
#' P2 <- c(1,1)
#' Line1 <- CreateLinePoints(P1, P2)
#' P3 <- c(1,-1)
#' P4 <- c(2,3)
#' Line2 <- CreateLinePoints(P3, P4)
#' angle <- LinesAngles(Line1, Line2)
#' @export
LinesAngles<-function(Line1, Line2){
m1=Line1[1]
m2=Line2[1]
if (m1==m2){
angle=0
}
else{
vector1=c(1,m1)
vector2=c(1,m2)
num=(vector1[1]*vector2[1]+vector1[2]*vector2[2])
den=sqrt(vector1[1]^2+vector1[2]^2)*sqrt(vector2[1]^2+vector2[2]^2)
angle=acos(num/den)
angle=(360*angle)/(2*pi)
}
names(angle)="angle"
return(angle)
} |
cf31ca01831dfc6ee4b4dacbbdd8a5fd48da4c12 | 19ffb430a323bc8a207be7a08e1b716cd215b8fe | /RsNlme/inst/Examples/OneCpt_Seq0Order1stOrderAbsorp_1stOrderElim.R | 6a21ea3459fe94ca2a053e93e8e07a8edd32b1ec | [] | no_license | phxnlmedev/rpackages | 51b9bd9bf955e7da7a3dc4ca6f13b32adfd3f049 | 59dafc62c179d98407c4fbcbb4936786d71ee6a5 | refs/heads/master | 2020-05-07T20:12:28.132137 | 2019-07-23T11:58:39 | 2019-07-23T11:58:39 | 180,429,234 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,635 | r | OneCpt_Seq0Order1stOrderAbsorp_1stOrderElim.R | ##############################################################################################################
## Description
##
## The purpose of this example is to demonstrate how to create a model involving mixed first-order and zero-order
## absorption through RsNlme, and then simulate it. The model demonstrated is a one-compartment model with
## first-order elimination, where the zero-order absorption is followed by a first-order absorption.
##
##############################################################################################################
##############################################################################################################
############## Setup Environment Variables and Load Necessary Packages ##############
##############################################################################################################
# ==========================================================================================================
# Setup environment
# ==========================================================================================================
source("c:/Work/NlmeInstall_07_10_19/Examples/SetUpEnv_LoadRPackages.R")
setwd("c:/Work/NlmeInstall_07_10_19/Examples/")
##############################################################################################################
############### Create the Model, Simulation Input Dataset, and Column Mapping files ###############
#
# - Create the model through RsNlme
# - Create the simulation input dataset
# - Map the input dataset to the model
##############################################################################################################
ModelName = "OneCpt_Seq0Order1stOrderAbsorp_1stOrderElim"
# ==========================================================================================================
# Create the PK/PD model through RsNlme
# ==========================================================================================================
#-----------------------------------------------------------------------------------------------------
# Basic one-compartment PK model
model = pkmodel(numCompartments = 1
, modelName = ModelName
)
# Dosing information for the first-order absorption pathway
DoseInfo_AbsCpt = DosePoint(isZeroOrderAbsorption = No
, isTlag = TRUE
, tlagExpression = "Tlag1"
, isBioavail = TRUE
, bioavailExpression = "logitF1; ilogit(logitF1)"
)
# Dosing information for the zero-order absorption pathway
DoseInfo_CentralCpt = DosePoint(isZeroOrderAbsorption = DurationDose
, isTlag = FALSE
, isBioavail = TRUE
, bioavailExpression = "logitF1; 1 - ilogit(logitF1)"
, durationExpression = "D2"
)
# Update the model with the mixed first-order and zero-order absorption included
model = mixedFirstOrderZeroOrderAbsorption(model, DoseInfo_CentralCpt, DoseInfo_AbsCpt)
#----------------------------------------------------------------------------------------------------
# Reset structural model parameters (including initial values for fixed and random effects)
#----------------------------------------------------------------------------------------------------
# Reset the distribution form for logitF1
structuralParam(model, "logitF1") = c(style = Normal)
# Fixed effects (the default value is 1)
initFixedEffects(model) # output initial values for fixed effects
initFixedEffects(model) = c(tvV = 5, tvCl = 1, tvKa1 = 2.5, tvlogitF1 = 0.1, tvD2 = 6, tvTlag1 = 2) # set up initial values for fixed effects
# Random effects (the default value is 1)
initRandomEffects(model) = c(Diagonal
, FALSE
, "nV, nD2, nlogitF1, nCl, nTlag1, nKa1"
, "0.01, 0.01, 0.01, 0.01, 0.01, 0.01"
)
#----------------------------------------------------------------------------------------------------
# Reset residual error model
#----------------------------------------------------------------------------------------------------
residualEffect(model,"C") = c(errorType = Multiplicative, SD = "0.1")
#----------------------------------------------------------------------------------------------------
# Update the model based on the above changes on structural model parameters and residual error model
#----------------------------------------------------------------------------------------------------
# Incorporate the above changes on fixed effects into the model
model = generatePML(model)
# Output the model
print(model)
# ==========================================================================================================
# Create the simulation input dataset
# ==========================================================================================================
# Create the simulation input data
dt_SimInputData = data.table(ID = seq(1, 2)
, Time = 0
, Dose = c(10, 20)
)
dt_SimInputData$RepDose = dt_SimInputData$Dose
# ==========================================================================================================
# Map the input dataset to the created model
# ==========================================================================================================
# Initialize model mapping (link the input dataset to model@inputData)
# and automatically map some of the model variables to the data columns
initColMapping(model) = dt_SimInputData
modelColumnMapping(model) # output the mapping
# Manually set up the mapping for the rest of variables
modelColumnMapping(model) = c(Aa1 = "Dose", A1 = "RepDose")
##############################################################################################################
################### Model Simulation ###################
##############################################################################################################
# ==========================================================================================================
# - Set up default name for model, input dataset and mapping file
# - Set up host platform
# - Set up simulation parameters (numReplicates, seed, output tables)
# ==========================================================================================================
# Host setup: run locally
host = NlmeParallelHost(sharedDirectory = Sys.getenv("NLME_ROOT_DIRECTORY")
, parallelMethod = NlmeParallelMethod("LOCAL_MPI")
, hostName = "MPI"
, numCores = 1
)
# --------------------------------------------------------------------------
# Simulation setup
# --------------------------------------------------------------------------
# Simulation table for structural model parameters
SimTableStructuralModelParams = NlmeSimTableDef(name = "SimTableStructuralModelParams.csv"
, timesList = "0"
, variablesList = "V,Cl,Ka1,logitF1,D2,Tlag1"
, timeAfterDose = FALSE
)
# Simulation table for simulations
SimTableObs = NlmeSimTableDef(name = "SimTableObs.csv"
, timesList = "0, 0.5, 1, 2, 4, 8, 12, 16, 20, 24"
, variablesList = "C, CObs"
, timeAfterDose = FALSE
)
# Simulation setup
SimSetup = NlmeSimulationParams(numReplicates = 50
, seed = 1
, simulationTables = c(SimTableStructuralModelParams, SimTableObs)
)
# ==========================================================================================================
# Run the model
# ==========================================================================================================
job = simmodel(host, SimSetup, model)
|
644c8e3fc14a6671cc604556346a6b9bd4123840 | d67e790cc65a617c4c58de4c5fec51e43fd85ac4 | /analise/analise.R | d2bc8ce91b48cb0f1940d4d9b440c133d524a78a | [] | no_license | hugogbs/metodologia | 39cd32efafb142f9761ca4a676e54e37c6ca9e1d | 68b8fe3f341ab3e0e6e40031eec4ed0a88b9a49a | refs/heads/master | 2021-01-02T23:03:29.001799 | 2017-08-30T17:05:29 | 2017-08-30T17:05:29 | 99,454,813 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,643 | r | analise.R | library(tidyverse)
heap <- read_csv("dados/output.csv", col_names = FALSE)
insertion <- read_csv("dados/output1.csv", col_names = FALSE)
quick <- read_csv("dados/output2.csv", col_names = FALSE)
quick3 <- read_csv("dados/output3.csv", col_names = FALSE)
dados <- rbind(heap, quick, quick3, insertion) %>%
mutate(algoritmo = X1) %>%
mutate(tempo = X3) %>%
mutate(tempoSeg = tempo / 10**9) %>%
mutate(tipo_entrada = ifelse(grepl("quase_ordenado", X2), "Quase Ordenado",
ifelse(grepl("faixa_pequena", X2), "Faixa Pequena", "Faixa Grande"))) %>%
mutate(tamanho_entrada = ifelse(grepl("10a6", X2), "10^6", "10^4")) %>%
mutate(X1 = NULL) %>%
mutate(X2 = NULL) %>%
mutate(X3 = NULL)
ggplot(data = dados, aes(x = algoritmo, y = tempo)) +
geom_boxplot()
ggplot(data = dados %>% subset(algoritmo != "InsertionSort"), aes(x = algoritmo, y = tempo)) +
geom_boxplot()
ggplot(data = dados %>% subset(algoritmo == "InsertionSort"), aes(x = tipo_entrada, y = tempo)) +
geom_boxplot() +
xlab("InsertionSort")
ggplot(data = dados %>% subset(algoritmo == "InsertionSort"), aes(x = tipo_entrada, y = tempo)) +
geom_boxplot() +
xlab("InsertionSort")
ggplot(data = dados %>% subset(algoritmo == "QuickSort"), aes(x = tipo_entrada, y = tempo)) +
geom_boxplot() +
xlab("QuickSort")
ggplot(data = dados %>% subset(algoritmo == "HeapSort"), aes(x = tipo_entrada, y = tempo)) +
geom_boxplot() +
xlab("HeapSort")
ggplot(data = dados %>% subset(algoritmo == "Quick3WaySort"), aes(x = tipo_entrada, y = tempo)) +
geom_boxplot() +
xlab("Quick3WaySort")
ggplot(data = dados, aes(x = tamanho_entrada, y = tempo)) +
geom_boxplot()
ggplot(data = dados %>% subset(algoritmo == "InsertionSort"), aes(x = tamanho_entrada, y = tempo)) +
geom_boxplot() +
xlab("InsertionSort")
ggplot(data = dados %>% subset(algoritmo == "QuickSort"), aes(x = tamanho_entrada, y = tempo)) +
geom_boxplot() +
xlab("QuickSort")
ggplot(data = dados %>% subset(algoritmo == "HeapSort"), aes(x = tamanho_entrada, y = tempo)) +
geom_boxplot() +
xlab("HeapSort")
ggplot(data = dados %>% subset(algoritmo == "Quick3WaySort"), aes(x = tamanho_entrada, y = tempo)) +
geom_boxplot() +
xlab("Quick3WaySort")
summary((dados %>% subset(algoritmo == "InsertionSort"))$tempoSeg)
summary((dados %>% subset(algoritmo == "QuickSort"))$tempoSeg)
summary((dados %>% subset(algoritmo == "HeapSort"))$tempoSeg)
summary((dados %>% subset(algoritmo == "Quick3WaySort"))$tempoSeg)
aov <- aov(tempo ~ algoritmo * tamanho_entrada * tipo_entrada, dados)
summary(aov)
5.274e+22 + 1.813e+22 + 7.135e+21 + 5.268e+22 + 2.199e+22 + 7.135e+21 + 2.197e+22 + 2.474e+19
soma = 1.818047e+23
algoritmo = 5.274e+22/soma
tamanho = 1.813e+22/soma
tipo = 7.135e+21/soma
algo_tamanho = 5.268e+22/soma
algo_tipo = 2.199e+22/soma
tam_tipo = 7.135e+21/soma
algo_tipo_tam = 2.197e+22/soma
erros = 2.474e+19/soma
ggplot(data = dados, aes(y = tempo, x=tamanho_entrada)) +
geom_boxplot() +
facet_wrap(~algoritmo)
ggplot(data = dados, aes(y = tempo, x=tipo_entrada)) +
geom_boxplot() +
facet_wrap(~algoritmo)
ggplot(data = dados, aes(y = tempo, x=tipo_entrada)) +
geom_boxplot() +
facet_wrap(~tamanho_entrada) +
facet_wrap(~algoritmo)
######################## Segundo Experimento #################################333
dados2 <- read_csv("dados/saida2.csv")
aov2 <- aov(tempo ~ algoritmo * tamanho_entrada, dados2)
summary(aov2)
soma2 <- sum(3.673e+22, 1.236e+22, 3.669e+22, 2.579e+16)
prop_algo <- 3.673e+22/soma2
prop_tam <- 1.236e+22/soma2
prop_algo_tam <- 3.669e+22/soma2
prop_erro <- 2.579e+16/soma2 |
a6b1a81adb3c83ea2dcc5d8f3ae92b8973a784cd | ff26cd2c4ffd0b6b5b091018905d53e8ed7aedfc | /R/Auxiliares.R | 0d600c0be1e425a9b7ac71287dee34681bca24e5 | [] | no_license | lauzingaretti/LinkHD | 986663e430b9bbb237f6926d6cf0a06364adc9c5 | b451fd947fab979482cab14b7c7d95c2f1cdadee | refs/heads/master | 2023-03-20T08:01:23.423729 | 2023-03-06T18:59:43 | 2023-03-06T18:59:43 | 184,235,740 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,191 | r | Auxiliares.R | # File Created by Laura M. Zingaretti Adapted from previous kimod package function Date April, 24 2019
#' @import stats
# Geometric mean of a numeric vector (or table row).
# Returns 0 as soon as any non-NA entry equals zero; otherwise it averages
# the logs of the finite, strictly positive entries and exponentiates.
g_mean <- function(Data) {
  has.zero <- any(na.omit(Data == 0))
  if (has.zero) {
    return(0)
  }
  vals <- c(Data)
  keep <- which(is.finite(unlist(Data)) & Data > 0)
  exp(mean(log(vals[keep])))
}
# Centered log-ratio (clr) transform of compositional data.
# Each row is divided by its geometric mean and log-transformed (base b).
# A single-column input is returned untouched with unit geometric means.
centerLR <- function(Data, b = exp(1)) {
  if (!is(Data, "data.frame") && !is(Data, "matrix")) {
    stop("The dataset should be a matrix or data.frame")
  }
  if (dim(Data)[2] == 1) {
    # A composition of one part is its own clr; geometric means default to 1.
    return(list(Data.clr = Data, geom_m = rep(1, dim(Data)[1])))
  }
  row.gm <- apply(Data, 1, g_mean)
  list(Data.clr = log(Data/row.gm, b), geom = row.gm)
}
# Normalize every table in a list of numeric matrices.
#
# For each element of X this optionally centers columns, scales each column
# by its (uniform-weight) root mean square, and -- whenever scaling is
# requested -- rescales the whole table by the Frobenius norm of its
# cross-product matrix so tables become comparable across the list.
#
# @param X list of numeric matrices.
# @param scale logical; divide columns by their RMS and normalize the table?
# @param center logical; subtract column means first?
# @return list of normalized matrices, same length/order as X.
Normalize <- function(X, scale = TRUE, center = TRUE) {
  if (!is(X, "list")) {
    stop("Error:Object of type 'list' expected")
  }
  # Root mean square of a column under uniform row weights.
  f2 <- function(v) {
    if (!is.numeric(v)) {
      stop("Error:Object of type 'numeric' expected")
    }
    row.w <- rep(1, length(v))/length(v)
    sqrt(sum(v * v * row.w)/sum(row.w))
  }
  XC <- list()
  M <- list()
  for (i in seq_along(X)) {
    if (center && scale) {
      XC[[i]] <- matrix(scale(X[[i]], center = TRUE, scale = FALSE), nrow = nrow(X[[i]]))
      M[[i]] <- apply(XC[[i]], 2, f2)
      # Guard against division by (numerically) zero column norms.
      M[[i]][M[[i]] < 1e-08] <- 1
      XC[[i]] <- sweep(XC[[i]], 2, M[[i]], "/")
      XC[[i]] <- XC[[i]] * (as.numeric(1/sqrt(sum((XC[[i]] %*% t(XC[[i]]))^2))))
    } else if (!center && !scale) {
      XC[[i]] <- X[[i]]
    } else if (center && !scale) {
      XC[[i]] <- matrix(scale(X[[i]], scale = FALSE), nrow = nrow(X[[i]]))
    } else {
      # Scale-only branch. BUG FIX: the original swept the still-unassigned
      # XC[[i]] here (subscript out of bounds); sweep the input X[[i]] instead.
      M[[i]] <- apply(X[[i]], 2, f2)
      M[[i]][M[[i]] < 1e-08] <- 1
      XC[[i]] <- sweep(X[[i]], 2, M[[i]], "/")
      XC[[i]] <- XC[[i]] * (as.numeric(1/sqrt(sum((XC[[i]] %*% t(XC[[i]]))^2))))
    }
  }
  return(XC)
}
# Cross-product of a configuration matrix with itself.
#
# Coerces X to a matrix, rejects non-numeric content, and returns
# X %*% t(X) (row configuration, the default) or t(X) %*% X when
# Row = FALSE.
#
# @param X matrix (or object coercible to one) of numeric values.
# @param Row logical; TRUE for the row cross-product, FALSE for columns.
# @return square numeric matrix.
ScalarProduct <- function(X, Row = TRUE) {
  if (!is.matrix(X)) {
    X <- as.matrix(X)
  }
  # as.matrix() on mixed-type input yields a character matrix; reject it.
  # (The original also computed per-column classes into an unused local,
  # which has been removed.)
  if (typeof(X) != "double" && typeof(X) != "integer") {
    stop("The Scalar Product only should be used with numerical data")
  }
  if (Row) {
    X %*% t(X)
  } else {
    t(X) %*% X
  }
}
######### Distances computation#########
# Pairwise distance matrix between the rows of `mat`.
# `meth.dis` may be abbreviated (partial matching). The first three options
# delegate to stats::dist(); the correlation-based "distances" are derived
# from Pearson/Spearman correlations of the rows; "mahalanobis" delegates
# to maha() defined elsewhere in this file.
distan <- function(mat = NULL, meth.dis = "euclidean", diag = FALSE, upper = FALSE) {
  metrics <- c("euclidean", "manhattan", "canberra", "pearson", "pearsonabs",
    "spearman", "spearmanabs", "mahalanobis")
  idx <- pmatch(meth.dis, metrics)
  if (is.na(idx)) {
    stop("Error :Unknown Metric.")
  }
  DIS <- switch(metrics[idx],
    euclidean = as.matrix(dist(mat, method = "euclidean", diag, upper)),
    manhattan = as.matrix(dist(mat, method = "manhattan", diag, upper)),
    canberra = as.matrix(dist(mat, method = "canberra", diag, upper)),
    pearson = (1 - cor(t(mat), method = "pearson"))/2,
    pearsonabs = 1 - abs(cor(t(mat), method = "pearson")),
    spearman = (1 - cor(t(mat), method = "spearman"))/2,
    spearmanabs = 1 - abs(cor(t(mat), method = "spearman")),
    mahalanobis = maha(mat))
  attr(DIS, "Metric") <- meth.dis
  return(DIS)
}
# Classify a table as binary (0/1 only) or not.
# NOTE(review): this definition is shadowed by a second compbin() defined
# later in this file, so it is effectively dead code; kept for reference.
# Flags accumulated in A mark the reasons the data is not binary.
compbin <- function(X = NULL) {
  if (!is(X, "data.frame") && !is(X, "matrix")) {
    stop("invalid class of Object")
  }
  # Drop rows containing missing values before inspecting the entries.
  X <- na.omit(X)
  # Distinct values of each row, concatenated into one vector.
  Unicos <- unlist(apply(X, 1, unique))
  A <- c()
  # Flag 1: the largest value is not 1.
  if (max(Unicos) != 1) {
    A <- c(A, 1)
  }
  # Flag 2: the smallest value is not 0.
  if (min(Unicos) != 0) {
    A <- c(A, 2)
  }
  M <- sort(unique(c(Unicos)))
  # Flag 3: the sorted distinct values do not start with 0, 1.
  # NOTE(review): when the data holds a single distinct value, M[2] is NA
  # and this condition evaluates to NA, which would error -- TODO confirm.
  if (M[1] != 0 || M[2] != 1) {
    A <- c(A, 3)
  }
  if (length(A) == 0) {
    return("Binary Data Imput")
  } else {
    return("Non-Binary Data Imput")
  }
}
# Distance matrix between rows of a binary (presence/absence) table.
#
# df is binarized (any strictly positive value -> 1) and a similarity s is
# computed from the 2x2 contingency counts of each pair of rows:
#   a = shared 1s, b/c = one-sided mismatches, d = shared 0s.
# The returned object is a `dist` whose entries are sqrt(1 - s).
#
# method: 1 Jaccard, 2 simple matching (Sokal & Michener), 3 Sokal & Sneath,
#   4 Rogers & Tanimoto, 5 Dice (Czekanowski), 6 Gower & Legendre S9,
#   7 Ochiai, 8 Sokal & Sneath S13, 9 Pearson's phi, 10 Gower & Legendre S2.
dist.binary <- function(df = NULL, method = 1, diag = FALSE, upper = FALSE) {
  METHODS <- c("JACCARD S3", "SOCKAL & MICHENER S4", "SOCKAL & SNEATH S5", "ROGERS & TANIMOTO S6", "CZEKANOWSKI S7",
    "GOWER & LEGENDRE S9", "OCHIAI S12", "SOKAL & SNEATH S13", "Phi of PEARSON S14", "GOWER & LEGENDRE S2")
  if (is.null(method)) {
    stop("you should chose a valid method to calculate dist.binary, e.g. 1 to Jaccard, 2 to simple_matching, 3
    to sokal, 4 to roger_tanimoto, 5 to dice, 6 to hamman, 7 to ochiai, 8 to sokal2, 9 to phi_pearson, 10 to
    gower_legendre")
  }
  if (!(method %in% seq_along(METHODS))) {
    stop("Non convenient method")
  }
  if (!(inherits(df, "data.frame") | inherits(df, "matrix")))
    stop("df is not a data.frame or a matrix")
  df <- as.matrix(df)
  if (!is.numeric(df))
    stop("df must contain numeric values")
  if (any(df < 0))
    stop("non negative value expected in df")
  nlig <- nrow(df)
  d.names <- row.names(df)
  if (is.null(d.names))
    d.names <- seq_len(nlig)
  # Binarize: any strictly positive entry counts as presence.
  df <- as.matrix(1 * (df > 0))
  a <- df %*% t(df)            # pairwise count of shared 1s
  b <- df %*% (1 - t(df))      # 1 in row i, 0 in row j
  c <- (1 - df) %*% t(df)      # 0 in row i, 1 in row j
  d <- ncol(df) - a - b - c    # shared 0s
  # BUG FIX: the original used a sequence of independent `if`s ending in
  # `if (method == 10) ... else stop("Non convenient method")`, so every
  # method other than 10 fell through to the stop(). Chain with else-if.
  if (method == 1) {
    d <- a/(a + b + c)
  } else if (method == 2) {
    d <- (a + d)/(a + b + c + d)
  } else if (method == 3) {
    d <- a/(a + 2 * (b + c))
  } else if (method == 4) {
    d <- (a + d)/(a + 2 * (b + c) + d)
  } else if (method == 5) {
    d <- 2 * a/(2 * a + b + c)
  } else if (method == 6) {
    d <- (a - (b + c) + d)/(a + b + c + d)
  } else if (method == 7) {
    d <- a/sqrt((a + b) * (a + c))
  } else if (method == 8) {
    d <- a * d/sqrt((a + b) * (a + c) * (d + b) * (d + c))
  } else if (method == 9) {
    d <- (a * d - b * c)/sqrt((a + b) * (a + c) * (b + d) * (d + c))
  } else {
    d <- a/(a + b + c + d)
    # Force self-similarity to 1 so the diagonal distance is 0.
    diag(d) <- 1
  }
  d <- sqrt(1 - d)
  d <- as.dist(d)
  attr(d, "Size") <- nlig
  attr(d, "Labels") <- d.names
  attr(d, "Diag") <- diag
  attr(d, "Upper") <- upper
  attr(d, "method") <- METHODS[method]
  attr(d, "call") <- match.call()
  class(d) <- "dist"
  return(d)
}
# function to compute mahalanobis distance
# Mahalanobis distance between the rows of df, returned as a `dist` object.
# Implementation: whiten the data with the inverse square root of its
# (population) covariance, then take plain Euclidean distances in the
# whitened space.
maha <- function(df = NULL) {
  if (!is(df, "data.frame") && !is(df, "matrix")) {
    stop("invalid class of Object")
  }
  df <- data.frame(df)
  nlig <- nrow(df)
  # Square scratch matrix, only used to build the lower-triangle index pairs.
  d <- matrix(0, nlig, nlig)
  d.names <- row.names(df)
  # Euclidean distance between rows x[1] and x[2] of df. NOTE: this closure
  # reads `df` from the enclosing environment, so it sees the WHITENED df
  # assigned below, not the raw input -- the rebinding order matters.
  fun1 <- function(x) {
    sqrt(sum((df[x[1], ] - df[x[2], ])^2))
  }
  df <- as.matrix(df)
  # (column, row) pairs covering the strict lower triangle, in `dist` order.
  index <- cbind(col(d)[col(d) < row(d)], row(d)[col(d) < row(d)])
  # Population covariance: rescale cov()'s (n-1) denominator to n.
  dfcov <- cov(df) * (nlig - 1)/nlig
  maha <- eigen(dfcov, symmetric = TRUE)
  # Keep only eigenvalues above a relative tolerance (numerical rank).
  maha.r <- sum(maha$values > (maha$values[1] * 1e-07))
  maha.e <- 1/sqrt(maha$values[seq_len(maha.r)])
  maha.v <- maha$vectors[, seq_len(maha.r)]
  # Whitening matrix: eigenvectors scaled by 1/sqrt(eigenvalue).
  maha.v <- t(t(maha.v) * maha.e)
  # Project the data into the whitened space (fun1 now measures Mahalanobis).
  df <- df %*% maha.v
  d <- c(unlist(apply(index, 1, fun1)))
  upper = FALSE
  diag = FALSE
  attr(d, "Size") <- nlig
  attr(d, "Labels") <- d.names
  attr(d, "Diag") <- diag
  attr(d, "Upper") <- upper
  class(d) <- "dist"
  return(d)
}
# Classify a table as binary (distinct values exactly {0, 1}) or not.
#
# NOTE: this definition supersedes the earlier compbin() in this file.
# BUG FIXES vs. the original: (1) `length(Unicos) == 2` was tested on the
# concatenation of per-row uniques, wrongly rejecting multi-row binary
# tables; (2) `if (sort(unique(Unicos)) == c(0, 1))` applied `if` to a
# length-2 logical vector, which errors in recent R versions.
#
# @param X data.frame or matrix.
# @return "Binary Data Imput" when the distinct non-NA values are exactly
#   0 and 1, otherwise "Non-Binary Data Imput".
compbin <- function(X = NULL) {
  if (!is(X, "data.frame") && !is(X, "matrix")) {
    stop("invalid class of Object")
  }
  # Drop rows containing missing values before inspecting the entries.
  X <- na.omit(X)
  vals <- sort(unique(unlist(apply(X, 1, unique))))
  if (length(vals) == 2 && all(vals == c(0, 1))) {
    return("Binary Data Imput")
  }
  return("Non-Binary Data Imput")
}
##### Project one study's table onto the compromise space given by the SVD.
# Coordinates are S projected on the left singular vectors, rescaled by
# 1/sqrt of the singular values; the Studies label and Observations ids
# are appended as trailing columns.
TabProj <- function(S, SvdComp, Studies, Observations) {
  coords <- S %*% SvdComp$u %*% diag(1/sqrt(SvdComp$d))
  out <- cbind(as.data.frame(coords), rep(Studies, nrow(coords)), Observations)
  axis.names <- paste0("CP", seq_len(ncol(out) - 2))
  colnames(out) <- c(axis.names, "Studies", "Observations")
  return(out)
}
#### Keep only the upper triangle of a (correlation) matrix; the strict
#### lower triangle is blanked out with NA.
get_upper_tri <- function(cormat) {
  upper <- cormat
  upper[lower.tri(upper)] <- NA
  return(upper)
}
#### Variable selection using LM
# Regress one external variable X on the compromise coordinates in Mat and
# report fit quality plus the first two regression coefficients (used as
# biplot coordinates).
# NOTE(review): the coefficient indexing below assumes Mat has at least two
# columns (two coordinate axes) -- TODO confirm against callers.
LinModel <- function(Mat = NULL, X = NULL, intercept = FALSE) {
  # Response as a one-column numeric matrix.
  X1 <- matrix(as.numeric(X), ncol = 1)
  # X1<-matrix(scale(X1),ncol=1)
  L <- as.matrix(Mat)
  # na.exclude keeps predictions aligned with the original rows.
  if (intercept == TRUE) {
    Model <- lm(X1 ~ L, na.action = na.exclude)
  }
  if (intercept == FALSE) {
    Model <- lm(X1 ~ -1 + L, na.action = na.exclude)
  }
  Res <- summary(Model)
  R2 <- Res$adj.r.squared
  # p value from f statistic!
  pval <- 1 - pf(Res$fstatistic[1], Res$fstatistic[2], Res$fstatistic[3])
  # First two slope coefficients; with an intercept they sit in rows 2-3
  # of the coefficient table, otherwise in rows 1-2.
  if (intercept == TRUE) {
    XCoord <- Res$coefficients[2, 1]
    YCoord <- Res$coefficients[3, 1]
  }
  if (intercept == FALSE) {
    XCoord <- Res$coefficients[1, 1]
    YCoord <- Res$coefficients[2, 1]
  }
  # Degenerate fits (both summaries NA) yield NULL instead of a column.
  if (is.na(R2) & is.na(pval)) {
    M2 <- NULL
  } else {
    M2 <- data.frame(c(R2, pval, XCoord, YCoord))
    colnames(M2) <- rownames(X)
    rownames(M2) <- c("R2-Adj", "p-value", "XCoord", "YCoord")
  }
  return(M2)
}
# Total-sum scaling: convert a vector of counts to relative proportions.
TSSfunction <- function(x) {
  x / sum(x)
}
# Doubly standardize a frequency table (correspondence-analysis style):
# each cell becomes p_ij / (p_i. * p_.j) - 1, i.e. the relative departure
# from row/column independence. Input must be non-negative frequencies.
cia <- function(df) {
  tab <- as.data.frame(df)
  if (!is(tab, "data.frame"))
    stop("data.frame expected")
  if (any(tab < 0))
    stop("negative entries in table and your data should be frecuencies")
  N <- sum(tab)
  if (N == 0)
    stop("all frequencies are zero")
  # Convert counts to proportions and compute the margins.
  tab <- tab/N
  margin.row <- rowSums(tab)
  margin.col <- colSums(tab)
  # Divide each row by its margin, each column by its margin, shift by -1.
  tab <- tab/margin.row
  tab <- sweep(tab, 2, margin.col, "/") - 1
  return(data.frame(tab))
}
|
c3552eca90c97ca3c600aaf0cacb7d4ca551e4ca | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/R2ucare/examples/overall_JMV.Rd.R | db474eaa0d0140cb341c2199c82055dbcdb9cedc | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 720 | r | overall_JMV.Rd.R | library(R2ucare)
### Name: overall_JMV
### Title: Overall goodness-of-fit test for the Jolly-Move model
### Aliases: overall_JMV
### Keywords: package
### ** Examples
## Not run:
##D # read in Geese dataset
##D library(RMark)
##D geese = system.file("extdata", "geese.inp", package = "R2ucare")
##D geese = convert.inp(geese)
##D
##D geese.hist = matrix(as.numeric(unlist(strsplit(geese$ch, ''))),nrow=nrow(geese),byrow=TRUE)
##D geese.freq = geese$freq
##D
##D # encounter histories and number of individuals with corresponding histories
##D X = geese.hist
##D freq = geese.freq
##D
##D # load R2ucare package
##D library(R2ucare)
##D
##D # perform overall gof test
##D overall_JMV(X, freq)
## End(Not run)
|
a16c84606eb061291386201922fcdcb93dfc4027 | 0e1d6d0f301463ce7569e077ce0b1918e1cab1fb | /exploratory_data_analysis_project_2_entire_code.R | 229677d2e6035161d18e2f867ce4769a28f97666 | [] | no_license | ilmurjonsdottir/exploratory_data_analysis_project_2 | 1ffbc48d4553523013a4c882c0868cb65762f98e | 19d658d1452f33e71014b0a308938641a91d015e | refs/heads/main | 2023-04-22T20:55:28.187394 | 2021-05-07T12:21:35 | 2021-05-07T12:21:35 | 365,224,094 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,405 | r | exploratory_data_analysis_project_2_entire_code.R | #Load packages
library(dplyr)
library(ggplot2)
library(scales)
library(data.table)
#Setwd
setwd("~/Desktop/R_Programming/Working_Directory/exdata_data_NEI_data")
#Input the data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#Exploreing the data
head(NEI)
str(NEI)
head(SCC)
str(SCC)
#Question nr. 1
annual <- NEI %>% group_by(year) %>%
filter(year == 1999|2002|2005|2008) %>%
summarize(Annual.Total = sum(Emissions));
pts <- pretty(annual$Annual.Total/1000000);
yrs <- c(1999,2002,2005,2008)
plot(annual$year, annual$Annual.Total/1000000, type = "l", lwd = 2, axes = FALSE,
xlab = "Year",
ylab = expression("Total Tons of PM"[2.5]*" Emissions"),
main = expression("Total Tons of PM"[2.5]*" Emissions in the United States"));
axis(1, at = yrs, labels = paste(yrs));
axis(2, at = pts, labels = paste(pts, "M", sep = ""));
box()
#Question nr. 2
baltimore <- NEI %>%
filter(fips == "24510") %>%
group_by(year) %>%
summarize(Annual.Total = sum(Emissions));
baltimore.pts <- pretty(baltimore$Annual.Total/1000);
plot(baltimore$year, baltimore$Annual.Total/1000, type = "l", lwd = 2, axes = FALSE,
xlab = "Year",
ylab = expression("Total Tons of PM"[2.5]*" Emissions"),
main = expression("Total Tons of PM"[2.5]*" Emissions in Baltimore"));
axis(1, at = c(1999,2002,2005,2008))
axis(2, at = baltimore.pts, labels = paste(baltimore.pts, "K", sep = ""));
box();
#Question nr 3
nei.baltimore <- NEI %>% filter(fips == "24510") %>% group_by(type, year) %>% summarize(Annual.Total = sum(Emissions));
nei.baltimore$type <- factor(nei.baltimore$type, levels = c("ON-ROAD", "NON-ROAD", "POINT", "NONPOINT")) # Re-order factor levels so they plot in the order we wish
ggplot(nei.baltimore, aes(x = factor(year), y = Annual.Total, fill = type)) +
geom_bar(stat = "identity") +
facet_grid(. ~ type) +
xlab("Year") +
ylab(expression("Total Tons of PM"[2.5]*" Emissions")) +
ggtitle(expression("Total Tons of PM"[2.5]*" Emissions in Baltimore by Source Type")) +
theme(axis.text.x=element_text(angle = 90, vjust = 0.5, hjust = 1)) +
scale_y_continuous(labels = comma) +
guides(fill = FALSE)
#Questin nr. 4
scc.coal <- SCC[grep("Fuel Comb.*Coal", SCC$EI.Sector), ];
scc.coal.list <- unique(scc.coal$SCC);
nei.coal <- subset(NEI, SCC %in% scc.coal.list);
nei.coal <- nei.coal %>% group_by(type, year) %>% summarize(Annual.Total = sum(Emissions))
nei.coal.total <- nei.coal %>% group_by(year) %>% summarize(Annual.Total = sum(Annual.Total)) %>% mutate(type = "TOTAL");
nei.coal <- nei.coal %>% select(Annual.Total, type, year);
nei.coal <- bind_rows(nei.coal, nei.coal.total);
nei.coal$type <- factor(nei.coal$type, levels = c("TOTAL", "ON-ROAD", "NON-ROAD", "POINT", "NONPOINT")); # Re-order factor levels to they plot in the order we wish
ggplot(nei.coal, aes(x = factor(year), y = Annual.Total, fill = type)) +
geom_bar(stat = "identity") +
facet_grid(. ~ type) +
xlab("Year") +
ylab(expression("Total Tons of PM"[2.5]*" Emissions")) +
ggtitle(expression(atop("Total Tons of PM"[2.5]*" Emissions in the United States", paste("from Coal Combustion-Related Sources")))) +
theme(plot.margin = unit(c(1,1,1,1), "cm")) +
scale_y_continuous(labels = comma) +
scale_fill_brewer(palette = "Dark2") +
guides(fill = FALSE)
#Question nr. 5
# Motor-vehicle sources in Baltimore City, split by SCC vehicle class,
# plus a "Total" pseudo-class.
scc.vehicles <- SCC[grep("Mobile.*Vehicles", SCC$EI.Sector), ]; # Pattern match mobile vehicles in SCC description
scc.vehicles.list <- unique(scc.vehicles$SCC); # Create motor vehicle lookup list by SCC
nei.vehicles <- subset(NEI, SCC %in% scc.vehicles.list); # Filter for motor vehicle sources
nei.vehicles <- nei.vehicles %>% filter(fips == "24510") # Filter for Baltimore
nei.vehicles <- merge(x = nei.vehicles, y = scc.vehicles[, c("SCC", "SCC.Level.Two", "SCC.Level.Three")], by = "SCC") # Join in descriptive data on SCC codes
nei.vehicles <- nei.vehicles %>% group_by(year, SCC.Level.Two, SCC.Level.Three) %>% summarize(Annual.Total = sum(Emissions))
nei.vehicles.total <- nei.vehicles %>% group_by(year) %>% summarize(Annual.Total = sum(Annual.Total)) %>% mutate(SCC.Level.Two = "Total")
nei.vehicles <- bind_rows(nei.vehicles, nei.vehicles.total);
nei.vehicles$SCC.Level.Two <- factor(nei.vehicles$SCC.Level.Two, levels = c("Total", "Highway Vehicles - Diesel", "Highway Vehicles - Gasoline"));
ggplot(nei.vehicles, aes(x = factor(year), y = Annual.Total, fill = SCC.Level.Two)) +
geom_bar(stat = "identity") +
facet_grid(. ~ SCC.Level.Two) +
xlab("Year") +
ylab(expression("Total Tons of PM"[2.5]*" Emissions")) +
ggtitle(expression(atop("Total Tons of PM"[2.5]*" Emissions in Baltimore City", paste("from Motor Vehicle Sources")))) +
theme(plot.title = element_text(hjust = 0.5)) + # Center the plot title
theme(plot.margin = unit(c(1,1,1,1), "cm")) + # Adjust plot margins
scale_fill_brewer(palette = "Set1") +
guides(fill = FALSE)
#Questions Nr. 6
# Same motor-vehicle breakdown, comparing Baltimore City with Los Angeles
# County on a shared (absolute) y scale.
scc.vehicles <- SCC[grep("Mobile.*Vehicles", SCC$EI.Sector), ]; # Pattern match mobile vehicles in SCC description
scc.vehicles.list <- unique(scc.vehicles$SCC); # Create motor vehicle lookup list by SCC
nei.vehicles <- subset(NEI, SCC %in% scc.vehicles.list); # Filter for motor vehicle sources
nei.vehicles <- nei.vehicles %>% filter(fips == "24510"| fips == "06037"); # Filter for Baltimore City or Los Angeles County
nei.vehicles$fips[nei.vehicles$fips == "24510"] <- "Baltimore";
nei.vehicles$fips[nei.vehicles$fips == "06037"] <- "Los Angeles";
nei.vehicles <- merge(x = nei.vehicles, y = scc.vehicles[, c("SCC", "SCC.Level.Two")], by = "SCC"); # Join in descriptive data on SCC codes
nei.vehicles <- nei.vehicles %>% group_by(fips, year, SCC.Level.Two) %>% summarize(Annual.Total = sum(Emissions));
nei.vehicles.total <- nei.vehicles %>% group_by(fips, year) %>% summarize(Annual.Total = sum(Annual.Total)) %>% mutate(SCC.Level.Two = "Total");
nei.vehicles <- bind_rows(nei.vehicles, nei.vehicles.total);
nei.vehicles$SCC.Level.Two <- factor(nei.vehicles$SCC.Level.Two, levels = c("Total", "Highway Vehicles - Diesel", "Highway Vehicles - Gasoline"));
ggplot(nei.vehicles, aes(x = factor(year), y = Annual.Total, fill = SCC.Level.Two)) +
geom_bar(stat = "identity") +
facet_grid(fips ~ SCC.Level.Two) +
xlab("Year") +
ylab(expression("Total Tons of PM"[2.5]*" Emissions")) +
ggtitle(expression(atop("Total Tons of PM"[2.5]*" Emissions from Motor Vehicle Sources", paste("in Baltimore City, MD and Los Angeles County, CA")))) +
theme(plot.title = element_text(hjust = 0.5)) + # Center the plot title
theme(plot.margin = unit(c(1,1,1,1), "cm")) + # Adjust plot margins
scale_fill_brewer(palette = "Set1") +
guides(fill = FALSE)
# Repeat of the two-city comparison, but with free facet scales so each
# location's trend is readable despite LA's much larger absolute totals.
scc.vehicles <- SCC[grep("Mobile.*Vehicles", SCC$EI.Sector), ]; # Pattern match mobile vehicles in SCC description
scc.vehicles.list <- unique(scc.vehicles$SCC); # Create motor vehicle lookup list by SCC
nei.vehicles <- subset(NEI, SCC %in% scc.vehicles.list); # Filter for motor vehicle sources
nei.vehicles <- nei.vehicles %>% filter(fips == "24510"| fips == "06037"); # Filter for Baltimore City or Los Angeles County
nei.vehicles$fips[nei.vehicles$fips == "24510"] <- "Baltimore";
nei.vehicles$fips[nei.vehicles$fips == "06037"] <- "Los Angeles";
nei.vehicles <- merge(x = nei.vehicles, y = scc.vehicles[, c("SCC", "SCC.Level.Two")], by = "SCC"); # Join in descriptive data on SCC codes
nei.vehicles <- nei.vehicles %>% group_by(fips, year, SCC.Level.Two) %>% summarize(Annual.Total = sum(Emissions));
nei.vehicles.total <- nei.vehicles %>% group_by(fips, year) %>% summarize(Annual.Total = sum(Annual.Total)) %>% mutate(SCC.Level.Two = "Total");
nei.vehicles <- bind_rows(nei.vehicles, nei.vehicles.total);
nei.vehicles$SCC.Level.Two <- factor(nei.vehicles$SCC.Level.Two, levels = c("Total", "Highway Vehicles - Diesel", "Highway Vehicles - Gasoline"));
ggplot(nei.vehicles, aes(x = factor(year), y = Annual.Total, fill = SCC.Level.Two)) +
geom_bar(stat = "identity") +
facet_grid(fips ~ SCC.Level.Two, scales = "free") + # Setup facets and allow scales to adjust to data in each location
xlab("Year") +
ylab(expression("Total Tons of PM"[2.5]*" Emissions")) +
ggtitle(expression(atop("Total Tons of PM"[2.5]*" Emissions from Motor Vehicle Sources", paste("in Baltimore City, MD and Los Angeles County, CA")))) +
theme(plot.title = element_text(hjust = 0.5)) + # Center the plot title
theme(plot.margin = unit(c(1,1,1,1), "cm")) + # Adjust plot margins
scale_fill_brewer(palette = "Set1") +
guides(fill = FALSE)
# Measurement-to-measurement ratio, computed per city and vehicle class
# with data.table's shift() (lag) inside a grouped := assignment.
nei.vehicles.DT <- data.table(nei.vehicles)
yoyFunc <- function(x) {x/shift(x)}
yoy.cols <- c("Annual.Total")
nei.vehicles.DT <- nei.vehicles.DT[, paste0("Percent.Change.", yoy.cols) := lapply(.SD, yoyFunc), by = "fips,SCC.Level.Two", .SDcols = yoy.cols]
nei.vehicles.DT <- mutate(nei.vehicles.DT, Percent.Change.Annual.Total = Percent.Change.Annual.Total - 1)
ggplot(nei.vehicles.DT, aes(x = factor(year), y = Percent.Change.Annual.Total, fill = SCC.Level.Two)) +
geom_bar(stat = "identity") +
facet_grid(fips ~ SCC.Level.Two) +
xlab("Year") +
ylab(expression("% Change From Prior Measurement")) +
ggtitle(expression(atop("Percentage Change in Total Tons of PM"[2.5]*" Emissions from Motor Vehicle", paste("Sources in Baltimore City, MD and Los Angeles County, CA")))) +
theme(plot.title = element_text(hjust = 0.5)) + # Center the plot title
theme(plot.margin = unit(c(1,1,1,1), "cm")) + # Adjust plot margins
scale_fill_brewer(palette = "Set1") +
guides(fill = FALSE)
# NOTE(review): despite the name, CAGR below is the *linear* average annual
# growth rate ((end - begin) / years / begin), not a compound rate
# ((end/begin)^(1/years) - 1) -- confirm which was intended.
CAGR.df <- nei.vehicles.DT %>%
group_by(fips, SCC.Level.Two) %>%
summarize(N.Years = max(year) - min(year),
Beginning.Qty = Annual.Total[which(year==min(year))],
Ending.Qty = Annual.Total[which(year==max(year))],
CAGR = ((Ending.Qty-Beginning.Qty)/N.Years)/Beginning.Qty);
CAGR.df;
summary(nei.vehicles.DT$Percent.Change.Annual.Total[nei.vehicles.DT$fips=="Baltimore"]);
summary(nei.vehicles.DT$Percent.Change.Annual.Total[nei.vehicles.DT$fips=="Los Angeles"]);
|
0a2a18a168391758dbcaca1d9524178ec3e055d8 | 2f8a4aee3f8ebd0f216b25caeb29fd3e0b5c299a | /R/day27.R | e72bfecfb6acfaf0b77ce62326fcb47d6b72dcc2 | [] | no_license | PMassicotte/30daymapchallenge | 61063cc094bd0881e43b3e836064e5185f848405 | c6ae65c3baec4804219539133537d08756b0a795 | refs/heads/master | 2022-11-01T04:30:27.813798 | 2022-10-19T18:56:06 | 2022-10-19T18:56:06 | 220,877,067 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,338 | r | day27.R | library(tidyverse)
library(ggpmthemes)
library(sf)
library(ggmap)
library(httr)
library(readxl)
theme_set(theme_light_modified(base_family = "Alatsi"))
# Download the Québec microbrewery point data (Excel via data.world) and
# load the MRC boundary shapefile. NOTE(review): GET()/read_excel()/
# st_read() all hit the network or local disk, so this chunk is not
# reproducible offline.
GET("https://query.data.world/s/gqkvcrgerj75ptvfsm34eybjqcrd5z", write_disk(tf <- tempfile(fileext = ".xlsx")))
df <- read_excel(tf)
mb <- df %>%
  janitor::clean_names() %>%
  st_as_sf(coords = c("longitude", "latitude"), crs = 4326)
df2 <- st_read("data/day27/mrc_SHP/mrc_ligne.shp")
# Plot --------------------------------------------------------------------
# MRC boundaries with one point per microbrewery, projected to the Québec
# Lambert CRS (EPSG:32198).
p <- df2 %>%
  ggplot() +
  geom_sf(size = 0.2, color = "gray40") +
  geom_sf(data = mb, color = "#3c3c3c", size = 0.25) +
  # stat_sf(geom = "density2d") +
  coord_sf(crs = 32198) +
  labs(
    title = "Beer resources in Québec",
    caption = "#30daymapchallenge (Resources) | Data: https://data.world/maxclem/microbrasseriesquebec | @philmassicotte",
    subtitle = str_wrap("The microbrewery industry is growing in Québec. This essential resource is a delight for many beer lovers!", 70)
  ) +
  theme(
    panel.border = element_blank(),
    axis.text = element_blank(),
    panel.grid = element_blank(),
    axis.ticks = element_blank(),
    plot.title = element_text(
      color = "#3C3C3C",
      hjust = 0.5,
      size = 20,
      face = "bold"
    ),
    plot.caption = element_text(
      color = "#3C3C3C",
      size = 5,
      hjust = 0.5
    ),
    plot.subtitle = element_text(color = "gray25", size = 8, hjust = 0.5),
    legend.key = element_rect(size = 2, colour = NA),
    legend.key.size = unit(0.25, "cm"),
    legend.text = element_text(size = 6, color = "white"),
    legend.title = element_blank(),
    legend.position = "top",
    legend.direction = "horizontal",
    plot.background = element_rect(fill = "#ABB7B7"),
    panel.background = element_rect(fill = "#ABB7B7"),
    legend.background = element_rect(fill = "#ABB7B7")
  )
# Save plot ---------------------------------------------------------------
destfile <- here::here("graphs", "day27.pdf")
# Fixed: pass `plot = p` explicitly. The original relied on ggsave()'s
# default last_plot(), but `p` is never printed in this script, so a
# non-interactive run has no "last displayed plot" to save.
ggsave(
  destfile,
  plot = p,
  device = cairo_pdf,
  width = 3.52 * 1.1,
  height = 4.68 * 1.1
)
knitr::plot_crop(destfile)
# Render the cropped PDF to a bitmap and write it out as PNG as well.
bitmap <- pdftools::pdf_render_page(destfile, dpi = 600)
destfile <- here::here("graphs", "day27.png")
png::writePNG(bitmap, destfile)
|
fa470a4f9f231960f0414d61dcaca8bf1a2d5e42 | 937c0247bdbd987297a5f90fc0c6defd668dae2c | /man/control.Rd | a1a9f15ca6b3613fa992083ec0fce6f22bd462a7 | [] | no_license | GioBo/TR8 | 40bbea1f87e1f5f40152b5047530c3263c60883b | 10ab03648e726530d9f13c438b23ceda0adf82c4 | refs/heads/master | 2022-06-19T17:55:28.708433 | 2022-06-09T21:37:17 | 2022-06-09T21:37:17 | 24,714,177 | 17 | 8 | null | null | null | null | UTF-8 | R | false | false | 1,168 | rd | control.Rd | \name{control}
\alias{control}
\title{control}
\usage{
control(name_variable, dframe, DB)
}
\arguments{
\item{name_variable}{name of the variable set up by
tr8_config()}
\item{dframe}{a dataframe containing traits definition
(created by the tr8() function).}
\item{DB}{name of the database to be used (eg.
"Ecoflora")}
}
\value{
a vector of selected traits (if the variable was set
through the \code{tr8_config()} function OR \code{NULL} if
\code{tr8_config()} was run, but no traits were chosen for
that database OR an empty vector if \code{tr8_config()} was
not run.
}
\description{
A function to check if the user wants to download
some traits from a certain database.
}
\details{
This function checks whether the user has run the
\code{tr8_config()} function and, in case he did, which
traits were selected (i.e. need to be downloaded by the tr8
function) for each database. These variables have the form
"res_NAMEDB" (eg. \code{res_Biolflor}) and they contain the
"output" of a "gWidget::notebook" window. The values of
these variables can be accessed through the \code{svalue}
}
\author{
Gionata Bocci <boccigionata@gmail.com>
}
\seealso{
tr8()
}
|
a58b7027d5e18866fbd8ab45098fe82b6c765db2 | c53e367a5a155cfb1ee3a41e8b0351aeaa8d331d | /fBasics/unitTests/runit.colorPalette.R | 844e368cedccff446b59477e26d93229c8d2b8f1 | [
"MIT"
] | permissive | solgenomics/R_libs | bcf34e00bf2edef54894f6295c4f38f1e480b3fc | e8cdf30fd5f32babf39c76a01df5f5544062224e | refs/heads/master | 2023-07-08T10:06:04.304775 | 2022-05-09T15:41:26 | 2022-05-09T15:41:26 | 186,859,606 | 0 | 2 | MIT | 2023-03-07T08:59:16 | 2019-05-15T15:57:13 | C++ | UTF-8 | R | false | false | 2,384 | r | runit.colorPalette.R |
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General
# Public License along with this library; if not, write to the
# Free Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
# Copyrights (C)
# for this R-port:
# 1999 - 2008, Diethelm Wuertz, Rmetrics Foundation, GPL
# Diethelm Wuertz <wuertz@itp.phys.ethz.ch>
# www.rmetrics.org
# for the code accessed (or partly included) from other R-ports:
# see R's copyright and license files
# for the code accessed (or partly included) from contributed R-ports
# and other sources
# see Rmetrics's copyright file
################################################################################
# FUNCTION: COLOR PALETTES:
# rainbowPalette Creates a rainbow color palette
# heatPalette Creates a heat color palette
# terrainPalette Creates a terrain color palette
# topoPalette Creates a topo color palette
# cmPalette Creates a cm color palette
# greyPalette Creates a grey palette
# timPalette Creates a cyan, yellow, to orange palette
# FUNCTION: COLOR RAMPS:
# rampPalette Creates a color ramp palette
# seqPalette Creates a sequential color palette
# divPalette Creates a diverging color palette
# qualiPalette Creates a qualitative color palette
# focusPalette Creates a focus color palette
# monoPalette Creates a mono color palette
################################################################################
test.greyPalette <-
function()
{
    # Unit test: exercise the grey colour palette generator from fBasics.
    # Grey Palette:
    # Print the formal arguments of greyPalette() for the test log, then
    # call it with its defaults; the test passes as long as neither call
    # raises an error (RUnit treats an error-free run as success).
    args(greyPalette)
    greyPalette()
    # Return Value:
    return()
}
################################################################################
|
689a332117424812103fa021567f34dded62c73a | a29dba249bbd87c29d731a5b794771fda5cf5117 | /R/interior.r | 28b591ebd7769a5f272cf7e0dcc5f8b02f797f3d | [] | no_license | DaYi-TW/Data-science | 30b4f009c074c7fe9a14e9d963dde37c127802c5 | ce8f5dfcf463a25b5a868fe64014d4311c633a18 | refs/heads/main | 2023-06-29T01:32:59.259537 | 2021-07-21T03:36:34 | 2021-07-21T03:36:34 | 370,018,467 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,124 | r | interior.r | interior=function(t1,test_M_data,criteria){
# Body of interior(t1, test_M_data, criteria): mean pairwise `criteria`
# score between the last column of t1 (the candidate feature, Fi) and
# every other column of t1 (the already-selected set, Fs). The actual
# feature vectors are looked up by column name in test_M_data, and
# `criteria` receives two one-column data frames. The commented-out
# COR/CHI/IG/GR variants were removed; the `criteria` argument carries
# that choice instead.
d=as.data.frame(t1)
Fi=d[ncol(d)]    # candidate feature (last column, kept as a 1-col data frame)
Fs=d[-ncol(d)]   # previously selected features
correlation=0
de1=test_M_data[which(colnames(test_M_data)==colnames(Fi))]
# seq_len() instead of the original 1:ncol(Fs): a 0-column Fs would
# otherwise produce the bogus sequence c(1, 0) and index out of range.
for(i in seq_len(ncol(Fs))){
  de2=test_M_data[which(colnames(test_M_data)==colnames(Fs[i]))]
  correlation=correlation+as.data.frame(criteria(de1,de2))
}
# Average the accumulated scores and normalise the dimnames to the
# "correlation"/"value" shape callers expect.
answer=correlation/ncol(Fs)
colnames(answer)[1]="correlation"
rownames(answer)[1]="value"
return(answer)
}
84b7bb37aa4a337edfd4b4a3b4ee249694f8d63b | 5b82e9e2a0411fe08558ffe5f805f118358034db | /man/ggMA.Rd | 4648269f11236672e2cfe07a8ac39d98a2ad6c30 | [] | no_license | btmonier/ggDESeq | 81f74d40e66622c7a756f68d08c09169781fe090 | d5aa70e33347c2dfe322c26e0665b17de90d9ce1 | refs/heads/master | 2021-01-13T17:02:59.416760 | 2017-02-27T00:08:43 | 2017-02-27T00:08:43 | 76,287,582 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,126 | rd | ggMA.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggMA.R
\name{ggMA}
\alias{ggMA}
\title{MA plot from mean expression and log fold changes}
\usage{
ggMA(data, ylim = NULL, padj = 0.05, lfc = 1)
}
\arguments{
\item{data}{a DESeq object class}
\item{ylim}{optional limits to the y axis}
\item{padj}{significance threshold for adjusted p-value highlighting}
\item{lfc}{fold change threshold for visualization}
}
\description{
This function allows you to extract necessary results-based data from a
DESeq object class to create a MA plot (i.e. a scatter plot) of log2 fold
changes versus normalized mean counts while implementing ggplot2 aesthetics.
}
\details{
This function allows the user to extract various elements from a DESeq object
class which in turn, creates a temporary data frame to plot the necessary
ggplot aesthetics. Data points with 'extreme' values that exceed the default
viewing frame of the plot will change character classes (i.e. points of
interest with a substantially large log fold change).
}
\examples{
ggMA()
}
\author{
Brandon Monier, \email{brandon.monier@sdstate.edu}
}
|
28706a173baeb80b62c74f1804d8ee876fbd678a | 540d6e750d5297f3a5a778e24b12744d2ddbe7e5 | /R/tm.R | 577f972a26d55013c613d19de573df058777b1b9 | [] | no_license | anucc/metatone-analysis | 83d2a862091e9274c21636ff8018bf246edd0ee4 | 208e9eae6c28c689f597981491604db084459bc8 | refs/heads/master | 2020-04-12T03:11:34.572020 | 2019-06-03T03:18:48 | 2019-06-03T03:18:48 | 40,700,065 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,700 | r | tm.R | gestures <- c("N", # 0: Nothing
"FT", # 1: Fast Taps
"ST", # 2: Slow Taps
"FS", # 3: Fast Swipes
"FSA", # 4: Fast Swipes Accelerating
"VSS", # 5: Very Slow Swirl
"BS", # 6: Big Swirl
"SS", # 7: Small Swirl
"C" # 8: Combination of Swirl and Taps
)
jumble.df <- function(df, cols.to.jumble){
  ## Independently permute (shuffle the rows of) each selected column of
  ## `df`, leaving every other column in its original order.
  ## `cols.to.jumble` may be column names or numeric indices; negative
  ## numeric indices mean "every column except these".
  if(is.numeric(cols.to.jumble) && isTRUE(any(cols.to.jumble < 0))){
    cols.to.jumble <- seq_len(ncol(df))[cols.to.jumble]
  }
  n.rows <- nrow(df)
  for(col in cols.to.jumble){
    ## A fresh permutation per column, so columns are decorrelated.
    df[, col] <- df[sample.int(n.rows), col]
  }
  df
}
## transition matrices
## assumes single session-artist df
calculate.transitions <- function(df, left.stochastic=FALSE){
    ## Build a gesture-transition probability table for one performer's
    ## time-ordered data frame `df` (expects columns `gesture` and `time`).
    ## Fewer than two rows means no transitions can be formed.
    if(dim(df)[1]<2)
        return(data.frame())
    ## Pair each gesture with its successor.
    trans <- data.frame(from = df$gesture[-length(df$gesture)],
                        to= df$gesture[-1])
    ## left.stochastic = TRUE: probabilities are conditional on `from`
    ## (each "row" of the matrix sums to 1); FALSE: joint probabilities
    ## over all observed transitions (the whole table sums to 1).
    if(left.stochastic)
        res <- ddply(trans, .(from), function(x) ddply(x, .(to), function(y) data.frame(prob = length(y$to)/length(x$from))))
    else
        res <- ddply(trans, .(from), function(x) ddply(x, .(to), function(y) data.frame(prob = length(y$to)/length(trans$from))))
    ## Tag the result with the earliest time stamp of the chunk.
    data.frame(time = min(df$time), res)
}
## ## add together (and renormalise) TMs from different musicians
## group.transitions <- function(df){
## ddply(df,
## .(session, app, agent, time, from, to),
## summarize,
## blah...)
## }
transition.flux <- function(df){
    ## Flux of a transition table: the probability mass that moves between
    ## *different* states, i.e. 1 minus the on-diagonal (self-transition)
    ## mass.
    ##
    ## df: data frame with columns `from`, `to` and `prob`; `prob` is
    ##     expected to sum to 1 (a joint-probability transition table).
    ## Returns a 1-row data frame with a single `flux` column.
    total <- sum(df$prob)
    ## Fixed: the original compared `sum(df$prob) != 1` exactly, which
    ## fires spuriously when the sum differs from 1 only by floating-point
    ## noise; compare with a tolerance instead.
    if(!isTRUE(all.equal(total, 1))){
        message("warning: prob sum isn't 1 - sum = ", total)
    }
    on.diag.sum <- sum(df[df$from==df$to, "prob"])
    data.frame(flux = 1-on.diag.sum)
}
transition.entropy <- function(df){
    ## Shannon entropy (in bits) of the `prob` column of a transition
    ## table. NB: a zero probability yields NaN (0 * -Inf), exactly as in
    ## the original formulation.
    p <- df$prob
    data.frame(entropy = -sum(p * log2(p)))
}
quantize.times <- function(time, delta.t){
    ## Snap each time stamp down to the start of its delta.t-wide bin
    ## (integer floor-division, vectorised over `time`).
    bin.index <- time %/% delta.t
    bin.index * delta.t
}
timeslice.df <- function(df, delta.t){
    ## Overwrite the `time` column with the start of each delta.t-wide
    ## slice (see quantize.times); all other columns are untouched.
    df[["time"]] <- quantize.times(df[["time"]], delta.t)
    df
}
timesliced.transitions <- function(df, delta.t){
    ## Quantise time into delta.t-wide slices, then compute a transition
    ## table (calculate.transitions) for every
    ## session/app/agent/musician/slice combination, showing a text
    ## progress bar. Assumes those grouping columns exist in `df`.
    ddply(timeslice.df(df, delta.t),
          .(session, app, agent, musician, time),
          calculate.transitions,
          .progress = "text")
}
## flux.variance <- function(df){
## res <- ddply(df,
## .(session, app, agent, musician, time),
## transition.flux)
## ddply(res,
## .(session, app, agent),
## summarize,
## flux.mean = mean(flux),
## ## flux.median = median(flux),
## flux.variance = sd(flux)
## ## flux.mad = mad(flux)
## )
## }
flux.variance <- function(df){
    ## Per-slice flux: one transition.flux() row for every
    ## session/app/agent/musician/time group of an already time-sliced
    ## transition data frame. NOTE: despite the name, no variance is
    ## computed here -- the summarising step is commented out above.
    ddply(df,
          .(session, app, agent, musician, time),
          transition.flux)
}
## entropy.variance <- function(df){
## res <- ddply(df,
## .(session, app, agent, musician, time),
## transition.entropy)
## ddply(res,
## .(session, app, agent),
## summarize,
## entropy.mean = mean(entropy),
## ## entropy.median = median(entropy),
## entropy.variance = sd(entropy)
## ## entropy.mad = mad(entropy)
## )
## }
entropy.variance <- function(df){
    ## Per-slice entropy: one transition.entropy() row for every
    ## session/app/agent/musician/time group. NOTE: despite the name, no
    ## variance is computed here -- the summarising step is commented out
    ## above.
    ddply(df,
          .(session, app, agent, musician, time),
          transition.entropy)
}
collapse.musicians <- function(df, collapse.function){
    ## Aggregate across musicians: group by whichever of
    ## session/app/agent/time columns are present in `df` and apply
    ## `collapse.function` (e.g. mean) to every numeric column.
    ddply(df,
          intersect(c("session", "app", "agent", "time"), names(df)),
          numcolwise(collapse.function))
}
## plotting functions
plot.tm.heatmap <- function(df, colour.by, title = ""){
    ## Heat map of a long-format transition matrix: `from` on the y axis,
    ## `to` on the x axis, tile transparency mapped to `prob` and tile
    ## fill to the column named by the string `colour.by`.
    ## NOTE(review): aes_string() is deprecated in current ggplot2;
    ## consider aes(fill = .data[[colour.by]]) when dependencies allow.
    ggplot(df, aes(y = from, x = to)) +
        geom_tile(aes_string(alpha = "prob", fill = colour.by)) +
        ## scale_fill_manual(values = c("blue", "darkgreen", "red")) +
        ## scale_fill_gradient(limits=fill.limits, low="#000080", high="#CD6600") +
        ## Fixed axis order/labels from the global `gestures` vector;
        ## drop = FALSE keeps gestures that never occur in `df`.
        scale_x_discrete(drop = FALSE, limits = gestures) +
        scale_y_discrete(drop = FALSE, limits = gestures) +
        scale_alpha_continuous(limits = c(0,.5)) + # try 1 as well
        coord_fixed() +
        labs(title = title,
             x = "final state",
             y = "initial state",
             alpha = "transition\nprobability") +
        theme(axis.text.x = element_text(angle=90))
}
## ## jumbled data for sanity checks
## mtdfj <- mtdf
## mtdfj$gesture <- factor(sample(levels(mtdf$gesture), dim(mtdfj)[1], TRUE))
## CHI figures for publication
## taken from colorbrewer2.org
# Qualitative palettes (colorbrewer2.org Set1 hex values) used for the
# CHI publication figures: a 3-series and a 2-series variant.
chifig.3colours <- c("#e41a1c", "#377eb8", "#4daf4a")
chifig.2colours <- c("#984ea3", "#ff7f00")
|
7ed44339062730face9875b6a1c0f1b2fc4ff95b | 28adba2768e03e72e23679190c39bfe3158a83a4 | /P1-final project codes1.R | 194fbd64cf79481434c524f5b9736c1bbd7f5b1a | [] | no_license | danishxr/PROJECTS | 7d52988a6ed7d9cfb4ee6b094bcba7bcbbd03ca5 | 40fa8ddd51cbebc525d7f24939305d7431415321 | refs/heads/master | 2021-01-01T18:10:24.975825 | 2017-10-31T17:47:51 | 2017-10-31T17:47:51 | 98,268,701 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,554 | r | P1-final project codes1.R |
library(RColorBrewer)
require(RColorBrewer)
library(ggplot2)
require(ggplot2)
library(dplyr)
require(dplyr)
library(ggmap)
require(ggmap)
library(XML)
require(XML)
library(tidyr)
require(tidyr)
library(fmsb)
require(fmsb)
train <- read.csv("IndiaAffectedWaterQualityAreas.csv",stringsAsFactors = FALSE)
train$Year<- as.Date(train$Year,"%d/%m/%Y")
View(table(train$State.Name))
#We can see observations naming CHATTISGARH and CHHATTISGARH, infact reality is its a spelling mistake of some kind.
#There is only CHHATTISGARH as one of the offical states of india.the later has good number of cases this could really screw up the analysis so we need to clear that.
#cleaning the data a little bit--------------------------------------------
train$State.Name<- gsub(pattern = "\\(.*","",train$State.Name)
train$State.Name<- gsub(pattern = "CHATTISGARH","CHHATTISGARH",train$State.Name)
train$District.Name<- gsub(pattern = "\\(.*","",train$District.Name)
train$Block.Name<- gsub(pattern = "\\(.*","",train$Block.Name)
train$Panchayat.Name<- gsub(pattern = "\\(.*","",train$Panchayat.Name)
train$Quality.Parameter<-as.factor(train$Quality.Parameter)
str(train)
# pattern of chemical compunds -----------------------------------------------------------------------------------------------------------------------------------------------------------------
kk<- as.data.frame(table(train$Year,train$Quality.Parameter),stringsAsFactors = FALSE)
kk$Var1 <- as.Date.factor(kk$Var1)
kk$Var2 <- as.factor(kk$Var2)
qplot(Freq,Var1,data = kk,facets = Var2~.,geom = c("point","smooth"),color=Var2)+labs(title="Trend seen in Chemicals over the Years",x="Number of cases",y="Years",fill="Chemicals")+
theme(plot.title = element_text(face="bold.italic",size = rel(2)),axis.text = element_text(colour = "blue"),axis.title.x=element_text(face="bold.italic"),
axis.title.y=element_text(face="bold.italic"),panel.grid.major.y = element_blank(),axis.ticks = element_line(size = 2),legend.box.background = element_rect(),
legend.box.margin = margin(6, 6, 6, 6))
#INFERENCE-there is a general downward trend as years goes by. destroying the popular thesis of as development happens water quality degrades.
#which chemical is found most of water quality issues------------------------------------------------------------------------------------------------------------------------------------------------
chemicals_present <- as.data.frame(table(train$Quality.Parameter),stringsAsFactors = FALSE)
names(chemicals_present) <- c("CHEMICAL","FREQ_REPORTED")
f <- ggplot(chemicals_present,aes(chemicals_present$CHEMICAL,chemicals_present$FREQ_REPORTED))
f+geom_bar(stat = "identity",fill= brewer.pal(5,"Set2"))+labs(title="Identifying Most Occuring Chemical",x="Chemicals",y="Cases")+
theme(plot.title = element_text(face="bold.italic",size = rel(2)),axis.text = element_text(colour = "blue"),axis.title.x=element_text(face="bold.italic"),
axis.title.y=element_text(face="bold.italic"),panel.grid.major.y = element_blank(),axis.ticks = element_line(size = 2))
#INFERENCE-As we can see there is a arge presence of IRON in all the cases,next is Salinity from Fluoride.
#overview of trend n india-----------------------------------------------------------------------------------------------------------------------------------------
overview <- as.data.frame(table(train$State.Name,train$Quality.Parameter,train$Year))
names(overview) <- c("State.Name","CHEMICAL","YEAR","Freq")
str(overview)
an <- ggplot(overview,aes(overview$State.Name,overview$Freq,fill=overview$CHEMICAL))
an+geom_bar(stat="identity",position = "dodge")+theme(axis.text.x = element_text(angle = 90))+
labs(title="Trend of Chemical Compostion in different states of India",x= "States", y="Number_Of_Cases",fill="CHEMICALS")+
theme(plot.title = element_text(face="bold.italic",size = rel(2)),axis.text = element_text(colour = "blue"),axis.title.x=element_text(face="bold.italic"),
axis.title.y=element_text(face="bold.italic"),panel.grid.major.y = element_blank(),axis.ticks = element_line(size = 2),legend.box.background = element_rect(),
legend.box.margin = margin(6, 6, 6, 6))
#As we can see here ASSAM,BIHAR,RAJASTHAN has larger cases of chemical composition reported,we will look into states which have two or more chemicals in larger cases.
# we could identify states like Assam,Bihar,Orissa,Rajasthan.
#lets take each state chemical wise and see-----------------------------------------------------------------------------------------------------------
goal1<-as.data.frame(table(train$State.Name,train$Quality.Parameter))
names(goal1) <- c("State.Name","CHEMICAL","Freq")
str(goal1)
an <- ggplot(goal1,aes(goal1$State.Name,goal1$Freq,fill=goal1$CHEMICAL))
an+geom_bar(stat="identity",position = "dodge")+facet_wrap(~goal1$CHEMICAL,scales="free")+
labs(title="Trend of Specific Chemical Compostion in different states of India",x= "States", y="Number_Of_Cases",fill="CHEMICALS")+scale_fill_brewer( type = "qua", palette = "Dark2", direction = 1)+
theme(axis.text.x = element_text(angle = 90),plot.background = element_rect(fill = NA),plot.title = element_text(size = rel(2)), panel.background = element_rect(fill =NA),axis.text = element_text(colour = "blue"),
panel.grid.major = element_line(colour = "black"),axis.ticks = element_line(size = 2),legend.box.background = element_rect(),
legend.box.margin = margin(6, 6, 6, 6))
#INFERENCE------------------------------------------------------------------------------------------------------------------------
#iron-ASSAM ,Bihar,Chhatisgargh,orissa
#Fluoride-Rajasthan,second worst Bihar
#Arsenic-westbengal,Assam,Bihar
#Nitrate,Karnataka,Maharashtra,Rajsthan
#salinity-Rajasthan
#so the main problem occurs in ASSAM ,BIHAR&RAJASTHAN as they aranked higher in presence of more than two chemicals at larger cases.
#WESTBENGAL has the highest presence of Arsenic in them but other than that other chemical reported are relatively less.
#we will analyse the trend and see district wise report of these states to get a clearer picture.
#----------------------------------------------------------------------------------------------------------------------------------
#for the state of ASSAM
#to get the values
table(state_ASSAM$District.Name,state_ASSAM$Quality.Parameter,state_ASSAM$Year
#visualization
state_ASSAM <- subset(train,train$State.Name=="ASSAM")
ASSAM <- as.data.frame(table(state_ASSAM$District.Name,state_ASSAM$Quality.Parameter,state_ASSAM$Year),stringsAsFactors = FALSE)
str(ASSAM)
names(ASSAM) <- c("District.Name","CHEMICAL","YEAR","Freq")
assam <- ggplot(ASSAM,aes(ASSAM$CHEMICAL,ASSAM$Freq,fill=ASSAM$District.Name))
assam+geom_bar(stat="identity",position = "dodge")+facet_grid(.~ASSAM$YEAR)+
labs(title="TREND of Chemical Compostion in ASSAM Villages",x="Chemicals",y="Number Of Cases",fill="Districts in ASSAM")+
theme(plot.title = element_text(face="bold.italic",size = rel(2)),axis.text = element_text(colour = "blue"),axis.title.x=element_text(face="bold.italic"),
axis.title.y=element_text(face="bold.italic"),panel.grid.major.y = element_blank(),axis.ticks = element_line(size = 2),legend.box.background = element_rect(),
legend.box.margin = margin(6, 6, 6, 6))
#INFERENCE-Generally the trend shows a downward trend in all the districts with a spike in the year 2011,then it has decreased.
#District of Sontipur has the highest iron content
#for the state of Bihar
#to get the values
table(state_BIHAR$District.Name,state_BIHAR$Quality.Parameter,state_BIHAR$Year)
#visualization
state_BIHAR<- subset(train,train$State.Name=="BIHAR")
BIHAR <- as.data.frame(table(state_BIHAR$District.Name,state_BIHAR$Quality.Parameter,state_BIHAR$Year),stringsAsFactors = FALSE)
str(BIHAR)
names(BIHAR) <- c("District.Name","CHEMICAL","YEAR","Freq")
bihar <- ggplot(BIHAR,aes(BIHAR$CHEMICAL,BIHAR$Freq,fill=BIHAR$District.Name))
bihar+geom_bar(stat="identity",position = "dodge")+facet_grid(.~BIHAR$YEAR)+
labs(title="TREND of Chemical Compostion in BIHAR Villages",x="Chemicals",y="Number Of Cases",fill="Districts in BIHAR")+
theme(plot.title = element_text(face="bold.italic",size = rel(2)),axis.text = element_text(colour = "blue"),axis.title.x=element_text(face="bold.italic"),
axis.title.y=element_text(face="bold.italic"),panel.grid.major.y = element_blank(),axis.ticks = element_line(size = 2),legend.box.background = element_rect(),
legend.box.margin = margin(6, 6, 6, 6))
#INFERENCE-Generally the trend shows a downward trend in all the districts,then it has decreased.
# District of Purnia has the highest iron content.
#for the state of Rajasthan
#to get the values
table(state_RAJASTHAN$District.Name,state_RAJASTHAN$Quality.Parameter,state_RAJASTHAN$Year)
#visualization
state_RAJASTHAN <- subset(train,train$State.Name=="RAJASTHAN")
RAJASTHAN <- as.data.frame(table(state_RAJASTHAN$District.Name,state_RAJASTHAN$Quality.Parameter,state_RAJASTHAN$Year),stringsAsFactors = FALSE)
str(RAJASTHAN)
names(RAJASTHAN) <- c("District.Name","CHEMICAL","YEAR","Freq")
rajasthan<- ggplot(RAJASTHAN,aes(RAJASTHAN$CHEMICAL,RAJASTHAN$Freq,fill=RAJASTHAN$District.Name))
rajasthan+geom_bar(stat="identity",position = "dodge")+facet_grid(.~RAJASTHAN$YEAR)+
labs(title="TREND of Chemical Compostion in RAJASTHAN Villages",x="Chemicals",y="Number Of Cases",fill="Districts in RAJASTHAN")+
theme(plot.title = element_text(face="bold.italic",size = rel(2)),axis.text = element_text(colour = "blue"),axis.title.x=element_text(face="bold.italic"),
axis.title.y=element_text(face="bold.italic"),panel.grid.major.y = element_blank(),axis.ticks = element_line(size = 2),legend.box.background = element_rect(),
legend.box.margin = margin(6, 6, 6, 6))
#INFERENCE-Generally the trend shows a downward trend in all the districts,then it has decreased.
#District of Barmer has the highest salinity content.
#Visualisation using ggmap ----------------------------------------------------------------------------------------------------------------------------------------------------------------------
url <- "http://www.distancelatlong.com/country/india"
poptable <- readHTMLTable(url,which=3)
str(poptable)
names(poptable) <- c("State.Name","latitude","longitude")
poptable$State.Name <- gsub(pattern = "\\(.*","",poptable$State.Name)
poptable$State.Name <- as.character(poptable$State.Name)
poptable$latitude <- as.character(poptable$latitude)
options(digit=8)
poptable$latitude <- as.numeric(poptable$latitude)
poptable$longitude <- as.character(poptable$longitude)
options(digit=8)
poptable$longitude <- as.numeric(poptable$longitude)
poptable$State.Name <- toupper(poptable$State.Name)
lop<-train[,c("State.Name","Quality.Parameter")]
plea<-cbind(lop,poptable)
indiamap <- get_map(location = "india",maptype ="terrain", zoom =5,color='color' )
as.num(plea$Quality.Parameter)
#use different zoom levels for the maps ,even change the types
ggmap(indiamap)+geom_point(data=plea,aes(x=longitude,y=latitude,colour=Quality.Parameter,size=2),alpha=.5,na.rm=TRUE)+
scale_colour_brewer(type = "seq", palette = "Spectral", direction = 1)
#single line fucking elegant
#train$Habitation.Name <- gsub(pattern = "\\(.*","",train$Habitation.Name)
#to really undertand something one must be liberated from it
#radar chart-----------------------------------------------------------------------------------------------------------------------------------------------------
# trial <- as.data.frame(table(train$Year,train$Quality.Parameter,train$State.Name=="RAJASTHAN"))
# trial1 <- subset(trial,trial$Var3=="TRUE")
# nrow(trial1)
# table(trial$Var3)
# sum(trial1$Freq)
# #trial1
# trial3 <- spread(trial1,Var2,Freq)
# #trial3
# trial4 <- trial3[,-2]
# #trial4
# #set.seed(99)
# #radarchart(trial4)
# str(trial4)
# radarchart(trial4,seg=6)
# colors_border=c( rgb(0.2,0.5,0.5,0.9), rgb(0.8,0.2,0.5,0.9) , rgb(0.7,0.5,0.1,0.9) )
# colors_in=c( rgb(0.2,0.5,0.5,0.4), rgb(0.8,0.2,0.5,0.4) , rgb(0.7,0.5,0.1,0.4) )
# radarchart( trial4 , axistype=1 ,
# #custom polygon
# pcol=colors_border , pfcol=colors_in , plwd=2 , plty=1,
# #custom the grid
# cglcol="grey", cglty=1, axislabcol="grey", caxislabels=seq(0,20,5), cglwd=0.8,
# #custom labels
# vlcex=0.8
# )
# legend(x=0.7, y=1, legend = rownames(trial4[-c(1,2),]), bty = "n", pch=20 , col=colors_in , text.col = "grey", cex=1.2, pt.cex=3)
|
6a18ba2bd6798f6a632e0607dc7d200325ecabbf | 7375604d8538a001987564c7f11f985f94874ddc | /man/invlogit.Rd | 8db448a35a57f08cf875b56072016de8eb160c46 | [] | no_license | crushing05/crushingr | 3b4433c51d90e88c86a5d643566519b15bfc1fcb | 7cc598abb9fafb8b46e40ee15ec5706d65949fde | refs/heads/master | 2021-01-22T03:39:03.055577 | 2017-07-11T02:50:18 | 2017-07-11T02:50:18 | 28,917,439 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 352 | rd | invlogit.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/functions.R
\name{invlogit}
\alias{invlogit}
\title{Inverse logit}
\usage{
invlogit(x)
}
\arguments{
\item{x}{vector of logit transformed numerical values}
}
\description{
Function to compute inverse logit
}
\keyword{logistic}
\keyword{logit;}
\keyword{regression}
|
# Exercise 3: given
b <- list(a = 1:10, b = seq(3), c = rep(2, 5))
# Write a statement that returns a vector with the length of each element of
# the list. lengths() does exactly this in one vectorized call; unname()
# drops the element names so the result matches a plain c(...) of lengths.
v <- unname(lengths(b))
|
# Compute lagged environmental (temperature) change at the global and the
# local (site) scale, aligregated to 200-year sampling bins, for use as
# predictors of community novelty.
#
# Arguments:
#   novel.list     - list of novelty results (not referenced in the body;
#                    presumably retained for interface compatibility - TODO confirm)
#   env.data       - data frame of global environmental estimates; the column
#                    named by `env.var` is renamed to "temp" before binning
#   env.var        - name of the environmental variable column in env.data
#   local.env.data - site-by-age table; column names are numeric ages and the
#                    first column is dropped before averaging - assumes it is
#                    a non-age identifier column, TODO confirm
#   local.lag      - lag (in 200-year bins) for differencing the site series
#   global.lag     - lag (in 200-year bins) for differencing the global series
#
# Returns: data frame with columns bin, site, localLag<l>, globalLag<g>.
calcLag <- function(novel.list,
                    env.data,
                    env.var,
                    local.env.data,
                    local.lag,
                    global.lag){
  # Prepare modelling data ####
  print("Prepping data")
  # aggregate 50 year temp change estimates to 200 year sampling bins, take mean
  # Rename the target environmental column so downstream helpers can refer to
  # a fixed name.
  colnames(env.data)[colnames(env.data) == env.var] = "temp"
  # global data
  # bin.env.data() is a project helper (not defined in this file); it bins the
  # global series into 200-year intervals over -100..25000.
  env.agg <- bin.env.data(env.data,
                          env.var = "temp",
                          bin.width=200,
                          lims=c(-100,25000))
  # order bins oldest-first so differencing below runs forward in time
  env.agg <- env.agg[order(env.agg$bin, decreasing=TRUE), ]
  # site data
  # For each site (row): assign each age column to a 200-year bin, recover the
  # bin midpoint from cut()'s "(lo,hi]" labels, and average within bins.
  # Note x[-1]/bin[-1] drop the first column of local.env.data.
  local.agg <- t(apply(local.env.data, 1, function(x){
    temp.agg <- cut(as.numeric(colnames(local.env.data)), breaks=seq(-100,25000,200))
    agg.char <- as.character(temp.agg)
    # parse the upper bound out of "(lo,hi]" and shift back to the midpoint
    bin <- as.numeric(substr(agg.char, regexpr(",", agg.char)+1,
                             nchar(agg.char)-1)) - 0.5*200
    tapply(x[-1], bin[-1], mean, na.rm=TRUE)
  }))
  # match the global ordering: oldest bins first
  local.agg <- local.agg[,order(as.numeric(colnames(local.agg)), decreasing=TRUE)]
  # now difference temps based on required lag
  # Global change: lagged first differences, padded with NA so the vector
  # aligns with env.agg rows.
  env.lag <- diff(env.agg$env, differences=1, lag=unlist(global.lag))
  env.agg$diff.env <- c(rep(NA, global.lag), env.lag)
  # Local change: same lagged differencing applied per site (row); the
  # assignment inside the function is returned implicitly by apply().
  local.lag.df <- t(apply(local.agg, 1, function(x){
    tempLag <- c(rep(NA, local.lag), diff(x, differences=1, lag=unlist(local.lag)))
  }))
  dimnames(local.lag.df) = dimnames(local.agg)
  # add in local temp data
  # long_form() is a project helper that reshapes the site x bin matrix into
  # long format (one row per site/bin combination).
  local.lag.long <- long_form(dataTable = local.lag.df,
                              data.cols = matrix(rownames(local.lag.df), ncol=1, dimnames=list(rownames(local.lag.df), NA)),
                              category.cols = local.lag.df)
  colnames(local.lag.long) = c("site", "bin", "local.diff")
  # Attach the global lagged change to each site/bin row by bin.
  comb.lag <- merge(local.lag.long, env.agg[,c("bin","diff.env")],
                    by.x="bin", by.y="bin",
                    all.x=TRUE, all.y=FALSE, sort=FALSE)
  head(comb.lag)
  # Encode the lags used into the column names, e.g. localLag1 / globalLag1.
  colnames(comb.lag) = c("bin", "site", paste0("localLag", local.lag), paste0("globalLag",global.lag))
  return(comb.lag)
}
2ae9efb6bd01c69e42105542e4dbc08d82d145a9 | b9225a0f129f5acdd90e356197028471aa36608e | /newsapi_databot/scripts/collect_news.R | 6d72d21d2a88079724ccafcb95028747e0168e92 | [] | no_license | rmnppt/newsfuzz | 54f4f42891d1ab6e6e89c1737d6faed28ed6c9f0 | e7ccb9b299cd91eceac414eea87de1bcbd10b7b3 | refs/heads/master | 2021-01-21T18:21:32.272926 | 2017-07-20T23:12:32 | 2017-07-20T23:12:32 | 92,036,134 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,562 | r | collect_news.R | date()
### install / load packages
library(tidyverse)
library(RMySQL)
library(tidytext)
library(qdapDictionaries)
# devtools::install_github("rmnppt/newsfuzz/newsR")
library(newsR)
###
### collect source and article information from api
sources <- newsapiSources("en", "gb")
saveRDS(sources, "data/sources.rds")
articles <- sources$sources$id %>%
lapply(function(s) newsapiArticles(source = s)) %>%
lapply(function(s) jsonlite::flatten(as.data.frame(s))) %>%
do.call(rbind, .)
names(articles) <- sub("articles.", "", names(articles))
###
### timestamp and filter old articles
timelast <- readRDS("data/lastdownloaded.rds")
if(exists(timelast)) {
articles <- articles %>%
filter(publishedAt > timelast)
}
###
### Collect and Clean raw html
getCleanHTML <- function(url) {
cat(url)
input <- httr::GET(url) %>%
httr::content(as = "text", type = "html") %>%
as.data.frame()
names(input) <- "text"
input$text <- as.character(input$text)
token <- unnest_tokens(input, token, text)
token <- token %>% filter(token %in% DICTIONARY$word)
cat("... done\n")
return(token)
}
articles$words <- lapply(articles$url, getCleanHTML)
###
### write new data to db
con <- dbConnect(RMySQL::MySQL(),
host = "",
port = 3306,
dbname = "newsfuzz",
user = ,
password = )
dbWriteTable(con, "articles", d_small, append = TRUE)
dbDisconnect(con)
###
### timestamp for next time
timenow <- print(Sys.time())
saveRDS(timenow, "data/lastdownload.rds")
###
|
8d64aadbb6845c7ebb5eca21e7cb0fb13c75b920 | 4d9a255f944c116d701e31cb79b28edd192a6b8a | /functions/getSummoner.R | 7123e2fb8152b1dc7981ce80a6a55697fe595437 | [] | no_license | Gwangil/C.I.GG | f2d063945e396473060129061723072b34afc91c | 35baf538bab5dba22b6128c9f47fd3ac741b4e49 | refs/heads/master | 2022-05-18T04:32:50.634432 | 2022-05-03T09:11:28 | 2022-05-03T09:11:28 | 172,874,253 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 495 | r | getSummoner.R | # Ref.) https://developer.riotgames.com/api-methods/#summoner-v4/GET_getBySummonerName
# Get represents a summoner
# @param summonerName
# Return type : tibble, data.frame
getSummoner <- function(summonerName) {
GET(url = URLencode(iconv(paste0("https://kr.api.riotgames.com/lol/summoner/v4/summoners/by-name/",
summonerName), to = "UTF-8")),
add_headers("X-Riot-Token" = getOption("RiotApiKey"))) %>%
content() %>% dplyr::bind_rows()
} |
54086af6634cac4ca1ee004e7968dadb441f9a9b | c79fa021f5bb195a4abfcf81d88a49b5ae86ce73 | /R/tong-shrinkage.r | 3ed1be950c8d4a5f9b029d007f10d1a9bf726397 | [
"MIT"
#' Tong et al. (2012)'s Lindley-type Shrunken Mean Estimator
#'
#' Implements the Lindley-type shrunken mean estimator used by
#' shrinkage-mean-based diagonal linear discriminant analysis (SmDLDA).
#' Feature-wise sample means are shrunk towards the grand mean of the whole
#' matrix, with the amount of shrinkage controlled by `r_opt`.
#'
#' @export
#' @importFrom stats var
#' @references Tong, T., Chen, L., and Zhao, H. (2012), "Improved Mean
#' Estimation and Its Application to Diagonal Discriminant Analysis,"
#' Bioinformatics, 28, 4, 531-537.
#' \url{http://bioinformatics.oxfordjournals.org/content/28/4/531.long}
#' @param x a matrix with `n` rows and `p` columns.
#' @param r_opt the shrinkage coefficient. If `NULL` (default), the
#' approximately optimal coefficient \eqn{\hat{r}_{opt}} given just above
#' Equation 5 on page 533 is computed. Supply a value to investigate
#' alternative approximations.
#' @return vector of length `p` with the shrunken mean estimator
tong_mean_shrinkage <- function(x, r_opt = NULL) {
  num_obs <- nrow(x)
  num_feats <- ncol(x)

  # Shrinkage coefficient: either the user-supplied value, or the
  # approximately optimal \hat{r}_{opt} from just above Equation 5.
  if (is.null(r_opt)) {
    r_opt <- (num_obs - 1) * (num_feats - 2) / num_obs / (num_obs - 3)
  } else {
    r_opt <- as.numeric(r_opt)
  }

  # Feature-wise sample means, centered about the grand mean of all of x.
  feature_means <- colMeans(x)
  overall_mean <- mean(x)
  deviations <- feature_means - overall_mean

  # Diagonal of the MLE covariance matrix under a multivariate normal with
  # diagonal covariance: per-feature variances rescaled by (n - 1) / n.
  mle_vars <- (num_obs - 1) / num_obs * apply(x, 2, var)

  # The norm || xbar - xbar.. ||^2_S appearing in Equation 6.
  scaled_norm <- sum(deviations^2 / mle_vars)

  # Equation 6: shrink towards the grand mean. If every feature mean already
  # equals the grand mean there is nothing to shrink.
  if (scaled_norm == 0) {
    return(feature_means)
  }
  overall_mean + (1 - r_opt / scaled_norm) * deviations
}
|
# Integration test: build a Makefile with rmake::rRule() and verify that real
# `make` invocations rebuild the output exactly when a dependency changes.
# Helpers (initTesting, writeToDepFile, createScriptFile, createMakefile,
# contentGreater, make) come from realRunTools.R -- not defined here.
source('realRunTools.R')

test_that('simple R script', {
  # Set up an isolated working directory for this scenario.
  initTesting('simple')
  dep1 <- 'dep1.in'
  dep2 <- 'dep2.in'
  out <- 'result.out'
  r <- 'script.R'
  # Create two dependency files and an R script that produces `out` from them.
  writeToDepFile(dep1)
  writeToDepFile(dep2)
  createScriptFile(r, out)
  # Generate a Makefile with a single rRule: out depends on the script and
  # both dependency files.
  createMakefile('library(rmake)',
                 paste0('job <- list(rRule("', out, '", "', r, '", c("', dep1, '", "', dep2, '")))'),
                 'makefile(job, "Makefile")')

  # Preconditions: inputs exist, output does not yet.
  expect_true(file.exists(dep1))
  expect_true(file.exists(dep2))
  expect_false(file.exists(out))
  expect_true(file.exists(r))

  # First make builds the target; the second should be a no-op (idempotent).
  make()
  make()
  expect_true(file.exists(out))
  # contentGreater() presumably compares recorded content/timestamps to show
  # the output is newer than the given dependency -- see realRunTools.R.
  expect_true(contentGreater(out, dep1))
  expect_true(contentGreater(out, dep2))

  # Touch dep1 (sleep first so the mtime actually differs) and confirm make
  # rebuilds the output.
  Sys.sleep(1)
  writeToDepFile(dep1)
  expect_false(contentGreater(out, dep1))
  expect_true(contentGreater(out, dep2))
  make()
  expect_true(contentGreater(out, dep1))
  expect_true(contentGreater(out, dep2))

  # Same check for the second dependency.
  Sys.sleep(1)
  writeToDepFile(dep2)
  expect_true(contentGreater(out, dep1))
  expect_false(contentGreater(out, dep2))
  make()
  expect_true(contentGreater(out, dep1))
  expect_true(contentGreater(out, dep2))
})
|
# Extracted example script for paleotree::multiDiv (generated from the
# package's Rd examples). Demonstrates diversity-curve calculation across
# multiple data types (continuous ranges, binned ranges, time-scaled trees)
# and across simulation replicates.
library(paleotree)

### Name: multiDiv
### Title: Calculating Diversity Curves Across Multiple Datasets
### Aliases: multiDiv plotMultiDiv plotMultiDiv

### ** Examples

# Simulate a single fossil record and derive the different data types.
set.seed(444)
record <- simFossilRecord(p = 0.1, q = 0.1, nruns = 1,
	nTotalTaxa = c(30,40), nExtant = 0)
taxa <- fossilRecord2fossilTaxa(record)
rangesCont <- sampleRanges(taxa, r = 0.5)
rangesDisc <- binTimeData(rangesCont, int.length = 1)
cladogram <- taxa2cladogram(taxa, plot = TRUE)
#using multiDiv with very different data types
ttree <- timePaleoPhy(cladogram, rangesCont, type = "basic", add.term = TRUE, plot = FALSE)
input <- list(rangesCont, rangesDisc, ttree)
multiDiv(input, plot = TRUE)
#using fixed interval times
multiDiv(input, int.times = rangesDisc[[1]], plot = TRUE)
#using multiDiv with samples of trees
ttrees <- timePaleoPhy(cladogram, rangesCont, type = "basic",
	randres = TRUE, ntrees = 10, add.term = TRUE)
multiDiv(ttrees)
#uncertainty in diversity history is solely due to
#the random resolution of polytomies
#multiDiv can also take output from simFossilRecord, via fossilRecord2fossilTaxa
#what do many simulations run under some set of conditions 'look' like on average?
set.seed(444)
records <- simFossilRecord(p = 0.1, q = 0.1, nruns = 10,
	totalTime = 30, plot = TRUE)
taxa <- sapply(records,fossilRecord2fossilTaxa)
multiDiv(taxa)
#increasing cone of diversity!
#Even better on a log scale:
multiDiv(taxa, plotLogRich = TRUE)
#pure-birth example with simFossilRecord
#note that conditioning is tricky
set.seed(444)
recordsPB <- simFossilRecord(p = 0.1, q = 0, nruns = 10,
	totalTime = 30,plot = TRUE)
taxaPB <- sapply(recordsPB,fossilRecord2fossilTaxa)
multiDiv(taxaPB,plotLogRich = TRUE)
#compare many discrete diversity curves
discreteRanges <- lapply(taxa,function(x)
	binTimeData(sampleRanges(x, r = 0.5,
	min.taxa = 1), int.length = 7))
multiDiv(discreteRanges)
# Reset the plotting layout changed by the multi-panel plots above.
layout(1)
|
5b33cf6bd0d702a7a0f7168ea16370a7cd7e017c | 7f6ba6b64f6393773aedc74e1998a86c4afc3b48 | /R/utility_plot_landscape.R | 5e29a1d5a372b01f075f0c0538e877447f585abe | [
"MIT"
#' Plot Persistence Landscape
#'
#' Given a persistence landscape object in S3 class \code{landscape}, visualize the
#' landscapes using \pkg{ggplot2}.
#'
#' @param x a \code{landscape} object.
#' @param ... extra parameters including \describe{
#' \item{top.k}{the number of landscapes to be plotted (default: 5).}
#' \item{colored}{a logical; \code{TRUE} to assign different colors for landscapes, or \code{FALSE} to use grey color for all landscapes.}
#' }
#'
#' @return a \pkg{ggplot2} object.
#'
#' @examples
#' \donttest{
#' # Use 'iris' data
#' XX = as.matrix(iris[,1:4])
#'
#' # Compute Persistence diagram and landscape of order 0
#' homology = diagRips(XX)
#' landscape = diag2landscape(homology, dimension=0)
#'
#' # Plot with 'barcode'
#' opar <- par(no.readonly=TRUE)
#' plot(landscape)
#' par(opar)
#' }
#'
#' @concept utility
#' @export
plot.landscape <- function(x, ...){
  ## validate input class
  object <- x
  if (!inherits(object, "landscape")){
    stop("* landscape : input 'x' should be a 'landscape' object.")
  }

  ## extract optional arguments
  ## FIX: use if/else rather than ifelse() -- these conditions are scalars and
  ## ifelse() evaluates both branches (e.g. round(params$top.k) on NULL).
  params <- list(...)
  pnames <- names(params)
  top.k <- if ("top.k" %in% pnames) round(params$top.k) else 5
  colored <- if ("colored" %in% pnames) as.logical(params$colored) else FALSE
  ## clamp the number of landscapes to what is actually available
  numtoshow <- min(max(1, round(top.k)), ncol(object$lambda))

  ## assemble plotting data: one row per (grid point, landscape index).
  ## Vectorized column extraction replaces the original grow-in-loop c().
  tseq <- lambda <- group <- NULL   # appease R CMD check (non-standard eval)
  tvals <- as.vector(object$tseq)
  df_show <- data.frame(
    tseq   = rep(tvals, times = numtoshow),
    lambda = as.vector(object$lambda[, seq_len(numtoshow), drop = FALSE]),
    group  = as.factor(rep(seq_len(numtoshow), each = length(tvals)))
  )

  ## visualize with ggplot2; one line per landscape, optionally colored.
  ## NOTE: aes_string() is deprecated in recent ggplot2 but retained here for
  ## behavioral compatibility.
  if (colored){
    ggout <- ggplot2::ggplot(data=df_show) +
      ggplot2::geom_line(ggplot2::aes_string(x="tseq", y="lambda", group="group", colour="group", linetype="group")) +
      ggplot2::scale_color_discrete() +
      ggplot2::theme_minimal() +
      ggplot2::theme(legend.position = "none")
  } else {
    ggout <- ggplot2::ggplot(data=df_show) +
      ggplot2::geom_line(ggplot2::aes_string(x="tseq", y="lambda", group="group", linetype="group"), color="grey") +
      ggplot2::theme_minimal() +
      ggplot2::theme(legend.position = "none")
  }

  ## strip axis labels and grid; keep plain black axis lines
  ggout <- ggout +
    ggplot2::xlab("") +
    ggplot2::ylab("") +
    ggplot2::theme(panel.grid = ggplot2::element_blank(),
                   axis.line.x.bottom = ggplot2::element_line(colour="black"),
                   axis.line.y.left = ggplot2::element_line(colour="black"))
  return(ggout)
}
################################################################################
#
# Package Name: dsMTLBase
# Description: The server-side functions of dsMTL
#
# dsMTL - a computational framework for privacy-preserving, distributed
#   multi-task machine learning
# Copyright (C) 2021 Han Cao (han.cao@zi-mannheim.de)
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
################################################################################

################################################################################
#' @title Subset subjects given index
#' @description Subset subjects for cross-validation
#' @param symbol The name of the design matrix
#' @param idx The index of the selected subjects
#' @return The subset of the design matrix
#' @details In the k-fold in-site cross-validation, the sample in each server was randomly divided into k fold. The idx was to subset the
#' design matrix into the training or test sample in each of k fold cross-validation
#' @export
#' @author Han Cao
################################################################################
subsetSubjestsDS=function(symbol, idx){
  # Dereference the server-side object named by `symbol` in the calling
  # (server session) environment; eval(parse()) is the standard DataSHIELD
  # pattern for resolving a client-supplied object name.
  symbol<- eval(parse(text=symbol), envir = parent.frame())
  # The client transmits row indices as one comma-separated string; split it
  # back into an integer vector.
  idx <- as.integer(unlist(strsplit(idx, split=",")))
  # drop=F keeps the result a matrix even when a single row is selected
  rData= symbol[idx, , drop=F]
  return(rData)
}
|
f8691aa845d0f8b4ba31e06db404d9b5576ef985 | d48a6be6d855db72443aa767d680e13596e2a180 | /RMark/R/make.design.data.R | 75592f56c491d7fd20985bff45e65c21efc6a9e1 | [] | no_license | jlaake/RMark | f77e79d6051f1abfd57832fd60f7b63540a42ab9 | 7505aefe594a24e8c5f2a9b0b8ac11ffbdb8a62d | refs/heads/master | 2023-06-26T21:29:27.942346 | 2023-06-25T16:35:43 | 2023-06-25T16:35:43 | 2,009,580 | 17 | 15 | null | 2019-01-10T17:17:11 | 2011-07-06T23:44:02 | R | UTF-8 | R | false | false | 40,631 | r | make.design.data.R | #' Create design dataframes for MARK model specification
#'
#' For each type of parameter in the analysis model (e.g, p, Phi, r), this
#' function creates design data based on the parameter index matrix (PIM) in
#' MARK terminology. Design data relate parameters to the sampling and data
#' structures; whereas \code{data} relate to the object(animal) being sampled.
#' Design data relate parameters to time, age, cohort and group structure. Any
#' variables in the design data can be used in formulas to specify the model in
#' \code{\link{make.mark.model}}.
#'
#' After processing the data, the next step is to create the design data for
#' building the models which is done with this function. The design data are
#' different than the capture history data that relates to animals. The types
#' of parameters and design data are specific to the type of analysis. For
#' example, consider a CJS analysis that has parameters Phi and p. If there
#' are 4 occasions, there are 3 cohorts and potentially 6 different Phi and 6
#' different p parameters for a single group. The format for each parameter
#' information matrix (PIM) in MARK is triangular. RMark uses the all
#' different formulation for PIMS by default, so the PIMs would be
#' \preformatted{ Phi p 1 2 3 7 8 9 4 5 10 11 6 12 } If you chose
#' pim.type="time" for each parameter in "CJS", then the PIMS are structured as
#' \preformatted{ Phi p 1 2 3 4 5 6 2 3 5 6 3 6 } That structure is only useful
#' if there is only a single release cohort represented by the PIM. If you
#' choose this option and there is more than one cohort represented by the PIM
#' then it will restrict the possible set of models that can be represented.
#'
#' Each of these parameters relates to different times, different cohorts (time
#' of initial release) and different ages (at least in terms of time since
#' first capture). Thus we can think of a data frame for each parameter that
#' might look as follows for Phi for the all different structure:
#' \preformatted{ Index time cohort age 1 1 1 0 2 2 1 1 3 3 1 2 4 2 2 0 5 3 2 1
#' 6 3 3 0 } With this design data, one can envision models that describe Phi
#' in terms of the variables time, cohort and age. For example a time model
#' would have a design matrix like: \preformatted{ Int T2 T3 1 1 0 0 2 1 1 0 3
#' 1 0 1 4 1 1 0 5 1 0 1 6 1 0 1 } Or a time + cohort model might look like
#' \preformatted{ Int T2 T3 C2 C3 1 1 0 0 0 0 2 1 1 0 0 0 3 1 0 1 0 0 4 1 1 0 1
#' 0 5 1 0 1 1 0 6 1 0 1 0 1 } While you could certainly develop these designs
#' manually within MARK, the power of the R code rests with the internal R
#' function \code{\link{model.matrix}} which can take the design data and
#' create the design matrix from a formula specification such as \code{~time}
#' or \code{~time+cohort} alleviating the need to create the design matrix
#' manually. While many analyses may only need age, time or cohort, it is
#' quite possible to extend the kind of design data, to include different
#' functions of these variables or add additional variables such as effort.
#' One could consider design data for p as follows: \preformatted{ Index time
#' cohort age effort juvenile 7 1 1 1 10 1 8 2 1 2 5 0 9 3 1 3 15 0 10 2 2 1 5
#' 1 11 3 2 2 15 0 12 3 3 1 15 1 } The added columns represent a time dependent
#' covariate (effort) and an age variable of juvenile/adult. With these design
#' data, it is easy to specify different models such as \code{~time},
#' \code{~effort}, \code{~effort+age} or \code{~effort+juvenile}.
#'
#' With the simplest call:
#'
#' \code{ddl=make.design.data(proc.example.data)}
#'
#' the function creates default design data for each type of parameter in the
#' model as defined by \code{proc.example.data$model}. If
#' \code{proc.example.data} was created with the call in the first example of
#' \code{\link{process.data}}, the model is "CJS" (the default model) so the
#' return value is a list with 2 data frames: one for Phi and one for p. They
#' can be accessed as \code{ddl$Phi} (or \code{ddl[["Phi"]]}) and \code{ddl$p}
#' (or \code{ddl[["p"]]}) or as \code{ddl[[1]]} and \code{ddl[[2]]}
#' respectively. Using the former notation is easier and safer because it is
#' independent of the ordering of the parameters in the list. For this example,
#' there are 16 groups and each group has 21 Phi parameters and 21 p
#' parameters. Thus, there are 336 rows (parameters) in the design data frame
#' for both Phi and p and thus a total of 772 parameters.
#'
#' The default fields in each dataframe are typically \code{group}, \code{cohort},
#' \code{age}, \code{time}, \code{Cohort}, \code{Age}, and \code{Time}. The
#' first 4 fields are factor variables, whereas \code{Cohort}, \code{Age} and
#' \code{Time} are numeric covariate versions of \code{cohort}, \code{age}, and
#' \code{time} shifted so the first value is always zero. However, there are
#' additional fields that are added depending on the capture-recapture model and
#' the parameter in the model. For example, in multistrata models the default data
#' include stratum in survival(S) and stratum and tostratum in Psi, the transition
#' probabilities. Also, for closed capture heterogeneity models a factor variable
#' \code{mixture} is included. It is always best to examine the design data after
#' creating them because those fields are your "data" for building models in
#' addition to individual covariates in the capture history data.
#'
#' If \code{groups} were created in the call to
#' \code{\link{process.data}}, then the factor variables used to create the
#' \code{groups} are also included in the design data for each type of
#' parameter. If one of the grouping variables is an age variable it is named
#' \code{initial.age.class} to recognize explicitly that it represents a static
#' initial age and to avoid naming conflicts with \code{age} and \code{Age}
#' variables which represent dynamic age variables of the age of the animal
#' through time. Non-age related grouping variables are added to the design
#' data using their names in \code{data}. For example if
#' \code{proc.example.data} is defined using the first example in
#' \code{process.data}, then the fields \code{sex}, \code{initial.age.class}
#' (in place of \code{age} in this case), and \code{region} are added to the
#' design data in addition to the \code{group} variable that has 16 levels. The
#' levels of the \code{group} variable are created by pasting (concatenating)
#' the values of the grouping factor in order. For example, M11 is sex=M, age
#' class=1 and region=1.
#'
#' By default, the factor variables \code{age}, \code{time}, and \code{cohort}
#' are created such that there is a factor level for each unique value. By
#' specfying values for the argument \code{parameters}, the values of
#' \code{age}, \code{time}, and \code{cohort} can be binned (put into
#' intervals) to reduce the number of levels of each factor variable. The
#' argument \code{parameters} is specified as a list of lists. The first level
#' of the list specifies the parameter type and the second level specifies the
#' variables (\code{age}, \code{time}, or \code{cohort}) that will be binned
#' and the cutpoints (endpoints) for the intervals. For example, if you
#' expected that survival may change substantially to age 3 (i.e. first 3 years
#' of life) but remain relatively constant beyond then, you could bin the ages
#' for survival as 0,1,2,3-8. Likewise, as well, you could decide to bin time
#' into 2 intervals for capture probability in which effort and expected
#' capture probability might be constant within each interval. This could be
#' done with the following call using the argument \code{parameters}:
#'
#' \preformatted{ddl=make.design.data(proc.example.data,
#' parameters=list(Phi=list(age.bins=c(0,0.5,1,2,8)),
#' p=list(time.bins=c(1980,1983,1986))))}
#'
#' In the above example, \code{age} is binned for Phi but not for p; likewise
#' \code{time} is binned for p but not for Phi. The bins for age were defined
#' as 0,0.5,1,2,8 because the intervals are closed ("]" - inclusive) on the
#' right by default and open ("(" non-inclusive) on the left, except for the
#' first interval which is closed on the left. Had we used 0,1,2,8, 0 and 1
#' would have been in a single interval. Any value less than 1 and greater
#' than 0 could be used in place of 0.5. Alternatively, the same bins could be
#' specified as:
#'
#' \preformatted{ddl=make.design.data(proc.example.data,
#' parameters=list(Phi=list(age.bins=c(0,1,2,3,8)),
#' p=list(time.bins=c(1980,1984,1986))),right=FALSE)}
#'
#' To create the design data and maintain flexibility, I recommend creating the
#' default design data and then adding other binned variables with the function
#' \code{\link{add.design.data}}. The 2 binned variables defined above can be
#' added to the default design data as follows:
#'
#' \preformatted{ ddl=make.design.data(proc.example.data)
#' ddl=add.design.data(proc.example.data,ddl,parameter="Phi",type="age",
#' bins=c(0,.5,1,2,8),name="agebin")
#' ddl=add.design.data(proc.example.data,ddl,parameter="p",type="time",
#' bins=c(1980,1983,1986),name="timebin") }
#'
#' Adding the other binned variables to the default design data, allows models
#' based on either time, timebin, or Time for p and age, agebin or Age for Phi.
#' Any number of additional binned variables can be defined as long as they are
#' given unique names. Note that R is case-specific so \code{~Time} specifies a
#' model which is a linear trend over time ((e.g. Phi(T) or p(T) in MARK)
#' whereas \code{~time} creates a model with a different factor level for each
#' \code{time} in the data (e.g. Phi(t) or p(t) in MARK) and \code{~timebin}
#' creates a model with 2 factor levels 1980-1983 and 1984-1986.
#'
#'
#' Some circumstances may require direct manipulation of the design data to
#' create the needed variable when simple binning is not sufficient or when the
#' design data is a variable other than one related to \code{time}, \code{age},
#' \code{cohort} or \code{group} (e.g., effort index). This can be done with
#' any of the vast array of R commands. For example, consider a situation in
#' which 1983 and 1985 were drought years and you wanted to develop a model in
#' which survival was different in drought and non-drought years. This could
#' be done with the following commands:
#'
#' \code{ddl$Phi$drought=0}
#'
#' \code{ddl$Phi$drought[ddl$phi$time==1983 | ddl$Phi$time==1985]= 1}
#'
#' The first command creates a variable named drought in the Phi design data
#' and initializes it with 0. The second command changes the drought variable
#' to 1 for the years 1983 and 1985. The single brackets [] index a data frame,
#' matrix or vector. In this case \code{ddl$Phi$drought} is a vector and
#' \code{ddl$Phi$time==1983 | ddl$Phi$time==1985} selects the values in which
#' time is equal (==) to 1983 or ("|") 1985. A simpler example might occur if
#' we want to create a function of one of the continuous variables. If we
#' wanted to define a model for p that was a function of age and age squared,
#' we could add the age squared variable as:
#'
#' \code{ddl$p$Agesq=ddl$p$Age^2}
#'
#' Any of the fields in the design data can be used in formulae for the
#' parameters. However, it is important to recognize that additional variables
#' you define and add to the design data are specific to a particular type of
#' parameter (e.g., Phi, p, etc). Thus, in the above example, you could not use
#' Agesq in a model for Phi without also adding it to the Phi design data. As
#' described in \code{\link{make.mark.model}}, there is actually a simpler way
#' to add simple functions of variables to a formula without defining them in
#' the design data.
#'
#'
#' The above manipulations are sufficient if there is only one or two variables
#' that need to be added to the design data. If there are many covariates that
#' are time(occasion)-specific then it may be easier to setup a dataframe with
#' the covariate data and use \code{\link{merge_design.covariates}}.
#'
#'
#' The fields that are automatically created in the design data depends on the
#' model. For example, with models such as "POPAN" or any of the "Pradel"
#' models, the PIM structure is called square which really means that it is a
#' single row and all the rows are the same length for each group. Thus,
#' rectangular or row may have been a better descriptor. Regardless, in this
#' case there is no concept of a cohort within the PIM which is equivalent to a
#' row within a triangular PIM for "CJS" type models. Thus, for parameters with
#' "Square" PIMS the cohort (and Cohort) field is not generated. The cohort
#' field is also not created if \code{pim.type="time"} for "Triangular" PIMS,
#' because that structure has the same structure for each row (cohort) and
#' adding cohort effects would be inappropriate.
#'
#'
#' For models with "Square" PIMS or \code{pim.type="time"} for "Triangular"
#' PIMS, it is possible to create a cohort variable by defining the cohort
#' variable as a covariate in the capture history data and using it as a
#' variable for creating groups. As with all grouping variables, it is added
#' to the design data. Now the one caution with "Square" PIMS is that they are
#' all the same length. Imagine representing a triangular PIM with a set of
#' square PIMS with each row being a cohort. The resulting set of PIMS is now
#' rectangular but the lower portion of the matrix is superfluous because the
#' parameters represent times prior to the entry of the cohort, assuming that
#' the use of cohort is to represent a birth cohort. This is only problematic
#' for these kinds of models when the structure accommodates age and the concept
#' of a birth cohort. The solution to the problem is to delete the design data
#' for the superfluous parameters after it is created (see warning below).
#' For example, let us presume that you used cohort with 3 levels
#' as a grouping variable for a model with "Square" PIMS which has 3 occasions.
#' Then, the PIM structure would look as follows for Phi:
#' \preformatted{
#' Phi  1 2 3
#'      4 5 6
#'      7 8 9
#' }
#' If
#' each row represented a cohort that entered at occasions 1,2,3 then
#' parameters 4,7,8 are superfluous or could be thought of as representing
#' cells that are structural zeros in the model because no observations can be
#' made of those cohorts at those times.
#'
#' After creating the design data, the unneeded rows can be deleted with R
#' commands or you can use the argument \code{remove.unused=TRUE}. As an
#' example, a code segment might look as follows if \code{chdata} was defined
#' properly: \preformatted{
#' mydata=process.data(chdata,model="POPAN",groups="cohort")
#' ddl=make.design.data(mydata) ddl$Phi=ddl$Phi[-c(4,7,8),] } If cohort and
#' time were suitably defined an easier solution especially for a larger
#' problem would be \preformatted{
#' ddl$Phi=ddl$Phi[as.numeric(ddl$Phi$time)>=as.numeric(ddl$Phi$cohort),] }
#' Which would only keep parameters in which the time is the same or greater
#' than the cohort. Note that time and cohort would be factor variables and <
#' and > do not make sense which is the reason for the \code{as.numeric} which
#' translates the factor to a numeric ordering of factors (1,2,...) but not the
#' numeric value of the factor level (e.g., 1987,1998). Thus, the above
#' assumes that both time and cohort have the same factor levels. The design
#' data is specific to each parameter, so the unneeded parameters need to be
#' deleted from design data of each parameter.
#'
#' However, all of this is done automatically by setting the argument
#' \code{remove.unused=TRUE}. It functions differently depending on the type
#' of PIM structure. For models with "Triangular" PIMS, unused design data are
#' determined based on the lack of a release cohort. For example, if there
#' were no capture history data that started with 0 and had a 1 in the second
#' position ("01.....") that would mean that there were no releases on occasion
#' 2 and row 2 in the PIM would not be needed so it would be removed from the
#' design data. If \code{remove.unused=TRUE} the design data are removed for
#' any missing cohorts within each group. For models with "Square" PIMS, cohort
#' structure is defined by a grouping variable. If there is a field named
#' "cohort" within the design data, then unused design data are defined to
#' occur when time < cohort. This is particularly useful for age structured
#' models which define birth cohorts. In that case there will be sampling
#' times prior to the birth of the cohort which are not relevant and should be
#' treated as "structural zeros" and not as a zero based on stochastic events.
#'
#' If the design data are removed, when the model is constructed with
#' \code{\link{make.mark.model}}, the argument \code{default.fixed} controls
#' what happens with the real parameters defined by the missing design data.
#' If \code{default.fixed=TRUE}, then the real parameters are fixed at values
#' that explain with certainty the observed data (e.g., p=0). That is
#' necessary for models with "Square" PIMS (eg, POPAN and Pradel models) that
#' include each capture-history position in the probability calculation. For
#' "Triangular" PIMS with "CJS" type models, the capture(encounter) history
#' probability is only computed for occasions past the first "1", the release.
#' Thus, when a cohort is missing there are no entries and the missing design
#' data are truly superfluous and \code{default.fixed=FALSE} will assign the
#' missing design data to a row in the design matrix which has all 0s. That
#' row will show as a real parameter of (0.5 for a logit link) but it is not
#' included in any parameter count and does not affect any calculation. The
#' advantage in using this approach is that the R code recognizes these and
#' displays blanks for these missing parameters, so it makes for more readable
#' output when say every other cohort is missing. See
#' \code{\link{make.mark.model}} for more information about deleted design data
#' and what this means to development of model formula.
#'
#' For design data of "Multistrata" models, additional fields are added to
#' represent strata. A separate PIM is created for each stratum for each
#' parameter and this is denoted in the design data with the addition of the
#' factor variable \code{stratum} which has a level for each stratum. In
#' addition, for each stratum a dummy variable is created with the name of the
#' stratum (\code{strata.label})and it has value 1 when the parameter is for
#' that stratum and 0 otherwise. Using these variables with the interaction
#' operator ":" in formula allows more flexibility in creating model structure
#' for some strata and not others. All "Multistrata" models contain "Psi"
#' parameters which represent the transitions from a stratum to all other
#' strata. Thus if there are 3 strata, there are 6 PIMS for the "Psi"
#' parameters to represent transition from A to B, A to C, B to A, B to C, C to
#' A and C to B. The "Psi" parameters are represented by multinomial logit
#' links and the probability of remaining in the stratum is determined by
#' subtraction. To represent these differences, a factor variable
#' \code{tostratum} is created in addition to \code{stratum}. Likewise, dummy
#' variables are created for each stratum with names created by pasting "to"
#' and the strata label (e.g., toA, toB etc). Some examples of using these
#' variables to create models for "Psi" are given in
#' \code{\link{make.mark.model}}.
#'
#' \preformatted{
#'
#' ######WARNING########
#' Deleting design data for mlogit parameters like Psi in the multistate
#' model can fail if you do things like delete certain transitions. Deleting
#' design data is no longer allowed. It is better
#' to add the field fix. It should be assigned the value NA for parameters that
#' are estimated and a fixed real value for those that are fixed. Here is an example
#' with the mstrata data example:
#'
#' data(mstrata)
#' # deleting design data approach to fix Psi A to B to 0 (DON'T use this approach)
#' dp=process.data(mstrata,model="Multistrata")
#' ddl=make.design.data(dp)
#' ddl$Psi=ddl$Psi[!(ddl$Psi$stratum=="A" & ddl$Psi$tostratum=="B"),]
#' ddl$Psi
#' summary(mark(dp,ddl,output=FALSE,delete=TRUE),show.fixed=TRUE)
#' #new approach using fix to set Phi=1 for time 2 (USE this approach)
#' ddl=make.design.data(dp)
#' ddl$Psi$fix=NA
#' ddl$Psi$fix[ddl$Psi$stratum=="A" & ddl$Psi$tostratum=="B"]=0
#' ddl$Psi
#' summary(mark(dp,ddl,output=FALSE,delete=TRUE),show.fixed=TRUE)
#' }
#' @param data Processed data list; resulting value from process.data
#' @param parameters Optional list containing a list for each type of parameter
#' (list of lists); each parameter list is named with the parameter name (eg
#' Phi); each parameter list can contain vectors named age.bins,time.bins and
#' cohort.bins \tabular{ll}{ \code{subtract.stratum} \tab a vector of strata
#' letters (one for each strata) \cr \tab that specifies the tostratum that is
#' computed by subtraction \cr \tab for mlogit parameters like Psi\cr
#' \code{age.bins} \tab bins for binning ages\cr \code{time.bins} \tab bins for
#' binning times\cr \code{cohort.bins} \tab bins for binning cohorts\cr
#' \code{pim.type} \tab either "all" for all different, "time" for column time
#' structure, or \cr \tab "constant" for all values the same within the PIM\cr}
#' @param remove.unused If TRUE, unused design data are deleted; see details
#' below (as of v3.0.0 this argument is no longer used)
#' @param right If TRUE, bin intervals are closed on the right
#' @param common.zero if TRUE, uses a common begin.time to set origin (0) for
#' Time variable defaults to FALSE for legacy reasons but should be set to TRUE
#' for models that share formula like p and c with the Time model
#' @return The function value is a list of data frames. The list contains a
#' data frame for each type of parameter in the model (e.g., Phi and p for
#' CJS). The names of the list elements are the parameter names (e.g., Phi).
#' The structure of the dataframe depends on the calling arguments and the
#' model & data structure as described in the details above.
#' @author Jeff Laake
#' @export
#' @seealso \code{\link{process.data}},\code{\link{merge_design.covariates}},
#' \code{\link{add.design.data}}, \code{\link{make.mark.model}},
#' \code{\link{run.mark.model}}
#' @keywords utility
#' @examples
#'
#'
#' data(example.data)
#' proc.example.data=process.data(example.data)
#' ddl=make.design.data(proc.example.data)
#' ddl=add.design.data(proc.example.data,ddl,parameter="Phi",type="age",
#' bins=c(0,.5,1,2,8),name="agebin")
#' ddl=add.design.data(proc.example.data,ddl,parameter="p",type="time",
#' bins=c(1980,1983,1986),name="timebin")
#'
#'
#'
make.design.data <-
function(data,parameters=list(),remove.unused=FALSE,right=TRUE,common.zero=FALSE)
{
#------------------------------------------------------------------------------------------------------
# make.design.data - creates a design dataframe that is used to construct the design matrix for mark
# in make.mark.model
#
# Arguments:
#
# data - data list after using process.data
# parameters - list with an element for each parameter
# each element is a list with age.bins, time.bins and cohort.bins
# age.bins - bins for grouping ages
# time.bins - bins for grouping times
# cohort.bins - bins for grouping cohorts
# pim.type - type of pim structure "all","time","constant"
# subtract.stratum - for each stratum, the one to compute by subtraction (for Psi only)
# or for pi the stratum to compute by subtraction for each event
# subtract.events - for each stratum, either the stratum or event to be computed by subtraction
# remove.unused - if TRUE, unused design data are removed; for triangular
# pims, unused design data are determined based on lack of
# ch for a particular row (cohort) of a group; for square
# pims. if there is a cohort field in the design data, then
# it excludes any design data in which cohort < time.
#
# common.zero - if TRUE, uses a common begin.time to set origin (0) for Time variable
# defaults to FALSE for legacy reasons but should be set to TRUE
# for models that share formula like p and c with the Time model
#
# Value:
# full.design.data - list of design data frames for each type of parameter in the model
#
#
# Functions used: setup.parameters, compute.design.data, valid.parameters, setup.model
#
#----------------------------------------------------------------------------------------------------
# Helper: drops design data rows of p for sampling occasions on which no
# animal in the group was ever detected (triangular PIMs only).
remove.unused.occasions=function(data,ddl)
{
#
# Check validity of parameter list; stop if not valid
#
parameter="p"
if(!valid.parameters(data$model,parameter)) stop()
parameters=setup.parameters(data$model,parameters=NULL,data$nocc,check=FALSE,
number.of.groups=dim(data$freq)[2])
if(!parameters[[parameter]]$type%in%c("Triang","STriang"))stop("\nDoes not work for parameters with non-triangular PIM\n")
ch=data$data$ch
if(data$model=="Multistrata")
ch=gsub("[1-9 a-z A-Z]","1",ch)
#
# Loop over groups
#
number.of.groups=dim(data$group.covariates)[1]
if(is.null(number.of.groups))
{
number.of.groups=1
# FIX: was "chsplit=ch", leaving ch.split undefined in the single-group case
# and causing an "object 'ch.split' not found" error below; wrap the full
# ch vector in a one-element list so ch.split[[1]] yields all histories.
ch.split=list(ch)
}
else
ch.split=split(ch,data$data$group)
for(j in 1:number.of.groups)
{
chmat=matrix(as.numeric(unlist(strsplit(ch.split[[j]],split=vector(length=0)))),ncol=nchar(ch[1]),byrow=TRUE)
exclude.occ=(1:dim(chmat)[2])[colSums(chmat)==0]
if(number.of.groups==1)
ddl[[parameter]]=ddl[[parameter]][!ddl[[parameter]]$time%in%levels(ddl[[parameter]]$time)[exclude.occ-1],]
else
{
group=levels(ddl[[parameter]]$group)[j]
ddl[[parameter]]=ddl[[parameter]][(ddl[[parameter]]$group!=group) |
(ddl[[parameter]]$group==group & !ddl[[parameter]]$time%in%levels(ddl[[parameter]]$time)[exclude.occ-1]),]
}
}
return(ddl)
}
#### start of make.design.data
#
#
#
# FIX: error message previously named a nonexistent argument "removed.unused"
if(remove.unused) stop("As of version 3.0.0 the argument remove.unused=TRUE is no longer allowed.")
if(!is.list(data))
stop("data argument is not a processed data list")
else
if(!"data"%in%names(data)|!"model"%in%names(data))
stop("data argument is not a processed data list")
#
# Check validity of parameter list; stop if not valid
#
if(!valid.parameters(data$model,parameters)) stop()
#
# Setup model and parameters
#
par.list=setup.parameters(data$model,check=TRUE)
parameters=setup.parameters(data$model,parameters,data$nocc,check=FALSE,
number.of.groups=dim(data$freq)[2])
parameters=parameters[par.list]
model.list=setup.model(data$model,data$nocc,data$mixtures)
subtract.events=NULL
#
# Create a data matrix for the each parameter in the model with age, year and cohort for each index
# This data matrix (design.data) is used below to create the design matrix from the formulas
# If age,cohort or year bins are given, use those. Otherwise each is treated as a factor
# without binning.
#
# 10 Jan 06 ; added pim.type argument in call to compute.design.data
#
full.design.data=vector("list",length=length(parameters))
pimtypes=vector("list",length=length(parameters))
anyTriang=FALSE
anySquare=FALSE
for(i in 1:length(parameters))
{
#
# For mixtures, multistrata and robust designs set up values for input to
# compute.design.data
#
limits=NULL
# if only one mixture, set mixed parameters to FALSE and rows to 1
if(data$mixtures==1)
{
if(!is.null(parameters[[i]]$mix))
{
parameters[[i]]$mix=FALSE
parameters[[i]]$rows=1
}
}
# multistate model setup
if(!is.null(parameters[[i]]$bystratum) && parameters[[i]]$bystratum)
{
strata.labels=data$strata.labels
nstrata=data$nstrata
if(data$model=="RDMSOccupancy"&names(parameters)[i]=="Psi"){
nstrata=nstrata+1
strata.labels=c(0,strata.labels)
}
if(!is.null(parameters[[i]]$subset) && nchar(parameters[[i]]$subset)>0)
{
limits=strsplit(parameters[[i]]$subset,"")[[1]]
if(limits[1]=="0")
{
strata.labels=c("0",strata.labels)
nstrata=nstrata+1
}
}
if(!is.null(parameters[[i]]$tostrata) && parameters[[i]]$tostrata)
{
if(!is.null(parameters[[i]]$subtract.stratum))
subtract.stratum=parameters[[i]]$subtract.stratum
else
subtract.stratum=strata.labels
tostrata=TRUE
}
else
{
if(data$model%in%c("RDMSOpenMCSeas","RDMSOpenMisClass","RDMSMisClass","HidMarkov") & names(parameters)[i]%in%c("pi","Omega","Delta"))
{
if(!is.null(parameters[[i]]$subtract.stratum))
subtract.stratum=parameters[[i]]$subtract.stratum
else
{
if(names(parameters)[i]=="Delta")
{
subtract.stratum=NULL
if(is.null(parameters[[i]]$subtract.events))
subtract.events=strata.labels
else
subtract.events=parameters[[i]]$subtract.events
}
else
subtract.stratum=data$strata.labels[nstrata]
}
} else
{
subtract.stratum=NULL
}
tostrata=FALSE
}
}
else
{
# Set values for non-MS models
subtract.stratum=NULL
strata.labels=NULL
nstrata=1
tostrata=FALSE
}
if(!model.list$robust) parameters[[i]]$secondary=FALSE
#
# Compute design data for this parameter if conditions are valid
# mod 27 June 2011 -- if data structure (too few occasions) is such that no parameters can be estimated it does not create the design data
if(is.na(parameters[[i]]$num)||(parameters[[i]]$num+data$nocc)>0)
{
sub.stratum=0
if(!is.null(parameters[[i]]$sub.stratum))sub.stratum=parameters[[i]]$sub.stratum
# Special code for parameter Phi0 in RDMSOccRepro model
if(data$model%in%c("RDMSOccRepro","RDMSOccupancy") & names(parameters)[i]=="Phi0")
{
design.data=expand.grid(stratum=data$strata.labels,group=1:ncol(data$freq))
if(ncol(data$freq)>1)
{
ix=grep("age",names(data$group.covariates))
cnames=names(data$group.covariates)
if(length(ix)!=0)
if(names(data$group.covariates)[ix]=="age")
{
cnames[ix]="initial.age.class"
names(data$group.covariates)=cnames
}
gc=data.frame(data$group.covariates[design.data$group,])
names(gc)=cnames
row.names(gc)=NULL
design.data=cbind(design.data,gc)
}
} else
{
mscale=1
if(data$model=="RDMultScalOcc" &names(parameters)[i]=="Theta")mscale=data$mixtures
if(data$model=="NSpeciesOcc" &names(parameters)[i]=="f")parameters[[i]]$rows=2^data$mixtures-1 -data$mixtures
design.data=compute.design.data(data,parameters[[i]]$begin,parameters[[i]]$num,
parameters[[i]]$type,parameters[[i]]$mix,parameters[[i]]$rows,
parameters[[i]]$pim.type,parameters[[i]]$secondary, nstrata,
tostrata,strata.labels,subtract.stratum,common.zero=common.zero,
sub.stratum=sub.stratum,limits=limits,events=data$events,use.events=parameters[[i]]$events,
mscale=mscale,subtract.events=subtract.events)
}
if(!is.null(parameters[[i]]$mix) && parameters[[i]]$mix)design.data$mixture=as.factor(design.data$mixture)
if(parameters[[i]]$secondary)
{
session.labels=data$begin.time+cumsum(c(0,data$time.intervals[data$time.intervals>0]))
design.data$session=factor(session.labels[design.data$session])
}
design.data$group=as.factor(design.data$group)
if(!is.null(data$group.covariates))
levels(design.data$group)=apply(data$group.covariates,1,paste,collapse="")
# Bin (or factor) cohort, age and time fields as requested for this parameter
if(!is.null(design.data$cohort))
if(is.null(parameters[[i]]$cohort.bins))
design.data$cohort=factor(design.data$cohort,levels=unique(levels(factor(design.data$cohort))))
else
design.data$cohort=cut(design.data$cohort,parameters[[i]]$cohort.bins,include.lowest=TRUE,right=right)
if(!is.null(design.data$age))
if(is.null(parameters[[i]]$age.bins))
design.data$age=factor(design.data$age,levels=unique(levels(factor(design.data$age))))
else
design.data$age=cut(design.data$age,parameters[[i]]$age.bins,include.lowest=TRUE,right=right)
if(!is.null(design.data$time))
# mod 30 Sept 09 to remove unused time factor levels
if(is.null(parameters[[i]]$time.bins))
design.data$time=factor(design.data$time,levels=unique(levels(factor(design.data$time))))
else
design.data$time=cut(design.data$time,parameters[[i]]$time.bins,include.lowest=TRUE,right=right)
if(model.list$closed | model.list$robust )
{
if(names(parameters)[i]=="p" )
{
if(!is.null(parameters[[i]]$share)) design.data$c=0
design.data$age=NULL
design.data$Age=NULL
}
if(names(parameters)[i]=="c")
{
design.data$c=1
design.data$age=NULL
design.data$Age=NULL
}
if(names(parameters)[i]=="N" | (names(parameters)[i]=="pi" & !is.null(parameters[[i]]$mix)))
{
design.data$age=NULL
design.data$Age=NULL
design.data$time=NULL
design.data$Time=NULL
}
}
if(data$model%in%c("RDMSOccRepro","RDMSOccupancy"))
{
if(names(parameters)[i]=="R")
design.data=design.data[order(design.data$group,design.data$stratum),]
else
if(names(parameters)[i]=="Delta")
design.data=design.data[order(design.data$group,design.data$stratum,design.data$session),]
}
full.design.data[[i]]=cbind(par.index=1:nrow(design.data),model.index=1:nrow(design.data),design.data)
row.names(full.design.data[[i]])=1:nrow(full.design.data[[i]])
pimtypes[[i]]=list(pim.type=parameters[[i]]$pim.type)
if(!is.null(subtract.stratum))pimtypes[[i]]$subtract.stratum=subtract.stratum
if(parameters[[i]]$type%in%c("Triang","STriang")&&parameters[[i]]$pim.type=="all")anyTriang=TRUE
if(parameters[[i]]$type =="Square")anySquare=TRUE
}
} # end of loop over each parameter
names(full.design.data)=names(parameters)
null.design.data=sapply(full.design.data,is.null)
parameters=parameters[!null.design.data]
full.design.data=full.design.data[!null.design.data]
# For MultiScaleOcc models add primary field for p parameter
if(data$model%in%c("MultScalOcc","RDMultScalOcc"))
full.design.data[["p"]]=cbind(full.design.data[["p"]],primary=rep(rep(1:(nrow(full.design.data[["p"]])/(data$mixtures*data$nocc)),each=data$mixtures),times=data$nocc))
# add model indices
prev=0
for(i in 1:length(full.design.data))
{
full.design.data[[i]]$model.index=full.design.data[[i]]$par.index+prev
prev=max(full.design.data[[i]]$model.index)
}
#
# Remove unused design data
#
# NOTE(review): given the stop() at the top of the function, remove.unused can
# never be TRUE at this point; this legacy block is retained unchanged.
if(remove.unused)
{
ch=data$data$ch
if(data$model=="Multistrata")
ch=gsub("[A-Z a-z 1-9]","1",ch)
if(anyTriang)
{
#
# Loop over groups
#
number.of.groups=dim(data$group.covariates)[1]
if(is.null(number.of.groups))number.of.groups=1
for(j in 1:number.of.groups)
{
remove.cohort=NULL
for(k in 1:data$nocc)
{
if(k>1)
first.0=paste(rep("0",k-1),collapse="")
else
first.0=""
if(number.of.groups==1)
{
if(!any(substr(ch,1,k)==paste(first.0,"1",sep="")))
remove.cohort=c(remove.cohort,k)
}
else
if(!any(substr(ch[data$data$group==j],1,k)==paste(first.0,"1",sep="")))
remove.cohort=c(remove.cohort,k)
}
for(i in 1:length(parameters))
{
if(parameters[[i]]$type %in%c("Triang","STriang")&&parameters[[i]]$pim.type=="all")
{
if(number.of.groups==1)
full.design.data[[i]]=full.design.data[[i]][!(full.design.data[[i]]$occ.cohort%in%remove.cohort),]
# full.design.data[[i]]=full.design.data[[i]][!(as.numeric(full.design.data[[i]]$cohort)%in%remove.cohort),]
else
{
# modified 7 Apr 08 to handle different begin.times between groups
full.design.data[[i]]=full.design.data[[i]][!(as.numeric(full.design.data[[i]]$group)==j &
full.design.data[[i]]$occ.cohort%in%remove.cohort),]
# full.design.data[[i]]=full.design.data[[i]][!(as.numeric(full.design.data[[i]]$group)==j &
# as.numeric(factor(full.design.data[[i]]$cohort,levels=unique(full.design.data[[i]]$cohort[as.numeric(full.design.data[[i]]$group)==j ])))%in%remove.cohort),]
# modified 10 Aug to remove unused levels created in removing cohorts
full.design.data[[i]]$cohort=factor(full.design.data[[i]]$cohort)
full.design.data[[i]]$age=factor(full.design.data[[i]]$age)
full.design.data[[i]]$time=factor(full.design.data[[i]]$time)
}
}
}
}
}
# if reverse Multistrata model, remove design data for S,Psi and p for added occasions/intervals
if(data$reverse)
{
full.design.data[["S"]]=full.design.data[["S"]][!full.design.data[["S"]]$occ%in%seq(1,data$nocc-1,2),]
full.design.data[["p"]]=full.design.data[["p"]][!full.design.data[["p"]]$occ%in%seq(1,data$nocc-1,2),]
full.design.data[["Psi"]]=full.design.data[["Psi"]][!full.design.data[["Psi"]]$occ%in%seq(2,data$nocc-1,2),]
}
if(anySquare)
{
for(i in 1:length(parameters))
{
if(parameters[[i]]$type =="Square"&is.null(parameters[[i]]$leave.unused))
{
time=full.design.data[[i]]$time
cohort=full.design.data[[i]]$cohort
full.design.data[[i]]=full.design.data[[i]][as.numeric(levels(time)[as.numeric(time)])
>= as.numeric(levels(cohort)[as.numeric(cohort)]),]
}
}
}
# drop any unused factor levels after removing design data
for(i in 1:length(parameters))full.design.data[[i]]=droplevels(full.design.data[[i]])
}
# Delete occ.cohort which is only used to remove unused cohorts if any
if(data$reverse)
for(i in 1:length(parameters))
full.design.data[[i]]$occ.cohort=NULL
# make pim type assignments and return results
pimtypes=pimtypes[!null.design.data]
names(pimtypes)=names(parameters)
full.design.data$pimtypes=pimtypes
return(full.design.data)
}
|
58ec200d22a5c9201b05db78626eb177c869f47a | 5c852d2a4f164ba82ac165288f448b2401949b7a | /server.R | c221518aa7a9f79054911d2b4d276a5a526e82cf | [] | no_license | cocassel/user-activity-dashboard | 9c26799bf45736e1ec5f14debae2e79e1b473bb4 | 7846208d63a63ebb900612a0ee1411abfc9d6c0a | refs/heads/master | 2020-04-21T20:30:49.979657 | 2019-02-11T05:18:43 | 2019-02-11T05:18:43 | 169,847,273 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 41,907 | r | server.R | #
# This is the server logic of a Shiny web application.
#
# You can run the application by clicking 'Run App' above.
#
#to install a package, follow the following format:
#install.packages('plyr')
#or open the packages.R file
library(shiny)
library(plyr)
library(anytime)
library(ggplot2)
library(scales)
library(data.table)
library(tidyr)
# SWITCH THIS TO "testing = TRUE" WHEN YOU WOULD LIKE TO RUN THE APP WITH testing.csv
testing = FALSE

# CSV names
csv_database = "firebase_data_shining_heat_4904_nonredundant.csv"
csv_database_test = "tests/testing.csv"

# Pick the active CSV once from the testing flag, then load it two ways:
# with headers (per-event records) and headerless with the label row skipped
# (raw rows for the sequence-of-events charts).
active_csv = if (testing) csv_database_test else csv_database
data = read.csv(active_csv, na.strings = c("", " ", "NA"))
nonredundant_data = read.table(active_csv, sep = ",", skip = 1)

# Unique session identifiers taken from column V2 of the headerless table
session_ids <- as.character(unique(nonredundant_data[, "V2"], incomparables = FALSE, MARGIN = 1, fromLast = FALSE))
num_sessions <- NROW(session_ids)

# Display fractional seconds; previous options kept in `op`
op <- options(digits.secs = 6)

# Set the timezone
Sys.setenv(TZ = 'EST')
# Fixed colour assigned to each logged behaviour/event type so that every
# chart in the dashboard renders a given event with the same colour.
colour_reference <- list(
  "New Keyframe" = "darkgreen",
  "Copy Main" = "darkturquoise",
  "Paste" = "darkslategray",
  "Delete Keyframe" = "red",
  "Copy Example" = "hotpink2",
  "Example Selected: v4" = "lightcoral",
  "Example Selected: v3" = "plum1",
  "Example Selected: sineExample" = "gold2",
  "Example Selected: randomExample" = "pink",
  "Example Selected: v10_09_1_8" = "aquamarine2",
  "Example Selected: v10_augmented" = "purple",
  "Example Selected: textures" = "blue",
  "Frame Selected: example" = "royalblue1",
  "Frame Selected: main" = "skyblue1",
  "Playback Unmuted" = 'cadetblue2',
  "Playback On" = "limegreen",
  "Dragging Main Playhead" = "orange",
  "Dragging Example Playhead" = "mediumpurple3",
  "Dragging Keyframe" = "sienna2",
  "UNDO" = "grey",
  "REDO" = "yellowgreen",
  "Start Task" = "lightslategray",
  "Other" = "magenta2")
# Build the choices for the session-id dropdown: one row per session, sorted
# by descending behaviour count, then collapsed into "<id> Count:<n>" labels.
behaviour_count_table <- as.data.frame(table(data$session))
behaviour_count_table <- behaviour_count_table[order(behaviour_count_table$Freq, decreasing = TRUE), ]
session_id_with_behaviour_counts <- unite(
  behaviour_count_table,
  "Session Ids with Behaviour Counts",
  c(Var1, Freq),
  sep = " Count:",
  remove = TRUE
)
# Define server logic required to draw charts
shinyServer(function(input, output, session) {
# Reset-button handler: when "reset" is clicked, snap the date-range picker
# (both the selection and its allowed bounds) back to the full span of dates
# present in the loaded data.
observeEvent(input$reset,{
updateDateRangeInput(session,"dateRange1",
start = min(as.Date(data$datetime)),
end = max(as.Date(data$datetime)),
min = min(as.Date(data$datetime)),
max = max(as.Date(data$datetime)))
}
)
# On every reactive flush, keep the filter widgets consistent with the data:
# clamp the chosen date range to dates that actually occur in the data, and
# refresh the session-id dropdown with the precomputed
# "<session id> Count:<behaviour count>" choice strings.
observe({
#Date variables passed to and from the filters
updateDateRangeInput(session,"dateRange1",
start = max(input$dateRange1[1],min(as.Date(data$datetime))),
end = min(input$dateRange1[2],max(as.Date(data$datetime))),
min = max(input$dateRange1[1],min(as.Date(data$datetime))),
max = min(input$dateRange1[2],max(as.Date(data$datetime))))
updateSelectInput(session,"select_id",
choices = session_id_with_behaviour_counts)
})
# Earliest and latest event dates in the loaded data, returned as c(min, max).
# NOTE(review): this is assigned as a plain function rather than via a shiny
# render* function -- confirm how the UI side consumes it.
output$dates <- function(){
date_values <- as.Date(data$datetime)
c(min(date_values), max(date_values))
}
# Histogram of the number of logged actions per session within the selected
# date range; shows a centered placeholder message when the range is empty.
output$actionPerSession <- renderPlot({
#**** number of actions per session histograph****#
start_date = input$dateRange1[1]
end_date = input$dateRange1[2]
y = data[as.Date(data$datetime)>=start_date & as.Date(data$datetime)<=end_date,]
# (removed a dead assignment that read data$sessions -- the column is named
# "session", so the expression always yielded NULL and its result was never
# used)
#check for data existing within the range
number_of_rows = NROW(y)
if (number_of_rows == 0) {
# blank canvas with a text message instead of a chart
par(mar = c(0,0,0,0))
plot(c(0, 1), c(0, 1), ann = F, bty = 'n', type = 'n')
text(x = 0.34, y = 0.9, paste("No data available within selected date range"),
cex = 1.5, col = "black", adj=0.5)
}
else {
#count the number of rows that have the same session name
y = count(data[as.Date(data$datetime)>=start_date & as.Date(data$datetime)<=end_date,], 'session')
hist(y[,2],
main= "Histogram for Actions per session",
xlab = "Number of Actions per Session",
ylab = "Unique Session Count",
border = "black",
col = "pink",
breaks = 30,
xaxt='n'
)
#used to create a tick on the x axis for every other bucket
# round x/base to the nearest integer, scale back up, then add one extra base
mround <- function(x,base){
base*round(x/base) + base
}
round_max = mround(max(y[,2]),15) / 15
xtick<-seq(0, round_max*15, by = round_max)
axis(side=1, at=xtick, labels = FALSE)
# tick labels drawn rotated 45 degrees below the axis line
text(x=xtick,
par("usr")[3],
labels = xtick,
offset = 1.2,
srt = 45,
pos = 1,
xpd = TRUE)
}
}
)
# Horizontal bar chart of the 10 most frequent action values within the
# selected date range; shows a placeholder message when there are 10 or fewer
# distinct action values to rank.
output$actionfrequency <- renderPlot({
#pulling start and end dates
start_date = input$dateRange1[1]
end_date = input$dateRange1[2]
# action value of every logged event falling inside the date range
values=data[as.Date(data$datetime)>=start_date & as.Date(data$datetime)<=end_date,]$value
newvalue=as.character(values)
# frequency of each distinct action value (plyr::count)
counts = count(newvalue)
num_row=NROW(counts)
if (num_row <= 10) {
# blank canvas with a centered text message instead of a chart
par(mar = c(0,0,0,0))
plot(c(0, 1), c(0, 1), ann = F, bty = 'n', type = 'n')
text(x = 0.34, y = 0.9, paste("Not enough data available within selected date range"),
cex = 1.5, col = "black", adj=0.5)
}
else{
#creating vectors for graphs
# indices of the 10 largest frequencies: last 10 positions of the sort order
alpha=dim(counts)[1]-9
beta = dim(counts)[1]
a=counts$freq[order(counts$freq)[alpha:beta]]
b=as.character(counts$x[order(counts$freq)[alpha:beta]])
#plot top 10 actions
# wide left margin so long action labels fit; las=2 rotates axis labels
par(mar=c(5,16,4,2))
par(las=2)
barplot(a, main="Action Frequency",horiz=TRUE, xlab="number of actions" ,
names.arg=b)
}
})
# Burndown curves ("actions remaining over time") for four representative
# sessions: the sessions whose total action counts are closest to the 25th,
# 50th, 75th and 100th percentiles of actions-per-session.
output$sessionQuartileLine<-renderPlot({
#filtering the data on user input dateRange
start_date = input$dateRange1[1]
end_date = input$dateRange1[2]
#set up data
y = data[as.Date(data$datetime)>=start_date & as.Date(data$datetime)<=end_date,]
number_of_rows = NROW(y)
if (number_of_rows == 0) {
# blank canvas with a text message instead of a chart
par(mar = c(0,0,0,0))
plot(c(0, 1), c(0, 1), ann = F, bty = 'n', type = 'n')
text(x = 0.34, y = 0.9, paste("No data available within selected date range"),
cex = 1.5, col = "black", adj=0.5)
}
else {
#quartile_sample returns the rows of the single session whose action count is
#closest to the requested quartile value, plus a countdown column used as the
#burndown y-axis
quartile_sample <- function(quartile, b ){
index = which.min(abs(b$freq-quartile))
session_id_of_quartile = b$session[index]
count = b$freq[index]
count_vec = seq(count, 1, -1)
quartile_action_mat <- y[y$session == session_id_of_quartile,]
quartile_action_mat$actions_remaining <- count_vec
return (quartile_action_mat)
}
#calculate elapsed seconds of each action relative to its session start
y$to_second <- (y$start_time)/1000
y$start_seconds<- anytime(y$to_second)
y$diff_time <- difftime(y$datetime,y$start_seconds,units="secs")
y$diff_time_ceiling <- ceiling(y$diff_time)
#find actions per session
b = count(y, "session")
# (removed unused sort.list/data.table/linetype scaffolding that was never read)
#get quantiles of actions-per-session. quantile() returns the five-number
#summary named "0%","25%","50%","75%","100%", so the quartiles are elements
#2 through 5. (FIX: the previous code read elements 1-4, i.e. the 0th-75th
#percentiles, so each plotted curve was labelled one quartile too high.)
q = quantile(b$freq)
q25 = as.numeric(q[[2]])
q50 = as.numeric(q[[3]])
q75 = as.numeric(q[[4]])
q100 = as.numeric(q[[5]])
#get 25 quartile information
q25_action = quartile_sample(q25, b)
##get 50 quartile information
q50_action = quartile_sample(q50, b)
##get 75 quartile information
q75_action = quartile_sample(q75,b)
##get 100 quartile information
q100_action = quartile_sample(q100,b)
#### setting up plots to print ####
colors <- rainbow(4)
# axes sized to the largest (100%) session so every curve fits
plot(q50_action$diff_time_ceiling,q50_action$actions_remaining,
type = "o",
ylim = c(0,max(q100_action$actions_remaining)),
xlim = c(0,max(q100_action$diff_time_ceiling)),
xlab = "Seconds",
ylab = "Actions Remaining in Session (Burndown)",
col = colors[2],
main= "Quartile User Behavior" )
lines(q75_action$diff_time_ceiling, q75_action$actions_remaining,
type="o",
col=colors[3])
lines(q100_action$diff_time_ceiling, q100_action$actions_remaining,
type="o",
col=colors[4])
lines(q25_action$diff_time_ceiling, q25_action$actions_remaining,
type="o",
col=colors[1])
legend("topright",
legend=c("25% User", "50% User", "75% User", "100% User"),
col=colors,
lty=1,
cex=0.8)
}
}
)
output$sessionOverTime <- renderPlot({
  # Number of distinct sessions started per calendar week, restricted to
  # the user-selected date range. One row per session is kept by dropping
  # duplicated session ids before counting.
  from <- input$dateRange1[1]
  to <- input$dateRange1[2]
  in_range <- as.Date(data$datetime) >= from & as.Date(data$datetime) <= to
  session_firsts <- data[!duplicated(data$session) & in_range, ]
  if (NROW(session_firsts) == 0) {
    # Placeholder panel when the filter leaves nothing to plot.
    par(mar = c(0, 0, 0, 0))
    plot(c(0, 1), c(0, 1), ann = F, bty = 'n', type = 'n')
    text(x = 0.34, y = 0.9, paste("No data available within selected date range"),
         cex = 1.5, col = "black", adj = 0.5)
  } else {
    # Bucket each session's date into its (Monday-starting) week.
    session_firsts$Date <- as.Date(session_firsts$datetime)
    session_firsts$Week <- cut(session_firsts$Date,
                               breaks = "week",
                               start.on.monday = TRUE)
    weekly <- count(session_firsts, 'Week')
    n_weeks <- length(weekly[, 2])
    plot(weekly[, 2],
         main = "Sessions Per Week",
         type = "o",
         col = "coral2",
         xlab = "",
         ylab = "Number of sessions",
         xaxt = 'n')
    # Label the x axis with the week-start dates, rotated for legibility.
    axis(1, at = 1:n_weeks, labels = weekly[, 1], las = 2)
    title(xlab = "Weeks", mgp = c(7, 1, 0))
  }
}
)
output$actionPerSecond <- renderPlot({
  # How many actions happen at each second of elapsed session time,
  # aggregated over every session in the selected date range.
  from <- input$dateRange1[1]
  to <- input$dateRange1[2]
  events <- data[as.Date(data$datetime) >= from & as.Date(data$datetime) <= to, ]
  if (NROW(events) == 0) {
    # Placeholder panel when the filter leaves nothing to plot.
    par(mar = c(0, 0, 0, 0))
    plot(c(0, 1), c(0, 1), ann = F, bty = 'n', type = 'n')
    text(x = 0.34, y = 0.9, paste("No data available within selected date range"),
         cex = 1.5, col = "black", adj = 0.5)
  } else {
    # Seconds elapsed since session start for each event
    # (start_time is epoch milliseconds).
    events$to_second <- (events$start_time) / 1000
    events$start_seconds <- anytime(events$to_second)
    events$diff_time <- difftime(events$datetime, events$start_seconds, units = "secs")
    events$diff_time_ceiling <- ceiling(events$diff_time)
    # Tally actions per elapsed second and draw the first 150 seconds.
    per_second <- count(events, 'diff_time_ceiling')
    plot(per_second[, 2],
         main = "Average Actions Frequency Per Session (Max 150 seconds)",
         type = "l",
         col = "deeppink4",
         xlab = "Seconds",
         ylab = "Number of actions per second",
         xlim = c(0, 150))
  }
})
sessionDurationInfo <- function(){
  # Per-session durations in seconds, tabulated as a frequency count of the
  # duration rounded up to whole seconds. Returns a plyr::count() data frame
  # (columns: rounded_second, freq), or NULL when the selected date range
  # contains no events -- callers test the result with plyr::empty().
  start_date = input$dateRange1[1]
  end_date = input$dateRange1[2]
  in_range = as.Date(data$datetime) >= start_date & as.Date(data$datetime) <= end_date
  data = data[in_range, ]  # local copy; the global `data` is untouched
  if (NROW(data) == 0) {
    return(NULL)
  }
  # Columns 6 and 7 hold each session's start/end epoch-millisecond stamps
  # and are constant within a session, so one row per session id suffices.
  # BUG FIX: the previous row-by-row transition scan recorded the *new*
  # session at each boundary, which skipped the first session, recorded the
  # last one twice, and broke entirely for a single-row data frame
  # (2:nrow(data) iterating backwards).
  sessions = data[!duplicated(data[, 2]), ]
  duration_secs = (as.numeric(sessions[, 7]) - as.numeric(sessions[, 6])) / 1000
  rounded = ceiling(duration_secs)
  n = count(data.frame(rounded_second = rounded), "rounded_second")
  return(n)
}
output$sessionDuration <- renderPlot({
  # Horizontal bar chart of the session-duration distribution.
  duration_counts <- sessionDurationInfo()
  if (empty(duration_counts)) {
    # Placeholder panel when the filter leaves nothing to plot.
    par(mar = c(0, 0, 0, 0))
    plot(c(0, 1), c(0, 1), ann = F, bty = 'n', type = 'n')
    text(x = 0.34, y = 0.9, paste("No data available within selected date range"),
         cex = 1.5, col = "black", adj = 0.5)
  } else {
    # Bars are horizontal: frequency along x, duration labels along y.
    barplot(duration_counts[, 2],
            main = "Session Duration - Barplot",
            xlab = "Frequency",
            ylab = "Session Length in seconds",
            names.arg = duration_counts[, 1],
            border = "black",
            col = "deepskyblue3",
            horiz = TRUE,
            ylim = c(0, 50))
  }
})
output$sessionDurationTypeH <- renderPlot({
  # Session-duration distribution drawn as vertical spikes (type = "h"),
  # limited to the first 50 duration buckets.
  duration_counts <- sessionDurationInfo()
  if (empty(duration_counts)) {
    # Placeholder panel when the filter leaves nothing to plot.
    par(mar = c(0, 0, 0, 0))
    plot(c(0, 1), c(0, 1), ann = F, bty = 'n', type = 'n')
    text(x = 0.34, y = 0.9, paste("No data available within selected date range"),
         cex = 1.5, col = "black", adj = 0.5)
  } else {
    plot(duration_counts[, 2],
         type = "h",
         main = "Session Duration - Plot",
         xlab = "Session Length in seconds",
         ylab = "Frequency",
         xlim = c(0, 50))
  }
})
# Render the per-session event sequence chart for the chosen session id.
output$SequencePlot <- renderPlot(height = 700, width = 1200, units="px", {
  chosen <- input$select_id
  # The selector widget appends " Count: N" to each id; strip it off to
  # recover the raw session id before plotting.
  plot_sequence_chart(sub(" Count\\:.*", "", chosen))
})
######################## Function to produce a sequence plot for a given session id ###################################
## Draws a ggplot timeline ("sequence chart") for one logging session: frame
## and example selection, playback on/off, mute state, playhead and keyframe
## drags, and point-in-time user actions. Each event category becomes a facet
## row of coloured rectangles spanning the event's time extent.
## Reads the globals `nonredundant_data` (cleaned log; column 2 = session id,
## column 4 = animation, column 8/V8 = event name, column 9/V9 = timestamp)
## and `colour_reference` (event label -> fill colour lookup).
plot_sequence_chart <- function(id) {
  # Get all the rows for this session id
  session_rows = as.data.frame(nonredundant_data[nonredundant_data[,2]==id,])
  session_rows = na.omit(session_rows)
  num_rows = NROW(session_rows)
  # Can only plot if we have at least one event
  if(num_rows != 0) {
    # We use the first row to identify animation because the value is identical across all rows of a session id
    animation = session_rows[1,4]
    session_start_string = session_rows[1,9]
    session_start = as.POSIXct(session_start_string)
    #If there's only 1 event in the session, assign an arbitrary session length of 0.5 seconds
    if(num_rows == 1) {
      session_end_string = strftime(session_start + 0.5)
    }
    else {
      session_end_string = session_rows[num_rows, 9]
    }
    session_end = as.POSIXct(session_end_string)
    session_start_date = as.Date(session_start_string)
    time_span = difftime(session_end, session_start, units = "secs")
    if(time_span >= 0) {
      # For events that happen at a single point in time we will use this as a time span
      # We also use this for the last event regardless of its nature because we have no other way of calculating end time
      time_for_event = time_span/140
      # We need to adjust the timespan and session end time to account for the time we'll be adding
      session_end_string = strftime(session_end + time_for_event)
      time_span = time_span + time_for_event
      # Calculate spacing for our x axis ticks (we use 10 ticks)
      x_axis_tick_breaks = paste(as.character(time_span/10), "sec", sep=" ")
      ################################## Deal with main/exmample frame view events ###########################################
      # Each frame-selection strip extends until the next selection (or the
      # session end), hence the appended session_end_string sentinel below.
      selected_frame_rows = as.data.frame(subset(session_rows, grepl("VTICON_SELECT_", session_rows$V8), drop = FALSE))
      if(NROW(selected_frame_rows) != 0) {
        timestamps_frame = as.matrix(selected_frame_rows[,9])
        timestamps_frame = rbind(timestamps_frame, as.matrix(session_end_string))
        num_timestamps_frame = NROW(timestamps_frame)
        frame_select_events = as.matrix(selected_frame_rows[,8])
        frame_select_events = sapply(frame_select_events, gsub, pattern = "VTICON_SELECT_", replacement = "Frame Selected: ", fixed = TRUE)
        event_types_frame = matrix('Frame Selected', nrow = NROW(frame_select_events), ncol = 1 )
        # NOTE(review): `1:n-1` parses as (1:n)-1 = 0:(n-1); index 0 is
        # silently dropped, so this selects elements 1..n-1 as intended --
        # but only by accident of R's zero-index semantics. Parenthesize as
        # 1:(n-1) for clarity. Same pattern recurs for examples below.
        xmin_frame = timestamps_frame[1:num_timestamps_frame-1]
        xmax_frame = timestamps_frame[2:num_timestamps_frame]
        ymax_frame = matrix(0.33, nrow = num_timestamps_frame-1, ncol = 1)
      }
      # If neither frames were selected during the session we don't want to add any events to our sequence chart
      else {
        frame_select_events = matrix(nrow = 0, ncol = 1)
        event_types_frame = matrix(nrow = 0, ncol = 1)
        xmin_frame = matrix(nrow = 0, ncol = 1)
        xmax_frame = matrix(nrow = 0, ncol = 1)
        ymax_frame = matrix(nrow = 0, ncol = 1)
      }
      ################################## Deal with playback start and end events ###########################################
      # NOTE(review): play_rows is computed but never used.
      play_rows = as.data.frame(subset(session_rows, grepl("PLAYBACK_SETPLAY_true|PLAYBACK_PLAYEND", session_rows$V8), drop = FALSE))
      play_start_rows = as.data.frame(subset(session_rows, grepl("PLAYBACK_SETPLAY_true", session_rows$V8), drop = FALSE))
      play_end_rows = as.data.frame(subset(session_rows, grepl("PLAYBACK_SETPLAY_false|PLAYBACK_PLAYEND", session_rows$V8), drop = FALSE))
      # If the number of start play events equals the number of stop play events (expected), no need to manipulate data
      if(NROW(play_start_rows) == NROW(play_end_rows) && NROW(play_start_rows) != 0) {
        play_events = matrix('Playback On', nrow = NROW(play_start_rows), ncol = 1 )
        event_types_play = matrix('Playback On', nrow = NROW(play_start_rows), ncol = 1 )
        xmin_play = as.matrix(play_start_rows[,9])
        xmax_play = as.matrix(play_end_rows[,9])
        ymax_play = matrix(0.33, nrow = NROW(play_start_rows), ncol = 1)
      }
      # If the number of start play events is one greater than the number of stop play events (unusual but has happened),
      # we should add a fake play end event at the session end time
      else if(NROW(play_start_rows) == NROW(play_end_rows) + 1 && NROW(play_start_rows) != 0) {
        play_events = matrix('Playback On', nrow = NROW(play_start_rows), ncol = 1 )
        event_types_play = matrix('Playback On', nrow = NROW(play_start_rows), ncol = 1 )
        xmin_play = as.matrix(play_start_rows[,9])
        xmax_play = as.matrix(play_end_rows[,9])
        xmax_play = rbind(xmax_play, as.matrix(session_end_string))
        ymax_play = matrix(0.33, nrow = NROW(play_start_rows), ncol = 1)
      }
      # If neither frames were selected during the session we don't want to add any events to our sequence chart
      # Also catch any uncrompehsible data here
      else {
        play_events = matrix(nrow = 0, ncol = 1)
        event_types_play = matrix(nrow = 0, ncol = 1)
        xmin_play = matrix(nrow = 0, ncol = 1)
        xmax_play = matrix(nrow = 0, ncol = 1)
        ymax_play = matrix(nrow = 0, ncol = 1)
      }
      ################################ Deal with adjsuting main playhead start and end events ###########################################
      drag_start_rows = as.data.frame(subset(session_rows, grepl("STARTDRAG", session_rows$V8), drop = FALSE))
      drag_stop_rows = as.data.frame(subset(session_rows, grepl("STOPDRAG", session_rows$V8), drop = FALSE))
      # 3 scenarios: 1. No start events 2. Equal number of start and stop events 3. One more start event
      xmin_playhead_main = as.character(subset(drag_start_rows$V9, grepl("STARTDRAG_PLAYHEAD_main", drag_start_rows$V8), drop = FALSE))
      # NOTE(review): suspected bug -- the mask below is computed from
      # drag_start_rows$V8 (pattern "STARTDRAG_...") but applied to
      # drag_stop_rows$V9. It only pairs the right stop times when every
      # i-th STARTDRAG matches the i-th STOPDRAG overall; presumably it was
      # meant to be grepl("STOPDRAG_PLAYHEAD_main", drag_stop_rows$V8).
      # The same pattern recurs for the example playhead and keyframes.
      xmax_playhead_main = as.character(subset(drag_stop_rows$V9, grepl("STARTDRAG_PLAYHEAD_main", drag_start_rows$V8), drop = FALSE))
      # Check to make sure we don't have Scenario 1 and check to make sure we have either Scenario 2 or 3
      if(NROW(xmin_playhead_main) != 0 && sum(is.na(xmax_playhead_main)) <=1) {
        # Add a fake stopdrag event timestamp at the session end time if the last startdrag doesn't have an accompanying end drag
        if(is.na(xmax_playhead_main[NROW(xmax_playhead_main)])) {
          xmax_playhead_main[NROW(xmax_playhead_main)] <- as.character(session_end_string)
        }
        ymax_playhead_main = matrix(0.33, nrow = NROW(xmin_playhead_main), ncol = 1)
        playhead_main_events = matrix('Dragging Main Playhead', nrow = NROW(xmin_playhead_main), ncol = 1 )
        event_types_playhead_main = matrix('Playhead Drag', nrow = NROW(xmin_playhead_main), ncol = 1 )
      }
      # No adjusting playhead rows. Also catch uninterprettable data
      else {
        playhead_main_events = matrix(nrow = 0, ncol = 1)
        event_types_playhead_main = matrix(nrow = 0, ncol = 1)
        xmin_playhead_main = matrix(nrow = 0, ncol = 1)
        xmax_playhead_main = matrix(nrow = 0, ncol = 1)
        ymax_playhead_main = matrix(nrow = 0, ncol = 1)
      }
      ################################ Deal with playhead example dragging events ###########################################
      drag_start_rows = as.data.frame(subset(session_rows, grepl("STARTDRAG", session_rows$V8), drop = FALSE))
      drag_stop_rows = as.data.frame(subset(session_rows, grepl("STOPDRAG", session_rows$V8), drop = FALSE))
      # 3 scenarios: 1. No start events 2. Equal number of start and stop events 3. One more start event
      xmin_playhead_example = as.character(subset(drag_start_rows$V9, grepl("STARTDRAG_PLAYHEAD_example", drag_start_rows$V8), drop = FALSE))
      # NOTE(review): same start/stop mask mismatch as the main playhead above.
      xmax_playhead_example = as.character(subset(drag_stop_rows$V9, grepl("STARTDRAG_PLAYHEAD_example", drag_start_rows$V8), drop = FALSE))
      if(NROW(xmin_playhead_example) != 0 && sum(is.na(xmax_playhead_example)) <=1) {
        # Add a fake stopdrag event timestamp at the session end time if the last startdrag doesn't have an accompanying end drag
        if(is.na(xmax_playhead_example[NROW(xmax_playhead_example)])) {
          xmax_playhead_example[NROW(xmax_playhead_example)] <- as.character(session_end_string)
        }
        ymax_playhead_example = matrix(0.33, nrow = NROW(xmin_playhead_example), ncol = 1)
        playhead_example_events = matrix('Dragging Example Playhead', nrow = NROW(xmin_playhead_example), ncol = 1 )
        event_types_playhead_example = matrix('Playhead Drag', nrow = NROW(xmin_playhead_example), ncol = 1 )
      }
      # No adjusting playhead rows. Also catch uninterprettable data
      else {
        playhead_example_events = matrix(nrow = 0, ncol = 1)
        event_types_playhead_example = matrix(nrow = 0, ncol = 1)
        xmin_playhead_example = matrix(nrow = 0, ncol = 1)
        xmax_playhead_example = matrix(nrow = 0, ncol = 1)
        ymax_playhead_example = matrix(nrow = 0, ncol = 1)
      }
      ################################ Deal with keyframe dragging events ###########################################
      drag_start_rows = as.data.frame(subset(session_rows, grepl("STARTDRAG", session_rows$V8), drop = FALSE), stringsAsFactors=FALSE)
      drag_stop_rows = as.data.frame(subset(session_rows, grepl("STOPDRAG", session_rows$V8), drop = FALSE), stringsAsFactors=FALSE)
      # 3 scenarios: 1. No start events 2. Equal number of start and stop events 3. One more start event (at the end)
      xmin_keyframe = as.character(subset(drag_start_rows$V9, grepl("STARTDRAG_KEYFRAME", drag_start_rows$V8), drop = FALSE), stringsAsFactors=FALSE)
      # NOTE(review): same start/stop mask mismatch as the playheads above.
      xmax_keyframe = as.character(subset(drag_stop_rows$V9, grepl("STARTDRAG_KEYFRAME", drag_start_rows$V8), drop = FALSE), stringsAsFactors=FALSE)
      # NOTE(review): suspected misplaced parenthesis -- sum(is.na(x) <= 1)
      # sums an all-TRUE vector (its length), unlike sum(is.na(x)) <= 1 used
      # for the playhead branches above; this condition is effectively
      # NROW(xmax_keyframe) whenever xmin is non-empty.
      if(NROW(xmin_keyframe) != 0 && sum(is.na(xmax_keyframe) <=1)) {
        # Add a fake stopdrag event timestamp at the session end time if the last startdrag doesn't have an accompanying end drag
        if(is.na(xmax_keyframe[NROW(xmax_keyframe)])) {
          xmax_keyframe[NROW(xmax_keyframe)] <- as.character(session_end_string)
        }
        ymax_keyframe = matrix(0.33, nrow = NROW(xmin_keyframe), ncol = 1)
        keyframe_events = matrix('Dragging Keyframe', nrow = NROW(xmin_keyframe), ncol = 1 )
        event_types_keyframe = matrix('Keyframe Drag', nrow = NROW(xmin_keyframe), ncol = 1 )
      }
      # No keyframe dragging rows. Also catch uninterprettable data
      else {
        keyframe_events = matrix(nrow = 0, ncol = 1)
        event_types_keyframe = matrix(nrow = 0, ncol = 1)
        xmin_keyframe = matrix(nrow = 0, ncol = 1)
        xmax_keyframe = matrix(nrow = 0, ncol = 1)
        ymax_keyframe = matrix(nrow = 0, ncol = 1)
      }
      ################################## Deal with selected_example events ###########################################
      selected_example_rows = as.data.frame(subset(session_rows, grepl("EXAMPLE_SELECT_", session_rows$V8), drop = FALSE))
      # If no examples were selected we want to show the default example as selected for the entire session
      if(NROW(selected_example_rows) == 0) {
        example_select_events = as.matrix("Example Selected: v4")
        event_types_examples = matrix('Example Selected', nrow = 1, ncol = 1 )
        xmin_examples = session_start_string
        xmax_examples = session_end_string
        ymax_examples = 0.33
      }
      else {
        # If the first event is an example selection (i.e. the session starts at the moment an example is selected),
        # we do not need to add an event to convey that an example is selected by default
        if(selected_example_rows[1,9] == session_start_string) {
          timestamps_examples = as.matrix(selected_example_rows[,9])
          example_select_events = as.matrix(selected_example_rows[,8])
        }
        # If the first event is not an example selection we must add a fake example selection event (and an
        # associated timestamp) to express the default example that is selected
        else {
          # If the first example selection event is the default example ("v4"), we don't want to
          # show two events for the same example selection so instead, we just change the
          # start time of the first event to the session start time
          if(selected_example_rows[1,8] == "EXAMPLE_SELECT_v4") {
            timestamps_examples = as.matrix(selected_example_rows[,9])
            timestamps_examples[1,1] = as.matrix(session_start_string)
            example_select_events = as.matrix(selected_example_rows[,8])
          }
          # If the first example selection event is not selecting the default example (v4), we add in
          # a fake example selection event at the session start time to show that v4 is selected by default
          # until a different example is selected
          else {
            timestamps_examples = as.matrix(session_start_string)
            timestamps_examples = rbind(timestamps_examples, as.matrix(selected_example_rows[,9]))
            example_select_events = as.matrix("EXAMPLE_SELECT_v4")
            example_select_events = rbind(example_select_events, as.matrix(selected_example_rows[,8]))
          }
        }
        # Sentinel end time so the last selection strip runs to session end.
        timestamps_examples = rbind(timestamps_examples, as.matrix(session_end_string))
        num_timestamps = NROW(timestamps_examples)
        xmin_examples = timestamps_examples[1:num_timestamps-1]
        xmax_examples = timestamps_examples[2:num_timestamps]
        ymax_examples = matrix(0.33, nrow = num_timestamps-1, ncol = 1)
        example_select_events = sapply(example_select_events, gsub, pattern = "EXAMPLE_SELECT_", replacement = "Example Selected: ", fixed = TRUE)
        event_types_examples = matrix('Example Selected', nrow = NROW(example_select_events), ncol = 1 )
      }
      ################################## Deal with mute set to true/false ###########################################
      mute_false_rows = as.data.frame(subset(session_rows, grepl("PLAYBACK_MUTE_false", session_rows$V8), drop = FALSE))
      mute_true_rows = as.data.frame(subset(session_rows, grepl("PLAYBACK_MUTE_true", session_rows$V8), drop = FALSE))
      # Since PLAYBACK_MUTE always starts as true and the value is logged when the checkbox is selected/unselected,
      # there are only three possible scenarios: 1. Muted is true the whole session (no logged event) 2. There are equal
      # numbers of muted = true and muted = false events 3. There is exactly one more muted = false event than muted = true events
      # If the number of PLAYBACK_MUTE_false events equals the number of PLAYBACK_MUTE_true events we have the
      # same number of start and end times so there is no need to manipulate the data
      if(NROW(mute_false_rows) == NROW(mute_true_rows) && NROW(mute_false_rows) != 0 ) {
        mute_events = matrix('Playback Unmuted', nrow = NROW(mute_false_rows), ncol = 1 )
        event_types_mute = matrix('Playback Unmuted', nrow = NROW(mute_false_rows), ncol = 1 )
        xmin_mute = as.matrix(mute_false_rows[,9])
        xmax_mute = as.matrix(mute_true_rows[,9])
        ymax_mute = matrix(0.33, nrow = NROW(mute_false_rows), ncol = 1)
      }
      # If there is one more PLAYBACK_MUTE_false event than PLAYBACK_MUTE_true events, we will add a fake
      # PLAYBACK_MUTE_true event timestamp at the session end time so we have equal numbers of start and end times
      else if(NROW(mute_false_rows) == NROW(mute_true_rows) + 1 && NROW(mute_false_rows) != 0 ) {
        mute_events = matrix('Playback Unmuted', nrow = NROW(mute_false_rows), ncol = 1 )
        event_types_mute = matrix('Playback Unmuted', nrow = NROW(mute_false_rows), ncol = 1 )
        xmin_mute = as.matrix(mute_false_rows[,9])
        xmax_mute = as.matrix(mute_true_rows[,9])
        xmax_mute = rbind(xmax_mute, as.matrix(session_end_string))
        ymax_mute = matrix(0.33, nrow = NROW(mute_false_rows), ncol = 1)
      }
      # If the mute checkbox is never changed (i.e. is muted the entire session), don't produce a strip in the chart
      # Also catch any uncrompehsible data here
      else {
        mute_events = matrix(nrow = 0, ncol = 1)
        event_types_mute = matrix(nrow = 0, ncol = 1)
        xmin_mute = matrix(nrow = 0, ncol = 1)
        xmax_mute = matrix(nrow = 0, ncol = 1)
        ymax_mute = matrix(nrow = 0, ncol = 1)
      }
      ################################## Deal with other events ###########################################
      # Everything not claimed by a category above is a point-in-time user
      # action; each gets a short synthetic duration so it is visible.
      rows_other = as.data.frame(
        subset(session_rows,
               !grepl("VTICON_SELECT_", session_rows$V8) &
                 !grepl("EXAMPLE_SELECT_", session_rows$V8) &
                 !grepl("PLAYBACK_MUTE_", session_rows$V8) &
                 !grepl("PLAYBACK_PLAYEND", session_rows$V8) &
                 !grepl("PLAYBACK_SETPLAY_", session_rows$V8) &
                 !grepl("STARTDRAG", session_rows$V8) &
                 !grepl("STOPDRAG", session_rows$V8),
               drop = FALSE), stringsAsFactors=FALSE)
      if(NROW(rows_other) != 0) {
        # Map raw event codes to human-readable labels.
        events_other = matrix(rows_other$V8)
        events_other[grep("COPY_main", events_other)] <- "Copy Main"
        events_other[grep("COPY_example", events_other)] <- "Copy Example"
        events_other[grep("PASTE", events_other)] <- "Paste"
        events_other[grep("VTICON_DELETEKEYFRAMES", events_other)] <- "Delete Keyframe"
        events_other[grep("VTICON_NEWKEYFRAME", events_other)] <- "New Keyframe"
        events_other[grep("START_TASK", events_other)] <- "Start Task"
        event_types_other = matrix('User Actions', nrow = NROW(events_other), ncol = 1 )
        xmin_other = as.POSIXct(rows_other[,9])
        xmax_other = xmin_other
        for(i in 1:NROW(xmax_other)) {
          # Make the end time time_for_event seconds after the start time or if the next event start
          # is less than time_for_event seconds away make the end time the next event start time -
          # for the last event just add time_for_event seconds
          if(i == NROW(xmax_other)) {
            xmax_other[i] = xmin_other[i] + time_for_event
          }
          else {
            current_event_start = xmin_other[i]
            next_event_start = xmin_other[i + 1]
            time_diff = difftime(next_event_start, current_event_start, units = "secs")
            if(time_diff > time_for_event) {
              xmax_other[i] = xmin_other[i] + time_for_event
            }
            else {
              xmax_other[i] = xmin_other[i] + time_diff
            }
          }
        }
        xmin_other = strftime(xmin_other)
        xmax_other = strftime(xmax_other)
        ymax_other = matrix(0.33, nrow = NROW(events_other), ncol = 1)
      }
      else {
        events_other = matrix(nrow = 0, ncol = 1)
        event_types_other = matrix(nrow = 0, ncol = 1)
        xmin_other = matrix(nrow = 0, ncol = 1)
        xmax_other = matrix(nrow = 0, ncol = 1)
        ymax_other = matrix(nrow = 0, ncol = 1)
      }
      ####################################### Create data frame to pass to ggplot ###################################
      # Stack every category (same order in all four rbinds so rows align).
      event_types = rbind(
        as.matrix(event_types_other),
        as.matrix(event_types_examples),
        as.matrix(event_types_frame),
        as.matrix(event_types_mute),
        as.matrix(event_types_play),
        as.matrix(event_types_playhead_main),
        as.matrix(event_types_playhead_example),
        as.matrix(event_types_keyframe))
      events = rbind(
        as.matrix(events_other),
        as.matrix(example_select_events),
        as.matrix(frame_select_events),
        as.matrix(mute_events),
        as.matrix(play_events),
        as.matrix(playhead_main_events),
        as.matrix(playhead_example_events),
        as.matrix(keyframe_events))
      xmin = as.POSIXct(rbind(
        as.matrix(xmin_other),
        as.matrix(xmin_examples),
        as.matrix(xmin_frame),
        as.matrix(xmin_mute),
        as.matrix(xmin_play),
        as.matrix(xmin_playhead_main),
        as.matrix(xmin_playhead_example),
        as.matrix(xmin_keyframe)))
      xmax = as.POSIXct(rbind(
        as.matrix(xmax_other),
        as.matrix(xmax_examples),
        as.matrix(xmax_frame),
        as.matrix(xmax_mute),
        as.matrix(xmax_play),
        as.matrix(xmax_playhead_main),
        as.matrix(xmax_playhead_example),
        as.matrix(xmax_keyframe)))
      ymax = rbind(
        as.matrix(ymax_other),
        as.matrix(ymax_examples),
        as.matrix(ymax_frame),
        as.matrix(ymax_mute),
        as.matrix(ymax_play),
        as.matrix(ymax_playhead_main),
        as.matrix(ymax_playhead_example),
        as.matrix(ymax_keyframe))
      # NOTE: this local `data` shadows the app's global `data` inside this
      # function only.
      data = data.frame(
        subject = event_types,
        Events = events,
        xmin=xmin,
        xmax=xmax,
        ymin = 0,
        ymax = ymax)
      ######################################## Construct colour vector for events ######################################
      # One fill colour per distinct event label, alphabetical to match the
      # order ggplot assigns fills; unknown labels fall back to "Other".
      event_types_unique = sort(as.character(unique(events)))
      fill_colours = vector(mode = "character", length=NROW(event_types_unique))
      for(i in 1: NROW(event_types_unique)) {
        event_type = event_types_unique[i]
        if(!is.null(colour_reference[[event_type]])) {
          fill_colours[i] = as.character(colour_reference[[event_type]])
        } else {
          fill_colours[i] = as.character(colour_reference[["Other"]])
        }
      }
      ################################################### Make the plot ############################################
      user_sequence_plot <-
        ggplot(data, aes(xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax, fill = Events)) +
        scale_fill_manual(values = alpha(fill_colours, 0.92)) +
        theme_bw() +
        labs(title=paste("Session ID:",id, sep=" "),
             subtitle=paste("Session Timespan:",round(time_span, digits = 2), "seconds  Start Date:", session_start_date, " Animation:", animation , sep=" ")) +
        geom_rect(colour = "black", size = 0.3) +
        facet_grid(subject ~ ., space = "free", scale = "free") +
        theme(axis.ticks.y=element_blank(),
              axis.text.y=element_blank(),
              text = element_text(size=12),
              axis.text.y.right = element_text(size = 4),
              legend.text = element_text(size = 14),
              plot.subtitle = element_text(size =16),
              plot.title = element_text(color="#666666", face="bold", size=24, hjust=0)) +
        scale_x_datetime(labels = date_format("%H:%M:%S"),
                         breaks = date_breaks(x_axis_tick_breaks))
      print(user_sequence_plot)
    }
    else {
      # Negative timespan: timestamps are inconsistent; show a message panel.
      plot(1,1,col="white",xaxt='n',yaxt='n',ann=FALSE)
      text(1,1,"The timespan for this session's logged events is invalid. Please select another session.", cex=1.5)
    }
  }
  else {
    # No (non-NA) rows for this id; show a message panel.
    plot(1,1,col="white",xaxt='n',yaxt='n',ann=FALSE)
    text(1,1,"This session ID has no valid logged data. Please select another session.", cex=1.5)
  }
}
})
|
7dd1df7d35d0a8a44e86b5d7a196817ee1489949 | 7f72ac13d08fa64bfd8ac00f44784fef6060fec3 | /books/rgui/ch-WebGUIs/tmp/simpleapp3/app.R | 8dea885103126b8fa48b2a1c2eed4a644c029468 | [] | no_license | lawremi/RGtk2 | d2412ccedf2d2bc12888618b42486f7e9cceee43 | eb315232f75c3bed73bae9584510018293ba6b83 | refs/heads/master | 2023-03-05T01:13:14.484107 | 2023-02-25T15:19:06 | 2023-02-25T15:20:41 | 2,554,865 | 14 | 9 | null | 2023-02-06T21:28:56 | 2011-10-11T11:50:22 | R | UTF-8 | R | false | false | 2,081 | r | app.R | ## for older versions of R
## Back-compatibility shim: define grepl() on very old R installations.
## NOTE(review): unlike base grepl(), this fallback returns a single
## TRUE/FALSE rather than a vector; acceptable here because every caller
## in this script passes a scalar `x`.
if(!exists("grepl"))
  grepl <- function(pattern, x,...) length(grep(pattern, x)) > 0
#' Similar to URLencode, but escapes a string for inclusion in HTML by
#' turning &, ", < and > into their character entities. `&` is replaced
#' first so the entities introduced by the later replacements are not
#' themselves re-escaped.
HTMLencode <- function(str) {
  str <- as.character(str)
  entities <- c(
    "&" = "&",
    '"' = """,
    "<" = "<",
    ">" = ">"
  )
  for (ch in names(entities)) {
    str <- gsub(ch, entities[[ch]], str, fixed = TRUE)
  }
  str
}
#' get with default values -- identity if defined, else default.
#' `x` counts as undefined when it is NULL, zero-length, NA or "".
#' (The previous ifelse() version errored on zero-length input because
#' `is.na(character(0))` is a length-0 operand to `||`; a scalar `if` is
#' also the idiomatic form for a scalar condition.)
get_d <- function(x, default = "") {
  if (is.null(x) || length(x) == 0 || is.na(x) || x == "") default else x
}
## Dependencies: fail fast if either package is missing -- require() only
## returns FALSE and would defer the failure to the first brew()/hwriter
## call deep inside a request handler.
library(brew, quietly = TRUE)
library(hwriter, quietly = TRUE)
## Application root: brew templates are resolved relative to this directory.
dir <- "/var/www/GUI/simpleapp"
setwd(dir)
## The demo "database": one row per car, rownames serve as the ids.
df <- mtcars
## Render the error page. `e` is a condition object or plain list; the
## title added here and e's existing fields (e.g. message) become local
## variables inside error.brew via with(), and brew() writes the page to
## the response stream.
processError <- function(e) {
  e$title <- "Error"
  with(e, brew("error.brew"))
}
## functions to call up web pages in proper context
## Logon page. `title` is deliberately a local variable: brew() evaluates
## login-form.brew in this frame, so the template can reference it -- do
## not rename it.
showLogon <- function() {
  title <- "Logon"
  brew("login-form.brew")
}
## ID-selection page: exposes `nms` (the valid ids, i.e. the rownames of
## the data set) and `title` to select-id.brew via with(context, ...).
selectID <- function() {
  title <- "Select an ID"
  context <- list(nms=rownames(df)) # adjust
  with(context, brew("select-id.brew"))
}
## Detail page for one id. The id comes from the query string (GET$id) and
## is validated against the whitelist of rownames(df) before it is ever
## used to index the data, so arbitrary input never reaches the template.
showID <- function() {
  title <- "Show an ID"
  id <- GET$id
  if (is.null(id) || !(id %in% rownames(df))) { # use whitelist
    ## BUG FIX: previously execution fell through after processError() and
    ## still rendered show-id.brew with an all-NA row; stop here instead.
    ## The is.null() guard also covers a missing ?id= parameter, which
    ## would otherwise make the %in% test zero-length.
    processError(list(message = "id does not match"))
    return(invisible(NULL))
  }
  context <- list(d = df[id, ], id = id)
  with(context, brew("show-id.brew"))
}
## process and dispatch
## Resolve the user name: a freshly POSTed form value wins, otherwise fall
## back to the "name" cookie from a previous visit; "" means anonymous.
## (POST, COOKIES, SERVER and setContentType/setCookie are provided by the
## rapache request environment.)
user_name <- ""
if (!is.null(POST)) {
  user_name <- get_d(POST$name, "")
}
if(user_name == "" && !is.null(COOKIES)) {
  user_name <- get_d(COOKIES$name, "")
}
## start output
setContentType("text/html")
## Persist the name for subsequent requests; cookies must be set before
## any response body is written.
if(user_name != "")
  setCookie("name",user_name)
## Use path_info to dispatch on -- this is how django works: the first
## regexp that matches the request's path decides which page function
## runs, and anything unmatched falls through to the logon page. Errors
## raised while rendering are routed to processError().
urls <- list(select = list(call = "selectID", regexp = "^/select"),
             id     = list(call = "showID",   regexp = "^/id")
             )
default_call <- "showLogon"
path_info <- SERVER$path_info
dispatched <- FALSE
for (route in urls) {
  if (grepl(route$regexp, path_info)) {
    dispatched <- TRUE
    tryCatch(do.call(route$call, list()), error = processError)
    break
  }
}
if (!dispatched)
  tryCatch(do.call(default_call, list()), error = processError)
DONE
|
a82e412c0cda9a55ba05fd1118e65897f38406cc | 4b80c97c193bec524a24c1df7aba3af037660db8 | /tests/testthat/test-clean_observations.R | ec69bc713297745a73727db38883b194bcf7f8da | [] | no_license | cran/lvmisc | 39ce176be2c08be16b5d09cbf8feba08c5a6747a | 738fa1a1f295a508764102c7bdda46c9945b2a92 | refs/heads/master | 2023-04-01T01:52:11.303753 | 2021-04-05T14:20:02 | 2021-04-05T14:20:02 | 341,248,797 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 913 | r | test-clean_observations.R | test_that("error handling works", {
# (Body of test_that("error handling works", ...); the opening line sits
# immediately above this span.)
data <- data.frame(x = 1:10, y = 1:10)
# First argument must be a data frame, not a string.
expect_error(
  clean_observations("data", x, y, 1)
)
# Unknown id column raises a classed error naming the column.
expect_error(
  clean_observations(data, id, x, 1),
  "Column `id` not found in `data`.",
  class = "error_column_not_found"
)
# Unknown value column raises the same classed error.
expect_error(
  clean_observations(data, x, var, 1),
  "Column `var` not found in `data`.",
  class = "error_column_not_found"
)
# max_na must be a whole number; a double is rejected with a typed error.
expect_error(
  clean_observations(data, x, y, 1.5),
  "`max_na` must be interger; not double.",
  class = "error_argument_type"
)
})
test_that("clean_observations() works", {
  # Fixed seed makes the sampled NA pattern reproducible.
  set.seed(20200606)
  input_data <- data.frame(
    id = rep(1:5, each = 4),
    var = sample(c(1:5, rep(NA, 3)), 20, replace = TRUE)
  )
  # With max_na = 1, every observation of an id holding more than one
  # missing value is blanked out entirely.
  cleaned <- clean_observations(input_data, id, var, 1)
  expect_equal(
    cleaned[["var"]],
    c(NA, 1, 1, 4, NA, NA, NA, NA, 3, 5, 5, NA, 3, 1, NA, 1, NA, NA, NA, NA)
  )
})
|
b4a42ac8b9b1b8f9b1dadf5e693e21bdc28dde8f | 99f350773f2db550c3547b9e73424c2511cf654d | /scripts R/process imj.R | 89b0d581ec075009b5cc9a8bbb368cc16da926ca | [] | no_license | Clara-Reichert/R4meropy | e42f60cb596d3e9bebea746a24d44a114f71387d | c6cbe551a52a3d1c236a921ba49b78c70dd655c5 | refs/heads/master | 2022-11-24T02:31:35.393708 | 2020-08-04T13:05:44 | 2020-08-04T13:05:44 | 273,433,253 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 291 | r | process imj.R | IMJ<- read.csv("C:/Users/clare/Desktop/IMJ/SummaryV46.csv")
# Compare manual tag counts against ImageJ automatic counts per slice.
# (IMJ is read from SummaryV46.csv on the preceding line.)
C <- read.csv("C:/Users/clare/Desktop/mais/mais_csv/mais_train_counted2.csv", sep = ";")
C <- C[-23, ]  # drop row 23 -- presumably an excluded sample; TODO confirm
# Align the ImageJ summary rows to the manual-count rows by slice name.
M <- match(C$name, IMJ$Slice)
# BUG FIX: previously assigned IMJ$Count directly, leaving the match
# result M unused -- counts were only correct if both files happened to
# list the slices in the same order.
C$imagej <- IMJ$Count[M]
C$diff <- C$tag - C$imagej        # manual tags minus ImageJ counts
moyabs <- mean(abs(C$diff))       # mean absolute difference
moy <- mean(C$diff)               # mean signed difference (bias)
X <- subset(C, diff == 0)         # slices where both methods agree exactly
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.