content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Shiny server for the COVID-19 dashboard: sources all section scripts,
# shows a persistent warning about a known upstream data problem, and
# refreshes the data on an hourly timer.
server <- function(input, output) {
# Load every reactive section defined under sections/.
sourceDirectory("sections", recursive = TRUE)
# Persistent (duration = NULL) error banner about the flawed JHU source data.
showNotification("Attention: There is an issue with the source data from Johns Hopkins University. I am aware of
the issue and working on it. In the meantime the numbers shown in the dashboard are flawed.", duration = NULL,
type = "error")
# Trigger once an hour
dataLoadingTrigger <- reactiveTimer(3600000)
observeEvent(dataLoadingTrigger, {
updateData()
})
# NOTE(review): `data` is assigned locally and never used afterwards;
# presumably data_atDate() is called for its side effects — confirm intent.
observe({
data <- data_atDate(input$timeSlider)
})
} | /server.R | permissive | SharanjeetSingh/covid19_dashboard | R | false | false | 623 | r | server <- function(input, output) {
sourceDirectory("sections", recursive = TRUE)
showNotification("Attention: There is an issue with the source data from Johns Hopkins University. I am aware of
the issue and working on it. In the meantime the numbers shown in the dashboard are flawed.", duration = NULL,
type = "error")
# Trigger once an hour
dataLoadingTrigger <- reactiveTimer(3600000)
observeEvent(dataLoadingTrigger, {
updateData()
})
observe({
data <- data_atDate(input$timeSlider)
})
} |
# RGB-index helpers: convert an RGB raster into a vegetation index layer
# used for estimating green cover.

#' Green Leaf Index.
#'
#' Normalises the red/green/blue bands by their per-pixel sum, then computes
#' GLI = (2g - b - r) / (2g + b + r) and returns it as a raster layer.
#'
#' @param img A multi-band raster (e.g. a RasterBrick).
#' @param i,j,k Band indices for red, green and blue respectively.
#' @return A single raster layer holding the GLI values.
GLI <- function(img, i, j, k) {
  red   <- getValues(img[[i]])
  green <- getValues(img[[j]])
  blue  <- getValues(img[[k]])
  total <- red + green + blue
  rn <- red / total
  gn <- green / total
  bn <- blue / total
  index <- (2 * gn - bn - rn) / (2 * gn + bn + rn)
  out <- img[[1]]
  values(out) <- index
  out
}
#' Visible Atmospherically Resistant Index.
#'
#' Normalises the red/green/blue bands by their per-pixel sum, then computes
#' VARI = (g - r) / (g + r - b) and returns it as a raster layer.
#'
#' @param img A multi-band raster (e.g. a RasterBrick).
#' @param i,j,k Band indices for red, green and blue respectively.
#' @return A single raster layer holding the VARI values.
VARI <- function(img, i, j, k) {
  red   <- getValues(img[[i]])
  green <- getValues(img[[j]])
  blue  <- getValues(img[[k]])
  total <- red + green + blue
  rn <- red / total
  gn <- green / total
  bn <- blue / total
  index <- (gn - rn) / (gn + rn - bn)
  out <- img[[1]]
  values(out) <- index
  out
}
#' Percentage of "green" cells per image, with a Shiny progress bar.
#'
#' For each raster file, counts the cells whose value exceeds `threshold`
#' and expresses that count as a percentage of all cells in the raster.
#'
#' @param files Character vector of raster file paths.
#' @param dates One date per file, stored alongside each percentage.
#' @param threshold Numeric cutoff above which a cell counts as green.
#' @return A data.frame with columns `pct` (percentage 0-100) and `date` (Date).
pctgreen <- function(files, dates, threshold){
  # Create a Progress object
  progress <- shiny::Progress$new()
  progress$set(message = "Analysing", value = 0)
  # Close the progress when this reactive exits (even if there's an error)
  on.exit(progress$close())
  pctg <- matrix(ncol = 2, nrow = length(files))
  for (i in seq_along(files)){
    # BUG FIX: the increment must be the scalar 1/length(files);
    # the original passed the whole vector 1/seq_along(files).
    progress$inc(1/length(files), detail = paste("image", i))
    myraster <- brick(files[i])
    pctgreen1 <- (sum(getValues(myraster > threshold), na.rm = TRUE)/ncell(myraster))*100
    pctg[i, ] <- c(pctgreen1, dates[i])
  }
  pctg <- as.data.frame(pctg)
  names(pctg) <- c("pct", "date")
  # Dates were coerced to numeric inside the matrix; restore the Date class.
  pctg$date <- as.Date(pctg$date, origin = "1970-01-01")
  # (Removed a duplicated names(pctg) assignment present in the original.)
  return(pctg)
}
#' Percentage of "green" cells per NDVI image, with a Shiny progress bar.
#'
#' Same as pctgreen(), but counts above-threshold cells via
#' cellStats(..., 'sum') on the thresholded NDVI raster.
#'
#' @param files Character vector of NDVI raster file paths.
#' @param dates One date per file, stored alongside each percentage.
#' @param threshold Numeric NDVI cutoff above which a cell counts as green.
#' @return A data.frame with columns `pct` (percentage 0-100) and `date` (Date).
pctgreenNDVI <- function(files, dates, threshold){
  # Create a Progress object
  progress <- shiny::Progress$new()
  progress$set(message = "Analysing", value = 0)
  # Close the progress when this reactive exits (even if there's an error)
  on.exit(progress$close())
  pctg <- matrix(ncol = 2, nrow = length(files))
  for (i in seq_along(files)){
    # BUG FIX: the increment must be the scalar 1/length(files);
    # the original passed the whole vector 1/seq_along(files).
    progress$inc(1/length(files), detail = paste("image", i))
    myraster <- brick(files[i])
    pctgreen1 <- (cellStats(myraster > threshold, 'sum')/ncell(myraster))*100
    pctg[i, ] <- c(pctgreen1, dates[i])
  }
  pctg <- as.data.frame(pctg)
  names(pctg) <- c("pct", "date")
  # Dates were coerced to numeric inside the matrix; restore the Date class.
  pctg$date <- as.Date(pctg$date, origin = "1970-01-01")
  # (Removed a duplicated names(pctg) assignment present in the original.)
  return(pctg)
}
## Summary statistic of raster pixels above a given threshold ##
#' For every raster file, cells below `threshold` are set to 0 and the
#' cellStats summary `stat` (e.g. "mean") is computed on the result.
#' Returns a data.frame with columns `mean` (the statistic) and `date`.
thrshav <- function(files, dates, threshold, stat){
  result <- matrix(ncol = 2, nrow = length(files))
  for (idx in seq_along(files)){
    ras <- brick(files[idx])
    ras[ras < threshold] <- 0
    result[idx, ] <- c(cellStats(ras, stat), dates[idx])
  }
  result <- as.data.frame(result)
  names(result) <- c("mean", "date")
  result$date <- as.Date(result$date, origin = "1970-01-01")
  return(result)
}
# Returns an HTML snippet with a Font Awesome spinner that is shown only
# while the Shiny session is busy (document class "shiny-busy").
# BUG(review): the second paste0() argument nests unescaped single quotes
# inside a single-quoted string ('$('html')...''), which is a parse error
# in R; the inner JavaScript string should be double-quoted or escaped.
shiny_busy <- function() {
# use for some alignment, if needed
HTML(" ", paste0(
'<span data-display-if="',
'$('html').attr('class')=='shiny-busy'',
'">',
'<i class="fa fa-spinner fa-pulse fa-fw" style="color:orange"></i>',
'</span>'
))
} | /helpers.R | permissive | yangxhcaf/pct_green | R | false | false | 2,920 | r |
#functions for converting RGB and
#calculating green cover
GLI <- function(img,i,j,k){
r<-getValues(img[[i]])
g<-getValues(img[[j]])
b<-getValues(img[[k]])
sumras<-r+g+b
r1<-r/sumras;
g1<-g/sumras;
b1<-b/sumras;
GLI <- (2*g1-b1-r1)/(2*g1+b1+r1);
GLI_ras<-img[[1]];
values(GLI_ras)<-GLI
return(GLI_ras);
}
VARI <- function(img,i,j,k){
r<-getValues(img[[i]])
g<-getValues(img[[j]])
b<-getValues(img[[k]])
sumras<-r+g+b
r1<-r/sumras;
g1<-g/sumras;
b1<-b/sumras;
VARI <- (g1-r1)/(g1+r1-b1);
VARI_ras<-img[[1]];
values(VARI_ras)<-VARI
return(VARI_ras);
}
pctgreen <- function(files,dates,threshold){
# Create a Progress object
progress <- shiny::Progress$new()
progress$set(message = "Analysing", value = 0)
# Close the progress when this reactive exits (even if there's an error)
on.exit(progress$close())
pctg <- matrix(ncol = 2, nrow = length(files))
for (i in seq_along(files)){
progress$inc(1/seq_along(files), detail = paste("image", i))
myraster<-brick(files[i])
pctgreen1=(sum(getValues(myraster>threshold),na.rm = TRUE)/ncell(myraster))*100
pctg[i,]<-c(pctgreen1,dates[i])
}
pctg<-as.data.frame(pctg)
names(pctg)<-c("pct","date")
pctg$date<-as.Date(pctg$date,origin="1970-01-01")
names(pctg)<-c("pct","date")
return(pctg);
}
pctgreenNDVI <- function(files,dates,threshold){
# Create a Progress object
progress <- shiny::Progress$new()
progress$set(message = "Analysing", value = 0)
# Close the progress when this reactive exits (even if there's an error)
on.exit(progress$close())
pctg <- matrix(ncol = 2, nrow = length(files))
for (i in seq_along(files)){
progress$inc(1/seq_along(files), detail = paste("image", i))
myraster<-brick(files[i])
pctgreen1=(cellStats(myraster>threshold,'sum')/ncell(myraster))*100
pctg[i,]<-c(pctgreen1,dates[i])
}
pctg<-as.data.frame(pctg)
names(pctg)<-c("pct","date")
pctg$date<-as.Date(pctg$date,origin="1970-01-01")
names(pctg)<-c("pct","date")
return(pctg);
}
##statistics of raster pixels above given threshold##
thrshav <- function(files,dates,threshold,stat){
pctg <- matrix(ncol = 2, nrow = length(files))
for (i in seq_along(files)){
myraster<-brick(files[i])
myraster[myraster<threshold]<- 0
av_abv_t=cellStats(myraster, stat)
pctg[i,]<-c(av_abv_t,dates[i])
}
pctg<-as.data.frame(pctg)
names(pctg)<-c("mean","date")
pctg$date<-as.Date(pctg$date,origin="1970-01-01")
return(pctg);
}
#' HTML snippet showing a spinner while Shiny is busy.
#'
#' @return An HTML fragment with a Font Awesome spinner that is displayed
#'   only while the document's class is "shiny-busy".
shiny_busy <- function() {
  # use for some alignment, if needed
  HTML(" ", paste0(
    '<span data-display-if="',
    # BUG FIX: the original nested unescaped single quotes inside a
    # single-quoted string, which is an R parse error. Double-quoting the
    # JavaScript expression keeps the emitted HTML identical.
    "$('html').attr('class')=='shiny-busy'",
    '">',
    '<i class="fa fa-spinner fa-pulse fa-fw" style="color:orange"></i>',
    '</span>'
  ))
}
# DM_09_03.R
# Compare word frequencies in two 1847 novels ("Jane Eyre" and
# "Wuthering Heights") using the tm text-mining package, and identify
# the most distinctive stemmed terms between them.
# INSTALL AND LOAD PACKAGES ################################
pacman::p_load(pacman, tm, SnowballC, dplyr)
# IMPORT DATA ##############################################
# Don't need to specify file path if file is in the same
# directory or folder as the R script. Also, the metadata
# has already been stripped from beginning and end of text
# files.
# "Jane Eyre" by Charlotte Brontë, published 1847
bookJE <- readLines('D:/Data Mining/Text Mining/R/JaneEyre.txt')
# "Wuthering Heights" by Emily Brontë, also published 1847
bookWH <- readLines('D:/Data Mining/Text Mining/R/WutheringHeights.txt')
# CORPUS FOR JANE EYRE #####################################
# Note: Most of the operations take a moment or two. Make
# sure to let them finish before moving to the next step.
# Preliminary corpus
# Each input line becomes one document; the pipeline strips punctuation,
# numbers, case, English stopwords and extra whitespace, then stems.
corpusJE <- Corpus(VectorSource(bookJE)) %>%
tm_map(removePunctuation) %>%
tm_map(removeNumbers) %>%
tm_map(content_transformer(tolower)) %>%
tm_map(removeWords, stopwords("english")) %>%
tm_map(stripWhitespace) %>%
tm_map(stemDocument)
# Create term-document matrices & remove sparse terms
# A term must appear in roughly 5 or more documents (lines) to be kept.
tdmJE <- DocumentTermMatrix(corpusJE) %>%
removeSparseTerms(1 - (5/length(corpusJE)))
# Calculate and sort by word frequencies
word.freqJE <- sort(colSums(as.matrix(tdmJE)),
decreasing = T)
# Create frequency table
# NOTE(review): relative.frequency divides by the number of distinct
# terms (length), not the total token count (sum) — confirm that this
# normalisation is intended before interpreting the proportions.
tableJE <- data.frame(word = names(word.freqJE),
absolute.frequency = word.freqJE,
relative.frequency =
word.freqJE/length(word.freqJE))
# Remove the words from the row names
rownames(tableJE) <- NULL
# Show the 10 most common words
head(tableJE, 10)
# Export the 1000 most common words in CSV files
# NOTE(review): assumes at least 1000 terms survived sparsity filtering.
write.csv(tableJE[1:1000, ], "JE_1000.csv")
# CORPUS FOR WUTHERING HEIGHTS #############################
# Same pipeline as above, applied to the second novel.
corpusWH <- Corpus(VectorSource(bookWH)) %>%
tm_map(removePunctuation) %>%
tm_map(removeNumbers) %>%
tm_map(content_transformer(tolower)) %>%
tm_map(removeWords, stopwords("english")) %>%
tm_map(stripWhitespace) %>%
tm_map(stemDocument)
tdmWH <- DocumentTermMatrix(corpusWH) %>%
removeSparseTerms(1 - (5/length(corpusWH)))
word.freqWH <- sort(colSums(as.matrix(tdmWH)),
decreasing = T)
tableWH <- data.frame(word = names(word.freqWH),
absolute.frequency = word.freqWH,
relative.frequency = word.freqWH/length(word.freqWH))
rownames(tableWH) <- NULL
head(tableWH, 10)
write.csv(tableWH[1:1000, ], "WH_1000.csv")
# MOST DISTINCTIVE WORDS ###################################
# Set number of digits for output
options(digits = 2)
# Compare relative frequencies (via subtraction)
# Inner-join both tables on the stemmed word, compute the signed and
# absolute difference in relative frequency, and sort by the latter.
bronte <- tableJE %>%
merge(tableWH, by = "word") %>%
mutate(dProp =
relative.frequency.x -
relative.frequency.y,
dAbs = abs(dProp)) %>%
arrange(desc(dAbs)) %>%
rename(JE.freq = absolute.frequency.x,
JE.prop = relative.frequency.x,
WH.freq = absolute.frequency.y,
WH.prop = relative.frequency.y)
# Show the 10 most distinctive terms
head(bronte, 10)
# View table of all results
View(bronte)
# Save full table to CSV
write.csv(bronte, "bronte_table.csv")
# CLEAN UP #################################################
# Clear workspace
rm(list = ls())
# Clear packages
p_unload(pacman, tm, SnowballC, dplyr)
# Clear console
cat("\014") # ctrl+L
| /Text Mining/R/Text Mining.R | no_license | samanmunikar/Data-Mining | R | false | false | 3,463 | r | # DM_09_03.R
# INSTALL AND LOAD PACKAGES ################################
pacman::p_load(pacman, tm, SnowballC, dplyr)
# IMPORT DATA ##############################################
# Don't need to specify file path if file is in the same
# directory or folder as the R script. Also, the metadata
# has already been stripped from beginning and end of text
# files.
# "Jane Eyre" by Charlotte Brontë, published 1847
bookJE <- readLines('D:/Data Mining/Text Mining/R/JaneEyre.txt')
# "Wuthering Heights" by Emily Brontë, also published 1847
bookWH <- readLines('D:/Data Mining/Text Mining/R/WutheringHeights.txt')
# CORPUS FOR JANE EYRE #####################################
# Note: Most of the operations take a moment or two. Make
# sure to let them finish before moving to the next step.
# Preliminary corpus
corpusJE <- Corpus(VectorSource(bookJE)) %>%
tm_map(removePunctuation) %>%
tm_map(removeNumbers) %>%
tm_map(content_transformer(tolower)) %>%
tm_map(removeWords, stopwords("english")) %>%
tm_map(stripWhitespace) %>%
tm_map(stemDocument)
# Create term-document matrices & remove sparse terms
tdmJE <- DocumentTermMatrix(corpusJE) %>%
removeSparseTerms(1 - (5/length(corpusJE)))
# Calculate and sort by word frequencies
word.freqJE <- sort(colSums(as.matrix(tdmJE)),
decreasing = T)
# Create frequency table
tableJE <- data.frame(word = names(word.freqJE),
absolute.frequency = word.freqJE,
relative.frequency =
word.freqJE/length(word.freqJE))
# Remove the words from the row names
rownames(tableJE) <- NULL
# Show the 10 most common words
head(tableJE, 10)
# Export the 1000 most common words in CSV files
write.csv(tableJE[1:1000, ], "JE_1000.csv")
# CORPUS FOR WUTHERING HEIGHTS #############################
corpusWH <- Corpus(VectorSource(bookWH)) %>%
tm_map(removePunctuation) %>%
tm_map(removeNumbers) %>%
tm_map(content_transformer(tolower)) %>%
tm_map(removeWords, stopwords("english")) %>%
tm_map(stripWhitespace) %>%
tm_map(stemDocument)
tdmWH <- DocumentTermMatrix(corpusWH) %>%
removeSparseTerms(1 - (5/length(corpusWH)))
word.freqWH <- sort(colSums(as.matrix(tdmWH)),
decreasing = T)
tableWH <- data.frame(word = names(word.freqWH),
absolute.frequency = word.freqWH,
relative.frequency = word.freqWH/length(word.freqWH))
rownames(tableWH) <- NULL
head(tableWH, 10)
write.csv(tableWH[1:1000, ], "WH_1000.csv")
# MOST DISTINCTIVE WORDS ###################################
# Set number of digits for output
options(digits = 2)
# Compare relative frequencies (via subtraction)
bronte <- tableJE %>%
merge(tableWH, by = "word") %>%
mutate(dProp =
relative.frequency.x -
relative.frequency.y,
dAbs = abs(dProp)) %>%
arrange(desc(dAbs)) %>%
rename(JE.freq = absolute.frequency.x,
JE.prop = relative.frequency.x,
WH.freq = absolute.frequency.y,
WH.prop = relative.frequency.y)
# Show the 10 most distinctive terms
head(bronte, 10)
# View table of all results
View(bronte)
# Save full table to CSV
write.csv(bronte, "bronte_table.csv")
# CLEAN UP #################################################
# Clear workspace
rm(list = ls())
# Clear packages
p_unload(pacman, tm, SnowballC, dplyr)
# Clear console
cat("\014") # ctrl+L
|
#' Get the TEfit model formula
#'
#' \code{\link{TEfit}} internal. Builds the covariate list, resolves the
#' link-specific model formula, and extracts data and parameter names
#' from a TEfit model specification.
#'
#' @param modList List of TEfit model details
#'
#' @return \code{modList} augmented with \code{covars}, \code{modl_fun},
#'   \code{evalFun}, \code{null_fun}, \code{dnames}, \code{pNames} and
#'   \code{null_pNames}.
#'
#' @export
#'
#' @noRd
#'
tef_vars2forms <- function(modList){
# # set up covariates
{
# Columns 3+ of the input data are treated as covariates; the first two
# are assumed to be the response and the time variable.
if(dim(modList$varIn)[2] >2){
modList$covars <- names(modList$varIn)[3:ncol(modList$varIn)]
}
# Link-specific special columns are removed from the covariate list.
if(exists('blockTimeVar',modList)){
modList$covars <- modList$covars[modList$covars!=modList$blockTimeVar]
}
if(modList$linkFun$link=='d_prime'){
modList$covars <- modList$covars[modList$covars!= modList$linkFun$presence]
}
if(modList$linkFun$link=='logit'){
# NOTE(review): a missing logistX is only reported via cat(), not stop();
# execution continues afterwards — confirm this is intended.
if(!exists('logistX',modList$linkFun)){cat('\nYou need to define a logistX for your logit link')}
modList$covars <- modList$covars[modList$covars!= modList$linkFun$logistX]
}
if(modList$linkFun$link=='weibull'){
if(!exists('weibullX',modList$linkFun)){cat('\nYou need to define a weibullX for your weibull link')}
modList$covars <- modList$covars[modList$covars!= modList$linkFun$weibullX]
}
##
## ##
## ## ##
# define your change function:
modList <- tef_getLinkedFun(modList)
}
# An explicit formula string overrides the link-derived formula.
if(nchar(modList$explicit)>0){
modList$modl_fun <- as.formula(modList$explicit)
}
# # get your data and parameter names out
# A one-sided formula (~ rhs) is normalised to the two-sided form 0 ~ rhs.
if (length(modList$modl_fun) == 2L) {
modList$modl_fun[[3L]] <- modList$modl_fun[[2L]]
modList$modl_fun[[2L]] <- 0
}
varNames <- all.vars(modList$modl_fun)
modList$dnames <- names(modList$varIn)
# Formula variables that are not data columns are free model parameters.
modList$pNames <- varNames[is.na(match(varNames, modList$dnames))]
## get special pNames:
# Distributional error functions carry extra nuisance parameters that do
# not appear in the formula itself.
# NOTE(review): require() inside a function is discouraged; consider
# requireNamespace() with an informative error instead.
if(modList$errFun=='exGauss_mu'){
require(retimes)
modList$pNames <- c(modList$pNames,'sigma_param','tau_param')
modList$null_pNames <- c(modList$null_pNames,'sigma_param','tau_param')
}
if(modList$errFun=='exGauss_tau'){
require(retimes)
modList$pNames <- c(modList$pNames,'mu_param','sigma_param')
modList$null_pNames <- c(modList$null_pNames,'mu_param','sigma_param')
}
if(modList$errFun=='wiener_dr'){
require(RWiener)
modList$pNames <- c(modList$pNames,'bs_param','ndt_param','bias_param')
modList$null_pNames <- c(modList$null_pNames,'bs_param','ndt_param','bias_param')
}
##
# Keep the right-hand side of the model (and null) formulas as
# expressions for later evaluation.
modList$evalFun <- modList$modl_fun[[3]]
if (length(modList$null_fun) == 2L) {
modList$null_fun[[3L]] <- modList$null_fun[[2L]]
modList$null_fun[[2L]] <- 0
}
modList$null_fun <- modList$null_fun[[3]]
return(modList)
}
| /R/tef_vars2forms.R | permissive | akcochrane/TEfits | R | false | false | 2,464 | r | #' Get the TEfit model formula
#'
#' \code{\link{TEfit}} internal
#'
#' @param modList List of TEfit model details
#'
#' @export
#'
#' @noRd
#'
tef_vars2forms <- function(modList){
# # set up covariates
{
if(dim(modList$varIn)[2] >2){
modList$covars <- names(modList$varIn)[3:ncol(modList$varIn)]
}
if(exists('blockTimeVar',modList)){
modList$covars <- modList$covars[modList$covars!=modList$blockTimeVar]
}
if(modList$linkFun$link=='d_prime'){
modList$covars <- modList$covars[modList$covars!= modList$linkFun$presence]
}
if(modList$linkFun$link=='logit'){
if(!exists('logistX',modList$linkFun)){cat('\nYou need to define a logistX for your logit link')}
modList$covars <- modList$covars[modList$covars!= modList$linkFun$logistX]
}
if(modList$linkFun$link=='weibull'){
if(!exists('weibullX',modList$linkFun)){cat('\nYou need to define a weibullX for your weibull link')}
modList$covars <- modList$covars[modList$covars!= modList$linkFun$weibullX]
}
##
## ##
## ## ##
# define your change function:
modList <- tef_getLinkedFun(modList)
}
if(nchar(modList$explicit)>0){
modList$modl_fun <- as.formula(modList$explicit)
}
# # get your data and parameter names out
if (length(modList$modl_fun) == 2L) {
modList$modl_fun[[3L]] <- modList$modl_fun[[2L]]
modList$modl_fun[[2L]] <- 0
}
varNames <- all.vars(modList$modl_fun)
modList$dnames <- names(modList$varIn)
modList$pNames <- varNames[is.na(match(varNames, modList$dnames))]
## get special pNames:
if(modList$errFun=='exGauss_mu'){
require(retimes)
modList$pNames <- c(modList$pNames,'sigma_param','tau_param')
modList$null_pNames <- c(modList$null_pNames,'sigma_param','tau_param')
}
if(modList$errFun=='exGauss_tau'){
require(retimes)
modList$pNames <- c(modList$pNames,'mu_param','sigma_param')
modList$null_pNames <- c(modList$null_pNames,'mu_param','sigma_param')
}
if(modList$errFun=='wiener_dr'){
require(RWiener)
modList$pNames <- c(modList$pNames,'bs_param','ndt_param','bias_param')
modList$null_pNames <- c(modList$null_pNames,'bs_param','ndt_param','bias_param')
}
##
modList$evalFun <- modList$modl_fun[[3]]
if (length(modList$null_fun) == 2L) {
modList$null_fun[[3L]] <- modList$null_fun[[2L]]
modList$null_fun[[2L]] <- 0
}
modList$null_fun <- modList$null_fun[[3]]
return(modList)
}
|
# shinytest regression script for the 153-connection-header app.
# Loads the app with a generous 15 s timeout, waits until the
# `without_connection_upgrade` output has rendered, then records a
# snapshot to compare against the stored expected results.
app <- ShinyDriver$new("../../", loadTimeout = 15000)
app$snapshotInit("mytest")
app$waitForValue("without_connection_upgrade", iotype = "output")
app$snapshot()
| /apps/153-connection-header/tests/shinytest/mytest.R | no_license | masimonson791/shinycoreci-apps | R | false | false | 163 | r | app <- ShinyDriver$new("../../", loadTimeout = 15000)
app$snapshotInit("mytest")
app$waitForValue("without_connection_upgrade", iotype = "output")
app$snapshot()
|
# R script to accompany Intro to R
# Copy for RStudent 2019
######################################################
# You will see that you can put non-executable "comment" lines
# in your script by putting "#" at the beginning of the lines.
#
# In fact, you can put comments anywhere on a line as long as
# you put "#" before them.
| /Intro to R.R | no_license | srose622/Intro-to-R-2ed | R | false | false | 335 | r | # R script to accompany Intro to R
# Copy for RStudent 2019
######################################################
# You will see that you can put non-executable "comment" lines
# in your script by putting "#" at the beginning of the lines.
#
# In fact, you can put comments anywhere on a line as long as
# you put "#" before them.
|
#' Simulate phylogenetic trees on a fixed number of taxa (TreeSimGM).
#'
#' Repeatedly grows trees under user-supplied speciation/extinction
#' waiting-time distributions until `numbsim` valid trees are available,
#' optionally applying the general sampling approach (GSA) and stochastic
#' taxon sampling.
#'
#' @param numbsim Number of simulated trees to return.
#' @param n Number of extant sampled tips per returned tree.
#' @param m Number of standing taxa grown before sampling (must be >= n;
#'   adjusted below if not). Equal to n when gsa = FALSE.
#' @param waitsp,waitext Quoted random-number expressions (e.g. "rexp(1)")
#'   for the speciation and extinction waiting times.
#' @param symmetric If TRUE use the symmetric speciation mode, otherwise
#'   the asymmetric mode.
#' @param complete If TRUE keep extinct and non-sampled lineages; if FALSE
#'   prune them.
#' @param tiplabel Labels for surviving, extinct, speciation-shifted and
#'   extinction-shifted tips, in that order.
#' @param shiftsp,shiftext Lists giving the probability and strength
#'   expression of speciation / extinction shifts.
#' @param sampling List with the sampling fraction `frac` (0-1) and the
#'   `branchprop` flag.
#' @param sampling.gsa Sampling scheme forwarded to sim.gsa.taxa.
#' @param gsa If TRUE, grow trees to m tips and sample n-tip trees from
#'   them via sim.gsa.taxa; if FALSE, trees are returned at size m.
#' @return A list of `numbsim` simulated trees.
sim.taxa <- function (numbsim, n, m=n, waitsp, waitext="rexp(0)", symmetric=TRUE,
                      complete=TRUE, tiplabel=c("sp.", "ext.", "Ss", "Se"),
                      shiftsp=list(prob=0, strength="runif(0.5,0.9)"),
                      shiftext=list(prob=0, strength="runif(0.1,0.2)"),
                      sampling=list(frac=1, branchprop=FALSE),
                      sampling.gsa=1, gsa=FALSE) {
  # Taxon sampling prunes tips, which is only meaningful on reconstructed
  # (incomplete) trees.
  if (complete == TRUE && sampling$frac != 1) {
    warning("Sampling on taxa based can only be used with complete=FALSE, thus complete was changed to FALSE")
    complete <- FALSE
  }
  if (sampling$frac > 1 || sampling$frac < 0) {
    # Fixed warning text: the original repeated the word "Sampling".
    warning("Sampling fraction needs to range between 0 and 1, thus sampling$frac was changed to 1")
    sampling$frac <- 1
  }
  if (sampling$frac != 1) {
    # With incomplete sampling, simulate enough extra tips so that n tips
    # remain after pruning.
    n <- round(n / sampling$frac)
    if (m < n) {
      warning("You are using sampling, thus tips=n/sampling$frac. m is smaller than n, thus we changed m=n/frac")
      m <- n
    }
  }
  if (m < n) {
    # Fixed typo ("samller") in the original message.
    warning("m can not be smaller than n, thus we changed m=n")
    m <- n
  }
  # sim.gsa.taxa needs complete trees to sample from, so force complete
  # simulation internally when gsa = FALSE but complete output is requested.
  check <- gsa
  if (!gsa && complete) {
    check <- TRUE
  }
  mytreegsazed <- list()
  # Keep simulating in batches of numbsim until enough valid trees have
  # survived the (optional) GSA filtering step.
  while (length(mytreegsazed) < numbsim) {
    mytree <- list()
    for (step in seq_len(numbsim)) {
      if (symmetric) {
        mytreenext <- mytree.symmetric.taxa(m=m, waitsp=waitsp, waitext=waitext,
                                            complete=check, tiplabel=tiplabel,
                                            shiftsp=shiftsp, shiftext=shiftext,
                                            sampling=sampling, gsa=gsa)
      } else {
        mytreenext <- mytree.asymmetric.taxa(m=m, waitsp=waitsp, waitext=waitext,
                                             complete=check, tiplabel=tiplabel,
                                             shiftsp=shiftsp, shiftext=shiftext,
                                             sampling=sampling, gsa=gsa)
      }
      mytree <- c(mytree, list(mytreenext))
    }
    if (gsa) {
      mytreegsa <- sim.gsa.taxa(mytree, n=n, sampling=sampling.gsa,
                                frac=sampling$frac, complete=complete)
    } else {
      mytreegsa <- mytree  # gsa is FALSE: keep the raw batch
    }
    mytreegsazed <- c(mytreegsazed, mytreegsa)
  }
  # Draw exactly numbsim trees (in random order) from the accumulated pool.
  return(sample(mytreegsazed, numbsim))
}
| /R/sim.taxa.R | no_license | cran/TreeSimGM | R | false | false | 5,670 | r | sim.taxa <-
function (numbsim, n, m=n, waitsp, waitext="rexp(0)", symmetric=TRUE, complete=TRUE, tiplabel=c("sp.", "ext.","Ss", "Se"),
shiftsp=list(prob=0, strength="runif(0.5,0.9)"), shiftext=list(prob=0, strength="runif(0.1,0.2)"),
sampling=list(frac=1, branchprop=FALSE), sampling.gsa=1, gsa=FALSE) {
# numbsim is the number of simulated trees
# n is the Number of tips in sampled trees (Number of extant sampled leaves)
# m is the number of standing taxa that will exist on the first generated trees, to then be sampled for n number of tips. Case gsa=TRUE, m is equal to n.
# distributionspname is the name of the desired probability function that will be used for the speciation process (e.g. distributionspname <- "rexp"). Note that the name should contain an `r` before it, since it refers to the random number of the desired function (e.g. "rweibull", "runif")
# distributionspparameters are the parameters for the specific function desired for speciation.
# IMPORTANT: this vector of fuction parameters must *start by the second one, since the first parameter will always be one for all the function and is added already by this function*. HINT: see the help of the desired function for more details (e.g. ?rexp) Example of parameter for a exponential distribution with lambda of one (distributionspparameters <- c(1)). Entry in the distributionparameters can be "#", # or c(#,#) in case of more characters
# distributionextname is the same as the distributionspname but for the probability of extinction (e.g. distributionextname <- "rexp")
# distributionextparameters is the same as the distributionspparameters but for the extinction probability function. By default extinction is set to ZERO, i.e. no extinction (e.g. distributionextparameters <- c(0)). Entry in can be "#", # or c(#,#) in case of more characters
# symmetric tells which macro-evolutionary model should be used. If symmetric=TRUE the symmetric model will be used, else if FALSE, asymmetric model will be used. By default symmetric=TRUE
# complete: If complete = TRUE, the tree with the extinct and non-sampled lineages is returned. If complete = FALSE, the extinct and non-sampled lineages are suppressed. Complete=FALSE by default
# labellivingsp is the label that will be drawn on each tip surving until the present. An automatic sequential number will be added to the chosen name. By default labellivingsp="sp."
# labelextinctsp is the label that will be drawn on each extinct tip. By default labelextinctsp <- "ext."
# sampling: stochastic sampling, default
# gsa TRUE indicates that the sim.gsa.taxa will be used, the n parameter indicates the final number of species. Note that m needs to be allways bigger then n. If gsa = FALSE, there is no need of specifying n, once the final trees will be of size m
# entry in the distributionparameters can be "#", # or c(#,#) in case of more variables
# shiftspprob: frequency by which a speciation shift happens, default is 0, that means no shift. This value should range from 0 (no shift) to 1 (all species are shifted)
# shiftdistributionspname: distribution by which the shift (waiting time multiplier) will be drawn
# shiftdistributionspparameters: parameters of the chosen distribution
# shiftextprob: frequency by which a extinction shift happens, default is 0, that means no shift. This value should range from 0 (no shift) to 1 (all species are shifted)
# shiftdistributionextname: distribution by which the shift (waiting time multiplier) will be drawn
# shiftdistributionextparameters: parameters of the chosen distribution
# shiftsplabel: label to be added to the species that suffered speciation shift
# shiftextlabel: label to be added to the species that suffered extinction shift
if (complete==TRUE & sampling$frac!=1) {
warning("Sampling on taxa based can only be used with complete=FALSE, thus complete was changed to FALSE")
complete=FALSE
}
if (sampling$frac>1 | sampling$frac<0){
warning("Sampling Sampling fraction needs to range between 0 and 1, thus sampling$frac was changed to 1")
sampling$frac=1
}
if (sampling$frac!=1){
n=round(n/sampling$frac)
if (m<n) {
warning("You are using sampling, thus tips=n/sampling$frac. m is smaller than n, thus we changed m=n/frac")
m=n
}
}
if (m<n){
warning("m can not be samller than n, thus we changed m=n")
m=n
}
check<-gsa
if (gsa==F && complete==T){check<-T}
mytreegsazed <- list()
while (length(mytreegsazed) < numbsim)
{
mytree <- list()
step <- 1
{
if (symmetric == TRUE)
{
for (step in 1: (numbsim) ){
mytreenext <- mytree.symmetric.taxa(m=m, waitsp=waitsp, waitext=waitext, complete=check, tiplabel=tiplabel,
shiftsp=shiftsp, shiftext=shiftext, sampling=sampling, gsa=gsa)
mytree<- c(mytree, list(mytreenext))
}
}
else
{
for (step in 1: (numbsim) ){
mytreenext <- mytree.asymmetric.taxa(m=m, waitsp=waitsp, waitext=waitext, complete=check, tiplabel=tiplabel,
shiftsp=shiftsp, shiftext=shiftext, sampling=sampling, gsa=gsa)
mytree<- c(mytree, list(mytreenext))
}
}
}
{
if (gsa==T)
{
mytreegsa <- sim.gsa.taxa(mytree, n=n, sampling=sampling.gsa, frac=sampling$frac, complete=complete)
}
else
{
mytreegsa <- mytree #gsa is FALSE
}
}
mytreegsazed <- c(mytreegsazed, mytreegsa)
}
mytreegsazeds <- sample(mytreegsazed, numbsim)
mytreegsazed <- mytreegsazeds
return(mytreegsazed)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comparisons.R
\name{contains_one_of}
\alias{contains_one_of}
\title{Check if a value matches multiple regular expressions}
\usage{
contains_one_of(x, ..., ignore.case = FALSE)
}
\arguments{
\item{x}{A character vector}
\item{...}{unquoted or quoted regular expressions}
\item{ignore.case}{A logical. If FALSE (default) case is ignored in the comparison}
}
\value{
The vector x is checked against each regular expression in the array provided. Any match
results in TRUE for the element
}
\description{
Check if a value matches multiple regular expressions
}
\examples{
nams=c("George Washington", "Washington DC", "King George")
contains_one_of(nams, Washington, DC)
contains_one_of(nams, ing)
contains_one_of(nams, "^Wash.*$")
}
| /man/contains_one_of.Rd | no_license | qPharmetra/PMDatR | R | false | true | 811 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comparisons.R
\name{contains_one_of}
\alias{contains_one_of}
\title{Check if a value matches multiple regular expressions}
\usage{
contains_one_of(x, ..., ignore.case = FALSE)
}
\arguments{
\item{x}{A character vector}
\item{...}{unquoted or quoted regular expressions}
\item{ignore.case}{A logical. If FALSE (default) case is ignored in the comparison}
}
\value{
The vector x is checked against each regular expression in the array provided. Any match
results in TRUE for the element
}
\description{
Check if a value matches multiple regular expressions
}
\examples{
nams=c("George Washington", "Washington DC", "King George")
contains_one_of(nams, Washington, DC)
contains_one_of(nams, ing)
contains_one_of(nams, "^Wash.*$")
}
|
# Exercise 9-1: regularised regression (ridge, lasso, elastic net) on the
# student performance data, using glmnet with cross-validation.
# FIX: the bare numeric step markers (1., 2., ...) from the hand-out were
# evaluated as expressions; they are now comments.

# 1. Read the semicolon-separated data set.
data <- read.csv("Data/student-mat.csv", sep=";")

# 2. Drop the two intermediate-grade columns (G1, then G2 shifts into
#    position 31), keeping only the final grade G3 as the response.
data <- data[,-31]
data <- data[,-31]
str(data)

# 3. Distribution of the final grade.
# BUG FIX: G3 is a column of `data`; the bare name G3 would not be found.
hist(data$G3)

# 4. Ridge regression (alpha = 0) with cross-validation.
install.packages("glmnet")
library(glmnet)
x <- model.matrix(G3~., data)[,-1]  # predictor matrix, intercept column dropped
class(x)
y <- data$G3
set.seed(1)
cvfit <- cv.glmnet(x, y, alpha=0)
plot(cvfit)
cvfit$lambda.min
cvfit$lambda.1se
coef(cvfit, s = "lambda.min")

# 5. Lasso (alpha = 1).
cvfit <- cv.glmnet(x, y, alpha=1)
plot(cvfit)
cvfit$lambda.min
cvfit$lambda.1se
coef(cvfit, s = "lambda.min")

# 6. Elastic net (alpha = 0.5).
cvfit <- cv.glmnet(x, y, alpha=0.5)
plot(cvfit)
cvfit$lambda.min
cvfit$lambda.1se
coef(cvfit, s = "lambda.min")

# 7. Grid search over alpha = 0, 0.1, ..., 1 using a common fold
#    assignment so the CV errors are comparable.
set.seed(1)
foldid <- sample(1:10, size = 395, replace = TRUE)
cv.error <- numeric(11)
for (i in 1:11) {
  cvfit <- cv.glmnet(x, y, foldid = foldid, alpha = (i-1)/10)
  cv.error[i] <- min(cvfit$cvm)
}
cv.error
min(cv.error)
which.min(cv.error)

# 8. Train/test split (295/100) and ridge train/test MSE.
set.seed(1)
train <- sample(395, 295)
cvfit <- cv.glmnet(x[train,], y[train], alpha=0)
coef(cvfit, s = "lambda.min")
bestlam <- cvfit$lambda.min
pred_train <- predict(cvfit, newx = x[train,], s=bestlam)
mean((pred_train - y[train])^2)
pred_test <- predict(cvfit, newx = x[-train,], s=bestlam)
mean((pred_test - y[-train])^2)
coef(cvfit, s = "lambda.min")
| /007 Fuzzy Name Matching/20210702/机器学习课后习题/R命令/9-1.R | no_license | LingJiang2641/SCUT-Engagement-2021 | R | false | false | 1,216 | r | 1.
data<- read.csv("Data/student-mat.csv",sep=";")
2.
data <- data[,-31]
data <- data[,-31]
str(data)
3.
hist(G3)
4.
install.packages("glmnet")
library(glmnet)
x<- model.matrix(G3~.,data)[,-1]
class(x)
y<-data$G3
set.seed(1)
cvfit <- cv.glmnet(x,y,alpha=0)
plot(cvfit)
cvfit$lambda.min
cvfit$lambda.1se
coef(cvfit,s = "lambda.min")
5.
cvfit <- cv.glmnet(x,y,alpha=1)
plot(cvfit)
cvfit$lambda.min
cvfit$lambda.1se
coef(cvfit,s = "lambda.min")
6.
cvfit <- cv.glmnet(x,y,alpha=0.5)
plot(cvfit)
cvfit$lambda.min
cvfit$lambda.1se
coef(cvfit,s = "lambda.min")
7.
set.seed(1)
foldid<- sample(1:10,size = 395,replace = TRUE)
cv.error<- numeric(11)
for (i in 1:11){
cvfit<-cv.glmnet(x,y,foldid = foldid,alpha = (i-1)/10)
cv.error[i] <-min(cvfit$cvm)
}
cv.error
min(cv.error)
which.min(cv.error)
8.
set.seed(1)
train <-sample(395,295)
cvfit <- cv.glmnet(x[train,],y[train],alpha=0)
coef(cvfit,s = "lambda.min")
bestlam <- cvfit$lambda.min
pred_train <- predict(cvfit,newx = x[train,],s=bestlam)
mean((pred_train-y[train])^2)
pred_test <- predict(cvfit,newx = x[-train,],s=bestlam)
mean((pred_test-y[-train])^2)
coef(cvfit,s = "lambda.min")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SS_writectl_3.30.R
\name{SS_writectl_3.30}
\alias{SS_writectl_3.30}
\title{write control file for SS version 3.30}
\usage{
SS_writectl_3.30(ctllist, outfile, overwrite = FALSE, verbose = FALSE)
}
\arguments{
\item{ctllist}{List object created by \code{\link[=SS_readctl]{SS_readctl()}}.}
\item{outfile}{Filename for where to write new data file.}
\item{overwrite}{A logical value specifying if the existing file(s)
should be overwritten. The default value is \code{overwrite = FALSE}.}
\item{verbose}{A logical value specifying if output should be printed
to the screen.}
}
\description{
write Stock Synthesis control file from list object in R which was created
using \code{\link[=SS_readctl]{SS_readctl()}}. This function is designed to be called
using \code{\link[=SS_writectl]{SS_writectl()}} and should not be called directly.
}
\seealso{
\code{\link[=SS_readctl]{SS_readctl()}}, \code{\link[=SS_readctl_3.30]{SS_readctl_3.30()}},\code{\link[=SS_readstarter]{SS_readstarter()}},
\code{\link[=SS_readforecast]{SS_readforecast()}},
\code{\link[=SS_writestarter]{SS_writestarter()}}, \code{\link[=SS_writeforecast]{SS_writeforecast()}},
\code{\link[=SS_writedat]{SS_writedat()}}
}
\author{
Kathryn L. Doering, Yukio Takeuchi, Neil Klaer, Watal M. Iwasaki,
Nathan R. Vaughan
}
| /man/SS_writectl_3.30.Rd | no_license | r4ss/r4ss | R | false | true | 1,358 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SS_writectl_3.30.R
\name{SS_writectl_3.30}
\alias{SS_writectl_3.30}
\title{write control file for SS version 3.30}
\usage{
SS_writectl_3.30(ctllist, outfile, overwrite = FALSE, verbose = FALSE)
}
\arguments{
\item{ctllist}{List object created by \code{\link[=SS_readctl]{SS_readctl()}}.}
\item{outfile}{Filename for where to write new data file.}
\item{overwrite}{A logical value specifying if the existing file(s)
should be overwritten. The default value is \code{overwrite = FALSE}.}
\item{verbose}{A logical value specifying if output should be printed
to the screen.}
}
\description{
write Stock Synthesis control file from list object in R which was created
using \code{\link[=SS_readctl]{SS_readctl()}}. This function is designed to be called
using \code{\link[=SS_writectl]{SS_writectl()}} and should not be called directly.
}
\seealso{
\code{\link[=SS_readctl]{SS_readctl()}}, \code{\link[=SS_readctl_3.30]{SS_readctl_3.30()}},\code{\link[=SS_readstarter]{SS_readstarter()}},
\code{\link[=SS_readforecast]{SS_readforecast()}},
\code{\link[=SS_writestarter]{SS_writestarter()}}, \code{\link[=SS_writeforecast]{SS_writeforecast()}},
\code{\link[=SS_writedat]{SS_writedat()}}
}
\author{
Kathryn L. Doering, Yukio Takeuchi, Neil Klaer, Watal M. Iwasaki,
Nathan R. Vaughan
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/temporal_torus_translation.R
\name{cyclic_shift}
\alias{cyclic_shift}
\alias{temporal_torus_translation}
\title{Cyclic Shift Permutations}
\usage{
cyclic_shift(df, time.var, species.var, abundance.var,
replicate.var = NA, FUN, bootnumber)
}
\arguments{
\item{df}{A data frame containing time, species and abundance columns and an optional column of replicates}
\item{time.var}{The name of the time column}
\item{species.var}{The name of the species column}
\item{abundance.var}{The name of the abundance column}
\item{replicate.var}{The name of the replicate column. Defaults to \code{NA}, indicating no replicates (i.e., data are from a single plot).}
\item{FUN}{A function to calculate on the null community}
\item{bootnumber}{Number of null simulations to run}
}
\value{
The cyclic_shift function returns an S3 object of class "cyclic_shift" and parameter "out".
The length of the "out" parameter is the number of null iterations as specified by bootnumber.
If multiple replicates are specified, null values are averaged among replicates for each iteration, but a different cyclic shift permutation is applied to each replicate within an iteration.
}
\description{
Performs a user-specified function on a null ecological community created via cyclic shift permutations (Harms et al. 2001, Hallett et al. 2014).
The null community is formed by randomly selecting different starting years for the time series of each species.
This generates a null community matrix in which species abundances vary independently but within-species autocorrelation is maintained.
The user-specified function must require a species x time input.
}
\details{
The input data frame needs to contain columns for time, species and abundance; time.var, species.var and abundance.var are used to indicate which columns contain those variables.
}
\examples{
# Calculate a covariance matrix on a null community
data(knz_001d)
a1_cyclic <- cyclic_shift(subset(knz_001d, subplot == "A_1"),
time.var = "year",
species.var = "species",
abundance.var = "abundance",
FUN = cov,
bootnumber = 10)
}
\references{
Hallett, Lauren M., Joanna S. Hsu, Elsa E. Cleland, Scott L. Collins, Timothy L. Dickson, Emily C. Farrer, Laureano A. Gherardi, et al. "Biotic Mechanisms of Community Stability Shift along a Precipitation Gradient." Ecology 95, no. 6 (2014): 1693-1700.
Harms, Kyle E., Richard Condit, Stephen P. Hubbell, and Robin B. Foster. "Habitat Associations of Trees and Shrubs in a 50-Ha Neotropical Forest Plot." Journal of Ecology 89, no. 6 (2001): 947-59.
}
| /man/cyclic_shift.Rd | permissive | lizferguson5/codyn | R | false | true | 2,711 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/temporal_torus_translation.R
\name{cyclic_shift}
\alias{cyclic_shift}
\alias{temporal_torus_translation}
\title{Cyclic Shift Permutations}
\usage{
cyclic_shift(df, time.var, species.var, abundance.var,
replicate.var = NA, FUN, bootnumber)
}
\arguments{
\item{df}{A data frame containing time, species and abundance columns and an optional column of replicates}
\item{time.var}{The name of the time column}
\item{species.var}{The name of the species column}
\item{abundance.var}{The name of the abundance column}
\item{replicate.var}{The name of the replicate column. Defaults to \code{NA}, indicating no replicates (i.e., data are from a single plot).}
\item{FUN}{A function to calculate on the null community}
\item{bootnumber}{Number of null simulations to run}
}
\value{
The cyclic_shift function returns an S3 object of class "cyclic_shift" and parameter "out".
The length of the "out" parameter is the number of null iterations as specified by bootnumber.
If multiple replicates are specified, null values are averaged among replicates for each iteration, but a different cyclic shift permutation is applied to each replicate within an iteration.
}
\description{
Performs a user-specified function on a null ecological community created via cyclic shift permutations (Harms et al. 2001, Hallett et al. 2014).
The null community is formed by randomly selected different starting years for the time series of each species.
This generates a null community matrix in which species abundances vary independently but within-species autocorrelation is maintained.
The user-specified function must require a species x time input.
}
\details{
The input data frame needs to contain columns for time, species and abundance; time.var, species.var and abundance.var are used to indicate which columns contain those variables.
}
\examples{
# Calculate a covariance matrix on a null community
data(knz_001d)
a1_cyclic <- cyclic_shift(subset(knz_001d, subplot == "A_1"),
time.var = "year",
species.var = "species",
abundance.var = "abundance",
FUN = cov,
bootnumber = 10)
}
\references{
Hallett, Lauren M., Joanna S. Hsu, Elsa E. Cleland, Scott L. Collins, Timothy L. Dickson, Emily C. Farrer, Laureano A. Gherardi, et al. "Biotic Mechanisms of Community Stability Shift along a Precipitation Gradient." Ecology 95, no. 6 (2014): 1693-1700.
Harms, Kyle E., Richard Condit, Stephen P. Hubbell, and Robin B. Foster. "Habitat Associations of Trees and Shrubs in a 50-Ha Neotropical Forest Plot." Journal of Ecology 89, no. 6 (2001): 947-59.
}
|
# Build a superannuation market-share panel from APRA fund-level data
# (`apra.sr.full`, loaded from super_data.Rdata) and save it as CSV and
# RData.
#
# FIX: the pipeline relies on dplyr (%>%, group_by, mutate, lag,
# filter, ...) which the original script never attached; library(dplyr)
# is added so the script runs on its own.
library(readr)
library(dplyr)

load("~/Downloads/super_data.Rdata")  # provides `apra.sr.full`

Super <- apra.sr.full %>%
  group_by(Year) %>%
  # Market-wide (per-year) aggregates.
  mutate(Funds_to_market = sum(Total_contributions_, na.rm = TRUE),
         Total_payments = sum(Total_benefit_payments_, na.rm = TRUE),
         mr = mean(Rate_of_return, na.rm = TRUE),  # per-year mean return across funds
         number_of_funds = n(),
         TotalFUM = sum(last(Total_assets_at_end_of_period_)),
         TotalCont = sum(last(Total_contributions_)),
         TotalWithd = sum(last(Total_benefit_payments_)),
         TotalFUMstart = sum(first(Total_assets_at_end_of_period_)),
         TotalContstart = sum(first(Total_contributions_)),
         TotalWithdstart = sum(first(Total_benefit_payments_))) %>%
  group_by(Fund_name) %>%
  # Fund-level flow, lagged performance, and member-composition shares.
  mutate(ms = Total_contributions_ + Net_rollovers_,
         lag_mr = lag(mr),
         performance = Rate_of_return - mr,  # excess return vs the per-year mean
         lag_perf = lag(performance),
         lag2_perf = lag(lag_perf),
         lag3_perf = lag(lag2_perf),
         perf_3year = (lag_perf + lag2_perf + lag3_perf) / 3,
         FUM = Total_assets_at_end_of_period_,
         over_50 = ((X50_59_female + X50_59_male + X60_65_female + X60_65_male +
                       X66_female + X66_male) / Number_of_members) * 100,
         female = ((X_35_female + X35_49_female + X50_59_female +
                      X60_65_female + X66_female) / Number_of_members) * 100) %>%
  dplyr::select(Fund_name, Year, ms, lag_mr, performance, lag_perf, FUM,
                over_50, female, Average.fees, Fund_type, Cost.ratio) %>%
  ungroup %>%
  group_by(Year) %>%
  # Normalise ms to a within-year share (percent); floor negatives at 0
  # and drop funds with no positive share.
  mutate(ms = ms / sum(ms, na.rm = TRUE),
         ms = 100 * ifelse(ms < 0, 0, ms) %>% as.numeric) %>%
  filter(ms > 0)

write_csv(Super, "~/Documents/MSW/super_market_share.csv")
save(Super, file = "~/Documents/MSW/super_market_share.RData")
| /make_super_data.R | no_license | anhnguyendepocen/MSW | R | false | false | 1,743 | r | library(readr)
# Builds a per-year market-share panel for superannuation funds from
# the APRA fund-level data loaded below, then saves it as CSV and RData.
# NOTE(review): the pipeline uses dplyr verbs (%>%, group_by, mutate,
# lag, filter) but only readr is attached above this point -- confirm
# dplyr is loaded before sourcing this script.
load("~/Downloads/super_data.Rdata")
# Year-level market aggregates, then fund-level performance/lag
# variables, then a within-year normalised market share `ms` (percent,
# negatives floored at zero and zero-share rows dropped).
Super <- apra.sr.full %>% group_by (Year) %>%
mutate (Funds_to_market = sum(Total_contributions_, na.rm=TRUE),
Total_payments= sum(Total_benefit_payments_, na.rm=T),
mr = mean (Rate_of_return, na.rm=TRUE),
number_of_funds = n(),
TotalFUM = sum(last(Total_assets_at_end_of_period_)),
TotalCont = sum(last(Total_contributions_)),
TotalWithd = sum(last(Total_benefit_payments_)),
TotalFUMstart = sum(first(Total_assets_at_end_of_period_)),
TotalContstart = sum(first(Total_contributions_)),
TotalWithdstart = sum(first(Total_benefit_payments_))) %>%
group_by (Fund_name) %>%
mutate (ms = Total_contributions_+Net_rollovers_,
lag_mr = lag(mr),
performance = Rate_of_return - mr,
lag_perf = lag(performance),
lag2_perf = lag(lag_perf),
lag3_perf = lag(lag2_perf),
perf_3year = (lag_perf+lag2_perf+lag3_perf)/3,
FUM = Total_assets_at_end_of_period_,
over_50 = ((X50_59_female + X50_59_male+X60_65_female+X60_65_male+
X66_female+X66_male)/Number_of_members)*100,
female = ((X_35_female+X35_49_female+X50_59_female+
X60_65_female+X66_female)/Number_of_members)*100) %>%
dplyr::select(Fund_name, Year, ms, lag_mr, performance, lag_perf, FUM, over_50, female, Average.fees, Fund_type, Cost.ratio) %>%
ungroup %>% group_by(Year) %>%
mutate(ms = ms/sum(ms, na.rm = T),
ms = 100*ifelse(ms<0, 0, ms) %>% as.numeric) %>% filter(ms>0)
# Write the panel to disk in both CSV and RData form.
write_csv(Super, "~/Documents/MSW/super_market_share.csv")
save(Super, file = "~/Documents/MSW/super_market_share.RData")
|
\name{plot.Selectmodel}
\alias{plot.Selectmodel}
\title{ Subset AR Graph for "Selectmodel" Object }
\description{
A graphical depiction is given of the output from \code{SelectModel}.
}
\usage{
\method{plot}{Selectmodel}(x, ...)
}
\arguments{
\item{x}{ out from \code{SelectModel} }
\item{...}{ optional arguments }
}
\details{
The relative plausibility of Model A vs the best Model B, is defined as
\eqn{R = e^{(AIC_B-AIC_A)/2}}.
Values of R less than 1\% indicate an implausible model.
R is defined similarly if the BIC/UBIC criterion is used.
}
\value{
No value. Plot produced as side-effect.
}
\author{ A.I. McLeod }
\seealso{ \code{\link{SelectModel}} }
\examples{
#takes about 10 seconds
\dontrun{
out<-SelectModel(log(Willamette),lag.max=150,ARModel="AR",Best=5,Criterion="AIC")
plot(out)
}
}
\keyword{ ts }
| /man/plot.Selectmodel.Rd | no_license | githubfun/FitAR | R | false | false | 864 | rd | \name{plot.Selectmodel}
\alias{plot.Selectmodel}
\title{ Subset AR Graph for "Selectmodel" Object }
\description{
A graphical depiction is given of the output from \code{SelectModel}.
}
\usage{
\method{plot}{Selectmodel}(x, ...)
}
\arguments{
\item{x}{ out from \code{SelectModel} }
\item{...}{ optional arguments }
}
\details{
The relative plausibility of Model A vs the best Model B, is defined as
\eqn{R = e^{(AIC_B-AIC_A)/2}}.
Values of R less than 1\% indicate an implausible model.
R is defined similarly if the BIC/UBIC criterion is used.
}
\value{
No value. Plot produced as side-effect.
}
\author{ A.I. McLeod }
\seealso{ \code{\link{SelectModel}} }
\examples{
#takes about 10 seconds
\dontrun{
out<-SelectModel(log(Willamette),lag.max=150,ARModel="AR",Best=5,Criterion="AIC")
plot(out)
}
}
\keyword{ ts }
|
# Extracted example for rTensor's tensor transpose method
# (t,Tensor-method); exercises t() on a random tensor.
library(rTensor)
### Name: t-methods
### Title: Tensor Transpose
### Aliases: t,Tensor-method t-methods

### ** Examples

# Random tensor with rTensor's default dimensions.
tnsr <- rand_tensor()
# Slice 1 of the transposed tensor equals the transpose of slice 1.
identical(t(tnsr)@data[,,1],t(tnsr@data[,,1]))
# Slice 2 of the transpose matches the transposed slice 5 of the
# original -- presumably t() also reverses slice order; confirm
# against the rTensor t() documentation.
identical(t(tnsr)@data[,,2],t(tnsr@data[,,5]))
# Transposing twice recovers the original tensor.
identical(t(t(tnsr)),tnsr)
| /data/genthat_extracted_code/rTensor/examples/t-methods.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 270 | r | library(rTensor)
### Name: t-methods
### Title: Tensor Transpose
### Aliases: t,Tensor-method t-methods
### ** Examples
# Random tensor; the checks below exercise rTensor's t() method.
tnsr <- rand_tensor()
# Slice 1 of the transpose equals the transpose of slice 1.
identical(t(tnsr)@data[,,1],t(tnsr@data[,,1]))
# Slice 2 of the transpose matches transposed slice 5 of the original --
# presumably t() also reverses slice order; confirm in the rTensor docs.
identical(t(tnsr)@data[,,2],t(tnsr@data[,,5]))
# t() is an involution: transposing twice recovers the tensor.
identical(t(t(tnsr)),tnsr)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print_text.r
\name{browse_texts}
\alias{browse_texts}
\title{Create and view a full text browser}
\usage{
browse_texts(tc, doc_ids = NULL, token_col = "token", n = 500,
select = c("first", "random"), header = "", subheader = NULL,
highlight = NULL, scale = NULL, category = NULL,
meta_cols = NULL, seed = NA, nav = NULL, view = T,
highlight_col = "yellow", scale_col = c("blue", "red"),
filename = NULL)
}
\arguments{
\item{tc}{a tCorpus}
\item{doc_ids}{A vector with document ids to view}
\item{token_col}{The name of the column in tc$tokens that contain the token text}
\item{n}{Only n of the results are printed (to prevent accidentally making huge browsers).}
\item{select}{If n is smaller than the number of documents in tc, select determines how the n documents are selected}
\item{header}{Optionally, a title presented at the top of the browser}
\item{subheader}{Optionally, overwrite the subheader. By default the subheader reports the number of documents}
\item{highlight}{The name of a numeric column in tc$tokens with values between 0 and 1, used to highlight tokens.
Can also be a character vector, in which case al non-NA values are highlighted}
\item{scale}{The name of a numeric column in tc$tokens with values between -1 and 1, used to color tokens on a scale (set colors with scale_col)}
\item{category}{The name of a character or factor column in tc$tokens. Each unique value will have its own color, and navigation for categories will be added (nav cannot be used with this option)}
\item{meta_cols}{A character vector with names of columns in tc$meta, used to only show the selected columns}
\item{seed}{If select is "random", seed can be used to set a random seed}
\item{nav}{Optionally, a column in tc$meta to add navigation (only supports simple filtering on unique values).
This is not possible if annotate is used.}
\item{view}{If TRUE (default), view the browser in the Viewer window (turn off if this is not supported)}
\item{highlight_col}{If highlight is used, the color for highlighting}
\item{scale_col}{If scale is used, a vector with 2 or more colors used to create a color ramp. That is, -1 is first color, +1 is last color, if three colors are given 0 matches the middle color, and colors in between are interpolated.}
\item{filename}{Optionally, save the browser at a specified location}
}
\value{
The url for the file location is returned (invisibly)
}
\description{
Creates a static HTML file to view the texts in the tcorpus in full text mode.
}
\examples{
\dontrun{
tc = create_tcorpus(sotu_texts, doc_column='id')
url = browse_texts(tc)
}
}
| /man/browse_texts.Rd | no_license | MichaelChirico/corpustools | R | false | true | 2,686 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print_text.r
\name{browse_texts}
\alias{browse_texts}
\title{Create and view a full text browser}
\usage{
browse_texts(tc, doc_ids = NULL, token_col = "token", n = 500,
select = c("first", "random"), header = "", subheader = NULL,
highlight = NULL, scale = NULL, category = NULL,
meta_cols = NULL, seed = NA, nav = NULL, view = T,
highlight_col = "yellow", scale_col = c("blue", "red"),
filename = NULL)
}
\arguments{
\item{tc}{a tCorpus}
\item{doc_ids}{A vector with document ids to view}
\item{token_col}{The name of the column in tc$tokens that contain the token text}
\item{n}{Only n of the results are printed (to prevent accidentally making huge browsers).}
\item{select}{If n is smaller than the number of documents in tc, select determines how the n documents are selected}
\item{header}{Optionally, a title presented at the top of the browser}
\item{subheader}{Optionally, overwrite the subheader. By default the subheader reports the number of documents}
\item{highlight}{The name of a numeric column in tc$tokens with values between 0 and 1, used to highlight tokens.
Can also be a character vector, in which case al non-NA values are highlighted}
\item{scale}{The name of a numeric column in tc$tokens with values between -1 and 1, used to color tokens on a scale (set colors with scale_col)}
\item{category}{The name of a character or factor column in tc$tokens. Each unique value will have its own color, and navigation for categories will be added (nav cannot be used with this option)}
\item{meta_cols}{A character vector with names of columns in tc$meta, used to only show the selected columns}
\item{seed}{If select is "random", seed can be used to set a random seed}
\item{nav}{Optionally, a column in tc$meta to add navigation (only supports simple filtering on unique values).
This is not possible if annotate is used.}
\item{view}{If TRUE (default), view the browser in the Viewer window (turn off if this is not supported)}
\item{highlight_col}{If highlight is used, the color for highlighting}
\item{scale_col}{If scale is used, a vector with 2 or more colors used to create a color ramp. That is, -1 is first color, +1 is last color, if three colors are given 0 matches the middle color, and colors in between are interpolated.}
\item{filename}{Optionally, save the browser at a specified location}
}
\value{
The url for the file location is returned (invisibly)
}
\description{
Creates a static HTML file to view the texts in the tcorpus in full text mode.
}
\examples{
\dontrun{
tc = create_tcorpus(sotu_texts, doc_column='id')
url = browse_texts(tc)
}
}
|
## Assignment 1, Exploratory Data Analysis -- Plot 3
## Plots the three energy sub-metering series for 1-2 Feb 2007 from the
## UCI household power consumption data into plot3.png.
## (The device is closed by the dev.off() call that follows this block.)
##
## NOTE(review): the original began with rm(list=ls()); wiping the
## global environment from inside a script is an anti-pattern and has
## been removed -- run the script in a fresh session instead.

# Read the full data set; fields are ';'-separated and missing values
# are coded as '?'.
data <- read.table("~/desktop/data-science/exploratory-data-analysis/household_power_consumption.txt",
                   sep = ";", nrows = 2075260, header = TRUE, quote = "",
                   strip.white = TRUE, stringsAsFactors = FALSE,
                   na.strings = "?")

# Keep only the two days of interest.
sub_Set <- subset(data, data$Date == "1/2/2007" | data$Date == "2/2/2007")

# Convert Date from character to Date class.
sub_Set$Date <- as.Date(sub_Set$Date, format = "%d/%m/%Y")

# Combine Date and Time into a single POSIXct timestamp column.
sub_Set$DateTime <- as.POSIXct(paste(sub_Set$Date, sub_Set$Time))

# Draw the three sub-metering series into plot3.png.
png("plot3.png", width = 480, height = 480)
plot(sub_Set$DateTime, sub_Set$Sub_metering_1, type = "l",
     ylab = "Energy sub metering", xlab = "")
lines(sub_Set$DateTime, sub_Set$Sub_metering_2, type = "l", col = "red")
lines(sub_Set$DateTime, sub_Set$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"))
#end
dev.off() | /Plot3Code.R | no_license | PhilShort86/ExData_Plotting1 | R | false | false | 1,121 | r | ##Assignement 1 exploratory data analysis
##Plot 3
rm(list=ls())
# Load the full household power data set; '?' marks missing values.
power_df <- read.table("~/desktop/data-science/exploratory-data-analysis/household_power_consumption.txt",
                       sep = ";", nrows = 2075260, header = TRUE, quote = "",
                       strip.white = TRUE, stringsAsFactors = FALSE,
                       na.strings = "?")
# Restrict to the two days of interest (1 and 2 Feb 2007).
sub_Set <- power_df[power_df$Date %in% c("1/2/2007", "2/2/2007"), ]
# Convert the character Date column to Date class.
sub_Set$Date <- as.Date(sub_Set$Date, format = "%d/%m/%Y")
# Merge Date and Time into a single POSIXct "DateTime" column.
sub_Set$DateTime <- as.POSIXct(paste(sub_Set$Date, sub_Set$Time))
# Render the three sub-metering series to plot3.png.
png("plot3.png", width = 480, height = 480)
plot(sub_Set$DateTime, sub_Set$Sub_metering_1, type = "l",
     ylab = "Energy sub metering", xlab = "")
lines(sub_Set$DateTime, sub_Set$Sub_metering_2, type = "l", col = "red")
lines(sub_Set$DateTime, sub_Set$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"))
#end
dev.off() |
complete <- function(directory, id = 1:332) {
  ## Count complete observations per pollution-monitor file.
  ##
  ## directory: character scalar naming the folder that holds the
  ##   monitor CSV files ("001.csv", "002.csv", ...).
  ## id: integer vector of monitor ID numbers to summarise.
  ##
  ## Returns a data frame of the form:
  ##   id nobs
  ##   1  117
  ##   2  1041
  ## where 'id' is the monitor id number and 'nobs' is the number of
  ## rows in that monitor's file with no missing values.
  ##
  ## The original grew `nobs` with c() inside a loop (quadratic
  ## copying); vapply preallocates and guarantees a numeric result.
  nobs <- vapply(id, function(i) {
    filename <- sprintf("%03d.csv", i)  # zero-padded 3-digit file names
    filePath <- file.path(directory, filename, fsep = .Platform$file.sep)
    data <- read.csv(filePath)
    sum(complete.cases(data))           # rows with no NA in any column
  }, numeric(1))
  data.frame(id, nobs)
}
| /02-RProgramming/complete.R | permissive | Stocastico/datasciencecoursera | R | false | false | 698 | r | complete <- function( directory, id=1:332 ) {
## directory is a character vector representing the location
## of the CSV files
##
## id is an integer vector indicating the monitor ID numbers
## to be used
##
## Return a data frame of the form:
## id nobs
## 1 117
## 2 1041
## ...
## where 'id' is the monitor id number and 'nobs' is the
## number of complete cases
##
## NOTE(review): nobs is grown with c() inside the loop, which copies
## the vector on every iteration; preallocating (or using vapply)
## would be preferable for large id vectors.
nobs = numeric()
for (i in id) {
## Monitor files use zero-padded 3-digit names, e.g. "007.csv".
filename <- sprintf("%03d.csv", i)
filePath <- file.path( directory, filename, fsep = .Platform$file.sep )
data <- read.csv(filePath)
## Keep only rows with no missing values in any column.
dataClean <- data[complete.cases(data), ]
nobs <- c( nobs, nrow(dataClean) )
}
data.frame(id, nobs)
}
|
#' Events to activities
#' @description
#' `r lifecycle::badge("deprecated")` Create an activity log starting from an event log or regular data.frame. This function is deprecated and replaced by the function `activitylog` (for dataframe) and `to_activitylog` for eventlogs.
#'
#' @param eventlog The event log to be converted. An object of class
#' \code{eventlog} or \code{data.frame}
#' @param case_id If eventlog is data.frame, the case classifier of the event log. A character vector containing variable names of length 1 or more.
#' @param activity_id If eventlog is data.frame, the activity classifier of the event log. A character vector containing variable names of length 1 or more.
#' @param activity_instance_id If eventlog is data.frame, the activity instance classifier of the event log.
#' @param lifecycle_id If eventlog is data.frame, the life cycle classifier of the event log.
#' @param timestamp If eventlog is data.frame, the timestamp of the event log. Should refer to a Date or POSIXct field.
#' @param resource_id If eventlog is data.frame, the resource identifier of the event log. A character vector containing variable names of length 1 or more.
#' @param ... Additional arguments, i.e. for fixing resource inconsistencies
#' @importFrom stringr str_subset
#' @importFrom lifecycle deprecate_warn
#'
#' @export
#'
events_to_activitylog <- function(eventlog, case_id, activity_id, activity_instance_id, lifecycle_id, timestamp, resource_id, ...) {
# S3 generic: dispatches on the class of `eventlog` (data.frame and
# eventlog methods are defined below).
UseMethod("events_to_activitylog")
}
#' @export
events_to_activitylog.data.frame <- function(eventlog, case_id, activity_id, activity_instance_id, lifecycle_id, timestamp, resource_id, ...) {
  # Deprecated wrapper: coerce the raw data.frame into an eventlog,
  # quietly repair resource inconsistencies where possible, then
  # delegate to the eventlog method.
  deprecate_warn("5.0.0", "events_to_activitylog()", "to_activitylog()")

  # Build the eventlog object; constructor warnings are not relevant to
  # callers of this deprecated path.
  suppressWarnings({
    eventlog <- eventlog(eventlog,
                         case_id = case_id,
                         activity_id = activity_id,
                         activity_instance_id = activity_instance_id,
                         lifecycle_id = lifecycle_id,
                         timestamp = timestamp,
                         resource_id = resource_id)
  })

  # fix_resource_inconsistencies() returns NULL when there was nothing
  # to fix; otherwise use the repaired log.  (details = FALSE replaces
  # the non-idiomatic `F` of the original.)
  suppressMessages({
    fixed <- invisible(fix_resource_inconsistencies(eventlog, ..., details = FALSE))
  })
  if (!is.null(fixed)) {
    eventlog <- fixed
  }

  events_to_activitylog(eventlog)
}
#' @export
events_to_activitylog.eventlog <- function(eventlog, case_id = NULL, activity_id = NULL, activity_instance_id = NULL, lifecycle_id = NULL, timestamp = NULL, resource_id = NULL, ...) {
  # Deprecated: reshape an eventlog (one row per event) into an
  # activitylog (one row per activity instance, with one timestamp
  # column per lifecycle state).  The classifier arguments only mirror
  # the generic's signature; the eventlog's own mapping is used.
  deprecate_warn("5.0.0", "events_to_activitylog()", "to_activitylog()")

  # Placate R CMD check for the non-standard-evaluation column below.
  .order <- NULL

  # Resource inconsistencies must be fixed before spreading.
  if (!is.null(suppressMessages(detect_resource_inconsistencies(eventlog)))) {
    stop("Eventlog contains resource inconsistencies. First use fix_resource_inconsistencies to fix problem")
  }

  eventlog <- standardize_lifecycle(eventlog)

  # Spread the lifecycle column into one timestamp column per state.
  activitylog <- eventlog %>%
    select(-.order, force_df = TRUE) %>%
    spread(!!lifecycle_id_(eventlog), !!timestamp_(eventlog))

  # Guarantee that 'start' and 'complete' columns exist, initialising
  # any missing one to NA timestamps.
  if (!("start" %in% colnames(activitylog))) {
    warning("No start events were found. Creating and initialising 'start' to NA.")
    activitylog$start <- lubridate::NA_POSIXct_
  }
  if (!("complete" %in% colnames(activitylog))) {
    warning("No complete events were found. Creating and initialising 'complete' to NA.")
    activitylog$complete <- lubridate::NA_POSIXct_
  }

  # Rebuild as an activitylog, listing 'start' and 'complete' first
  # followed by any other lifecycle labels.  (negate = TRUE replaces
  # the non-idiomatic `T` of the original.)
  activitylog(as.data.frame(activitylog),
              case_id = case_id(eventlog),
              activity_id = activity_id(eventlog),
              resource_id = resource_id(eventlog),
              lifecycle_ids = as.vector(c("start", "complete",
                                          str_subset(lifecycle_labels(eventlog),
                                                     c("(start)|(complete)"),
                                                     negate = TRUE))))
}
| /R/events_to_activitylog.R | no_license | cran/bupaR | R | false | false | 3,643 | r |
#' Events to activities
#' @description
#' `r lifecycle::badge("deprecated")` Create an activity log starting from an event log or regular data.frame. This function is deprecated and replaced by the function `activitylog` (for dataframe) and `to_activitylog` for eventlogs.
#'
#' @param eventlog The event log to be converted. An object of class
#' \code{eventlog} or \code{data.frame}
#' @param case_id If eventlog is data.frame, the case classifier of the event log. A character vector containing variable names of length 1 or more.
#' @param activity_id If eventlog is data.frame, the activity classifier of the event log. A character vector containing variable names of length 1 or more.
#' @param activity_instance_id If eventlog is data.frame, the activity instance classifier of the event log.
#' @param lifecycle_id If eventlog is data.frame, the life cycle classifier of the event log.
#' @param timestamp If eventlog is data.frame, the timestamp of the event log. Should refer to a Date or POSIXct field.
#' @param resource_id If eventlog is data.frame, the resource identifier of the event log. A character vector containing variable names of length 1 or more.
#' @param ... Additional arguments, i.e. for fixing resource inconsistencies
#' @importFrom stringr str_subset
#' @importFrom lifecycle deprecate_warn
#'
#' @export
#'
events_to_activitylog <- function(eventlog, case_id, activity_id, activity_instance_id, lifecycle_id, timestamp, resource_id, ...) {
# S3 generic: dispatches on the class of `eventlog` (data.frame and
# eventlog methods are defined below).
UseMethod("events_to_activitylog")
}
#' @export
events_to_activitylog.data.frame <- function(eventlog, case_id, activity_id, activity_instance_id, lifecycle_id, timestamp, resource_id, ...) {
# Deprecated wrapper: build an eventlog object from the raw data.frame,
# quietly repair resource inconsistencies, then delegate to the
# eventlog method.
deprecate_warn("5.0.0", "events_to_activitylog()", "to_activitylog()")
# Constructor warnings are suppressed on this deprecated path.
suppressWarnings({
eventlog(eventlog,
case_id = case_id,
activity_id = activity_id,
activity_instance_id = activity_instance_id,
lifecycle_id = lifecycle_id,
timestamp = timestamp,
resource_id = resource_id) -> eventlog
})
# fix_resource_inconsistencies() returns NULL when nothing needed
# fixing; otherwise use the repaired log.
suppressMessages({fixed <- invisible(fix_resource_inconsistencies(eventlog, ..., details = F))})
if(!is.null(fixed))
eventlog <- fixed
events_to_activitylog(eventlog)
}
#' @export
events_to_activitylog.eventlog <- function(eventlog, case_id = NULL, activity_id = NULL, activity_instance_id = NULL, lifecycle_id = NULL, timestamp = NULL, resource_id = NULL, ...) {
# Deprecated: reshape an eventlog (one row per event) into an
# activitylog (one row per activity instance, one timestamp column per
# lifecycle state).  The classifier arguments only mirror the generic's
# signature; the eventlog's own mapping is used.
deprecate_warn("5.0.0", "events_to_activitylog()", "to_activitylog()")
# Placate R CMD check for the non-standard-evaluation column below.
.order <- NULL
if(!is.null(suppressMessages(detect_resource_inconsistencies(eventlog)))) {
stop("Eventlog contains resource inconsistencies. First use fix_resource_inconsistencies to fix problem")
}
eventlog <- standardize_lifecycle(eventlog)
# Spread the lifecycle column into one timestamp column per state.
eventlog %>%
select(-.order, force_df = TRUE) %>%
spread(!!lifecycle_id_(eventlog), !!timestamp_(eventlog)) -> activitylog
### Check if the columns start and complete exist. If not, initiate them to NA
if(!("start" %in% colnames(activitylog))){
warning("No start events were found. Creating and initialising 'start' to NA.")
activitylog$start <- lubridate::NA_POSIXct_
}
if(!("complete" %in% colnames(activitylog))){
warning("No complete events were found. Creating and initialising 'complete' to NA.")
activitylog$complete <- lubridate::NA_POSIXct_
}
# Rebuild as an activitylog, listing 'start' and 'complete' first
# followed by any other lifecycle labels.
activitylog(as.data.frame(activitylog),
case_id = case_id(eventlog),
activity_id = activity_id(eventlog),
resource_id = resource_id(eventlog),
lifecycle_ids = as.vector(c("start","complete", str_subset(lifecycle_labels(eventlog), c("(start)|(complete)"), negate = T)))
)
}
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
options(shiny.maxRequestSize=600*1024^2)
shinyServer(function(input, output, session) {
session$onSessionEnded(function() {
unlink("ImageAppoutput",recursive = TRUE)
stopApp()
})
output$status <- renderText({
if (is.null(input$gfp_file)) "Select inputs" else "Hit Process" })
#
observeEvent(input$run,{
if (is.null( input$gfp_file) )
{
return()
} else {
#process data
output$status <- renderText("Processing Date")
setwd(path.expand("~"))
suppressWarnings(
withProgress(message = "Processing Data",value = 0,{
main_script(input$gfp_file$datapath,input$total_file$datapath,
input$map_file$datapath,getwd())
}
)
)
zip::zip("results.zip","ImageAppoutput",recurse = TRUE)
output$status <- renderText("Analysis Completed")
enable("downloadData")
}
})
#### Download output
output$downloadData <- downloadHandler(
filename = function() {
paste0("Results-",format(Sys.time(),"%Y-%m-%d--%H:%M"),".zip")
},
content = function(file) {
file.copy(from = "results.zip", to = file)
}
,contentType = "application/zip"
)
#observe file inputs
file_exists <- reactive({
list(input$gfp_file, input$total_file, input$map_file)
})
observeEvent( file_exists(), {
if (!( is.null(input$gfp_file) | is.null(input$total_file) | is.null(input$map_file) ) ){
enable("run")
}
})
})
| /server.R | no_license | ceparman/Imaging-App | R | false | false | 1,938 | r |
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
options(shiny.maxRequestSize=600*1024^2)
shinyServer(function(input, output, session) {
session$onSessionEnded(function() {
unlink("ImageAppoutput",recursive = TRUE)
stopApp()
})
output$status <- renderText({
if (is.null(input$gfp_file)) "Select inputs" else "Hit Process" })
#
observeEvent(input$run,{
if (is.null( input$gfp_file) )
{
return()
} else {
#process data
output$status <- renderText("Processing Date")
setwd(path.expand("~"))
suppressWarnings(
withProgress(message = "Processing Data",value = 0,{
main_script(input$gfp_file$datapath,input$total_file$datapath,
input$map_file$datapath,getwd())
}
)
)
zip::zip("results.zip","ImageAppoutput",recurse = TRUE)
output$status <- renderText("Analysis Completed")
enable("downloadData")
}
})
#### Download output
output$downloadData <- downloadHandler(
filename = function() {
paste0("Results-",format(Sys.time(),"%Y-%m-%d--%H:%M"),".zip")
},
content = function(file) {
file.copy(from = "results.zip", to = file)
}
,contentType = "application/zip"
)
#observe file inputs
file_exists <- reactive({
list(input$gfp_file, input$total_file, input$map_file)
})
observeEvent( file_exists(), {
if (!( is.null(input$gfp_file) | is.null(input$total_file) | is.null(input$map_file) ) ){
enable("run")
}
})
})
|
source("helper.R")
# Shiny server for the IT-systems dashboard.
# Responsibilities: render the overview bar charts and the interactive
# visNetwork diagram, keep the three filter checkbox groups in sync with
# their toggle buttons, and drive the single-system "inspector" panel.
# NOTE(review): nodes, edges, lnodes and the helpers
# (dept_class_host_barchart, class_host_barchart, tweak_graph,
# make_mult_updater, make_checkbox_updator, make_choice_list, vis_inspect)
# are defined in helper.R and are not visible in this file.
server <- function(input, output, session) {
    ## Render overview charts
    output$dept_class_host_barchart <- renderPlot(dept_class_host_barchart())
    output$class_host_barchart <- renderPlot(class_host_barchart())
    ## Render network diagram
    output$network <- renderVisNetwork({
        visNetwork(nodes, edges) %>%
            visOptions(selectedBy = list(variable="DEPT"),
                       highlightNearest = list(enabled=T,
                                               degree = list(from=1, to=1),
                                               algorithm = "hierarchical"),
                       ) %>%
            visLegend(useGroups=F, addNodes=lnodes) %>%
            tweak_graph()
    })
    # Nodes that pass all three checkbox filters (dept/classification/hosting).
    df_filter <- reactive({nodes %>%
        filter(DEPT %in% input$dept) %>%
        filter(CLASSIFICATION %in% input$classification) %>%
        filter(HOSTING_MODEL %in% input$hosting)
    })
    # Dropdown choices derived from the filtered node set.
    choice_list <- reactive({
        df_filter() %>%
            make_choice_list
    })
    ## Update checkboxes upon activation of toggle button
    update_all <- make_mult_updater(list(make_checkbox_updator(session, "dept", {nodes %>% pull(DEPT) %>% levels}),
                                         make_checkbox_updator(session, "hosting", {nodes %>% pull(HOSTING_MODEL) %>% levels}),
                                         make_checkbox_updator(session, "classification", {nodes %>% pull(CLASSIFICATION) %>% levels})))
    observeEvent(input$toggle_all, {update_all()})
    update_depts <- make_checkbox_updator(session, "dept", {nodes %>% pull(DEPT) %>% levels})
    observeEvent(input$toggle_depts, {update_depts()})
    update_hosting <- make_checkbox_updator(session, "hosting", {nodes %>% pull(HOSTING_MODEL) %>% levels})
    observeEvent(input$toggle_hosting, {update_hosting()})
    update_class <- make_checkbox_updator(session, "classification", {nodes %>% pull(CLASSIFICATION) %>% levels})
    observeEvent(input$toggle_class, {update_class()})
    # All systems
    ## Nodes table
    output$nodesTable <- renderDataTable({{nodes %>% select(matches("^[A-Z_]+$", ignore.case = F))}})
    # Update dropdown list of systems
    observe({updateSelectizeInput(session, "system_select", choices = choice_list())})
    ## Chosen/selected system
    chosen_system <- reactiveVal()
    # TRUE while nothing is selected in the dropdown.
    selected_isnull <- reactive({nchar(input$system_select)==0})
    # Enable/disable the "Choose" button depending on whether the dropdown
    # selection is empty.
    # NOTE(review): name reads oddly; "update_selected_null" was likely intended.
    updated_selected_null <- function(is_null){
        if (is_null) {
            updateButton(session=session,
                         inputId="choose_system",
                         label="Invalid selection",
                         style= "secondary",
                         disabled=T)
        }
        else{
            updateButton(session=session,
                         inputId="choose_system",
                         label="Choose",
                         style="primary",
                         disabled=F)
        }
    }
    observeEvent(selected_isnull(), {updated_selected_null(selected_isnull())})
    observeEvent(input$choose_system, {chosen_system(input$system_select)})
    ## Build system inspector
    output$inspect_system <- renderUI({system_inspector(chosen_system())})
    output$sys_feature_table <- renderTable({nodes %>%
        filter(id == chosen_system()) %>%
        select(matches("^[A-Z]", ignore.case=F))})
    output$inspected_network <- renderVisNetwork(vis_inspect(nodes, edges, chosen_system()))
    # UI fragment for the inspector: a placeholder until a system is chosen,
    # then a feature table plus a focused network view.
    # (Defined after its first use in renderUI above; safe because the
    # reactive only evaluates lazily, after this function exists.)
    system_inspector <- function(system_select){
        if (is.null(system_select)) {
            h3({"Please select a system !!!"})
        }
        else{
            fillPage(
                box(title=div("Selected system: ", {nodes %>% filter(id==system_select) %>% pull(FULL_NAME)}),
                    status="primary",
                    tableOutput("sys_feature_table"),
                    width=12),
                box(title="View system",
                    status="primary",
                    width=12,
                    visNetworkOutput("inspected_network", width = "100%")),
                actionButton(inputId="switch_system",
                             label="Switch to selected system",
                             class="pull-right",
                             style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
            )
        }
    }
    # Clicking "switch" re-targets the inspector at the node selected in
    # the inspector's own network view.
    observeEvent(input$switch_system, {chosen_system(input$inspected_network_selected)})
    ## Debugging etc.
    # NOTE(review): console logging left in; remove for production.
    observe({cat("selected_isnull: ", selected_isnull(), "\n")})
    observe({cat("selected: ", input$system_select, "\n")})
    observe({cat("chosen: ", chosen_system(), "\n")})
}
# Shiny server for the IT-systems dashboard (duplicate copy of the same
# file). Renders overview charts and the interactive network, keeps the
# filter checkboxes synced with their toggle buttons, and drives the
# single-system inspector panel. Helpers and the nodes/edges/lnodes data
# come from helper.R (not visible here).
server <- function(input, output, session) {
    ## Render overview charts
    output$dept_class_host_barchart <- renderPlot(dept_class_host_barchart())
    output$class_host_barchart <- renderPlot(class_host_barchart())
    ## Render network diagram
    output$network <- renderVisNetwork({
        visNetwork(nodes, edges) %>%
            visOptions(selectedBy = list(variable="DEPT"),
                       highlightNearest = list(enabled=T,
                                               degree = list(from=1, to=1),
                                               algorithm = "hierarchical"),
                       ) %>%
            visLegend(useGroups=F, addNodes=lnodes) %>%
            tweak_graph()
    })
    # Nodes that pass all three checkbox filters.
    df_filter <- reactive({nodes %>%
        filter(DEPT %in% input$dept) %>%
        filter(CLASSIFICATION %in% input$classification) %>%
        filter(HOSTING_MODEL %in% input$hosting)
    })
    # Dropdown choices derived from the filtered node set.
    choice_list <- reactive({
        df_filter() %>%
            make_choice_list
    })
    ## Update checkboxes upon activation of toggle button
    update_all <- make_mult_updater(list(make_checkbox_updator(session, "dept", {nodes %>% pull(DEPT) %>% levels}),
                                         make_checkbox_updator(session, "hosting", {nodes %>% pull(HOSTING_MODEL) %>% levels}),
                                         make_checkbox_updator(session, "classification", {nodes %>% pull(CLASSIFICATION) %>% levels})))
    observeEvent(input$toggle_all, {update_all()})
    update_depts <- make_checkbox_updator(session, "dept", {nodes %>% pull(DEPT) %>% levels})
    observeEvent(input$toggle_depts, {update_depts()})
    update_hosting <- make_checkbox_updator(session, "hosting", {nodes %>% pull(HOSTING_MODEL) %>% levels})
    observeEvent(input$toggle_hosting, {update_hosting()})
    update_class <- make_checkbox_updator(session, "classification", {nodes %>% pull(CLASSIFICATION) %>% levels})
    observeEvent(input$toggle_class, {update_class()})
    # All systems
    ## Nodes table
    output$nodesTable <- renderDataTable({{nodes %>% select(matches("^[A-Z_]+$", ignore.case = F))}})
    # Update dropdown list of systems
    observe({updateSelectizeInput(session, "system_select", choices = choice_list())})
    ## Chosen/selected system
    chosen_system <- reactiveVal()
    # TRUE while the dropdown selection is empty.
    selected_isnull <- reactive({nchar(input$system_select)==0})
    # Toggle the "Choose" button between enabled/disabled states.
    updated_selected_null <- function(is_null){
        if (is_null) {
            updateButton(session=session,
                         inputId="choose_system",
                         label="Invalid selection",
                         style= "secondary",
                         disabled=T)
        }
        else{
            updateButton(session=session,
                         inputId="choose_system",
                         label="Choose",
                         style="primary",
                         disabled=F)
        }
    }
    observeEvent(selected_isnull(), {updated_selected_null(selected_isnull())})
    observeEvent(input$choose_system, {chosen_system(input$system_select)})
    ## Build system inspector
    output$inspect_system <- renderUI({system_inspector(chosen_system())})
    output$sys_feature_table <- renderTable({nodes %>%
        filter(id == chosen_system()) %>%
        select(matches("^[A-Z]", ignore.case=F))})
    output$inspected_network <- renderVisNetwork(vis_inspect(nodes, edges, chosen_system()))
    # Inspector UI: placeholder until a system is chosen, then a feature
    # table plus a focused network view with a "switch" button.
    system_inspector <- function(system_select){
        if (is.null(system_select)) {
            h3({"Please select a system !!!"})
        }
        else{
            fillPage(
                box(title=div("Selected system: ", {nodes %>% filter(id==system_select) %>% pull(FULL_NAME)}),
                    status="primary",
                    tableOutput("sys_feature_table"),
                    width=12),
                box(title="View system",
                    status="primary",
                    width=12,
                    visNetworkOutput("inspected_network", width = "100%")),
                actionButton(inputId="switch_system",
                             label="Switch to selected system",
                             class="pull-right",
                             style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
            )
        }
    }
    # Re-target the inspector at the node selected in its own network view.
    observeEvent(input$switch_system, {chosen_system(input$inspected_network_selected)})
    ## Debugging etc.
    # NOTE(review): console logging left in; remove for production.
    observe({cat("selected_isnull: ", selected_isnull(), "\n")})
    observe({cat("selected: ", input$system_select, "\n")})
    observe({cat("chosen: ", chosen_system(), "\n")})
}
#' The fitted values from the Ridge regression 'ridgereg'
#'
#' The pred()-statement returns a vector including all fitted values from the Ridge regression.
#'
#' @param l A fitted 'ridgereg' object (or a list) containing the elements
#'   \code{y.hat} (in-sample fitted values) and \code{coefficients}.
#' @param newdata Optional matrix or data.frame of new observations. When
#'   omitted, or when it contains any \code{NA}, the in-sample fitted
#'   values \code{l$y.hat} are returned instead of new predictions.
#' @return A numeric vector of fitted values, or a one-column matrix of
#'   predictions when \code{newdata} is supplied.
#'
#' @export
pred.ridgereg <- function(l, newdata = NA) {
  if (any(is.na(newdata))) {
    # No usable new data: fall back to the in-sample fitted values.
    return(l$y.hat)
  } else if (is.matrix(newdata) || is.data.frame(newdata)) {
    if (!all(newdata[, 1] == 1)) {
      # Creates an extra column of 1's so the intercept (b0) coefficient
      # is applied during the matrix multiplication.
      newdata <- as.matrix(cbind(as.matrix(rep(1, nrow(newdata)), ncol = 1), newdata))
    }
    yhat <- newdata %*% l$coefficients
    return(yhat)
  }
}
#pred <- function(l,newdata =NA) UseMethod("pred")
#pred(mylm, newdata=longley[11:16,-1])
| /R/pred.ridgereg.R | no_license | Antpe404/lab4 | R | false | false | 710 | r | #' The fitted values from the Ridge regression 'ridgereg'
#'
#' The pred()-statement returns a vector including all fitted values from the Ridge regression.
#'
#'
#' @export
# Predicted values for a fitted 'ridgereg' object.
# With no (or NA) newdata the in-sample fitted values l$y.hat are returned;
# otherwise newdata is multiplied by the coefficient vector, prepending an
# intercept column of 1's when one is not already present.
pred.ridgereg <- function(l, newdata = NA) {
  if (any(is.na(newdata))) {
    return(l$y.hat)
  }
  if (is.matrix(newdata) || is.data.frame(newdata)) {
    if (!all(newdata[, 1] == 1)) {
      intercept_col <- as.matrix(rep(1, nrow(newdata)), ncol = 1)
      newdata <- as.matrix(cbind(intercept_col, newdata))
    }
    return(newdata %*% l$coefficients)
  }
}
#pred <- function(l,newdata =NA) UseMethod("pred")
#pred(mylm, newdata=longley[11:16,-1])
|
#' @importFrom ggplot2 position_nudge
#' @title interactive textual annotations.
#'
#' @description
#' The geometry is based on \code{\link[ggplot2]{geom_text}}.
#' See the documentation for those functions for more details.
#'
#' @seealso \code{\link{ggiraph}}
#' @inheritParams geom_point_interactive
#' @param parse See \code{\link[ggplot2]{geom_text}}.
#' @param nudge_x,nudge_y See \code{\link[ggplot2]{geom_text}}.
#' @param check_overlap See \code{\link[ggplot2]{geom_text}}.
#' @examples
#' # add interactive text labels to a ggplot -------
#' @example examples/geom_text_interactive.R
#' @export
geom_text_interactive <- function(mapping = NULL, data = NULL, stat = "identity",
                      position = "identity", parse = FALSE, ...,
                      nudge_x = 0, nudge_y = 0, check_overlap = FALSE,
                      na.rm = FALSE, show.legend = NA, inherit.aes = TRUE)
{
  # As in ggplot2::geom_text(): nudge_* and an explicit position are
  # mutually exclusive; nudging is implemented via position_nudge().
  if (!missing(nudge_x) || !missing(nudge_y)) {
    if (!missing(position)) {
      stop("Specify either `position` or `nudge_x`/`nudge_y`", call. = FALSE)
    }
    position <- position_nudge(nudge_x, nudge_y)
  }
  # Build the layer with the interactive geom; extra arguments in `...`
  # (e.g. interactive aesthetics) are forwarded through params.
  layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = GeomInteractiveText,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      parse = parse,
      check_overlap = check_overlap,
      na.rm = na.rm,
      ...
    )
  )
}
# ggproto Geom backing geom_text_interactive(). Mirrors ggplot2::GeomText
# but adds the interactive aesthetics tooltip, onclick and data_id, which
# are passed through to the interactive text grob.
GeomInteractiveText <- ggproto(
  "GeomInteractiveText",
  Geom,
  required_aes = c("x", "y", "label"),
  default_aes = aes(
    colour = "black", size = 3.88, angle = 0,
    hjust = 0.5, vjust = 0.5, alpha = NA,
    family = "", fontface = 1, lineheight = 1.2,
    tooltip = NULL, onclick = NULL, data_id = NULL
  ),
  draw_panel = function(data,
                        panel_scales,
                        coord,
                        parse = FALSE,
                        na.rm = FALSE,
                        check_overlap = FALSE) {
    lab <- data$label
    if (parse) {
      # Interpret labels as plotmath expressions.
      lab <- parse(text = as.character(lab))
    }
    data <- coord$transform(data, panel_scales)
    # Resolve keyword justifications ("inward"/"outward"/"left"/...) to
    # numeric values relative to the transformed position.
    if (is.character(data$vjust)) {
      data$vjust <- compute_just(data$vjust, data$y)
    }
    if (is.character(data$hjust)) {
      data$hjust <- compute_just(data$hjust, data$x)
    }
    # The interactive attributes must be character vectors for the grob.
    if( !is.null(data$tooltip) && !is.character(data$tooltip) )
      data$tooltip <- as.character(data$tooltip)
    if( !is.null(data$onclick) && !is.character(data$onclick) )
      data$onclick <- as.character(data$onclick)
    if( !is.null(data$data_id) && !is.character(data$data_id) )
      data$data_id <- as.character(data$data_id)
    interactive_text_grob(
      lab,
      data$x,
      data$y,
      tooltip = data$tooltip,
      onclick = data$onclick,
      data_id = data$data_id,
      default.units = "native",
      hjust = data$hjust,
      vjust = data$vjust,
      rot = data$angle,
      gp = gpar(
        col = alpha(data$colour, data$alpha),
        fontsize = data$size * .pt,
        fontfamily = data$family,
        fontface = data$fontface,
        lineheight = data$lineheight
      ),
      check.overlap = check_overlap
    )
  },
  draw_key = draw_key_text
)
# Translate character justification keywords into numeric justification.
# "inward"/"outward" are resolved per element according to which side of
# the panel the position x falls on (via just_dir); the remaining keywords
# are looked up in a fixed table.
compute_just <- function(just, x) {
  is_inward <- just == "inward"
  just[is_inward] <- c("left", "middle", "right")[just_dir(x[is_inward])]
  is_outward <- just == "outward"
  just[is_outward] <- c("right", "middle", "left")[just_dir(x[is_outward])]
  lookup <- c(left = 0, center = 0.5, right = 1,
              bottom = 0, middle = 0.5, top = 1)
  unname(lookup[just])
}

# Classify positions into 1 (below 0.5), 2 (at 0.5 within tol), 3 (above).
just_dir <- function(x, tol = 0.001) {
  dir <- rep(2L, length(x))
  dir[x < 0.5 - tol] <- 1L
  dir[x > 0.5 + tol] <- 3L
  dir
}
| /R/geom_text_interactive.R | no_license | vbisrikkanth/ggiraph | R | false | false | 3,699 | r | #' @importFrom ggplot2 position_nudge
#' @title interactive textual annotations.
#'
#' @description
#' The geometry is based on \code{\link[ggplot2]{geom_text}}.
#' See the documentation for those functions for more details.
#'
#' @seealso \code{\link{ggiraph}}
#' @inheritParams geom_point_interactive
#' @param parse See \code{\link[ggplot2]{geom_point}}.
#' @param nudge_x,nudge_y See \code{\link[ggplot2]{geom_point}}.
#' @param check_overlap See \code{\link[ggplot2]{geom_point}}.
#' @examples
#' # add interactive polygons to a ggplot -------
#' @example examples/geom_text_interactive.R
#' @export
geom_text_interactive <- function(mapping = NULL, data = NULL, stat = "identity",
position = "identity", parse = FALSE, ...,
nudge_x = 0, nudge_y = 0, check_overlap = FALSE,
na.rm = FALSE, show.legend = NA, inherit.aes = TRUE)
{
if (!missing(nudge_x) || !missing(nudge_y)) {
if (!missing(position)) {
stop("Specify either `position` or `nudge_x`/`nudge_y`", call. = FALSE)
}
position <- position_nudge(nudge_x, nudge_y)
}
layer(
data = data,
mapping = mapping,
stat = stat,
geom = GeomInteractiveText,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
parse = parse,
check_overlap = check_overlap,
na.rm = na.rm,
...
)
)
}
GeomInteractiveText <- ggproto(
"GeomInteractiveText",
Geom,
required_aes = c("x", "y", "label"),
default_aes = aes(
colour = "black", size = 3.88, angle = 0,
hjust = 0.5, vjust = 0.5, alpha = NA,
family = "", fontface = 1, lineheight = 1.2,
tooltip = NULL, onclick = NULL, data_id = NULL
),
draw_panel = function(data,
panel_scales,
coord,
parse = FALSE,
na.rm = FALSE,
check_overlap = FALSE) {
lab <- data$label
if (parse) {
lab <- parse(text = as.character(lab))
}
data <- coord$transform(data, panel_scales)
if (is.character(data$vjust)) {
data$vjust <- compute_just(data$vjust, data$y)
}
if (is.character(data$hjust)) {
data$hjust <- compute_just(data$hjust, data$x)
}
if( !is.null(data$tooltip) && !is.character(data$tooltip) )
data$tooltip <- as.character(data$tooltip)
if( !is.null(data$onclick) && !is.character(data$onclick) )
data$onclick <- as.character(data$onclick)
if( !is.null(data$data_id) && !is.character(data$data_id) )
data$data_id <- as.character(data$data_id)
interactive_text_grob(
lab,
data$x,
data$y,
tooltip = data$tooltip,
onclick = data$onclick,
data_id = data$data_id,
default.units = "native",
hjust = data$hjust,
vjust = data$vjust,
rot = data$angle,
gp = gpar(
col = alpha(data$colour, data$alpha),
fontsize = data$size * .pt,
fontfamily = data$family,
fontface = data$fontface,
lineheight = data$lineheight
),
check.overlap = check_overlap
)
},
draw_key = draw_key_text
)
compute_just <- function(just, x) {
inward <- just == "inward"
just[inward] <- c("left", "middle", "right")[just_dir(x[inward])]
outward <- just == "outward"
just[outward] <- c("right", "middle", "left")[just_dir(x[outward])]
unname(c(left = 0, center = 0.5, right = 1,
bottom = 0, middle = 0.5, top = 1)[just])
}
just_dir <- function(x, tol = 0.001) {
out <- rep(2L, length(x))
out[x < 0.5 - tol] <- 1L
out[x > 0.5 + tol] <- 3L
out
}
|
##### The following R script was written by G. V. DiRenzo
### Please send questions to: grace.direnzo@gmail.com
# Objective:
# To extract temp and RMI estimates for the sampled Pshen sites
# To extract temp and RMI estimates for the known Pshen range for different scenarios:
# temp: scenario 1 and 2
# RMI: +/- 2
# Load library
library(rgdal)
library(sp)
library(sf)
library(RColorBrewer)
library(raster)
# Set working directory
# NOTE(review): machine-specific absolute path; the script only runs on
# the author's drive layout.
setwd("/Volumes/GVD/Yeti/ShenSal/Dropbox/USGS/ShenandoahSalamander/")
# Read in Hobo logger positions data
# This will be used to re-project other layers
hobo <- st_read("./Data/SHENgisForGrace/StephanTempleHobo_positions/Hobo_positions.shp")
# Store the projection in an object
WGS_proj <- projection(hobo)
# Read in sites for known sites
site <- read.csv("./Data/site_lat_long.csv")
# Assign coordinates to the file - turns it into a spatial point file
coordinates(site)= ~ Longitude + Latitude
# Assign the project to match the hobo loggers
proj4string(site)<- CRS(WGS_proj)
# Projection (number of years for PVA)
# NOTE(review): `projection` shadows raster::projection() from here on.
projection <- 60
#############################
#############################
################ Known Shenandoah range
#############################
#############################
# Build a 100 m raster of the known P. shenandoah range plus a 300 m
# buffer, plot both to PDF, and convert the buffered raster to points
# (r.pts) used for all later value extractions.
# Read in the data
range <- st_read("./Data/KnownRange/KnownPshenRange.shp")
# Shapefile reprojection
range_WGS <- st_transform(range, crs(WGS_proj))
# Save shape file as a data frame
df_range <-as.data.frame(range_WGS)
# rasterize the shapefile
r <- raster(ncol = 100, nrow = 100)
# Change the extent to include all of the surrounding area
extent(r) <- c(-78.42, -78.31, 38.54, 38.64)
# Rasterize
rp <- rasterize(range_WGS, r)
# New projection
epgs_proj <- "+proj=lcc +lat_1=39.2 +lat_2=38.03333333333333 +lat_0=37.66666666666666 +lon_0=-78.5 +x_0=3500000 +y_0=2000000 +ellps=GRS80 +units=m +no_defs"
# Change resolution to 100 x 100 m
rp2 <-projectRaster(rp,
                    res = c(100, 100),
                    crs = epgs_proj)
# add a 300 m buffer around the edge of the range
rp_buffer <- buffer(rp2, width = 300)
pdf("/Volumes/GVD/Yeti/ShenSal/Figures/Shen_area.pdf", width = 8, height = 8)
# Plot the buffer
plot(rp_buffer, ylab = "UTM Northing",
     xlab = "UTM Easting",
     col = "black", legend = F)
# Plot the known range
plot(rp2, add = TRUE, col = "goldenrod3", legend = F)
scalebar(1000, type = "bar", label = "1 km", xy = c(3507000, 2097500))
legend("topleft", c("known range", "buffer"), fill = c("goldenrod3", "black"), bty = "n")
dev.off()
# Convert raster to SpatialPointsDataFrame
r.pts <- rasterToPoints(rp_buffer, spatial = TRUE)
# Check the projection
proj4string(r.pts)
# reproject SpatialPointsDataFrame object to match hobo
r.pts <- spTransform(r.pts, CRS(WGS_proj))
# Check the reprojection
proj4string(r.pts)
# Assign coordinates to @data slot
r.pts@data <- data.frame(r.pts@data,
                         long=coordinates(r.pts)[,1],
                         lat=coordinates(r.pts)[,2])
#############################
#############################
################ Present temperatures for the random sites
#############################
#############################
# Extract present-day maximum temperature at the sampled sites (daf3)
# and across the whole buffered range (range_Tmax_p).
# Set path
dpath<-"./Data/Climate data_no correction/tmax_PRES_nocorrection/w001001.adf"
# Read in the raster
shen <- raster(dpath)
# Check projection
crs(shen)
# Simplest approach to re-project a raster
pr1 <- projectRaster(shen, crs=WGS_proj)
#plot(pr1)
#plot(site, add = TRUE)
# Re-project to another system to calculate size of grid cells
# EPSG 6592
# pr2 <- projectRaster(shen, crs="+init=epsg:6592")
# 100 x 100 m
# Extract the values for the locations in the site data frame
values <- extract(pr1, site, df = TRUE)
# Create a dataframe with the present day temperatures along with lat/long data
daf3 <- data.frame(Tmax_p = values$w001001,
                   Longitude = site$Longitude,
                   Latitude = site$Latitude)
# Extract values for the range of shenandoah
range_values <- extract(pr1, r.pts, df = TRUE)
# Extract present day temperatures across the entire Pshen range
range_Tmax_p <- data.frame(Tmax_p = range_values$w001001,
                           Longitude = r.pts@data$long,
                           Latitude = r.pts@data$lat)
#############################
#############################
################ Present RMI for the random sites
#############################
#############################
# Same extraction for the topographic relative moisture index (RMI).
# Set path
dpath<-"./Data/TopoRelativeMoistureIndex/trmi.img"
# Read in the raster
shen_RMI <- raster(dpath)
# Check projection
crs(shen_RMI)
# Simplest approach to re-project a raster
pr1_rmi <- projectRaster(shen_RMI, crs=WGS_proj)
#plot(pr1_rmi)
#plot(site, add = TRUE)
# Re-project to another system to calculate size of grid cells
# EPSG 6592
# pr2_rmi <- projectRaster(shen_RMI, crs="+init=epsg:6592")
# pr2_rmi
# Extract the values for the locations in the site data frame
values_rmi <- extract(pr1_rmi, site, df = TRUE)
range_RMI_p <- cbind(daf3,
                     RMI_p = values_rmi$layer)
# Extract the values across the RANGE
range_values_RMI <- extract(pr1_rmi, r.pts, df = TRUE)
range_RMI <- data.frame(RMI = range_values_RMI$layer,
                        Longitude = r.pts@data$long,
                        Latitude = r.pts@data$lat)
nrow(range_RMI)
##########################
### = Linear fit -> adding 2sd
#########################
# Project RMI linearly from its present value (year 1) to present + 2 SD
# (year `projection`), one row per range cell, one column per year.
# Create a dataframe with present day & future RMI estimates
RMI_range_add <- data.frame(
  present = range_RMI$RMI,
  future = range_RMI$RMI + (2 * sd(range_RMI$RMI))
)
# Create a dataframe with the time for present day (t = 1) and future (t = 60)
time <- data.frame(
  present = rep(1, times = nrow(RMI_range_add)),
  future = rep(projection, times = nrow(RMI_range_add)))
# Make a plot to visualize the increase
#plot(c(t(RMI_range_add)) ~ c(t(time)),
#     ylim = c(min(RMI_range_add)-2, max(RMI_range_add)+2),
#     las = 1, pch = 21,
#     col = "black",
#     bg = "deepskyblue3",
#     xlab = "Time (Yrs)",
#     ylab = "Relative Moisture index",
#     main = "Linear increase")
# Create an empty matrix to hold the projections
# dimensions = columns = years, rows = sites
RMI_mat_range_add <- matrix(NA, ncol = projection, nrow = nrow(range_RMI))
# The first columns = present day
RMI_mat_range_add[,1] <- RMI_range_add$present
# data frame with list of years
df <- data.frame(x = 1:projection)
# NOTE(review): 1:nrow() is unsafe if the matrix had zero rows; seq_len()
# would be more robust.
for(i in 1:nrow(RMI_mat_range_add)){
  # Fit a model to the temperature and time
  mod <- lm(c(t(RMI_range_add[i,])) ~ c(t(time[i,])))
  RMI_mat_range_add[i,] <- coef(mod)[1] + df$x * coef(mod)[2]
  # plot the temperature v time
  # lines(RMI_mat_range_add[i,] ~ df$x, lwd = 2, col = "deepskyblue3")
} #i
# Replace the first column with present day estimates
RMI_mat_range_add[,1] <- RMI_range_add$present
# Replace the last column with future estimates
RMI_mat_range_add[,ncol(RMI_mat_range_add)] <- RMI_range_add$future
##########################
### = Linear fit -> subtract 2sd
#########################
# Same projection but decreasing RMI to present - 2 SD by the final year.
# Create a dataframe with present day & future RMI estimates
RMI_range_sub <- data.frame(
  present = range_RMI$RMI,
  future = range_RMI$RMI - (2 * sd(range_RMI$RMI))
)
# Create a dataframe with the time for present day (t = 1) and future (t = 60)
time <- data.frame(
  present = rep(1, times = nrow(RMI_range_sub)),
  future = rep(projection, times = nrow(RMI_range_sub)))
# Make a plot
#plot(c(t(RMI_range_sub)) ~ c(t(time)),
#     ylim = c(min(RMI_range_sub)-2, max(RMI_range_sub)+2),
#     las = 1, pch = 21, col = "black",
#     bg = "deepskyblue3",
#     xlab = "Time (Yrs)",
#     ylab = "Relative Moisture index",
#     main = "Linear decrease")
# Create an empty matrix to hold the projections
# dimensions = columns = years, rows = sites
RMI_mat_range_sub <- matrix(NA, ncol = projection, nrow = nrow(range_RMI))
# The first columns = present day
RMI_mat_range_sub[,1] <- RMI_range_sub$present
# data frame with list of years
df <- data.frame(x = 1:projection)
# Loop through every site - fit a model and calcualte the RMI for each year from 1:60
for(i in 1:nrow(RMI_mat_range_sub)){
  # Fit a model to the temperature and time
  mod <- lm(c(t(RMI_range_sub[i,])) ~ c(t(time[i,])))
  RMI_mat_range_sub[i,] <- coef(mod)[1] + df$x * coef(mod)[2]
  # plot the temperature v time
  # lines(RMI_mat_range_sub[i,] ~ df$x, lwd = 2, col = "deepskyblue3")
} #i
# Replace the first column with present day estimates
RMI_mat_range_sub[,1] <- RMI_range_sub$present
# Replace the last column with future estimates
RMI_mat_range_sub[,ncol(RMI_mat_range_sub)] <- RMI_range_sub$future
#############################
#############################
################ FUTURE temperatures
#############################
#############################
# Extract the two future climate-scenario rasters across the range.
# NOTE(review): `dpath`, `shen` and `pr1` are reused/overwritten from the
# present-day section above; the present-day extractions must already have
# run before this point.
##############################
########## Scenario 1
##############################
# Set path
dpath<-"./Data/Climate data_no correction/tmax_SCEN1_nocorrection/w001001x.adf"
shen <- raster(dpath)
# Check projection
crs(shen)
# Simplest approach to re-project
pr1 <- projectRaster(shen, crs = WGS_proj)
# Extract values for the range of shenandoah
range_temp_SCEN1 <- extract(pr1, r.pts, df = TRUE)
range_Tmax_SCEN1 <- data.frame(Tmax_f = range_temp_SCEN1$w001001x,
                               Longitude = r.pts@data$long,
                               Latitude = r.pts@data$lat)
##############################
########## Scenario 2
##############################
# Set path
dpath<-"./Data/Climate data_no correction/tmax_SCEN2_nocorrection/w001001x.adf"
shen2 <- raster(dpath)
# Check projection
crs(shen2)
# Simplest approach to re-project
pr1_2 <- projectRaster(shen2, crs=WGS_proj)
# Extract values for the range of shenandoah
range_temp_SCEN2 <- extract(pr1_2, r.pts, df = TRUE)
range_Tmax_SCEN2 <- data.frame(Tmax_f = range_temp_SCEN2$w001001x,
                               Longitude = r.pts@data$long,
                               Latitude = r.pts@data$lat)
#############################
#############################
################ Calculate future temp within the Pshen range for 60 years - scen1
#############################
#############################
##################
### Assuming mean temperature increases by 3.3C over 60 yrs
# Interpolate each range cell's temperature between its present value
# (year 1) and its Scenario 1 value (year `projection`), first linearly,
# then with an exponential (a + b^x) fit through anchor points.
temp_range_SCEN1 <- data.frame(
  present = range_Tmax_p$Tmax_p,
  future = range_Tmax_SCEN1$Tmax_f)
time <- data.frame(
  present = rep(1, times = nrow(temp_range_SCEN1)),
  future = rep(projection, times = nrow(temp_range_SCEN1)))
#######################################
################## SCENARIO 1
#######################################
### = Linear fit
# Create an empty matrix to fill in
# dimensions = sites within range x number of years
temp_mat_range_SCEN1 <- matrix(NA,
                               ncol = projection,
                               nrow = nrow(temp_range_SCEN1))
# First year = present day temps
temp_mat_range_SCEN1[,1] <- temp_range_SCEN1$present
#plot(c(t(temp_range_SCEN1)) ~ c(t(time)),
#     ylim = c(20, max(temp_range_SCEN1)+2),
#     las = 1, pch = 21, col = "black",
#     bg = "deepskyblue3",
#     xlab = "Time (Yrs)",
#     ylab = "Temp C",
#     main = "Linear increase")
#
for(i in 1:nrow(temp_mat_range_SCEN1)){
  # Fit a model to the temperature and time
  mod <- lm(c(t(temp_range_SCEN1[i,])) ~ c(t(time[i,])))
  temp_mat_range_SCEN1[i,] <- coef(mod)[1] + df$x * coef(mod)[2]
  # plot the temperature v time
  # lines(temp_mat_range_SCEN1[i,] ~ df$x, lwd = 2, col = "deepskyblue3")
} #i
# Replace the first column with present day estimates
temp_mat_range_SCEN1[,1] <- temp_range_SCEN1$present
# Replace the last column with future estimates
temp_mat_range_SCEN1[,ncol(temp_mat_range_SCEN1)] <- temp_range_SCEN1$future
### = Exponential fit
# Create an empty matrix to fill in
# dimensions = sites within range x number of years
temp_mat_exp_range_SCEN1 <- matrix(NA,
                                   ncol = projection,
                                   nrow = nrow(temp_range_SCEN1))
# First year = present day temps
temp_mat_exp_range_SCEN1[,1] <- temp_range_SCEN1$present
# Formatting data to be used by nls
# The present-day value is repeated at years 2-5 to anchor a flat start
# of the exponential curve.
y <- c(t(temp_range_SCEN1[1,]),
       rep(temp_range_SCEN1[1,1], times = 4))
x <- c(t(time[1,]), 2:5)
dat <- data.frame(y = y, x = x)
# fit non-linear model
mod <- nls(y ~ a + b^x, data = dat, start = list(a = dat$y[1], b = 1))
pred <- predict(mod, list(x = df$x))
## add fitted curve
#plot(c(t(temp_range_SCEN1)) ~ c(t(time)),
#     ylim = c(20, max(temp_range_SCEN1)+2),
#     las = 1, pch = 21, col = "black",
#     bg = "deepskyblue3",
#     xlab = "Time (Yrs)",
#     ylab = "Temp C",
#     main = "Exponential increase")
#lines(df$x, pred, col = "deepskyblue3", lwd = 3)
for(i in 1:nrow(temp_mat_exp_range_SCEN1)){
  y <- c(t(temp_range_SCEN1[i,]),
         rep(temp_range_SCEN1[i,1], times = 4))
  x <- c(t(time[i,]), 2:5)
  dat <- data.frame(y = y, x = x)
  mod <- nls(y ~ a + b^x, data = dat, start = list(a = dat$y[1], b = 1))
  temp_mat_exp_range_SCEN1[i,] <- predict(mod, list(x = df$x))
  #lines(temp_mat_exp_range_SCEN1[i,] ~ df$x, lwd = 2, col = "deepskyblue3")
} #i
# Replace the first column with present day estimates
temp_mat_exp_range_SCEN1[,1] <- temp_range_SCEN1$present
# Replace the last column with future estimates
temp_mat_exp_range_SCEN1[,ncol(temp_mat_exp_range_SCEN1)] <- temp_range_SCEN1$future
#############################
#############################
################ Calculate future temp within the Pshen range for 60 years- scen2
#############################
#############################
##################
### Assuming mean temperature increases by 6.0C over 60 yrs
# Same linear and exponential interpolation as Scenario 1, using the
# Scenario 2 future temperatures.
temp_range_SCEN2 <- data.frame(
  present = range_Tmax_p$Tmax_p,
  future = range_Tmax_SCEN2$Tmax_f)
time <- data.frame(
  present = rep(1, times = nrow(temp_range_SCEN2)),
  future = rep(projection, times = nrow(temp_range_SCEN2)))
#######################################
################## SCENARIO 2
#######################################
### = Linear fit
# Create an empty matrix to fill in
# dimensions = sites within range x number of years
temp_mat_range_SCEN2 <- matrix(NA,
                               ncol = projection,
                               nrow = nrow(temp_range_SCEN2))
# First year = present day temps
temp_mat_range_SCEN2[,1] <- temp_range_SCEN2$present
## Plot it to make sure it looks good
#plot(c(t(temp_range_SCEN2)) ~ c(t(time)),
#     ylim = c(20, max(temp_range_SCEN2)+2),
#     las = 1, pch = 21, col = "black",
#     bg = "deepskyblue3",
#     xlab = "Time (Yrs)",
#     ylab = "Temp C",
#     main = "Linear increase")
# Loop through each site
for(i in 1:nrow(temp_mat_range_SCEN2)){
  # Fit a model to the temperature and time
  mod <- lm(c(t(temp_range_SCEN2[i,])) ~ c(t(time[i,])))
  temp_mat_range_SCEN2[i,] <- coef(mod)[1] + df$x * coef(mod)[2]
  # plot the temperature v time
  # lines(temp_mat_range_SCEN2[i,] ~ df$x, lwd = 2, col = "deepskyblue3")
} #i
# Replace the first column with present day estimates
temp_mat_range_SCEN2[,1] <- temp_range_SCEN2$present
# Replace the last column with future estimates
temp_mat_range_SCEN2[,ncol(temp_mat_range_SCEN2)] <- temp_range_SCEN2$future
######
### = Exponential fit
# Create an empty matrix to fill in
# dimensions = sites within range x number of years
temp_mat_exp_range_SCEN2 <- matrix(NA,
                                   ncol = projection,
                                   nrow = nrow(temp_range_SCEN2))
# First year = present day temps
temp_mat_exp_range_SCEN2[,1] <- temp_range_SCEN2$present
# Anchor the curve: repeat the present value at years 2-5 before fitting.
y <- c(t(temp_range_SCEN2[1,]), rep(temp_range_SCEN2[1,1], times = 4))
x <- c(t(time[1,]), 2:5)
dat <- data.frame(y = y, x = x)
# fit non-linear model
mod <- nls(y ~ a + b^x, data = dat, start = list(a = dat$y[1], b = 1))
pred <- predict(mod, list(x = df$x))
## add fitted curve
#plot(c(t(temp_range_SCEN2)) ~ c(t(time)),
#     ylim = c(20, max(temp_range_SCEN2)+2),
#     las = 1, pch = 21, col = "black",
#     bg = "deepskyblue3",
#     xlab = "Time (Yrs)",
#     ylab = "Temp C",
#     main = "Exponential increase")
#lines(df$x, pred, col = "deepskyblue3", lwd = 3)
# Loop through each site
for(i in 1:nrow(temp_mat_exp_range_SCEN2)){
  y <- c(t(temp_range_SCEN2[i,]), rep(temp_range_SCEN2[i,1], times = 4))
  x <- c(t(time[i,]), 2:5)
  dat <- data.frame(y = y, x = x)
  mod <- nls(y ~ a + b^x, data = dat, start = list(a = dat$y[1], b = 1))
  temp_mat_exp_range_SCEN2[i,] <- predict(mod, list(x = df$x))
  # lines(temp_mat_exp_range_SCEN2[i,] ~ df$x, lwd = 2, col = "deepskyblue3")
} #i
# Replace the first column with present day estimates
temp_mat_exp_range_SCEN2[,1] <- temp_range_SCEN2$present
# Replace the last column with future estimates
temp_mat_exp_range_SCEN2[,ncol(temp_mat_exp_range_SCEN2)] <- temp_range_SCEN2$future
#############################
#############################
################ Data files to use and summary
#############################
#############################
# Present day temperatures:
# Observed sites: daf3
nrow(daf3)
# Pshen range: range_Tmax_p
nrow(range_Tmax_p)
# Present day RMI-
# Observed sites: range_RMI_p
nrow(range_RMI_p)
# Pshen range:
nrow(range_RMI)
# Future RMI - year by year
# Pshen range:
# + 2 sd: RMI_mat_range_add
nrow(RMI_mat_range_add)
# - 2 sd:
nrow(RMI_mat_range_sub)
# Future: year-by-year
# Scenario 1
# Pshen range:
# Linear
# temp_mat_range_SCEN1
nrow(temp_mat_range_SCEN1)
# Exponential
# temp_mat_exp_range_SCEN1
nrow(temp_mat_exp_range_SCEN1)
# Scenario 2
# Pshen range:
# Linear
# temp_mat_range_SCEN2
nrow(temp_mat_range_SCEN2)
# Exponential
# temp_mat_exp_range_SCEN2
nrow(temp_mat_exp_range_SCEN2)
#### Bundle the data
dat <- list(
site.temp = daf3,
site.RMI = range_RMI_p,
range.temp = range_Tmax_p,
range.rmi = range_RMI,
#
RMI_mat_range_add = RMI_mat_range_add,
RMI_mat_range_sub = RMI_mat_range_sub,
temp_mat_range_SCEN1 = temp_mat_range_SCEN1,
temp_mat_exp_range_SCEN1 = temp_mat_exp_range_SCEN1,
temp_mat_range_SCEN2 = temp_mat_range_SCEN2,
temp_mat_exp_range_SCEN2 = temp_mat_exp_range_SCEN2
)
save(dat, file = "./Data/temp_rmi.rda") | /Code/FormatData/Format temp and RMI data.R | no_license | jwerba14/Shenandoah_Salamander | R | false | false | 18,381 | r | ##### The following R script was written by G. V. DiRenzo
### Please send questions to: grace.direnzo@gmail.com
# Objective:
# To extract temp and RMI estimates for the sampled Pshen sites
# To extract temp and RMI estimates for the known Pshen range for different scenarios:
# temp: scenario 1 and 2
# RMI: +/- 2
# Load library
library(rgdal)
library(sp)
library(sf)
library(RColorBrewer)
library(raster)
# Set working directory
setwd("/Volumes/GVD/Yeti/ShenSal/Dropbox/USGS/ShenandoahSalamander/")
# Read in Hobo logger positions data
# This will be used to re-project other layers
hobo <- st_read("./Data/SHENgisForGrace/StephanTempleHobo_positions/Hobo_positions.shp")
# Store the projection in an object
WGS_proj <- projection(hobo)
# Read in sites for known sites
site <- read.csv("./Data/site_lat_long.csv")
# Assign coordinates to the file - turns it into a spatial point file
coordinates(site)= ~ Longitude + Latitude
# Assign the project to match the hobo loggers
proj4string(site)<- CRS(WGS_proj)
# Projection (number of years for PVA)
projection <- 60
#############################
#############################
################ Known Shenandoah range
#############################
#############################
# Read in the data
range <- st_read("./Data/KnownRange/KnownPshenRange.shp")
# Shapefile reprojection
range_WGS <- st_transform(range, crs(WGS_proj))
# Save shape file as a data frame
df_range <-as.data.frame(range_WGS)
# rasterize the shapefile
r <- raster(ncol = 100, nrow = 100)
# Change the extent to include all of the surrounding area
extent(r) <- c(-78.42, -78.31, 38.54, 38.64)
# Rasterize
rp <- rasterize(range_WGS, r)
# New projection
epgs_proj <- "+proj=lcc +lat_1=39.2 +lat_2=38.03333333333333 +lat_0=37.66666666666666 +lon_0=-78.5 +x_0=3500000 +y_0=2000000 +ellps=GRS80 +units=m +no_defs"
# Change resolution to 100 x 100 m
rp2 <-projectRaster(rp,
res = c(100, 100),
crs = epgs_proj)
# add a 300 m buffer around the edge of the range
rp_buffer <- buffer(rp2, width = 300)
pdf("/Volumes/GVD/Yeti/ShenSal/Figures/Shen_area.pdf", width = 8, height = 8)
# Plot the buffer
plot(rp_buffer, ylab = "UTM Northing",
xlab = "UTM Easting",
col = "black", legend = F)
# Plot the known range
plot(rp2, add = TRUE, col = "goldenrod3", legend = F)
scalebar(1000, type = "bar", label = "1 km", xy = c(3507000, 2097500))
legend("topleft", c("known range", "buffer"), fill = c("goldenrod3", "black"), bty = "n")
dev.off()
# Convert raster to SpatialPointsDataFrame
r.pts <- rasterToPoints(rp_buffer, spatial = TRUE)
# Check the projection
proj4string(r.pts)
# reproject SpatialPointsDataFrame object to match hobo
r.pts <- spTransform(r.pts, CRS(WGS_proj))
# Check the reprojection
proj4string(r.pts)
# Assign coordinates to @data slot
r.pts@data <- data.frame(r.pts@data,
long=coordinates(r.pts)[,1],
lat=coordinates(r.pts)[,2])
#############################
#############################
################ Present temperatures for the random sites
#############################
#############################
# Set path
dpath<-"./Data/Climate data_no correction/tmax_PRES_nocorrection/w001001.adf"
# Read in the raster
shen <- raster(dpath)
# Check projection
crs(shen)
# Simplest approach to re-project a raster
pr1 <- projectRaster(shen, crs=WGS_proj)
#plot(pr1)
#plot(site, add = TRUE)
# Re-project to another system to calculate size of grid cells
# EPSG 6592
# pr2 <- projectRaster(shen, crs="+init=epsg:6592")
# 100 x 100 m
# Extract the values for the locations in the site data frame
values <- extract(pr1, site, df = TRUE)
# Create a dataframe with the present day temperatures along with lat/long data
daf3 <- data.frame(Tmax_p = values$w001001,
Longitude = site$Longitude,
Latitude = site$Latitude)
# Extract values for the range of shenandoah
range_values <- extract(pr1, r.pts, df = TRUE)
# Extract present day temperatures across the entire Pshen range
range_Tmax_p <- data.frame(Tmax_p = range_values$w001001,
Longitude = r.pts@data$long,
Latitude = r.pts@data$lat)
#############################
#############################
################ Present RMI for the random sites
#############################
#############################
# Set path
dpath<-"./Data/TopoRelativeMoistureIndex/trmi.img"
# Read in the raster
shen_RMI <- raster(dpath)
# Check projection
crs(shen_RMI)
# Simplest approach to re-project a raster
pr1_rmi <- projectRaster(shen_RMI, crs=WGS_proj)
#plot(pr1_rmi)
#plot(site, add = TRUE)
# Re-project to another system to calculate size of grid cells
# EPSG 6592
# pr2_rmi <- projectRaster(shen_RMI, crs="+init=epsg:6592")
# pr2_rmi
# Extract the values for the locations in the site data frame
values_rmi <- extract(pr1_rmi, site, df = TRUE)
range_RMI_p <- cbind(daf3,
RMI_p = values_rmi$layer)
# Extract the values across the RANGE
range_values_RMI <- extract(pr1_rmi, r.pts, df = TRUE)
range_RMI <- data.frame(RMI = range_values_RMI$layer,
Longitude = r.pts@data$long,
Latitude = r.pts@data$lat)
nrow(range_RMI)
##########################
### = Linear fit -> adding 2sd
#########################
# Create a dataframe with present day & future RMI estimates
RMI_range_add <- data.frame(
present = range_RMI$RMI,
future = range_RMI$RMI + (2 * sd(range_RMI$RMI))
)
# Create a dataframe with the time for present day (t = 1) and future (t = 60)
time <- data.frame(
present = rep(1, times = nrow(RMI_range_add)),
future = rep(projection, times = nrow(RMI_range_add)))
# Make a plot to visualize the increase
#plot(c(t(RMI_range_add)) ~ c(t(time)),
# ylim = c(min(RMI_range_add)-2, max(RMI_range_add)+2),
# las = 1, pch = 21,
# col = "black",
# bg = "deepskyblue3",
# xlab = "Time (Yrs)",
# ylab = "Relative Moisture index",
# main = "Linear increase")
# Create an empty matrix to hold the projections
# dimensions = columns = years, rows = sites
RMI_mat_range_add <- matrix(NA, ncol = projection, nrow = nrow(range_RMI))
# The first columns = present day
RMI_mat_range_add[,1] <- RMI_range_add$present
# data frame with list of years
df <- data.frame(x = 1:projection)
for(i in 1:nrow(RMI_mat_range_add)){
# Fit a model to the temperature and time
mod <- lm(c(t(RMI_range_add[i,])) ~ c(t(time[i,])))
RMI_mat_range_add[i,] <- coef(mod)[1] + df$x * coef(mod)[2]
# plot the temperature v time
# lines(RMI_mat_range_add[i,] ~ df$x, lwd = 2, col = "deepskyblue3")
} #i
# Replace the first column with present day estimates
RMI_mat_range_add[,1] <- RMI_range_add$present
# Replace the last column with future estimates
RMI_mat_range_add[,ncol(RMI_mat_range_add)] <- RMI_range_add$future
##########################
### = Linear fit -> subtract 2sd
#########################
# Create a dataframe with present day & future RMI estimates
RMI_range_sub <- data.frame(
present = range_RMI$RMI,
future = range_RMI$RMI - (2 * sd(range_RMI$RMI))
)
# Create a dataframe with the time for present day (t = 1) and future (t = 60)
time <- data.frame(
present = rep(1, times = nrow(RMI_range_sub)),
future = rep(projection, times = nrow(RMI_range_sub)))
# Make a plot
#plot(c(t(RMI_range_sub)) ~ c(t(time)),
# ylim = c(min(RMI_range_sub)-2, max(RMI_range_sub)+2),
# las = 1, pch = 21, col = "black",
# bg = "deepskyblue3",
# xlab = "Time (Yrs)",
# ylab = "Relative Moisture index",
# main = "Linear decrease")
# Create an empty matrix to hold the projections
# dimensions = columns = years, rows = sites
RMI_mat_range_sub <- matrix(NA, ncol = projection, nrow = nrow(range_RMI))
# The first columns = present day
RMI_mat_range_sub[,1] <- RMI_range_sub$present
# data frame with list of years
df <- data.frame(x = 1:projection)
# Loop through every site - fit a model and calcualte the RMI for each year from 1:60
for(i in 1:nrow(RMI_mat_range_sub)){
# Fit a model to the temperature and time
mod <- lm(c(t(RMI_range_sub[i,])) ~ c(t(time[i,])))
RMI_mat_range_sub[i,] <- coef(mod)[1] + df$x * coef(mod)[2]
# plot the temperature v time
# lines(RMI_mat_range_sub[i,] ~ df$x, lwd = 2, col = "deepskyblue3")
} #i
# Replace the first column with present day estimates
RMI_mat_range_sub[,1] <- RMI_range_sub$present
# Replace the last column with future estimates
RMI_mat_range_sub[,ncol(RMI_mat_range_sub)] <- RMI_range_sub$future
#############################
#############################
################ FUTURE temperatures
#############################
#############################
##############################
########## Scenario 1
##############################
# Set path
dpath<-"./Data/Climate data_no correction/tmax_SCEN1_nocorrection/w001001x.adf"
shen <- raster(dpath)
# Check projection
crs(shen)
# Simplest approach to re-project
pr1 <- projectRaster(shen, crs = WGS_proj)
# Extract values for the range of shenandoah
range_temp_SCEN1 <- extract(pr1, r.pts, df = TRUE)
range_Tmax_SCEN1 <- data.frame(Tmax_f = range_temp_SCEN1$w001001x,
Longitude = r.pts@data$long,
Latitude = r.pts@data$lat)
##############################
########## Scenario 2
##############################
# Set path
dpath<-"./Data/Climate data_no correction/tmax_SCEN2_nocorrection/w001001x.adf"
shen2 <- raster(dpath)
# Check projection
crs(shen2)
# Simplest approach to re-project
pr1_2 <- projectRaster(shen2, crs=WGS_proj)
# Extract values for the range of shenandoah
range_temp_SCEN2 <- extract(pr1_2, r.pts, df = TRUE)
range_Tmax_SCEN2 <- data.frame(Tmax_f = range_temp_SCEN2$w001001x,
Longitude = r.pts@data$long,
Latitude = r.pts@data$lat)
#############################
#############################
################ Calculate future temp within the Pshen range for 60 years - scen1
#############################
#############################
##################
### Assuming mean temperature increases by 3.3C over 60 yrs
temp_range_SCEN1 <- data.frame(
present = range_Tmax_p$Tmax_p,
future = range_Tmax_SCEN1$Tmax_f)
time <- data.frame(
present = rep(1, times = nrow(temp_range_SCEN1)),
future = rep(projection, times = nrow(temp_range_SCEN1)))
#######################################
################## SCENARIO 1
#######################################
### = Linear fit
# Create an empty matrix to fill in
# dimensions = sites within range x number of years
temp_mat_range_SCEN1 <- matrix(NA,
ncol = projection,
nrow = nrow(temp_range_SCEN1))
# First year = present day temps
temp_mat_range_SCEN1[,1] <- temp_range_SCEN1$present
#plot(c(t(temp_range_SCEN1)) ~ c(t(time)),
# ylim = c(20, max(temp_range_SCEN1)+2),
# las = 1, pch = 21, col = "black",
# bg = "deepskyblue3",
# xlab = "Time (Yrs)",
# ylab = "Temp C",
# main = "Linear increase")
#
for(i in 1:nrow(temp_mat_range_SCEN1)){
# Fit a model to the temperature and time
mod <- lm(c(t(temp_range_SCEN1[i,])) ~ c(t(time[i,])))
temp_mat_range_SCEN1[i,] <- coef(mod)[1] + df$x * coef(mod)[2]
# plot the temperature v time
# lines(temp_mat_range_SCEN1[i,] ~ df$x, lwd = 2, col = "deepskyblue3")
} #i
# Replace the first column with present day estimates
temp_mat_range_SCEN1[,1] <- temp_range_SCEN1$present
# Replace the last column with future estimates
temp_mat_range_SCEN1[,ncol(temp_mat_range_SCEN1)] <- temp_range_SCEN1$future
### = Exponenatial fit
# Create an empty matrix to fill in
# dimensions = sites within range x number of years
temp_mat_exp_range_SCEN1 <- matrix(NA,
ncol = projection,
nrow = nrow(temp_range_SCEN1))
# First year = present day temps
temp_mat_exp_range_SCEN1[,1] <- temp_range_SCEN1$present
# Formatting data to be used by nls
y <- c(t(temp_range_SCEN1[1,]),
rep(temp_range_SCEN1[1,1], times = 4))
x <- c(t(time[1,]), 2:5)
dat <- data.frame(y = y, x = x)
# fit non-linear model
mod <- nls(y ~ a + b^x, data = dat, start = list(a = dat$y[1], b = 1))
pred <- predict(mod, list(x = df$x))
## add fitted curve
#plot(c(t(temp_range_SCEN1)) ~ c(t(time)),
# ylim = c(20, max(temp_range_SCEN1)+2),
# las = 1, pch = 21, col = "black",
# bg = "deepskyblue3",
# xlab = "Time (Yrs)",
# ylab = "Temp C",
# main = "Exponential increase")
#lines(df$x, pred, col = "deepskyblue3", lwd = 3)
for(i in 1:nrow(temp_mat_exp_range_SCEN1)){
y <- c(t(temp_range_SCEN1[i,]),
rep(temp_range_SCEN1[i,1], times = 4))
x <- c(t(time[i,]), 2:5)
dat <- data.frame(y = y, x = x)
mod <- nls(y ~ a + b^x, data = dat, start = list(a = dat$y[1], b = 1))
temp_mat_exp_range_SCEN1[i,] <- predict(mod, list(x = df$x))
#lines(temp_mat_exp_range_SCEN1[i,] ~ df$x, lwd = 2, col = "deepskyblue3")
} #i
# Replace the first column with present day estimates
temp_mat_exp_range_SCEN1[,1] <- temp_range_SCEN1$present
# Replace the last column with future estimates
temp_mat_exp_range_SCEN1[,ncol(temp_mat_exp_range_SCEN1)] <- temp_range_SCEN1$future
#############################
#############################
################ Calculate future temp within the Pshen range for 60 years- scen2
#############################
#############################
##################
### Assuming mean temperature increases by 6.0C over 60 yrs
temp_range_SCEN2 <- data.frame(
present = range_Tmax_p$Tmax_p,
future = range_Tmax_SCEN2$Tmax_f)
time <- data.frame(
present = rep(1, times = nrow(temp_range_SCEN2)),
future = rep(projection, times = nrow(temp_range_SCEN2)))
#######################################
################## SCENARIO 2
#######################################
### = Linear fit
# Create an empty matrix to fill in
# dimensions = sites within range x number of years
temp_mat_range_SCEN2 <- matrix(NA,
ncol = projection,
nrow = nrow(temp_range_SCEN2))
# First year = present day temps
temp_mat_range_SCEN2[,1] <- temp_range_SCEN2$present
## Plot it to make sure it looks good
#plot(c(t(temp_range_SCEN2)) ~ c(t(time)),
# ylim = c(20, max(temp_range_SCEN2)+2),
# las = 1, pch = 21, col = "black",
# bg = "deepskyblue3",
# xlab = "Time (Yrs)",
# ylab = "Temp C",
# main = "Linear increase")
# Loop through each site
for(i in 1:nrow(temp_mat_range_SCEN2)){
# Fit a model to the temperature and time
mod <- lm(c(t(temp_range_SCEN2[i,])) ~ c(t(time[i,])))
temp_mat_range_SCEN2[i,] <- coef(mod)[1] + df$x * coef(mod)[2]
# plot the temperature v time
# lines(temp_mat_range_SCEN2[i,] ~ df$x, lwd = 2, col = "deepskyblue3")
} #i
# Replace the first column with present day estimates
temp_mat_range_SCEN2[,1] <- temp_range_SCEN2$present
# Replace the last column with future estimates
temp_mat_range_SCEN2[,ncol(temp_mat_range_SCEN2)] <- temp_range_SCEN2$future
######
### = Exponenatial fit
# Create an empty matrix to fill in
# dimensions = sites within range x number of years
temp_mat_exp_range_SCEN2 <- matrix(NA,
ncol = projection,
nrow = nrow(temp_range_SCEN2))
# First year = present day temps
temp_mat_exp_range_SCEN2[,1] <- temp_range_SCEN2$present
y <- c(t(temp_range_SCEN2[1,]), rep(temp_range_SCEN2[1,1], times = 4))
x <- c(t(time[1,]), 2:5)
dat <- data.frame(y = y, x = x)
# fit non-linear model
mod <- nls(y ~ a + b^x, data = dat, start = list(a = dat$y[1], b = 1))
pred <- predict(mod, list(x = df$x))
## add fitted curve
#plot(c(t(temp_range_SCEN2)) ~ c(t(time)),
# ylim = c(20, max(temp_range_SCEN2)+2),
# las = 1, pch = 21, col = "black",
# bg = "deepskyblue3",
# xlab = "Time (Yrs)",
# ylab = "Temp C",
# main = "Exponential increase")
#lines(df$x, pred, col = "deepskyblue3", lwd = 3)
# Loop through each site
for(i in 1:nrow(temp_mat_exp_range_SCEN2)){
y <- c(t(temp_range_SCEN2[i,]), rep(temp_range_SCEN2[i,1], times = 4))
x <- c(t(time[i,]), 2:5)
dat <- data.frame(y = y, x = x)
mod <- nls(y ~ a + b^x, data = dat, start = list(a = dat$y[1], b = 1))
temp_mat_exp_range_SCEN2[i,] <- predict(mod, list(x = df$x))
# lines(temp_mat_exp_range_SCEN2[i,] ~ df$x, lwd = 2, col = "deepskyblue3")
} #i
# Replace the first column with present day estimates
temp_mat_exp_range_SCEN2[,1] <- temp_range_SCEN2$present
# Replace the last column with future estimates
temp_mat_exp_range_SCEN2[,ncol(temp_mat_exp_range_SCEN2)] <- temp_range_SCEN2$future
#############################
#############################
################ Data files to use and summary
#############################
#############################
# Present day temperatures:
# Observed sites: daf3
nrow(daf3)
# Pshen range: range_Tmax_p
nrow(range_Tmax_p)
# Present day RMI-
# Observed sites: range_RMI_p
nrow(range_RMI_p)
# Pshen range:
nrow(range_RMI)
# Future RMI - year by year
# Pshen range:
# + 2 sd: RMI_mat_range_add
nrow(RMI_mat_range_add)
# - 2 sd:
nrow(RMI_mat_range_sub)
# Future: year-by-year
# Scenario 1
# Pshen range:
# Linear
# temp_mat_range_SCEN1
nrow(temp_mat_range_SCEN1)
# Exponential
# temp_mat_exp_range_SCEN1
nrow(temp_mat_exp_range_SCEN1)
# Scenario 2
# Pshen range:
# Linear
# temp_mat_range_SCEN2
nrow(temp_mat_range_SCEN2)
# Exponential
# temp_mat_exp_range_SCEN2
nrow(temp_mat_exp_range_SCEN2)
#### Bundle the data
dat <- list(
site.temp = daf3,
site.RMI = range_RMI_p,
range.temp = range_Tmax_p,
range.rmi = range_RMI,
#
RMI_mat_range_add = RMI_mat_range_add,
RMI_mat_range_sub = RMI_mat_range_sub,
temp_mat_range_SCEN1 = temp_mat_range_SCEN1,
temp_mat_exp_range_SCEN1 = temp_mat_exp_range_SCEN1,
temp_mat_range_SCEN2 = temp_mat_range_SCEN2,
temp_mat_exp_range_SCEN2 = temp_mat_exp_range_SCEN2
)
save(dat, file = "./Data/temp_rmi.rda") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/emojirx.R
\name{runes}
\alias{runes}
\title{runes}
\usage{
runes(x)
}
\arguments{
\item{x}{character representation of runes, e.g. `"U+1F468"`}
}
\description{
runes
}
| /man/runes.Rd | no_license | ThinkR-open/emojitsu | R | false | true | 247 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/emojirx.R
\name{runes}
\alias{runes}
\title{runes}
\usage{
runes(x)
}
\arguments{
\item{x}{character representation of runes, e.g. `"U+1F468"`}
}
\description{
runes
}
|
#' Preprocess Gene List
#'
#' Cleans a named expression vector: entries with missing values or with
#' missing/empty names are dropped, duplicated gene names are collapsed by
#' keeping the maximum value per gene, and the result is sorted in
#' decreasing order of expression.
#'
#' @param geneList a named vector containing the values of gene expression
#' @return a named vector of gene expression values, sorted decreasingly
#' @examples
#' preprocessGeneList(c(TP53 = 1.2, TP53 = 0.8, BRCA1 = 2.1))
preprocessGeneList <- function(geneList) {
  # Keep only entries that have both a value and a usable (non-empty) name.
  usable <- !is.na(geneList) & !is.na(names(geneList)) & names(geneList) != ""
  cleaned <- geneList[usable]
  # Collapse duplicated gene symbols, retaining the largest value per gene.
  collapsed <- tapply(t(cleaned), names(cleaned), max)
  collapsed[order(collapsed, decreasing = TRUE)]
}
#' Calculate Enrichment Score
#'
#' This function calculates the enrichment score of a ranked gene list on a
#' specific gene set (GSEA-style running-sum statistic).
#'
#' @param geneList a named vector containing the values of gene expression,
#'   expected to be preprocessed/sorted (see \code{preprocessGeneList})
#' @param geneSet a character vector of gene names representing a gene set
#' @return a numeric indicating the enrichment score
#' @useDynLib DeepCC
#' @import Rcpp
#' @export
#' @examples
#' calcEnrichmentScore(geneList, geneSet)
calcEnrichmentScore <- function(geneList, geneSet)
{
  # Delegates to compiled C++ code: the first argument flags which positions
  # of the ranked list belong to the gene set; the trailing 1 is presumably
  # the GSEA weight exponent -- confirm against the C++ source.
  calcEnrichmentScoreCPP((names(geneList) %in% geneSet), geneList, 1)
}
#' Generate Functional Spectra
#'
#' This function generates functional spectra for given gene expression profiles.
#'
#' @param eps a data.frame containing gene expression profiles (each row presents one sample)
#' @param geneSets a character identifier ("MSigDBv5", "MSigDBv6" or
#'   "MSigDBv7", default: "MSigDBv7") naming a bundled gene set collection,
#'   or a List containing custom gene sets
#' @param scale a logical indicating whether to centre each gene (column)
#'   before computing enrichment (default: TRUE)
#' @param cores a integer indicating cpu cores used in parallel computing (default = all cores - 2)
#' @return a data.frame containing functional spectra (one row per sample)
#' @seealso \code{\link{getFunctionalSpectrum}} for a single expression profile.
#' @importFrom foreach foreach %dopar%
#' @export
#' @examples
#' getFunctionalSpectra(eps)
getFunctionalSpectra <- function(eps, geneSets = 'MSigDBv7', scale = TRUE, cores = parallel::detectCores() - 2) {
  # Resolve a character identifier to a bundled gene set collection. Guarding
  # on is.character() keeps the string comparisons from failing when a custom
  # list of gene sets is supplied directly (lists are documented as valid).
  if (is.character(geneSets)) {
    if (geneSets == 'MSigDBv5') {
      data(MSigDBv5)
      geneSets <- MSigDBv5
    } else if (geneSets == 'MSigDBv6') {
      data(MSigDBv6)
      geneSets <- MSigDBv6
    } else if (geneSets == 'MSigDBv7') {
      data(MSigDBv7)
      geneSets <- MSigDBv7
    }
  }
  # Centre genes across samples. base::scale() is still found even though the
  # argument `scale` shadows the name, because R skips non-function bindings
  # when resolving a call.
  if (scale) eps <- scale(eps, scale = FALSE)
  doParallel::registerDoParallel(cores)
  # One row of enrichment scores per sample, computed in parallel.
  # seq_len() (rather than 1:nrow) is safe when eps has zero rows.
  res <- foreach(idx = seq_len(nrow(eps)), .combine = rbind) %dopar% {
    geneList <- preprocessGeneList(eps[idx, ])
    sapply(geneSets, function(x) calcEnrichmentScore(geneList, x))
  }
  rownames(res) <- rownames(eps)
  res
}
#' Generate Functional Spectrum
#'
#' This function generates functional spectrum for a single gene expression profile.
#'
#' @param expressionProfile a named numeric vector containing gene expression profile
#' @param geneSets a character identifier ("MSigDBv5", "MSigDBv6" or
#'   "MSigDBv7", default: "MSigDBv7") naming a bundled gene set collection,
#'   or a List containing custom gene sets
#' @param refExp a character indicating cancer type according to TCGA's identifier, or a named vector of reference expression
#' @param logChange a logical flag indicating whether the input data is already in log change form, e.g., for two color microarray, you should turn it on. (default: FALSE)
#' @param inverseRescale a logical flag indicating whether we rescale the reference to the scale of input data. If your single sample is microarray data and the reference is RNA-Seq, you should turn it on. (default: FALSE)
#' @param filter a numeric indicating the cutoff value of expression. (default: -3)
#' @return a numeric vector containing functional spectrum
#' @note You can generate the reference expression profile from your previous data or public data, which is the same(similiar) cancer type and platform.
#' In DeepCC we also prepared average expression profiles of each cancer types in TCGA project as references. To use them, just use the TCGA identifier (COADREAD, BRCA, OV, etc.) to indicate the cancer type.
#' If your single sample is microarray data, we strongly sugguest turn the parameter \code{inverseRescale} on, since TCGA is RNA-Seq, which has very small expression value for low expressed genes, compared with microarray.
#' @seealso \code{\link{getFunctionalSpectra}} for a batch of gene expression profiles.
#' @export
#' @examples
#' getFunctionalSpectrum(ep, refExp = "COADREAD")
getFunctionalSpectrum <- function(expressionProfile, geneSets = 'MSigDBv7', refExp = NULL, logChange = FALSE, inverseRescale = FALSE, filter = -3) {
  expressionProfile <- unlist(expressionProfile)
  if (!logChange) {
    # Raw expression: convert to log-change-like values against a reference.
    if (is.null(refExp)) stop("Must have a reference expression profile!")
    if (is.character(refExp)) {
      # A TCGA identifier selects a prebuilt reference row from ref_eps
      # (presumably package lazy data -- confirm it is loaded with the package).
      if (!(refExp %in% rownames(ref_eps))) stop(paste(refExp, "is not a support identifier of cancer types!\n Please use one in :", paste(row.names(ref_eps), collapse = " ")))
      refExp <- ref_eps[refExp, ]
    }
    # filter low expressed genes
    refExp <- refExp[refExp > filter]
    common <- intersect(names(expressionProfile), names(refExp))
    if (!inverseRescale) {
      # Regress the reference onto the sample, then take the difference
      # between fitted reference values and the observed sample.
      expressionProfile <- predict(lm(refExp[common] ~ expressionProfile[common])) - expressionProfile[common]
    } else {
      # Rescale the reference to the sample's scale instead (e.g. microarray
      # sample against an RNA-Seq reference).
      expressionProfile <- expressionProfile[common] - predict(lm(expressionProfile[common] ~ refExp[common]))
    }
  }
  geneList <- preprocessGeneList(expressionProfile)
  # Resolve a character identifier to a bundled gene set collection; custom
  # lists pass through untouched.
  if (is.character(geneSets)) {
    if (geneSets == 'MSigDBv5') {
      data(MSigDBv5)
      geneSets <- MSigDBv5   # fixed: original assigned the undefined object `MSiDBv5`
    } else if (geneSets == 'MSigDBv6') {
      data(MSigDBv6)
      geneSets <- MSigDBv6
    } else if (geneSets == 'MSigDBv7') {
      data(MSigDBv7)
      geneSets <- MSigDBv7
    }
  }
  res <- sapply(seq_along(geneSets), function(idx) calcEnrichmentScore(geneList, geneSets[[idx]]))
  names(res) <- names(geneSets)
  res
}
| /R/FunctionalSpectra.R | permissive | CityUHK-CompBio/DeepCC | R | false | false | 5,391 | r | #' Preprocess Gene List
#'
#' This function preprocess gene list for futhur process
#'
#' @param geneList a named vector containing the value of gene expression
#' @return a named vecter containing the value of gene expression
#' @examples
#' preprocessGeneList(geneList)
preprocessGeneList <- function(geneList) {
geneList <- geneList[which((!is.na(geneList)) & (names(geneList)!="") & (!is.na(names(geneList))))]
geneList <- tapply(t(geneList), names(geneList), max)
geneList[order(geneList, decreasing = TRUE)]
}
#' Calculate Enrichment Score
#'
#' This function calculates the enrichment score of a ranked gene list on a
#' specific gene set (GSEA-style running-sum statistic).
#'
#' @param geneList a named vector containing the values of gene expression,
#'   expected to be preprocessed/sorted (see \code{preprocessGeneList})
#' @param geneSet a character vector of gene names representing a gene set
#' @return a numeric indicating the enrichment score
#' @useDynLib DeepCC
#' @import Rcpp
#' @export
#' @examples
#' calcEnrichmentScore(geneList, geneSet)
calcEnrichmentScore <- function(geneList, geneSet)
{
  # Delegates to compiled C++ code: the first argument flags which positions
  # of the ranked list belong to the gene set; the trailing 1 is presumably
  # the GSEA weight exponent -- confirm against the C++ source.
  calcEnrichmentScoreCPP((names(geneList) %in% geneSet), geneList, 1)
}
#' Generate Functional Spectra
#'
#' This function generates functional spectra for given gene expression profiles.
#'
#' @param eps a data.frame containing gene expression profiles (each row presents one sample)
#' @param geneSets a character identifier ("MSigDBv5", "MSigDBv6" or
#'   "MSigDBv7", default: "MSigDBv7") naming a bundled gene set collection,
#'   or a List containing custom gene sets
#' @param scale a logical indicating whether to centre each gene (column)
#'   before computing enrichment (default: TRUE)
#' @param cores a integer indicating cpu cores used in parallel computing (default = all cores - 2)
#' @return a data.frame containing functional spectra (one row per sample)
#' @seealso \code{\link{getFunctionalSpectrum}} for a single expression profile.
#' @importFrom foreach foreach %dopar%
#' @export
#' @examples
#' getFunctionalSpectra(eps)
getFunctionalSpectra <- function(eps, geneSets = 'MSigDBv7', scale = TRUE, cores = parallel::detectCores() - 2) {
  # Resolve a character identifier to a bundled gene set collection. Guarding
  # on is.character() keeps the string comparisons from failing when a custom
  # list of gene sets is supplied directly (lists are documented as valid).
  if (is.character(geneSets)) {
    if (geneSets == 'MSigDBv5') {
      data(MSigDBv5)
      geneSets <- MSigDBv5
    } else if (geneSets == 'MSigDBv6') {
      data(MSigDBv6)
      geneSets <- MSigDBv6
    } else if (geneSets == 'MSigDBv7') {
      data(MSigDBv7)
      geneSets <- MSigDBv7
    }
  }
  # Centre genes across samples. base::scale() is still found even though the
  # argument `scale` shadows the name, because R skips non-function bindings
  # when resolving a call.
  if (scale) eps <- scale(eps, scale = FALSE)
  doParallel::registerDoParallel(cores)
  # One row of enrichment scores per sample, computed in parallel.
  # seq_len() (rather than 1:nrow) is safe when eps has zero rows.
  res <- foreach(idx = seq_len(nrow(eps)), .combine = rbind) %dopar% {
    geneList <- preprocessGeneList(eps[idx, ])
    sapply(geneSets, function(x) calcEnrichmentScore(geneList, x))
  }
  rownames(res) <- rownames(eps)
  res
}
#' Generate Functional Spectrum
#'
#' This function generates functional spectrum for a single gene expression profile.
#'
#' @param expressionProfile a named numeric vector containing gene expression profile
#' @param geneSets a character identifier ("MSigDBv5", "MSigDBv6" or
#'   "MSigDBv7", default: "MSigDBv7") naming a bundled gene set collection,
#'   or a List containing custom gene sets
#' @param refExp a character indicating cancer type according to TCGA's identifier, or a named vector of reference expression
#' @param logChange a logical flag indicating whether the input data is already in log change form, e.g., for two color microarray, you should turn it on. (default: FALSE)
#' @param inverseRescale a logical flag indicating whether we rescale the reference to the scale of input data. If your single sample is microarray data and the reference is RNA-Seq, you should turn it on. (default: FALSE)
#' @param filter a numeric indicating the cutoff value of expression. (default: -3)
#' @return a numeric vector containing functional spectrum
#' @note You can generate the reference expression profile from your previous data or public data, which is the same(similiar) cancer type and platform.
#' In DeepCC we also prepared average expression profiles of each cancer types in TCGA project as references. To use them, just use the TCGA identifier (COADREAD, BRCA, OV, etc.) to indicate the cancer type.
#' If your single sample is microarray data, we strongly sugguest turn the parameter \code{inverseRescale} on, since TCGA is RNA-Seq, which has very small expression value for low expressed genes, compared with microarray.
#' @seealso \code{\link{getFunctionalSpectra}} for a batch of gene expression profiles.
#' @export
#' @examples
#' getFunctionalSpectrum(ep, refExp = "COADREAD")
getFunctionalSpectrum <- function(expressionProfile, geneSets = 'MSigDBv7', refExp = NULL, logChange = FALSE, inverseRescale = FALSE, filter = -3) {
  expressionProfile <- unlist(expressionProfile)
  if (!logChange) {
    # Raw expression: convert to log-change-like values against a reference.
    if (is.null(refExp)) stop("Must have a reference expression profile!")
    if (is.character(refExp)) {
      # A TCGA identifier selects a prebuilt reference row from ref_eps
      # (presumably package lazy data -- confirm it is loaded with the package).
      if (!(refExp %in% rownames(ref_eps))) stop(paste(refExp, "is not a support identifier of cancer types!\n Please use one in :", paste(row.names(ref_eps), collapse = " ")))
      refExp <- ref_eps[refExp, ]
    }
    # filter low expressed genes
    refExp <- refExp[refExp > filter]
    common <- intersect(names(expressionProfile), names(refExp))
    if (!inverseRescale) {
      # Regress the reference onto the sample, then take the difference
      # between fitted reference values and the observed sample.
      expressionProfile <- predict(lm(refExp[common] ~ expressionProfile[common])) - expressionProfile[common]
    } else {
      # Rescale the reference to the sample's scale instead (e.g. microarray
      # sample against an RNA-Seq reference).
      expressionProfile <- expressionProfile[common] - predict(lm(expressionProfile[common] ~ refExp[common]))
    }
  }
  geneList <- preprocessGeneList(expressionProfile)
  # Resolve a character identifier to a bundled gene set collection; custom
  # lists pass through untouched.
  if (is.character(geneSets)) {
    if (geneSets == 'MSigDBv5') {
      data(MSigDBv5)
      geneSets <- MSigDBv5   # fixed: original assigned the undefined object `MSiDBv5`
    } else if (geneSets == 'MSigDBv6') {
      data(MSigDBv6)
      geneSets <- MSigDBv6
    } else if (geneSets == 'MSigDBv7') {
      data(MSigDBv7)
      geneSets <- MSigDBv7
    }
  }
  res <- sapply(seq_along(geneSets), function(idx) calcEnrichmentScore(geneList, geneSets[[idx]]))
  names(res) <- names(geneSets)
  res
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grouping.R
\name{nms_from_fasta}
\alias{nms_from_fasta}
\title{Read tip labels from sequence file}
\usage{
nms_from_fasta(flpth)
}
\arguments{
\item{flpth}{File path to .fasta file}
}
\value{
character vector
}
\description{
Read the sequence names (tip labels) from a .fasta file and return them as a character vector.
}
| /man/nms_from_fasta.Rd | permissive | AntonelliLab/gaius | R | false | true | 314 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grouping.R
\name{nms_from_fasta}
\alias{nms_from_fasta}
\title{Read tip labels from sequence file}
\usage{
nms_from_fasta(flpth)
}
\arguments{
\item{flpth}{File path to .fasta file}
}
\value{
character vector
}
\description{
Read the sequence names (tip labels) from a .fasta file and return them as a character vector.
}
|
# Shiny app: resemblance between parents and offspring under a one-locus model.
library(shiny)
library(reshape2)
library(ggplot2)
library(cowplot)
# NOTE(review): rm(list = ls()) at the top of a sourced script is an
# anti-pattern -- it wipes the user's workspace. Consider removing.
rm(list=ls())
# Initial values for the UI sliders defined further below.
init_p <- 0.5
init_AA <- 100
init_Aa <- 50
init_aa <- 0
init_ve <- 25^2
init_sample_size <- log10(1000)
# Shared environment used to pass variance components between parts of the
# app; it is mutated by get_variances().
variances <- new.env()
## initialize to nonsense values
##
variances$va <- -999.0
variances$vg <- -999.0
variances$ve <- -999.0
variances$x <- c(-999.0, -999.0, -999.0)
# NOTE(review): parent.env() only *returns* the parent environment; this call
# discards its result and has no effect. Confirm intent and consider deleting.
parent.env(variances)
# Compute variance components for a one-locus, two-allele quantitative trait.
#
# Arguments:
#   p   - allele frequency of A
#   x11 - genotypic value of AA
#   x12 - genotypic value of Aa
#   x22 - genotypic value of aa
#   ve  - environmental variance
#
# Returns a data.frame of rounded variance components (Vp, Vg, Va, Vd, Ve).
# Side effect: records vg, va, ve and the genotypic values in the shared
# `variances` environment (vd is NOT stored there).
get_variances <- function(p, x11, x12, x22, ve) {
  # Population mean under Hardy-Weinberg genotype frequencies.
  x_bar <- p^2*x11 + 2.0*p*(1.0 - p)*x12 + (1.0 - p)^2*x22
  # a1, a2: presumably the average effects of alleles A and a
  # (conditional genotype means minus half the population mean) -- confirm
  # against the course notes referenced in the UI.
  a1 <- p*x11 + (1.0 - p)*x12 - x_bar/2.0
  a2 <- p*x12 + (1.0 - p)*x22 - x_bar/2.0
  # Total genetic variance: HW-weighted squared deviations of genotypic
  # values from the mean.
  vg <- p^2*(x11 - x_bar)^2 + 2.0*p*(1.0 - p)*(x12 - x_bar)^2 +
    (1.0 - p)^2*(x22 - x_bar)^2
  # Additive variance: same weights applied to breeding values (sums of
  # average effects).
  va <- p^2*(2*a1 - x_bar)^2 + 2.0*p*(1.0 - p)*(a1+a2 - x_bar)^2 +
    (1.0 - p)^2*(2*a2 - x_bar)^2
  # Dominance variance: deviations of breeding values from genotypic values.
  vd <- p^2*(2*a1 - x11)^2 + 2.0*p*(1.0 - p)*(a1+a2 - x12)^2 +
    (1.0 - p)^2*(2*a2 - x22)^2
  # Publish components for use elsewhere in the app.
  variances$vg <- vg
  variances$va <- va
  variances$ve <- ve
  variances$x <- c(x11, x12, x22)
  # Vp is reported as Vg + Ve.
  dat <- data.frame(Component = c("Vp", "Vg", "Va", "Vd", "Ve"),
                    variance = c(round(vg + ve, 3),
                                 round(vg, 3),
                                 round(va, 3),
                                 round(vd, 3),
                                 round(ve, 3)))
  return(dat)
}
## genotypes:
##
## AA - 3
## Aa - 2
## aa - 1
##
## gamete returned
##
## A - 1
## a - 0
##
## Draw one gamete from a parent genotype.
##
## Genotype coding (per the file's convention): 3 = AA, 2 = Aa, 1 = aa.
## Returns 1 for an A gamete and 0 for an a gamete; a heterozygote
## transmits each allele with probability 1/2.
get_gamete <- function(x) {
  if (x == 3) {
    return(1)
  }
  if (x == 1) {
    return(0)
  }
  ## heterozygote: fair coin flip between the two alleles
  if (runif(1) < 0.5) 1 else 0
}
## Mate two parents (genotype codes 1..3) and return one offspring
## genotype code. Each parent contributes one gamete (0 or 1) via
## Mendelian segregation; adding 1 maps the allele count back onto the
## 1..3 genotype coding.
get_offspring <- function(x, y) {
  get_gamete(x) + get_gamete(y) + 1
}
## Define UI
##
## Layout: a sidebar of model inputs (allele frequency, the three
## genotypic values, Ve, and the simulation sample size) and a main
## panel holding the plots, variance table, and heritability output.
ui <- fluidPage(
  titlePanel("Resemblance between parents and offspring"),
  sidebarLayout(
    sidebarPanel(
      sliderInput("p",
                  "Allele frequency",
                  min = 0.0,
                  max = 1.0,
                  value = init_p),
      sliderInput("AA",
                  "Genotypic value of AA",
                  min = 0.0,
                  max = 100.0,
                  value = init_AA),
      sliderInput("Aa",
                  "Genotypic value of Aa",
                  min = 0.0,
                  max = 100.0,
                  value = init_Aa),
      sliderInput("aa",
                  "Genotypic value of aa",
                  min = 0.0,
                  max = 100.0,
                  value = init_aa),
      sliderInput("ve",
                  "Environmental variance",
                  min = 1.0,
                  max = 10000.0,
                  value = init_ve),
      ## log10 scale so the slider covers 10..100,000 evenly
      sliderInput("sample_size",
                  "log10(Sample size) for parent-offspring regression",
                  min = log10(10),
                  max = log10(100000),
                  value = init_sample_size)
    ),
    mainPanel(
      p("This application illustrates the resemblance between parents and offspring. It uses the allele frequency and genotypic values to calculate the additive and dominance variance. The phenotypic variance is the sum of those and the environmental variance. You'll find more information in the notes at"),
      uiOutput("darwin"),
      h2("Phenotypic distribution"),
      plotOutput("phenotypes"),
      h2("Variance components"),
      fluidRow(
        column(2,
               dataTableOutput("variances")
        )
      ),
      h2("Heritability"),
      uiOutput("narrow"),
      uiOutput("broad"),
      h2("Parent-offspring regression"),
      p("To illustrate the relationship between parents and offspring, we"),
      HTML("<ol>
              <li>Generate females and males at random by (a) picking a genotypic values at random based on the underlying genotype frequencies and (b) assigning by at random from a normal distribution with a mean given by the genotypic value and a variance give by Ve.</li>
              <li>Make male-female pairs at random.</li>
              <li>Produce 1 offspring per pair using Mendel's rules.</li>
              <li>Assign the offspring a phenotype based on its genotype and Ve.</li>
              <li>Plot the offspring on the y-axis vs. the mid-parent value on the x-axis and calculate the regression. The slope of the regression should be approximately equal to the heritability calculated above</li>
              </ol>"),
      plotOutput("parent_offspring"),
      hr(),
      p("Source code for this and other Shiny applications is available at:"),
      uiOutput("github")
    )
  )
)
## Define server logic
##
## Renders the links to the course notes and source repository, the
## phenotype-distribution plot, the variance-component table, the
## narrow/broad-sense heritabilities, and a simulated parent-offspring
## regression whose slope estimates the narrow-sense heritability.
server <- function(input, output, session) {
  url_1 <- a("http://darwin.eeb.uconn.edu/eeb348-notes/quant-resemblance.pdf",
             href="http://darwin.eeb.uconn.edu/eeb348-notes/quant-resemblance.pdf")
  output$darwin <- renderUI({
    tagList("", url_1)
  })
  url_2 <- a("https://kholsinger.github.io/PopGen-Shiny/",
             href="https://kholsinger.github.io/PopGen-Shiny/")
  output$github <- renderUI({
    tagList("", url_2)
  })
  ## Phenotype distribution: one normal curve per genotype, weighted by
  ## its Hardy-Weinberg frequency, plus the resulting population mixture.
  output$phenotypes <- renderImage({
    minimum <- min(input$AA, input$Aa, input$aa) - 4*sqrt(input$ve)
    maximum <- max(input$AA, input$Aa, input$aa) + 4*sqrt(input$ve)
    x <- seq(from = minimum,
             to = maximum,
             by = 0.01)
    AA <- dnorm(x, mean = input$AA, sd = sqrt(input$ve))
    Aa <- dnorm(x, mean = input$Aa, sd = sqrt(input$ve))
    aa <- dnorm(x, mean = input$aa, sd = sqrt(input$ve))
    p <- input$p
    Population <- p^2*AA + 2.0*p*(1.0 - p)*Aa + (1.0 - p)^2*aa
    wide.for.plot <- data.frame(x = x,
                                AA = p^2*AA,
                                Aa = 2.0*p*(1.0 - p)*Aa,
                                aa = (1.0 - p)^2*aa,
                                Population = Population)
    for.plot <- melt(wide.for.plot,
                     id.vars = "x",
                     variable.name = "Genotype",
                     value.name = "y")
    outfile <- tempfile(fileext = ".png")
    png(outfile, width = 800, height = 400)
    ## FIX: the curves are drawn with the *color* aesthetic, so legend
    ## ordering must go through scale_color_discrete(); the former
    ## scale_fill_discrete() call had no effect on this plot.
    pop_plot <- ggplot(for.plot, aes(x = x, y = y, color = Genotype)) +
      geom_line() +
      scale_color_discrete(limits = c("AA", "Aa", "aa", "Population")) +
      labs(x = "Phenotype") +
      theme(legend.title = element_blank(),
            axis.title.y = element_blank(),
            axis.text.y = element_blank()) +
      scale_y_continuous(breaks = NULL)
    print(pop_plot)
    dev.off()
    return(list(src = outfile,
                alt = "A graph showing phenotype distributions"))
  }, deleteFile = TRUE)
  output$variances <- renderDataTable(get_variances(input$p,
                                                    input$AA,
                                                    input$Aa,
                                                    input$aa,
                                                    input$ve),
                                      options=list("paging"=FALSE,
                                                   "ordering"=FALSE,
                                                   "info"=FALSE,
                                                   "searching"=FALSE))
  ## narrow-sense heritability: h^2_N = Va / (Vg + Ve)
  output$narrow <- renderText({
    variances <- get_variances(input$p,
                               input$AA,
                               input$Aa,
                               input$aa,
                               input$ve)
    va <- variances$variance[variances$Component == "Va"]
    vg <- variances$variance[variances$Component == "Vg"]
    ve <- variances$variance[variances$Component == "Ve"]
    HTML(paste("h<sup>2</sup><sub>N</sub> = "),
         round(va/(vg + ve), 3))
  })
  ## broad-sense heritability: h^2_B = Vg / (Vg + Ve)
  output$broad <- renderText({
    variances <- get_variances(input$p,
                               input$AA,
                               input$Aa,
                               input$aa,
                               input$ve)
    vg <- variances$variance[variances$Component == "Vg"]
    ve <- variances$variance[variances$Component == "Ve"]
    HTML(paste("h<sup>2</sup><sub>B</sub> = ",
               round(vg/(vg + ve), 3)))
  })
  ## Simulate random-mating families and regress offspring phenotype on
  ## the mid-parent value; the regression slope estimates h^2_N.
  output$parent_offspring <- renderImage({
    p <- input$p
    ve <- input$ve
    sample_size <- 10^input$sample_size
    geno_values <- c(input$AA, input$Aa, input$aa)
    genotypes <- c(p^2, 2.0*p*(1.0 - p), (1.0 - p)^2)
    female <- sample(3, size = sample_size, prob = genotypes, replace = TRUE)
    male <- sample(3, size = sample_size, prob = genotypes, replace = TRUE)
    mid <- numeric(sample_size)
    off <- numeric(sample_size)
    for (i in seq(from = 1, to = sample_size)) {
      f_pheno <- rnorm(1, mean = geno_values[female[i]], sd = sqrt(ve))
      m_pheno <- rnorm(1, mean = geno_values[male[i]], sd = sqrt(ve))
      mid[i] <- (f_pheno + m_pheno)/2.0
      off_geno <- get_offspring(female[i], male[i])
      off[i] <- rnorm(1, mean = geno_values[off_geno], sd = sqrt(ve))
    }
    for.plot <- data.frame(x = mid,
                           y = off,
                           x_center = mid - mean(mid),
                           y_center = off - mean(off))
    ## regression on centered mid-parent and offspring values
    ## forces regression line through mean of each
    ##
    model <- lm(y_center ~ x_center, data = for.plot)
    slope <- summary(model)$coefficients["x_center", "Estimate"]
    ## FIX: project the centered fit back to the original axes. The line
    ## must pass through (mean(mid), mean(off)) with the fitted slope, so
    ## intercept = mean(off) - slope*mean(mid). The previous expression,
    ## mean(mid) - mean(off)*slope, swapped the two means and was only
    ## approximately right because the two means happen to be similar.
    intercept <- mean(off) - slope*mean(mid)
    label <- paste("h^2 == ",
                   round(slope, 3))
    outfile <- tempfile(fileext = ".png")
    png(outfile, width = 800, height = 400)
    po_plot <- ggplot(for.plot, aes(x = x, y = y)) +
      geom_point() +
      geom_abline(slope = slope, intercept = intercept, color="blue") +
      geom_vline(xintercept = mean(mid), linetype = "dashed") +
      geom_hline(yintercept = mean(off), linetype = "dashed") +
      xlab("Mid-parent value") +
      ylab("Offspring value") +
      annotate("text", label = label,
               x = min(for.plot$x) + 5, y = max(for.plot$y) - 20,
               parse = TRUE) +
      theme_bw()
    print(po_plot)
    dev.off()
    return(list(src = outfile,
                alt = "A graph showing a parent-offspring regression"))
  }, deleteFile = TRUE)
}
## Run the application
## (shinyApp() must be the value of the last expression in app.R)
shinyApp(ui = ui, server = server)
| /Quant-gen-resemblance/app.R | no_license | kholsinger/PopGen-Shiny | R | false | false | 10,421 | r | library(shiny)
library(reshape2)
library(ggplot2)
library(cowplot)
rm(list=ls())
init_p <- 0.5
init_AA <- 100
init_Aa <- 50
init_aa <- 0
init_ve <- 25^2
init_sample_size <- log10(1000)
variances <- new.env()
## initialize to nonsense values
##
variances$va <- -999.0
variances$vg <- -999.0
variances$ve <- -999.0
variances$x <- c(-999.0, -999.0, -999.0)
parent.env(variances)
## Compute the variance components of a one-locus, two-allele
## quantitative trait and cache them in the `variances` environment.
##
## p             allele frequency of A
## x11, x12, x22 genotypic values of AA, Aa, aa
## ve            environmental variance
##
## Returns a data frame listing Vp, Vg, Va, Vd, and Ve (3 d.p.).
get_variances <- function(p, x11, x12, x22, ve) {
  q <- 1.0 - p
  geno_freq <- c(p^2, 2.0*p*q, q^2)
  geno_val <- c(x11, x12, x22)
  ## population mean phenotype
  x_bar <- sum(geno_freq*geno_val)
  ## average effects of the two alleles
  a1 <- p*x11 + q*x12 - x_bar/2.0
  a2 <- p*x12 + q*x22 - x_bar/2.0
  ## total genetic variance
  vg <- sum(geno_freq*(geno_val - x_bar)^2)
  ## additive variance: variance of the breeding values
  breeding <- c(2.0*a1, a1 + a2, 2.0*a2)
  va <- sum(geno_freq*(breeding - x_bar)^2)
  ## dominance variance: genotypic values minus breeding values
  vd <- sum(geno_freq*(breeding - geno_val)^2)
  ## cache for other parts of the app
  variances$vg <- vg
  variances$va <- va
  variances$ve <- ve
  variances$x <- geno_val
  data.frame(Component = c("Vp", "Vg", "Va", "Vd", "Ve"),
             variance = round(c(vg + ve, vg, va, vd, ve), 3))
}
## genotypes:
##
## AA - 3
## Aa - 2
## aa - 1
##
## gamete returned
##
## A - 1
## a - 0
##
## Draw one gamete from a parent genotype.
##
## Genotype coding (per the file's convention): 3 = AA, 2 = Aa, 1 = aa.
## Returns 1 for an A gamete and 0 for an a gamete; a heterozygote
## transmits each allele with probability 1/2.
get_gamete <- function(x) {
  if (x == 3) {
    return(1)
  }
  if (x == 1) {
    return(0)
  }
  ## heterozygote: fair coin flip between the two alleles
  if (runif(1) < 0.5) 1 else 0
}
## Mate two parents (genotype codes 1..3) and return one offspring
## genotype code. Each parent contributes one gamete (0 or 1) via
## Mendelian segregation; adding 1 maps the allele count back onto the
## 1..3 genotype coding.
get_offspring <- function(x, y) {
  get_gamete(x) + get_gamete(y) + 1
}
## Define UI
##
ui <- fluidPage(
titlePanel("Resemblance between parents and offspring"),
sidebarLayout(
sidebarPanel(
sliderInput("p",
"Allele frequency",
min = 0.0,
max = 1.0,
value = init_p),
sliderInput("AA",
"Genotypic value of AA",
min = 0.0,
max = 100.0,
value = init_AA),
sliderInput("Aa",
"Genotypic value of Aa",
min = 0.0,
max = 100.0,
value = init_Aa),
sliderInput("aa",
"Genotypic value of aa",
min = 0.0,
max = 100.0,
value = init_aa),
sliderInput("ve",
"Environmental variance",
min = 1.0,
max = 10000.0,
value = init_ve),
sliderInput("sample_size",
"log10(Sample size) for parent-offspring regression",
min = log10(10),
max = log10(100000),
value = init_sample_size)
),
mainPanel(
p("This application illustrates the resemblance between parents and offspring. It uses the allele frequency and genotypic values to calculate the additive and dominance variance. The phenotypic variance is the sum of those and the environmental variance. You'll find more information in the notes at"),
uiOutput("darwin"),
h2("Phenotypic distribution"),
plotOutput("phenotypes"),
h2("Variance components"),
fluidRow(
column(2,
dataTableOutput("variances")
)
),
h2("Heritability"),
uiOutput("narrow"),
uiOutput("broad"),
h2("Parent-offspring regression"),
p("To illustrate the relationship between parents and offspring, we"),
HTML("<ol>
<li>Generate females and males at random by (a) picking a genotypic values at random based on the underlying genotype frequencies and (b) assigning by at random from a normal distribution with a mean given by the genotypic value and a variance give by Ve.</li>
<li>Make male-female pairs at random.</li>
<li>Produce 1 offspring per pair using Mendel's rules.</li>
<li>Assign the offspring a phenotype based on its genotype and Ve.</li>
<li>Plot the offspring on the y-axis vs. the mid-parent value on the x-axis and calculate the regression. The slope of the regression should be approximately equal to the heritability calculated above</li>
</ol>"),
plotOutput("parent_offspring"),
hr(),
p("Source code for this and other Shiny applications is available at:"),
uiOutput("github")
)
)
)
## Define server logic
##
server <- function(input, output, session) {
url_1 <- a("http://darwin.eeb.uconn.edu/eeb348-notes/quant-resemblance.pdf",
href="http://darwin.eeb.uconn.edu/eeb348-notes/quant-resemblance.pdf")
output$darwin <- renderUI({
tagList("", url_1)
})
url_2 <- a("https://kholsinger.github.io/PopGen-Shiny/",
href="https://kholsinger.github.io/PopGen-Shiny/")
output$github <- renderUI({
tagList("", url_2)
})
output$phenotypes <- renderImage({
minimum <- min(input$AA, input$Aa, input$aa) - 4*sqrt(input$ve)
maximum <- max(input$AA, input$Aa, input$aa) + 4*sqrt(input$ve)
x <- seq(from = minimum,
to = maximum,
by = 0.01)
AA <- dnorm(x, mean = input$AA, sd = sqrt(input$ve))
Aa <- dnorm(x, mean = input$Aa, sd = sqrt(input$ve))
aa <- dnorm(x, mean = input$aa, sd = sqrt(input$ve))
p <- input$p
Population <- p^2*AA + 2.0*p*(1.0 - p)*Aa + (1.0 - p)^2*aa
wide.for.plot <- data.frame(x = x,
AA = p^2*AA,
Aa = 2.0*p*(1.0 - p)*Aa,
aa = (1.0 - p)^2*aa,
Population = Population)
for.plot <- melt(wide.for.plot,
id.vars = "x",
variable.name = "Genotype",
value.name = "y")
outfile <- tempfile(fileext = ".png")
png(outfile, width = 800, height = 400)
pop_plot <- ggplot(for.plot, aes(x = x, y = y, color = Genotype)) +
geom_line() +
scale_fill_discrete(limits = c("AA", "Aa", "aa", "Population")) +
labs(x = "Phenotype") +
theme(legend.title = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank()) +
scale_y_continuous(breaks = NULL)
print(pop_plot)
dev.off()
return(list(src = outfile,
alt = "A graph showing phenotype distributions"))
}, deleteFile = TRUE)
output$variances <- renderDataTable(get_variances(input$p,
input$AA,
input$Aa,
input$aa,
input$ve),
options=list("paging"=FALSE,
"ordering"=FALSE,
"info"=FALSE,
"searching"=FALSE))
output$narrow <- renderText({
variances <- get_variances(input$p,
input$AA,
input$Aa,
input$aa,
input$ve)
va <- variances$variance[variances$Component == "Va"]
vg <- variances$variance[variances$Component == "Vg"]
ve <- variances$variance[variances$Component == "Ve"]
HTML(paste("h<sup>2</sup><sub>N</sub> = "),
round(va/(vg + ve), 3))
})
output$broad <- renderText({
variances <- get_variances(input$p,
input$AA,
input$Aa,
input$aa,
input$ve)
vg <- variances$variance[variances$Component == "Vg"]
ve <- variances$variance[variances$Component == "Ve"]
HTML(paste("h<sup>2</sup><sub>B</sub> = ",
round(vg/(vg + ve), 3)))
})
output$parent_offspring <- renderImage({
p <- input$p
ve <- input$ve
sample_size <- 10^input$sample_size
geno_values <- c(input$AA, input$Aa, input$aa)
genotypes <- c(p^2, 2.0*p*(1.0 - p), (1.0 - p)^2)
female <- sample(3, size = sample_size, prob = genotypes, replace = TRUE)
male <- sample(3, size = sample_size, prob = genotypes, replace = TRUE)
mid <- numeric(sample_size)
off <- numeric(sample_size)
for (i in seq(from = 1, to = sample_size)) {
f_pheno <- rnorm(1, mean = geno_values[female[i]], sd = sqrt(ve))
m_pheno <- rnorm(1, mean = geno_values[male[i]], sd = sqrt(ve))
mid[i] <- (f_pheno + m_pheno)/2.0
off_geno <- get_offspring(female[i], male[i])
off[i] <- rnorm(1, mean = geno_values[off_geno], sd = sqrt(ve))
}
for.plot <- data.frame(x = mid,
y = off,
x_center = mid - mean(mid),
y_center = off - mean(off))
## regression on centered mid-parent and offspring values
## forces regression line through mean of each
##
model <- lm(y_center ~ x_center, data = for.plot)
slope <- summary(model)$coefficients["x_center", "Estimate"]
intercept <- summary(model)$coefficients["(Intercept)", "Estimate"]
## y-intercept is on shifted scale. Must add y offset (mean(mid)) and
## project back to original x-axis
##
intercept <- mean(mid) - mean(off)*slope
label <- paste("h^2 == ",
round(slope, 3))
outfile <- tempfile(fileext = ".png")
png(outfile, width = 800, height = 400)
po_plot <- ggplot(for.plot, aes(x = x, y = y)) +
geom_point() +
geom_abline(slope = slope, intercept = intercept, color="blue") +
geom_vline(xintercept = mean(mid), linetype = "dashed") +
geom_hline(yintercept = mean(off), linetype = "dashed") +
xlab("Mid-parent value") +
ylab("Offspring value") +
annotate("text", label = label,
x = min(for.plot$x) + 5, y = max(for.plot$y) - 20,
parse = TRUE) +
theme_bw()
print(po_plot)
dev.off()
return(list(src = outfile,
alt = "A graph showing a parent-offspring regression"))
}, deleteFile = TRUE)
}
## Run the application
##
shinyApp(ui = ui, server = server)
|
#' Download OTP Jar File
#'
#' @description Download the OTP jar file from maven.org
#'
#' @param path path to folder where OTP is to be stored
#' @param version a character string of the version number default is "1.5.0"
#' @param file_name file name to give the otp default "otp.jar"
#' @param url URL to the download server
#' @param quiet logical, passed to download.file, default FALSE
#' @param cache logical, default TRUE, see details
#' @return The path to the OTP file
#' @details As of version 0.3.0.0 `otp_dl_jar` will cache the JAR file within
#' the package and ignore the `path` argument. You can force a new download to
#' be saved in the `path` location by setting `cache = FALSE`.
#' @family setup
#' @examples
#' \dontrun{
#' otp_dl_jar(tempdir())
#' }
#' @export
otp_dl_jar <- function(path = NULL,
                       version = "1.5.0",
                       file_name = paste0("otp-", version, "-shaded.jar"),
                       url = "https://repo1.maven.org/maven2/org/opentripplanner/otp",
                       quiet = FALSE,
                       cache = TRUE) {
  ## Caching is only possible when the installed package directory exists.
  if (cache) {
    libs <- .libPaths()[1]
    cache <- checkmate::test_directory_exists(file.path(libs, "opentripplanner"))
  }
  if (cache) {
    ## Make sure the package-level jar cache folder exists, then reuse a
    ## previously downloaded copy if one is already there.
    jar_dir <- file.path(libs, "opentripplanner", "jar")
    if (!checkmate::test_directory_exists(jar_dir)) {
      dir.create(jar_dir)
    }
    destfile <- file.path(jar_dir, file_name)
    if (checkmate::test_file_exists(destfile)) {
      message("Using cached version from ", destfile)
      return(destfile)
    }
  } else {
    ## No cache: download into the user-supplied folder.
    checkmate::assert_directory_exists(path)
    destfile <- file.path(path, file_name)
  }
  if (version == "2.0.0") {
    warning("OTP2 support is in beta \n")
  }
  ## Build the full maven.org artifact URL and fetch the jar.
  url <- paste0(url, "/", version, "/otp-", version, "-shaded.jar")
  message("The OTP will be saved to ", destfile)
  utils::download.file(url = url, destfile = destfile, mode = "wb", quiet = quiet)
  destfile
}
#' Download Demo Data
#'
#' @description
#' Download the demonstration data for the Isle of Wight
#'
#' @param path_data path to folder where data for OTP is to be stored
#' @param url URL to data
#' @param quiet logical, passed to download.file, default FALSE
#' @family setup
#' @examples
#' \dontrun{
#' otp_dl_demo(tempdir())
#' }
#' @export
## Download and unpack the Isle of Wight demo data into
## <path_data>/graphs/default.
otp_dl_demo <- function(
                        path_data = NULL,
                        url = paste0(
                          "https://github.com/ropensci/opentripplanner/",
                          "releases/download/0.1/isle-of-wight-demo.zip"
                        ),
                        quiet = FALSE) {
  ## FIX: fail fast with a clear message. With the default path_data =
  ## NULL, dir.exists(NULL) returns logical(0) and the if() below would
  ## error with the cryptic "argument is of length zero".
  if (is.null(path_data)) {
    stop("path_data must be provided")
  }
  if (!dir.exists(path_data)) {
    stop(paste0("Can't find folder ", path_data))
  }
  ## Create graphs/default in one call; recursive = TRUE makes both
  ## levels, and existing directories are a silent no-op.
  dir.create(file.path(path_data, "graphs", "default"),
             recursive = TRUE, showWarnings = FALSE)
  message("The demo data will be saved to ", path_data)
  zipfile <- file.path(path_data, "isle-of-wight-demo.zip")
  utils::download.file(
    url = url,
    destfile = zipfile,
    mode = "wb",
    quiet = quiet
  )
  ## Unpack into the graph folder, then remove the temporary archive.
  utils::unzip(zipfile,
    exdir = file.path(path_data, "graphs", "default")
  )
  unlink(zipfile)
}
| /R/otp-download.R | no_license | cran/opentripplanner | R | false | false | 3,579 | r | #' Download OTP Jar File
#'
#' @description Download the OTP jar file from maven.org
#'
#' @param path path to folder where OTP is to be stored
#' @param version a character string of the version number default is "1.5.0"
#' @param file_name file name to give the otp default "otp.jar"
#' @param url URL to the download server
#' @param quiet logical, passed to download.file, default FALSE
#' @param cache logical, default TRUE, see details
#' @return The path to the OTP file
#' @details As of version 0.3.0.0 `otp_dl_jar` will cache the JAR file within
#' the package and ignore the `path` argument. You can force a new download to
#' be saved in the `path` location by setting `cache = FALSE`.
#' @family setup
#' @examples
#' \dontrun{
#' otp_dl_jar(tempdir())
#' }
#' @export
otp_dl_jar <- function(path = NULL,
version = "1.5.0",
file_name = paste0("otp-", version, "-shaded.jar"),
url = "https://repo1.maven.org/maven2/org/opentripplanner/otp",
quiet = FALSE,
cache = TRUE) {
if (cache) {
# Check we can find the package
libs <- .libPaths()[1]
if (!checkmate::test_directory_exists(file.path(libs, "opentripplanner"))) {
cache <- FALSE
}
}
if (cache) {
# Check for JAR folder can find the package
if (!checkmate::test_directory_exists(file.path(libs, "opentripplanner", "jar"))) {
dir.create(file.path(libs, "opentripplanner", "jar"))
}
destfile <- file.path(libs, "opentripplanner", "jar", file_name)
if (checkmate::test_file_exists(destfile)) {
message("Using cached version from ", destfile)
return(destfile)
}
} else {
checkmate::assert_directory_exists(path)
destfile <- file.path(path, file_name)
}
if (version == "2.0.0") {
warning("OTP2 support is in beta \n")
}
url <- paste0(url, "/", version, "/otp-", version, "-shaded.jar")
message("The OTP will be saved to ", destfile)
utils::download.file(url = url, destfile = destfile, mode = "wb", quiet = quiet)
return(destfile)
}
#' Download Demo Data
#'
#' @description
#' Download the demonstration data for the Isle of Wight
#'
#' @param path_data path to folder where data for OTP is to be stored
#' @param url URL to data
#' @param quiet logical, passed to download.file, default FALSE
#' @family setup
#' @examples
#' \dontrun{
#' otp_dl_demo(tempdir())
#' }
#' @export
otp_dl_demo <- function(
path_data = NULL,
url = paste0(
"https://github.com/ropensci/opentripplanner/",
"releases/download/0.1/isle-of-wight-demo.zip"
),
quiet = FALSE) {
if (!dir.exists(path_data)) {
stop(paste0("Can't find folder ", path_data))
}
if (!dir.exists(file.path(path_data, "graphs"))) {
dir.create(file.path(path_data, "graphs"))
}
if (!dir.exists(file.path(path_data, "graphs", "default"))) {
dir.create(file.path(path_data, "graphs", "default"))
}
message("The demo data will be saved to ", path_data)
utils::download.file(
url = url,
destfile = file.path(path_data, "isle-of-wight-demo.zip"),
mode = "wb",
quiet = quiet
)
utils::unzip(file.path(path_data, "isle-of-wight-demo.zip"),
exdir = file.path(path_data, "graphs", "default")
)
unlink(file.path(path_data, "isle-of-wight-demo.zip"))
}
|
rm(list=ls())
#i1 represent the index of genotype file
#i1 ranges from 1 to 596
arg <- commandArgs(trailingOnly=T)
i1 <- as.numeric(arg[[1]])
print(i1)
library(R.utils)
library(data.table)
library(devtools)
library(withr)
library(gtools)
library(doParallel)
library(foreach)
#install R package
#bc2 is a development version of TOP package
#I used bc2 in my previous analyses
#the function of bc2 and TOP are almost the same
#TOP has more documentation
#to install bc2 or TOP, one needs to use install_github function
#you can specify the directory to your local directory
#with_libpaths(new = "/home/zhangh24/R/x86_64-pc-linux-gnu-library/4.2/", install_github('andrewhaoyu/bc2'))
library(bc2,
lib.loc ="/home/zhangh24/R/x86_64-pc-linux-gnu-library/4.2/")
setwd("/data/zhangh24/breast_cancer_data_analysis/")
#imputation file subject order
if(i1<=564){
subject.file <- "/data/NC_BW/icogs_onco/genotype/imputed2/icogs_order.txt.gz"
Icog.order <- read.table(gzfile(subject.file))
}else{
subject.file <- "/data/NC_BW/icogs_onco/genotype/imputed2/icogs_order_23.txt.gz"
Icog.order <- read.table(gzfile(subject.file))
}
setwd("/data/zhangh24/breast_cancer_data_analysis/")
#load the phenotypes data
data1 <- fread("./data/iCOGS_euro_v10_10232017.csv",header=T)
data1 <- as.data.frame(data1)
y.pheno.mis1 <- cbind(data1$Behaviour1,data1$ER_status1,data1$PR_status1,data1$HER2_status1,data1$Grade1)
colnames(y.pheno.mis1) = c("Behavior","ER","PR","HER2","Grade")
#x.test.all.mis1 <- data1[,c(27:206)]
SG_ID <- data1$SG_ID
#load the covariates for the model: PC1-10, age
x.covar.mis1 <- data1[,c(5:14,204)]
age <- data1[,204]
#find the people with missing ages
idx.incomplete <- which(age==888)
table(y.pheno.mis1[idx.incomplete,1])
idx.complete <- which(age!=888)
#remove people with missing age
y.pheno.mis1 <- y.pheno.mis1[idx.complete,]
x.covar.mis1 <- x.covar.mis1[idx.complete,]
SG_ID <- SG_ID[idx.complete]
#number of subject in the genotype file is n
n <- length(Icog.order[,1])
#creat a intial value for snpvalue
snpvalue <- rep(0,n)
#the phenotype data is a subset a the genotype data
#find the correponding subset
idx.fil <- Icog.order[,1]%in%SG_ID
#match the phenotype data with genotype data
idx.match <- match(SG_ID,Icog.order[idx.fil,1])
#idx.fil and idx.match will be used in later step for matching phenotype and genotype
#load the null hypothesis results for other covariates
#this component will be needed in later ScoreTest
load("./whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/score.test.support.icog.ERPRHER2Grade.Rdata")
#load all the imputed files
Filesdir <- "/data/NC_BW/icogs_onco/genotype/imputed2/icogs_imputed/"
Files <- dir(Filesdir,pattern="icogs_merged_b1_12.",full.names=T)
#order the imputed files
Files <- mixedsort(Files)
#specific one filegeno.file
geno.file <- Files[i1]
#count the number of variants in the file
num <- as.integer(system(paste0("zcat ",geno.file,"| wc -l"),intern=T))
#num = 10
#number of tumor characteristis is four
num.of.tumor <- ncol(y.pheno.mis1)-1
#number of subject in the phenotype files
n.sub <- nrow(y.pheno.mis1)
idx.control <- which(y.pheno.mis1[,1]==0)
#count the number of control in the data
n.control <- length(idx.control)
#get the three different z design matrix
z.design.list = GenerateZDesignCombination(y.pheno.mis1)
z.additive = z.design.list[[1]]
z.interaction = z.design.list[[2]]
z.saturated = z.design.list[[3]]
#number of second stage parameters
#if use additive model
n.second = ncol(z.additive)
#if use pair-wise interaction model
#n.second = ncol(z.interaction)
#if use saturated model
#n.second = ncol(z.saturated)
#parallel computing with foreach function
#the default of biowulf job allocation is two cores
#without parallel, we are only using 50% of the computing resources
#the job is running on two cores simultaneously
#parallel computing is faster, but sometimes also hard to debug
#it's also okay to just use a single for loop
#single for loop is easier to debug
#here I am splitting the jobs onto two cores
no.cores <- 2
inner.size <- 2
registerDoParallel(no.cores)
result.list <- foreach(job.i = 1:inner.size)%dopar%{
print(job.i)
#startend is a function in bc2 package
#specific the total loop number, the number of inner jobs
#startend will equally split the total loop
#startend will return with the start and the end of the job line
#job.i is the index of the inner jobs
#for example, if num = 10, inner.size =2, job.i = 1, then start = 1, end = 5
#for example, if num = 10, inner.size =2, job.i = 2, then start = 6, end = 10
start.end <- startend(num,inner.size,job.i)
start <- start.end[1]
end <- start.end[2]
inner.num <- end-start+1
#score_matrix, each row is the score vector for a genetic marker
score_result <- matrix(0,inner.num,n.second)
#information matrix, each row is the as.vector(information matrix) for a genetic marker
infor_result <- matrix(0,inner.num,(n.second)^2)
#snpid information
snpid_result <- rep("c",inner.num)
#frequencies of the genetic marker
freq.all <- rep(0,inner.num)
temp <- 0
#open the file
con <- gzfile(geno.file)
open(con)
for(i in 1:num){
#print the index every 500 SNPs
#if(i%%500==0){
print(i)
#}
#read one line of genetic file
oneLine <- readLines(con,n=1)
#the total number of SNPs are split into two sub-jobs
#only start run the test after the start location
if(i>=start){
temp <- temp+1
#the readLine result is a vector
myVector <- strsplit(oneLine," ")
#load the SNP ID
snpid <- as.character(myVector[[1]][2])
snpid_result[temp] <- snpid
snpvalue <- rep(0,n)
#load the imputed score for the genetic marker
#3 * number of subjects length
#every three columns are the probality for aa, Aa, AA for one subject
snppro <- as.numeric(unlist(myVector)[6:length(myVector[[1]])])
if(length(snppro)!=(3*n)){
break
}
#calculate the expected genotype score of the subject. Value between 0 to 2.
snpvalue <- convert(snppro,n)
#match the genotype to the phenotype data
snpvalue <- snpvalue[idx.fil][idx.match]
#calculate the allele frequencies only use controls
snpvalue.control <- snpvalue[idx.control]
freq <- sum(snpvalue.control)/(2*n.control)
freq.all[temp] <- freq
#print(paste0("freq",freq))
#only keep SNPs with allele frequency between 0.006 to 0.994
if(freq<0.006|freq>0.994){
#if the SNP is too rare, just keep as score 0.
score_result[temp,] <- 0
infor_result[temp,] <- 0.1
}else{
#fit the ScoreTest
#change second.stage.structure to second.stage.structure = pairwise.interaction for interaction model
#change second.stage.structure to second.stage.structure = saturated for saturated
score.test.icog<- ScoreTest(y=y.pheno.mis1,
x=snpvalue,
second.stage.structure="additive",
score.test.support=score.test.support.icog.ERPRHER2Grade,
missingTumorIndicator=888)
#the first element is score
score_result[temp,] <- score.test.icog[[1]]
#the second element is the efficient information matrix
infor_result[temp,] <- as.vector(score.test.icog[[2]])
}
}
if(i==end){
break
}
}
close(con)
result <- list(snpid_result,score_result,infor_result,freq.all)
return(result)
}
stopImplicitCluster()
#the output of foreach is saved as two list
#the attached code combine the two list as one
#preallocate full-size containers so the per-core results can be copied
#into contiguous row blocks
score_result <- matrix(0,num,n.second)
infor_result <- matrix(0,num,(n.second)^2)
snpid_result <- rep("c",num)
freq.all <- rep(0,num)
total <- 0
for(i in 1:inner.size){
  result.temp <- result.list[[i]]
  #number of SNPs handled by this inner job
  temp <- length(result.temp[[1]])
  snpid_result[total+(1:temp)] <- result.temp[[1]]
  score_result[total+(1:temp),] <- result.temp[[2]]
  infor_result[total+(1:temp),] <- result.temp[[3]]
  freq.all[total+(1:temp)] <- result.temp[[4]]
  #advance the write offset past the rows just filled
  total <- total+temp
}
#NOTE(review): "snpid_reuslt" is a typo for "snpid_result"; downstream
#code that loads this file must use the misspelled name -- confirm before
#renaming
result <- list(snpid_reuslt=snpid_result,score_result=score_result,infor_result=infor_result,freq.all=freq.all)
#change the directory to your local directory
save(result,file=paste0("./whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/ERPRHER2Grade_fixed_baseline",i1))
| /whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/code/2_whole_genome_ERPRHER2Grade_fixed_baseline.R | no_license | andrewhaoyu/breast_cancer_data_analysis | R | false | false | 8,618 | r | rm(list=ls())
#i1 represent the index of genotype file
#i1 ranges from 1 to 596
arg <- commandArgs(trailingOnly=T)
i1 <- as.numeric(arg[[1]])
print(i1)
library(R.utils)
library(data.table)
library(devtools)
library(withr)
library(gtools)
library(doParallel)
library(foreach)
#install R package
#bc2 is a development version of TOP package
#I used bc2 in my previous analyses
#the function of bc2 and TOP are almost the same
#TOP has more documentation
#to install bc2 or TOP, one needs to use install_github function
#you can specify the directory to your local directory
#with_libpaths(new = "/home/zhangh24/R/x86_64-pc-linux-gnu-library/4.2/", install_github('andrewhaoyu/bc2'))
library(bc2,
lib.loc ="/home/zhangh24/R/x86_64-pc-linux-gnu-library/4.2/")
setwd("/data/zhangh24/breast_cancer_data_analysis/")
#imputation file subject order
if(i1<=564){
subject.file <- "/data/NC_BW/icogs_onco/genotype/imputed2/icogs_order.txt.gz"
Icog.order <- read.table(gzfile(subject.file))
}else{
subject.file <- "/data/NC_BW/icogs_onco/genotype/imputed2/icogs_order_23.txt.gz"
Icog.order <- read.table(gzfile(subject.file))
}
setwd("/data/zhangh24/breast_cancer_data_analysis/")
#load the phenotypes data
data1 <- fread("./data/iCOGS_euro_v10_10232017.csv",header=T)
data1 <- as.data.frame(data1)
y.pheno.mis1 <- cbind(data1$Behaviour1,data1$ER_status1,data1$PR_status1,data1$HER2_status1,data1$Grade1)
colnames(y.pheno.mis1) = c("Behavior","ER","PR","HER2","Grade")
#x.test.all.mis1 <- data1[,c(27:206)]
SG_ID <- data1$SG_ID
#load the covariates for the model: PC1-10, age
x.covar.mis1 <- data1[,c(5:14,204)]
age <- data1[,204]
#find the people with missing ages
idx.incomplete <- which(age==888)
table(y.pheno.mis1[idx.incomplete,1])
idx.complete <- which(age!=888)
#remove people with missing age
y.pheno.mis1 <- y.pheno.mis1[idx.complete,]
x.covar.mis1 <- x.covar.mis1[idx.complete,]
SG_ID <- SG_ID[idx.complete]
#number of subject in the genotype file is n
n <- length(Icog.order[,1])
#creat a intial value for snpvalue
snpvalue <- rep(0,n)
#the phenotype data is a subset a the genotype data
#find the correponding subset
idx.fil <- Icog.order[,1]%in%SG_ID
#match the phenotype data with genotype data
idx.match <- match(SG_ID,Icog.order[idx.fil,1])
#idx.fil and idx.match will be used in later step for matching phenotype and genotype
#load the null hypothesis results for other covariates
#this component will be needed in later ScoreTest
load("./whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/score.test.support.icog.ERPRHER2Grade.Rdata")
#load all the imputed files
Filesdir <- "/data/NC_BW/icogs_onco/genotype/imputed2/icogs_imputed/"
Files <- dir(Filesdir,pattern="icogs_merged_b1_12.",full.names=T)
#order the imputed files
Files <- mixedsort(Files)
#specific one filegeno.file
geno.file <- Files[i1]
#count the number of variants in the file
num <- as.integer(system(paste0("zcat ",geno.file,"| wc -l"),intern=T))
#num = 10
#number of tumor characteristis is four
num.of.tumor <- ncol(y.pheno.mis1)-1
#number of subject in the phenotype files
n.sub <- nrow(y.pheno.mis1)
idx.control <- which(y.pheno.mis1[,1]==0)
#count the number of control in the data
n.control <- length(idx.control)
#get the three different z design matrix
z.design.list = GenerateZDesignCombination(y.pheno.mis1)
z.additive = z.design.list[[1]]
z.interaction = z.design.list[[2]]
z.saturated = z.design.list[[3]]
#number of second stage parameters
#if use additive model
n.second = ncol(z.additive)
#if use pair-wise interaction model
#n.second = ncol(z.interaction)
#if use saturated model
#n.second = ncol(z.saturated)
#parallel computing with foreach function
#the default of biowulf job allocation is two cores
#without parallel, we are only using 50% of the computing resources
#the job is running on two cores simultaneously
#parallel computing is faster, but sometimes also hard to debug
#it's also okay to just use a single for loop
#single for loop is easier to debug
#here I am splitting the jobs onto two cores
no.cores <- 2
inner.size <- 2
registerDoParallel(no.cores)
# Parallel worker block: each of the `inner.size` workers streams the gzipped
# genotype file and runs the score test on its own contiguous slice of SNPs.
# Each worker returns a list of (snp ids, score vectors, information
# matrices, allele frequencies) for its slice.
result.list <- foreach(job.i = 1:inner.size)%dopar%{
print(job.i)
#startend is a function in bc2 package
#specific the total loop number, the number of inner jobs
#startend will equally split the total loop
#startend will return with the start and the end of the job line
#job.i is the index of the inner jobs
#for example, if num = 10, inner.size =2, job.i = 1, then start = 1, end = 5
#for example, if num = 10, inner.size =2, job.i = 2, then start = 6, end = 10
start.end <- startend(num,inner.size,job.i)
start <- start.end[1]
end <- start.end[2]
inner.num <- end-start+1
#score matrix, each row is the score vector for a genetic marker
score_result <- matrix(0,inner.num,n.second)
#information matrix, each row is the as.vector(information matrix) for a genetic marker
infor_result <- matrix(0,inner.num,(n.second)^2)
#snpid information
snpid_result <- rep("c",inner.num)
#frequencies of the genetic marker
freq.all <- rep(0,inner.num)
# row counter within this worker's result matrices
temp <- 0
#open the gzipped genotype file for streaming
con <- gzfile(geno.file)
open(con)
# NOTE(review): every worker reads the file from line 1 and discards lines
# before its own `start`, so the file-scanning work is duplicated across
# workers; only the score-testing work is actually split.
for(i in 1:num){
#print the index every 500 SNPs
#if(i%%500==0){
print(i)
#}
#read one line of genetic file
oneLine <- readLines(con,n=1)
#the total number of SNPs are split into two sub-jobs
#only start run the test after the start location
if(i>=start){
temp <- temp+1
#the readLine result is a vector
myVector <- strsplit(oneLine," ")
#load the SNP ID (assumes it is the 2nd space-delimited field -- TODO confirm file format)
snpid <- as.character(myVector[[1]][2])
snpid_result[temp] <- snpid
snpvalue <- rep(0,n)
#load the imputed score for the genetic marker
#3 * number of subjects length
#every three columns are the probality for aa, Aa, AA for one subject
snppro <- as.numeric(unlist(myVector)[6:length(myVector[[1]])])
# NOTE(review): `break` aborts this worker's whole loop on the first
# malformed line, silently leaving all remaining rows of the result
# matrices at their initial filler values -- consider logging instead.
if(length(snppro)!=(3*n)){
break
}
#calculate the expected genotype score of the subject. Value between 0 to 2.
snpvalue <- convert(snppro,n)
#match the genotype to the phenotype data
snpvalue <- snpvalue[idx.fil][idx.match]
#calculate the allele frequencies only use controls
snpvalue.control <- snpvalue[idx.control]
freq <- sum(snpvalue.control)/(2*n.control)
freq.all[temp] <- freq
#print(paste0("freq",freq))
#only keep SNPs with allele frequency between 0.006 to 0.994
if(freq<0.006|freq>0.994){
#if the SNP is too rare, the score is kept as 0 while the information
#is filled with 0.1 -- presumably to keep later matrix operations
#non-singular (NOTE(review): confirm this convention downstream)
score_result[temp,] <- 0
infor_result[temp,] <- 0.1
}else{
#fit the ScoreTest
#change second.stage.structure to second.stage.structure = pairwise.interaction for interaction model
#change second.stage.structure to second.stage.structure = saturated for saturated
score.test.icog<- ScoreTest(y=y.pheno.mis1,
x=snpvalue,
second.stage.structure="additive",
score.test.support=score.test.support.icog.ERPRHER2Grade,
missingTumorIndicator=888)
#the first element is score
score_result[temp,] <- score.test.icog[[1]]
#the second element is the efficient information matrix
infor_result[temp,] <- as.vector(score.test.icog[[2]])
}
}
# stop once this worker's slice is finished
if(i==end){
break
}
}
close(con)
result <- list(snpid_result,score_result,infor_result,freq.all)
return(result)
}
stopImplicitCluster()
# Re-assemble the per-worker results (one list per worker) into full-length
# matrices/vectors covering all `num` SNPs, preserving the original file order.
score_result <- matrix(0,num,n.second)
infor_result <- matrix(0,num,(n.second)^2)
snpid_result <- rep("c",num)
freq.all <- rep(0,num)
# running offset: number of SNPs already copied in from earlier workers
total <- 0
for(i in 1:inner.size){
result.temp <- result.list[[i]]
# number of SNPs handled by worker i (length of its snp-id vector)
temp <- length(result.temp[[1]])
snpid_result[total+(1:temp)] <- result.temp[[1]]
score_result[total+(1:temp),] <- result.temp[[2]]
infor_result[total+(1:temp),] <- result.temp[[3]]
freq.all[total+(1:temp)] <- result.temp[[4]]
total <- total+temp
}
# NOTE(review): 'snpid_reuslt' is a typo for 'snpid_result', but it is the
# element name written into the saved .Rdata files -- any downstream code
# that loads these results must use the misspelled name, so renaming it here
# would break existing consumers. Flagged rather than fixed.
result <- list(snpid_reuslt=snpid_result,score_result=score_result,infor_result=infor_result,freq.all=freq.all)
#change the directory to your local directory before running
save(result,file=paste0("./whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/ERPRHER2Grade_fixed_baseline",i1))
|
## TidyX Episode 72:
# https://dbplyr.tidyverse.org/
### Packages ---------------------------------------------
library(tidyverse)
library(dbplyr) ## this is technically called under the hood by tidyverse
library(RSQLite)
library(DBI)
library(shiny)
library(shinyWidgets)
### Interact with database ---------------------------------------------
db_con <- dbConnect(
drv = RSQLite::SQLite(),
here::here("TidyTuesday_Explained/072-Databases_with_Shiny/nba_seasons.db")
)
## what tables exist?
dbListTables(db_con)
## Grouping by
d <- map_dfr(
paste0("season_",2001:2005),
function(season){
tbl(db_con, season) %>%
filter(home == "Los Angeles Lakers") %>%
select(game_id, home_points, visitor_points) %>%
as.data.frame()
})
head(d)
tail(d)
dbDisconnect(db_con)
## Shiny Application
# Re-open the database to build the list of team names for the picker
db_con <- dbConnect(
drv = RSQLite::SQLite(),
here::here("TidyTuesday_Explained/072-Databases_with_Shiny/nba_seasons.db")
)
## Distinct home-team names across all 20 season tables.
## paste0() builds one query of 20 SELECTs joined by " UNION "; SQL UNION
## (as opposed to UNION ALL) also removes duplicate names across seasons.
## NOTE(review): 'team' is a 1-column data.frame; pickerInput appears to
## coerce it to a plain vector of choices -- confirm.
team <- dbGetQuery(
db_con,
paste0("SELECT HOME FROM season_",2001:2020, collapse = " UNION ")
)
dbDisconnect(db_con)
## SHINY APP
ui <- fluidPage(
title = "Home Scoring Margin: 2001 - 2020",
sidebarPanel(
pickerInput(inputId = "home",
label = "Pick Teams:",
choices = team,
multiple = TRUE,
selected = "Cleveland Cavaliers")
),
mainPanel(plotOutput(outputId = "plt"))
)
# Shiny server: pulls game rows for the selected home teams from SQLite,
# aggregates the average home point differential per season, and plots it.
server <- function(input, output){
# Reactive 1: raw rows for the selected teams, pulled fresh from the
# database every time input$home changes.
sql_dat <- reactive({
# do nothing until at least one team has been picked
req(input$home)
## connect to database
db_con <- dbConnect(
drv = RSQLite::SQLite(),
here::here("TidyTuesday_Explained/072-Databases_with_Shiny/nba_seasons.db")
)
## disconnect when the reactive finishes (even if it errors)
on.exit(dbDisconnect(db_con))
## query each season table; `!!` forces input$home to be evaluated here
## so dbplyr can translate the filter into literal SQL
sql_dat <- map_dfr(paste0("season_",2001:2020),function(season){
tbl(db_con, season) %>%
filter(home %in% !!input$home) %>%
select(game_id, home, home_points, visitor_points) %>%
as.data.frame()
})
sql_dat
})
# Reactive 2: per-team, per-season mean home point differential.
# The season label is the first 4 characters of game_id (assumes game_id
# begins with the 4-digit season year -- TODO confirm against the data).
dat <- reactive({
d <- sql_dat() %>%
mutate(season = substring(game_id, first = 1, last = 4)) %>%
group_by(home, season) %>%
summarize(home_pts_mov = mean(home_points - visitor_points))
# NOTE(review): d is still grouped by 'home' (summarize drops only the
# last grouping level); harmless for the plot below.
d
})
# Line plot of the seasonal home point differential per selected team
output$plt <- renderPlot({
dat() %>%
ggplot(aes(x = season, y = home_pts_mov, color = home, group = home)) +
labs(
title = "Average Point Differential at Home Stadium Across Seasons",
y = "Average Home Stadium Point Differential",
x = "Season"
) +
geom_hline(yintercept = 0) +
geom_line(size = 1.2) +
geom_point(shape = 21,
fill = "white",
size = 5) +
theme_classic()
})
}
shinyApp(ui, server)
| /TidyTuesday_Explained/072-Databases_with_Shiny/Episode 72- Query Database from Shiny.R | no_license | Jpzhaoo/TidyX | R | false | false | 2,835 | r |
## TidyX Episode 72:
# https://dbplyr.tidyverse.org/
### Packages ---------------------------------------------
library(tidyverse)
library(dbplyr) ## this is technically called under the hood by tidyverse
library(RSQLite)
library(DBI)
library(shiny)
library(shinyWidgets)
### Interact with database ---------------------------------------------
db_con <- dbConnect(
drv = RSQLite::SQLite(),
here::here("TidyTuesday_Explained/072-Databases_with_Shiny/nba_seasons.db")
)
## what tables exist?
dbListTables(db_con)
## Grouping by
d <- map_dfr(
paste0("season_",2001:2005),
function(season){
tbl(db_con, season) %>%
filter(home == "Los Angeles Lakers") %>%
select(game_id, home_points, visitor_points) %>%
as.data.frame()
})
head(d)
tail(d)
dbDisconnect(db_con)
## Shiny Application
db_con <- dbConnect(
drv = RSQLite::SQLite(),
here::here("TidyTuesday_Explained/072-Databases_with_Shiny/nba_seasons.db")
)
## get list of teams
team <- dbGetQuery(
db_con,
paste0("SELECT HOME FROM season_",2001:2020, collapse = " UNION ")
)
dbDisconnect(db_con)
## SHINY APP
ui <- fluidPage(
title = "Home Scoring Margin: 2001 - 2020",
sidebarPanel(
pickerInput(inputId = "home",
label = "Pick Teams:",
choices = team,
multiple = TRUE,
selected = "Cleveland Cavaliers")
),
mainPanel(plotOutput(outputId = "plt"))
)
server <- function(input, output){
sql_dat <- reactive({
req(input$home)
## connect to database
db_con <- dbConnect(
drv = RSQLite::SQLite(),
here::here("TidyTuesday_Explained/072-Databases_with_Shiny/nba_seasons.db")
)
##disconnect when reactive finishes
on.exit(dbDisconnect(db_con))
## query database
sql_dat <- map_dfr(paste0("season_",2001:2020),function(season){
tbl(db_con, season) %>%
filter(home %in% !!input$home) %>%
select(game_id, home, home_points, visitor_points) %>%
as.data.frame()
})
sql_dat
})
dat <- reactive({
d <- sql_dat() %>%
mutate(season = substring(game_id, first = 1, last = 4)) %>%
group_by(home, season) %>%
summarize(home_pts_mov = mean(home_points - visitor_points))
d
})
output$plt <- renderPlot({
dat() %>%
ggplot(aes(x = season, y = home_pts_mov, color = home, group = home)) +
labs(
title = "Average Point Differential at Home Stadium Across Seasons",
y = "Average Home Stadium Point Differential",
x = "Season"
) +
geom_hline(yintercept = 0) +
geom_line(size = 1.2) +
geom_point(shape = 21,
fill = "white",
size = 5) +
theme_classic()
})
}
shinyApp(ui, server)
|
# Plot 1: histogram of Global Active Power (household power consumption data)
# Set the working directory that contains the zipped data set
setwd("ExData_Plotting1/")
# Name of the zip archive and of the data file inside it.
# 'data_file' is used instead of 'file' to avoid shadowing base::file().
zip_file <- "household_power_consumption.zip"
data_file <- "household_power_consumption.txt"
# Read the semicolon-separated data directly from the ZIP archive
data <- read.csv(unz(zip_file, data_file), sep = ";", stringsAsFactors = FALSE)
# Merge the date and time columns into a single timestamp string
data$Date <- paste(data$Date, data$Time)
# Parse the timestamp; comparisons below coerce the character bounds to
# date-times (fixes the original's `=` assignment, which should be `<-`)
data$Date <- strptime(data$Date, "%d/%m/%Y %H:%M:%S")
# Keep only the requested window (2007-02-01 up to midnight of 2007-02-03)
data_fil <- subset(data, Date >= "2007-02-01" & Date <= "2007-02-03")
# All measurement columns (everything after Date/Time) must be numeric;
# use data_fil consistently (the original mixed names(data) and data_fil)
num_cols <- names(data_fil)[3:length(names(data_fil))]
data_fil[num_cols] <- lapply(data_fil[num_cols], as.numeric)
# Open the PNG device with the required 480x480 dimensions
png("plot1.png", width = 480, height = 480, units = "px", bg = "transparent")
# Draw the histogram, then close the device to flush the file
hist(data_fil$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power(Kilowatts)")
dev.off()
| /plot1.R | no_license | eduarger/ExData_Plotting1 | R | false | false | 1,044 | r | # plot 1
# setting my work directory
setwd("ExData_Plotting1/")
#filter the data into the dates required
# define the name of the data in zip
zipfile="household_power_consumption.zip"
# define the name of data
file="household_power_consumption.txt"
# read the data direct of hte ZIP file
data <- read.csv(unz(zipfile, file), sep=";", stringsAsFactors=FALSE)
# merge the date and time
data$Date <- paste(data$Date, data$Time)
#convert the date format
data$Date=strptime(data$Date, "%d/%m/%Y %H:%M:%S")
# we will use data from the specific date
data_fil <- subset(data, Date >= "2007-02-01" & Date <= "2007-02-03")
#define the required columns
numcols <- names(data_fil)[3:length(names(data))]
#converting the columns to numbers
data_fil[numcols] <- lapply(data_fil[numcols], as.numeric)
# choosing the device with the specific features
png('plot1.png', width=480, height=480, units="px", bg = "transparent")
#plotting
hist(data_fil$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power(Kilowatts)")
dev.off()
|
# Package load hook: establish the tmle3sim.* options with their defaults.
.onLoad <- function(...) {
  options(
    tmle3sim.stacktrace = TRUE,
    tmle3sim.dumpfile = FALSE,
    tmle3sim.verbose = FALSE
  )
}
.tmle3sim_env = new.env() | /R/zzz.R | no_license | WenxinZhang25/tmle3sim | R | false | false | 165 | r | .onLoad <- function(...){
options(tmle3sim.stacktrace = TRUE)
options(tmle3sim.dumpfile = FALSE)
options(tmle3sim.verbose = FALSE)
}
.tmle3sim_env = new.env() |
# Install and load the 'devtools' package, which is needed to access the 'rlpi' package from GitHub
install.packages("devtools")
library(devtools)
# Install from main ZSL repository online
install_github("Zoological-Society-of-London/rlpi", dependencies=TRUE)
# Load the 'rlpi' package
library(rlpi)
#############################################
# #
# Simulate the population time-series #
# #
#############################################
# Set the random seed to replicate the stochastic process in the manuscript
set.seed(42)
# Define the duration of the simulation
years <- 1970:2020
# Set the number of species in the simulation
S <- 500
# Create a dummy variable to standardise the simulation duration between 0 and 1 (used to generate non-linear trajectories)
x <- seq(0,1,l=length(years))
# Generate random noise for the low- and high-fluctuation scenarios for the 500 species. Set the first and last values to zero so all time-series share the same start and end values.
N.low <- matrix(rnorm(n=S*length(years),mean=0,sd=1), nrow=S,ncol=length(years)) ; N.low[,c(1,length(years))] <- 0
N.high <- matrix(rnorm(n=S*length(years),mean=0,sd=7), nrow=S,ncol=length(years)) ; N.high[,c(1,length(years))] <- 0
# Here I used the same naming conventionas in Figure 1. However, here the code '50' refers to a concave-up trajectory, '100' is a linear trajectory, and '150' is a concave-down trajectory
vect_50 <- ((60*(1 - x^0.2)) + 40)
vect_100 <- ((60*(1 - x^1)) + 40)
vect_150 <- ((60*(1 - x^5)) + 40)
# Add the noise to the trajectories to make 500 unique populations for the low ('l') fluctuation scenario
N_50l <- sweep(N.low, MARGIN=2, vect_50, '+')
N_100l <- sweep(N.low, MARGIN=2, vect_100, '+')
N_150l <- sweep(N.low, MARGIN=2, vect_150, '+')
# Calculate the average for the 500 low fluctuation populations
Ave_50l <- apply(N_50l,2,function(x) {mean(x,na.rm=T)})
Ave_100l <- apply(N_100l,2,function(x) {mean(x,na.rm=T)})
Ave_150l <- apply(N_150l,2,function(x) {mean(x,na.rm=T)})
# Add the noise to the trajectories to make 500 unique populations for the high ('h') fluctuation scenario
N_50h <- sweep(N.high, MARGIN=2, vect_50, '+')
N_100h <- sweep(N.high, MARGIN=2, vect_100, '+')
N_150h <- sweep(N.high, MARGIN=2, vect_150, '+')
# Calculate the average for the 500 high fluctuation populations
Ave_50h <- apply(N_50h,2,function(x) {mean(x,na.rm=T)})
Ave_100h <- apply(N_100h,2,function(x) {mean(x,na.rm=T)})
Ave_150h <- apply(N_150h,2,function(x) {mean(x,na.rm=T)})
#####################################################
# #
# Prepare the data for the Living Planet Index #
# #
#####################################################
# Define unique integer ID for each time-series
ID <- 1:S
# Define unique integer ID for each species
Species <- as.factor(1:S)
# Combine the ID and species name to the simulated low-fluctuation populations
Pop_50l <- cbind(ID,Species,N_50l)
Pop_100l <- cbind(ID,Species,N_100l)
Pop_150l <- cbind(ID,Species,N_150l)
# Add the default column names as required by the 'rlpi' package
colnames(Pop_50l) <- (c("ID","Binomial",paste0("X",as.factor(years))))
colnames(Pop_100l) <- (c("ID","Binomial",paste0("X",as.factor(years))))
colnames(Pop_150l) <- (c("ID","Binomial",paste0("X",as.factor(years))))
# Combine the ID and species name to the simulated high-fluctuation populations
Pop_50h <- cbind(ID,Species,N_50h)
Pop_100h <- cbind(ID,Species,N_100h)
Pop_150h <- cbind(ID,Species,N_150h)
# Add the default column names as required by the 'rlpi' package
colnames(Pop_50h) <- (c("ID","Binomial",paste0("X",as.factor(years))))
colnames(Pop_100h) <- (c("ID","Binomial",paste0("X",as.factor(years))))
colnames(Pop_150h) <- (c("ID","Binomial",paste0("X",as.factor(years))))
############################################################################################
######################################
# #
# Empirical Living Planet Index #
# #
######################################
# This is just an index vector for which time-series hould be included in calculations
# Here, we include all time series (Note: this command in needed when you want to use a subset of the data)
index_vector <- rep(TRUE, S)
#This creates an 'infile' for calcualting the LPI
# Note: you need a folder names 'LPI_files' in your working directory
infile_50l <- create_infile(as.data.frame(Pop_50l), start_col_name="X1970", end_col_name="X2020", index_vector=index_vector, name="LPI_files/lpi_50l")
lpi_50l <- LPIMain(infile_50l, REF_YEAR = 1970, PLOT_MAX = 2019, BOOT_STRAP_SIZE = 100, VERBOSE=FALSE, plot_lpi=TRUE)
infile_100l <- create_infile(as.data.frame(Pop_100l), start_col_name="X1970", end_col_name="X2020", index_vector=index_vector, name="LPI_files/lpi_100l")
lpi_100l <- LPIMain(infile_100l, REF_YEAR = 1970, PLOT_MAX = 2019, BOOT_STRAP_SIZE = 100, VERBOSE=FALSE, plot_lpi=TRUE)
infile_150l <- create_infile(as.data.frame(Pop_150l), start_col_name="X1970", end_col_name="X2020", index_vector=index_vector, name="LPI_files/lpi_150l")
lpi_150l <- LPIMain(infile_150l, REF_YEAR = 1970, PLOT_MAX = 2019, BOOT_STRAP_SIZE = 100, VERBOSE=FALSE, plot_lpi=TRUE)
#This creates an 'infile' for calcualting the LPI
# Note: you need a folder names 'LPI_files' in your working directory
infile_50h <- create_infile(as.data.frame(Pop_50h), start_col_name="X1970", end_col_name="X2020", index_vector=index_vector, name="LPI_files/lpi_50h")
lpi_50h <- LPIMain(infile_50h, REF_YEAR = 1970, PLOT_MAX = 2019, BOOT_STRAP_SIZE = 100, VERBOSE=FALSE, plot_lpi=TRUE)
infile_100h <- create_infile(as.data.frame(Pop_100h), start_col_name="X1970", end_col_name="X2020", index_vector=index_vector, name="LPI_files/lpi_100h")
lpi_100h <- LPIMain(infile_100h, REF_YEAR = 1970, PLOT_MAX = 2019, BOOT_STRAP_SIZE = 100, VERBOSE=FALSE, plot_lpi=TRUE)
infile_150h <- create_infile(as.data.frame(Pop_150h), start_col_name="X1970", end_col_name="X2020", index_vector=index_vector, name="LPI_files/lpi_150h")
lpi_150h <- LPIMain(infile_150h, REF_YEAR = 1970, PLOT_MAX = 2019, BOOT_STRAP_SIZE = 100, VERBOSE=FALSE, plot_lpi=TRUE)
###############################################################################
###########################################
# #
# Function for reshuffling null model #
# #
###########################################
# Reshuffling null model for the Living Planet Index.
#
# For each population (row of POP.mat) the year-to-year increments (deltas)
# are resampled without replacement, producing a reshuffled trajectory with
# the same starting value and the same multiset of increments. The
# reshuffled assemblage is then run through the full LPI pipeline and the
# final (most recent) index value is returned.
#
# NOTE: relies on the globals `ID`, `Species` and `years` defined earlier in
# this script, and writes the rlpi infile into the 'LPI_files' folder.
#
# Args:
#   POP.mat: numeric matrix; rows = populations, columns = years.
# Returns:
#   The final LPI value (scalar) of the reshuffled assemblage.
null.mod <- function(POP.mat){
N.shuf <- matrix(NA,nrow=nrow(POP.mat),ncol=ncol(POP.mat))
# Reshuffle each population time-series independently
# (seq_len is safe even if POP.mat has zero rows, unlike 1:nrow)
for (k in seq_len(nrow(POP.mat))){
# Isolate the time-series
pop.vect <- as.numeric(POP.mat[k,])
# Calculate incremental changes between time-points (i.e. deltas)
deltas <- diff(pop.vect)
# Reshuffle the deltas by resampling without replacement
# (spell out FALSE; T/F are reassignable and unsafe)
null2 <- sample(deltas,length(deltas),replace=FALSE)
# Rebuild a trajectory from the start value plus the shuffled increments
new.vect <- c(pop.vect[1],(pop.vect[1] + cumsum(null2)))
# Populations that hit or cross zero become missing values.
# The which() form handles NA entries gracefully, whereas the original
# `if (min(new.vect) <= 0)` guard errored when new.vect contained NA.
new.vect[which(new.vect<=0)] <- NA
# Assign the reshuffled time-series to the output matrix
N.shuf[k,] <- new.vect
}
# Attach the ID/species columns required by the 'rlpi' package
POP.shuf <- cbind(ID,Species,N.shuf)
# Add the default column names required by the 'rlpi' package
colnames(POP.shuf) <- c("ID","Binomial",paste0("X",as.factor(years)))
# Generate the infile and calculate the Living Planet Index
infile_null <- create_infile(as.data.frame(POP.shuf), start_col_name="X1970", end_col_name="X2020", index_vector=rep(TRUE,500), name="LPI_files/lpi_null")
lpi_null <- LPIMain(infile_null, REF_YEAR = 1970, PLOT_MAX = 2019, BOOT_STRAP_SIZE = 100, VERBOSE=FALSE, plot_lpi=TRUE)
# Identify and report the final LPI value from the reshuffled community
final.val <- lpi_null[nrow(lpi_null),1]
final.val
}
########################################################################
############################################
# #
# Iterating the reshuffling null model #
# #
############################################
# NUmber of iterations
iterations <- 100
# Create a blank holder-matrix for the 6 assemblages (2 x Fluctuation intensity, 3 x trajectory shape)
output.mat <- matrix(NA,nrow=iterations, ncol=6)
# Iterate the null model in a loop
# The text just adds a laple to the automatically generated plot to keep track of the simulation
for (p in 1:iterations){
output.mat[p,1] <- null.mod(N_50l)
text(1995,1.9,paste("iteration = ",p, "val = 1"))
output.mat[p,2] <- null.mod(N_100l)
text(1995,1.9,paste("iteration = ",p, "val = 2"))
output.mat[p,3] <- null.mod(N_150l)
text(1995,1.9,paste("iteration = ",p, "val = 3"))
output.mat[p,4] <- null.mod(N_50h)
text(1995,1.9,paste("iteration = ",p, "val = 4"))
output.mat[p,5] <- null.mod(N_100h)
text(1995,1.9,paste("iteration = ",p, "val = 5"))
output.mat[p,6] <- null.mod(N_150h)
text(1995,1.9,paste("iteration = ",p, "val = 6"))
}
# Add column names to the iterated output
colnames(output.mat) <- c("ConcaveL", "LinearL","ConvexL","ConcaveH", "LinearH","ConvexH")
# Write the output to file, which can be imported to replicate Figure 4
write.table(output.mat,file= "IterationOutput.txt",quote=T,sep="\t",row.names=F,col.names=T)
| /Cloud Simulations/NonlinearTrajectories.R | permissive | falko-buschke/LPI | R | false | false | 9,621 | r | # Install and load the 'devtools' package, which is needed to access the 'rlpi' package from GitHub
install.packages("devtools")
library(devtools)
# Install from main ZSL repository online
install_github("Zoological-Society-of-London/rlpi", dependencies=TRUE)
# Load the 'rlpi' package
library(rlpi)
#############################################
# #
# Simulate the population time-series #
# #
#############################################
# Set the random seed to replicate the stochastic process in the manuscript
set.seed(42)
# Define the duration of the simulation
years <- 1970:2020
# Set the number of species in the simulation
S <- 500
# Create a dummy variable to standardise the simulation duration between 0 and 1 (used to generate non-linear trajectories)
x <- seq(0,1,l=length(years))
# Generate random noise for the low- and high-fluctuation scenarios for the 500 species. Set the first and last values to zero so all time-series share the same start and end values.
N.low <- matrix(rnorm(n=S*length(years),mean=0,sd=1), nrow=S,ncol=length(years)) ; N.low[,c(1,length(years))] <- 0
N.high <- matrix(rnorm(n=S*length(years),mean=0,sd=7), nrow=S,ncol=length(years)) ; N.high[,c(1,length(years))] <- 0
# Here I used the same naming conventionas in Figure 1. However, here the code '50' refers to a concave-up trajectory, '100' is a linear trajectory, and '150' is a concave-down trajectory
vect_50 <- ((60*(1 - x^0.2)) + 40)
vect_100 <- ((60*(1 - x^1)) + 40)
vect_150 <- ((60*(1 - x^5)) + 40)
# Add the noise to the trajectories to make 500 unique populations for the low ('l') fluctuation scenario
N_50l <- sweep(N.low, MARGIN=2, vect_50, '+')
N_100l <- sweep(N.low, MARGIN=2, vect_100, '+')
N_150l <- sweep(N.low, MARGIN=2, vect_150, '+')
# Calculate the average for the 500 low fluctuation populations
Ave_50l <- apply(N_50l,2,function(x) {mean(x,na.rm=T)})
Ave_100l <- apply(N_100l,2,function(x) {mean(x,na.rm=T)})
Ave_150l <- apply(N_150l,2,function(x) {mean(x,na.rm=T)})
# Add the noise to the trajectories to make 500 unique populations for the high ('h') fluctuation scenario
N_50h <- sweep(N.high, MARGIN=2, vect_50, '+')
N_100h <- sweep(N.high, MARGIN=2, vect_100, '+')
N_150h <- sweep(N.high, MARGIN=2, vect_150, '+')
# Calculate the average for the 500 high fluctuation populations
Ave_50h <- apply(N_50h,2,function(x) {mean(x,na.rm=T)})
Ave_100h <- apply(N_100h,2,function(x) {mean(x,na.rm=T)})
Ave_150h <- apply(N_150h,2,function(x) {mean(x,na.rm=T)})
#####################################################
# #
# Prepare the data for the Living Planet Index #
# #
#####################################################
# Define unique integer ID for each time-series
ID <- 1:S
# Define unique integer ID for each species
Species <- as.factor(1:S)
# Combine the ID and species name to the simulated low-fluctuation populations
Pop_50l <- cbind(ID,Species,N_50l)
Pop_100l <- cbind(ID,Species,N_100l)
Pop_150l <- cbind(ID,Species,N_150l)
# Add the default column names as required by the 'rlpi' package
colnames(Pop_50l) <- (c("ID","Binomial",paste0("X",as.factor(years))))
colnames(Pop_100l) <- (c("ID","Binomial",paste0("X",as.factor(years))))
colnames(Pop_150l) <- (c("ID","Binomial",paste0("X",as.factor(years))))
# Combine the ID and species name to the simulated high-fluctuation populations
Pop_50h <- cbind(ID,Species,N_50h)
Pop_100h <- cbind(ID,Species,N_100h)
Pop_150h <- cbind(ID,Species,N_150h)
# Add the default column names as required by the 'rlpi' package
colnames(Pop_50h) <- (c("ID","Binomial",paste0("X",as.factor(years))))
colnames(Pop_100h) <- (c("ID","Binomial",paste0("X",as.factor(years))))
colnames(Pop_150h) <- (c("ID","Binomial",paste0("X",as.factor(years))))
############################################################################################
######################################
# #
# Empirical Living Planet Index #
# #
######################################
# This is just an index vector for which time-series hould be included in calculations
# Here, we include all time series (Note: this command in needed when you want to use a subset of the data)
index_vector <- rep(TRUE, S)
#This creates an 'infile' for calcualting the LPI
# Note: you need a folder names 'LPI_files' in your working directory
infile_50l <- create_infile(as.data.frame(Pop_50l), start_col_name="X1970", end_col_name="X2020", index_vector=index_vector, name="LPI_files/lpi_50l")
lpi_50l <- LPIMain(infile_50l, REF_YEAR = 1970, PLOT_MAX = 2019, BOOT_STRAP_SIZE = 100, VERBOSE=FALSE, plot_lpi=TRUE)
infile_100l <- create_infile(as.data.frame(Pop_100l), start_col_name="X1970", end_col_name="X2020", index_vector=index_vector, name="LPI_files/lpi_100l")
lpi_100l <- LPIMain(infile_100l, REF_YEAR = 1970, PLOT_MAX = 2019, BOOT_STRAP_SIZE = 100, VERBOSE=FALSE, plot_lpi=TRUE)
infile_150l <- create_infile(as.data.frame(Pop_150l), start_col_name="X1970", end_col_name="X2020", index_vector=index_vector, name="LPI_files/lpi_150l")
lpi_150l <- LPIMain(infile_150l, REF_YEAR = 1970, PLOT_MAX = 2019, BOOT_STRAP_SIZE = 100, VERBOSE=FALSE, plot_lpi=TRUE)
#This creates an 'infile' for calcualting the LPI
# Note: you need a folder names 'LPI_files' in your working directory
infile_50h <- create_infile(as.data.frame(Pop_50h), start_col_name="X1970", end_col_name="X2020", index_vector=index_vector, name="LPI_files/lpi_50h")
lpi_50h <- LPIMain(infile_50h, REF_YEAR = 1970, PLOT_MAX = 2019, BOOT_STRAP_SIZE = 100, VERBOSE=FALSE, plot_lpi=TRUE)
infile_100h <- create_infile(as.data.frame(Pop_100h), start_col_name="X1970", end_col_name="X2020", index_vector=index_vector, name="LPI_files/lpi_100h")
lpi_100h <- LPIMain(infile_100h, REF_YEAR = 1970, PLOT_MAX = 2019, BOOT_STRAP_SIZE = 100, VERBOSE=FALSE, plot_lpi=TRUE)
infile_150h <- create_infile(as.data.frame(Pop_150h), start_col_name="X1970", end_col_name="X2020", index_vector=index_vector, name="LPI_files/lpi_150h")
lpi_150h <- LPIMain(infile_150h, REF_YEAR = 1970, PLOT_MAX = 2019, BOOT_STRAP_SIZE = 100, VERBOSE=FALSE, plot_lpi=TRUE)
###############################################################################
###########################################
# #
# Function for reshuffling null model #
# #
###########################################
null.mod <- function(POP.mat){
N.shuf <- matrix(NA,ncol=dim(POP.mat)[2],nrow=dim(POP.mat)[1])
# Run a loop to reshuffle each population time-series
for (k in 1:dim(POP.mat)[1]){
# ISolate the time-series
pop.vect <- as.numeric(POP.mat[k,])
# Calculate incremental changes bewtween time-points (i.e. deltas)
deltas <- diff(pop.vect)
# Reshuffle the deltas by resamplin without replacement
null2 <- sample(deltas,length(deltas),replace=F)
# Simulate a new reshuffled vector
new.vect <- c(pop.vect[1],(pop.vect[1] + cumsum(null2)))
# If a population becomes negative, replace it with a blank value
if(min(new.vect)<=0){
new.vect[which(new.vect<=0)] <- NA
}
# Assign the resuffled time-series to the new object
N.shuf[k,] <- new.vect
}
# Add the default columns to the reshuffled data
POP.shuf <- cbind(ID,Species,N.shuf)
# Add the default column names required by the 'rlpi' package
colnames(POP.shuf) <- (c("ID","Binomial",paste0("X",as.factor(years))))
# Generate the infile and calcualte the Living Planet Index
infile_null <- create_infile(as.data.frame(POP.shuf), start_col_name="X1970", end_col_name="X2020", index_vector=rep(TRUE,500), name="LPI_files/lpi_null")
lpi_null <- LPIMain(infile_null, REF_YEAR = 1970, PLOT_MAX = 2019, BOOT_STRAP_SIZE = 100, VERBOSE=FALSE, plot_lpi=TRUE)
# Identify and report the final LPI value from the reshuffled community
final.val <- lpi_null[dim(lpi_null)[1],1]
final.val
}
########################################################################
############################################
# #
# Iterating the reshuffling null model #
# #
############################################
# NUmber of iterations
iterations <- 100
# Create a blank holder-matrix for the 6 assemblages (2 x Fluctuation intensity, 3 x trajectory shape)
output.mat <- matrix(NA,nrow=iterations, ncol=6)
# Iterate the null model in a loop
# The text just adds a laple to the automatically generated plot to keep track of the simulation
for (p in 1:iterations){
output.mat[p,1] <- null.mod(N_50l)
text(1995,1.9,paste("iteration = ",p, "val = 1"))
output.mat[p,2] <- null.mod(N_100l)
text(1995,1.9,paste("iteration = ",p, "val = 2"))
output.mat[p,3] <- null.mod(N_150l)
text(1995,1.9,paste("iteration = ",p, "val = 3"))
output.mat[p,4] <- null.mod(N_50h)
text(1995,1.9,paste("iteration = ",p, "val = 4"))
output.mat[p,5] <- null.mod(N_100h)
text(1995,1.9,paste("iteration = ",p, "val = 5"))
output.mat[p,6] <- null.mod(N_150h)
text(1995,1.9,paste("iteration = ",p, "val = 6"))
}
# Add column names to the iterated output
colnames(output.mat) <- c("ConcaveL", "LinearL","ConvexL","ConcaveH", "LinearH","ConvexH")
# Write the output to file, which can be imported to replicate Figure 4
write.table(output.mat,file= "IterationOutput.txt",quote=T,sep="\t",row.names=F,col.names=T)
|
# Libraries
library(tidyverse)
library(dplyr)
library(plotly)
library(data.table)
library(purrr)
source('formula.R')
import::from(plotly, ggplotly)
import::from(jsonlite, fromJSON)
#Load Questionnaire
df <- read.csv("Questionnaire_responses.csv",header = TRUE)
# Classify each participant: a "touch_typist" answered Yes to the 10-finger
# question AND reported more than 1 year of experience; everyone else falls
# through to the TRUE branch of case_when (including NA answers) and becomes
# "non_touch_typist". The typing language is kept for the per-user lookups
# below.
# (assumes the years-of-experience column is numeric -- TODO confirm against
# the questionnaire export)
type_typist <- df %>%
select(PID, Do.you.use.the.touch.typing.systems.with.all.10.fingers.,
How.many.years.of.experience.do.you.have.in.using.the.touch.typing.systems.with.all.10.fingers.,
In.this.study..which.language.did.you.type.in.) %>%
mutate(Typist = case_when((Do.you.use.the.touch.typing.systems.with.all.10.fingers. == 'Yes') &
(How.many.years.of.experience.do.you.have.in.using.the.touch.typing.systems.with.all.10.fingers. > 1) ~ 'touch_typist',
TRUE ~ 'non_touch_typist')) %>%
select(PID,Typist, In.this.study..which.language.did.you.type.in.)
# Phrase stimuli fall into 3 categories by their position in the TextTest++
# phrase file: rows 1-50 are sentences, 51-100 random strings, 101+ mixed.
# The labels are later joined onto the class logs.
# Read one phrase file and attach the positional Stimulus_Type label.
# (case_when branches are evaluated in order, so `row_number() <= 100`
# only matches rows 51-100 once the first branch has failed.)
label_phrases <- function(path) {
  read.delim(path, header = FALSE, col.names = c("Present")) %>%
    mutate(Stimulus_Type = case_when(
      row_number() <= 50 ~ "sentence",
      row_number() <= 100 ~ "random",
      TRUE ~ "mix"
    ))
}
# Same labelling rule for both languages (deduplicates the original
# copy-pasted German/English pipelines).
phrase_de <- label_phrases("Phrases/phrases_de.txt")
phrase_en <- label_phrases("Phrases/phrases_en.txt")
#Get file list from class-logs folder
file_list <- list.files(path="class-logs")
file_list <- file_list[1:length(file_list)-1]
#Per-participant results, one tibble per stimulus type.
# All three share the same columns, so build them from one empty prototype.
result_proto <- tibble(PID=integer(), Typist=character(), avg_UER=numeric(),
                       WPM=numeric(), Avg.IKI=numeric(), KE=numeric(),
                       Sti_Type=character())
result_random <- result_proto
result_sentence <- result_proto
result_mix <- result_proto
# One JSON log per participant; PID is the file index minus one.
for (i in seq_along(file_list)) {
  # Join the participant's log with the stimulus type of each phrase,
  # using the phrase list matching the language they typed in.
  user_info <- filter(type_typist, PID == (i - 1))
  if (user_info$In.this.study..which.language.did.you.type.in. == "English") {
    user_log <- fromJSON(read_file(paste("class-logs/", file_list[i], sep = ""))) %>%
      inner_join(phrase_en)
  } else {
    user_log <- fromJSON(read_file(paste("class-logs/", file_list[i], sep = ""))) %>%
      inner_join(phrase_de)
  }
  # Words per minute per stimulus type.
  # (sic: caculateWPM is the name assumed to be defined in formula.R -- confirm.)
  wpm <- user_log %>%
    group_by(Stimulus_Type) %>%
    summarise(WPM = mean(caculateWPM(Transcribed, Time)))
  # Mean uncorrected error rate per stimulus type.
  uer <- user_log %>%
    group_by(Stimulus_Type) %>%
    summarise(UER = mean(as.numeric(UER)))
  # Mean inter-key interval: averaged per trial first, then per stimulus type.
  avg_iki <- user_log %>%
    unnest(Transcribe) %>%
    mutate(IKI = calculateIKI(TimeStamp)) %>%
    group_by(Trial, Stimulus_Type) %>%
    summarise(AVG_iki = mean(IKI)) %>%
    group_by(Stimulus_Type) %>%
    summarise(AVG_IKI = mean(AVG_iki))
  # Keyboard efficiency per stimulus type.
  ke <- user_log %>%
    group_by(Stimulus_Type) %>%
    summarise(KE = mean(calculateKE(Transcribed, lengths(Action))))
  # group_by() sorts Stimulus_Type alphabetically: [1] = mix, [2] = random,
  # [3] = sentence. The mix row previously read ke$KE[2] (random's KE) while
  # every other metric used index 1; fixed to ke$KE[1].
  result_mix <- result_mix %>%
    add_row(PID = (i - 1), Typist = user_info$Typist, avg_UER = uer$UER[1],
            WPM = wpm$WPM[1], Avg.IKI = avg_iki$AVG_IKI[1], KE = ke$KE[1], Sti_Type = 'Mix')
  result_random <- result_random %>%
    add_row(PID = (i - 1), Typist = user_info$Typist, avg_UER = uer$UER[2],
            WPM = wpm$WPM[2], Avg.IKI = avg_iki$AVG_IKI[2], KE = ke$KE[2], Sti_Type = 'Random')
  result_sentence <- result_sentence %>%
    add_row(PID = (i - 1), Typist = user_info$Typist, avg_UER = uer$UER[3],
            WPM = wpm$WPM[3], Avg.IKI = avg_iki$AVG_IKI[3], KE = ke$KE[3], Sti_Type = "Sentence")
}
#Split results into touch-typist and non-touch-typist subsets.
# filter() replaces the original group_by() %>% subset(): grouping added no
# information because each subset is only used for column extraction below.
non_t_mix      <- filter(result_mix,      Typist == "non_touch_typist")
non_t_random   <- filter(result_random,   Typist == "non_touch_typist")
non_t_sentence <- filter(result_sentence, Typist == "non_touch_typist")
t_mix          <- filter(result_mix,      Typist == "touch_typist")
t_random       <- filter(result_random,   Typist == "touch_typist")
t_sentence     <- filter(result_sentence, Typist == "touch_typist")
#Draw UER bar graph: one bar trace per (stimulus type x typist group),
#participants on the x axis.
# NOTE(review): barmode = "stack" stacks the six traces per participant;
# confirm "group" is not the intended comparison mode.
uer_class <- plot_ly() %>%
add_bars(x = ~non_t_mix$PID,
y = ~non_t_mix$avg_UER,
name = "Mix: Non-touch Typists",
marker = list(color = "teal")) %>%
add_bars(x = ~non_t_random$PID,
y = ~non_t_random$avg_UER,
name = "Random: Non-touch Typists",
marker = list(color = "#69b3a2")) %>%
add_bars(x = ~non_t_sentence$PID,
y = ~non_t_sentence$avg_UER,
name = "Sentence: Non-touch Typists",
marker = list(color = "#006284")) %>%
add_bars(x = ~t_mix$PID,
y = ~t_mix$avg_UER,
name = "Mix: Touch Typist",
marker = list(color = "#AB3B3A")) %>%
add_bars(x = ~t_random$PID,
y = ~t_random$avg_UER,
name = "Random: Touch Typist",
marker = list(color = "#F05E1C")) %>%
add_bars(x = ~t_sentence$PID,
y = ~t_sentence$avg_UER,
name = "Sentence: Touch Typist",
marker = list(color = "#FFC408")) %>%
layout(barmode = "stack",
title = "Uncorrected Error Rate of Non-touch Typist and Touch Typist",
xaxis = list(title = "Participants",
zeroline = FALSE),
yaxis = list(title = "Uncorrected Error Rate (%)",
zeroline = FALSE))
#Draw WPM bar graph (same trace layout as the UER graph above).
wpm_class <- plot_ly() %>%
  add_bars(x = ~non_t_mix$PID,
           y = ~non_t_mix$WPM,
           name = "Mix: Non-touch Typists",
           marker = list(color = "teal")) %>%
  add_bars(x = ~non_t_random$PID,
           y = ~non_t_random$WPM,
           name = "Random: Non-touch Typists",
           marker = list(color = "#69b3a2")) %>%
  add_bars(x = ~non_t_sentence$PID,
           y = ~non_t_sentence$WPM,
           name = "Sentence: Non-touch Typists",
           marker = list(color = "#006284")) %>%
  add_bars(x = ~t_mix$PID,
           y = ~t_mix$WPM,
           name = "Mix: Touch Typist",
           marker = list(color = "#AB3B3A")) %>%
  add_bars(x = ~t_random$PID,
           y = ~t_random$WPM,
           name = "Random: Touch Typist",
           marker = list(color = "#F05E1C")) %>%
  # Trace name fixed: "Sentence:Touch Typist" was missing the space every
  # other trace name has.
  add_bars(x = ~t_sentence$PID,
           y = ~t_sentence$WPM,
           name = "Sentence: Touch Typist",
           marker = list(color = "#FFC408")) %>%
  layout(barmode = "stack",
         title = "Word Per Minute of Non-touch Typist and Touch Typist",
         xaxis = list(title = "Participants",
                      zeroline = FALSE),
         # Axis label fixed: it previously read "Word Per Minute ()" with an
         # empty unit in the parentheses.
         yaxis = list(title = "Words Per Minute (WPM)",
                      zeroline = FALSE))
#Prepare KE graph data: one long table of (stimulus type, KE, typist group).
ke_graph_data <- rbind(result_mix, result_random) %>%
  rbind(result_sentence) %>%
  select(Sti_Type, KE, Typist)
#Draw KE boxplot, grouped by typist class within each stimulus type.
# plot_ly() previously received ggplot2::diamonds as its data argument even
# though every aesthetic references ke_graph_data directly; the unused data
# frame is dropped.
ke_class <- plot_ly(x = ~ke_graph_data$Sti_Type, y = ~ke_graph_data$KE,
                    color = ~ke_graph_data$Typist, type = "box", quartilemethod = "inclusive") %>%
  layout(boxmode = "group",
         title = "Keyboard Efficiency of Non-touch Typist and Touch Typist",
         xaxis = list(title = 'Stimulus Type'),
         yaxis = list(title = 'Keyboard Efficiency (%)'))
#Prepare AVG.IKI graph data (same long format as above).
avg_iki_graph_data <- rbind(result_mix, result_random) %>%
  rbind(result_sentence) %>%
  select(Sti_Type, Avg.IKI, Typist)
#Draw Avg.IKI boxplot (same unused-data-argument fix as above).
avg_iki_class <- plot_ly(x = ~avg_iki_graph_data$Sti_Type, y = ~avg_iki_graph_data$Avg.IKI,
                         color = ~avg_iki_graph_data$Typist, type = "box", quartilemethod = "inclusive") %>%
  layout(boxmode = "group",
         title = "Average Inter-key interval of Non-touch Typist and Touch Typist",
         xaxis = list(title = 'Stimulus Type'),
         yaxis = list(title = 'Average Inter-key interval (ms)'))
################# Unit of data analysis ################################
# RQ1: does typing performance differ between touch typists and non-touch typists?
# Approach: for each metric, test whether the group means differ. ANOVA is the
# first option, but its preconditions (normality, homogeneity of variance) are
# checked first; if they fail, the non-parametric test below is used instead.
results <- bind_rows(result_mix, result_random, result_sentence)
## For Uncorrected Error Rate (UER) metric
# Split once by typist class; reused by the WPM and KE analyses below.
results_t <- results %>%
filter(Typist == "touch_typist")
results_non_t <- results %>%
filter(Typist == "non_touch_typist")
# Normality test: H0 = data is normally distributed (p < 0.05 rejects H0).
shapiro.test(results_t[["avg_UER"]])
shapiro.test(results_non_t[["avg_UER"]])
# Homogeneity of variance: H0 = the two groups have equal variances.
bartlett.test(results[["avg_UER"]], results[["Typist"]])
# ANOVA (valid only if the tests above hold): H0 = equal group means.
a.aov <- aov(results[["avg_UER"]] ~ results[["Typist"]])
summary(a.aov)
# Non-parametric fallback (Mann-Whitney U): H0 = the two populations are equal.
wilcox.test(results[["avg_UER"]] ~ results[["Typist"]])
## For Words Per Minute (WPM) metric
# results_t / results_non_t were already built for the UER analysis above and
# are unchanged, so the duplicate re-filtering that used to sit here was removed.
# Normality test: H0 = data is normally distributed (p < 0.05 rejects H0).
shapiro.test(results_t[["WPM"]])
shapiro.test(results_non_t[["WPM"]])
# Homogeneity of variance: H0 = the two groups have equal variances.
bartlett.test(results[["WPM"]], results[["Typist"]])
# ANOVA (valid only if the tests above hold): H0 = equal group means.
a.aov <- aov(results[["WPM"]] ~ results[["Typist"]])
summary(a.aov)
# Non-parametric fallback (Mann-Whitney U): H0 = the two populations are equal.
wilcox.test(results[["WPM"]] ~ results[["Typist"]])
## For Keyboard Efficiency metric
## Normality test: H0 = data is normally distributed (p < 0.05 rejects H0).
shapiro.test(results_t[["KE"]])
shapiro.test(results_non_t[["KE"]])
# Homogeneity of variance: H0 = the two groups have equal variances.
bartlett.test(results[["KE"]], results[["Typist"]])
# ANOVA (valid only if the tests above hold): H0 = equal group means.
a.aov <- aov(results[["KE"]] ~ results[["Typist"]])
summary(a.aov)
# Non-parametric fallback (Mann-Whitney U): H0 = the two populations are equal.
wilcox.test(results[["KE"]] ~ results[["Typist"]])
# RQ2: How does familiarity of the text influence typing performance?
## For Uncorrected Error Rate metric
# Normality test per stimulus type: H0 = data is normally distributed.
shapiro.test(result_sentence[["avg_UER"]])
shapiro.test(result_mix[["avg_UER"]])
shapiro.test(result_random[["avg_UER"]])
# Homogeneity of variance across stimulus types.
bartlett.test(results[["avg_UER"]], results[["Sti_Type"]])
# ANOVA across the three stimulus types. The original called
# aov(result_sentence[["avg_UER"]] ~ result_sentence[["Sti_Type"]]), but
# result_sentence contains a single Sti_Type level, so that model cannot
# compare groups; fixed to use the combined `results`, consistent with the
# Kruskal-Wallis test below.
a.aov <- aov(results[["avg_UER"]] ~ results[["Sti_Type"]])
summary(a.aov)
# Non-parametric fallback (Kruskal-Wallis): H0 = the three populations are equal.
kruskal.test(results[["avg_UER"]] ~ results[["Sti_Type"]])
## For Words per minute (WPM) metric
shapiro.test(result_sentence[["WPM"]])
shapiro.test(result_mix[["WPM"]])
shapiro.test(result_random[["WPM"]])
bartlett.test(results[["WPM"]], results[["Sti_Type"]])
# Same single-level-factor fix as above: aov over the combined `results`.
a.aov <- aov(results[["WPM"]] ~ results[["Sti_Type"]])
summary(a.aov)
kruskal.test(results[["WPM"]] ~ results[["Sti_Type"]])
## For Keyboard Efficiency metric
shapiro.test(result_sentence[["KE"]])
shapiro.test(result_mix[["KE"]])
shapiro.test(result_random[["KE"]])
bartlett.test(results[["KE"]], results[["Sti_Type"]])
# Same single-level-factor fix as above: aov over the combined `results`.
a.aov <- aov(results[["KE"]] ~ results[["Sti_Type"]])
summary(a.aov)
kruskal.test(results[["KE"]] ~ results[["Sti_Type"]])
#
####################### PAPER DATA#####################
# Load every per-trial log shipped with the reference paper and combine them
# with its questionnaire responses.
all_paper_files <- list.files(path = "paper-logs", recursive = TRUE,
pattern = "log",
full.names = TRUE)
all_paper <- rbindlist(sapply(all_paper_files, fread, simplify = FALSE),
use.names = TRUE, idcol = "FileName")
pp_response <- as_tibble(read.csv("paper_responses.csv",check.names = FALSE))
colnames(pp_response)[1] <- "user_id"
colnames(pp_response)[13]<- "Touchtyping_years"
# NOTE(review): select(1,12,13,38:52) is positional and silently breaks if the
# CSV or log schema changes -- prefer selecting by name once columns are known.
paper_data <- left_join(pp_response, all_paper, by=c("user_id"="user_id")) %>%
select(1,12,13,38:52)
colnames(paper_data)[2] <- "TouchTypist"
# Per-user, per-condition averages of the four metrics.
# NOTE(review): the case_when branches overlap at Touchtyping_years == 1 (the
# first match wins, so exactly 1 year => touch_typist), and the >= 1 threshold
# differs from the > 1 threshold used for the class data above -- confirm.
paper_analysis<-paper_data %>%
select(user_id, TouchTypist, Touchtyping_years,ke,uer, iki, sd_iki, wpm, input_time_ms, condition) %>%
mutate(Typist= case_when(Touchtyping_years >=1 ~ "touch_typist",Touchtyping_years<=1 ~ "non_touch_typist")) %>%
group_by(user_id,Typist,condition) %>%
summarise(WPM=mean(wpm),avg_UER=mean(uer),avg_IKI=mean(iki),KE=mean(ke))
colnames(paper_analysis)[3] <- "Sti_Type"
colnames(paper_analysis)[1] <- "PID"
# Row id used as the x axis in the paper plots below.
numbered_paper_analysis<-paper_analysis %>%
rowid_to_column(var='observation')
## Summary statistics of the paper data for WPM and KE.
mean(paper_analysis$WPM)
min(paper_analysis$WPM)
max(paper_analysis$WPM)
mean(paper_analysis$KE)
#####Filter Data from paper#########
# One subset per (stimulus type x typist group).
# NOTE(review): condition labels "Mix"/"Random"/"Sentences" are assumed to be
# the values in the paper logs (the class data used "Sentence") -- confirm.
paper_analysis_mix_t <- numbered_paper_analysis %>%
filter(Sti_Type == "Mix" & Typist == "touch_typist")
paper_analysis_random_t <- numbered_paper_analysis %>%
filter(Sti_Type == "Random"& Typist == "touch_typist")
paper_analysis_sent_t <- numbered_paper_analysis %>%
filter(Sti_Type == "Sentences"& Typist == "touch_typist")
paper_analysis_mix_nt <- numbered_paper_analysis %>%
filter(Sti_Type == "Mix" & Typist == "non_touch_typist")
paper_analysis_random_nt <- numbered_paper_analysis %>%
filter(Sti_Type == "Random" & Typist == "non_touch_typist")
paper_analysis_sent_nt <- numbered_paper_analysis %>%
filter(Sti_Type == "Sentences"& Typist == "non_touch_typist")
###PLOTS####
# NOTE(review): this reassigns wpm_class, overwriting the class-data WPM plot
# built earlier in the script -- rename one of them if both plots are needed.
wpm_class <- plot_ly() %>%
add_bars(x = ~paper_analysis_mix_nt$observation,
y = ~paper_analysis_mix_nt$WPM,
name = "Mix: Non-touch Typists",
marker = list(color = "teal")) %>%
add_bars(x = ~paper_analysis_random_nt$observation,
y = ~paper_analysis_random_nt$WPM,
name = "Random: Non-touch Typists",
marker = list(color = "#69b3a2")) %>%
add_bars(x = ~paper_analysis_sent_nt$observation,
y = ~paper_analysis_sent_nt$WPM,
name = "Sentence: Non-touch Typists",
marker = list(color = "#006284")) %>%
add_bars(x = ~paper_analysis_mix_t$observation,
y = ~paper_analysis_mix_t$WPM,
name = "Mix: Touch Typist",
marker = list(color = "#AB3B3A")) %>%
add_bars(x = ~paper_analysis_random_t$observation,
y = ~paper_analysis_random_t$WPM,
name = "Random: Touch Typist",
marker = list(color = "#F05E1C")) %>%
add_bars(x = ~paper_analysis_sent_t$observation,
y = ~paper_analysis_sent_t$WPM,
name = "Sentence:Touch Typist",
marker = list(color = "#FFC408"))%>%
layout(barmode = "stack",
title = "Word Per Minute of Non-touch Typist and Touch Typist",
xaxis = list(title = "Participants",
zeroline = FALSE),
yaxis = list(title = "Word Per Minute ()",
zeroline = FALSE))
# NOTE(review): this entire section (paper_analysis construction, summary
# statistics, subset filters and the wpm_class plot) re-runs the code defined
# immediately above with no changes. It looks like an accidental copy-paste
# and could be deleted; it is kept and documented here to avoid altering
# behavior in a documentation-only pass.
paper_analysis<-paper_data %>%
select(user_id, TouchTypist, Touchtyping_years,ke,uer, iki, sd_iki, wpm, input_time_ms, condition) %>%
mutate(Typist= case_when(Touchtyping_years >=1 ~ "touch_typist",Touchtyping_years<=1 ~ "non_touch_typist")) %>%
group_by(user_id,Typist,condition) %>%
summarise(WPM=mean(wpm),avg_UER=mean(uer),avg_IKI=mean(iki),KE=mean(ke))
colnames(paper_analysis)[3] <- "Sti_Type"
colnames(paper_analysis)[1] <- "PID"
numbered_paper_analysis<-paper_analysis %>%
rowid_to_column(var='observation')
## Get the mean of the paper data for WPM
mean(paper_analysis$WPM)
min(paper_analysis$WPM)
max(paper_analysis$WPM)
mean(paper_analysis$KE)
#####Filter Data from paper#########
paper_analysis_mix_t <- numbered_paper_analysis %>%
filter(Sti_Type == "Mix" & Typist == "touch_typist")
paper_analysis_random_t <- numbered_paper_analysis %>%
filter(Sti_Type == "Random"& Typist == "touch_typist")
paper_analysis_sent_t <- numbered_paper_analysis %>%
filter(Sti_Type == "Sentences"& Typist == "touch_typist")
paper_analysis_mix_nt <- numbered_paper_analysis %>%
filter(Sti_Type == "Mix" & Typist == "non_touch_typist")
paper_analysis_random_nt <- numbered_paper_analysis %>%
filter(Sti_Type == "Random" & Typist == "non_touch_typist")
paper_analysis_sent_nt <- numbered_paper_analysis %>%
filter(Sti_Type == "Sentences"& Typist == "non_touch_typist")
###PLOTS####
wpm_class <- plot_ly() %>%
add_bars(x = ~paper_analysis_mix_nt$observation,
y = ~paper_analysis_mix_nt$WPM,
name = "Mix: Non-touch Typists",
marker = list(color = "teal")) %>%
add_bars(x = ~paper_analysis_random_nt$observation,
y = ~paper_analysis_random_nt$WPM,
name = "Random: Non-touch Typists",
marker = list(color = "#69b3a2")) %>%
add_bars(x = ~paper_analysis_sent_nt$observation,
y = ~paper_analysis_sent_nt$WPM,
name = "Sentence: Non-touch Typists",
marker = list(color = "#006284")) %>%
add_bars(x = ~paper_analysis_mix_t$observation,
y = ~paper_analysis_mix_t$WPM,
name = "Mix: Touch Typist",
marker = list(color = "#AB3B3A")) %>%
add_bars(x = ~paper_analysis_random_t$observation,
y = ~paper_analysis_random_t$WPM,
name = "Random: Touch Typist",
marker = list(color = "#F05E1C")) %>%
add_bars(x = ~paper_analysis_sent_t$observation,
y = ~paper_analysis_sent_t$WPM,
name = "Sentence:Touch Typist",
marker = list(color = "#FFC408"))%>%
layout(barmode = "stack",
title = "Word Per Minute of Non-touch Typist and Touch Typist",
xaxis = list(title = "Participants",
zeroline = FALSE),
yaxis = list(title = "Word Per Minute ()",
zeroline = FALSE))
# Display the WPM plot built above.
wpm_class
#Draw KE boxplot for the paper data.
# plot_ly() previously received ggplot2::diamonds as its data argument even
# though every aesthetic references paper_analysis directly; the unused data
# frame is dropped.
ke_paper <- plot_ly(x = ~paper_analysis$Sti_Type, y = ~paper_analysis$KE,
                    color = ~paper_analysis$Typist, type = "box", quartilemethod = "inclusive") %>%
  layout(boxmode = "group",
         title = "Keyboard Efficiency of Non-touch Typist and Touch Typist",
         xaxis = list(title = 'Stimulus Type'),
         yaxis = list(title = 'Keyboard Efficiency (%)'))
## Paper data: effect of stimulus type on each metric.
## For Uncorrected Error Rate metric
# Normality test: H0 = data is normally distributed (p < 0.05 rejects H0).
shapiro.test(paper_analysis[["avg_UER"]])
# Homogeneity of variance across stimulus types: H0 = equal variances.
bartlett.test(paper_analysis[["avg_UER"]], paper_analysis[["Sti_Type"]])
# ANOVA (valid only if the tests above hold): H0 = equal group means.
a.aov <- aov(paper_analysis[["avg_UER"]] ~ paper_analysis[["Sti_Type"]])
summary(a.aov)
# Non-parametric fallback (Kruskal-Wallis): H0 = the populations are equal.
kruskal.test(paper_analysis[["avg_UER"]] ~ paper_analysis[["Sti_Type"]])
## For Words per minute (WPM) metric
# Normality test.
shapiro.test(paper_analysis[["WPM"]])
# Homogeneity of variance.
bartlett.test(paper_analysis[["WPM"]], paper_analysis[["Sti_Type"]])
# ANOVA.
a.aov <- aov(paper_analysis[["WPM"]] ~ paper_analysis[["Sti_Type"]])
summary(a.aov)
# Non-parametric fallback.
kruskal.test(paper_analysis[["WPM"]] ~ paper_analysis[["Sti_Type"]])
## For Keyboard Efficiency metric
# Normality test.
shapiro.test(paper_analysis[["KE"]])
# Homogeneity of variance.
bartlett.test(paper_analysis[["KE"]], paper_analysis[["Sti_Type"]])
# ANOVA.
a.aov <- aov(paper_analysis[["KE"]] ~ paper_analysis[["Sti_Type"]])
summary(a.aov)
# Non-parametric fallback.
kruskal.test(paper_analysis[["KE"]] ~ paper_analysis[["Sti_Type"]])
| /analyseClassLogs.R | no_license | 0zyxel0/QuantHCI2020 | R | false | false | 25,525 | r | # Libraries
# NOTE(review): everything from here to the end of this chunk is a verbatim
# second copy of the script above (libraries, questionnaire, phrases, class-log
# loop, plots, RQ1 analysis). It appears to be an accidental duplicate of the
# whole file; consider deleting one copy.
library(tidyverse)
library(dplyr)
library(plotly)
library(data.table)
library(purrr)
source('formula.R')
import::from(plotly, ggplotly)
import::from(jsonlite, fromJSON)
#Load Questionnaire responses (one row per participant).
df <- read.csv("Questionnaire_responses.csv",header = TRUE)
# Classify each participant as touch typist ('Yes' AND > 1 year experience)
# or non-touch typist, and keep the study language.
# NOTE(review): NA answers fall through to 'non_touch_typist' -- confirm.
type_typist <- df %>%
select(PID, Do.you.use.the.touch.typing.systems.with.all.10.fingers.,
How.many.years.of.experience.do.you.have.in.using.the.touch.typing.systems.with.all.10.fingers.,
In.this.study..which.language.did.you.type.in.) %>%
mutate(Typist = case_when((Do.you.use.the.touch.typing.systems.with.all.10.fingers. == 'Yes') &
(How.many.years.of.experience.do.you.have.in.using.the.touch.typing.systems.with.all.10.fingers. > 1) ~ 'touch_typist',
TRUE ~ 'non_touch_typist')) %>%
select(PID,Typist, In.this.study..which.language.did.you.type.in.)
#Separate phrases into 3 stimulus categories: sentence (rows 1-50),
#random (51-100), mix (101+), matching the TextTest++ phrase file order.
phrase_de <- read.delim("Phrases/phrases_de.txt", header = FALSE, col.names = c("Present")) %>%
mutate(Stimulus_Type = case_when( row_number() <= 50 ~ 'sentence',
row_number() > 50 & row_number() <= 100 ~ 'random',
row_number() > 100 ~ 'mix'))
phrase_en <- read.delim("Phrases/phrases_en.txt", header = FALSE, col.names = c("Present")) %>%
mutate(Stimulus_Type = case_when( row_number() <= 50 ~ 'sentence',
row_number() > 50 & row_number() <= 100 ~ 'random',
row_number() > 100 ~ 'mix'))
#Get file list from class-logs folder, dropping the last entry.
# head(x, -1) replaces file_list[1:length(file_list)-1], which evaluated as
# (1:n) - 1 and only worked because index 0 is silently dropped.
file_list <- list.files(path="class-logs")
file_list <- head(file_list, -1)
#Per-participant results, one tibble per stimulus type (shared prototype).
result_proto <- tibble(PID=integer(), Typist=character(), avg_UER=numeric(),
                       WPM=numeric(), Avg.IKI=numeric(), KE=numeric(),
                       Sti_Type=character())
result_random <- result_proto
result_sentence <- result_proto
result_mix <- result_proto
# One JSON log per participant; PID is the file index minus one.
for (i in seq_along(file_list)) {
  # Join the log with the stimulus type of each phrase (language-dependent).
  user_info <- filter(type_typist, PID == (i - 1))
  if (user_info$In.this.study..which.language.did.you.type.in. == "English") {
    user_log <- fromJSON(read_file(paste("class-logs/", file_list[i], sep = ""))) %>%
      inner_join(phrase_en)
  } else {
    user_log <- fromJSON(read_file(paste("class-logs/", file_list[i], sep = ""))) %>%
      inner_join(phrase_de)
  }
  # Words per minute per stimulus type (caculateWPM from formula.R, sic).
  wpm <- user_log %>%
    group_by(Stimulus_Type) %>%
    summarise(WPM = mean(caculateWPM(Transcribed, Time)))
  # Mean uncorrected error rate per stimulus type.
  uer <- user_log %>%
    group_by(Stimulus_Type) %>%
    summarise(UER = mean(as.numeric(UER)))
  # Mean inter-key interval: averaged per trial first, then per stimulus type.
  avg_iki <- user_log %>%
    unnest(Transcribe) %>%
    mutate(IKI = calculateIKI(TimeStamp)) %>%
    group_by(Trial, Stimulus_Type) %>%
    summarise(AVG_iki = mean(IKI)) %>%
    group_by(Stimulus_Type) %>%
    summarise(AVG_IKI = mean(AVG_iki))
  # Keyboard efficiency per stimulus type.
  ke <- user_log %>%
    group_by(Stimulus_Type) %>%
    summarise(KE = mean(calculateKE(Transcribed, lengths(Action))))
  # Alphabetical group order: [1] = mix, [2] = random, [3] = sentence.
  # The mix row previously read ke$KE[2] (random's KE); fixed to ke$KE[1].
  result_mix <- result_mix %>%
    add_row(PID = (i - 1), Typist = user_info$Typist, avg_UER = uer$UER[1],
            WPM = wpm$WPM[1], Avg.IKI = avg_iki$AVG_IKI[1], KE = ke$KE[1], Sti_Type = 'Mix')
  result_random <- result_random %>%
    add_row(PID = (i - 1), Typist = user_info$Typist, avg_UER = uer$UER[2],
            WPM = wpm$WPM[2], Avg.IKI = avg_iki$AVG_IKI[2], KE = ke$KE[2], Sti_Type = 'Random')
  result_sentence <- result_sentence %>%
    add_row(PID = (i - 1), Typist = user_info$Typist, avg_UER = uer$UER[3],
            WPM = wpm$WPM[3], Avg.IKI = avg_iki$AVG_IKI[3], KE = ke$KE[3], Sti_Type = "Sentence")
}
#Split results into touch-typist and non-touch-typist subsets.
# filter() replaces the original group_by() %>% subset(): grouping added no
# information because each subset is only used for column extraction below.
non_t_mix      <- filter(result_mix,      Typist == "non_touch_typist")
non_t_random   <- filter(result_random,   Typist == "non_touch_typist")
non_t_sentence <- filter(result_sentence, Typist == "non_touch_typist")
t_mix          <- filter(result_mix,      Typist == "touch_typist")
t_random       <- filter(result_random,   Typist == "touch_typist")
t_sentence     <- filter(result_sentence, Typist == "touch_typist")
#Draw UER bar graph: one bar trace per (stimulus type x typist group).
# NOTE(review): barmode = "stack" stacks the six traces per participant;
# confirm "group" is not the intended comparison mode.
uer_class <- plot_ly() %>%
add_bars(x = ~non_t_mix$PID,
y = ~non_t_mix$avg_UER,
name = "Mix: Non-touch Typists",
marker = list(color = "teal")) %>%
add_bars(x = ~non_t_random$PID,
y = ~non_t_random$avg_UER,
name = "Random: Non-touch Typists",
marker = list(color = "#69b3a2")) %>%
add_bars(x = ~non_t_sentence$PID,
y = ~non_t_sentence$avg_UER,
name = "Sentence: Non-touch Typists",
marker = list(color = "#006284")) %>%
add_bars(x = ~t_mix$PID,
y = ~t_mix$avg_UER,
name = "Mix: Touch Typist",
marker = list(color = "#AB3B3A")) %>%
add_bars(x = ~t_random$PID,
y = ~t_random$avg_UER,
name = "Random: Touch Typist",
marker = list(color = "#F05E1C")) %>%
add_bars(x = ~t_sentence$PID,
y = ~t_sentence$avg_UER,
name = "Sentence: Touch Typist",
marker = list(color = "#FFC408")) %>%
layout(barmode = "stack",
title = "Uncorrected Error Rate of Non-touch Typist and Touch Typist",
xaxis = list(title = "Participants",
zeroline = FALSE),
yaxis = list(title = "Uncorrected Error Rate (%)",
zeroline = FALSE))
#Draw WPM bar graph (same trace layout as the UER graph above).
wpm_class <- plot_ly() %>%
  add_bars(x = ~non_t_mix$PID,
           y = ~non_t_mix$WPM,
           name = "Mix: Non-touch Typists",
           marker = list(color = "teal")) %>%
  add_bars(x = ~non_t_random$PID,
           y = ~non_t_random$WPM,
           name = "Random: Non-touch Typists",
           marker = list(color = "#69b3a2")) %>%
  add_bars(x = ~non_t_sentence$PID,
           y = ~non_t_sentence$WPM,
           name = "Sentence: Non-touch Typists",
           marker = list(color = "#006284")) %>%
  add_bars(x = ~t_mix$PID,
           y = ~t_mix$WPM,
           name = "Mix: Touch Typist",
           marker = list(color = "#AB3B3A")) %>%
  add_bars(x = ~t_random$PID,
           y = ~t_random$WPM,
           name = "Random: Touch Typist",
           marker = list(color = "#F05E1C")) %>%
  # Trace name fixed: "Sentence:Touch Typist" was missing the space every
  # other trace name has.
  add_bars(x = ~t_sentence$PID,
           y = ~t_sentence$WPM,
           name = "Sentence: Touch Typist",
           marker = list(color = "#FFC408")) %>%
  layout(barmode = "stack",
         title = "Word Per Minute of Non-touch Typist and Touch Typist",
         xaxis = list(title = "Participants",
                      zeroline = FALSE),
         # Axis label fixed: it previously read "Word Per Minute ()" with an
         # empty unit in the parentheses.
         yaxis = list(title = "Words Per Minute (WPM)",
                      zeroline = FALSE))
#Prepare KE graph data: one long table of (stimulus type, KE, typist group).
ke_graph_data <- rbind(result_mix, result_random) %>%
  rbind(result_sentence) %>%
  select(Sti_Type, KE, Typist)
#Draw KE boxplot, grouped by typist class within each stimulus type.
# plot_ly() previously received ggplot2::diamonds as its data argument even
# though every aesthetic references ke_graph_data directly; the unused data
# frame is dropped.
ke_class <- plot_ly(x = ~ke_graph_data$Sti_Type, y = ~ke_graph_data$KE,
                    color = ~ke_graph_data$Typist, type = "box", quartilemethod = "inclusive") %>%
  layout(boxmode = "group",
         title = "Keyboard Efficiency of Non-touch Typist and Touch Typist",
         xaxis = list(title = 'Stimulus Type'),
         yaxis = list(title = 'Keyboard Efficiency (%)'))
#Prepare AVG.IKI graph data (same long format as above).
avg_iki_graph_data <- rbind(result_mix, result_random) %>%
  rbind(result_sentence) %>%
  select(Sti_Type, Avg.IKI, Typist)
#Draw Avg.IKI boxplot (same unused-data-argument fix as above).
avg_iki_class <- plot_ly(x = ~avg_iki_graph_data$Sti_Type, y = ~avg_iki_graph_data$Avg.IKI,
                         color = ~avg_iki_graph_data$Typist, type = "box", quartilemethod = "inclusive") %>%
  layout(boxmode = "group",
         title = "Average Inter-key interval of Non-touch Typist and Touch Typist",
         xaxis = list(title = 'Stimulus Type'),
         yaxis = list(title = 'Average Inter-key interval (ms)'))
################# Unit of data analysis ################################
# RQ1: different typing performance between touch typists and non-touch typist?
# Approach: for each metric, test whether the group means differ. ANOVA is the
# first option, but its prerequisites (normality, homogeneity of variance) are
# tested first; when they fail, the non-parametric alternative below is used.
# Pool the three per-stimulus result sets into one data frame for the tests.
results <- bind_rows(result_mix, result_random, result_sentence)
## For Uncorrected Error Rate (UER) metric
# Normality Test for UER metric, H0 hypothesis is that data is normally distributed.
# Split the pooled results by typist class for the per-group normality tests.
results_t <- results %>%
  filter(Typist == "touch_typist")
results_non_t <- results %>%
  filter(Typist == "non_touch_typist")
shapiro.test(results_t[["avg_UER"]])
shapiro.test(results_non_t[["avg_UER"]])
##Here two p values will be shown, if p<0.05, then H0 would be rejected, which means data does not comply normality.
# Homogeneity test of variance, H0 hypothsis is that The two populations have homogeneous variances
bartlett.test(results[["avg_UER"]], results[["Typist"]])
##If p value is smaller than 0.05, two populations' variance are not homogeneous
# ANOVA, usability depending on previous tests. H0 is that two populations' mean value is equavalent
a.aov <- aov(results[["avg_UER"]] ~ results[["Typist"]])
summary(a.aov)
# Above analysis is ANOVA and its corresponding prepositive tests, if conditions of ANOVA are not met, then non-parameter test would be employed
# Non-parameter test: Mann-Whitney U Test, H0:The two populations are equal versus
wilcox.test(results[["avg_UER"]] ~ results[["Typist"]])
## For Words Per Minute (WPM) metric
# Normality Test for WPM metric, H0 hypothesis is that data is normally distributed.
# NOTE(review): results_t / results_non_t are recomputed here identically to
# the assignments above -- redundant but harmless.
results_t <- results %>%
  filter(Typist == "touch_typist")
results_non_t <- results %>%
  filter(Typist == "non_touch_typist")
shapiro.test(results_t[["WPM"]])
shapiro.test(results_non_t[["WPM"]])
##Here two p values will be shown, if p<0.05, then H0 would be rejected, which means data does not comply normality.
# Homogeneity test of variance, H0 hypothsis is that The two populations have homogeneous variances
bartlett.test(results[["WPM"]], results[["Typist"]])
##If p value is smaller than 0.05, two populations' variance are not homogeneous
# ANOVA, usability depending on previous tests. H0 is that two populations' mean value is equavalent
a.aov <- aov(results[["WPM"]] ~ results[["Typist"]])
summary(a.aov)
# Above analysis is ANOVA and its corresponding prepositive tests, if conditions of ANOVA are not met, then non-parameter test would be employed
# Non-parameter test: Mann-Whitney U Test, H0:The two populations are equal versus
wilcox.test(results[["WPM"]] ~ results[["Typist"]])
## For Keyboard Efficiency metric
## Normality Test
shapiro.test(results_t[["KE"]])
shapiro.test(results_non_t[["KE"]])
##Here two p values will be shown, if p<0.05, then H0 would be rejected, which means data does not comply normality.
# Homogeneity test of variance, H0 hypothsis is that The two populations have homogeneous variances
bartlett.test(results[["KE"]], results[["Typist"]])
##If p value is smaller than 0.05, two populations' variance are not homogeneous
# ANOVA, usability depending on previous tests. H0 is that two populations' mean value is equavalent
a.aov <- aov(results[["KE"]] ~ results[["Typist"]])
summary(a.aov)
# Above analysis is ANOVA and its corresponding prepositive tests, if conditions of ANOVA are not met, then non-parameter test would be employed
# Non-parameter test: Mann-Whitney U Test, H0:The two populations are equal versus
wilcox.test(results[["KE"]] ~ results[["Typist"]])
# RQ2: How does familiarity of text influence the typing performance?
## For Uncorrected Error Rate metric
# Normality Test for UER per stimulus type; H0: data is normally distributed.
shapiro.test(result_sentence[["avg_UER"]])
shapiro.test(result_mix[["avg_UER"]])
shapiro.test(result_random[["avg_UER"]])
## Here p values will be shown; if p < 0.05 H0 is rejected, i.e. the data is not normal.
# Homogeneity test of variance across stimulus types; H0: homogeneous variances.
bartlett.test(results[["avg_UER"]], results[["Sti_Type"]])
## If p < 0.05, the group variances are not homogeneous.
# One-way ANOVA across stimulus types; H0: the group means are equal.
# BUG FIX: the original modelled result_sentence[["avg_UER"]] against
# result_sentence[["Sti_Type"]], but Sti_Type is constant within
# result_sentence (a single-level factor), so the model could not compare
# stimulus types. Use the pooled `results`, matching the bartlett.test()
# and kruskal.test() calls in this section.
a.aov <- aov(results[["avg_UER"]] ~ results[["Sti_Type"]])
summary(a.aov)
# If ANOVA's assumptions are not met, fall back to the non-parametric test.
# Non-parameter test: Kruskal-Wallis Test, H0: N (three in our case) populations are equal versus
kruskal.test(results[["avg_UER"]] ~ results[["Sti_Type"]])
## For Words per minute (WPM) metric
# Normality Test for WPM per stimulus type; H0: data is normally distributed.
shapiro.test(result_sentence[["WPM"]])
shapiro.test(result_mix[["WPM"]])
shapiro.test(result_random[["WPM"]])
## Here p values will be shown; if p < 0.05 H0 is rejected, i.e. the data is not normal.
# Homogeneity test of variance across stimulus types; H0: homogeneous variances.
bartlett.test(results[["WPM"]], results[["Sti_Type"]])
## If p < 0.05, the group variances are not homogeneous.
# One-way ANOVA across stimulus types; H0: the group means are equal.
# BUG FIX: the original modelled result_sentence[["WPM"]] against
# result_sentence[["Sti_Type"]], a single-level factor; use the pooled
# `results` so the three stimulus types are actually compared.
a.aov <- aov(results[["WPM"]] ~ results[["Sti_Type"]])
summary(a.aov)
# If ANOVA's assumptions are not met, fall back to the non-parametric test.
# Non-parameter test: Kruskal-Wallis Test, H0: N (three in our case) populations are equal versus
kruskal.test(results[["WPM"]] ~ results[["Sti_Type"]])
## For Keyboard Efficiency metric
# Normality Test for KE per stimulus type; H0: data is normally distributed.
shapiro.test(result_sentence[["KE"]])
shapiro.test(result_mix[["KE"]])
shapiro.test(result_random[["KE"]])
## Here p values will be shown; if p < 0.05 H0 is rejected, i.e. the data is not normal.
# Homogeneity test of variance across stimulus types; H0: homogeneous variances.
bartlett.test(results[["KE"]], results[["Sti_Type"]])
## If p < 0.05, the group variances are not homogeneous.
# One-way ANOVA across stimulus types; H0: the group means are equal.
# BUG FIX: the original modelled result_sentence[["KE"]] against
# result_sentence[["Sti_Type"]], a single-level factor; use the pooled
# `results` so the three stimulus types are actually compared.
a.aov <- aov(results[["KE"]] ~ results[["Sti_Type"]])
summary(a.aov)
# If ANOVA's assumptions are not met, fall back to the non-parametric test.
# Non-parameter test: Kruskal-Wallis Test, H0: N (three in our case) populations are equal versus
kruskal.test(results[["KE"]] ~ results[["Sti_Type"]])
#
####################### PAPER DATA #####################
# Load every keystroke log exported from the reference paper's dataset.
all_paper_files <- list.files(path = "paper-logs", recursive = TRUE,
                              pattern = "log",
                              full.names = TRUE)
# sapply(..., simplify = FALSE) is deliberate (not lapply): it names the list
# elements after the file paths, which rbindlist() records in "FileName".
all_paper <- rbindlist(sapply(all_paper_files, fread, simplify = FALSE),
                       use.names = TRUE, idcol = "FileName")
# Questionnaire responses; first column is the participant id.
pp_response <- as_tibble(read.csv("paper_responses.csv", check.names = FALSE))
colnames(pp_response)[1] <- "user_id"
colnames(pp_response)[13] <- "Touchtyping_years"
# Join questionnaire and log data, keeping id/typist columns plus the metrics.
# NOTE(review): the positional select(1, 12, 13, 38:52) is fragile against any
# change in the questionnaire layout -- confirm the column indices.
paper_data <- left_join(pp_response, all_paper, by = "user_id") %>%
  select(1, 12, 13, 38:52)
colnames(paper_data)[2] <- "TouchTypist"
# Per (participant x typist class x condition) averages of the four metrics.
# FIX: the original case_when conditions overlapped (>= 1 and <= 1); a value
# of exactly 1 matched both arms and was classified only by first-match
# order. The boundary is now explicit (>= 1 -> touch typist), which keeps
# the original first-match behaviour.
paper_analysis <- paper_data %>%
  select(user_id, TouchTypist, Touchtyping_years, ke, uer, iki, sd_iki, wpm, input_time_ms, condition) %>%
  mutate(Typist = case_when(Touchtyping_years >= 1 ~ "touch_typist",
                            Touchtyping_years < 1 ~ "non_touch_typist")) %>%
  group_by(user_id, Typist, condition) %>%
  summarise(WPM = mean(wpm), avg_UER = mean(uer), avg_IKI = mean(iki), KE = mean(ke))
colnames(paper_analysis)[3] <- "Sti_Type"
colnames(paper_analysis)[1] <- "PID"
# Add a sequential observation id used as the x axis in the plots below.
numbered_paper_analysis <- paper_analysis %>%
  rowid_to_column(var = 'observation')
## Summary statistics of the paper data for WPM and KE.
mean(paper_analysis$WPM)
min(paper_analysis$WPM)
max(paper_analysis$WPM)
mean(paper_analysis$KE)
##### Filter paper data into per-group subsets #########
# One subset per (stimulus type, typist class) combination. Comma-separated
# filter() conditions are combined with AND, equivalent to the original `&`.
paper_analysis_mix_t <- numbered_paper_analysis %>%
  filter(Sti_Type == "Mix", Typist == "touch_typist")
paper_analysis_random_t <- numbered_paper_analysis %>%
  filter(Sti_Type == "Random", Typist == "touch_typist")
paper_analysis_sent_t <- numbered_paper_analysis %>%
  filter(Sti_Type == "Sentences", Typist == "touch_typist")
paper_analysis_mix_nt <- numbered_paper_analysis %>%
  filter(Sti_Type == "Mix", Typist == "non_touch_typist")
paper_analysis_random_nt <- numbered_paper_analysis %>%
  filter(Sti_Type == "Random", Typist == "non_touch_typist")
paper_analysis_sent_nt <- numbered_paper_analysis %>%
  filter(Sti_Type == "Sentences", Typist == "non_touch_typist")
###PLOTS####
# WPM bar graph for the paper data: one trace per (stimulus type x typist
# class) group, indexed by the sequential observation id on the x axis.
# NOTE(review): this reuses the variable name wpm_class from the class-data
# section above, overwriting that plot -- confirm this is intended.
wpm_class <- plot_ly() %>%
  add_bars(x = ~paper_analysis_mix_nt$observation,
           y = ~paper_analysis_mix_nt$WPM,
           name = "Mix: Non-touch Typists",
           marker = list(color = "teal")) %>%
  add_bars(x = ~paper_analysis_random_nt$observation,
           y = ~paper_analysis_random_nt$WPM,
           name = "Random: Non-touch Typists",
           marker = list(color = "#69b3a2")) %>%
  add_bars(x = ~paper_analysis_sent_nt$observation,
           y = ~paper_analysis_sent_nt$WPM,
           name = "Sentence: Non-touch Typists",
           marker = list(color = "#006284")) %>%
  add_bars(x = ~paper_analysis_mix_t$observation,
           y = ~paper_analysis_mix_t$WPM,
           name = "Mix: Touch Typist",
           marker = list(color = "#AB3B3A")) %>%
  add_bars(x = ~paper_analysis_random_t$observation,
           y = ~paper_analysis_random_t$WPM,
           name = "Random: Touch Typist",
           marker = list(color = "#F05E1C")) %>%
  add_bars(x = ~paper_analysis_sent_t$observation,
           y = ~paper_analysis_sent_t$WPM,
           name = "Sentence:Touch Typist",
           marker = list(color = "#FFC408"))%>%
  layout(barmode = "stack",
         title = "Word Per Minute of Non-touch Typist and Touch Typist",
         xaxis = list(title = "Participants",
                      zeroline = FALSE),
         yaxis = list(title = "Word Per Minute ()",
                      zeroline = FALSE))
# NOTE(review): this entire section (paper_analysis pipeline, summary stats,
# per-group filters, and the WPM plot) is a verbatim duplicate of the section
# immediately above. It recomputes the same objects with the same values, so
# it is redundant; consider removing it after confirming nothing between the
# two copies depends on re-execution.
paper_analysis<-paper_data %>%
  select(user_id, TouchTypist, Touchtyping_years,ke,uer, iki, sd_iki, wpm, input_time_ms, condition) %>%
  mutate(Typist= case_when(Touchtyping_years >=1 ~ "touch_typist",Touchtyping_years<=1 ~ "non_touch_typist")) %>%
  group_by(user_id,Typist,condition) %>%
  summarise(WPM=mean(wpm),avg_UER=mean(uer),avg_IKI=mean(iki),KE=mean(ke))
colnames(paper_analysis)[3] <- "Sti_Type"
colnames(paper_analysis)[1] <- "PID"
numbered_paper_analysis<-paper_analysis %>%
  rowid_to_column(var='observation')
## Get the mean of the paper data for WPM
mean(paper_analysis$WPM)
min(paper_analysis$WPM)
max(paper_analysis$WPM)
mean(paper_analysis$KE)
#####Filter Data from paper#########
paper_analysis_mix_t <- numbered_paper_analysis %>%
  filter(Sti_Type == "Mix" & Typist == "touch_typist")
paper_analysis_random_t <- numbered_paper_analysis %>%
  filter(Sti_Type == "Random"& Typist == "touch_typist")
paper_analysis_sent_t <- numbered_paper_analysis %>%
  filter(Sti_Type == "Sentences"& Typist == "touch_typist")
paper_analysis_mix_nt <- numbered_paper_analysis %>%
  filter(Sti_Type == "Mix" & Typist == "non_touch_typist")
paper_analysis_random_nt <- numbered_paper_analysis %>%
  filter(Sti_Type == "Random" & Typist == "non_touch_typist")
paper_analysis_sent_nt <- numbered_paper_analysis %>%
  filter(Sti_Type == "Sentences"& Typist == "non_touch_typist")
###PLOTS####
wpm_class <- plot_ly() %>%
  add_bars(x = ~paper_analysis_mix_nt$observation,
           y = ~paper_analysis_mix_nt$WPM,
           name = "Mix: Non-touch Typists",
           marker = list(color = "teal")) %>%
  add_bars(x = ~paper_analysis_random_nt$observation,
           y = ~paper_analysis_random_nt$WPM,
           name = "Random: Non-touch Typists",
           marker = list(color = "#69b3a2")) %>%
  add_bars(x = ~paper_analysis_sent_nt$observation,
           y = ~paper_analysis_sent_nt$WPM,
           name = "Sentence: Non-touch Typists",
           marker = list(color = "#006284")) %>%
  add_bars(x = ~paper_analysis_mix_t$observation,
           y = ~paper_analysis_mix_t$WPM,
           name = "Mix: Touch Typist",
           marker = list(color = "#AB3B3A")) %>%
  add_bars(x = ~paper_analysis_random_t$observation,
           y = ~paper_analysis_random_t$WPM,
           name = "Random: Touch Typist",
           marker = list(color = "#F05E1C")) %>%
  add_bars(x = ~paper_analysis_sent_t$observation,
           y = ~paper_analysis_sent_t$WPM,
           name = "Sentence:Touch Typist",
           marker = list(color = "#FFC408"))%>%
  layout(barmode = "stack",
         title = "Word Per Minute of Non-touch Typist and Touch Typist",
         xaxis = list(title = "Participants",
                      zeroline = FALSE),
         yaxis = list(title = "Word Per Minute ()",
                      zeroline = FALSE))
# Render the WPM chart in the viewer.
wpm_class
# Draw KE boxplot for the paper data.
# FIX: the original passed ggplot2::diamonds as the plot_ly() data argument,
# but every aesthetic is a formula over paper_analysis, so the diamonds
# dataset was never used; the misleading argument has been dropped.
ke_paper <- plot_ly(x = ~paper_analysis$Sti_Type, y = ~paper_analysis$KE,
                    color = ~paper_analysis$Typist, type = "box", quartilemethod="inclusive") %>%
  layout(boxmode = "group",
         title = "Keyboard Efficiency of Non-touch Typist and Touch Typist",
         xaxis = list(title='Stimulus Type'),
         yaxis = list(title='Keyboard Efficiency (%)'))
# Statistical tests on the paper data, mirroring the RQ2 analysis above but
# on paper_analysis: per metric, a normality test, a homogeneity-of-variance
# test, one-way ANOVA across stimulus types, and the Kruskal-Wallis fallback.
## For Uncorrected Error Rate metric
# Normality Test for UER metric, H0 hypothesis is that data is normally distributed.
shapiro.test(paper_analysis[["avg_UER"]])
##Here p values will be shown, if p<0.05, then H0 would be rejected, which means data does not comply normality.
# Homogeneity test of variance, H0 hypothsis is that The two populations have homogeneous variances
bartlett.test(paper_analysis[["avg_UER"]], paper_analysis[["Sti_Type"]])
##If p value is smaller than 0.05, two populations' variance are not homogeneous
# ANOVA, usability depending on previous tests. H0 is that two populations' mean value is equavalent
a.aov <- aov(paper_analysis[["avg_UER"]] ~ paper_analysis[["Sti_Type"]])
summary(a.aov)
# Above analysis is ANOVA and its corresponding prepositive tests, if conditions of ANOVA are not met, then non-parameter test would be employed
# Non-parameter test: Kruskal-Wallis Test, H0: N (three in our case) populations are equal versus
kruskal.test(paper_analysis[["avg_UER"]] ~ paper_analysis[["Sti_Type"]])
## For Words per minute (WPM) metric
# Normality Test for UER metric, H0 hypothesis is that data is normally distributed.
shapiro.test(paper_analysis[["WPM"]])
##Here p values will be shown, if p<0.05, then H0 would be rejected, which means data does not comply normality.
# Homogeneity test of variance, H0 hypothsis is that The two populations have homogeneous variances
bartlett.test(paper_analysis[["WPM"]], paper_analysis[["Sti_Type"]])
##If p value is smaller than 0.05, two populations' variance are not homogeneous
# ANOVA, usability depending on previous tests. H0 is that two populations' mean value is equavalent
a.aov <- aov(paper_analysis[["WPM"]] ~ paper_analysis[["Sti_Type"]])
summary(a.aov)
# Above analysis is ANOVA and its corresponding prepositive tests, if conditions of ANOVA are not met, then non-parameter test would be employed
# Non-parameter test: Kruskal-Wallis Test, H0: N (three in our case) populations are equal versus
kruskal.test(paper_analysis[["WPM"]] ~ paper_analysis[["Sti_Type"]])
## For Keyboard Efficiency metric
# Normality Test for UER metric, H0 hypothesis is that data is normally distributed.
shapiro.test(paper_analysis[["KE"]])
##Here p values will be shown, if p<0.05, then H0 would be rejected, which means data does not comply normality.
# Homogeneity test of variance, H0 hypothsis is that The two populations have homogeneous variances
bartlett.test(paper_analysis[["KE"]], paper_analysis[["Sti_Type"]])
##If p value is smaller than 0.05, two populations' variance are not homogeneous
# ANOVA, usability depending on previous tests. H0 is that two populations' mean value is equavalent
a.aov <- aov(paper_analysis[["KE"]] ~ paper_analysis[["Sti_Type"]])
summary(a.aov)
# Above analysis is ANOVA and its corresponding prepositive tests, if conditions of ANOVA are not met, then non-parameter test would be employed
# Non-parameter test: Kruskal-Wallis Test, H0: N (three in our case) populations are equal versus
kruskal.test(paper_analysis[["KE"]] ~ paper_analysis[["Sti_Type"]])
|
## A set of functions to create the inverse of a matrix and
## cache the resulting matrix
## Usage:
## ##create a 2x2 matrix 'a' with data elements 1:4
## a<-matrix(data=1:4, nrow=2, ncol=2)
##
## ##initialize the makeCacheMatrix object using the matrix 'a'
## ma<-makeCacheMatrix(a)
##
## ## calculate the inverse matrix by calling cachesolve()
## sa <-cacheSolve(ma)
##
## ## now call the same function again, the output should now indicate
## ## that we're calling the cached version of the inverse matrix
## sa <-cacheSolve(ma)
## makeCacheMatrix creates a function that is a list with the methods
## of setting and getting the cached inverse matrix as well as
## calculating the inverse matrix if it has not yet been calculated
# Build a cache-aware wrapper around a matrix. The returned object is a list
# of four accessors: set/get for the matrix itself, setsolve/getsolve for a
# cached inverse (NULL until cacheSolve() stores one).
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse of `x`; NULL means "not computed yet".
  cached_inverse <- NULL
  # Replace the stored matrix and invalidate any previously cached inverse.
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  # Return the stored matrix.
  get <- function() x
  # Store a freshly computed inverse in the cache.
  setsolve <- function(solve) cached_inverse <<- solve
  # Return the cached inverse, or NULL when nothing has been cached yet.
  getsolve <- function() cached_inverse
  # The four accessor names form the public interface used by cacheSolve().
  list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## This is the actual solver function
# Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix object).
# A previously computed inverse is served from the cache; otherwise the
# inverse is computed with solve(), stored via setsolve(), and returned.
# Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  # Cache hit: skip recomputation entirely.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  message("calculating inverse matrix")
  # Cache miss: invert the stored matrix and remember the result.
  inverse <- solve(x$get(), ...)
  x$setsolve(inverse)
  inverse
}
| /cachematrix.R | no_license | sealeopard/ProgrammingAssignment2 | R | false | false | 2,308 | r | ## A set of functions to create the inverse of a matrix and
## cache the resulting matrix
## Usage:
## ##create a 2x2 matrix 'a' with data elements 1:4
## a<-matrix(data=1:4, nrow=2, ncol=2)
##
## ##initialize the makeCacheMatrix object using the matrix 'a'
## ma<-makeCacheMatrix(a)
##
## ## calculate the inverse matrix by calling cachesolve()
## sa <-cacheSolve(ma)
##
## ## now call the same function again, the output should now indicate
## ## that we're calling the cached version of the inverse matrix
## sa <-cacheSolve(ma)
## makeCacheMatrix creates a function that is a list with the methods
## of setting and getting the cached inverse matrix as well as
## calculating the inverse matrix if it has not yet been calculated
# Build a cache-aware wrapper around a matrix. The returned object is a list
# of four accessors: set/get for the matrix itself, setsolve/getsolve for a
# cached inverse (NULL until cacheSolve() stores one).
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse of `x`; NULL means "not computed yet".
  cached_inverse <- NULL
  # Replace the stored matrix and invalidate any previously cached inverse.
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  # Return the stored matrix.
  get <- function() x
  # Store a freshly computed inverse in the cache.
  setsolve <- function(solve) cached_inverse <<- solve
  # Return the cached inverse, or NULL when nothing has been cached yet.
  getsolve <- function() cached_inverse
  # The four accessor names form the public interface used by cacheSolve().
  list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## This is the actual solver function
# Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix object).
# A previously computed inverse is served from the cache; otherwise the
# inverse is computed with solve(), stored via setsolve(), and returned.
# Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  # Cache hit: skip recomputation entirely.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  message("calculating inverse matrix")
  # Cache miss: invert the stored matrix and remember the result.
  inverse <- solve(x$get(), ...)
  x$setsolve(inverse)
  inverse
}
|
\name{D.sq}
\alias{D.sq}
\title{Mahalanobis distance for two sites using a pooled covariance matrix}
\description{Allows much easier multivariate comparisons of groups of sites than those provided by the function \code{mahalanobis} in the \code{base} library.
}
\usage{
D.sq(g1, g2)
}
\arguments{
\item{g1}{Community vector for site 1}
\item{g2}{Community vector for site 2}
}
\references{
Legendre, P, and L. Legendre (1998) \emph{Numerical Ecology, 2nd English Edition}. Elsevier,
Amsterdam, The Netherlands.
}
\author{Ken Aho}
\seealso{\code{\link{mahalanobis}}}
\examples{
g1<-matrix(ncol=3,nrow=3,data=c(1,0,3,2,1,3,4,0,2))
g2<-matrix(ncol=3,nrow=3,data=c(1,2,4,5,2,3,4,3,1))
D.sq(g1,g2)$D.sq
}
\keyword{multivariate}
| /man/D.sq.Rd | no_license | cran/asbio | R | false | false | 754 | rd | \name{D.sq}
\alias{D.sq}
\title{Mahalanobis distance for two sites using a pooled covariance matrix}
\description{Allows much easier multivariate comparisons of groups of sites than those provided by the function \code{mahalanobis} in the \code{base} library.
}
\usage{
D.sq(g1, g2)
}
\arguments{
\item{g1}{Community vector for site 1}
\item{g2}{Community vector for site 2}
}
\references{
Legendre, P, and L. Legendre (1998) \emph{Numerical Ecology, 2nd English Edition}. Elsevier,
Amsterdam, The Netherlands.
}
\author{Ken Aho}
\seealso{\code{\link{mahalanobis}}}
\examples{
g1<-matrix(ncol=3,nrow=3,data=c(1,0,3,2,1,3,4,0,2))
g2<-matrix(ncol=3,nrow=3,data=c(1,2,4,5,2,3,4,3,1))
D.sq(g1,g2)$D.sq
}
\keyword{multivariate}
|
# Shiny UI for the "Chord Progression in Modern Music" dashboard.
# NOTE(review): `artists` is referenced below but not defined in this file;
# presumably it is created in global.R -- confirm before deploying.
library(googleVis)
library(shiny)
library(shinythemes)
# Navbar with four tabs: a static presentation, two signature explorers
# (by artist and by genre, each rendering a Sankey diagram), and insights.
fluidPage(
  navbarPage("Chord Progression in Modern Music", id = 'nav',
             # Tab 1: static presentation rendered from a pre-built HTML file.
             tabPanel("Presentation",
                      div(id = 'about',
                          fluidRow(
                            column(width = 12, offset = 1,
                                   br(),
                                   includeHTML("EDA.html")
                            )
                          ))
             ),
             #####
             # Tab 2: pick one or more artists; the server renders the
             # corresponding Sankey diagram into output "sankey_a".
             tabPanel("Artist Signature Explorer",
                      fluidRow(
                        column(
                          width = 2,
                          style = "background-color: #F8F8F8",
                          h4('Explore chord progressions by...'),
                          br(),
                          selectInput("select_artist",
                                      label = "Choose artist to explore",
                                      choices = artists,
                                      multiple = TRUE,
                                      selectize = TRUE,
                                      selected = "U2"
                          ),
                          submitButton("Get Signature")
                        ),
                        column(width = 10,
                               htmlOutput("sankey_a"))
                      )
             ),
             ####
             # Tab 3: free-text genre input; the server renders output
             # "sankey_g". Note this is a textInput (unlike the selectInput
             # used for artists), so genre names are not validated client-side.
             tabPanel("Genre Signature Explorer",
                      fluidRow(
                        column(
                          width = 2,
                          style = "background-color:#F8F8F8",
                          h4('Explore chord progressions by...'),
                          br(),
                          textInput("select_genre",
                                    label = "Choose genre to explore",
                                    value = "pop"
                          ),
                          submitButton("Get Signature")
                        ),
                        column(width = 10,
                               htmlOutput("sankey_g")
                        )
                      )
             ),
             # Tab 4: short static summary of findings.
             tabPanel("Insights",
                      div(id = 'about',
                          fluidRow(
                            column(width = 12, offset = 1,
                                   br(),
                                   p("I-V-vi-IV stands out in the dataset as the most commonly used progression."),
                                   br(),
                                   p("Starting with a vi conveys a sadder tone than using I.")
                            )
                          ))
             )
             #####
  )
)
| /NYCDataSciAcademy/Projects/Web Scraping/OamarGianan/ui.R | no_license | 3rdworldjuander/DataScienceStuff | R | false | false | 3,392 | r | library(googleVis)
# Duplicate copy of the dashboard UI definition (the preceding section is
# the same file's text repeated by the dataset export).
# NOTE(review): `artists` must be defined elsewhere (e.g. global.R) -- confirm.
library(shiny)
library(shinythemes)
fluidPage(
  navbarPage("Chord Progression in Modern Music", id = 'nav',
             # Tab 1: static presentation rendered from a pre-built HTML file.
             tabPanel("Presentation",
                      div(id = 'about',
                          fluidRow(
                            column(width = 12, offset = 1,
                                   br(),
                                   includeHTML("EDA.html")
                            )
                          ))
             ),
             #####
             # Tab 2: artist picker; server renders output "sankey_a".
             tabPanel("Artist Signature Explorer",
                      fluidRow(
                        column(
                          width = 2,
                          style = "background-color: #F8F8F8",
                          h4('Explore chord progressions by...'),
                          br(),
                          selectInput("select_artist",
                                      label = "Choose artist to explore",
                                      choices = artists,
                                      multiple = TRUE,
                                      selectize = TRUE,
                                      selected = "U2"
                          ),
                          submitButton("Get Signature")
                        ),
                        column(width = 10,
                               htmlOutput("sankey_a"))
                      )
             ),
             ####
             # Tab 3: free-text genre input; server renders output "sankey_g".
             tabPanel("Genre Signature Explorer",
                      fluidRow(
                        column(
                          width = 2,
                          style = "background-color:#F8F8F8",
                          h4('Explore chord progressions by...'),
                          br(),
                          textInput("select_genre",
                                    label = "Choose genre to explore",
                                    value = "pop"
                          ),
                          submitButton("Get Signature")
                        ),
                        column(width = 10,
                               htmlOutput("sankey_g")
                        )
                      )
             ),
             # Tab 4: short static summary of findings.
             tabPanel("Insights",
                      div(id = 'about',
                          fluidRow(
                            column(width = 12, offset = 1,
                                   br(),
                                   p("I-V-vi-IV stands out in the dataset as the most commonly used progression."),
                                   br(),
                                   p("Starting with a vi conveys a sadder tone than using I.")
                            )
                          ))
             )
             #####
  )
)
|
# Fuzzer-generated regression input (libFuzzer/valgrind harness): a 9x1
# "lims" matrix and a 5x1 "points" matrix of extreme double values, used as
# the argument list for the internal routine palm:::pbc_distances.
testlist <- list(lims = structure(c(2.81901265658761e-308, 1.1251263341228e+224, 1.79124889821742e-306, 6.64450420329141e-27, 0, 0, 0, 0, 0), .Dim = c(9L, 1L)), points = structure(c(7.29112204671794e-304, 3.23790861658519e-319, 7.06861463370568e-304, 1.45350442868274e+135, 1.42448835049118e+214 ), .Dim = c(5L, 1L)))
# Invoke the internal routine with the generated argument list.
result <- do.call(palm:::pbc_distances,testlist)
str(result) | /palm/inst/testfiles/pbc_distances/libFuzzer_pbc_distances/pbc_distances_valgrind_files/1612988754-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 381 | r | testlist <- list(lims = structure(c(2.81901265658761e-308, 1.1251263341228e+224, 1.79124889821742e-306, 6.64450420329141e-27, 0, 0, 0, 0, 0), .Dim = c(9L, 1L)), points = structure(c(7.29112204671794e-304, 3.23790861658519e-319, 7.06861463370568e-304, 1.45350442868274e+135, 1.42448835049118e+214 ), .Dim = c(5L, 1L)))
# Re-run the fuzzer-generated call (duplicate copy of the invocation above).
result <- do.call(palm:::pbc_distances,testlist)
str(result) |
# Example extracted from the tippy package help page for use_tippy() and
# call_tippy(). Wrapped in if(interactive()) so the app only launches in an
# interactive session (e.g. not during automated checks).
library(tippy)
### Name: use_tippy
### Title: Use tippy
### Aliases: use_tippy call_tippy
### ** Examples
if(interactive()){
library(shiny)
# Minimal app: use_tippy() loads the tippy.js assets; call_tippy("[title]")
# targets every element that carries a title attribute.
shinyApp(
  ui = fluidPage(
    use_tippy(),
    p("Some text", title = "tooltip"),
    p("Some text", title = "tooltip"),
    p("Some text", title = "tooltip"),
    p("Some text", title = "tooltip"),
    p("Some text", title = "tooltip"),
    p("Some text", title = "tooltip"),
    call_tippy("[title]") # all elements with title
  ),
  server = function(input, output) {} # no server logic needed for tooltips
)
}
| /data/genthat_extracted_code/tippy/examples/use_tippy.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 525 | r | library(tippy)
# Duplicate copy of the tippy use_tippy()/call_tippy() example above
# (dataset export repeats the file text; the library(tippy) line of this
# copy was absorbed into the preceding metadata row).
### Name: use_tippy
### Title: Use tippy
### Aliases: use_tippy call_tippy
### ** Examples
if(interactive()){
library(shiny)
# Minimal app: use_tippy() loads the tippy.js assets; call_tippy("[title]")
# targets every element that carries a title attribute.
shinyApp(
  ui = fluidPage(
    use_tippy(),
    p("Some text", title = "tooltip"),
    p("Some text", title = "tooltip"),
    p("Some text", title = "tooltip"),
    p("Some text", title = "tooltip"),
    p("Some text", title = "tooltip"),
    p("Some text", title = "tooltip"),
    call_tippy("[title]") # all elements with title
  ),
  server = function(input, output) {} # no server logic needed for tooltips
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control_slider.R
\name{control_slider_construct}
\alias{control_slider_construct}
\title{helper function that constructs canvas items of a control_slider widget}
\usage{
control_slider_construct(
.switchboard,
inject = "",
minimum = 0,
maximum = 100,
label = " ",
size = 1,
...
)
}
\description{
helper function that constructs canvas items of a control_slider widget
}
\keyword{internal}
| /man/control_slider_construct.Rd | no_license | suharoschi/switchboard | R | false | true | 481 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control_slider.R
\name{control_slider_construct}
\alias{control_slider_construct}
\title{helper function that constructs canvas items of a control_slider widget}
\usage{
control_slider_construct(
.switchboard,
inject = "",
minimum = 0,
maximum = 100,
label = " ",
size = 1,
...
)
}
\description{
helper function that constructs canvas items of a control_slider widget
}
\keyword{internal}
|
context("Report Spec Tests")
# Defaults: a bare create_report() call produces a landscape TXT report spec.
test_that("create_report sets default values appropriately", {
  ret <- create_report()
  expect_equal(ret$orientation, "landscape")
  expect_equal(ret$output_type, "TXT")
})
# Explicit arguments override the defaults and are stored on the spec.
test_that("create_report changes parameters appropriately", {
  ret <- create_report(output_type = "TXT", orientation = "portrait")
  expect_equal(ret$orientation, "portrait")
  expect_equal(ret$output_type, "TXT")
})
# Misspelled / wrong-case values are rejected rather than silently accepted.
test_that("create_report traps invalid parameters", {
  expect_error(create_report(orientation = "porait"))
  expect_error(create_report(output_type = "txt"))
})
# options_fixed() default: 12 characters per unit of measure.
test_that("options_fixed sets default parameters appropriately", {
  ret <- create_report()
  ret <- options_fixed(ret)
  expect_equal(ret$cpuom, 12)
})
# options_fixed() stores an explicit cpuom value.
test_that("options_fixed changes parameters appropriately", {
  ret <- create_report()
  ret <- options_fixed(ret, cpuom = 10)
  expect_equal(ret$cpuom, 10)
})
# Out-of-range cpuom values are rejected.
test_that("options_fixed traps invalid parameters appropriately", {
  ret <- create_report()
  expect_error(options_fixed(ret, cpuom = 15))
})
# Oversized inputs (50 strings) exceed the allowed number of titles,
# footnotes, and page header/footer entries and must raise errors.
test_that("Titles, footnotes, header, and footer limits work as expected.", {
  rpt <- create_report("fork.out")
  st <- rep("W", 50)
  expect_error(titles(rpt, st))
  expect_error(footnotes(rpt, st))
  expect_error(page_header(rpt, left=st))
  expect_error(page_footer(rpt, left=st))
})
# Invalid enumerated values for footnotes() options are rejected.
test_that("Footnotes traps invalid parameter as expected.", {
  rpt <- create_report("fork.out")
  expect_error(footnotes(rpt, align = "error"))
  # NOTE(review): the valign expectation below is commented out -- presumably
  # valign is no longer validated (or no longer exists); confirm and either
  # restore or delete it.
  # expect_error(footnotes(rpt, valign = "error"))
  expect_error(footnotes(rpt, blank_row = "error"))
  expect_error(footnotes(rpt, borders = "error"))
})
# add_content() appends one content item and records the page_break flag;
# non-logical page_break values are rejected.
test_that("add_content works as expected.", {
  rpt <- create_report("fork.out")
  rpt <- add_content(rpt, "", page_break = FALSE)
  # Should put a page break token before the content
  expect_equal(length(rpt$content), 1)
  expect_equal(rpt$content[[1]]$page_break, FALSE)
  # Invalid value
  expect_error(add_content(rpt, "", page_break = "sam"))
})
# Invalid enumerated values for create_report() and invalid targets for
# write_report() (spec without a file path, non-spec objects) must error.
test_that("create_report parameter checks work as expected.", {
  expect_error(create_report(units = "fork"))
  expect_error(create_report(output_type = "fork"))
  expect_error(create_report(orientation = "fork"))
  expect_error(create_report(paper_size = "fork"))
  rpt <- create_report()
  expect_error(write_report(rpt))
  expect_error(write_report("fork"))
  expect_error(write_report(NA))
})
# line_size / line_count must be positive numbers; strings and negatives error.
test_that("line_size and line_count parameter checks work as expected.", {
  rpt <- create_report()
  expect_error(options_fixed(rpt, line_size = "a"))
  expect_error(options_fixed(rpt, line_size = -35))
  expect_error(options_fixed(rpt, line_count = "a"))
  expect_error(options_fixed(rpt, line_count = -876))
})
# Other options_fixed() parameters are validated: unknown editor name,
# negative cpuom, out-of-range lpuom.
test_that("options_fixed parameter checks work as expected.", {
  rpt <- create_report()
  expect_error(options_fixed(rpt, editor = "fork"))
  expect_error(options_fixed(rpt, cpuom = -2))
  expect_error(options_fixed(rpt, lpuom = 2356))
})
# write_report() rejects non-positive / non-numeric preview values.
test_that("preview parameter checks work as expected.", {
  rpt <- create_report()
  expect_error(write_report(rpt, preview = - 1))
  expect_error(write_report(rpt, preview = "a"))
})
# Unknown and NULL font types are rejected.
test_that("font_type parameter checks work as expected.", {
  expect_error(create_report(font_type = "fork"))
  expect_error(create_report(font_type = NULL))
})
# title_header() stores titles, right-hand text, and the blank_row option
# on the table spec's title_hdr element.
test_that("title_header function works as expected.", {
  tbl <- create_table(mtcars)
  th <- tbl %>% title_header("Table 1.0", "MTCARS Sample Data",
                             right = c("One", "Two"), blank_row = "below")
  expect_equal(is.null(th$title_hdr), FALSE)
  expect_equal(length(th$title_hdr$titles), 2)
  expect_equal(length(th$title_hdr$right), 2)
  expect_equal(th$title_hdr$blank_row, "below")
})
# page_by() stores the paging variable plus label/align/blank_row; when only
# the variable is given, the label defaults to "<var>: " and align to "left".
# The variable may be passed unquoted (NSE) or as a string.
test_that("page_by function works as expected.", {
  tbl <- create_table(mtcars)
  pg <- tbl %>% page_by(mpg, "MPG: ", "right", blank_row = "below")
  expect_equal(is.null(pg$page_by), FALSE)
  expect_equal(pg$page_by$var, "mpg")
  expect_equal(pg$page_by$label, "MPG: ")
  expect_equal(pg$page_by$align, "right")
  expect_equal(pg$page_by$blank_row, "below")
  pg <- tbl %>% page_by("mpg")
  expect_equal(is.null(pg$page_by), FALSE)
  expect_equal(pg$page_by$var, "mpg")
  expect_equal(is.null(pg$page_by$label), FALSE)
  expect_equal(pg$page_by$label, "mpg: ")
  expect_equal(pg$page_by$align, "left")
  expect_equal(pg$page_by$blank_row, "below")
})
| /tests/testthat/test-report_spec.R | no_license | armenic/reporter | R | false | false | 4,556 | r | context("Report Spec Tests")
# create_report() defaults: landscape orientation, TXT output.
test_that("create_report sets default values appropriately", {
  ret <- create_report()
  expect_equal(ret$orientation, "landscape")
  expect_equal(ret$output_type, "TXT")
})
# Explicit arguments override the defaults.
test_that("create_report changes parameters appropriately", {
  ret <- create_report(output_type = "TXT", orientation = "portrait")
  expect_equal(ret$orientation, "portrait")
  expect_equal(ret$output_type, "TXT")
})
# Misspelled orientation and lowercase output type must error.
test_that("create_report traps invalid parameters", {
  expect_error(create_report(orientation = "porait"))
  expect_error(create_report(output_type = "txt"))
})
# Default cpuom (presumably characters per unit of measure) is 12.
test_that("options_fixed sets default parameters appropriately", {
  ret <- create_report()
  ret <- options_fixed(ret)
  expect_equal(ret$cpuom, 12)
})
# An explicit cpuom is stored on the spec.
test_that("options_fixed changes parameters appropriately", {
  ret <- create_report()
  ret <- options_fixed(ret, cpuom = 10)
  expect_equal(ret$cpuom, 10)
})
# An out-of-range cpuom (15) must error.
test_that("options_fixed traps invalid parameters appropriately", {
  ret <- create_report()
  expect_error(options_fixed(ret, cpuom = 15))
})
# Titles, footnotes, and page header/footer must reject overly long input
# (50 strings exceeds the allowed limits).
test_that("Titles, footnotes, header, and footer limits work as expected.", {
  rpt <- create_report("fork.out")
  st <- rep("W", 50)
  expect_error(titles(rpt, st))
  expect_error(footnotes(rpt, st))
  expect_error(page_header(rpt, left=st))
  expect_error(page_footer(rpt, left=st))
})
# footnotes() must reject invalid align/blank_row/borders values.
test_that("Footnotes traps invalid parameter as expected.", {
  rpt <- create_report("fork.out")
  expect_error(footnotes(rpt, align = "error"))
  # expect_error(footnotes(rpt, valign = "error"))
  expect_error(footnotes(rpt, blank_row = "error"))
  expect_error(footnotes(rpt, borders = "error"))
})
# add_content() appends one content item and records page_break;
# a non-logical page_break must error.
test_that("add_content works as expected.", {
  rpt <- create_report("fork.out")
  rpt <- add_content(rpt, "", page_break = FALSE)
  # Should put a page break token before the content
  expect_equal(length(rpt$content), 1)
  expect_equal(rpt$content[[1]]$page_break, FALSE)
  # Invalid value
  expect_error(add_content(rpt, "", page_break = "sam"))
})
# Invalid units/output_type/orientation/paper_size values must error;
# write_report() must error on a bare spec and on non-report inputs.
test_that("create_report parameter checks work as expected.", {
  expect_error(create_report(units = "fork"))
  expect_error(create_report(output_type = "fork"))
  expect_error(create_report(orientation = "fork"))
  expect_error(create_report(paper_size = "fork"))
  rpt <- create_report()
  expect_error(write_report(rpt))
  expect_error(write_report("fork"))
  expect_error(write_report(NA))
})
# options_fixed() must reject non-numeric or negative line_size/line_count.
test_that("line_size and line_count parameter checks work as expected.", {
  rpt <- create_report()
  expect_error(options_fixed(rpt, line_size = "a"))
  expect_error(options_fixed(rpt, line_size = -35))
  expect_error(options_fixed(rpt, line_count = "a"))
  expect_error(options_fixed(rpt, line_count = -876))
})
# options_fixed() must reject an unknown editor and out-of-range
# cpuom/lpuom values.
test_that("options_fixed parameter checks work as expected.", {
  rpt <- create_report()
  expect_error(options_fixed(rpt, editor = "fork"))
  expect_error(options_fixed(rpt, cpuom = -2))
  expect_error(options_fixed(rpt, lpuom = 2356))
})
# write_report() must reject a negative or non-numeric preview value.
test_that("preview parameter checks work as expected.", {
  rpt <- create_report()
  expect_error(write_report(rpt, preview = - 1))
  expect_error(write_report(rpt, preview = "a"))
})
# create_report() must reject an unknown or NULL font_type.
test_that("font_type parameter checks work as expected.", {
  expect_error(create_report(font_type = "fork"))
  expect_error(create_report(font_type = NULL))
})
# title_header() attaches a title-header block to a table spec: two titles,
# two right-aligned cells, and the requested blank_row setting.
test_that("title_header function works as expected.", {
  tbl <- create_table(mtcars)
  th <- title_header(tbl, "Table 1.0", "MTCARS Sample Data",
                     right = c("One", "Two"), blank_row = "below")
  expect_false(is.null(th$title_hdr))
  expect_equal(length(th$title_hdr$titles), 2)
  expect_equal(length(th$title_hdr$right), 2)
  expect_equal(th$title_hdr$blank_row, "below")
})
# page_by() records var/label/align/blank_row; when only the variable is
# given, the label defaults to "<var>: " and the alignment to "left".
test_that("page_by function works as expected.", {
  tbl <- create_table(mtcars)
  # Fully specified call
  pg <- page_by(tbl, mpg, "MPG: ", "right", blank_row = "below")
  expect_false(is.null(pg$page_by))
  expect_equal(pg$page_by$var, "mpg")
  expect_equal(pg$page_by$label, "MPG: ")
  expect_equal(pg$page_by$align, "right")
  expect_equal(pg$page_by$blank_row, "below")
  # Defaults, with the variable given as a string
  pg <- page_by(tbl, "mpg")
  expect_false(is.null(pg$page_by))
  expect_equal(pg$page_by$var, "mpg")
  expect_false(is.null(pg$page_by$label))
  expect_equal(pg$page_by$label, "mpg: ")
  expect_equal(pg$page_by$align, "left")
  expect_equal(pg$page_by$blank_row, "below")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/servicecatalog_operations.R
\name{servicecatalog_describe_constraint}
\alias{servicecatalog_describe_constraint}
\title{Gets information about the specified constraint}
\usage{
servicecatalog_describe_constraint(AcceptLanguage, Id)
}
\arguments{
\item{AcceptLanguage}{The language code.
\itemize{
\item \code{en} - English (default)
\item \code{jp} - Japanese
\item \code{zh} - Chinese
}}
\item{Id}{[required] The identifier of the constraint.}
}
\value{
A list with the following syntax:\preformatted{list(
ConstraintDetail = list(
ConstraintId = "string",
Type = "string",
Description = "string",
Owner = "string",
ProductId = "string",
PortfolioId = "string"
),
ConstraintParameters = "string",
Status = "AVAILABLE"|"CREATING"|"FAILED"
)
}
}
\description{
Gets information about the specified constraint.
}
\section{Request syntax}{
\preformatted{svc$describe_constraint(
AcceptLanguage = "string",
Id = "string"
)
}
}
\keyword{internal}
| /cran/paws.management/man/servicecatalog_describe_constraint.Rd | permissive | TWarczak/paws | R | false | true | 1,057 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/servicecatalog_operations.R
\name{servicecatalog_describe_constraint}
\alias{servicecatalog_describe_constraint}
\title{Gets information about the specified constraint}
\usage{
servicecatalog_describe_constraint(AcceptLanguage, Id)
}
\arguments{
\item{AcceptLanguage}{The language code.
\itemize{
\item \code{en} - English (default)
\item \code{jp} - Japanese
\item \code{zh} - Chinese
}}
\item{Id}{[required] The identifier of the constraint.}
}
\value{
A list with the following syntax:\preformatted{list(
ConstraintDetail = list(
ConstraintId = "string",
Type = "string",
Description = "string",
Owner = "string",
ProductId = "string",
PortfolioId = "string"
),
ConstraintParameters = "string",
Status = "AVAILABLE"|"CREATING"|"FAILED"
)
}
}
\description{
Gets information about the specified constraint.
}
\section{Request syntax}{
\preformatted{svc$describe_constraint(
AcceptLanguage = "string",
Id = "string"
)
}
}
\keyword{internal}
|
######################
# Ben Goodwin #
# MS6371 #
# HW8 #
######################
##################################################################
#Question 1
# Read the team data and plot payroll against wins.
library(ggplot2)  # ggplot() is used below; the script fails standalone without this
bbDat <- read.csv("teams.csv")
#View(bbDat)  # uncomment for a manual sanity check
# Scatterplot of payroll vs. wins
ggplot(data = bbDat, aes(x = Wins, y = Payroll)) +
  geom_point(color = 'blue')
##################################################################
##################################################################
#Question 2
# Correlation coefficient between payroll and wins across all teams.
wins <- bbDat$Wins
payroll <- bbDat$Payroll
cor(payroll, wins)
##################################################################
##################################################################
#Question 3
# Repeat the plot and correlation with row 29 removed
# ("minus SD" per the original comment).
bbNoSD <- bbDat[-c(29), ]
#View(bbNoSD)  # uncomment for a manual sanity check
ggplot(data = bbNoSD, aes(x = Wins, y = Payroll)) +
  geom_point(color = 'brown2')
winsNosd <- bbNoSD$Wins
payrollNosd <- bbNoSD$Payroll
cor(payrollNosd, winsNosd)
##################################################################
##################################################################
#Question 4
# Distribution checks, then a one-way ANOVA with Tukey's HSD.
hist(log(bbDat$Payroll))
hist(log(bbDat$Wins))
# NOTE(review): converting Wins/Payroll to factors makes the lm() below use a
# categorical response, which is unusual for lm/aov -- confirm intent.
bbDat$Wins <- as.factor(bbDat$Wins)
bbDat$Payroll <- as.factor(bbDat$Payroll)
bb.lm <- lm(Wins ~ Team, data = bbDat)
bb.av <- aov(bb.lm)
summary(bb.av)
tukey.test <- TukeyHSD(bb.av)
tukey.test
##################################################################
| /Unit8/HW8.r | no_license | bgoodwinSMU/MSDS6371 | R | false | false | 1,522 | r | ######################
# Ben Goodwin #
# MS6371 #
# HW8 #
######################
##################################################################
#Question 1
# Read the team data and plot payroll against wins.
library(ggplot2)  # ggplot() is used below; the script fails standalone without this
bbDat <- read.csv("teams.csv")
#View(bbDat)  # uncomment for a manual sanity check
# Scatterplot of payroll vs. wins
ggplot(data = bbDat, aes(x = Wins, y = Payroll)) +
  geom_point(color = 'blue')
##################################################################
##################################################################
#Question 2
# Correlation coefficient between payroll and wins across all teams.
wins <- bbDat$Wins
payroll <- bbDat$Payroll
cor(payroll, wins)
##################################################################
##################################################################
#Question 3
# Repeat the plot and correlation with row 29 removed
# ("minus SD" per the original comment).
bbNoSD <- bbDat[-c(29), ]
#View(bbNoSD)  # uncomment for a manual sanity check
ggplot(data = bbNoSD, aes(x = Wins, y = Payroll)) +
  geom_point(color = 'brown2')
winsNosd <- bbNoSD$Wins
payrollNosd <- bbNoSD$Payroll
cor(payrollNosd, winsNosd)
##################################################################
##################################################################
#Question 4
# Distribution checks, then a one-way ANOVA with Tukey's HSD.
hist(log(bbDat$Payroll))
hist(log(bbDat$Wins))
# NOTE(review): converting Wins/Payroll to factors makes the lm() below use a
# categorical response, which is unusual for lm/aov -- confirm intent.
bbDat$Wins <- as.factor(bbDat$Wins)
bbDat$Payroll <- as.factor(bbDat$Payroll)
bb.lm <- lm(Wins ~ Team, data = bbDat)
bb.av <- aov(bb.lm)
summary(bb.av)
tukey.test <- TukeyHSD(bb.av)
tukey.test
##################################################################
|
# Exploratory data analysis
# Project 2
# Author: Ragnhildur G. Finnbjornsdottir
# Date: 20. september 2014
##############################
# NOTE(review): setwd() in a script is fragile -- it assumes the script is
# launched from the parent of EDA_Project2; consider relative paths instead.
setwd("./EDA_Project2")
# Load the emissions data and the source classification table
# (scc is not used in this plot).
nei <- readRDS("./summarySCC_PM25.rds")
scc <- readRDS("./Source_Classification_Code.rds")
################################################
# Plot 2:
# Have total emissions from PM2.5 decreased in the Baltimore City,
# Maryland (fips == "24510") from 1999 to 2008? Use the base plotting system to
# make a plot answering this question.
# Subset the data for only Baltimore city
Marynei <- subset(nei, (nei$fips == "24510"))
# Aggregate the data for total emission per year
agg <- aggregate(Emissions ~ year , data = Marynei, FUN = sum)
# Fix margins so titles fit
par(mar=c(5.1, 5.1, 4.1, 2.1))
# Create barplot
barplot(agg$Emissions, names.arg = agg$year,
        xlab = "Year",
        ylab = expression("Total PM"[2.5]* " emissions"),
        main = expression("Total PM"[2.5]* " emissions in Baltimore city"),
        col = 15)
# Copy the on-screen plot to a 480x480 PNG (device closed by dev.off() below)
dev.copy(png, "plot2.png", height = 480, width = 480)
dev.off() | /Plot2.R | no_license | ragnhildurf/Exploratory-data-analysis | R | false | false | 1,093 | r | # Exploratory data analysis
# Project 2
# Author: Ragnhildur G. Finnbjornsdottir
# Date: 20. september 2014
##############################
# NOTE(review): setwd() in a script is fragile -- it assumes the script is
# launched from the parent of EDA_Project2; consider relative paths instead.
setwd("./EDA_Project2")
# Load the emissions data and the source classification table
# (scc is not used in this plot).
nei <- readRDS("./summarySCC_PM25.rds")
scc <- readRDS("./Source_Classification_Code.rds")
################################################
# Plot 2:
# Have total emissions from PM2.5 decreased in the Baltimore City,
# Maryland (fips == "24510") from 1999 to 2008? Use the base plotting system to
# make a plot answering this question.
# Subset the data for only Baltimore city
Marynei <- subset(nei, (nei$fips == "24510"))
# Aggregate the data for total emission per year
agg <- aggregate(Emissions ~ year , data = Marynei, FUN = sum)
# Fix margins so titles fit
par(mar=c(5.1, 5.1, 4.1, 2.1))
# Create barplot
barplot(agg$Emissions, names.arg = agg$year,
        xlab = "Year",
        ylab = expression("Total PM"[2.5]* " emissions"),
        main = expression("Total PM"[2.5]* " emissions in Baltimore city"),
        col = 15)
# Copy the on-screen plot to a 480x480 PNG (device closed by dev.off() below)
dev.copy(png, "plot2.png", height = 480, width = 480)
dev.off() |
# Data preprocessing
# Load the raw dataset (expects Data.csv in the working directory).
dataset <- read.csv('Data.csv')
# Handling missing data: replace NA ages/salaries with the column mean.
dataset$Age <- ifelse(is.na(dataset$Age),
                      ave(dataset$Age, FUN = function(x) mean(x, na.rm = TRUE)),
                      dataset$Age)
dataset$Salary <- ifelse(is.na(dataset$Salary),
                         ave(dataset$Salary, FUN = function(x) mean(x, na.rm = TRUE)),
                         dataset$Salary)
# Encoding categorical variables as numeric factor codes.
dataset$Country <- factor(dataset$Country,
                          levels = c('France', 'Spain', 'Germany'),
                          labels = c(1, 2, 3))
dataset$Purchased <- factor(dataset$Purchased,
                            levels = c('No', 'Yes'),
                            labels = c(0, 1))
# Splitting the data into the training and test data sets.
#install.packages('caTools')  # one-time install
library(caTools)  # provides sample.split(); the script errors without this
set.seed(123)
split <- sample.split(dataset$Purchased, SplitRatio = 0.8)
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)
# Feature scaling: standardize columns 2:3 (Age, Salary, per the code above).
training_set[, 2:3] <- scale(training_set[, 2:3])
test_set[, 2:3] = scale(test_set[, 2:3]) | /Part 1 - Data Preprocessing/data_preprocessing.R | no_license | jannetarsa/MachineLearningA-Z | R | false | false | 1,090 | r | # Data preprocessing
# Load the raw dataset (expects Data.csv in the working directory).
dataset <- read.csv('Data.csv')
# Handling missing data: replace NA ages/salaries with the column mean.
dataset$Age <- ifelse(is.na(dataset$Age),
                      ave(dataset$Age, FUN = function(x) mean(x, na.rm = TRUE)),
                      dataset$Age)
dataset$Salary <- ifelse(is.na(dataset$Salary),
                         ave(dataset$Salary, FUN = function(x) mean(x, na.rm = TRUE)),
                         dataset$Salary)
# Encoding categorical variables as numeric factor codes.
dataset$Country <- factor(dataset$Country,
                          levels = c('France', 'Spain', 'Germany'),
                          labels = c(1, 2, 3))
dataset$Purchased <- factor(dataset$Purchased,
                            levels = c('No', 'Yes'),
                            labels = c(0, 1))
# Splitting the data into the training and test data sets.
#install.packages('caTools')  # one-time install
library(caTools)  # provides sample.split(); the script errors without this
set.seed(123)
split <- sample.split(dataset$Purchased, SplitRatio = 0.8)
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)
# Feature scaling: standardize columns 2:3 (Age, Salary, per the code above).
training_set[, 2:3] <- scale(training_set[, 2:3])
test_set[, 2:3] = scale(test_set[, 2:3]) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_pct.R
\name{get_pct_routes_fast}
\alias{get_pct_routes_fast}
\title{Get fast road network results from the PCT}
\usage{
get_pct_routes_fast(region = NULL, purpose = "commute",
geography = "msoa", extension = ".Rds")
}
\arguments{
\item{region}{The PCT region or local authority to download data from (e.g. \code{west-yorkshire} or \code{Leeds}).
See \code{View(pct_regions_lookup)} for a full list of possible region names.}
\item{purpose}{Trip purpose (typically \code{school} or \code{commute})}
\item{geography}{Geographic resolution of outputs (\code{msoa} or \code{lsoa})}
\item{extension}{The type of file to download (typically \code{.Rds})}
}
\description{
Wrapper around \code{[get_pct()]} that gets rf data from the PCT.
}
\examples{
z = get_pct_routes_fast("isle-of-wight")
plot(z)
}
| /man/get_pct_routes_fast.Rd | no_license | HarukoNakao/pct | R | false | true | 882 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_pct.R
\name{get_pct_routes_fast}
\alias{get_pct_routes_fast}
\title{Get fast road network results from the PCT}
\usage{
get_pct_routes_fast(region = NULL, purpose = "commute",
geography = "msoa", extension = ".Rds")
}
\arguments{
\item{region}{The PCT region or local authority to download data from (e.g. \code{west-yorkshire} or \code{Leeds}).
See \code{View(pct_regions_lookup)} for a full list of possible region names.}
\item{purpose}{Trip purpose (typically \code{school} or \code{commute})}
\item{geography}{Geographic resolution of outputs (\code{msoa} or \code{lsoa})}
\item{extension}{The type of file to download (typically \code{.Rds})}
}
\description{
Wrapper around \code{[get_pct()]} that gets rf data from the PCT.
}
\examples{
z = get_pct_routes_fast("isle-of-wight")
plot(z)
}
|
## Plots one model output variable over time, with one panel per level of
## `separar_por` and one line+ribbon per level of `parametro`. Also writes a
## LaTeX table and a comma-separated text table of summary measures to ./imgs/.
##
## Args:
##   tabla_datos:     data frame of model runs (must contain X.step.,
##                    X.run.number., and the columns named below)
##   variable_salida: column name (string) of the output variable
##   parametro:       column name (string) of the parameter to color by
##   texto_y:         y-axis label
##   texto_cuadro:    legend title
##   multiplicar:     scale factor applied to plotted values (default 1)
##   separar_por:     column name (string) used for facetting
##   tamano.texto:    base text size for the plot (default 12)
## Returns: the ggplot object (last expression of the function).
graficarD <- function(tabla_datos, variable_salida, parametro, texto_y, texto_cuadro, multiplicar = 1, separar_por, tamano.texto=12) {
  ############# PACKAGES #############
  library(plyr) ## for ddply(), which builds the summary tables
  library(ggplot2)
  library(dplyr) ## for select(), to pick columns from the data frame
  library(scales)
  library(xtable)
  ## Keep only the columns of interest.
  tabla_datos.variable_salida <- select(tabla_datos, X.step., X.run.number., parametro, variable_salida, separar_por)
  ## Keep only steps divisible by 6 and convert steps to years
  ## (presumably 6 steps = 1 year -- confirm against the model).
  tabla_datos.variable_salida.6 <- tabla_datos.variable_salida[tabla_datos.variable_salida$X.step.%%6==0,]
  tabla_datos.variable_salida.6$ano <- tabla_datos.variable_salida.6$X.step. / 6
  ## Rename the dynamically-chosen columns to fixed names so the ddply/ggplot
  ## calls below can reference them (original author notes it fails otherwise).
  colnames(tabla_datos.variable_salida.6)[colnames(tabla_datos.variable_salida.6)==variable_salida] <- "variable_salida_n"
  colnames(tabla_datos.variable_salida.6)[colnames(tabla_datos.variable_salida.6)==parametro] <- "parametro_n"
  colnames(tabla_datos.variable_salida.6)[colnames(tabla_datos.variable_salida.6)==separar_por] <- "separar_por_n"
  ## Summary per parameter level, year, and facet level: central tendency,
  ## spread, and the 5%/95% quantiles used for the ribbon.
  resumen.tabla_datos.variable_salida.6 <- ddply(tabla_datos.variable_salida.6, .(parametro_n, ano, separar_por_n), summarise,
                                                 mediana = median(variable_salida_n),
                                                 media = mean(variable_salida_n),
                                                 sd = sd(variable_salida_n),
                                                 cv = (sd / media) * 100,
                                                 se = sd(variable_salida_n) / sqrt(length(variable_salida_n)),
                                                 lq = quantile(variable_salida_n , probs = (0.05) ),
                                                 uq = quantile(variable_salida_n , probs = (0.95) ),
                                                 maxi = max(variable_salida_n),
                                                 mini = min(variable_salida_n))
  ## Table reporting the average of each measure per parameter/facet level.
  resumen.medidas <- ddply(resumen.tabla_datos.variable_salida.6, .(parametro_n, separar_por_n ), summarise,
                           media = round(mean(media),2),
                           #media.se = mean(se),
                           sd.promedio = round(mean(sd),2),
                           #vari.promedio = mean(vari),
                           cv.promedio = round(mean(na.omit(cv)),2)
                           #rango.promedio = mean(rango),
                           #media.maxi = mean(maxi),
                           #media.mini = mean(mini),
                           #mediana = mean(mediana)
  )
  print(variable_salida)
  print(resumen.medidas)
  ## Persist the summary table to ./imgs/ as LaTeX and as plain text.
  print(xtable(resumen.medidas, type = "latex"), file = paste("./imgs/",parametro,"-",variable_salida,".tex",sep=""))
  write.table(resumen.medidas, file = paste("./imgs/",parametro,"-",variable_salida,".txt",sep=""), sep = ",", quote = FALSE, row.names = T)
  resumen.tabla_datos.variable_salida.6$parametro_n <- as.factor(resumen.tabla_datos.variable_salida.6$parametro_n)
  ################ VISUALIZATION ########################
  ggplot(resumen.tabla_datos.variable_salida.6, aes(x=ano, y= multiplicar * media, colour = parametro_n, fill = parametro_n )) +
    facet_grid(. ~ separar_por_n) +
    geom_ribbon(aes(ymin = multiplicar * lq, ymax = multiplicar * uq), alpha = 0.15, colour = 0) +
    geom_line(size = 0.5) +
    #xlab("años") +
    xlab("years") +
    ylab(texto_y) +
    labs(fill = texto_cuadro, colour = texto_cuadro) +
    theme(panel.spacing = unit(1, "lines")) +
    #theme_classic2() +
    theme(text = element_text(size=tamano.texto))
  #######################################################
}
| /results/scenario-analysis/funcion-graficar-disturbios.R | permissive | laparcela/OMYKmodel | R | false | false | 4,153 | r | ## esta función grafica una variable de salida a lo largo del tiempo separada por tratamientos
## Plots one model output variable over time, with one panel per level of
## `separar_por` and one line+ribbon per level of `parametro`. Also writes a
## LaTeX table and a comma-separated text table of summary measures to ./imgs/.
##
## Args:
##   tabla_datos:     data frame of model runs (must contain X.step.,
##                    X.run.number., and the columns named below)
##   variable_salida: column name (string) of the output variable
##   parametro:       column name (string) of the parameter to color by
##   texto_y:         y-axis label
##   texto_cuadro:    legend title
##   multiplicar:     scale factor applied to plotted values (default 1)
##   separar_por:     column name (string) used for facetting
##   tamano.texto:    base text size for the plot (default 12)
## Returns: the ggplot object (last expression of the function).
graficarD <- function(tabla_datos, variable_salida, parametro, texto_y, texto_cuadro, multiplicar = 1, separar_por, tamano.texto=12) {
  ############# PACKAGES #############
  library(plyr) ## for ddply(), which builds the summary tables
  library(ggplot2)
  library(dplyr) ## for select(), to pick columns from the data frame
  library(scales)
  library(xtable)
  ## Keep only the columns of interest.
  tabla_datos.variable_salida <- select(tabla_datos, X.step., X.run.number., parametro, variable_salida, separar_por)
  ## Keep only steps divisible by 6 and convert steps to years
  ## (presumably 6 steps = 1 year -- confirm against the model).
  tabla_datos.variable_salida.6 <- tabla_datos.variable_salida[tabla_datos.variable_salida$X.step.%%6==0,]
  tabla_datos.variable_salida.6$ano <- tabla_datos.variable_salida.6$X.step. / 6
  ## Rename the dynamically-chosen columns to fixed names so the ddply/ggplot
  ## calls below can reference them (original author notes it fails otherwise).
  colnames(tabla_datos.variable_salida.6)[colnames(tabla_datos.variable_salida.6)==variable_salida] <- "variable_salida_n"
  colnames(tabla_datos.variable_salida.6)[colnames(tabla_datos.variable_salida.6)==parametro] <- "parametro_n"
  colnames(tabla_datos.variable_salida.6)[colnames(tabla_datos.variable_salida.6)==separar_por] <- "separar_por_n"
  ## Summary per parameter level, year, and facet level: central tendency,
  ## spread, and the 5%/95% quantiles used for the ribbon.
  resumen.tabla_datos.variable_salida.6 <- ddply(tabla_datos.variable_salida.6, .(parametro_n, ano, separar_por_n), summarise,
                                                 mediana = median(variable_salida_n),
                                                 media = mean(variable_salida_n),
                                                 sd = sd(variable_salida_n),
                                                 cv = (sd / media) * 100,
                                                 se = sd(variable_salida_n) / sqrt(length(variable_salida_n)),
                                                 lq = quantile(variable_salida_n , probs = (0.05) ),
                                                 uq = quantile(variable_salida_n , probs = (0.95) ),
                                                 maxi = max(variable_salida_n),
                                                 mini = min(variable_salida_n))
  ## Table reporting the average of each measure per parameter/facet level.
  resumen.medidas <- ddply(resumen.tabla_datos.variable_salida.6, .(parametro_n, separar_por_n ), summarise,
                           media = round(mean(media),2),
                           #media.se = mean(se),
                           sd.promedio = round(mean(sd),2),
                           #vari.promedio = mean(vari),
                           cv.promedio = round(mean(na.omit(cv)),2)
                           #rango.promedio = mean(rango),
                           #media.maxi = mean(maxi),
                           #media.mini = mean(mini),
                           #mediana = mean(mediana)
  )
  print(variable_salida)
  print(resumen.medidas)
  ## Persist the summary table to ./imgs/ as LaTeX and as plain text.
  print(xtable(resumen.medidas, type = "latex"), file = paste("./imgs/",parametro,"-",variable_salida,".tex",sep=""))
  write.table(resumen.medidas, file = paste("./imgs/",parametro,"-",variable_salida,".txt",sep=""), sep = ",", quote = FALSE, row.names = T)
  resumen.tabla_datos.variable_salida.6$parametro_n <- as.factor(resumen.tabla_datos.variable_salida.6$parametro_n)
  ################ VISUALIZATION ########################
  ggplot(resumen.tabla_datos.variable_salida.6, aes(x=ano, y= multiplicar * media, colour = parametro_n, fill = parametro_n )) +
    facet_grid(. ~ separar_por_n) +
    geom_ribbon(aes(ymin = multiplicar * lq, ymax = multiplicar * uq), alpha = 0.15, colour = 0) +
    geom_line(size = 0.5) +
    #xlab("años") +
    xlab("years") +
    ylab(texto_y) +
    labs(fill = texto_cuadro, colour = texto_cuadro) +
    theme(panel.spacing = unit(1, "lines")) +
    #theme_classic2() +
    theme(text = element_text(size=tamano.texto))
  #######################################################
}
|
library(rjazz)
### Name: get_block_attributes
### Title: Get the (header) attributes of a block
### Aliases: get_block_attributes
### ** Examples
## Not run:
##D create_source('demo_types')
##D
##D # Write a text file as a block.
##D txt <- c('Hi all,', '', 'This is a file.', '', 'bye,', 'me')
##D str <- paste(txt, collapse = '\n')
##D cat(str)
##D
##D put_raw_block('demo_types', 'blk_1', str)
##D
##D # The block is raw (not interpreted as data by the server) and can be converted to any raw type.
##D set_compatible_data_type('demo_types', 'blk_1', type_const[['BLOCKTYPE_RAW_MIME_TXT']])
##D
##D # curl 127.0.0.1:8888//demo_types.blk_1 (or open in a in a browser)
##D
##D get_block_attributes('demo_types', 'blk_1')
##D
##D # The attribute flags is writable by the user.
##D put_block_flags('demo_types', 'blk_1', 123000444)
##D
##D get_block_attributes('demo_types', 'blk_1')
##D
##D # Unlike the previous block, this block is a data block.
##D put_R_block('demo_types', 'blk_2', 3:6)
##D
##D # This trivial block can also be created by the server as..
##D create_block_seq('demo_types', 'blk_2', 3L, 6)
##D
##D get_block_attributes('demo_types', 'blk_2')
##D
##D # The block is interpreted as data by the server, it is an integer and can be converted to
##D any integer type.
##D set_compatible_data_type('demo_types', 'blk_2', type_const[['BLOCKTYPE_C_R_GRADE']])
##D
##D get_block_attributes('demo_types', 'blk_2')
##D
##D # This returns all the rows in a single string
##D get_block_as_string('demo_types', 'blk_2', '##D
##D
##D # With some help of R functions, the result of get_block_as_string() can be made integer again.
##D any(3:6 != as.integer(strsplit(get_block_as_string('demo_types', 'blk_2', '##D
##D
##D rs <- c('1', '2.7', '3.14')
##D
##D # Creating strings into numeric data. (The parse(.., collapse = '\n') is automatic.)
##D put_strings_as_block('demo_types', 'blk_3', rs, type_const[['BLOCKTYPE_C_R_REAL']])
##D
##D get_block_attributes('demo_types', 'blk_3')
##D
##D any(as.numeric(rs) != get_R_block('demo_types', 'blk_3'))
##D
##D delete_source('demo_types')
## End(Not run)
| /data/genthat_extracted_code/rjazz/examples/get_block_attributes.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 2,135 | r | library(rjazz)
### Name: get_block_attributes
### Title: Get the (header) attributes of a block
### Aliases: get_block_attributes
### ** Examples
## Not run:
##D create_source('demo_types')
##D
##D # Write a text file as a block.
##D txt <- c('Hi all,', '', 'This is a file.', '', 'bye,', 'me')
##D str <- paste(txt, collapse = '\n')
##D cat(str)
##D
##D put_raw_block('demo_types', 'blk_1', str)
##D
##D # The block is raw (not interpreted as data by the server) and can be converted to any raw type.
##D set_compatible_data_type('demo_types', 'blk_1', type_const[['BLOCKTYPE_RAW_MIME_TXT']])
##D
##D # curl 127.0.0.1:8888//demo_types.blk_1 (or open in a in a browser)
##D
##D get_block_attributes('demo_types', 'blk_1')
##D
##D # The attribute flags is writable by the user.
##D put_block_flags('demo_types', 'blk_1', 123000444)
##D
##D get_block_attributes('demo_types', 'blk_1')
##D
##D # Unlike the previous block, this block is a data block.
##D put_R_block('demo_types', 'blk_2', 3:6)
##D
##D # This trivial block can also be created by the server as..
##D create_block_seq('demo_types', 'blk_2', 3L, 6)
##D
##D get_block_attributes('demo_types', 'blk_2')
##D
##D # The block is interpreted as data by the server, it is an integer and can be converted to
##D any integer type.
##D set_compatible_data_type('demo_types', 'blk_2', type_const[['BLOCKTYPE_C_R_GRADE']])
##D
##D get_block_attributes('demo_types', 'blk_2')
##D
##D # This returns all the rows in a single string
##D get_block_as_string('demo_types', 'blk_2', '##D
##D
##D # With some help of R functions, the result of get_block_as_string() can be made integer again.
##D any(3:6 != as.integer(strsplit(get_block_as_string('demo_types', 'blk_2', '##D
##D
##D rs <- c('1', '2.7', '3.14')
##D
##D # Creating strings into numeric data. (The parse(.., collapse = '\n') is automatic.)
##D put_strings_as_block('demo_types', 'blk_3', rs, type_const[['BLOCKTYPE_C_R_REAL']])
##D
##D get_block_attributes('demo_types', 'blk_3')
##D
##D any(as.numeric(rs) != get_R_block('demo_types', 'blk_3'))
##D
##D delete_source('demo_types')
## End(Not run)
|
#' Nifti Object of the Harvard-Oxford Subcortical Atlas
#'
#' @name hoxsubcort.img
#' @docType data
#' @references \url{http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Atlases}
#' Makris N, Goldstein JM, Kennedy D, Hodge SM, Caviness VS, Faraone SV, Tsuang MT, Seidman LJ. Decreased volume of left and total anterior insular lobule in schizophrenia. Schizophr Res. 2006 Apr;83(2-3):155-71
#' Frazier JA, Chiu S, Breeze JL, Makris N, Lange N, Kennedy DN, Herbert MR, Bent EK, Koneru VK, Dieterich ME, Hodge SM, Rauch SL, Grant PE, Cohen BM, Seidman LJ, Caviness VS, Biederman J. Structural brain magnetic resonance imaging of limbic and thalamic volumes in pediatric bipolar disorder. Am J Psychiatry. 2005 Jul;162(7):1256-65
#' Desikan RS, Segonne F, Fischl B, Quinn BT, Dickerson BC, Blacker D, Buckner RL, Dale AM, Maguire RP, Hyman BT, Albert MS, Killiany RJ. An automated labeling system for subdividing the human cerebral cortex on MRI scans into gyral based regions of interest. Neuroimage. 2006 Jul 1;31(3):968-80.
#' Goldstein JM, Seidman LJ, Makris N, Ahern T, O'Brien LM, Caviness VS Jr, Kennedy DN, Faraone SV, Tsuang MT. Hypothalamic abnormalities in schizophrenia: sex effects and genetic vulnerability. Biol Psychiatry. 2007 Apr 15;61(8):935-45
#' @keywords data
NULL
#' Labels corresponding to Harvard-Oxford Subcortical Atlas
#'
#' @name hoxsubcort.df
#' @docType data
#' @keywords data
#' @seealso \link{hoxsubcort.img}
NULL | /R/hox_subcort.R | no_license | muschellij2/ENARSC2015 | R | false | false | 1,433 | r | #' Nifti Object of the Harvard-Oxford Subcortical Atlas
#'
#' @name hoxsubcort.img
#' @docType data
#' @references \url{http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Atlases}
#' Makris N, Goldstein JM, Kennedy D, Hodge SM, Caviness VS, Faraone SV, Tsuang MT, Seidman LJ. Decreased volume of left and total anterior insular lobule in schizophrenia. Schizophr Res. 2006 Apr;83(2-3):155-71
#' Frazier JA, Chiu S, Breeze JL, Makris N, Lange N, Kennedy DN, Herbert MR, Bent EK, Koneru VK, Dieterich ME, Hodge SM, Rauch SL, Grant PE, Cohen BM, Seidman LJ, Caviness VS, Biederman J. Structural brain magnetic resonance imaging of limbic and thalamic volumes in pediatric bipolar disorder. Am J Psychiatry. 2005 Jul;162(7):1256-65
#' Desikan RS, Segonne F, Fischl B, Quinn BT, Dickerson BC, Blacker D, Buckner RL, Dale AM, Maguire RP, Hyman BT, Albert MS, Killiany RJ. An automated labeling system for subdividing the human cerebral cortex on MRI scans into gyral based regions of interest. Neuroimage. 2006 Jul 1;31(3):968-80.
#' Goldstein JM, Seidman LJ, Makris N, Ahern T, O'Brien LM, Caviness VS Jr, Kennedy DN, Faraone SV, Tsuang MT. Hypothalamic abnormalities in schizophrenia: sex effects and genetic vulnerability. Biol Psychiatry. 2007 Apr 15;61(8):935-45
#' @keywords data
NULL
#' Labels corresponding to Harvard-Oxford Subcortical Atlas
#'
#' @name hoxsubcort.df
#' @docType data
#' @keywords data
#' @seealso \link{hoxsubcort.img}
NULL |
# function example - get measures of central tendency
# and spread for a numeric vector x. The user has a
# choice of measures and whether the results are printed.
#
# Args:
#   x:     numeric vector to summarize
#   npar:  if TRUE (default) use nonparametric measures (median/MAD);
#          if FALSE use mean/SD
#   print: if TRUE (default) cat() the chosen measures to the console
# Returns: a list with elements `center` and `spread`.
mysummary <- function(x,npar=TRUE,print=TRUE) {
  if (!npar) {
    center <- mean(x); spread <- sd(x)  # parametric: mean / standard deviation
  } else {
    center <- median(x); spread <- mad(x)  # nonparametric: median / MAD
  }
  # Labels match the measures actually computed.
  if (print & !npar) {
    cat("Mean=", center, "\n", "SD=", spread, "\n")
  } else if (print & npar) {
    cat("Median=", center, "\n", "MAD=", spread, "\n")
  }
  result <- list(center=center,spread=spread)
  return(result)
} | /mysummary.R | no_license | dolgovsv/test | R | false | false | 553 | r | # function example - get measures of central tendency
# and spread for a numeric vector x. The user has a
# choice of measures and whether the results are printed.
mysummary <- function(x, npar = TRUE, print = TRUE) {
  # Get measures of central tendency and spread for a numeric vector x.
  #
  # Args:
  #   x:     numeric vector to summarize
  #   npar:  TRUE (default) -> nonparametric median/MAD; FALSE -> mean/SD
  #   print: TRUE (default) -> cat() the chosen measures to the console
  # Returns: a list with elements `center` and `spread`.
  if (npar) {
    center <- median(x)
    spread <- mad(x)
  } else {
    center <- mean(x)
    spread <- sd(x)
  }
  # Scalar conditions should use && rather than elementwise & (same result
  # here, but && is the idiomatic and safer form in if()).
  if (print && !npar) {
    cat("Mean=", center, "\n", "SD=", spread, "\n")
  } else if (print && npar) {
    cat("Median=", center, "\n", "MAD=", spread, "\n")
  }
  result <- list(center = center, spread = spread)
  return(result)
}
#Adam Burns - 2/10/2015
#From Burns et al. Contribution of neutral processes to the assembly of the gut microbial communities changes over host development
#Fits the neutral model from Sloan et al. 2006 to an OTU table and returns several fitting statistics. Alternatively, will return predicted occurrence frequencies for each OTU based on their abundance in the metacommunity when stats=FALSE.
#spp: A community table for communities of interest with local communities/samples as rows and taxa as columns. All samples must be rarefied to the same depth.
#pool: A community table for defining source community (optional; Default=NULL).
#taxon: A table listing the taxonomic calls for each otu, with OTU ids as row names and taxonomic classifications as columns.
#If stats=TRUE the function will return fitting statistics.
#If stats=FALSE the function will return a table of observed and predicted values for each otu.
#' Fit the Sloan et al. (2006) neutral community model to an OTU table and
#' compare it against binomial and Poisson null models.
#'
#' spp:   local community table (samples x taxa, equal sequencing depth).
#' pool:  optional source-community table; defaults to using spp itself.
#' stats: TRUE -> return one-row data frame of fit statistics;
#'        FALSE -> per-OTU observed/predicted occurrence frequencies.
#' taxon: optional taxonomy table with OTU ids as row names.
sncm.fit <- function(spp, pool=NULL, stats=TRUE, taxon=NULL){
# NOTE(review): require() only warns when a package is missing (library()
# would fail fast), and options(warn=-1) silences warnings for the rest of
# the session without being restored on exit -- confirm both side effects
# are intended.
require(minpack.lm)
require(Hmisc)
require(stats4)
options(warn=-1)
#Calculate the number of individuals per community
N <- mean(apply(spp, 1, sum))
#Calculate the average relative abundance of each taxa across communities
# If no explicit source pool is supplied, the local communities themselves
# define the metacommunity relative abundances.
if(is.null(pool)){
p.m <- apply(spp, 2, mean)
p.m <- p.m[p.m != 0]
p <- p.m/N
} else {
p.m <- apply(pool, 2, mean)
p.m <- p.m[p.m != 0]
p <- p.m/N
}
#Calculate the occurrence frequency of each taxa across communities
# spp.bi is a 0/1 presence/absence version of the community table.
spp.bi <- 1*(spp>0)
freq <- apply(spp.bi, 2, mean)
freq <- freq[freq != 0]
#Combine
# merge(..., by=0) joins abundance and frequency on taxon names, keeping
# only taxa present in both vectors.
C <- merge(p, freq, by=0)
C <- C[order(C[,2]),]
C <- as.data.frame(C)
C.0 <- C[!(apply(C, 1, function(y) any(y == 0))),] #Removes rows with any zero (absent in either source pool or local communities)
p <- C.0[,2]
freq <- C.0[,3]
names(p) <- C.0[,1]
names(freq) <- C.0[,1]
#Calculate the limit of detection
# One read per community is the smallest detectable relative abundance.
d = 1/N
##Fit model parameter m (or Nm) using Non-linear least squares (NLS)
m.fit <- nlsLM(freq ~ pbeta(d, N*m*p, N*m*(1-p), lower.tail=FALSE), start=list(m=0.1))
m.ci <- confint(m.fit, 'm', level=0.95)
##Fit neutral model parameter m (or Nm) using Maximum likelihood estimation (MLE)
# Negative log-likelihood of the residuals under a Normal(0, sigma) error.
sncm.LL <- function(m, sigma){
R = freq - pbeta(d, N*m*p, N*m*(1-p), lower.tail=FALSE)
R = dnorm(R, 0, sigma)
-sum(log(R))
}
m.mle <- mle(sncm.LL, start=list(m=0.1, sigma=0.1), nobs=length(p))
##Calculate Akaike's Information Criterion (AIC)
aic.fit <- AIC(m.mle, k=2)
bic.fit <- BIC(m.mle)
##Calculate goodness-of-fit (R-squared and Root Mean Squared Error)
freq.pred <- pbeta(d, N*coef(m.fit)*p, N*coef(m.fit)*(1-p), lower.tail=FALSE)
Rsqr <- 1 - (sum((freq - freq.pred)^2))/(sum((freq - mean(freq))^2))
RMSE <- sqrt(sum((freq-freq.pred)^2)/(length(freq)-1))
# Wilson binomial confidence band around the predicted frequencies.
pred.ci <- binconf(freq.pred*nrow(spp), nrow(spp), alpha=0.05, method="wilson", return.df=TRUE)
##Calculate AIC for binomial model
# Null model 1: detection follows a binomial draw of N reads at rate p.
bino.LL <- function(mu, sigma){
R = freq - pbinom(d, N, p, lower.tail=FALSE)
R = dnorm(R, mu, sigma)
-sum(log(R))
}
bino.mle <- mle(bino.LL, start=list(mu=0, sigma=0.1), nobs=length(p))
aic.bino <- AIC(bino.mle, k=2)
bic.bino <- BIC(bino.mle)
##Goodness of fit for binomial model
bino.pred <- pbinom(d, N, p, lower.tail=FALSE)
Rsqr.bino <- 1 - (sum((freq - bino.pred)^2))/(sum((freq - mean(freq))^2))
RMSE.bino <- sqrt(sum((freq - bino.pred)^2)/(length(freq) - 1))
bino.pred.ci <- binconf(bino.pred*nrow(spp), nrow(spp), alpha=0.05, method="wilson", return.df=TRUE)
##Calculate AIC for Poisson model
# Null model 2: detection follows a Poisson draw with mean N*p.
pois.LL <- function(mu, sigma){
R = freq - ppois(d, N*p, lower.tail=FALSE)
R = dnorm(R, mu, sigma)
-sum(log(R))
}
pois.mle <- mle(pois.LL, start=list(mu=0, sigma=0.1), nobs=length(p))
aic.pois <- AIC(pois.mle, k=2)
bic.pois <- BIC(pois.mle)
##Goodness of fit for Poisson model
pois.pred <- ppois(d, N*p, lower.tail=FALSE)
Rsqr.pois <- 1 - (sum((freq - pois.pred)^2))/(sum((freq - mean(freq))^2))
RMSE.pois <- sqrt(sum((freq - pois.pred)^2)/(length(freq) - 1))
pois.pred.ci <- binconf(pois.pred*nrow(spp), nrow(spp), alpha=0.05, method="wilson", return.df=TRUE)
##Results
# stats=TRUE: one-row data frame of fit statistics for all three models.
if(stats==TRUE){
fitstats <- data.frame(m=numeric(), m.ci=numeric(), m.mle=numeric(), maxLL=numeric(), binoLL=numeric(), poisLL=numeric(), Rsqr=numeric(), Rsqr.bino=numeric(), Rsqr.pois=numeric(), RMSE=numeric(), RMSE.bino=numeric(), RMSE.pois=numeric(), AIC=numeric(), BIC=numeric(), AIC.bino=numeric(), BIC.bino=numeric(), AIC.pois=numeric(), BIC.pois=numeric(), N=numeric(), Samples=numeric(), Richness=numeric(), Detect=numeric())
fitstats[1,] <- c(coef(m.fit), coef(m.fit)-m.ci[1], m.mle@coef['m'], m.mle@details$value, bino.mle@details$value, pois.mle@details$value, Rsqr, Rsqr.bino, Rsqr.pois, RMSE, RMSE.bino, RMSE.pois, aic.fit, bic.fit, aic.bino, bic.bino, aic.pois, bic.pois, N, nrow(spp), length(p), d)
return(fitstats)
} else {
# stats=FALSE: per-OTU observed vs predicted occurrence frequencies with
# confidence bounds, optionally merged with the taxonomy table.
A <- cbind(p, freq, freq.pred, pred.ci[,2:3], bino.pred, bino.pred.ci[,2:3])
A <- as.data.frame(A)
colnames(A) <- c('p', 'freq', 'freq.pred', 'pred.lwr', 'pred.upr', 'bino.pred', 'bino.lwr', 'bino.upr')
if(is.null(taxon)){
B <- A[order(A[,1]),]
} else {
B <- merge(A, taxon, by=0, all=TRUE)
row.names(B) <- B[,1]
B <- B[,-1]
B <- B[order(B[,1]),]
}
return(B)
}
}
| /script/sncm.fit.R | permissive | ShadeLab/PAPER_Shade_CurrOpinMicro | R | false | false | 5,388 | r | #Adam Burns - 2/10/2015
#From Burns et al. Contribution of neutral processes to the assembly of the gut microbial communities changes over host development
#Fits the neutral model from Sloan et al. 2006 to an OTU table and returns several fitting statistics. Alternatively, will return predicted occurrence frequencies for each OTU based on their abundance in the metacommunity when stats=FALSE.
#spp: A community table for communities of interest with local communities/samples as rows and taxa as columns. All samples must be rarefied to the same depth.
#pool: A community table for defining source community (optional; Default=NULL).
#taxon: A table listing the taxonomic calls for each otu, with OTU ids as row names and taxonomic classifications as columns.
#If stats=TRUE the function will return fitting statistics.
#If stats=FALSE the function will return a table of observed and predicted values for each otu.
#' Fit the Sloan et al. (2006) neutral community model to an OTU table and
#' compare it against binomial and Poisson null models.
#'
#' spp:   local community table (samples x taxa, equal sequencing depth).
#' pool:  optional source-community table; defaults to using spp itself.
#' stats: TRUE -> return one-row data frame of fit statistics;
#'        FALSE -> per-OTU observed/predicted occurrence frequencies.
#' taxon: optional taxonomy table with OTU ids as row names.
sncm.fit <- function(spp, pool=NULL, stats=TRUE, taxon=NULL){
# NOTE(review): require() only warns when a package is missing (library()
# would fail fast), and options(warn=-1) silences warnings for the rest of
# the session without being restored on exit -- confirm both side effects
# are intended.
require(minpack.lm)
require(Hmisc)
require(stats4)
options(warn=-1)
#Calculate the number of individuals per community
N <- mean(apply(spp, 1, sum))
#Calculate the average relative abundance of each taxa across communities
# If no explicit source pool is supplied, the local communities themselves
# define the metacommunity relative abundances.
if(is.null(pool)){
p.m <- apply(spp, 2, mean)
p.m <- p.m[p.m != 0]
p <- p.m/N
} else {
p.m <- apply(pool, 2, mean)
p.m <- p.m[p.m != 0]
p <- p.m/N
}
#Calculate the occurrence frequency of each taxa across communities
# spp.bi is a 0/1 presence/absence version of the community table.
spp.bi <- 1*(spp>0)
freq <- apply(spp.bi, 2, mean)
freq <- freq[freq != 0]
#Combine
# merge(..., by=0) joins abundance and frequency on taxon names, keeping
# only taxa present in both vectors.
C <- merge(p, freq, by=0)
C <- C[order(C[,2]),]
C <- as.data.frame(C)
C.0 <- C[!(apply(C, 1, function(y) any(y == 0))),] #Removes rows with any zero (absent in either source pool or local communities)
p <- C.0[,2]
freq <- C.0[,3]
names(p) <- C.0[,1]
names(freq) <- C.0[,1]
#Calculate the limit of detection
# One read per community is the smallest detectable relative abundance.
d = 1/N
##Fit model parameter m (or Nm) using Non-linear least squares (NLS)
m.fit <- nlsLM(freq ~ pbeta(d, N*m*p, N*m*(1-p), lower.tail=FALSE), start=list(m=0.1))
m.ci <- confint(m.fit, 'm', level=0.95)
##Fit neutral model parameter m (or Nm) using Maximum likelihood estimation (MLE)
# Negative log-likelihood of the residuals under a Normal(0, sigma) error.
sncm.LL <- function(m, sigma){
R = freq - pbeta(d, N*m*p, N*m*(1-p), lower.tail=FALSE)
R = dnorm(R, 0, sigma)
-sum(log(R))
}
m.mle <- mle(sncm.LL, start=list(m=0.1, sigma=0.1), nobs=length(p))
##Calculate Akaike's Information Criterion (AIC)
aic.fit <- AIC(m.mle, k=2)
bic.fit <- BIC(m.mle)
##Calculate goodness-of-fit (R-squared and Root Mean Squared Error)
freq.pred <- pbeta(d, N*coef(m.fit)*p, N*coef(m.fit)*(1-p), lower.tail=FALSE)
Rsqr <- 1 - (sum((freq - freq.pred)^2))/(sum((freq - mean(freq))^2))
RMSE <- sqrt(sum((freq-freq.pred)^2)/(length(freq)-1))
# Wilson binomial confidence band around the predicted frequencies.
pred.ci <- binconf(freq.pred*nrow(spp), nrow(spp), alpha=0.05, method="wilson", return.df=TRUE)
##Calculate AIC for binomial model
# Null model 1: detection follows a binomial draw of N reads at rate p.
bino.LL <- function(mu, sigma){
R = freq - pbinom(d, N, p, lower.tail=FALSE)
R = dnorm(R, mu, sigma)
-sum(log(R))
}
bino.mle <- mle(bino.LL, start=list(mu=0, sigma=0.1), nobs=length(p))
aic.bino <- AIC(bino.mle, k=2)
bic.bino <- BIC(bino.mle)
##Goodness of fit for binomial model
bino.pred <- pbinom(d, N, p, lower.tail=FALSE)
Rsqr.bino <- 1 - (sum((freq - bino.pred)^2))/(sum((freq - mean(freq))^2))
RMSE.bino <- sqrt(sum((freq - bino.pred)^2)/(length(freq) - 1))
bino.pred.ci <- binconf(bino.pred*nrow(spp), nrow(spp), alpha=0.05, method="wilson", return.df=TRUE)
##Calculate AIC for Poisson model
# Null model 2: detection follows a Poisson draw with mean N*p.
pois.LL <- function(mu, sigma){
R = freq - ppois(d, N*p, lower.tail=FALSE)
R = dnorm(R, mu, sigma)
-sum(log(R))
}
pois.mle <- mle(pois.LL, start=list(mu=0, sigma=0.1), nobs=length(p))
aic.pois <- AIC(pois.mle, k=2)
bic.pois <- BIC(pois.mle)
##Goodness of fit for Poisson model
pois.pred <- ppois(d, N*p, lower.tail=FALSE)
Rsqr.pois <- 1 - (sum((freq - pois.pred)^2))/(sum((freq - mean(freq))^2))
RMSE.pois <- sqrt(sum((freq - pois.pred)^2)/(length(freq) - 1))
pois.pred.ci <- binconf(pois.pred*nrow(spp), nrow(spp), alpha=0.05, method="wilson", return.df=TRUE)
##Results
# stats=TRUE: one-row data frame of fit statistics for all three models.
if(stats==TRUE){
fitstats <- data.frame(m=numeric(), m.ci=numeric(), m.mle=numeric(), maxLL=numeric(), binoLL=numeric(), poisLL=numeric(), Rsqr=numeric(), Rsqr.bino=numeric(), Rsqr.pois=numeric(), RMSE=numeric(), RMSE.bino=numeric(), RMSE.pois=numeric(), AIC=numeric(), BIC=numeric(), AIC.bino=numeric(), BIC.bino=numeric(), AIC.pois=numeric(), BIC.pois=numeric(), N=numeric(), Samples=numeric(), Richness=numeric(), Detect=numeric())
fitstats[1,] <- c(coef(m.fit), coef(m.fit)-m.ci[1], m.mle@coef['m'], m.mle@details$value, bino.mle@details$value, pois.mle@details$value, Rsqr, Rsqr.bino, Rsqr.pois, RMSE, RMSE.bino, RMSE.pois, aic.fit, bic.fit, aic.bino, bic.bino, aic.pois, bic.pois, N, nrow(spp), length(p), d)
return(fitstats)
} else {
# stats=FALSE: per-OTU observed vs predicted occurrence frequencies with
# confidence bounds, optionally merged with the taxonomy table.
A <- cbind(p, freq, freq.pred, pred.ci[,2:3], bino.pred, bino.pred.ci[,2:3])
A <- as.data.frame(A)
colnames(A) <- c('p', 'freq', 'freq.pred', 'pred.lwr', 'pred.upr', 'bino.pred', 'bino.lwr', 'bino.upr')
if(is.null(taxon)){
B <- A[order(A[,1]),]
} else {
B <- merge(A, taxon, by=0, all=TRUE)
row.names(B) <- B[,1]
B <- B[,-1]
B <- B[order(B[,1]),]
}
return(B)
}
}
|
# https://stackoverflow.com/questions/46224378/how-to-submit-a-form-that-seems-to-be-handled-by-javascript-using-httr-or-rvest
# https://stackoverflow.com/questions/39516673/rvest-could-not-find-possible-submission-target-when-submitting-form
# https://www.datacamp.com/community/tutorials/scraping-javascript-generated-data-with-r
# https://htmledit.squarefree.com/
# https://stackoverflow.com/questions/46304664/how-to-deal-with-captcha-when-web-scraping-using-r
# https://stackoverflow.com/questions/41466263/r-change-ip-address-programatically
# NOTE(review): a ~46-day timeout is effectively "never time out".
options(timeout= 4000000)
library(httr)
library(magrittr)
library(tidyverse)
library(rvest)
# NOTE(review): require() only warns when a package is missing; library()
# fails fast and is preferred for hard dependencies.
require(xml2)
# NOTE(review): rm(list = ls()) and setwd() in a script are discouraged --
# they clobber the caller's workspace and assume a machine-specific path.
rm(list = ls())
setwd("/media/crikket/DATA/processosTST/")
# COLLECT CASES WITH A SCHEDULED HEARING (pauta) ----
# Scrape the TST "consulta pauta" page for the chosen rapporteur/panel and
# keep the third HTML table, which lists the scheduled cases.
processosJulgar <- html_session(
#"http://aplicacao5.tst.jus.br/consultapauta/pautaForm.do?relatorProcesso=GMMHM&codOrgaoJudic=74"
"http://aplicacao5.tst.jus.br/consultapauta/pautaForm.do?relatorProcesso=GMWOC&codOrgaoJudic=69"
) %>%
html_nodes("table") %>%
.[[3]] %>%
html_table()
# FORMAT DATA ----
# Each row of column X1 looks like "<type> - <case number>"; split it into
# a two-column matrix and tag every row with panel "T2".
processosJulgar <- strsplit(processosJulgar$X1, split = " - ") %>%
do.call(rbind, .) %>%
set_colnames(c("tipoProcesso", "Processo")) %>%
data.frame(
Turma = "T2", ., stringsAsFactors = FALSE
)
# FORM-FILLING DATA ----
#processosJulgar <- read.table("baseProcessos.csv", sep = ";", header = TRUE, stringsAsFactors = FALSE)
# FUNCTION: GENERATE FORM FIELDS
# Split a Brazilian case-number string such as "0001234-56.2019.5.04.0001"
# into its six components and return them as a one-row character data frame
# (Número, Dígito, Ano, Órgao, Tribunal, Vara). The case number is
# re-padded to seven digits because leading zeros are lost upstream.
separarItens <- function(x) {
  # Break on "-" first, then on "." (regex-escaped), yielding six tokens.
  tokens <- unlist(strsplit(unlist(strsplit(x, split = "-")), split = "\\."))
  resultado <- as.data.frame(matrix(tokens, nrow = 1), stringsAsFactors = FALSE)
  colnames(resultado) <- c("Número", "Dígito", "Ano", "Órgao", "Tribunal", "Vara")
  resultado$Número <- sprintf("%07d", as.numeric(resultado$Número))
  return(resultado)
}
# Append the six parsed case-number components as new columns.
processosJulgar <- data.frame(
processosJulgar,
processosJulgar$Processo %>%
map(separarItens) %>%
do.call(rbind, .),
stringsAsFactors = FALSE)
# SITE ----
# The page hosting the search form is an inner frame that calls the
# application at the next URL.
url <- "http://www.tst.jus.br/processos-do-tst"
# APPLICATION URL
# NOTE(review): this immediately overwrites the previous assignment; only
# the application URL is actually used below.
url <- "http://aplicacao4.tst.jus.br/consultaProcessual/"
# EXTRACT DATA ----
# FUNCTION: EXTRACT THE CASE-HISTORY TABLE ----
# NOTE(review): w / valoresRef look like leftover interactive-debugging
# state; formularioParaTabela() receives its own arguments in the loop.
w <- 1
valoresRef <- processosJulgar[w,]
# Fill in and submit the TST case-lookup form for one case and return its
# history as a data frame with columns Data (date string, dd/mm/YYYY) and
# Evolucao (event description).
#
# Args:
#   valoresRef: one-row data frame holding the case-number components
#               (Número, Dígito, Ano, Órgao, Tribunal, Vara).
#   site:       base URL of the consultation application.
#   IP, PORT:   optional proxy host and port.
formularioParaTabela <- function(valoresRef, site,
IP = NULL, PORT = NULL) {
# Establish the connection to the site (through a proxy when given).
if (!(is.null(IP) && is.null(PORT))) {
siteProcessos <- html_session(site, use_proxy(url = IP, port = PORT))
} else {
siteProcessos <- html_session(site)
}
formularioInc <- siteProcessos %>%
html_nodes("form") %>%
html_form()
caminho <- formularioInc[[1]]
# Fields to fill in on the form.
formularioComp <- caminho %>%
set_values(
numeroTst = as.numeric(valoresRef$Número),
digitoTst = as.numeric(valoresRef$Dígito),
anoTst = as.numeric(valoresRef$Ano),
orgaoTst = as.numeric(valoresRef$Órgao),
tribunalTst = as.numeric(valoresRef$Tribunal),
varaTst = as.numeric(valoresRef$Vara)
)
# Submit the form.
sessao <- submit_form(siteProcessos, formularioComp)
# Grab the table nodes; the history is assumed to be the 11th table --
# TODO confirm this index is stable across result pages.
tabelas <- sessao %>%
html_nodes("table")
# Flatten the table text into alternating date/event tokens: strip CR/LF,
# turn tabs into ";" separators, then drop empty tokens and the header row.
textoParaTabela <- tabelas[[11]] %>%
html_text() %>%
gsub(pattern = "(\r|<br />)", replacement = "") %>%
gsub(pattern = "(\n|<br />)", replacement = "") %>%
gsub(pattern = "(\t|<br />)", replacement = ";") %>%
strsplit(split = ";") %>%
unlist() %>%
{.[!(. %in% c("", "Histórico do processo"))]}
# Remove observations earlier than 2013.
# min() over an empty grep result yields Inf (with a warning), which the
# is.infinite() guard below treats as "nothing to trim".
refRemov <- min(c(
grep(pattern = "/2012", textoParaTabela),
grep(pattern = "/2011", textoParaTabela)
))
if (!is.infinite(refRemov)) {
# NOTE(review): if refRemov == 1, seq(0) evaluates to c(1, 0), so the
# first token is kept instead of everything being dropped.
textoParaTabela <- textoParaTabela[seq(refRemov - 1)]
}
nTxt <- length(textoParaTabela)
# Odd positions are dates, even positions are event descriptions
# (seq(0, nTxt, 2) starts at 0; index 0 is silently dropped by R).
tabelaRes <- data.frame(
Data = textoParaTabela[seq(1, nTxt, 2)],
Evolucao = textoParaTabela[seq(0, nTxt, 2)],
stringsAsFactors = FALSE
)
return(tabelaRes)
#return(headers(siteProcessos))
}
# STRUCTURE THE TABLE FOR COLLECTION ----
processosJulgar <- processosJulgar %>% filter(tipoProcesso == "RR") # OPTIONAL
# One list element per case so the loop below can process them one by one.
processosJulgar <- split(processosJulgar, seq(nrow(processosJulgar)))
tabelasProcessos <- vector("list", length(processosJulgar))
# VECTOR OF WAIT TIMES TO THROTTLE PROCESSING
# Every third request sleeps 180-200 s to avoid triggering a CAPTCHA.
tempoEsperar <- rep(0, length(processosJulgar))
refEsperar <- seq(3, length(tempoEsperar), by = 3)
tempoEsperar[refEsperar] <- runif(n = length(refEsperar), min = 180, max = 200)
# MAIN LOOP
for (w in seq_along(processosJulgar)) {
tabelasProcessos[[w]] <- formularioParaTabela(valoresRef = processosJulgar[[w]], site = url)
print(tempoEsperar[w])
print(w)
Sys.sleep(tempoEsperar[w]) # insert wait time to avoid a CAPTCHA
}
# For each case, keep the "Conclusos para voto" events and take the
# earliest such date; unlist() strips the Date class, hence the final
# as.Date() with an explicit epoch origin.
coletarData <- lapply(tabelasProcessos, function(x) {
x %>%
filter(
grepl("Conclusos para voto", Evolucao)
) %>%
mutate(
Data = as.Date(Data, format = "%d/%m/%Y")
) %>%
summarise(
Resultado = min(Data)
) %>%
pull(Resultado)
}) %>%
unlist %>%
as.Date(origin = "1970-01-01")
| /scrape.R | no_license | putzgrillo/filtrarProcessosTST | R | false | false | 5,267 | r | # https://stackoverflow.com/questions/46224378/how-to-submit-a-form-that-seems-to-be-handled-by-javascript-using-httr-or-rvest
# https://stackoverflow.com/questions/39516673/rvest-could-not-find-possible-submission-target-when-submitting-form
# https://www.datacamp.com/community/tutorials/scraping-javascript-generated-data-with-r
# https://htmledit.squarefree.com/
# https://stackoverflow.com/questions/46304664/how-to-deal-with-captcha-when-web-scraping-using-r
# https://stackoverflow.com/questions/41466263/r-change-ip-address-programatically
options(timeout= 4000000)
library(httr)
library(magrittr)
library(tidyverse)
library(rvest)
require(xml2)
rm(list = ls())
setwd("/media/crikket/DATA/processosTST/")
# COLETAR PROCESSOS EM PREVISÃO DE PAUTA ----
processosJulgar <- html_session(
#"http://aplicacao5.tst.jus.br/consultapauta/pautaForm.do?relatorProcesso=GMMHM&codOrgaoJudic=74"
"http://aplicacao5.tst.jus.br/consultapauta/pautaForm.do?relatorProcesso=GMWOC&codOrgaoJudic=69"
) %>%
html_nodes("table") %>%
.[[3]] %>%
html_table()
# FORMATAR DADOS ----
processosJulgar <- strsplit(processosJulgar$X1, split = " - ") %>%
do.call(rbind, .) %>%
set_colnames(c("tipoProcesso", "Processo")) %>%
data.frame(
Turma = "T2", ., stringsAsFactors = FALSE
)
# DADOS PREENCHIMENTO FORMULÁRIO ----
#processosJulgar <- read.table("baseProcessos.csv", sep = ";", header = TRUE, stringsAsFactors = FALSE)
# FUNÇÃO GERAR CAMPOS FORMULÁRIO
# Split a Brazilian case-number string such as "0001234-56.2019.5.04.0001"
# into its six components and return them as a one-row character data frame
# (Número, Dígito, Ano, Órgao, Tribunal, Vara). The case number is
# re-padded to seven digits because leading zeros are lost upstream.
separarItens <- function(x) {
  # Break on "-" first, then on "." (regex-escaped), yielding six tokens.
  tokens <- unlist(strsplit(unlist(strsplit(x, split = "-")), split = "\\.")) 
  resultado <- as.data.frame(matrix(tokens, nrow = 1), stringsAsFactors = FALSE)
  colnames(resultado) <- c("Número", "Dígito", "Ano", "Órgao", "Tribunal", "Vara")
  resultado$Número <- sprintf("%07d", as.numeric(resultado$Número))
  return(resultado)
}
# Append the six parsed case-number components as new columns.
processosJulgar <- data.frame(
processosJulgar,
processosJulgar$Processo %>%
map(separarItens) %>%
do.call(rbind, .),
stringsAsFactors = FALSE)
# SITE ----
# The page hosting the search form is an inner frame that calls the
# application at the next URL.
url <- "http://www.tst.jus.br/processos-do-tst"
# APPLICATION URL
# NOTE(review): this immediately overwrites the previous assignment; only
# the application URL is actually used below.
url <- "http://aplicacao4.tst.jus.br/consultaProcessual/"
# EXTRACT DATA ----
# FUNCTION: EXTRACT THE CASE-HISTORY TABLE ----
# NOTE(review): w / valoresRef look like leftover interactive-debugging
# state; formularioParaTabela() receives its own arguments in the loop.
w <- 1
valoresRef <- processosJulgar[w,]
# Fill in and submit the TST case-lookup form for one case and return its
# history as a data frame with columns Data (date string, dd/mm/YYYY) and
# Evolucao (event description).
#
# Args:
#   valoresRef: one-row data frame holding the case-number components
#               (Número, Dígito, Ano, Órgao, Tribunal, Vara).
#   site:       base URL of the consultation application.
#   IP, PORT:   optional proxy host and port.
formularioParaTabela <- function(valoresRef, site,
IP = NULL, PORT = NULL) {
# Establish the connection to the site (through a proxy when given).
if (!(is.null(IP) && is.null(PORT))) {
siteProcessos <- html_session(site, use_proxy(url = IP, port = PORT))
} else {
siteProcessos <- html_session(site)
}
formularioInc <- siteProcessos %>%
html_nodes("form") %>%
html_form()
caminho <- formularioInc[[1]]
# Fields to fill in on the form.
formularioComp <- caminho %>%
set_values(
numeroTst = as.numeric(valoresRef$Número),
digitoTst = as.numeric(valoresRef$Dígito),
anoTst = as.numeric(valoresRef$Ano),
orgaoTst = as.numeric(valoresRef$Órgao),
tribunalTst = as.numeric(valoresRef$Tribunal),
varaTst = as.numeric(valoresRef$Vara)
)
# Submit the form.
sessao <- submit_form(siteProcessos, formularioComp)
# Grab the table nodes; the history is assumed to be the 11th table --
# TODO confirm this index is stable across result pages.
tabelas <- sessao %>%
html_nodes("table")
# Flatten the table text into alternating date/event tokens: strip CR/LF,
# turn tabs into ";" separators, then drop empty tokens and the header row.
textoParaTabela <- tabelas[[11]] %>%
html_text() %>%
gsub(pattern = "(\r|<br />)", replacement = "") %>%
gsub(pattern = "(\n|<br />)", replacement = "") %>%
gsub(pattern = "(\t|<br />)", replacement = ";") %>%
strsplit(split = ";") %>%
unlist() %>%
{.[!(. %in% c("", "Histórico do processo"))]}
# Remove observations earlier than 2013.
# min() over an empty grep result yields Inf (with a warning), which the
# is.infinite() guard below treats as "nothing to trim".
refRemov <- min(c(
grep(pattern = "/2012", textoParaTabela),
grep(pattern = "/2011", textoParaTabela)
))
if (!is.infinite(refRemov)) {
# NOTE(review): if refRemov == 1, seq(0) evaluates to c(1, 0), so the
# first token is kept instead of everything being dropped.
textoParaTabela <- textoParaTabela[seq(refRemov - 1)]
}
nTxt <- length(textoParaTabela)
# Odd positions are dates, even positions are event descriptions
# (seq(0, nTxt, 2) starts at 0; index 0 is silently dropped by R).
tabelaRes <- data.frame(
Data = textoParaTabela[seq(1, nTxt, 2)],
Evolucao = textoParaTabela[seq(0, nTxt, 2)],
stringsAsFactors = FALSE
)
return(tabelaRes)
#return(headers(siteProcessos))
}
# STRUCTURE THE TABLE FOR COLLECTION ----
processosJulgar <- processosJulgar %>% filter(tipoProcesso == "RR") # OPTIONAL
# One list element per case so the loop below can process them one by one.
processosJulgar <- split(processosJulgar, seq(nrow(processosJulgar)))
tabelasProcessos <- vector("list", length(processosJulgar))
# VECTOR OF WAIT TIMES TO THROTTLE PROCESSING
# Every third request sleeps 180-200 s to avoid triggering a CAPTCHA.
tempoEsperar <- rep(0, length(processosJulgar))
refEsperar <- seq(3, length(tempoEsperar), by = 3)
tempoEsperar[refEsperar] <- runif(n = length(refEsperar), min = 180, max = 200)
# MAIN LOOP
for (w in seq_along(processosJulgar)) {
tabelasProcessos[[w]] <- formularioParaTabela(valoresRef = processosJulgar[[w]], site = url)
print(tempoEsperar[w])
print(w)
Sys.sleep(tempoEsperar[w]) # insert wait time to avoid a CAPTCHA
}
# For each case, keep the "Conclusos para voto" events and take the
# earliest such date; unlist() strips the Date class, hence the final
# as.Date() with an explicit epoch origin.
coletarData <- lapply(tabelasProcessos, function(x) {
x %>%
filter(
grepl("Conclusos para voto", Evolucao)
) %>%
mutate(
Data = as.Date(Data, format = "%d/%m/%Y")
) %>%
summarise(
Resultado = min(Data)
) %>%
pull(Resultado)
}) %>%
unlist %>%
as.Date(origin = "1970-01-01")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qna.R
\name{log_qna_combined_byDate}
\alias{log_qna_combined_byDate}
\title{按天对数据进行合并}
\usage{
log_qna_combined_byDate(
conn = conn_rds_nsic(),
log_date = "2020-08-19",
sep = "|"
)
}
\arguments{
\item{conn}{连接}
\item{log_date}{日期}
\item{sep}{分隔符}
}
\value{
返回值
}
\description{
按天对数据进行合并
}
\examples{
log_qna_combined_byDate()
}
| /man/log_qna_combined_byDate.Rd | no_license | takewiki/caaspkg | R | false | true | 470 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qna.R
\name{log_qna_combined_byDate}
\alias{log_qna_combined_byDate}
\title{按天对数据进行合并}
\usage{
log_qna_combined_byDate(
conn = conn_rds_nsic(),
log_date = "2020-08-19",
sep = "|"
)
}
\arguments{
\item{conn}{连接}
\item{log_date}{日期}
\item{sep}{分隔符}
}
\value{
返回值
}
\description{
按天对数据进行合并
}
\examples{
log_qna_combined_byDate()
}
|
# Getting & Cleaning Data course project: build a tidy summary of the UCI
# HAR ("Human Activity Recognition Using Smartphones") data set.
# Expects the raw txt files in the working directory; writes ./tidydata.txt.
library(dplyr)
# train data
X_train <- read.table("./X_train.txt")
Y_train <- read.table("./Y_train.txt")
Sub_train <- read.table("./subject_train.txt")
# test data
X_test <- read.table("./X_test.txt")
Y_test <- read.table("./Y_test.txt")
Sub_test <- read.table("./subject_test.txt")
# data description
variable_names <- read.table("./features.txt")
# activity labels
activity_labels <- read.table("./activity_labels.txt")
# 1. Merge the training and the test sets (row-wise; the X/Y/subject
# tables must stay in the same row order).
X_total <- rbind(X_train, X_test)
Y_total <- rbind(Y_train, Y_test)
Sub_total <- rbind(Sub_train, Sub_test)
# 2. Extract only the mean() and std() measurements.
sel_var <- variable_names[grep("mean\\(\\)|std\\(\\)", variable_names[, 2]), ]
X_total <- X_total[, sel_var[, 1]]
# 3. Use descriptive activity names to name the activities.
colnames(Y_total) <- "activity"
Y_total$activitylabel <- factor(Y_total$activity, labels = as.character(activity_labels[, 2]))
activitylabel <- Y_total[, -1]
# 4. Label the data set with descriptive variable names.
colnames(X_total) <- variable_names[sel_var[, 1], 2]
# 5. Create a second, independent tidy data set with the average of each
# variable for each activity and each subject.
# BUG FIX: the column was previously named "sub", but the grouping below
# refers to "subject" -- the original script failed at group_by().
colnames(Sub_total) <- "subject"
total <- cbind(X_total, activitylabel, Sub_total)
# summarize_each(funs(mean)) is defunct in current dplyr; summarise_all()
# averages every non-grouping column.
total_mean <- total %>% group_by(activitylabel, subject) %>% summarise_all(mean)
write.table(total_mean, file = "./tidydata.txt", row.names = FALSE, col.names = TRUE)
| /run_analysis.R | no_license | winchestersamdean/getting-and-cleaning-data | R | false | false | 1,457 | r | library(dplyr)
# Getting & Cleaning Data course project: build a tidy summary of the UCI
# HAR data set (dplyr is loaded at the top of this file).
# train data
X_train <- read.table("./X_train.txt")
Y_train <- read.table("./Y_train.txt")
Sub_train <- read.table("./subject_train.txt")
# test data
X_test <- read.table("./X_test.txt")
Y_test <- read.table("./Y_test.txt")
Sub_test <- read.table("./subject_test.txt")
# data description
variable_names <- read.table("./features.txt")
# activity labels
activity_labels <- read.table("./activity_labels.txt")
# 1. Merge the training and the test sets (row-wise; the X/Y/subject
# tables must stay in the same row order).
X_total <- rbind(X_train, X_test)
Y_total <- rbind(Y_train, Y_test)
Sub_total <- rbind(Sub_train, Sub_test)
# 2. Extract only the mean() and std() measurements.
sel_var <- variable_names[grep("mean\\(\\)|std\\(\\)", variable_names[, 2]), ]
X_total <- X_total[, sel_var[, 1]]
# 3. Use descriptive activity names to name the activities.
colnames(Y_total) <- "activity"
Y_total$activitylabel <- factor(Y_total$activity, labels = as.character(activity_labels[, 2]))
activitylabel <- Y_total[, -1]
# 4. Label the data set with descriptive variable names.
colnames(X_total) <- variable_names[sel_var[, 1], 2]
# 5. Create a second, independent tidy data set with the average of each
# variable for each activity and each subject.
# BUG FIX: the column was previously named "sub", but the grouping below
# refers to "subject" -- the original script failed at group_by().
colnames(Sub_total) <- "subject"
total <- cbind(X_total, activitylabel, Sub_total)
# summarize_each(funs(mean)) is defunct in current dplyr; summarise_all()
# averages every non-grouping column.
total_mean <- total %>% group_by(activitylabel, subject) %>% summarise_all(mean)
write.table(total_mean, file = "./tidydata.txt", row.names = FALSE, col.names = TRUE)
|
library(ggplot2)
library(reshape2)
library(ggdendro)
library(grid)
library(gridExtra)
library(readr)
library(viridis)
library(cowplot)
# Root of the EARShot results tree; one subdirectory per model identifier.
base_Dir <- 'D:/Python_Programming/EARShot_TF2/Results'
identifier_List <- c('AGNES')
epoch_List <- c(4000)
# Number of hidden units whose flow maps are rendered (indices 0..511).
hidden_Unit <- 512
index <- 0
for (epoch in epoch_List)
{
# For each model identifier, render one heat-map PNG per hidden unit
# showing that unit's absolute activation ("flow") per category over time.
# NOTE(review): the enclosing epoch loop variable is never referenced in
# this body, so every epoch iteration redraws identical figures.
for (identifier in identifier_List)
{
work_Dir <- file.path(base_Dir, paste(identifier, '.', 'IDX', index, sep=''), 'Hidden')
#for (flow_Type in c("Phone", "Feature"))
for (flow_Type in c("Feature"))
{
# Make sure the PNG output directory exists.
if (!dir.exists(file.path(work_Dir,'Flow', flow_Type, 'PNG')))
{
dir.create(file.path(work_Dir,'Flow', flow_Type, 'PNG'))
}
# NOTE(review): plot_List is never filled or read afterwards.
plot_List <- list()
for (unit_Index in seq(0, hidden_Unit - 1, 1))
{
# One tab-separated file per unit: first column = row labels, remaining
# columns = activation values per time step.
flow_Data <- read_delim(
file.path(work_Dir,'Flow', flow_Type, 'TXT', paste(flow_Type, '.U_', sprintf('%04d', unit_Index), '.I_ALL.txt', sep='')),
delim= "\t",
escape_double = FALSE,
locale = locale(encoding = "UTF-8"),
trim_ws = TRUE
)
flow_Data.row_Name <- as.matrix(flow_Data[1])
# Work with absolute activation magnitudes.
flow_Data <- abs(flow_Data[,-1])
rownames(flow_Data) <- flow_Data.row_Name
# Per-time-step means (computed here but not used by the plot below).
mean_Flow_Data <- colMeans(flow_Data)
mean_Flow_Data <- as.data.frame(mean_Flow_Data)
colnames(mean_Flow_Data) <- c("Mean")
mean_Flow_Data$Step <- as.numeric(rownames(mean_Flow_Data))
col_Min <- min(as.numeric(colnames(flow_Data)), na.rm = TRUE)
col_Max <- max(as.numeric(colnames(flow_Data)), na.rm = TRUE)
# Reverse row order so the first category is drawn at the top of the map.
flow_Data$row_Name.num <- rev(1:length(rownames(flow_Data)))
key.flow_Data.row_Name <- data.frame(row_Name = rownames(flow_Data), row_Name.num = (1:length(rownames(flow_Data))))
# Long format for ggplot: one row per (category, time step) cell.
mdf <- melt(as.data.frame(flow_Data), id.vars="row_Name.num")
mdf <- merge(mdf, key.flow_Data.row_Name, by = "row_Name.num", all.x = TRUE)
ylabels = rev(rownames(flow_Data))
plot <- ggplot(mdf, aes(x=variable, y=row_Name.num)) +
geom_tile(aes(fill=value)) +
scale_fill_viridis(option="plasma", limits=c(0, 1), breaks=c(0, 1),labels=c(0, 1)) +
# x tick labels are scaled by 10 to display time in ms (see labs below).
scale_x_discrete(
breaks = c(col_Min, seq(0, col_Max, by = 5), col_Max),
labels = c(col_Min, seq(0, col_Max, by = 5), col_Max) * 10
) +
scale_y_continuous(
expand=c(0,0),
breaks = seq(1, max(mdf$row_Name.num), by = 1),
labels = ylabels,
sec.axis = dup_axis()
) +
labs(title=sprintf('%s flow Unit: %s', flow_Type, unit_Index), x= 'Time (ms)', y= flow_Type, fill="") +
theme(
title = element_text(size=20),
axis.title.x = element_text(size=20),
axis.title.y = element_text(size=20),
axis.title.y.right = element_text(size=20),
axis.text.x = element_text(size=18),
axis.text.y = element_text(size=18),
axis.ticks = element_blank(),
legend.position="right",
legend.direction="vertical",
legend.key.height = unit(20, "mm"),
plot.margin=unit(c(0,0,0,0),"cm"),
panel.grid=element_blank()
)
# The two branches differ only in output dimensions: Phone maps are
# square, Feature maps are wider.
if (flow_Type == "Phone")
{
ggsave(
filename = file.path(work_Dir,'Flow', flow_Type, 'PNG', paste(flow_Type, '.U_', sprintf('%04d', unit_Index), '.I_ALL.png', sep='')),
plot = plot,
device = "png",
width = 25,
height = 25,
units = "cm",
dpi = 300
)
}
if (flow_Type == "Feature")
{
ggsave(
filename = file.path(work_Dir,'Flow', flow_Type, 'PNG', paste(flow_Type, '.U_', sprintf('%04d', unit_Index), '.I_ALL.png', sep='')),
plot = plot,
device = "png",
width = 30,
height = 25, #10,
units = "cm",
dpi = 300
)
}
}
}
}
} | /R_Script/Phoneme_and_Feature_Flow(Fig.6).R | permissive | maglab-uconn/EARShot_TF2 | R | false | false | 4,091 | r | library(ggplot2)
library(reshape2)
library(ggdendro)
library(grid)
library(gridExtra)
library(readr)
library(viridis)
library(cowplot)
base_Dir <- 'D:/Python_Programming/EARShot_TF2/Results'
identifier_List <- c('AGNES')
epoch_List <- c(4000)
hidden_Unit <- 512
index <- 0
for (epoch in epoch_List)
{
for (identifier in identifier_List)
{
work_Dir <- file.path(base_Dir, paste(identifier, '.', 'IDX', index, sep=''), 'Hidden')
#for (flow_Type in c("Phone", "Feature"))
for (flow_Type in c("Feature"))
{
if (!dir.exists(file.path(work_Dir,'Flow', flow_Type, 'PNG')))
{
dir.create(file.path(work_Dir,'Flow', flow_Type, 'PNG'))
}
plot_List <- list()
for (unit_Index in seq(0, hidden_Unit - 1, 1))
{
flow_Data <- read_delim(
file.path(work_Dir,'Flow', flow_Type, 'TXT', paste(flow_Type, '.U_', sprintf('%04d', unit_Index), '.I_ALL.txt', sep='')),
delim= "\t",
escape_double = FALSE,
locale = locale(encoding = "UTF-8"),
trim_ws = TRUE
)
flow_Data.row_Name <- as.matrix(flow_Data[1])
flow_Data <- abs(flow_Data[,-1])
rownames(flow_Data) <- flow_Data.row_Name
mean_Flow_Data <- colMeans(flow_Data)
mean_Flow_Data <- as.data.frame(mean_Flow_Data)
colnames(mean_Flow_Data) <- c("Mean")
mean_Flow_Data$Step <- as.numeric(rownames(mean_Flow_Data))
col_Min <- min(as.numeric(colnames(flow_Data)), na.rm = TRUE)
col_Max <- max(as.numeric(colnames(flow_Data)), na.rm = TRUE)
flow_Data$row_Name.num <- rev(1:length(rownames(flow_Data)))
key.flow_Data.row_Name <- data.frame(row_Name = rownames(flow_Data), row_Name.num = (1:length(rownames(flow_Data))))
mdf <- melt(as.data.frame(flow_Data), id.vars="row_Name.num")
mdf <- merge(mdf, key.flow_Data.row_Name, by = "row_Name.num", all.x = TRUE)
ylabels = rev(rownames(flow_Data))
plot <- ggplot(mdf, aes(x=variable, y=row_Name.num)) +
geom_tile(aes(fill=value)) +
scale_fill_viridis(option="plasma", limits=c(0, 1), breaks=c(0, 1),labels=c(0, 1)) +
scale_x_discrete(
breaks = c(col_Min, seq(0, col_Max, by = 5), col_Max),
labels = c(col_Min, seq(0, col_Max, by = 5), col_Max) * 10
) +
scale_y_continuous(
expand=c(0,0),
breaks = seq(1, max(mdf$row_Name.num), by = 1),
labels = ylabels,
sec.axis = dup_axis()
) +
labs(title=sprintf('%s flow Unit: %s', flow_Type, unit_Index), x= 'Time (ms)', y= flow_Type, fill="") +
theme(
title = element_text(size=20),
axis.title.x = element_text(size=20),
axis.title.y = element_text(size=20),
axis.title.y.right = element_text(size=20),
axis.text.x = element_text(size=18),
axis.text.y = element_text(size=18),
axis.ticks = element_blank(),
legend.position="right",
legend.direction="vertical",
legend.key.height = unit(20, "mm"),
plot.margin=unit(c(0,0,0,0),"cm"),
panel.grid=element_blank()
)
if (flow_Type == "Phone")
{
ggsave(
filename = file.path(work_Dir,'Flow', flow_Type, 'PNG', paste(flow_Type, '.U_', sprintf('%04d', unit_Index), '.I_ALL.png', sep='')),
plot = plot,
device = "png",
width = 25,
height = 25,
units = "cm",
dpi = 300
)
}
if (flow_Type == "Feature")
{
ggsave(
filename = file.path(work_Dir,'Flow', flow_Type, 'PNG', paste(flow_Type, '.U_', sprintf('%04d', unit_Index), '.I_ALL.png', sep='')),
plot = plot,
device = "png",
width = 30,
height = 25, #10,
units = "cm",
dpi = 300
)
}
}
}
}
} |
# Exercise 3: Vector and function practice
# Create a vector `marbles` with 6 different colors in it (representing marbles)
# (Note: the trailing semicolon is legal but unnecessary in R.)
marbles <- c("orange","yellow", "white", "blue", "green", "black");
# Use the `sample` function to select a single marble at random.
single.marble <- sample(marbles,1)
# Write a function MarbleGame that does the following:
# - Takes in a `guess` of a marble color
# - Randomly samples a marble
# - Returns whether or not the person guessed accurately (preferably a full phrase)
# Play one round of the marble game.
#
# Args:
#   color.guess: the color the player guesses (a single string).
#   pool: the vector of marble colors to draw from. Defaults to the same six
#     colors as the global `marbles` vector defined above, so existing calls
#     behave exactly as before; the parameter generalizes the game to any pool.
#
# Returns:
#   "You guessed correctly" if a randomly drawn marble matches the guess,
#   otherwise "You guessed wrong".
MarbleGame <- function(color.guess,
                       pool = c("orange", "yellow", "white", "blue", "green", "black")) {
  # Draw one marble at random from the pool.
  picked <- sample(pool, 1)
  if (picked == color.guess) {
    return("You guessed correctly")
  } else {
    return("You guessed wrong")
  }
}
# Play the marble game!
# Bonus: Play the marble game until you win, keeping track of how many tries you take
have.not.won <- TRUE
tries <- 0
# Keep guessing (with a random guess each round) until a round is won.
while(have.not.won) {
tries <- tries + 1
guess <- sample(marbles,1)
if(MarbleGame(guess) == "You guessed correctly") {
have.not.won <- FALSE
}
}
print(tries)
## Double bonus(answer not provided): play the game 1000X (until you win) and track the average number of tries
# Is it what you expected based on the probability
correct <- 0
# Run 1000 independent rounds and count the wins; with 6 colors the
# empirical win rate should be close to 1/6.
for(i in 1:1000) {
guess <- sample(marbles,1)
if(MarbleGame(guess) == "You guessed correctly") {
correct <- correct + 1
}
}
probability <- correct / 1000
expected.probability <- 1/6
| /exercise-3/exercise.R | permissive | matthewkli97/m8-vectors | R | false | false | 1,373 | r | # Exercise 3: Vector and function practice
# Create a vector `marbles` with 6 different colors in it (representing marbles)
marbles <- c("orange","yellow", "white", "blue", "green", "black");
# Use the `sample` function to select a single marble
single.marble <- sample(marbles,1)
# Write a function MarbleGame that does the following:
# - Takes in a `guess` of a marble color
# - Randomly samples a marble
# - Returns whether or not the person guessed accurately (preferrably a full phrase)
# Play one round of the marble game: draw a random marble from the global
# `marbles` vector and report whether it matches the player's guess.
# Returns "You guessed correctly" on a match, "You guessed wrong" otherwise.
MarbleGame <- function(color.guess) {
  drawn <- sample(marbles, 1)
  if (drawn == color.guess) "You guessed correctly" else "You guessed wrong"
}
# Play the marble game!
# Bonus: Play the marble game until you win, keeping track of how many tries you take
have.not.won <- TRUE
tries <- 0
while(have.not.won) {
tries <- tries + 1
guess <- sample(marbles,1)
if(MarbleGame(guess) == "You guessed correctly") {
have.not.won <- FALSE
}
}
print(tries)
## Double bonus(answer not provided): play the game 1000X (until you win) and track the average number of tries
# Is it what you expected based on the probability
correct <- 0
for(i in 1:1000) {
guess <- sample(marbles,1)
if(MarbleGame(guess) == "You guessed correctly") {
correct <- correct + 1
}
}
probability <- correct / 1000
expected.probability <- 1/6
|
#' Get eBird birding hotspots for a region
#'
#' Returns a data frame of birding hotspots for a given region. The region
#' must be given as an eBird region code; for example, hotspots in the
#' United States use `region = "US"`.
#'
#' @param region The eBird region code for the desired region
#' @param back How many days back to collect data, default is 1
#' @param key The user's eBird API key
#'
#' @return A data frame of hotspots for the region
#'
#' @importFrom httr GET add_headers stop_for_status
#' @importFrom jsonlite fromJSON
#' @importFrom glue glue
#'
#' @export
get_hotspots <- function(region, back = 1, key) {
  url <- glue("https://api.ebird.org/v2/ref/hotspot/{region}")
  resp <- GET(url,
              add_headers("x-ebirdapitoken" = key),
              query = list(back = back,
                           fmt = "json"))
  # Fail fast with an informative error instead of trying to parse an
  # HTTP error body as hotspot JSON.
  stop_for_status(resp)
  fromJSON(rawToChar(resp$content))
}
| /R/get_hotspots.R | permissive | sjmarks/Birdr | R | false | false | 840 | r | #' Returns dataframe of birding hotspots for a given region. Must enter the regions code for region, for example if you wanted hotspots
#' in the United States you would enter "region = `US`".
#'
#' @param key The users eBird key
#' @param region The region code for the desired region
#' @param back How many days back to collect data, default is 1
#'
#' @return subregions dataframe
#'
#' @importFrom httr GET add_headers
#' @importFrom jsonlite fromJSON
#' @importFrom glue glue
#'
#' @export
# Fetch eBird hotspots for `region` (an eBird region code, e.g. "US") and
# return them as a data frame parsed from the API's JSON response.
# `back` is how many days back to collect data; `key` is the eBird API key.
get_hotspots <- function(region, back = 1, key) {
# Build the hotspot endpoint URL for the requested region.
url <- glue("https://api.ebird.org/v2/ref/hotspot/{region}")
# Query the eBird API; the token is passed via a request header.
hotspots <- GET(url,
add_headers("x-ebirdapitoken" = key),
query = list(back = back,
fmt = "json"))
# Parse the raw JSON body into a data frame.
# NOTE(review): no HTTP status check here -- an error response would be
# parsed as JSON too; confirm whether callers rely on that.
hotspots <- fromJSON(rawToChar(hotspots$content))
hotspots
}
|
library(xlsx)
library(iMRMC)
# * Creating `data-raw`. ####
# * Adding `data-raw` to `.Rbuildignore`.
# Next:
# * Add data creation scripts in data-raw
# * Use usethis::use_data() to add data to package
# Create usethis::use_data_raw()
# Open and read source data file ####
# We know that the study has 5 participants and 157 candidate mitotic figures
nReaders <- 5
readers <- c("observer.1", "observer.2", "observer.3", "observer.4", "observer.5")
nCases <- 157
cases <- 1:157
nModalities <- 5
modalities <- c("scanner.A", "scanner.B", "scanner.C", "scanner.D", "microscope")
# The source data file is an excel file with 10 sheets:
# one set of 5 sheets for each scanner and
# one set of 5 sheets for each reader.
# The data is redundant across these two sets
fileName <- file.path("data-raw", "mskcc20180627withLoc.xlsx")
# Read each sheet into different data frames
df.scanner.A <- read.xlsx(fileName, sheetIndex = 1)
df.scanner.B <- read.xlsx(fileName, sheetIndex = 2)
df.scanner.C <- read.xlsx(fileName, sheetIndex = 3)
df.scanner.D <- read.xlsx(fileName, sheetIndex = 4)
df.microscope <- read.xlsx(fileName, sheetIndex = 5)
# df.observer.1 <- read.xlsx(fileName, sheetIndex = 6)
# df.observer.2 <- read.xlsx(fileName, sheetIndex = 7)
# df.observer.3 <- read.xlsx(fileName, sheetIndex = 8)
# df.observer.4 <- read.xlsx(fileName, sheetIndex = 9)
# df.observer.5 <- read.xlsx(fileName, sheetIndex = 10)
masterRawWithLoc <- list(
df.scanner.A = df.scanner.A,
df.scanner.B = df.scanner.B,
df.scanner.C = df.scanner.C,
df.scanner.D = df.scanner.D,
df.microscope = df.microscope
# df.observer.1 = df.observer.1,
# df.observer.2 = df.observer.2,
# df.observer.3 = df.observer.3,
# df.observer.4 = df.observer.4,
# df.observer.5 = df.observer.5
)
# Check the truth across all data frames
if (!all(df.scanner.A$Ground.truth == df.scanner.B$Ground.truth)) browser()
if (!all(df.scanner.A$Ground.truth == df.scanner.C$Ground.truth)) browser()
if (!all(df.scanner.A$Ground.truth == df.scanner.D$Ground.truth)) browser()
if (!all(df.scanner.A$Ground.truth == df.microscope$Ground.truth)) browser()
# if (!all(df.scanner.A$Ground.truth == df.observer.1$Ground.truth)) browser()
# if (!all(df.scanner.A$Ground.truth == df.observer.2$Ground.truth)) browser()
# if (!all(df.scanner.A$Ground.truth == df.observer.3$Ground.truth)) browser()
# if (!all(df.scanner.A$Ground.truth == df.observer.4$Ground.truth)) browser()
# if (!all(df.scanner.A$Ground.truth == df.observer.5$Ground.truth)) browser()
# Concatenate the list of data frames to create one master data frame ####
dfMaster <- data.frame()
iModality <- 1
for (iModality in 1:5) {
df.current <- masterRawWithLoc[[iModality]]
df.current$modalityID <- modalities[iModality]
dfMaster <- rbind(dfMaster, df.current)
}
# Rename columns (misspellings)
dfMaster <- iMRMC::renameCol(dfMaster, "figure..", "targetID")
dfMaster <- iMRMC::renameCol(dfMaster, "ROI_ID", "roiID")
dfMaster <- iMRMC::renameCol(dfMaster, "Obeserver.1", "observer.1")
dfMaster <- iMRMC::renameCol(dfMaster, "Obeserver.2", "observer.2")
dfMaster <- iMRMC::renameCol(dfMaster, "Obeserver.3", "observer.3")
dfMaster <- iMRMC::renameCol(dfMaster, "Obeserver.4", "observer.4")
dfMaster <- iMRMC::renameCol(dfMaster, "Obeserver.5", "observer.5")
dfMaster <- iMRMC::renameCol(dfMaster, "Ground.truth", "truth")
# Make targetID a factor
dfMaster$targetID <- factor(dfMaster$targetID)
dfMaster$modalityID <- factor(dfMaster$modalityID)
# dfClassify: dfMaster includes rows corresponding to ROIs with no marks ####
# If there are no marks, then there are no candidates to classify.
# These rows need to be deleted ... "by hand"
dfClassify <- dfMaster
dfClassify <- dfClassify[dfClassify$targetID != 77, ]
dfClassify <- dfClassify[dfClassify$targetID != 114, ]
dfClassify$targetID <- factor(dfClassify$targetID)
# dfCountROI: Create df of counts per ROI and modality: including five readers and one truth ####
# Split the data by ROI and modality
dfMasterSplitByROIandModality <- split(dfMaster, list(dfMaster$roiID, dfMaster$modalityID))
iROI <- 1
dfCountROI <- data.frame()
for (iROI in 1:length(dfMasterSplitByROIandModality)) {
df.current <- dfMasterSplitByROIandModality[[iROI]]
dfCountROI <- rbind(
dfCountROI, data.frame(
wsiName = df.current[1, "wsiName"],
roiID = df.current[1, "roiID"],
modalityID = df.current[1, "modalityID"],
observer.1 = sum(df.current[ , "observer.1"]),
observer.2 = sum(df.current[ , "observer.2"]),
observer.3 = sum(df.current[ , "observer.3"]),
observer.4 = sum(df.current[ , "observer.4"]),
observer.5 = sum(df.current[ , "observer.5"]),
truth = sum(df.current[ , "truth"])
)
)
}
# dfCountWSI: Create df of counts per WSI and modality: including five readers and one truth ####
# Split the data by ROI and modality
dfCountROIsplitByWSI <- split(dfCountROI, list(dfCountROI$wsiName, dfCountROI$modalityID))
iWSI <- 1
dfCountWSI <- data.frame()
for (iWSI in 1:length(dfCountROIsplitByWSI)) {
df.current <- dfCountROIsplitByWSI[[iWSI]]
dfCountWSI <- rbind(
dfCountWSI, data.frame(
wsiName = df.current[1, "wsiName"],
modalityID = df.current[1, "modalityID"],
observer.1 = sum(df.current[ , "observer.1"]),
observer.2 = sum(df.current[ , "observer.2"]),
observer.3 = sum(df.current[ , "observer.3"]),
observer.4 = sum(df.current[ , "observer.4"]),
observer.5 = sum(df.current[ , "observer.5"]),
truth = sum(df.current[ , "truth"])
)
)
}
# Save data ####
dfClassify20180627 = dfClassify
dfCountWSI20180627 = dfCountWSI
dfCountROI20180627 = dfCountROI
usethis::use_data(dfClassify20180627, overwrite = TRUE)
usethis::use_data(dfCountWSI20180627, overwrite = TRUE)
usethis::use_data(dfCountROI20180627, overwrite = TRUE)
write.csv(dfClassify20180627, row.names = FALSE, file.path("data", "dfClassify20180627.csv"))
write.csv(dfCountWSI20180627, row.names = FALSE, file.path("data", "dfCountWSI20180627.csv"))
write.csv(dfCountROI20180627, row.names = FALSE, file.path("data", "dfCountROI20180627.csv"))
| /data-raw/readDataWithLoc.R | permissive | DIDSR/mitoticFigureCounts | R | false | false | 6,275 | r | library(xlsx)
library(iMRMC)
# * Creating `data-raw`. ####
# * Adding `data-raw` to `.Rbuildignore`.
# Next:
# * Add data creation scripts in data-raw
# * Use usethis::use_data() to add data to package
# Create usethis::use_data_raw()
# Open and read source data file ####
# We know that the study has 5 participants and 157 candidate mitotic figures
nReaders <- 5
readers <- c("observer.1", "observer.2", "observer.3", "observer.4", "observer.5")
nCases <- 157
cases <- 1:157
nModalities <- 5
modalities <- c("scanner.A", "scanner.B", "scanner.C", "scanner.D", "microscope")
# The source data file is an excel file with 10 sheets:
# one set of 5 sheets for each scanner and
# one set of 5 sheets for each reader.
# The data is redundant across these two sets
fileName <- file.path("data-raw", "mskcc20180627withLoc.xlsx")
# Read each sheet into different data frames
df.scanner.A <- read.xlsx(fileName, sheetIndex = 1)
df.scanner.B <- read.xlsx(fileName, sheetIndex = 2)
df.scanner.C <- read.xlsx(fileName, sheetIndex = 3)
df.scanner.D <- read.xlsx(fileName, sheetIndex = 4)
df.microscope <- read.xlsx(fileName, sheetIndex = 5)
# df.observer.1 <- read.xlsx(fileName, sheetIndex = 6)
# df.observer.2 <- read.xlsx(fileName, sheetIndex = 7)
# df.observer.3 <- read.xlsx(fileName, sheetIndex = 8)
# df.observer.4 <- read.xlsx(fileName, sheetIndex = 9)
# df.observer.5 <- read.xlsx(fileName, sheetIndex = 10)
masterRawWithLoc <- list(
df.scanner.A = df.scanner.A,
df.scanner.B = df.scanner.B,
df.scanner.C = df.scanner.C,
df.scanner.D = df.scanner.D,
df.microscope = df.microscope
# df.observer.1 = df.observer.1,
# df.observer.2 = df.observer.2,
# df.observer.3 = df.observer.3,
# df.observer.4 = df.observer.4,
# df.observer.5 = df.observer.5
)
# Check the truth across all data frames
if (!all(df.scanner.A$Ground.truth == df.scanner.B$Ground.truth)) browser()
if (!all(df.scanner.A$Ground.truth == df.scanner.C$Ground.truth)) browser()
if (!all(df.scanner.A$Ground.truth == df.scanner.D$Ground.truth)) browser()
if (!all(df.scanner.A$Ground.truth == df.microscope$Ground.truth)) browser()
# if (!all(df.scanner.A$Ground.truth == df.observer.1$Ground.truth)) browser()
# if (!all(df.scanner.A$Ground.truth == df.observer.2$Ground.truth)) browser()
# if (!all(df.scanner.A$Ground.truth == df.observer.3$Ground.truth)) browser()
# if (!all(df.scanner.A$Ground.truth == df.observer.4$Ground.truth)) browser()
# if (!all(df.scanner.A$Ground.truth == df.observer.5$Ground.truth)) browser()
# Concatenate the list of data frames to create one master data frame ####
dfMaster <- data.frame()
iModality <- 1
for (iModality in 1:5) {
df.current <- masterRawWithLoc[[iModality]]
df.current$modalityID <- modalities[iModality]
dfMaster <- rbind(dfMaster, df.current)
}
# Rename columns (misspellings)
dfMaster <- iMRMC::renameCol(dfMaster, "figure..", "targetID")
dfMaster <- iMRMC::renameCol(dfMaster, "ROI_ID", "roiID")
dfMaster <- iMRMC::renameCol(dfMaster, "Obeserver.1", "observer.1")
dfMaster <- iMRMC::renameCol(dfMaster, "Obeserver.2", "observer.2")
dfMaster <- iMRMC::renameCol(dfMaster, "Obeserver.3", "observer.3")
dfMaster <- iMRMC::renameCol(dfMaster, "Obeserver.4", "observer.4")
dfMaster <- iMRMC::renameCol(dfMaster, "Obeserver.5", "observer.5")
dfMaster <- iMRMC::renameCol(dfMaster, "Ground.truth", "truth")
# Make targetID a factor
dfMaster$targetID <- factor(dfMaster$targetID)
dfMaster$modalityID <- factor(dfMaster$modalityID)
# dfClassify: dfMaster includes rows corresponding to ROIs with no marks ####
# If there are no marks, then there are no candidates to classify.
# These rows need to be deleted ... "by hand"
dfClassify <- dfMaster
dfClassify <- dfClassify[dfClassify$targetID != 77, ]
dfClassify <- dfClassify[dfClassify$targetID != 114, ]
dfClassify$targetID <- factor(dfClassify$targetID)
# dfCountROI: Create df of counts per ROI and modality: including five readers and one truth ####
# Split the data by ROI and modality
dfMasterSplitByROIandModality <- split(dfMaster, list(dfMaster$roiID, dfMaster$modalityID))
iROI <- 1
dfCountROI <- data.frame()
for (iROI in 1:length(dfMasterSplitByROIandModality)) {
df.current <- dfMasterSplitByROIandModality[[iROI]]
dfCountROI <- rbind(
dfCountROI, data.frame(
wsiName = df.current[1, "wsiName"],
roiID = df.current[1, "roiID"],
modalityID = df.current[1, "modalityID"],
observer.1 = sum(df.current[ , "observer.1"]),
observer.2 = sum(df.current[ , "observer.2"]),
observer.3 = sum(df.current[ , "observer.3"]),
observer.4 = sum(df.current[ , "observer.4"]),
observer.5 = sum(df.current[ , "observer.5"]),
truth = sum(df.current[ , "truth"])
)
)
}
# dfCountWSI: Create df of counts per WSI and modality: including five readers and one truth ####
# Split the data by ROI and modality
dfCountROIsplitByWSI <- split(dfCountROI, list(dfCountROI$wsiName, dfCountROI$modalityID))
iWSI <- 1
dfCountWSI <- data.frame()
for (iWSI in 1:length(dfCountROIsplitByWSI)) {
df.current <- dfCountROIsplitByWSI[[iWSI]]
dfCountWSI <- rbind(
dfCountWSI, data.frame(
wsiName = df.current[1, "wsiName"],
modalityID = df.current[1, "modalityID"],
observer.1 = sum(df.current[ , "observer.1"]),
observer.2 = sum(df.current[ , "observer.2"]),
observer.3 = sum(df.current[ , "observer.3"]),
observer.4 = sum(df.current[ , "observer.4"]),
observer.5 = sum(df.current[ , "observer.5"]),
truth = sum(df.current[ , "truth"])
)
)
}
# Save data ####
dfClassify20180627 = dfClassify
dfCountWSI20180627 = dfCountWSI
dfCountROI20180627 = dfCountROI
usethis::use_data(dfClassify20180627, overwrite = TRUE)
usethis::use_data(dfCountWSI20180627, overwrite = TRUE)
usethis::use_data(dfCountROI20180627, overwrite = TRUE)
write.csv(dfClassify20180627, row.names = FALSE, file.path("data", "dfClassify20180627.csv"))
write.csv(dfCountWSI20180627, row.names = FALSE, file.path("data", "dfCountWSI20180627.csv"))
write.csv(dfCountROI20180627, row.names = FALSE, file.path("data", "dfCountROI20180627.csv"))
|
# Getting and Cleaning Data
if (!require(data.table)) install.packages("data.table"); library(data.table)
if (!require(dplyr)) install.packages("dplyr"); library(dplyr)
# Download, unzip data
f.url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
df <- "CourseDataset.zip"
if (!file.exists(df)){
download.file(f.url, destfile = df, mode='wb')
}
# Unzip once: the archive extracts to "UCI HAR Dataset" (with spaces), which
# is also the directory setwd() uses below. The original check tested
# "UCI_HAR_Dataset" (underscores), a path that never exists, so the archive
# was re-extracted on every run.
if (!file.exists("./UCI HAR Dataset")) {
  unzip(df)
}
dnld.date <- date()
# Start reading files
setwd("/cloud/project/UCI HAR Dataset")
# Reading activity
a.test <- read.table("./test/y_test.txt", header = F)
a.train <- read.table("./train/y_train.txt", header = F)
# Reading features
f.test <- read.table("./test/X_test.txt", header = F)
f.train <- read.table("./train/X_train.txt", header = F)
# Reading subject
s.test <- read.table("./test/subject_test.txt", header = F)
s.train <- read.table("./train/subject_train.txt", header = F)
# Reading Activity Lable
a.lable <- read.table("./activity_labels.txt", header = F)
# Read Feature Name
f.name <- read.table("./features.txt", header = F)
# Merge data
f.df <- rbind(f.test, f.train)
s.df <- rbind(s.test, s.train)
a.df <- rbind(a.test, a.train)
# Renaming columns data
names(a.df) <- "ActivityN"
names(a.lable) <- c("ActivityN", "Activity")
# Get Activity names
Activity <- left_join(a.df, a.lable, "ActivityN")[, 2]
# Rename subject columns
names(s.df) <- "Subject"
# Rename f.df columns using columns from f.name
names(f.df) <- f.name$V2
# Create unified Dataset
DataSet <- cbind(s.df, Activity)
DataSet <- cbind(DataSet, f.df)
# Create New dataset by extracting only the measurements on the mean and standard deviation for each measurement
sF.name <- f.name$V2[grep("mean\\(\\)|std\\(\\)", f.name$V2)]
DataNames <- c("Subject", "Activity", as.character(sF.name))
DataSet <- subset(DataSet, select=DataNames)
# Rename the columns with descriptive activity names
names(DataSet)<-gsub("^t", "time", names(DataSet))
names(DataSet)<-gsub("^f", "frequency", names(DataSet))
names(DataSet)<-gsub("Acc", "Accelerometer", names(DataSet))
names(DataSet)<-gsub("Gyro", "Gyroscope", names(DataSet))
names(DataSet)<-gsub("Mag", "Magnitude", names(DataSet))
names(DataSet)<-gsub("BodyBody", "Body", names(DataSet))
# Create independent tidy data
SecondDataSet<-aggregate(. ~Subject + Activity, DataSet, mean)
SecondDataSet<-SecondDataSet[order(SecondDataSet$Subject,SecondDataSet$Activity),]
# Save tidy dataset
write.table(SecondDataSet, file = "tidydata.txt",row.name=FALSE)
| /run_analysis.R | no_license | Arnab-eco/coursera_gCdata_proj | R | false | false | 2,526 | r | # Getting and Cleaning Data
if (!require(data.table)) install.packages("data.table"); library(data.table)
if (!require(dplyr)) install.packages("dplyr"); library(dplyr)
# Download, unzip data
f.url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
df <- "CourseDataset.zip"
if (!file.exists(df)){
download.file(f.url, destfile = df, mode='wb')
}
# Unzip once: the archive extracts to "UCI HAR Dataset" (with spaces), which
# is also the directory setwd() uses below. The original check tested
# "UCI_HAR_Dataset" (underscores), a path that never exists, so the archive
# was re-extracted on every run.
if (!file.exists("./UCI HAR Dataset")) {
  unzip(df)
}
dnld.date <- date()
# Start reading files
setwd("/cloud/project/UCI HAR Dataset")
# Reading activity
a.test <- read.table("./test/y_test.txt", header = F)
a.train <- read.table("./train/y_train.txt", header = F)
# Reading features
f.test <- read.table("./test/X_test.txt", header = F)
f.train <- read.table("./train/X_train.txt", header = F)
# Reading subject
s.test <- read.table("./test/subject_test.txt", header = F)
s.train <- read.table("./train/subject_train.txt", header = F)
# Reading Activity Lable
a.lable <- read.table("./activity_labels.txt", header = F)
# Read Feature Name
f.name <- read.table("./features.txt", header = F)
# Merge data
f.df <- rbind(f.test, f.train)
s.df <- rbind(s.test, s.train)
a.df <- rbind(a.test, a.train)
# Renaming columns data
names(a.df) <- "ActivityN"
names(a.lable) <- c("ActivityN", "Activity")
# Get Activity names
Activity <- left_join(a.df, a.lable, "ActivityN")[, 2]
# Rename subject columns
names(s.df) <- "Subject"
# Rename f.df columns using columns from f.name
names(f.df) <- f.name$V2
# Create unified Dataset
DataSet <- cbind(s.df, Activity)
DataSet <- cbind(DataSet, f.df)
# Create New dataset by extracting only the measurements on the mean and standard deviation for each measurement
sF.name <- f.name$V2[grep("mean\\(\\)|std\\(\\)", f.name$V2)]
DataNames <- c("Subject", "Activity", as.character(sF.name))
DataSet <- subset(DataSet, select=DataNames)
# Rename the columns with descriptive activity names
names(DataSet)<-gsub("^t", "time", names(DataSet))
names(DataSet)<-gsub("^f", "frequency", names(DataSet))
names(DataSet)<-gsub("Acc", "Accelerometer", names(DataSet))
names(DataSet)<-gsub("Gyro", "Gyroscope", names(DataSet))
names(DataSet)<-gsub("Mag", "Magnitude", names(DataSet))
names(DataSet)<-gsub("BodyBody", "Body", names(DataSet))
# Create independent tidy data
SecondDataSet<-aggregate(. ~Subject + Activity, DataSet, mean)
SecondDataSet<-SecondDataSet[order(SecondDataSet$Subject,SecondDataSet$Activity),]
# Save tidy dataset
write.table(SecondDataSet, file = "tidydata.txt",row.name=FALSE)
|
#calculating weighted mean copy number
#read in data for the original OTU table. Make sure to remove the first line (#contructed from OTU table) and the # from the first column.
library(readr)
ferrenburg_otu_table <- read_delim("~/ferrenburg-otu_table.txt", "\t", escape_double = FALSE, trim_ws = TRUE)
#read in data for the copy number correctd OTU table. Make sure to remove the first line (#contructed from OTU table) and the # from the first column.
library(readr)
Ferrenburg_copy_table <- read_delim("~/Ferrenburg_copy_table.txt", "\t", escape_double = FALSE, trim_ws = TRUE)
#need to remove the OTU ID's from the first column
Ferrenburg_copy_table<-Ferrenburg_copy_table[,-1]
ferrenburg_otu_table<-ferrenburg_otu_table[,-1]
#Divide the original OTU table by the copy number OTU table and write a new table
copy_number_otus<-ferrenburg_otu_table/Ferrenburg_copy_table
#need to get rid of the NaN's so we can math things
copy_number_otus[is.na(copy_number_otus)] <-0
# Calculate the relative abundance of each OTU in the original OTU table
# (can also get this from QIIME): divide every column by its column sum.
# BUG FIX: the original swept `ferrenburg_normalized`, which does not exist
# yet at this point in the script -- the input must be the raw OTU table.
ferrenburg_normalized <- sweep(ferrenburg_otu_table, 2, colSums(ferrenburg_otu_table), '/')
#Multiple the relativized abundance OTU table by the copy number OTU table
weighted_mean_copy<-ferrenburg_normalized*copy_number_otus
#take the column sums for this new file and appened it to the column names for the table
weighted_mean_col_sum<-data.frame(colSums(weighted_mean_copy))
write.table(weighted_mean_col_sum, "copy_number.txt", sep=',') | /R-files/Calculating_weighted_mean_copy_number.R | no_license | ShadeLab/PAPER_Kearns_ISMEJ_Centralia_operons_dormancy | R | false | false | 1,553 | r | #calculating weighted mean copy number
#read in data for the original OTU table. Make sure to remove the first line (#contructed from OTU table) and the # from the first column.
library(readr)
ferrenburg_otu_table <- read_delim("~/ferrenburg-otu_table.txt", "\t", escape_double = FALSE, trim_ws = TRUE)
#read in data for the copy number correctd OTU table. Make sure to remove the first line (#contructed from OTU table) and the # from the first column.
library(readr)
Ferrenburg_copy_table <- read_delim("~/Ferrenburg_copy_table.txt", "\t", escape_double = FALSE, trim_ws = TRUE)
#need to remove the OTU ID's from the first column
Ferrenburg_copy_table<-Ferrenburg_copy_table[,-1]
ferrenburg_otu_table<-ferrenburg_otu_table[,-1]
#Divide the original OTU table by the copy number OTU table and write a new table
copy_number_otus<-ferrenburg_otu_table/Ferrenburg_copy_table
#need to get rid of the NaN's so we can math things
copy_number_otus[is.na(copy_number_otus)] <-0
#calculate the relative abundance of each OTU in the original OTU table (can also get this from QIIME)
ferrenburg_normalized<-sweep(ferrenburg_normalized, 2, colSums(ferrenburg_normalized), '/')
#Multiple the relativized abundance OTU table by the copy number OTU table
weighted_mean_copy<-ferrenburg_normalized*copy_number_otus
#take the column sums for this new file and appened it to the column names for the table
weighted_mean_col_sum<-data.frame(colSums(weighted_mean_copy))
write.table(weighted_mean_col_sum, "copy_number.txt", sep=',') |
#Data Vis Final Project
library(tidyverse)
library(reshape2)
library(gridExtra)
h_2015 <- read.csv('2015.csv')
h_2016 <- read.csv('2016.csv')
h_2017 <- read.csv('2017.csv')
## Reference: rename the 2015 columns to friendlier names (left disabled).
## BUG FIX: the continuation line below was previously uncommented, which
## made this script fail to parse; the whole call is now commented out.
## colnames(h_2015) <- c("Country", "Region", "Happiness Rank", "Happiness Score",
##                       "Standard Error", "Economy", "Family", "Health",
##                       "Freedom", "Trust", "Generosity", "Dystopia Residual")
#top 10% of 2015 data and how variables effect happiness score
h_2015[1:15,]
top15_2015 <- h_2015[1:15,]
haptopmean2015 <-mean(top15_2015$Happiness.Score)
means2015 <-(colMeans(top15_2015[sapply(top15_2015, is.numeric)]))/haptopmean2015
means2015[4:10]
#bottom 10%
bottom15_2015 <- h_2015[144:158,]
hapbottommean2015 <- mean(bottom15_2015$Happiness.Score)
bottommeans2015 <-(colMeans(bottom15_2015[sapply(bottom15_2015, is.numeric)]))/hapbottommean2015
bottommeans2015[4:10]
# Pie Chart with Percentages
# Hard-coded mean contributions of each factor for the top 10% of countries
# (computed above from means2015[4:10]).
slices <- c(0.17472624,0.17401361,0.12168597,0.08294748,0.04021450,0.04822559,0.35818943 )
lbls <- c("Economy (GDP)","Family", "Health (Life Expectancy)","Freedom",
"Trust (Government Corruption)","Generosity","Dystopia Residual")
pct <- round(slices/sum(slices)*100)
lbls <- paste(lbls, pct) # add percents to labels
topgraph <- paste(lbls,"%",sep="") # add % to labels (character vector, not a plot object)
pie(slices,labels = lbls, main="How Variables Effect Happiness (Top 10%)")
# Bottom 10%: same layout, using bottommeans2015[4:10] values.
slices2 <- c(0.07727785,0.15962628,0.09370198,0.10619555,0.03920589,0.07414065,0.44984324)
lbls2 <- c("Economy (GDP)","Family", "Health (Life Expectancy)","Freedom",
"Trust (Government Corruption)","Generosity","Dystopia Residual")
pct2 <- round(slices2/sum(slices2)*100)
lbls2 <- paste(lbls2, pct2) # add percents to labels
botgraph <- paste(lbls2,"%",sep="") # add % to labels (character vector, not a plot object)
pie(slices2,labels = lbls2, main="How Variables Effect Happiness (Bottom 10%)")
# NOTE(review): `bottomgraph` is undefined (looks like a typo for `botgraph`),
# and both `topgraph`/`botgraph` are character vectors, not grobs -- this
# grid.arrange call will error as written. TODO confirm intended plot objects.
grid.arrange(topgraph, bottomgraph, nrow = 1)
########## final project
library(dplyr)
library(ggmap)
library(desc)
top_17 <- h_2017 %>%
filter(rank(desc("Happiness.Score"))<=15)
if(!requireNamespace("devtools")) install.packages("devtools")
devtools::install_github("dkahle/ggmap", ref = "tidyup")
register_google(key = "[your key here]", account_type = "premium", day_limit = 100)
world_data <- map_data("world")
worldmap <- ggplot(world_data, aes(x = long, y = lat, group = group)) +
geom_path() +
scale_y_continuous(breaks = (-2:2) * 30) +
scale_x_continuous(breaks = (-4:4) * 45)
worldmap
problem1 #####prints final map
# Create data: 3 sets of data: Low, Medium, High:
low = h_2017[1:51,]
medium = h_2017[52:104,]
high = h_2017[105:155,]
newcol<-c(low,medium,high)
library(rworldmap)
library(ggplot2)
map.world <- map_data(map="world")
#Add the data you want to map countries by to map.world
#In this example, I add lengths of country names plus some offset
map.world$name_len <- nchar(map.world$region) + sample(nrow(map.world))
gg <- ggplot()
gg <- gg + theme(legend.position="none")
gg <- gg + geom_map(data=map.world, map=map.world, aes(map_id=region, x=long, y=lat, fill=name_len))
gg <- gg + scale_fill_gradient(low = "green", high = "red", guide = "colourbar")
gg <- gg + coord_equal()
scale_fill
gg
library(rworldmap)
library(ggplot2)
map.world <- map_data(map="world")
#Add the data you want to map countries by to map.world
#In this example, I add lengths of country names plus some offset
map.world$name_len <- nchar(map.world$region) + sample(nrow(map.world))
gg <- ggplot()
gg <- gg + theme(legend.position="none")
gg <- gg + geom_map(data=map.world, map=map.world, aes(map_id=region, x=long, y=lat, fill=LMH))
gg <- gg + scale_fill_gradient(low = "green", high = "brown3", guide = "colourbar")
gg <- gg + coord_equal()
gg
library(maptools)
data(wrld_simpl)
plot(wrld_simpl,
col = c(gray(.80), "red")[grepl("^U", wrld_simpl@data$NAME) + 1])
#####################################################################
# Categorical world map of 2017 World Happiness Report scores.
# Countries are binned into three classes by hard-coded score cutoffs
# (4.72 and 5.91) and plotted with rworldmap.
library(rworldmap)
h_2017 <- read.csv('2017.csv',header=TRUE,as.is=TRUE)
# This first join is run only for its verbose matching report; its result is
# overwritten by the second joinCountryData2Map() call below.
sPDF <- joinCountryData2Map(h_2017
                            , joinCode='NAME'
                            , nameJoinColumn='Country'
                            , verbose='TRUE')
#categorise component indices
# Three-way banding of the score: red < 4.72 <= amber <= 5.91 < green.
h_2017$HappinessColor <-
  ifelse(h_2017$Happiness.Score < 4.72,'red'
         ,ifelse(h_2017$Happiness.Score > 5.91,'green'
                 ,'amber' ))
#count red, amber , greens per country
# 0/1 indicator per row (one row per country) for each band.
numReds<-
  (as.numeric(h_2017$HappinessColor =='red'))
numAmbers<-
  (as.numeric(h_2017$HappinessColor =='amber'))
numGreens<-
  (as.numeric(h_2017$HappinessColor =='green'))
#calculate HPI colour per country
# Map the bands to plot categories 3/2/1 (low / medium / high happiness).
# NOTE(review): HappinessColor only ever takes 'red'/'amber'/'green', so the
# =='blood red' comparison can never be TRUE; the numReds==1 test is what
# actually fires -- confirm the 'blood red' level was not meant to exist.
h_2017$HPIcolour <-
  ifelse(h_2017$HappinessColor=='blood red'
         | numReds==1,3
         ,ifelse(numAmbers==1,2
                 ,ifelse(numGreens==1,1
                         ,NA)))
#join data to map
sPDF <- joinCountryData2Map(h_2017
                            ,joinCode="NAME"
                            ,nameJoinColumn="Country")
#set colours
# One colour per category 1..3 as plotted below.
colourPalette <- c('navyblue'
                   ,'deepskyblue2'
                   ,'lightcyan2')
#plot map
mapDevice() #create world map shaped window
mapParams <- mapCountryData(sPDF
                            ,nameColumnToPlot='HPIcolour'
                            ,catMethod='categorical'
                            ,colourPalette=colourPalette
                            ,addLegend=FALSE
                            ,mapTitle='Countries Based on Happiness Rank')
#changing legendText
mapParams$legendText <-
  c('1 High'
    ,'2 Medium'
    ,'3 Low')
#add legend
do.call(addMapLegendBoxes
        , c(mapParams
            ,x='bottom'
            ,title="World Happiness Color"))
# Echo the map parameter list (autoprints at top level).
mapParams
| /Data Vis Final Project.R | no_license | kelsey-moon44/WorldHappiness | R | false | false | 5,626 | r | #Data Vis Final Project
library(tidyverse)
library(reshape2)
library(gridExtra)
h_2015 <- read.csv('2015.csv')
h_2016 <- read.csv('2016.csv')
h_2017 <- read.csv('2017.csv')
##colnames(h_2015)<-c("Country","Region","Happiness Rank","Happiness Score",
"Standard Error","Economy","Family","Health","Freedom","Trust","Generosity" ,"Dystopia Residual")
#top 10% of 2015 data and how variables effect happiness score
h_2015[1:15,]
top15_2015 <- h_2015[1:15,]
haptopmean2015 <-mean(top15_2015$Happiness.Score)
means2015 <-(colMeans(top15_2015[sapply(top15_2015, is.numeric)]))/haptopmean2015
means2015[4:10]
#bottom 10%
bottom15_2015 <- h_2015[144:158,]
hapbottommean2015 <- mean(bottom15_2015$Happiness.Score)
bottommeans2015 <-(colMeans(bottom15_2015[sapply(bottom15_2015, is.numeric)]))/hapbottommean2015
bottommeans2015[4:10]
# Pie Chart with Percentages
slices <- c(0.17472624,0.17401361,0.12168597,0.08294748,0.04021450,0.04822559,0.35818943 )
lbls <- c("Economy (GDP)","Family", "Health (Life Expectancy)","Freedom",
"Trust (Government Corruption)","Generosity","Dystopia Residual")
pct <- round(slices/sum(slices)*100)
lbls <- paste(lbls, pct) # add percents to labels
topgraph <- paste(lbls,"%",sep="") # ad % to labels
pie(slices,labels = lbls, main="How Variables Effect Happiness (Top 10%)")
#Bottom 10%
slices2 <- c(0.07727785,0.15962628,0.09370198,0.10619555,0.03920589,0.07414065,0.44984324)
lbls2 <- c("Economy (GDP)","Family", "Health (Life Expectancy)","Freedom",
"Trust (Government Corruption)","Generosity","Dystopia Residual")
pct2 <- round(slices2/sum(slices2)*100)
lbls2 <- paste(lbls2, pct2) # add percents to labels
botgraph <- paste(lbls2,"%",sep="") # ad % to labels
pie(slices2,labels = lbls2, main="How Variables Effect Happiness (Bottom 10%)")
grid.arrange(topgraph, bottomgraph, nrow = 1)
########## final project
library(dplyr)
library(ggmap)
library(desc)
top_17 <- h_2017 %>%
filter(rank(desc("Happiness.Score"))<=15)
if(!requireNamespace("devtools")) install.packages("devtools")
devtools::install_github("dkahle/ggmap", ref = "tidyup")
register_google(key = "[your key here]", account_type = "premium", day_limit = 100)
world_data <- map_data("world")
worldmap <- ggplot(world_data, aes(x = long, y = lat, group = group)) +
geom_path() +
scale_y_continuous(breaks = (-2:2) * 30) +
scale_x_continuous(breaks = (-4:4) * 45)
worldmap
problem1 #####prints final map
# Create data: 3 sets of data: Low, Medium, High:
low = h_2017[1:51,]
medium = h_2017[52:104,]
high = h_2017[105:155,]
newcol<-c(low,medium,high)
library(rworldmap)
library(ggplot2)
map.world <- map_data(map="world")
#Add the data you want to map countries by to map.world
#In this example, I add lengths of country names plus some offset
map.world$name_len <- nchar(map.world$region) + sample(nrow(map.world))
gg <- ggplot()
gg <- gg + theme(legend.position="none")
gg <- gg + geom_map(data=map.world, map=map.world, aes(map_id=region, x=long, y=lat, fill=name_len))
gg <- gg + scale_fill_gradient(low = "green", high = "red", guide = "colourbar")
gg <- gg + coord_equal()
scale_fill
gg
library(rworldmap)
library(ggplot2)
map.world <- map_data(map="world")
#Add the data you want to map countries by to map.world
#In this example, I add lengths of country names plus some offset
map.world$name_len <- nchar(map.world$region) + sample(nrow(map.world))
gg <- ggplot()
gg <- gg + theme(legend.position="none")
gg <- gg + geom_map(data=map.world, map=map.world, aes(map_id=region, x=long, y=lat, fill=LMH))
gg <- gg + scale_fill_gradient(low = "green", high = "brown3", guide = "colourbar")
gg <- gg + coord_equal()
gg
library(maptools)
data(wrld_simpl)
plot(wrld_simpl,
col = c(gray(.80), "red")[grepl("^U", wrld_simpl@data$NAME) + 1])
#####################################################################
library(rworldmap)
h_2017 <- read.csv('2017.csv',header=TRUE,as.is=TRUE)
sPDF <- joinCountryData2Map(h_2017
, joinCode='NAME'
, nameJoinColumn='Country'
, verbose='TRUE')
#categorise component indices
h_2017$HappinessColor <-
ifelse(h_2017$Happiness.Score < 4.72,'red'
,ifelse(h_2017$Happiness.Score > 5.91,'green'
,'amber' ))
#count red, amber , greens per country
numReds<-
(as.numeric(h_2017$HappinessColor =='red'))
numAmbers<-
(as.numeric(h_2017$HappinessColor =='amber'))
numGreens<-
(as.numeric(h_2017$HappinessColor =='green'))
#calculate HPI colour per country
h_2017$HPIcolour <-
ifelse(h_2017$HappinessColor=='blood red'
| numReds==1,3
,ifelse(numAmbers==1,2
,ifelse(numGreens==1,1
,NA)))
#join data to map
sPDF <- joinCountryData2Map(h_2017
,joinCode="NAME"
,nameJoinColumn="Country")
#set colours
colourPalette <- c('navyblue'
,'deepskyblue2'
,'lightcyan2')
#plot map
mapDevice() #create world map shaped window
mapParams <- mapCountryData(sPDF
,nameColumnToPlot='HPIcolour'
,catMethod='categorical'
,colourPalette=colourPalette
,addLegend=FALSE
,mapTitle='Countries Based on Happiness Rank')
#changing legendText
mapParams$legendText <-
c('1 High'
,'2 Medium'
,'3 Low')
#add legend
do.call(addMapLegendBoxes
, c(mapParams
,x='bottom'
,title="World Happiness Color"))
mapParams
|
library(DataExplorer)
# Shiny server: lets the user pick one of four built-in datasets and renders
# basic summaries plus DataExplorer diagnostic plots for it.
server <- function(input, output, session) {

  # Reactive handle on the dataset currently selected in the UI.
  chosen_data <- reactive({
    switch(input$dataset,
           "rock" = rock,
           "pressure" = pressure,
           "cars" = cars,
           "mtcars" = mtcars)
  })

  # Dimensions (rows, columns) of the selected dataset.
  output$dim <- renderPrint({
    dim(chosen_data())
  })

  # Column names.
  output$names <- renderPrint({
    names(chosen_data())
  })

  # Compact structure display.
  output$str <- renderPrint({
    str(chosen_data())
  })

  # Per-column summary statistics.
  output$summary <- renderPrint({
    summary(chosen_data())
  })

  # First few rows, rendered as a table.
  output$head <- renderTable({
    head(chosen_data())
  })

  # Default plot of the data frame (pairwise scatterplot matrix).
  output$plot1 <- renderPlot({
    plot(chosen_data())
  })

  # Histograms of the continuous columns.
  output$plot2 <- renderPlot({
    plot_histogram(chosen_data())
  })

  # Missing-value profile per column.
  output$plot3 <- renderPlot({
    plot_missing(chosen_data())
  })

  # Density estimates of the continuous columns.
  output$plot4 <- renderPlot({
    plot_density(chosen_data())
  })

  # Correlation heatmap of the continuous columns.
  output$plot5 <- renderPlot({
    plot_correlation(chosen_data(), type = 'continuous', 'Review.Date')
  })
}
library(DataExplorer)
server <- function(input, output,session) {
# Return the requested dataset ----
datasetInput <- reactive({
switch(input$dataset,
"rock" = rock,
"pressure" = pressure,
"cars" = cars,
"mtcars"=mtcars)
})
# Generate a dim of the dataset ----
output$dim <- renderPrint({
dim(datasetInput())
})
#get data set col names
output$names <- renderPrint({
names(datasetInput())
})
# Generate a str of the dataset ----
output$str <- renderPrint({
str(datasetInput())
})
# Generate a summary of the dataset ----
output$summary <- renderPrint({
summary(datasetInput())
})
# Show the first "5" observations ----
output$head <- renderTable({
head(datasetInput())
})
output$plot1 <- renderPlot({
plot(datasetInput())
})
output$plot2 <- renderPlot({
plot_histogram(datasetInput())
})
output$plot3 <- renderPlot({
plot_missing(datasetInput())
})
output$plot4 <- renderPlot({
plot_density(datasetInput())
})
output$plot5 <- renderPlot({
plot_correlation(datasetInput(), type = 'continuous','Review.Date')
})
} |
# Fit a two-regime threshold autoregressive (SETAR) model to the series `y`,
# with AR orders p1 (lower regime) and p2 (upper regime) and delay d: the
# regime is chosen by whether y[t-d] is below or above a threshold.  The
# threshold is either estimated (estimate.thd = TRUE) -- by minimising AIC
# ("MAIC", which also re-selects each regime's AR order) or the residual sum
# of squares ("CLS") over candidates between the a-th and b-th sample
# percentiles -- or taken from `threshold` as given.  Printing of the fit
# summary is controlled by `print`.  Returns, invisibly, a list of class
# "TAR" holding both regime fits, the threshold, residuals and statistics.
# NOTE(review): the `center` argument is accepted but never referenced in
# the body.
tar <-
function (y, p1, p2, d, is.constant1 = TRUE, is.constant2 = TRUE,
    transform = "no", center = FALSE, standard = FALSE, estimate.thd = TRUE,
    threshold, method=c("MAIC","CLS")[1], a = 0.05, b = 0.95,
    order.select = TRUE, print = FALSE)
{
# cvar: mean square of a zero-mean vector with `df` degrees of freedom lost.
cvar <-
function (x, df = 1)
{
# x is assumed to be of zero expectation
sum(x^2)/(length(x) - df)
}
# fmaic: from the R factor of a regression QR decomposition, compute AIC
# across nested AR orders and (optionally) pick the minimising order, along
# with the corresponding Gaussian log-likelihood.
fmaic <-
function (R, n, order.select = FALSE)
{
    k <- dim(R)[2]
    v <- R[-1, k]^2
    v <- rev(cumsum(rev(v)))
    k <- k - 1
    AIC <- n * log(v/n) + 2 * seq(k)
    order <- (1:k)[AIC == min(AIC)]
    like <- (-n * log(v/n))/2 - n/2 - n * log(2 * 3.14159)/2
    if (order.select) {
        maic <- min(AIC)
        return(list(MAIC = maic, order = ((1:k)[AIC == maic])[1] -
            1, AIC = AIC, like = like[order]))
    }
    else return(list(MAIC = AIC[length(AIC)], order = k - 1,
        AIC = AIC, like = like[length(AIC)]))
}
# cat1: cat() wrapper that only emits output when `print = TRUE`.
cat1 <-
function (..., print = TRUE, file = "", sep = " ", fill = FALSE,
    labels = NULL, append = FALSE)
{
    if (print) {
        if (is.character(file))
            if (file == "")
                file <- stdout()
            else if (substring(file, 1, 1) == "|") {
                file <- pipe(substring(file, 2), "w")
                on.exit(close(file))
            }
            else {
                file <- file(file, ifelse(append, "a", "w"))
                on.exit(close(file))
            }
        cat(..., file=file, sep=sep, fill=fill, labels=labels, append=append)
    }
    invisible(NULL)
}
# makedata: build lagged design matrices for both regimes, with rows sorted
# by the threshold variable.
makedata <-
function (dataf, p1, p2, d, is.constant1 = TRUE, is.constant2 = TRUE,
    thd.by.phase = FALSE)
{
    n <- dim(dataf)[1]
    nseries <- dim(dataf)[2]
    start <- max(c(p1, p2, d)) + 1
    p <- max(c(p1, p2))
    xy <- NULL
    for (i in (1:nseries)) xy <- rbind(xy, cbind(makexy(dataf[,
        i], p, start, d, thd.by.phase = thd.by.phase), i))
    xy <- dna(xy)
    xy <- xy[s <- order(xy[, p + 2]), ]
    xy1 <- setxy(xy, p, p1, nseries, is.constant1)
    xy2 <- setxy(xy, p, p2, nseries, is.constant2)
    list(xy1 = xy1, xy2 = xy2, sort.list = s)
}
# makexy: lag matrix for one series -- p lags, then the response x[t], then
# the threshold variable (x[t-d], or a first difference if thd.by.phase).
makexy <-
function (x, p, start, d, thd.by.phase = FALSE)
{
    n <- length(x)
    xy <- NULL
    for (i in (1:p)) xy <- cbind(xy, x[(start - i):(n - i)])
    if (thd.by.phase)
        xy <- cbind(xy, x[start:n], x[(start - 1):(n - 1)] -
            x[(start - 2):(n - 2)])
    else xy <- cbind(xy, x[start:n], x[(start - d):(n - d)])
    xy
}
# setxy: select the columns needed for a regime of order p1, optionally
# prepending an intercept column, with per-series blocks when nseries > 1.
setxy<-
function (old.xy, p, p1, nseries, is.coefficient = TRUE)
{
    if (p1 >= 1)
        s <- 1:p1
    else s <- NULL
    n <- dim(old.xy)[1]
    new.xy <- old.xy[, c(s, (p + 1):(p + 3)), drop = FALSE]
    temp <- new.xy[, s, drop = FALSE]
    if (is.coefficient)
        new.xy <- cbind(1, new.xy)
    if (is.coefficient)
        temp <- cbind(rep(1, n), temp)
    if (nseries == 1)
        return(new.xy)
    for (i in rev(2:nseries)) {
        select <- old.xy[, p + 3] == i
        zero <- 0 * temp
        zero[select, ] <- temp[select, ]
        new.xy <- cbind(zero, new.xy)
    }
    new.xy
}
# findstart: smallest row index by which every series has contributed p rows.
findstart <-
function (x, nseries, indexid, p)
{
    m <- dim(x)[1]
    amax <- 0
    for (i in (1:nseries)) {
        amax <- max(amax, (1:m)[(cumsum(x[, indexid] == i) ==
            p)])
    }
    amax
}
# dna: drop rows containing NA.
dna <-
function (x)
{
    x[!apply(x, 1, any.na), ]
}
# any.na: TRUE if any element of x is NA (via the "NA" comparison trick).
any.na <-
function (x)
{
    any(x == "NA")
}
# revm: reverse the row order of a matrix.
revm <-
function (m)
{
    apply(m, 2, rev)
}
# ---- main body ----
# Resolve the method flag and alias the data under the name used below.
if(method=="MAIC") MAIC=TRUE else MAIC=FALSE
dataf=y
aic.no.thd <- NA
    if (!is.matrix(dataf)) {
        temp <- cbind(dataf, dataf)
        dataf <- temp[, 1, drop = FALSE]
    }
    # Optional transform of the data before fitting.
    dataf <- switch(transform, log = log(dataf), log10 = log10(dataf),
        sqrt = sqrt(dataf), no = dataf)
    means <- apply(dataf, 2, mean, na.rm = TRUE)
    stds <- apply(dataf, 2, var, na.rm = TRUE)^0.5
    # NOTE(review): `standard` is a logical flag, yet it is passed to apply()
    # as the FUN argument here -- this branch looks broken for
    # standard = TRUE; confirm the intended standardising function.
    if (standard)
        dataf <- apply(dataf, 2, standard)
    # Build the sorted regressor matrices for both regimes.
    nseries <- dim(dataf)[2]
    res <- makedata(dataf, p1, p2, d, is.constant1, is.constant2,
        thd.by.phase = FALSE)
    xy1 <- res$xy1
    xy2 <- res$xy2
    sort.l <- res$sort.list
    m <- dim(xy1)[1]
    q1 <- dim(xy1)[2]
    q2 <- dim(xy2)[2]
    # Reorder and label the design-matrix columns for regime 1.
    s <- (q1 - 3 - p1):(q1 - 3)
    temp <- xy1[, s]
    xy1 <- cbind(temp, xy1[, -s])
    lab <- deparse(match.call()$y)
    if (p1 >= 1)
        sublab <- paste("lag", 1:p1, sep = "")
    else sublab <- NULL
    if (is.null(lab))
        lab <- ""
    if (lab == "")
        dimnames(xy1)[[2]] <- as.vector(c(outer(c("intercept",
            sublab), lab, paste, sep = " "), "", "", ""))
    else dimnames(xy1)[[2]] <- as.vector(c(outer(c("intercept",
            sublab), lab, paste, sep = "-"), "", "", ""))
    # Same reordering and labelling for regime 2.
    s <- (q2 - 3 - p2):(q2 - 3)
    temp <- xy2[, s]
    xy2 <- cbind(temp, xy2[, -s])
    if (p2 >= 1)
        sublab <- paste("lag", 1:p2, sep = "")
    else sublab <- NULL
    if (lab == "")
        dimnames(xy2)[[2]] <- as.vector(c(outer(c("intercept",
            sublab), lab, paste, sep = " "), "", "", ""))
    else dimnames(xy2)[[2]] <- as.vector(c(outer(c("intercept",
            sublab), lab, paste, sep = "-"), "", "", ""))
    # Candidate RSS/AIC traces, initialised to sentinel values.
    aic1 <- aic2 <- rss1 <- rss2 <- rep(10^10, m)
    like1 <- like2 <- rep(-10^10, m)
    lbound <- sum(dataf[dataf != "NA"] == min(dataf[dataf !=
        "NA"]))
    ubound <- sum(dataf[dataf != "NA"] == max(dataf[dataf !=
        "NA"]))
    # Bounds (i1, i2) of the threshold search, as row indices of the sorted
    # data, clipped to the [a, b] percentile window.
    i1 <- max(c(q1 - 3, lbound + 1, 2 * p1 + 1, d, findstart(xy1,
        nseries, q1, p1 + 2)))
    i1 <- max(i1, floor(a * m))
    i2 <- m - max(c(q2 - 3, ubound + 1, 2 * p2 + 1, d, findstart(revm(xy2),
        nseries, q2, p2 + 2))) - 1
    i2 <- min(i2, ceiling(b * m))
    s <- -((q1 - 1):q1)
    R <- qr.R(qr(xy1[1:i1, s]))
    if (estimate.thd) {
        # Sweep the threshold upward, updating the lower-regime QR one row
        # at a time and recording RSS / AIC / likelihood per candidate.
        posy <- q1 - 2
        rss1[i1] <- (R[posy, posy])^2
        res.fmaic <- fmaic(R, i1, order.select = order.select)
        like1[i1] <- res.fmaic$like
        aic1[i1] <- res.fmaic$MAIC
        for (i in ((i1 + 1):i2)) {
            R <- qr.R(qr(rbind(R, xy1[i, s])))
            rss1[i] <- (R[posy, posy])^2
            res.fmaic <- fmaic(R, i, order.select = order.select)
            like1[i] <- res.fmaic$like
            aic1[i] <- res.fmaic$MAIC
        }
        # Same sweep downward for the upper regime.
        s <- -((q2 - 1):q2)
        posy <- q2 - 2
        R <- qr.R(qr(xy2[(i2 + 1):m, s]))
        rss2[i2] <- (R[posy, posy])^2
        res.fmaic <- fmaic(R, m - i2, order.select = order.select)
        like2[i2] <- res.fmaic$like
        aic2[i2] <- res.fmaic$MAIC
        for (i in rev(i1:(i2 - 1))) {
            R <- qr.R(qr(rbind(R, xy2[i + 1, s])))
            rss2[i] <- (R[posy, posy])^2
            res.fmaic <- fmaic(R, m - i, order.select = order.select)
            like2[i] <- res.fmaic$like
            aic2[i] <- res.fmaic$MAIC
        }
        # Pick the threshold index: minimum combined RSS, or minimum
        # combined AIC when method is MAIC.
        rss <- rss1 + rss2
        thdindex <- ((1:m)[rss == min(rss)])[1]
        aic <- aic1 + aic2
        if (MAIC) {
            aic[-(i1:i2)] <- 10^10
            thdindex <- ((1:m)[aic == (aic.no.thd <- min(aic))])[1]
        }
        thd <- xy1[thdindex, q1 - 1]
    }
    else {
        # Threshold supplied by the caller; locate its index in sorted data.
        thd <- threshold
        thdindex <- ((1:m)[xy1[, q1 - 1] > thd])[1] - 1
    }
    # Final lower-regime fit at the chosen threshold; under MAIC the AR
    # order (hence the column subset) is re-selected first.
    s <- -((q1 - 1):q1)
    qr1 <- qr(xy1[1:thdindex, s])
    R1 <- qr.R(qr1)
if(MAIC) {order1=fmaic(R1, thdindex, order.select = order.select)$order+1
subset1=1:order1
p1=order1-is.constant1}
else subset1=-((q1 - 2):q1)
    qr1 <- lsfit(x.regime1 <- xy1[1:thdindex, subset1,
        drop = FALSE], y.regime1 <- xy1[1:thdindex, q1 - 2], intercept = FALSE)
dxy1=xy1[, subset1, drop = FALSE] # for diagnostics
dxy1[-(1:thdindex),]=0
    # Final upper-regime fit.
    # NOTE(review): p2 is derived with is.constant1 rather than is.constant2
    # below -- confirm whether that is intentional.
    s <- -((q2 - 1):q2)
    qr2 <- qr(xy2[(thdindex + 1):m, s])
    R2 <- qr.R(qr2)
if(MAIC) {order2=fmaic(R2, m-thdindex, order.select = order.select)$order+1
subset2=1:order2
p2=order2-is.constant1}
else subset2=-((q2 - 2):q2)
    qr2 <- lsfit(x.regime2 <- xy2[-(1:thdindex), subset2,
        drop = FALSE], y.regime2 <- xy2[-(1:thdindex), q2 - 2], intercept = FALSE)
dxy2=xy2[,subset2, drop = FALSE] # for diagnostics
dxy2[(1:thdindex),]=0
    # ---- printed summary (suppressed unless print = TRUE) ----
    cat1(print = print, "time series included in this analysis is: ",
        lab, "\n")
    cat1(print = print, "SETAR(2,", p1, ",", p2, ") model")
    cat1(print = print, " delay =", d, "\n")
    if (MAIC)
        method <- "Minimum AIC"
    else method <- "CLS"
    cat1(print = print, "estimated threshold = ", signif(thd,
        4), " from a", method, " fit with thresholds \nsearched from the ",
        round(i1/m * 100), " percentile to the ", round(i2/m *
            100), " percentile of all data.\n")
    cat1(print = print, "The estimated threshold is the ",
        signif(thdindex/m * 100, 3), " percentile of\nall data.\n")
    cat1(print = print, "lower regime: \n")
    if (print)
        ls.print(qr1)
    cat1(print = print, "\n\n (unbiased) RMS \n")
    rms1 <- tapply(qr1$residuals, xy1[1:thdindex, q1], cvar,
        df = p1 + is.constant1)
    cat1(print = print, signif(rms1, 4), "\n")
    cat1(print = print, " with no of data falling in the regime being \n")
    n1 <- tapply(qr1$residuals, xy1[1:thdindex, q1], length)
    cat1(print = print, rbind(lab, signif(n1, 4)), "\n")
    cat1(print = print, "\n\n (max. likelihood) RMS for each series (denominator=sample size in the regime) \n")
    cat1(print = print, rbind(lab, signif((rms1 * (n1 -
        p1 - 1))/n1, 4)), "\n")
    cat1(print = print, "\n\n upper regime: \n")
    if (print)
        ls.print(qr2)
    cat1(print = print, "\n\n (unbiased) RMS \n")
    rms2 <- tapply(qr2$residuals, xy2[(thdindex + 1):m, q2],
        cvar, df = p2 + is.constant2)
    cat1(print = print, signif(rms2, 4), "\n")
    cat1(print = print, " with no of data falling in the regime being \n")
    n2 <- tapply(qr2$residuals, xy2[(thdindex + 1):m, q2], length)
    cat1(print = print, signif(n2, 4), "\n")
    cat1(print = print, "\n\n (max. likelihood) RMS for each series (denominator=sample size in the regime)\n")
    cat1(print = print, signif((rms2 * (n2 -
        p2 - 1))/n2, 4), "\n")
    # Nominal Gaussian AIC of the full two-regime fit.
    AIC <- signif(n1 * log((rms1 * (n1 - p1 - is.constant1))/n1) + n2 *
        log((rms2 * (n2 - p2 - is.constant2))/n2) +
        +(n1+n2)*(1+log(2*pi))+ 2 * (p1 + p2 + is.constant1+is.constant2 + 1),
        4)
    cat1(print = print, "\n Nominal AIC is ", AIC, "\n")
    # Residuals (raw and standardised) restored to original time order.
    residuals <- c(qr1$residuals, qr2$residuals)
    residuals[sort.l] <- residuals
    std.res <- c(qr1$residuals/as.double(rms1^0.5), qr2$residuals/as.double(rms2^0.5))
    std.res[sort.l] <- std.res
dxy1[sort.l,]=dxy1
dxy2[sort.l,]=dxy2
# Assemble the "TAR" result object.
# NOTE(review): rss2 is scaled by (n2 - p2 - 2) while rss1 uses
# (n1 - p1 - 1), and the printed unbiased RMS above uses (n2 - p2 - 1);
# the "- 2" looks like a typo -- confirm against the package source.
res=list(dxy1=dxy1,dxy2=dxy2,p1=p1,q1=q1,d=d,qr1=qr1,qr2=qr2,x.regime1 = x.regime1, y.regime1 = y.regime1,
        x.regime2 = x.regime2, y.regime2 = y.regime2, thd = thd,
        thdindex = thdindex, qr1 = qr1, qr2 = qr2, i1 = i1, i2 = i2,
        x = xy1[, q1 - 1], m = m, rss1 = as.vector(rms1 * (n1 -
            p1 - 1)), rss2 = as.vector(rms2 * (n2 - p2 - 2)),
        n1 = as.vector(n1), n2 = as.vector(n2), std.res = std.res,
        p1=p1, p2=p2, rms1=rms1,rms2=rms2,
        is.constant1=is.constant1,is.constant2=is.constant2,
        residuals = residuals, AIC = AIC, aic.no.thd = aic.no.thd,y=y,
        like = max(like1 + like2),method=method)
class(res)="TAR"
invisible(res)
}
| /TSA/R/tar.R | no_license | ingted/R-Examples | R | false | false | 11,594 | r | tar <-
function (y, p1, p2, d, is.constant1 = TRUE, is.constant2 = TRUE,
transform = "no", center = FALSE, standard = FALSE, estimate.thd = TRUE,
threshold, method=c("MAIC","CLS")[1], a = 0.05, b = 0.95,
order.select = TRUE, print = FALSE)
{
cvar <-
function (x, df = 1)
{
# x is assumed to be of zero expectation
sum(x^2)/(length(x) - df)
}
fmaic <-
function (R, n, order.select = FALSE)
{
k <- dim(R)[2]
v <- R[-1, k]^2
v <- rev(cumsum(rev(v)))
k <- k - 1
AIC <- n * log(v/n) + 2 * seq(k)
order <- (1:k)[AIC == min(AIC)]
like <- (-n * log(v/n))/2 - n/2 - n * log(2 * 3.14159)/2
if (order.select) {
maic <- min(AIC)
return(list(MAIC = maic, order = ((1:k)[AIC == maic])[1] -
1, AIC = AIC, like = like[order]))
}
else return(list(MAIC = AIC[length(AIC)], order = k - 1,
AIC = AIC, like = like[length(AIC)]))
}
cat1 <-
function (..., print = TRUE, file = "", sep = " ", fill = FALSE,
labels = NULL, append = FALSE)
{
if (print) {
if (is.character(file))
if (file == "")
file <- stdout()
else if (substring(file, 1, 1) == "|") {
file <- pipe(substring(file, 2), "w")
on.exit(close(file))
}
else {
file <- file(file, ifelse(append, "a", "w"))
on.exit(close(file))
}
cat(..., file=file, sep=sep, fill=fill, labels=labels, append=append)
}
invisible(NULL)
}
makedata <-
function (dataf, p1, p2, d, is.constant1 = TRUE, is.constant2 = TRUE,
thd.by.phase = FALSE)
{
n <- dim(dataf)[1]
nseries <- dim(dataf)[2]
start <- max(c(p1, p2, d)) + 1
p <- max(c(p1, p2))
xy <- NULL
for (i in (1:nseries)) xy <- rbind(xy, cbind(makexy(dataf[,
i], p, start, d, thd.by.phase = thd.by.phase), i))
xy <- dna(xy)
xy <- xy[s <- order(xy[, p + 2]), ]
xy1 <- setxy(xy, p, p1, nseries, is.constant1)
xy2 <- setxy(xy, p, p2, nseries, is.constant2)
list(xy1 = xy1, xy2 = xy2, sort.list = s)
}
makexy <-
function (x, p, start, d, thd.by.phase = FALSE)
{
n <- length(x)
xy <- NULL
for (i in (1:p)) xy <- cbind(xy, x[(start - i):(n - i)])
if (thd.by.phase)
xy <- cbind(xy, x[start:n], x[(start - 1):(n - 1)] -
x[(start - 2):(n - 2)])
else xy <- cbind(xy, x[start:n], x[(start - d):(n - d)])
xy
}
setxy<-
function (old.xy, p, p1, nseries, is.coefficient = TRUE)
{
if (p1 >= 1)
s <- 1:p1
else s <- NULL
n <- dim(old.xy)[1]
new.xy <- old.xy[, c(s, (p + 1):(p + 3)), drop = FALSE]
temp <- new.xy[, s, drop = FALSE]
if (is.coefficient)
new.xy <- cbind(1, new.xy)
if (is.coefficient)
temp <- cbind(rep(1, n), temp)
if (nseries == 1)
return(new.xy)
for (i in rev(2:nseries)) {
select <- old.xy[, p + 3] == i
zero <- 0 * temp
zero[select, ] <- temp[select, ]
new.xy <- cbind(zero, new.xy)
}
new.xy
}
findstart <-
function (x, nseries, indexid, p)
{
m <- dim(x)[1]
amax <- 0
for (i in (1:nseries)) {
amax <- max(amax, (1:m)[(cumsum(x[, indexid] == i) ==
p)])
}
amax
}
dna <-
function (x)
{
x[!apply(x, 1, any.na), ]
}
any.na <-
function (x)
{
any(x == "NA")
}
revm <-
function (m)
{
apply(m, 2, rev)
}
if(method=="MAIC") MAIC=TRUE else MAIC=FALSE
dataf=y
aic.no.thd <- NA
if (!is.matrix(dataf)) {
temp <- cbind(dataf, dataf)
dataf <- temp[, 1, drop = FALSE]
}
dataf <- switch(transform, log = log(dataf), log10 = log10(dataf),
sqrt = sqrt(dataf), no = dataf)
means <- apply(dataf, 2, mean, na.rm = TRUE)
stds <- apply(dataf, 2, var, na.rm = TRUE)^0.5
if (standard)
dataf <- apply(dataf, 2, standard)
nseries <- dim(dataf)[2]
res <- makedata(dataf, p1, p2, d, is.constant1, is.constant2,
thd.by.phase = FALSE)
xy1 <- res$xy1
xy2 <- res$xy2
sort.l <- res$sort.list
m <- dim(xy1)[1]
q1 <- dim(xy1)[2]
q2 <- dim(xy2)[2]
s <- (q1 - 3 - p1):(q1 - 3)
temp <- xy1[, s]
xy1 <- cbind(temp, xy1[, -s])
lab <- deparse(match.call()$y)
if (p1 >= 1)
sublab <- paste("lag", 1:p1, sep = "")
else sublab <- NULL
if (is.null(lab))
lab <- ""
if (lab == "")
dimnames(xy1)[[2]] <- as.vector(c(outer(c("intercept",
sublab), lab, paste, sep = " "), "", "", ""))
else dimnames(xy1)[[2]] <- as.vector(c(outer(c("intercept",
sublab), lab, paste, sep = "-"), "", "", ""))
s <- (q2 - 3 - p2):(q2 - 3)
temp <- xy2[, s]
xy2 <- cbind(temp, xy2[, -s])
if (p2 >= 1)
sublab <- paste("lag", 1:p2, sep = "")
else sublab <- NULL
if (lab == "")
dimnames(xy2)[[2]] <- as.vector(c(outer(c("intercept",
sublab), lab, paste, sep = " "), "", "", ""))
else dimnames(xy2)[[2]] <- as.vector(c(outer(c("intercept",
sublab), lab, paste, sep = "-"), "", "", ""))
aic1 <- aic2 <- rss1 <- rss2 <- rep(10^10, m)
like1 <- like2 <- rep(-10^10, m)
lbound <- sum(dataf[dataf != "NA"] == min(dataf[dataf !=
"NA"]))
ubound <- sum(dataf[dataf != "NA"] == max(dataf[dataf !=
"NA"]))
i1 <- max(c(q1 - 3, lbound + 1, 2 * p1 + 1, d, findstart(xy1,
nseries, q1, p1 + 2)))
i1 <- max(i1, floor(a * m))
i2 <- m - max(c(q2 - 3, ubound + 1, 2 * p2 + 1, d, findstart(revm(xy2),
nseries, q2, p2 + 2))) - 1
i2 <- min(i2, ceiling(b * m))
s <- -((q1 - 1):q1)
R <- qr.R(qr(xy1[1:i1, s]))
if (estimate.thd) {
posy <- q1 - 2
rss1[i1] <- (R[posy, posy])^2
res.fmaic <- fmaic(R, i1, order.select = order.select)
like1[i1] <- res.fmaic$like
aic1[i1] <- res.fmaic$MAIC
for (i in ((i1 + 1):i2)) {
R <- qr.R(qr(rbind(R, xy1[i, s])))
rss1[i] <- (R[posy, posy])^2
res.fmaic <- fmaic(R, i, order.select = order.select)
like1[i] <- res.fmaic$like
aic1[i] <- res.fmaic$MAIC
}
s <- -((q2 - 1):q2)
posy <- q2 - 2
R <- qr.R(qr(xy2[(i2 + 1):m, s]))
rss2[i2] <- (R[posy, posy])^2
res.fmaic <- fmaic(R, m - i2, order.select = order.select)
like2[i2] <- res.fmaic$like
aic2[i2] <- res.fmaic$MAIC
for (i in rev(i1:(i2 - 1))) {
R <- qr.R(qr(rbind(R, xy2[i + 1, s])))
rss2[i] <- (R[posy, posy])^2
res.fmaic <- fmaic(R, m - i, order.select = order.select)
like2[i] <- res.fmaic$like
aic2[i] <- res.fmaic$MAIC
}
rss <- rss1 + rss2
thdindex <- ((1:m)[rss == min(rss)])[1]
aic <- aic1 + aic2
if (MAIC) {
aic[-(i1:i2)] <- 10^10
thdindex <- ((1:m)[aic == (aic.no.thd <- min(aic))])[1]
}
thd <- xy1[thdindex, q1 - 1]
}
else {
thd <- threshold
thdindex <- ((1:m)[xy1[, q1 - 1] > thd])[1] - 1
}
s <- -((q1 - 1):q1)
qr1 <- qr(xy1[1:thdindex, s])
R1 <- qr.R(qr1)
if(MAIC) {order1=fmaic(R1, thdindex, order.select = order.select)$order+1
subset1=1:order1
p1=order1-is.constant1}
else subset1=-((q1 - 2):q1)
qr1 <- lsfit(x.regime1 <- xy1[1:thdindex, subset1,
drop = FALSE], y.regime1 <- xy1[1:thdindex, q1 - 2], intercept = FALSE)
dxy1=xy1[, subset1, drop = FALSE] # for diagnostics
dxy1[-(1:thdindex),]=0
s <- -((q2 - 1):q2)
qr2 <- qr(xy2[(thdindex + 1):m, s])
R2 <- qr.R(qr2)
if(MAIC) {order2=fmaic(R2, m-thdindex, order.select = order.select)$order+1
subset2=1:order2
p2=order2-is.constant1}
else subset2=-((q2 - 2):q2)
qr2 <- lsfit(x.regime2 <- xy2[-(1:thdindex), subset2,
drop = FALSE], y.regime2 <- xy2[-(1:thdindex), q2 - 2], intercept = FALSE)
dxy2=xy2[,subset2, drop = FALSE] # for diagnostics
dxy2[(1:thdindex),]=0
cat1(print = print, "time series included in this analysis is: ",
lab, "\n")
cat1(print = print, "SETAR(2,", p1, ",", p2, ") model")
cat1(print = print, " delay =", d, "\n")
if (MAIC)
method <- "Minimum AIC"
else method <- "CLS"
cat1(print = print, "estimated threshold = ", signif(thd,
4), " from a", method, " fit with thresholds \nsearched from the ",
round(i1/m * 100), " percentile to the ", round(i2/m *
100), " percentile of all data.\n")
cat1(print = print, "The estimated threshold is the ",
signif(thdindex/m * 100, 3), " percentile of\nall data.\n")
cat1(print = print, "lower regime: \n")
if (print)
ls.print(qr1)
cat1(print = print, "\n\n (unbiased) RMS \n")
rms1 <- tapply(qr1$residuals, xy1[1:thdindex, q1], cvar,
df = p1 + is.constant1)
cat1(print = print, signif(rms1, 4), "\n")
cat1(print = print, " with no of data falling in the regime being \n")
n1 <- tapply(qr1$residuals, xy1[1:thdindex, q1], length)
cat1(print = print, rbind(lab, signif(n1, 4)), "\n")
cat1(print = print, "\n\n (max. likelihood) RMS for each series (denominator=sample size in the regime) \n")
cat1(print = print, rbind(lab, signif((rms1 * (n1 -
p1 - 1))/n1, 4)), "\n")
cat1(print = print, "\n\n upper regime: \n")
if (print)
ls.print(qr2)
cat1(print = print, "\n\n (unbiased) RMS \n")
rms2 <- tapply(qr2$residuals, xy2[(thdindex + 1):m, q2],
cvar, df = p2 + is.constant2)
cat1(print = print, signif(rms2, 4), "\n")
cat1(print = print, " with no of data falling in the regime being \n")
n2 <- tapply(qr2$residuals, xy2[(thdindex + 1):m, q2], length)
cat1(print = print, signif(n2, 4), "\n")
cat1(print = print, "\n\n (max. likelihood) RMS for each series (denominator=sample size in the regime)\n")
cat1(print = print, signif((rms2 * (n2 -
p2 - 1))/n2, 4), "\n")
AIC <- signif(n1 * log((rms1 * (n1 - p1 - is.constant1))/n1) + n2 *
log((rms2 * (n2 - p2 - is.constant2))/n2) +
+(n1+n2)*(1+log(2*pi))+ 2 * (p1 + p2 + is.constant1+is.constant2 + 1),
4)
cat1(print = print, "\n Nominal AIC is ", AIC, "\n")
residuals <- c(qr1$residuals, qr2$residuals)
residuals[sort.l] <- residuals
std.res <- c(qr1$residuals/as.double(rms1^0.5), qr2$residuals/as.double(rms2^0.5))
std.res[sort.l] <- std.res
dxy1[sort.l,]=dxy1
dxy2[sort.l,]=dxy2
res=list(dxy1=dxy1,dxy2=dxy2,p1=p1,q1=q1,d=d,qr1=qr1,qr2=qr2,x.regime1 = x.regime1, y.regime1 = y.regime1,
x.regime2 = x.regime2, y.regime2 = y.regime2, thd = thd,
thdindex = thdindex, qr1 = qr1, qr2 = qr2, i1 = i1, i2 = i2,
x = xy1[, q1 - 1], m = m, rss1 = as.vector(rms1 * (n1 -
p1 - 1)), rss2 = as.vector(rms2 * (n2 - p2 - 2)),
n1 = as.vector(n1), n2 = as.vector(n2), std.res = std.res,
p1=p1, p2=p2, rms1=rms1,rms2=rms2,
is.constant1=is.constant1,is.constant2=is.constant2,
residuals = residuals, AIC = AIC, aic.no.thd = aic.no.thd,y=y,
like = max(like1 + like2),method=method)
class(res)="TAR"
invisible(res)
}
|
\name{oneStepATT}
\Rdversion{1.5.0.2}
\alias{oneStepATT}
\title{Calculate Additive treatment effect among the treated (oneStepATT)}
\description{
An internal function called by the \code{tmle} function to calculate the additive treatment effect among the treated (ATT) using a universal least favorable submodel (on the transformed scale if outcomes are continuous). The function is called a second time with updated arguments to calculate the additive treatment effect among the controls (ATC). Missingness in the outcome data is allowed.
}
\usage{
oneStepATT(Y, A, Delta, Q, g1W, pDelta1, depsilon, max_iter, gbounds, Qbounds)
}
\arguments{
\item{Y}{continuous or binary outcome variable}
\item{A}{binary treatment indicator, \code{1} - treatment, \code{0} - control}
\item{Delta}{indicator of missing outcome. \code{1} - observed, \code{0} - missing}
\item{Q}{a 3-column matrix \code{(Q(A,W), Q(1,W), Q(0,W))}}
\item{g1W}{treatment mechanism estimates, \eqn{P(A=1|W)}}
\item{pDelta1}{censoring mechanism estimates, a 2-column matrix [\eqn{P(Delta=1|A=0,W)}, \eqn{P(Delta=1|A=1,W)}]}
\item{depsilon}{step size for delta moves, set to 0.001}
\item{max_iter}{maximum number of iterations before terminating without convergence}
\item{gbounds}{bounds on the propensity score for untreated subjects}
\item{Qbounds}{alpha bounds on the logit scale}
}
\value{
\item{psi}{effect estimate (on the transformed scale for continuous outcomes)}
\item{IC}{influence function}
\item{conv}{TRUE if procedure converged, FALSE otherwise}
}
\author{Susan Gruber}
\seealso{
	\code{\link{tmle}}
}
| /man/oneStepATT.Rd | no_license | gcgibson/tmle | R | false | false | 1,607 | rd | \name{oneStepATT}
\Rdversion{1.5.0.2}
\alias{oneStepATT}
\title{Calculate Additive treatment effect among the treated (oneStepATT)}
\description{
An internal function called by the \code{tmle} function to calculate the additive treatment effect among the treated (ATT) using a universal least favorable submodel (on the transformed scale if outcomes are continuous). The function is called a second time with updated arguments to calculate the additive treatment effect among the controls (ATC). Missingness in the outcome data is allowed.
}
\usage{
oneStepATT(Y, A, Delta, Q, g1W, pDelta1, depsilon, max_iter, gbounds, Qbounds)
}
\arguments{
\item{Y}{continuous or binary outcome variable}
\item{A}{binary treatment indicator, \code{1} - treatment, \code{0} - control}
\item{Delta}{indicator of missing outcome. \code{1} - observed, \code{0} - missing}
\item{Q}{a 3-column matrix \code{(Q(A,W), Q(1,W), Q(0,W))}}
\item{g1W}{treatment mechanism estimates, \eqn{P(A=1|W)}}
\item{pDelta1}{censoring mechanism estimates, a 2-column matrix [\eqn{P(Delta=1|A=0,W)}, \eqn{P(Delta=1|A=1,W)}]}
\item{depsilon}{step size for delta moves, set to 0.001}
\item{max_iter}{maximum number of iterations before terminating without convergence}
\item{gbounds}{bounds on the propensity score for untreated subjects}
\item{Qbounds}{alpha bounds on the logit scale}
}
\value{
\item{psi}{effect estimate (on the transformed scale for continuous outcomes)}
\item{IC}{influence function}
\item{conv}{TRUE if procedure converged, FALSE otherwise}
}
\author{Susan Gruber}
\seealso{
\code{\link{tmle}},
}
|
# Fit a LASSO (alpha = 1) Gaussian model with 10-fold cross-validation on the
# skin training set and append a summary of the fitted path to a log file.
library(glmnet)
# Original used `head = T`, which relied on partial argument matching and on
# the reassignable T; read.csv implies header = TRUE and sep = ",".
mydata <- read.csv("../../../../TrainingSet/FullSet/AvgRank/skin.csv")
# Predictors are columns 4..end; the response is column 1.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible cross-validation fold assignment
# Renamed from `glm`, which shadowed stats::glm.
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 1,
                    family = "gaussian", standardize = FALSE)
# Append the glmnet fit summary to the run's log file.
sink('./skin_098.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
| /Model/EN/AvgRank/skin/skin_098.R | no_license | esbgkannan/QSMART | R | false | false | 340 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/skin.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=1,family="gaussian",standardize=FALSE)
sink('./skin_098.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
%% WARNING: This file was automatically generated from the associated
%% fra_data.mid file. Do NOT edit this Rd file to make a change. Instead,
%% edit the fra_data.mid file in the project MID directory. Once the
%% fra_data.mid file has been updated, this Rd file, and all other
%% documentation (such as corresponding LaTeX, SGML and HTML documentation)
%% should be regenerated using the mid.pl Perl script.
%% R documentation for the ecgrr function
\name{ecgrr}
\alias{ecgrr}
\title{Electrocardiogram R-R Interval Data}
\concept{measured time series}
\description{These data are from the file \code{rr1.txt}
in the `RR interval time series modeling:
A challenge from PhysioNet and Computers in Cardiology 2002' site of PhysioNet.
sponsored by NIH's National Center for Research Resources.
The data are the RR intervals (beat-to-beat intervals measured
between successive peaks of the QRS complex) for a patient in normal sinus rhythm
(record 16265 of the MIT-BIH database).}
\seealso{
\code{\link{beamchaos}}, \code{\link{eegduke}}, \code{\link{lorenz}}, \code{\link{pd5si}}.}
\examples{
plot(ecgrr)
}
\keyword{data}
| /man/ecgrr.Rd | no_license | wconstan/fractal | R | false | false | 1,168 | rd | %% WARNING: This file was automatically generated from the associated
%% fra_data.mid file. Do NOT edit this Rd file to make a change. Instead,
%% edit the fra_data.mid file in the project MID directory. Once the
%% fra_data.mid file has been updated, this Rd file, and all other
%% documentation (such as corresponding LaTeX, SGML and HTML documentation)
%% should be regenerated using the mid.pl Perl script.
%% R documentation for the ecgrr function
\name{ecgrr}
\alias{ecgrr}
\title{Electrocardiogram R-R Interval Data}
\concept{measured time series}
\description{These data are from the file \code{rr1.txt}
in the `RR interval time series modeling:
A challenge from PhysioNet and Computers in Cardiology 2002' site of PhysioNet.
sponsored by NIH's National Center for Research Resources.
The data are the RR intervals (beat-to-beat intervals measured
between successive peaks of the QRS complex) for patients in normal sinus rhythm
(record 16265 of the MIT-BIH database).}
\seealso{
\code{\link{beamchaos}}, \code{\link{eegduke}}, \code{\link{lorenz}}, \code{\link{pd5si}}.}
\examples{
plot(ecgrr)
}
\keyword{data}
|
# Function for plotting the 6 correlation coefficients (e.g., betw richness and MRD, richness
# and time, MRD and environment,etc) as a function of clade origin time.
# NOTE: May not be worth doing this over early time slices. Perhaps just for last time point,
# in which case time loop can be commented out and set t = max(stats.output$time)
# Plot, for each time slice, the 6+ correlation coefficients (richness vs MRD,
# richness vs time, MRD vs environment, etc.) as a function of clade richness,
# one 3x3 page of panels per time slice, written to a PDF in 'output.dir'.
#
# Args:
#   stats.output          - data frame; column 1 must be 'sim', and it must
#                           contain time, n.regions, clade.richness and the
#                           correlation columns plotted below.
#   sim.params            - one-row table of simulation parameters; columns
#                           1..9 plus 'disturb_frequency' and
#                           'temperate_disturb_intensity' are used for labels.
#   min.num.data.pts      - minimum number of non-NA points required to draw
#                           a panel (otherwise an empty panel is drawn).
#   min.num.spp.per.clade - clades with fewer species are excluded.
#   min.num.regions       - clades occupying fewer regions are excluded.
#   output.dir            - directory the PDF is written to.
#
# Side effect: writes 'corrs_vs_claderichness_sim<ID>.pdf'; returns dev.off()'s
# value invisibly.
clade.richness.corr.plot = function(stats.output,
                                    sim.params,
                                    min.num.data.pts = 10,
                                    min.num.spp.per.clade = 30,
                                    min.num.regions = 5,
                                    output.dir) {

  timeslices = unique(stats.output$time[!is.na(stats.output$time)])

  # Draw one panel: column 'var' of 'x' versus log10 clade richness, with a
  # red smoothing spline overlaid. Falls back to an empty panel when there are
  # too few non-NA points. 'spline.df' is read from the enclosing frame.
  # (Note: the original code labeled the empty r.time.rich panel
  # "Time in Region-..." but the data panel "Time in region-..."; the label is
  # now consistent.)
  draw.panel = function(x, var, ylab, t, use.ylim = TRUE) {
    vals = x[[var]]
    ok = !is.na(vals)
    if (sum(ok) > min.num.data.pts) {
      if (use.ylim) {
        plot(log10(x$clade.richness), vals, xlab = "", ylab = ylab, ylim = c(-1, 1))
      } else {
        plot(log10(x$clade.richness), vals, xlab = "", ylab = ylab)
      }
      points(smooth.spline(log10(x$clade.richness[ok]), vals[ok], df = spline.df),
             type = 'l', col = 'red')
    } else {
      plot(1, 1, xlab = "", ylab = ylab, type = "n", xlim = c(0, t), ylim = c(-1, 1))
    }
  }

  # Shade a horizontal band on the current panel (e.g. the zone of strong
  # positive or strong negative correlation).
  shade.band = function(x, ybottom, ytop) {
    rect(-1, ybottom, log10(max(x$clade.richness)) + .5, ytop,
         col = rgb(.1, .1, .1, .1), lty = 0)
  }

  pdf(paste(output.dir, '/corrs_vs_claderichness_sim', stats.output$sim[2], '.pdf', sep = ''),
      height = 8, width = 9)
  par(mfrow = c(3, 3), oma = c(5, 1, 4, 0), mar = c(2, 5, 2, 1))

  for (t in timeslices) {
    # Restrict to sufficiently rich, sufficiently widespread clades at time t;
    # drop column 1 ('sim').
    x = subset(stats.output,
               time == t & n.regions >= min.num.regions & clade.richness >= min.num.spp.per.clade,
               select = 2:ncol(stats.output))

    spline.df = 4

    draw.panel(x, 'r.env.rich', "Environment-Richness correlation", t)
    shade.band(x, .5, 1.1)

    draw.panel(x, 'r.MRD.rich', "Mean Root Distance-Richness correlation", t)
    shade.band(x, -1.1, -.5)

    draw.panel(x, 'r.PSV.rich', "PSV-Richness correlation", t)
    shade.band(x, .5, 1.1)

    draw.panel(x, 'r.time.rich', "Time in region-Richness correlation", t)
    shade.band(x, .5, 1.1)

    draw.panel(x, 'r.env.MRD', "Environment-MRD correlation", t)
    shade.band(x, -1.1, -.5)

    draw.panel(x, 'r.env.PSV', "Environment-PSV correlation", t)
    shade.band(x, .5, 1.1)

    draw.panel(x, 'r.rich.ext', "Extinction Rate-Richness correlation", t)

    draw.panel(x, 'r.ext.reg', "Extinction Rate-Region correlation", t)

    # Gamma is not bounded by [-1, 1], so let the data set the y-range.
    draw.panel(x, 'gamma.stat', "Gamma", t, use.ylim = FALSE)
    shade.band(x, -1.645, 1.1)  # -1.645 is the critical value for rejecting constant rates

    mtext("Clade Richness", 1, outer = T, line = 2)

    # Describe the carrying-capacity configuration for the page title.
    if (sim.params[1, 8] == 'on' & sim.params[1, 9] == 'on') {
      K.text = 'K gradient present'
    } else if (sim.params[1, 8] == 'on' & sim.params[1, 9] == 'off') {
      K.text = 'K constant across regions'
    } else if (sim.params[1, 8] == 'off') {
      K.text = 'no K'
    }

    mtext(paste('Sim', sim.params[1, 1], ', Origin =', sim.params[1, 3], ', w =', sim.params[1, 4],
                ', sigma =', sim.params[1, 7], ', dist.freq =', sim.params[1, 'disturb_frequency'],
                ', dist.intens.temp =', sim.params[1, 'temperate_disturb_intensity'],
                ',\ndisp = ', sim.params[1, 6], ', specn =', sim.params[1, 5],
                ',', K.text, ', time =', t), outer = T)
  }
  dev.off()
}
|
### =========================================================================
### Vector objects
### -------------------------------------------------------------------------
###
### The Vector virtual class is a general container for storing a finite
### sequence i.e. an ordered finite collection of elements.
###
### The metadata columns of a Vector are either a DataTable derivative or
### absent, hence this union class for the "elementMetadata" slot.
setClassUnion("DataTable_OR_NULL", c("DataTable", "NULL"))

### Root virtual class for all vector-like S4 containers. It extends
### Annotated and adds the "elementMetadata" slot i.e. the per-element
### metadata columns (a.k.a. mcols), which can be set to NULL.
setClass("Vector",
    contains="Annotated",
    representation(
        "VIRTUAL",
        elementMetadata="DataTable_OR_NULL"
    )
)

### Beware that:
###   > is(factor(), "vector_OR_Vector")
###   [1] TRUE
### even though:
###   > is(factor(), "vector")
###   [1] FALSE
###   > is(factor(), "Vector")
###   [1] FALSE
### See R/S4-utils.R for other examples of messed up inheritance with union
### classes.
### TODO: Should we explicitly add "factor" to this union?
setClassUnion("vector_OR_Vector", c("vector", "Vector"))  # vector-like objects
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### parallelSlotNames()
###
### For internal use only.
###
### Must return the names of all the slots in Vector object 'x' that are
### "parallel" to 'x'. Slot 'foo' is considered to be "parallel" to 'x' if:
### (a) 'x@foo' is NULL or an object for which NROW() is equal to
### 'length(x)', and
### (b) the i-th element in 'x@foo' describes some component of the i-th
### element in 'x'.
### For example, the "start", "width", "NAMES", and "elementMetadata" slots
### of an IRanges object are parallel to the object. Note that the "NAMES"
### and "elementMetadata" slots can be set to NULL.
### The *first" slot name returned by parallelSlotNames() is used to get the
### length of 'x'.
###
### Returns the names of all the slots of 'x' that run "parallel" to 'x'
### (see the comment block above for the exact definition). The *first*
### name returned is the slot used to compute length(x).
setGeneric("parallelSlotNames",
    function(x) standardGeneric("parallelSlotNames")
)

### At the Vector level the only parallel slot is "elementMetadata".
setMethod("parallelSlotNames", "Vector", function(x) "elementMetadata")

### Methods for Vector subclasses only need to specify the parallel slots they
### add to their parent class. See Hits-class.R file for an example.

### parallelVectorNames() is for internal use only. The default method
### derives the names from the columns obtained by turning an empty instance
### of class(x) into a data.frame, minus the "value" column.
setGeneric("parallelVectorNames",
    function(x) standardGeneric("parallelVectorNames"))
setMethod("parallelVectorNames", "ANY",
    function(x) setdiff(colnames(as.data.frame(new(class(x)))), "value"))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### updateObject()
###
### The default method (defined in BiocGenerics) does complicated, costly,
### and dangerous things, and sometimes it actually breaks valid objects
### (e.g. it breaks valid OverlapEncodings objects). So we overwrite it with
### a method for Vector objects that does nothing! That way it's simple,
### cheap, and safe ;-). And that's really all it needs to do at the moment.
### UPDATE: Starting with S4Vectors 0.23.19, all DataFrame instances need
### to be replaced with DFrame instances. So the updateObject() method for
### Vector objects got updated from doing nothing (no-op) to call
### updateObject() on the elementMetadata component of the object.
### Minimal updateObject() method: only the metadata columns need updating
### (DataFrame instances must be replaced with DFrame instances since
### S4Vectors 0.23.19 -- see the comment block above).
setMethod("updateObject", "Vector",
    function(object, ..., verbose=FALSE)
    {
        ## Update from DataFrame to DFrame.
        object@elementMetadata <- updateObject(object@elementMetadata,
                                               ..., verbose=verbose)
        object
    }
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Getters
###
### length() of a Vector is defined as the NROW of its first parallel slot.
setMethod("length", "Vector",
    function(x) NROW(slot(x, parallelSlotNames(x)[[1L]]))
)

setMethod("lengths", "Vector",
    function(x, use.names=TRUE)
    {
        if (!isTRUEorFALSE(use.names))
            stop("'use.names' must be TRUE or FALSE")
        ans <- elementNROWS(x)  # This is wrong! See ?Vector for the details.
        if (!use.names)
            names(ans) <- NULL
        ans
    }
)

### A Vector behaves as a 1-dimensional object: NROW() is its length and
### ROWNAMES() its names.
setMethod("NROW", "Vector", function(x) length(x))

setMethod("ROWNAMES", "Vector", function(x) names(x))
### 3 accessors for the same slot: elementMetadata(), mcols(), and values().
### mcols() is the recommended one, use of elementMetadata() or values() is
### discouraged.
setGeneric("elementMetadata", signature="x",
    function(x, use.names=TRUE, ...) standardGeneric("elementMetadata")
)

setMethod("elementMetadata", "Vector",
    function(x, use.names=TRUE, ...)
    {
        if (!isTRUEorFALSE(use.names))
            stop("'use.names' must be TRUE or FALSE")
        ## Fix old DataFrame instances on-the-fly.
        ans <- updateObject(x@elementMetadata, check=FALSE)
        ## The slot is stored without rownames (see validity); propagate the
        ## names of 'x' on request.
        if (use.names && !is.null(ans))
            rownames(ans) <- names(x)
        ans
    }
)

setGeneric("mcols", signature="x",
    function(x, use.names=TRUE, ...) standardGeneric("mcols")
)

### mcols() simply delegates to elementMetadata().
setMethod("mcols", "Vector",
    function(x, use.names=TRUE, ...)
        elementMetadata(x, use.names=use.names, ...)
)

### values() is a legacy alias for elementMetadata().
setGeneric("values", function(x, ...) standardGeneric("values"))

setMethod("values", "Vector", function(x, ...) elementMetadata(x, ...))

### Default: a Vector contains no NAs (subclasses that can hold NAs
### presumably override these two methods).
setMethod("anyNA", "Vector", function(x, recursive=FALSE) FALSE)

setMethod("is.na", "Vector", function(x) rep.int(FALSE, length(x)))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Validity
###
### Check that length(x) is a single, non-negative, attribute-free number.
### Returns NULL if valid, otherwise a string describing the problem.
.valid.Vector.length <- function(x)
{
    len <- length(x)
    if (!(isSingleNumber(len) && len >= 0L))
        return("'length(x)' must be a single non-negative number")
    if (!is.null(attributes(len)))
        return("'length(x)' must be a single integer with no attributes")
    NULL
}
### Checks that (a) parallelSlotNames(x) is a well-formed character vector
### ending in "elementMetadata", and (b) every parallel slot, including the
### metadata columns, is either NULL or has the same NROW as 'x'.
### Returns NULL if valid, otherwise a character vector of error messages.
.valid.Vector.parallelSlots <- function(x)
{
    x_len <- length(x)
    x_pslotnames <- parallelSlotNames(x)
    if (!is.character(x_pslotnames)
     || anyMissing(x_pslotnames)
     || anyDuplicated(x_pslotnames)) {
        msg <- c("'parallelSlotNames(x)' must be a character vector ",
                 "with no NAs and no duplicates")
        return(paste(msg, collapse=""))
    }
    if (x_pslotnames[[length(x_pslotnames)]] != "elementMetadata") {
        msg <- c("last string in 'parallelSlotNames(x)' ",
                 "must be \"elementMetadata\"")
        return(paste(msg, collapse=""))
    }
    msg <- NULL
    ## Check all parallel slots except "elementMetadata", which is handled
    ## separately below (through the mcols() accessor).
    for (slotname in head(x_pslotnames, -1L)) {
        tmp <- slot(x, slotname)
        if (!(is.null(tmp) || NROW(tmp) == x_len)) {
            what <- paste0("x@", slotname)
            msg <- c(msg, paste0("'", what, "' is not parallel to 'x'"))
        }
    }
    tmp <- mcols(x, use.names=FALSE)
    if (!(is.null(tmp) || nrow(tmp) == x_len)) {
        msg <- c(msg, "'mcols(x)' is not parallel to 'x'")
    }
    msg
}
### Validate the names of 'x': they must be absent (NULL) or a plain
### character vector (no attributes) whose length matches length(x).
### Returns NULL if valid, otherwise a diagnostic string.
.valid.Vector.names <- function(x)
{
    nms <- names(x)
    if (is.null(nms))
        return(NULL)
    if (!(is.character(nms) && is.null(attributes(nms))))
        return(paste0("'names(x)' must be NULL or a character vector ",
                      "with no attributes"))
    if (length(nms) != length(x))
        return("'names(x)' must be NULL or have the length of 'x'")
    NULL
}
### Validate the metadata columns of 'x': mcols(x) must be NULL or a
### DataTable whose rownames, when present, are identical to names(x).
### Returns NULL if valid, otherwise a diagnostic string.
.valid.Vector.mcols <- function(x)
{
    mc <- mcols(x, use.names=FALSE)
    if (is.null(mc))
        return(NULL)
    if (!is(mc, "DataTable"))
        return("'mcols(x)' must be a DataTable object or NULL")
    ## 'mc' is a DataTable object.
    rn <- rownames(mc)
    if (!is.null(rn) && !identical(rn, names(x))) {
        return(paste0("the rownames of DataTable 'mcols(x)' ",
                      "must match the names of 'x'"))
    }
    NULL
}
### Combine all the per-aspect validity checks above and register the
### result as the validity method for Vector.
.valid.Vector <- function(x)
{
    c(.valid.Vector.length(x),
      .valid.Vector.parallelSlots(x),
      .valid.Vector.names(x),
      .valid.Vector.mcols(x))
}

setValidity2("Vector", .valid.Vector)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Setters
###
setGeneric("elementMetadata<-",
    function(x, ..., value) standardGeneric("elementMetadata<-"))

### NOT exported but used in the IRanges and GenomicRanges packages.
### Normalize a user-supplied 'mcols' replacement value: coerce it to the
### class expected by the "elementMetadata" slot of class 'x_class', strip
### its rownames, and recycle its rows up to 'x_len' (warning when 'x_len'
### is not a multiple of the number of rows supplied).
normarg_mcols <- function(mcols, x_class, x_len)
{
    ## Note that 'mcols_target_class' could also be obtained with
    ## 'getClassDef(x_class)@slots[["elementMetadata"]]', in which
    ## case the class name would be returned with the "package" attribute.
    mcols_target_class <- getSlots(x_class)[["elementMetadata"]]
    ok <- is(mcols, mcols_target_class)
    if (is.null(mcols)) {
        if (ok)
            return(mcols)  # NULL
        mcols <- make_zero_col_DFrame(x_len)
    } else if (is.list(mcols)) {
        ## Note that this will also handle an 'mcols' that is a data.frame
        ## or a data.frame derivative (e.g. data.table object).
        if (ok)
            return(mcols)
        mcols <- new_DataFrame(mcols)
    } else {
        mcols <- updateObject(mcols, check=FALSE)
    }
    ## Final coercion in case the conversions above did not land on the
    ## target class already.
    ok <- is(mcols, mcols_target_class)
    if (!ok)
        mcols <- as(mcols, mcols_target_class)
    ## From now on, 'mcols' is guaranteed to be a DataTable object.
    if (!is.null(rownames(mcols)))
        rownames(mcols) <- NULL
    mcols_nrow <- nrow(mcols)
    if (mcols_nrow == x_len)
        return(mcols)
    ## 'one' only drives the singular/plural wording of the messages below.
    one <- ncol(mcols) == 1L
    if (mcols_nrow > x_len && mcols_nrow != 1L)
        stop(wmsg("trying to set ", if (one) "a " else "",
                  "metadata column", if (one) "" else "s", " ",
                  "of length ", mcols_nrow, " on an object of length ", x_len))
    if (mcols_nrow == 0L)
        stop(wmsg("trying to set ", if (one) "a " else "", "zero length ",
                  "metadata column", if (one) "" else "s", " ",
                  "on a non-zero length object "))
    if (x_len %% mcols_nrow != 0L)
        warning(wmsg("You supplied ", if (one) "a " else "",
                     "metadata column", if (one) "" else "s", " ",
                     "of length ", mcols_nrow, " to set on an object ",
                     "of length ", x_len, ". However please note that ",
                     "the latter is not a multiple of the former."))
    ## Recycle the rows to reach 'x_len'.
    i <- rep(seq_len(mcols_nrow), length.out=x_len)
    extractROWS(mcols, i)
}
### The real work (validation/coercion/recycling of 'value') is delegated
### to normarg_mcols() above; validity checking is skipped.
setReplaceMethod("elementMetadata", "Vector",
    function(x, ..., value)
    {
        value <- normarg_mcols(value, class(x), length(x))
        BiocGenerics:::replaceSlots(x, elementMetadata=value, check=FALSE)
    }
)

setGeneric("mcols<-", function(x, ..., value) standardGeneric("mcols<-"))

### mcols<- simply delegates to elementMetadata<-.
setReplaceMethod("mcols", "Vector",
    function(x, ..., value) `elementMetadata<-`(x, ..., value=value)
)

### values<- is a legacy alias for elementMetadata<-.
setGeneric("values<-", function(x, ..., value) standardGeneric("values<-"))

setReplaceMethod("values", "Vector",
                 function(x, value) {
                     elementMetadata(x) <- value
                     x
                 })
### rename(): change element names via old-name=new-name pairs passed in '...'.
setGeneric("rename", function(x, ...) standardGeneric("rename"))
### Rename elements of 'x' according to old-name=new-name pairs supplied in
### '...'. Every old name must already exist on 'x'; unmatched old names
### raise an error.
.renameVector <- function(x, ...) {
  replacements <- c(...)
  if (!is.character(replacements) || anyNA(replacements)) {
    stop("arguments in '...' must be character and not NA")
  }
  missing_from <- setdiff(names(replacements), names(x))
  if (length(missing_from) > 0L) {
    stop("Some 'from' names in value not found on 'x': ",
         paste(missing_from, collapse = ", "))
  }
  idx <- match(names(replacements), names(x))
  names(x)[idx] <- replacements
  x
}
### The same implementation works for ordinary vectors and Vector derivatives.
setMethod("rename", "vector", .renameVector)
setMethod("rename", "Vector", .renameVector)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Coercion
###
### All the as.<mode>() coercions below delegate to as.vector(x, mode=...).
setMethod("as.logical", "Vector",
    function(x) as.vector(x, mode="logical")
)

setMethod("as.integer", "Vector",
    function(x) as.vector(x, mode="integer")
)

setMethod("as.numeric", "Vector",
    function(x) as.vector(x, mode="numeric")
)

### Even though as.double() is a generic function (as reported by
### 'getGeneric("as.double")', it seems impossible to define methods for this
### generic. Trying to do so like in the code below actually creates an
### "as.numeric" method.
#setMethod("as.double", "Vector",
#    function(x) as.vector(x, mode="double")
#)

setMethod("as.complex", "Vector",
    function(x) as.vector(x, mode="complex")
)

setMethod("as.character", "Vector",
    function(x) as.vector(x, mode="character")
)

setMethod("as.raw", "Vector",
    function(x) as.vector(x, mode="raw")
)

### Make the same coercions available through the as(x, "class") interface.
setAs("Vector", "vector", function(from) as.vector(from))
setAs("Vector", "logical", function(from) as.logical(from))
setAs("Vector", "integer", function(from) as.integer(from))
setAs("Vector", "numeric", function(from) as.numeric(from))
setAs("Vector", "complex", function(from) as.complex(from))
setAs("Vector", "character", function(from) as.character(from))
setAs("Vector", "raw", function(from) as.raw(from))

setAs("Vector", "factor", function(from) as.factor(from))

setAs("Vector", "data.frame", function(from) as.data.frame(from))
### S3/S4 combo for as.data.frame.Vector
### S3 part of the S3/S4 combo for as.data.frame on Vector objects; forwards
### to the S4 generic. Fix: 'row.names' is now passed through -- the previous
### code hard-coded 'row.names=NULL', silently discarding user-supplied row
### names and making the parameter dead.
as.data.frame.Vector <- function(x, row.names=NULL, optional=FALSE, ...) {
    as.data.frame(x, row.names=row.names, optional=optional, ...)
}
### S4 part of the S3/S4 combo: coerce to an ordinary vector first, then let
### base R build the data.frame.
setMethod("as.data.frame", "Vector",
    function(x, row.names=NULL, optional=FALSE, ...)
    {
        x <- as.vector(x)
        as.data.frame(x, row.names=row.names, optional=optional, ...)
    })
### S3/S4 combo for as.matrix on Vector objects.
### NOTE(review): the S3 function accepts '...' but does not forward it to
### as.matrix(x) -- confirm no caller passes extra arguments that matter.
as.matrix.Vector <- function(x, ...) {
    as.matrix(x)
}

### Coerce to an ordinary vector first, then let base R build the matrix.
setMethod("as.matrix", "Vector", function(x) {
    as.matrix(as.vector(x))
})
### Return the namespace environment of the package that defines class(x),
### falling back to the global environment when the class carries no
### "package" attribute.
classNamespace <- function(x) {
  pkg <- packageSlot(class(x))
  if (is.null(pkg)) {
    .GlobalEnv
  } else {
    getNamespace(pkg)
  }
}
### Build an environment exposing the "fixed columns" of 'x' (its names plus
### its parallel vectors) as lazy bindings. Each binding computes
### 'tform(accessor(x))' on first access, then caches the result by replacing
### the active binding with a regular variable in 'env'.
makeFixedColumnEnv <- function(x, parent, tform = identity) {
  env <- new.env(parent=parent)
  ## Accessors are looked up in the namespace of the package that defines
  ## class(x) (or in the global environment as a fallback).
  pvnEnv <- classNamespace(x)
  lapply(c("names", parallelVectorNames(x)), function(nm) {
    accessor <- get(nm, pvnEnv, mode="function")
    makeActiveBinding(nm, function() {
      val <- tform(accessor(x))
      ## Cache: drop the active binding and store the computed value.
      rm(list=nm, envir=env)
      assign(nm, val, env)
      val
    }, env)
  })
  env
}
### Expose 'x' as an environment: the fixed columns shadow the metadata
### columns, which live in the parent environment (built from mcols(x)).
setMethod("as.env", "Vector", function(x, enclos, tform = identity) {
  parent <- as.env(mcols(x, use.names=FALSE), enclos, tform)
  addSelfRef(x, makeFixedColumnEnv(x, parent, tform))
})

### S3/S4 combo for as.list.Vector: go through the List representation.
as.list.Vector <- function(x, ...) as.list(x, ...)
setMethod("as.list", "Vector", function(x, ...) as.list(as(x, "List"), ...))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Subsetting
###
### The "[" and "[<-" methods for Vector objects are just delegating to
### extractROWS() and replaceROWS() for performing the real work. Most of
### the times, the author of a Vector subclass only needs to implement
### an "extractROWS" and "replaceROWS" method for his/her objects.
###
### The "[" method for Vector objects supports the 'x[i, j]' form to
### allow the user to conveniently subset the metadata columns thru 'j'.
### Note that a Vector subclass with a true 2-D semantic (e.g.
### SummarizedExperiment) needs to overwrite this. This means that code
### intended to operate on an arbitrary Vector derivative 'x' should not
### use this feature as there is no guarantee that 'x' supports it. For
### this reason this feature should preferably be used interactively only.
### Row subsetting is delegated to extractROWS() (via subset_along_ROWS);
### the optional 'j' subsets the metadata columns (see the caveats in the
### comment block above -- not all Vector derivatives support this form).
setMethod("[", "Vector",
    function(x, i, j, ..., drop=TRUE)
    {
        ans <- subset_along_ROWS(x, i, , ..., drop=drop)
        if (!missing(j))
            mcols(ans) <- mcols(ans, use.names=FALSE)[ , j, drop=FALSE]
        ans
    }
)
### We provide a default "extractROWS" method for Vector objects that subsets
### all the parallel slots.
### Note that this method will work out-of-the-box and do the right thing
### on most Vector subclasses as long as parallelSlotNames() reports the
### names of all the parallel slots on objects of the subclass (some Vector
### subclasses might require a "parallelSlotNames" method for this to happen).
### For those Vector subclasses on which extractROWS() does not work
### out-of-the-box nor do the right thing, it is strongly advised to override
### the method for Vector objects rather than trying to override the "["
### method for Vector objects with a specialized method. The specialized
### "extractROWS" method will typically delegate to the method below via the
### use of callNextMethod(). See "extractROWS" method for Hits for an example.
### Default "extractROWS" method: subset every parallel slot along 'i'.
setMethod("extractROWS", "Vector",
    function(x, i)
    {
        ## Fix old objects on-the-fly (e.g. old GRanges or GAlignments
        ## instances).
        x <- updateObject(x, check=FALSE)
        i <- normalizeSingleBracketSubscript(i, x, as.NSBS=TRUE)
        x_pslotnames <- parallelSlotNames(x)
        ans_pslots <- lapply(setNames(x_pslotnames, x_pslotnames),
                             function(slotname)
                                 extractROWS(slot(x, slotname), i))
        ## Does NOT validate the object before returning it, because, most of
        ## the times, this is not needed. There are exceptions though. See
        ## for example the "extractROWS" method for Hits objects.
        do.call(BiocGenerics:::replaceSlots,
                c(list(x), ans_pslots, list(check=FALSE)))
    }
)
### 'x[i] <- value' supports appending (subscripts past the end of 'x', via
### mergeROWS) and deletion ('value=NULL' removes the selected elements).
setReplaceMethod("[", "Vector",
    function(x, i, j, ..., value)
    {
        if (!missing(j) || length(list(...)) > 0L)
            stop("invalid subsetting")
        nsbs <- normalizeSingleBracketSubscript(i, x, as.NSBS=TRUE,
                                                allow.append=TRUE)
        li <- length(nsbs)
        if (li == 0L) {
            ## Surprisingly, in that case, `[<-` on standard vectors does not
            ## even look at 'value'. So neither do we...
            return(x)
        }
        value <- normalizeSingleBracketReplacementValue(value, x)
        if (is.null(value)) {
            ## Deletion: keep only the elements NOT selected by 'i'.
            return(extractROWS(x, complement(nsbs)))
        }
        value <- recycleSingleBracketReplacementValue(value, x, nsbs)
        mergeROWS(x, i, value)
    }
)
### mergeROWS() = replaceROWS() + support for appending past the end of 'x'.
setMethod("mergeROWS", c("Vector", "ANY"),
    function(x, i, value)
    {
        nsbs <- normalizeSingleBracketSubscript(i, x, as.NSBS=TRUE,
                                                allow.append=TRUE)
        ## Pure replacement: no subscript goes past the end of 'x'.
        if (max(nsbs) <= NROW(x)) {
            nsbs@upper_bound_is_strict <- TRUE
            return(replaceROWS(x, nsbs, value))
        }
        idx <- as.integer(nsbs)
        oob <- idx > NROW(x)
        value_idx <- integer(max(nsbs) - NROW(x))
        ## handles replacement in the appended region
        value_idx[idx[oob] - NROW(x)] <- seq_along(value)[oob]
        ## Every appended position must be filled -- no holes allowed.
        if (any(value_idx == 0L)) {
            stop("appending gaps is not supported")
        }
        new_values <- extractROWS(value, value_idx)
        names(new_values) <- if (is.character(i)) i[oob] else NULL
        x <- bindROWS(x, list(new_values), check=FALSE)
        ## Finally perform the in-bounds replacements.
        replaceROWS(x, idx[!oob], extractROWS(value, !oob))
    }
)
### Work on any Vector object on which bindROWS() and extractROWS() work.
### Assume that 'value' is compatible with 'x'.
### Default "replaceROWS" method, implemented as concatenate-then-reorder:
### works on any Vector object on which bindROWS() and extractROWS() work.
### Assume that 'value' is compatible with 'x'.
setMethod("replaceROWS", c("Vector", "ANY"),
    function(x, i, value)
    {
        i <- normalizeSingleBracketSubscript(i, x, as.NSBS=TRUE)
        stopifnot(length(i) == NROW(value))
        ## --<1>-- Concatenate 'x' and 'value' with bindROWS() -----
        ## We assume that bindROWS() works on objects of class 'class(x)'
        ## and does the right thing i.e. that it returns an object of the
        ## same class as 'x' and of NROW 'NROW(x) + NROW(value)'. We skip
        ## validation.
        ans <- bindROWS(x, list(value), check=FALSE)
        ## --<2>-- Subset 'ans' with extractROWS() -----
        idx <- replaceROWS(seq_along(x), i, seq_along(value) + NROW(x))
        ## Because of how we constructed it, 'idx' is guaranteed to be a valid
        ## subscript to use in 'extractROWS(ans, idx)'. By wrapping it inside a
        ## NativeNSBS object, extractROWS() won't waste time checking it or
        ## trying to normalize it.
        idx <- NativeNSBS(idx, NROW(ans), TRUE, FALSE)
        ## We assume that extractROWS() works on an object of class 'class(x)'.
        ## For some objects (e.g. Hits), extractROWS() will take care of
        ## validating the returned object.
        ans <- extractROWS(ans, idx)
        ## --<3>-- Restore the original names -----
        names(ans) <- names(x)
        ## Note that we want the elements coming from 'value' to bring their
        ## metadata columns into 'x' so we do NOT restore the original metadata
        ## columns. See this thread on bioc-devel:
        ## https://stat.ethz.ch/pipermail/bioc-devel/2015-November/008319.html
        #mcols(ans) <- mcols(x, use.names=FALSE)
        ans
    }
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Convenience wrappers for common subsetting operations
###
### S3/S4 combo for subset.Vector
subset.Vector <- function(x, ...) subset(x, ...)

### Workhorse behind the "subset" method for Vector objects: 'subset' is
### evaluated as a logical ROW filter within the scope of 'x', and 'select'
### as a column selection within the scope of 'mcols(x)'.
subset_Vector <- function(x, subset, select, drop=FALSE, ...)
{
    ## Fix old objects on-the-fly (e.g. old GRanges, GRangesList, or
    ## GAlignments instances).
    x <- updateObject(x, check=FALSE)
    i <- evalqForSubset(subset, x, ...)
    x_mcols <- mcols(x, use.names=FALSE)
    if (!is.null(x_mcols)) {
        ## Only the selected metadata columns are kept on the result.
        j <- evalqForSelect(select, x_mcols, ...)
        mcols(x) <- x_mcols[ , j, drop=FALSE]
    }
    x[i, drop=drop]
}
setMethod("subset", "Vector", subset_Vector)
### S3/S4 combo for window.Vector
window.Vector <- function(x, ...) window(x, ...)

### Extract the window of 'x' delimited by 'start'/'end'/'width'
### (interpretation and validation of these arguments is delegated to
### RangeNSBS()).
Vector_window <- function(x, start=NA, end=NA, width=NA)
{
    i <- RangeNSBS(x, start=start, end=end, width=width)
    extractROWS(x, i)
}
setMethod("window", "Vector", Vector_window)
### S3/S4 combo for head.Vector
head.Vector <- function(x, ...) head(x, ...)
setMethod("head", "Vector", head_along_ROWS)

## S3/S4 combo for tail.Vector
tail.Vector <- function(x, ...) tail(x, ...)
setMethod("tail", "Vector", tail_along_ROWS)

## rep.int() along the ROWS (shared implementation).
setMethod("rep.int", "Vector", rep.int_along_ROWS)
## NOT exported.
## Reverse the ROWS of 'x'.
revROWS <- function(x) extractROWS(x, rev(seq_len(NROW(x))))
### S3/S4 combo for rev.Vector
rev.Vector <- revROWS
setMethod("rev", "Vector", revROWS)

## NOT exported.
## rep() along the ROWS; '...' is passed down to base::rep() on the row
## indices (e.g. 'times', 'each', 'length.out').
repROWS <- function(x, ...) extractROWS(x, rep(seq_len(NROW(x)), ...))
setMethod("rep", "Vector", repROWS)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Displaying
###
### Build the 1-line summary used to describe a Vector object: class name,
### length, and number of metadata columns.
.Vector_summary <- function(object)
{
    object_len <- length(object)
    object_mcols <- mcols(object, use.names=FALSE)
    object_nmc <- if (is.null(object_mcols)) 0L else ncol(object_mcols)
    ## 'if/else' (not ifelse()) for the scalar singular/plural choice:
    ## ifelse() on a scalar is wasteful and can strip attributes.
    paste0(classNameForDisplay(object), " object of length ", object_len,
           " with ", object_nmc, " metadata ",
           if (object_nmc == 1L) "column" else "columns")
}
### S3/S4 combo for summary.Vector
summary.Vector <- function(object, ...)
    .Vector_summary(object, ...)
setMethod("summary", "Vector", summary.Vector)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Concatenation along the ROWS
###
### Note that supporting "extractROWS" and "c" makes "replaceROWS" (and thus
### "[<-") work out-of-the-box!
###
### Somewhat painful that we do not always have a DataFrame in elementMetadata
### Return the metadata columns of 'x', substituting a zero-column DFrame
### with one row per element of 'x' when 'x' has no metadata columns.
ensureMcols <- function(x) {
    mc <- mcols(x, use.names=FALSE)
    if (is.null(mc))
        mc <- make_zero_col_DFrame(length(x))
    mc
}
### Combine the metadata columns of the objects in '...' by rows.
### Returns NULL if none of the objects has metadata columns. Otherwise,
### objects without mcols contribute zero-column placeholders and columns
### missing from a given object are filled with NAs.
rbind_mcols <- function(...)
{
    objects <- unname(list(...))
    mcols_list <- lapply(objects, mcols, use.names=FALSE)
    if (length(mcols_list) == 1L)
        return(mcols_list[[1L]])
    mcols_is_null <- sapply_isNULL(mcols_list)
    if (all(mcols_is_null))
        return(NULL)
    ## Replace NULL mcols with zero-column placeholders of the right nrow.
    mcols_list[mcols_is_null] <- lapply(
        objects[mcols_is_null],
        function(object) make_zero_col_DFrame(length(object))
    )
    colnames_list <- lapply(mcols_list, colnames)
    all_colnames <- unique(unlist(colnames_list, use.names=FALSE))
    ## Add the missing columns (NA-filled); zero-row data frames are left
    ## alone (adding columns to them is not needed for the rbind below).
    fillCols <- function(df) {
        if (nrow(df))
            df[setdiff(all_colnames, colnames(df))] <- NA
        df
    }
    do.call(rbind, lapply(mcols_list, fillCols))
}
### We provide a default "bindROWS" method for Vector objects that uses
### bindROWS() internally to concatenate the parallel slots along the ROWS.
### The method behaves like an endomorphism with respect to its first
### argument 'x'. Note that this method will work out-of-the-box and do the
### right thing on most Vector subclasses as long as parallelSlotNames()
### reports the names of all the parallel slots on objects of the subclass
### (some Vector subclasses might require a "parallelSlotNames" method for
### this to happen). For those Vector subclasses on which bindROWS() does not
### work out-of-the-box nor do the right thing, it is strongly advised to
### override the method for Vector objects rather than trying to override
### the "c" method for Vector objects with a specialized method. The
### specialized "bindROWS" method will typically delegate to the method
### below via the use of callNextMethod(). See "bindROWS" methods for
### Hits and Rle objects for some examples.
### No Vector subclass should need to override the "c" method for
### Vector objects.
### Concatenate Vector objects along the ROWS. Workhorse behind the
### "bindROWS" method for Vector objects; behaves like an endomorphism
### with respect to 'x'.
concatenate_Vector_objects <-
    function(x, objects=list(), use.names=TRUE, ignore.mcols=FALSE, check=TRUE)
{
    if (!isTRUEorFALSE(use.names))
        stop("'use.names' must be TRUE or FALSE")
    if (!isTRUEorFALSE(ignore.mcols))
        stop("'ignore.mcols' must be TRUE or FALSE")
    if (!isTRUEorFALSE(check))
        stop("'check' must be TRUE or FALSE")
    objects <- prepare_objects_to_bind(x, objects)
    all_objects <- c(list(x), objects)
    ## Concatenate all the parallel slots except "NAMES" and "elementMetadata"
    ## (those two need special treatment below).
    x_pslotnames <- parallelSlotNames(x)
    pslotnames <- setdiff(x_pslotnames, c("NAMES", "elementMetadata"))
    ans_pslots <- lapply(setNames(pslotnames, pslotnames),
        function(slotname) {
            x_slot <- slot(x, slotname)
            if (is.null(x_slot))
                return(NULL)
            slot_list <- lapply(objects, slot, slotname)
            bindROWS(x_slot, slot_list)
        }
    )
    if ("NAMES" %in% x_pslotnames) {
        ans_NAMES <- NULL
        if (use.names) {
            names_list <- lapply(all_objects, slot, "NAMES")
            object_has_no_names <- sapply_isNULL(names_list)
            if (!all(object_has_no_names)) {
                ## Concatenate the "NAMES" slots, using empty strings for
                ## the objects that have no names.
                names_list[object_has_no_names] <-
                    lapply(all_objects[object_has_no_names],
                           function(object) character(length(object)))
                ans_NAMES <- unlist(names_list, use.names=FALSE)
            }
        }
        ans_pslots <- c(ans_pslots, list(NAMES=ans_NAMES))
    }
    if (!ignore.mcols) {
        ## Concatenate the "elementMetadata" slots.
        ans_mcols <- do.call(rbind_mcols, all_objects)
        ans_pslots <- c(ans_pslots, list(elementMetadata=ans_mcols))
    }
    ans <- do.call(BiocGenerics:::replaceSlots,
                   c(list(x), ans_pslots, list(check=FALSE)))
    if (ignore.mcols)
        mcols(ans) <- NULL
    if (check)
        validObject(ans)
    ans
}
setMethod("bindROWS", "Vector", concatenate_Vector_objects)
### Thin wrapper around bindROWS(). Behave like an endomorphism i.e. return
### an object of the same class as 'x'. In particular 'c(x)' should return 'x'.
### No Vector subclass should need to override this method. See the
### "bindROWS" method for Vector objects above for more information.
setMethod("c", "Vector",
    function(x, ..., ignore.mcols=FALSE, recursive=FALSE)
    {
        ## 'recursive' only exists for call compatibility with base::c()
        ## and must be left to FALSE.
        if (!identical(recursive, FALSE))
            stop(wmsg("\"c\" method for Vector objects ",
                      "does not support the 'recursive' argument"))
        bindROWS(x, list(...), ignore.mcols=ignore.mcols)
    }
)
### FIXME: This method doesn't work properly on DataTable objects if 'after'
### is >= 1 and < length(x).
### Insert 'values' right after position 'after' in 'x' ('after=0' prepends,
### 'after >= length(x)' appends).
setMethod("append", c("Vector", "Vector"),
    function(x, values, after=length(x))
    {
        if (!isSingleNumber(after))
            stop("'after' must be a single number")
        x_len <- length(x)
        if (after == 0L)
            c(values, x)
        else if (after >= x_len)
            c(x, values)
        else
            c(head(x, n=after), values, tail(x, n=-after))
    }
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Evaluating
###
### Evaluate 'expr' within the scope of Vector object 'envir': its columns
### become visible symbols via as.env().
setMethod("eval", c("expression", "Vector"),
    function(expr, envir, enclos = parent.frame())
        eval(expr, as.env(envir, enclos))
)
setMethod("eval", c("language", "Vector"),
    function(expr, envir, enclos = parent.frame())
        eval(expr, as.env(envir, enclos))
)
### with() support: evaluate 'expr' with the columns of 'data' in scope.
setMethod("with", "Vector",
    function(data, expr, ...)
    {
        safeEval(substitute(expr), data, parent.frame(), ...)
    })
### Set column 'name' on 'x': a fixed (parallel) column is routed to its
### dedicated setter (looked up in the namespace of the class of 'x');
### anything else lands in 'mcols(x)'.
setReplaceMethod("column", "Vector", function(x, name, value) {
    if (name %in% parallelVectorNames(x)) {
        setter <- get(paste0(name, "<-"), classNamespace(x), mode="function")
        setter(x, value=value)
    } else {
        mcols(x)[[name]] <- value
    }
})
### S3/S4 combo for transform.Vector
transform.Vector <- transformColumns
setMethod("transform", "Vector", transform.Vector)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Utilities
###
### expand.grid() for Vector objects: returns a DataFrame with one row per
### combination of elements of the arguments in '...'.
setGeneric("expand.grid", signature="...")
BiocGenerics:::apply_hotfix73465(getGeneric("expand.grid"))
setMethod("expand.grid", "Vector",
    function(..., KEEP.OUT.ATTRS = TRUE, stringsAsFactors = TRUE) {
        args <- list(...)
        ## Expand on the element indices, then subscript each argument with
        ## its expanded index column, so that arbitrary Vector arguments
        ## (not just atomic vectors) are supported.
        inds <- lapply(args, seq_along)
        grid <- do.call(expand.grid,
                        c(inds,
                          KEEP.OUT.ATTRS=KEEP.OUT.ATTRS,
                          stringsAsFactors=stringsAsFactors))
        names(args) <- names(grid)
        ans <- DataFrame(mapply(`[`, args, grid, SIMPLIFY=FALSE),
                         check.names=FALSE)
        ## Preserve the "out.attrs" computed by base::expand.grid().
        metadata(ans)$out.attrs <- attr(grid, "out.attrs")
        ans
    })
### FIXME: tapply method still in IRanges
### by() support: split the ROWS of 'data' by the factor(s) in 'INDICES'
### and apply 'FUN' to each subset of 'data'.
setMethod("by", "Vector",
    function(data, INDICES, FUN, ..., simplify = TRUE)
    {
        if (!is.list(INDICES)) {
            ## Wrap a single grouping factor in a named list, using its
            ## deparsed form as the name (mimics base::by()).
            INDICES <- setNames(list(INDICES),
                                deparse(substitute(INDICES))[1L])
        }
        ## tapply() over the row indices; FUNx extracts the corresponding
        ## ROWS of 'data' before calling FUN.
        FUNx <- function(i) FUN(extractROWS(data, i), ...)
        structure(tapply(seq_len(NROW(data)), INDICES, FUNx,
                         simplify = simplify),
                  call = match.call(), class = "by")
    })
### S3/S4 combo for diff.Vector: the S3 method forwards to the generic so
### that S3 dispatch on Vector derivatives lands on the S4 "diff" method
### (same pattern as head.Vector/tail.Vector/window.Vector above).
diff.Vector <- function(x, ...) diff(x, ...)
| /R/Vector-class.R | no_license | jonocarroll/S4Vectors | R | false | false | 30,630 | r | ### =========================================================================
### Vector objects
### -------------------------------------------------------------------------
###
### The Vector virtual class is a general container for storing a finite
### sequence i.e. an ordered finite collection of elements.
###
### Union class used for the "elementMetadata" slot, which is optional.
setClassUnion("DataTable_OR_NULL", c("DataTable", "NULL"))

setClass("Vector",
    contains="Annotated",
    representation(
        "VIRTUAL",
        elementMetadata="DataTable_OR_NULL"
    )
)

### Beware that:
###   > is(factor(), "vector_OR_Vector")
###   [1] TRUE
### even though:
###   > is(factor(), "vector")
###   [1] FALSE
###   > is(factor(), "Vector")
###   [1] FALSE
### See R/S4-utils.R for other examples of messed up inheritance with union
### classes.
### TODO: Should we explicitely add "factor" to this union?
setClassUnion("vector_OR_Vector", c("vector", "Vector"))  # vector-like objects
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### parallelSlotNames()
###
### For internal use only.
###
### Must return the names of all the slots in Vector object 'x' that are
### "parallel" to 'x'. Slot 'foo' is considered to be "parallel" to 'x' if:
### (a) 'x@foo' is NULL or an object for which NROW() is equal to
### 'length(x)', and
### (b) the i-th element in 'x@foo' describes some component of the i-th
### element in 'x'.
### For example, the "start", "width", "NAMES", and "elementMetadata" slots
### of an IRanges object are parallel to the object. Note that the "NAMES"
### and "elementMetadata" slots can be set to NULL.
### The *first" slot name returned by parallelSlotNames() is used to get the
### length of 'x'.
###
setGeneric("parallelSlotNames",
    function(x) standardGeneric("parallelSlotNames")
)
### The Vector class itself has a single parallel slot: "elementMetadata".
setMethod("parallelSlotNames", "Vector", function(x) "elementMetadata")
### Methods for Vector subclasses only need to specify the parallel slots they
### add to their parent class. See Hits-class.R file for an example.
### parallelVectorNames() is for internal use only.
setGeneric("parallelVectorNames",
    function(x) standardGeneric("parallelVectorNames"))
### Default: derive the user-visible "fixed column" names from the columns
### of the data.frame form of an empty instance of class(x).
setMethod("parallelVectorNames", "ANY",
    function(x) setdiff(colnames(as.data.frame(new(class(x)))), "value"))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### updateObject()
###
### The default method (defined in BiocGenerics) does complicated, costly,
### and dangerous things, and sometimes it actually breaks valid objects
### (e.g. it breaks valid OverlapEncodings objects). So we overwrite it with
### a method for Vector objects that does nothing! That way it's simple,
### cheap, and safe ;-). And that's really all it needs to do at the moment.
### UPDATE: Starting with S4Vectors 0.23.19, all DataFrame instances need
### to be replaced with DFrame instances. So the updateObject() method for
### Vector objects got updated from doing nothing (no-op) to call
### updateObject() on the elementMetadata component of the object.
### Bring old serialized instances up-to-date. Since S4Vectors 0.23.19 this
### means replacing any DataFrame instance found in the "elementMetadata"
### slot with a DFrame instance (see comments above).
setMethod("updateObject", "Vector",
    function(object, ..., verbose=FALSE)
    {
        ## Update from DataFrame to DFrame.
        object@elementMetadata <- updateObject(object@elementMetadata,
                                               ..., verbose=verbose)
        object
    }
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Getters
###
### The length of a Vector object is the NROW of its first parallel slot.
setMethod("length", "Vector",
    function(x) NROW(slot(x, parallelSlotNames(x)[[1L]]))
)
setMethod("lengths", "Vector",
    function(x, use.names=TRUE)
    {
        if (!isTRUEorFALSE(use.names))
            stop("'use.names' must be TRUE or FALSE")
        ans <- elementNROWS(x)  # This is wrong! See ?Vector for the details.
        if (!use.names)
            names(ans) <- NULL
        ans
    }
)
setMethod("NROW", "Vector", function(x) length(x))
setMethod("ROWNAMES", "Vector", function(x) names(x))
### 3 accessors for the same slot: elementMetadata(), mcols(), and values().
### mcols() is the recommended one, use of elementMetadata() or values() is
### discouraged.
setGeneric("elementMetadata", signature="x",
    function(x, use.names=TRUE, ...) standardGeneric("elementMetadata")
)
setMethod("elementMetadata", "Vector",
    function(x, use.names=TRUE, ...)
    {
        if (!isTRUEorFALSE(use.names))
            stop("'use.names' must be TRUE or FALSE")
        ## Fix old (DataFrame) instances on-the-fly.
        ans <- updateObject(x@elementMetadata, check=FALSE)
        ## When requested, propagate the names of 'x' as the rownames of
        ## the returned DataTable.
        if (use.names && !is.null(ans))
            rownames(ans) <- names(x)
        ans
    }
)
setGeneric("mcols", signature="x",
    function(x, use.names=TRUE, ...) standardGeneric("mcols")
)
setMethod("mcols", "Vector",
    function(x, use.names=TRUE, ...)
        elementMetadata(x, use.names=use.names, ...)
)
setGeneric("values", function(x, ...) standardGeneric("values"))
setMethod("values", "Vector", function(x, ...) elementMetadata(x, ...))
### Default NA handling: a Vector object is assumed to contain no NAs
### unless a subclass overrides these methods.
setMethod("anyNA", "Vector", function(x, recursive=FALSE) FALSE)
setMethod("is.na", "Vector", function(x) rep.int(FALSE, length(x)))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Validity
###
### Check that length(x) is a single non-negative attribute-free number.
.valid.Vector.length <- function(x)
{
    x_len <- length(x)
    if (!isSingleNumber(x_len) || x_len < 0L)
        return("'length(x)' must be a single non-negative number")
    if (!is.null(attributes(x_len)))
        return("'length(x)' must be a single integer with no attributes")
    NULL
}
### Check that parallelSlotNames(x) is well-formed (character, no NAs, no
### duplicates, ends with "elementMetadata") and that every parallel slot
### is either NULL or parallel to 'x' (i.e. has NROW equal to length(x)).
.valid.Vector.parallelSlots <- function(x)
{
    x_len <- length(x)
    x_pslotnames <- parallelSlotNames(x)
    if (!is.character(x_pslotnames)
     || anyMissing(x_pslotnames)
     || anyDuplicated(x_pslotnames)) {
        msg <- c("'parallelSlotNames(x)' must be a character vector ",
                 "with no NAs and no duplicates")
        return(paste(msg, collapse=""))
    }
    if (x_pslotnames[[length(x_pslotnames)]] != "elementMetadata") {
        msg <- c("last string in 'parallelSlotNames(x)' ",
                 "must be \"elementMetadata\"")
        return(paste(msg, collapse=""))
    }
    msg <- NULL
    for (slotname in head(x_pslotnames, -1L)) {
        tmp <- slot(x, slotname)
        if (!(is.null(tmp) || NROW(tmp) == x_len)) {
            what <- paste0("x@", slotname)
            msg <- c(msg, paste0("'", what, "' is not parallel to 'x'"))
        }
    }
    ## The "elementMetadata" slot is checked thru its accessor (not slot())
    ## so that the access goes thru the official code path.
    tmp <- mcols(x, use.names=FALSE)
    if (!(is.null(tmp) || nrow(tmp) == x_len)) {
        msg <- c(msg, "'mcols(x)' is not parallel to 'x'")
    }
    msg
}
### Validate the names of a Vector object: they must be either NULL or a
### bare character vector (no attributes) parallel to 'x'. Returns NULL
### when valid, or a 1-string description of the problem.
.valid.Vector.names <- function(x)
{
    nms <- names(x)
    if (is.null(nms))
        return(NULL)
    if (!is.character(nms) || !is.null(attributes(nms)))
        return(paste0("'names(x)' must be NULL or a character vector ",
                      "with no attributes"))
    if (length(nms) != length(x))
        return("'names(x)' must be NULL or have the length of 'x'")
    NULL
}
### Check that 'mcols(x)', when not NULL, is a DataTable whose rownames
### (when set) match the names of 'x'.
.valid.Vector.mcols <- function(x)
{
    x_mcols <- mcols(x, use.names=FALSE)
    if (is.null(x_mcols))
        return(NULL)
    if (!is(x_mcols, "DataTable"))
        return("'mcols(x)' must be a DataTable object or NULL")
    ## 'x_mcols' is a DataTable object.
    x_mcols_rownames <- rownames(x_mcols)
    if (is.null(x_mcols_rownames))
        return(NULL)
    if (!identical(x_mcols_rownames, names(x)))
    {
        msg <- c("the rownames of DataTable 'mcols(x)' ",
                 "must match the names of 'x'")
        return(paste(msg, collapse=""))
    }
    NULL
}
### Combine all the per-aspect validity checks above.
.valid.Vector <- function(x)
{
    c(.valid.Vector.length(x),
      .valid.Vector.parallelSlots(x),
      .valid.Vector.names(x),
      .valid.Vector.mcols(x))
}
setValidity2("Vector", .valid.Vector)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Setters
###
setGeneric("elementMetadata<-",
    function(x, ..., value) standardGeneric("elementMetadata<-"))
### NOT exported but used in the IRanges and GenomicRanges packages.
### Normalize a user-supplied 'mcols' replacement value: coerce it to the
### class expected by the "elementMetadata" slot of class 'x_class', drop
### its rownames, and recycle its rows to 'x_len' (with a warning when
### 'x_len' is not a multiple of the supplied number of rows).
normarg_mcols <- function(mcols, x_class, x_len)
{
    ## Note that 'mcols_target_class' could also be obtained with
    ## 'getClassDef(x_class)@slots[["elementMetadata"]]', in which
    ## case the class name would be returned with the "package" attribute.
    mcols_target_class <- getSlots(x_class)[["elementMetadata"]]
    ok <- is(mcols, mcols_target_class)
    if (is.null(mcols)) {
        if (ok)
            return(mcols)  # NULL
        mcols <- make_zero_col_DFrame(x_len)
    } else if (is.list(mcols)) {
        ## Note that this will also handle an 'mcols' that is a data.frame
        ## or a data.frame derivative (e.g. data.table object).
        if (ok)
            return(mcols)
        mcols <- new_DataFrame(mcols)
    } else {
        mcols <- updateObject(mcols, check=FALSE)
    }
    ok <- is(mcols, mcols_target_class)
    if (!ok)
        mcols <- as(mcols, mcols_target_class)
    ## From now on, 'mcols' is guaranteed to be a DataTable object.
    if (!is.null(rownames(mcols)))
        rownames(mcols) <- NULL
    mcols_nrow <- nrow(mcols)
    if (mcols_nrow == x_len)
        return(mcols)
    one <- ncol(mcols) == 1L  # singular vs plural wording in the messages
    if (mcols_nrow > x_len && mcols_nrow != 1L)
        stop(wmsg("trying to set ", if (one) "a " else "",
                  "metadata column", if (one) "" else "s", " ",
                  "of length ", mcols_nrow, " on an object of length ", x_len))
    if (mcols_nrow == 0L)
        stop(wmsg("trying to set ", if (one) "a " else "", "zero length ",
                  "metadata column", if (one) "" else "s", " ",
                  "on a non-zero length object "))
    if (x_len %% mcols_nrow != 0L)
        warning(wmsg("You supplied ", if (one) "a " else "",
                     "metadata column", if (one) "" else "s", " ",
                     "of length ", mcols_nrow, " to set on an object ",
                     "of length ", x_len, ". However please note that ",
                     "the latter is not a multiple of the former."))
    ## Recycle the rows to the length of 'x'.
    i <- rep(seq_len(mcols_nrow), length.out=x_len)
    extractROWS(mcols, i)
}
setReplaceMethod("elementMetadata", "Vector",
    function(x, ..., value)
    {
        ## Normalize 'value' (coercion + row recycling) before storing it.
        value <- normarg_mcols(value, class(x), length(x))
        BiocGenerics:::replaceSlots(x, elementMetadata=value, check=FALSE)
    }
)
setGeneric("mcols<-", function(x, ..., value) standardGeneric("mcols<-"))
### mcols<- and values<- are synonyms of elementMetadata<-.
setReplaceMethod("mcols", "Vector",
    function(x, ..., value) `elementMetadata<-`(x, ..., value=value)
)
setGeneric("values<-", function(x, ..., value) standardGeneric("values<-"))
setReplaceMethod("values", "Vector",
    function(x, value) {
        elementMetadata(x) <- value
        x
    })
setGeneric("rename", function(x, ...) standardGeneric("rename"))
### Rename elements of 'x': each argument in '...' maps an existing name
### (the argument name) to its replacement (the argument value).
### All the "from" names must already exist on 'x'.
.renameVector <- function(x, ...) {
    map <- c(...)
    if (!is.character(map) || any(is.na(map)))
        stop("arguments in '...' must be character and not NA")
    missing_from <- setdiff(names(map), names(x))
    if (length(missing_from) > 0L)
        stop("Some 'from' names in value not found on 'x': ",
             paste(missing_from, collapse = ", "))
    hits <- match(names(map), names(x))
    names(x)[hits] <- map
    x
}
### rename() works the same way on ordinary vectors and Vector objects.
setMethod("rename", "vector", .renameVector)
setMethod("rename", "Vector", .renameVector)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Coercion
###
### All the as.<mode>() coercions funnel thru as.vector(x, mode=...), which
### is the only thing a Vector subclass needs to support for these to work.
setMethod("as.logical", "Vector",
    function(x) as.vector(x, mode="logical")
)
setMethod("as.integer", "Vector",
    function(x) as.vector(x, mode="integer")
)
setMethod("as.numeric", "Vector",
    function(x) as.vector(x, mode="numeric")
)
### Even though as.double() is a generic function (as reported by
### 'getGeneric("as.double")', it seems impossible to define methods for this
### generic. Trying to do so like in the code below actually creates an
### "as.numeric" method.
#setMethod("as.double", "Vector",
#    function(x) as.vector(x, mode="double")
#)
setMethod("as.complex", "Vector",
    function(x) as.vector(x, mode="complex")
)
setMethod("as.character", "Vector",
    function(x) as.vector(x, mode="character")
)
setMethod("as.raw", "Vector",
    function(x) as.vector(x, mode="raw")
)
### Mirror the methods above with setAs() so that the as(x, "integer") form
### also works.
setAs("Vector", "vector", function(from) as.vector(from))
setAs("Vector", "logical", function(from) as.logical(from))
setAs("Vector", "integer", function(from) as.integer(from))
setAs("Vector", "numeric", function(from) as.numeric(from))
setAs("Vector", "complex", function(from) as.complex(from))
setAs("Vector", "character", function(from) as.character(from))
setAs("Vector", "raw", function(from) as.raw(from))
setAs("Vector", "factor", function(from) as.factor(from))
setAs("Vector", "data.frame", function(from) as.data.frame(from))
### S3/S4 combo for as.data.frame.Vector
### The S3 method simply forwards to the generic (which dispatches to the
### S4 method below). Note that 'row.names' is forwarded as-is: hard-coding
### 'row.names=NULL' here made the S3 dispatch path silently ignore a
### user-supplied 'row.names', unlike the S4 method.
as.data.frame.Vector <- function(x, row.names=NULL, optional=FALSE, ...) {
    as.data.frame(x, row.names=row.names, optional=optional, ...)
}
### Default "as.data.frame" method for Vector objects: go thru the
### ordinary-vector representation of 'x'.
setMethod("as.data.frame", "Vector",
    function(x, row.names=NULL, optional=FALSE, ...)
    {
        x <- as.vector(x)
        as.data.frame(x, row.names=row.names, optional=optional, ...)
    })
### S3/S4 combo for as.matrix.Vector
as.matrix.Vector <- function(x, ...) {
    as.matrix(x)
}
### Default: matrix form of the ordinary-vector representation of 'x'.
setMethod("as.matrix", "Vector", function(x) {
    as.matrix(as.vector(x))
})
### Return the environment to look up class-related symbols in: the
### namespace of the package that defines class(x), or the global
### environment when the class carries no package information.
classNamespace <- function(x) {
    pkg <- packageSlot(class(x))
    if (is.null(pkg))
        .GlobalEnv
    else
        getNamespace(pkg)
}
### Build an environment exposing the "fixed columns" of 'x' (its names plus
### its parallelVectorNames()) as lazily-computed variables. Each column is
### installed as an active binding that, on first access, computes
### 'tform(accessor(x))' and then caches the result by replacing the active
### binding with a regular variable.
makeFixedColumnEnv <- function(x, parent, tform = identity) {
    env <- new.env(parent=parent)
    pvnEnv <- classNamespace(x)
    lapply(c("names", parallelVectorNames(x)), function(nm) {
        ## Look the accessor up in the namespace of the class of 'x'.
        accessor <- get(nm, pvnEnv, mode="function")
        makeActiveBinding(nm, function() {
            val <- tform(accessor(x))
            ## Cache: replace the active binding with the computed value.
            rm(list=nm, envir=env)
            assign(nm, val, env)
            val
        }, env)
    })
    env
}
### Expose 'x' as an environment: the mcols environment is the parent of
### the fixed-column environment, and a reference to 'x' itself is added
### via addSelfRef().
setMethod("as.env", "Vector", function(x, enclos, tform = identity) {
    parent <- as.env(mcols(x, use.names=FALSE), enclos, tform)
    addSelfRef(x, makeFixedColumnEnv(x, parent, tform))
})
### S3/S4 combo for as.list.Vector: go thru the List representation of 'x'.
as.list.Vector <- function(x, ...) as.list(x, ...)
setMethod("as.list", "Vector", function(x, ...) as.list(as(x, "List"), ...))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Subsetting
###
### The "[" and "[<-" methods for Vector objects are just delegating to
### extractROWS() and replaceROWS() for performing the real work. Most of
### the times, the author of a Vector subclass only needs to implement
### an "extractROWS" and "replaceROWS" method for his/her objects.
###
### The "[" method for Vector objects supports the 'x[i, j]' form to
### allow the user to conveniently subset the metadata columns thru 'j'.
### Note that a Vector subclass with a true 2-D semantic (e.g.
### SummarizedExperiment) needs to overwrite this. This means that code
### intended to operate on an arbitrary Vector derivative 'x' should not
### use this feature as there is no guarantee that 'x' supports it. For
### this reason this feature should preferrably be used interactively only.
### 'x[i, j]' is supported for convenience: 'i' subsets the ROWS (via
### subset_along_ROWS/extractROWS) and 'j' subsets the metadata columns.
### See the note above about when (not) to rely on the 2-arg form.
setMethod("[", "Vector",
    function(x, i, j, ..., drop=TRUE)
    {
        ans <- subset_along_ROWS(x, i, , ..., drop=drop)
        if (!missing(j))
            mcols(ans) <- mcols(ans, use.names=FALSE)[ , j, drop=FALSE]
        ans
    }
)
### We provide a default "extractROWS" method for Vector objects that subsets
### all the parallel slots.
### Note that this method will work out-of-the-box and do the right thing
### on most Vector subclasses as long as parallelSlotNames() reports the
### names of all the parallel slots on objects of the subclass (some Vector
### subclasses might require a "parallelSlotNames" method for this to happen).
### For those Vector subclasses on which extractROWS() does not work
### out-of-the-box nor do the right thing, it is strongly advised to override
### the method for Vector objects rather than trying to override the "["
### method for Vector objects with a specialized method. The specialized
### "extractROWS" method will typically delegate to the method below via the
### use of callNextMethod(). See "extractROWS" method for Hits for an example.
### Subset all the parallel slots of 'x' with the same normalized subscript.
setMethod("extractROWS", "Vector",
    function(x, i)
    {
        ## Fix old objects on-the-fly (e.g. old GRanges or GAlignments
        ## instances).
        x <- updateObject(x, check=FALSE)
        i <- normalizeSingleBracketSubscript(i, x, as.NSBS=TRUE)
        x_pslotnames <- parallelSlotNames(x)
        ans_pslots <- lapply(setNames(x_pslotnames, x_pslotnames),
                             function(slotname)
                                 extractROWS(slot(x, slotname), i))
        ## Does NOT validate the object before returning it, because, most of
        ## the times, this is not needed. There are exceptions though. See
        ## for example the "extractROWS" method for Hits objects.
        do.call(BiocGenerics:::replaceSlots,
                c(list(x), ans_pslots, list(check=FALSE)))
    }
)
### 'x[i] <- value': normalize the subscript (allowing appending past the
### end of 'x'), handle the special cases (empty subscript; NULL 'value'
### meaning element removal), recycle 'value', then delegate to mergeROWS().
setReplaceMethod("[", "Vector",
    function(x, i, j, ..., value)
    {
        if (!missing(j) || length(list(...)) > 0L)
            stop("invalid subsetting")
        nsbs <- normalizeSingleBracketSubscript(i, x, as.NSBS=TRUE,
                                                allow.append=TRUE)
        li <- length(nsbs)
        if (li == 0L) {
            ## Surprisingly, in that case, `[<-` on standard vectors does not
            ## even look at 'value'. So neither do we...
            return(x)
        }
        value <- normalizeSingleBracketReplacementValue(value, x)
        if (is.null(value)) {
            ## 'x[i] <- NULL' removes the selected elements.
            return(extractROWS(x, complement(nsbs)))
        }
        value <- recycleSingleBracketReplacementValue(value, x, nsbs)
        mergeROWS(x, i, value)
    }
)
### Replace the ROWS of 'x' selected by 'i' with 'value', appending when 'i'
### reaches past the end of 'x' (the appended region must be contiguous:
### leaving gaps is an error).
setMethod("mergeROWS", c("Vector", "ANY"),
    function(x, i, value)
    {
        nsbs <- normalizeSingleBracketSubscript(i, x, as.NSBS=TRUE,
                                                allow.append=TRUE)
        if (max(nsbs) <= NROW(x)) {
            ## Pure in-bounds replacement, no appending involved.
            nsbs@upper_bound_is_strict <- TRUE
            return(replaceROWS(x, nsbs, value))
        }
        idx <- as.integer(nsbs)
        oob <- idx > NROW(x)
        value_idx <- integer(max(nsbs) - NROW(x))
        ## handles replacement in the appended region
        value_idx[idx[oob] - NROW(x)] <- seq_along(value)[oob]
        if (any(value_idx == 0L)) {
            stop("appending gaps is not supported")
        }
        new_values <- extractROWS(value, value_idx)
        names(new_values) <- if (is.character(i)) i[oob] else NULL
        ## Grow 'x' with the appended elements, then perform the remaining
        ## in-bounds replacements.
        x <- bindROWS(x, list(new_values), check=FALSE)
        replaceROWS(x, idx[!oob], extractROWS(value, !oob))
    }
)
### Work on any Vector object on which bindROWS() and extractROWS() work.
### Assume that 'value' is compatible with 'x'.
### Replace the ROWS of 'x' selected by subscript 'i' with the corresponding
### ROWS of 'value' ('i' and 'value' must have the same length/NROW).
setMethod("replaceROWS", c("Vector", "ANY"),
    function(x, i, value)
    {
        i <- normalizeSingleBracketSubscript(i, x, as.NSBS=TRUE)
        stopifnot(length(i) == NROW(value))
        ## --<1>-- Concatenate 'x' and 'value' with bindROWS() -----
        ## We assume that bindROWS() works on objects of class 'class(x)'
        ## and does the right thing i.e. that it returns an object of the
        ## same class as 'x' and of NROW 'NROW(x) + NROW(value)'. We skip
        ## validation.
        ans <- bindROWS(x, list(value), check=FALSE)
        ## --<2>-- Subset 'ans' with extractROWS() -----
        ## For each position in 'x', compute the position in 'ans' to take
        ## the result from: the original position for untouched ROWS, or the
        ## position of the corresponding element of 'value' (offset by
        ## NROW(x)) for replaced ROWS.
        idx <- replaceROWS(seq_along(x), i, seq_along(value) + NROW(x))
        ## Because of how we constructed it, 'idx' is guaranteed to be a valid
        ## subscript to use in 'extractROWS(ans, idx)'. By wrapping it inside a
        ## NativeNSBS object, extractROWS() won't waste time checking it or
        ## trying to normalize it.
        idx <- NativeNSBS(idx, NROW(ans), TRUE, FALSE)
        ## We assume that extractROWS() works on an object of class 'class(x)'.
        ## For some objects (e.g. Hits), extractROWS() will take care of
        ## validating the returned object.
        ans <- extractROWS(ans, idx)
        ## --<3>-- Restore the original names -----
        names(ans) <- names(x)
        ## Note that we want the elements coming from 'value' to bring their
        ## metadata columns into 'x' so we do NOT restore the original metadata
        ## columns. See this thread on bioc-devel:
        ## https://stat.ethz.ch/pipermail/bioc-devel/2015-November/008319.html
        #mcols(ans) <- mcols(x, use.names=FALSE)
        ans
    }
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Convenience wrappers for common subsetting operations
###
### S3/S4 combo for subset.Vector
subset.Vector <- function(x, ...) subset(x, ...)
### Non-standard evaluation: 'subset' and 'select' are passed unevaluated
### to evalqForSubset()/evalqForSelect(), which presumably evaluate them in
### a scope derived from 'x' / its metadata columns (see those helpers).
subset_Vector <- function(x, subset, select, drop=FALSE, ...)
{
    ## Fix old objects on-the-fly (e.g. old GRanges, GRangesList, or
    ## GAlignments instances).
    x <- updateObject(x, check=FALSE)
    i <- evalqForSubset(subset, x, ...)
    x_mcols <- mcols(x, use.names=FALSE)
    if (!is.null(x_mcols)) {
        ## Restrict the metadata columns to the ones picked by 'select'.
        j <- evalqForSelect(select, x_mcols, ...)
        mcols(x) <- x_mcols[ , j, drop=FALSE]
    }
    x[i, drop=drop]
}
setMethod("subset", "Vector", subset_Vector)
### S3/S4 combo for window.Vector
window.Vector <- function(x, ...) window(x, ...)
### Extract the window of 'x' described by 'start'/'end'/'width'.
### RangeNSBS() validates the requested range against 'x' and the
### resulting subscript is applied with extractROWS().
Vector_window <- function(x, start=NA, end=NA, width=NA)
    extractROWS(x, RangeNSBS(x, start=start, end=end, width=width))
setMethod("window", "Vector", Vector_window)
### S3/S4 combo for head.Vector
head.Vector <- function(x, ...) head(x, ...)
setMethod("head", "Vector", head_along_ROWS)
## S3/S4 combo for tail.Vector
tail.Vector <- function(x, ...) tail(x, ...)
setMethod("tail", "Vector", tail_along_ROWS)
setMethod("rep.int", "Vector", rep.int_along_ROWS)
## NOT exported.
## Reverse the ROWS of 'x' by extracting them in reverse order.
revROWS <- function(x) extractROWS(x, rev(seq_len(NROW(x))))
### S3/S4 combo for rev.Vector
rev.Vector <- revROWS
setMethod("rev", "Vector", revROWS)
## NOT exported.
## Replicate the ROWS of 'x'; '...' is forwarded to base::rep() on the row
## indices (so 'times=', 'each=', etc. are supported).
repROWS <- function(x, ...) extractROWS(x, rep(seq_len(NROW(x)), ...))
setMethod("rep", "Vector", repROWS)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Displaying
###
## One-line description of a Vector object for display: class name, length,
## and number of metadata columns.
.Vector_summary <- function(object)
{
    object_len <- length(object)
    object_mcols <- mcols(object, use.names=FALSE)
    object_nmc <- if (is.null(object_mcols)) 0L else ncol(object_mcols)
    ## 'object_nmc' is a scalar, so use if/else rather than ifelse(), which
    ## is intended for vectorized conditions.
    nmc_noun <- if (object_nmc == 1L) "column" else "columns"
    paste0(classNameForDisplay(object), " object of length ", object_len,
           " with ", object_nmc, " metadata ", nmc_noun)
}
### S3/S4 combo for summary.Vector
### Fix: the previous version forwarded '...' to .Vector_summary(), which
### takes a single argument, so any extra argument (e.g. summary(x, digits=2))
### raised an "unused argument" error. Extra arguments are now accepted and
### ignored, matching the usual summary() contract.
summary.Vector <- function(object, ...)
    .Vector_summary(object)
setMethod("summary", "Vector", summary.Vector)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Concatenation along the ROWS
###
### Note that supporting "extractROWS" and "c" makes "replaceROWS" (and thus
### "[<-") work out-of-the-box!
###
### elementMetadata is not guaranteed to hold a DataFrame; this helper
### always returns one (a zero-column placeholder when mcols() is NULL).
ensureMcols <- function(x) {
    mc <- mcols(x, use.names=FALSE)
    if (is.null(mc)) make_zero_col_DFrame(length(x)) else mc
}
## Row-bind the metadata columns of the supplied objects into a single
## DataFrame whose columns are the union of all column names. Returns NULL
## when no object carries metadata columns.
rbind_mcols <- function(...)
{
    objects <- unname(list(...))
    mcols_list <- lapply(objects, mcols, use.names=FALSE)
    if (length(mcols_list) == 1L)
        return(mcols_list[[1L]])
    mcols_is_null <- sapply_isNULL(mcols_list)
    if (all(mcols_is_null))
        return(NULL)
    ## Objects without mcols contribute zero-column placeholders so that
    ## every element still contributes one row to the rbind() below.
    mcols_list[mcols_is_null] <- lapply(
        objects[mcols_is_null],
        function(object) make_zero_col_DFrame(length(object))
    )
    colnames_list <- lapply(mcols_list, colnames)
    all_colnames <- unique(unlist(colnames_list, use.names=FALSE))
    ## Pad each DataFrame with NA columns for the names it is missing.
    ## Zero-row DataFrames are deliberately left untouched -- presumably
    ## because assigning a length-1 NA into a zero-row table would fail,
    ## and they contribute no rows anyway (TODO confirm).
    fillCols <- function(df) {
        if (nrow(df))
            df[setdiff(all_colnames, colnames(df))] <- NA
        df
    }
    do.call(rbind, lapply(mcols_list, fillCols))
}
### We provide a default "bindROWS" method for Vector objects that uses
### bindROWS() internally to concatenate the parallel slots along the ROWS.
### The method behaves like an endomorphism with respect to its first
### argument 'x'. Note that this method will work out-of-the-box and do the
### right thing on most Vector subclasses as long as parallelSlotNames()
### reports the names of all the parallel slots on objects of the subclass
### (some Vector subclasses might require a "parallelSlotNames" method for
### this to happen). For those Vector subclasses on which bindROWS() does not
### work out-of-the-box nor do the right thing, it is strongly advised to
### override the method for Vector objects rather than trying to override
### the "c" method for Vector objects with a specialized method. The
### specialized "bindROWS" method will typically delegate to the method
### below via the use of callNextMethod(). See "bindROWS" methods for
### Hits and Rle objects for some examples.
### No Vector subclass should need to override the "c" method for
### Vector objects.
## Concatenate 'x' with the Vector objects in 'objects' along the ROWS by
## concatenating each parallel slot. Behaves as an endomorphism on 'x'
## (see the long comment block above).
concatenate_Vector_objects <-
    function(x, objects=list(), use.names=TRUE, ignore.mcols=FALSE, check=TRUE)
{
    if (!isTRUEorFALSE(use.names))
        stop("'use.names' must be TRUE or FALSE")
    if (!isTRUEorFALSE(ignore.mcols))
        stop("'ignore.mcols' must be TRUE or FALSE")
    if (!isTRUEorFALSE(check))
        stop("'check' must be TRUE or FALSE")
    objects <- prepare_objects_to_bind(x, objects)
    all_objects <- c(list(x), objects)
    ## Concatenate all the parallel slots except "NAMES" and "elementMetadata".
    x_pslotnames <- parallelSlotNames(x)
    pslotnames <- setdiff(x_pslotnames, c("NAMES", "elementMetadata"))
    ans_pslots <- lapply(setNames(pslotnames, pslotnames),
        function(slotname) {
            x_slot <- slot(x, slotname)
            ## A NULL slot in 'x' stays NULL in the result.
            if (is.null(x_slot))
                return(NULL)
            slot_list <- lapply(objects, slot, slotname)
            bindROWS(x_slot, slot_list)
        }
    )
    if ("NAMES" %in% x_pslotnames) {
        ans_NAMES <- NULL
        if (use.names) {
            names_list <- lapply(all_objects, slot, "NAMES")
            object_has_no_names <- sapply_isNULL(names_list)
            if (!all(object_has_no_names)) {
                ## Concatenate the "NAMES" slots. Objects with no names get
                ## "" placeholders so the lengths line up.
                names_list[object_has_no_names] <-
                    lapply(all_objects[object_has_no_names],
                           function(object) character(length(object)))
                ans_NAMES <- unlist(names_list, use.names=FALSE)
            }
        }
        ans_pslots <- c(ans_pslots, list(NAMES=ans_NAMES))
    }
    if (!ignore.mcols) {
        ## Concatenate the "elementMetadata" slots.
        ans_mcols <- do.call(rbind_mcols, all_objects)
        ans_pslots <- c(ans_pslots, list(elementMetadata=ans_mcols))
    }
    ## Replace slots in place (no 'new()' call) so the result keeps the
    ## class of 'x'; validation is deferred to the 'check' step below.
    ans <- do.call(BiocGenerics:::replaceSlots,
                   c(list(x), ans_pslots, list(check=FALSE)))
    if (ignore.mcols)
        mcols(ans) <- NULL
    if (check)
        validObject(ans)
    ans
}
setMethod("bindROWS", "Vector", concatenate_Vector_objects)
### Thin wrapper around bindROWS(). Behave like an endomorphism i.e. return
### an object of the same class as 'x'. In particular 'c(x)' should return 'x'.
### No Vector subclass should need to override this method. See the
### "bindROWS" method for Vector objects above for more information.
setMethod("c", "Vector",
function(x, ..., ignore.mcols=FALSE, recursive=FALSE)
{
if (!identical(recursive, FALSE))
stop(wmsg("\"c\" method for Vector objects ",
"does not support the 'recursive' argument"))
bindROWS(x, list(...), ignore.mcols=ignore.mcols)
}
)
### FIXME: This method doesn't work properly on DataTable objects if 'after'
### is >= 1 and < length(x).
setMethod("append", c("Vector", "Vector"),
function(x, values, after=length(x))
{
if (!isSingleNumber(after))
stop("'after' must be a single number")
x_len <- length(x)
if (after == 0L)
c(values, x)
else if (after >= x_len)
c(x, values)
else
c(head(x, n=after), values, tail(x, n=-after))
}
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Evaluating
###
setMethod("eval", c("expression", "Vector"),
function(expr, envir, enclos = parent.frame())
eval(expr, as.env(envir, enclos))
)
setMethod("eval", c("language", "Vector"),
function(expr, envir, enclos = parent.frame())
eval(expr, as.env(envir, enclos))
)
setMethod("with", "Vector",
function(data, expr, ...)
{
safeEval(substitute(expr), data, parent.frame(), ...)
})
setReplaceMethod("column", "Vector", function(x, name, value) {
if (name %in% parallelVectorNames(x)) {
setter <- get(paste0(name, "<-"), classNamespace(x), mode="function")
setter(x, value=value)
} else {
mcols(x)[[name]] <- value
x
}
})
### S3/S4 combo for transform.Vector: both delegate to transformColumns()
### (defined elsewhere in the package).
transform.Vector <- transformColumns
setMethod("transform", "Vector", transform.Vector)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Utilities
###
setGeneric("expand.grid", signature="...")
BiocGenerics:::apply_hotfix73465(getGeneric("expand.grid"))
setMethod("expand.grid", "Vector",
function(..., KEEP.OUT.ATTRS = TRUE, stringsAsFactors = TRUE) {
args <- list(...)
inds <- lapply(args, seq_along)
grid <- do.call(expand.grid,
c(inds,
KEEP.OUT.ATTRS=KEEP.OUT.ATTRS,
stringsAsFactors=stringsAsFactors))
names(args) <- names(grid)
ans <- DataFrame(mapply(`[`, args, grid, SIMPLIFY=FALSE),
check.names=FALSE)
metadata(ans)$out.attrs <- attr(grid, "out.attrs")
ans
})
### FIXME: tapply method still in IRanges
### by() for Vector objects: split the row indices of 'data' by 'INDICES'
### and apply 'FUN' to the corresponding subset of 'data' for each group.
setMethod("by", "Vector",
          function(data, INDICES, FUN, ..., simplify = TRUE)
          {
              if (!is.list(INDICES)) {
                  ## Mimic base::by(): wrap a bare grouping factor in a
                  ## named list, using its deparsed expression as the name.
                  INDICES <- setNames(list(INDICES),
                                      deparse(substitute(INDICES))[1L])
              }
              ## tapply() groups the integer row indices; FUNx maps each
              ## index group back to the matching rows of 'data'.
              FUNx <- function(i) FUN(extractROWS(data, i), ...)
              structure(tapply(seq_len(NROW(data)), INDICES, FUNx,
                               simplify = simplify),
                        call = match.call(), class = "by")
          })
### S3/S4 combo, same pattern as head.Vector/tail.Vector above: the inner
### diff() call is expected to dispatch to the generic, not recurse here.
diff.Vector <- function(x, ...) diff(x, ...)
|
plot4 <- function() {
    ## Build the 2x2 panel of household power-consumption plots for
    ## 1-2 Feb 2007 and save it to "plot4.png" in the working directory.
    ##
    ## Read everything as character so parsing is uniform; "?" marks
    ## missing values in this dataset.
    data <- read.table("household_power_consumption.txt", header = TRUE,
                       sep = ";", na.strings = "?", colClasses = "character")
    ## Select the two target days by Date value instead of hard-coded row
    ## numbers (66637:69516), so the code survives any change in file
    ## length or row order.
    data <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
    ## Combine Date and Time into one timestamp. Use the explicit
    ## "%H:%M:%S" time format instead of the locale-dependent "%X".
    DateTime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
    data <- cbind(DateTime, data)
    data <- subset(data, select = -c(Date, Time))
    ## Columns 2..8 are the seven numeric measurement columns.
    for (i in 2:8) {
        data[, i] <- as.numeric(data[, i])
    }
    par(mfrow = c(2, 2))
    with(data, plot(DateTime, Global_active_power, type = "l",
                    xlab = "", ylab = "Global Active Power"))
    with(data, plot(DateTime, Voltage, type = "l",
                    xlab = "datetime", ylab = "Voltage"))
    ## Empty frame, then one line per sub-metering channel.
    with(data, plot(DateTime, Sub_metering_1, type = "n",
                    xlab = "", ylab = "Energy sub metering"))
    with(data, lines(DateTime, Sub_metering_1, col = "black"))
    with(data, lines(DateTime, Sub_metering_2, col = "red"))
    with(data, lines(DateTime, Sub_metering_3, col = "blue"))
    legend("topright", bty = "n", lty = "solid",
           col = c("black", "red", "blue"),
           legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
    with(data, plot(DateTime, Global_reactive_power, type = "l",
                    xlab = "datetime", ylab = "Global Reactive Power"))
    ## Copy the screen plot to a 480x480 PNG, then close that device.
    dev.copy(png, width = 480, height = 480, file = "plot4.png")
    dev.off()
}
| /plot4.R | no_license | anace/ExData_Plotting1 | R | false | false | 1,217 | r | plot4<-function(){
data<-read.table("household_power_consumption.txt", header=TRUE, sep=";", nrows=69520, colClasses="character")
data<-data[66637:69516,]
DateTime<-paste(data[,"Date"], data[,"Time"], sep=" ")
DateTime<-strptime(DateTime, "%d/%m/%Y %X")
data<-cbind(DateTime, data)
data<-subset(data, select=-c(Date,Time))
index<-2:8
for(i in index){
data[,i]<-as.numeric(data[,i])
}
par(mfrow = c(2,2))
with(data, plot(DateTime, Global_active_power, type="l", xlab="", ylab="Global Active Power"))
with(data, plot(DateTime, Voltage, type="l", xlab="datetime", ylab="Voltage"))
with(data, plot(DateTime, Sub_metering_1, type="n", xlab="", ylab="Energy sub metering"))
with(data, lines(DateTime, Sub_metering_1, type="l", col="black"))
with(data, lines(DateTime, Sub_metering_2, type="l", col="red"))
with(data, lines(DateTime, Sub_metering_3, type="l", col="blue"))
legend("topright", bty="n", lty = "solid", col = c("black", "red", "blue"), legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"))
with(data, plot(DateTime, Global_reactive_power, type="l", xlab="datetime", ylab="Global Reactive Power"))
dev.copy(png, width=480, height=480, file = "plot4.png")
dev.off()
}
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
warning = F,
fig.align = "center"
)
devtools::load_all()
library(tidyverse)
## ---- eval = F----------------------------------------------------------------
# library(tidyverse)
# library(anomalize)
## -----------------------------------------------------------------------------
# Data on `lubridate` package daily downloads
lubridate_download_history <- tidyverse_cran_downloads %>%
filter(package == "lubridate") %>%
ungroup()
# Output first 10 observations
lubridate_download_history %>%
head(10) %>%
knitr::kable()
## ---- fig.show='hold', fig.height=7, fig.align='default'----------------------
# STL Decomposition Method
p1 <- lubridate_download_history %>%
time_decompose(count,
method = "stl",
frequency = "1 week",
trend = "3 months") %>%
anomalize(remainder) %>%
plot_anomaly_decomposition() +
ggtitle("STL Decomposition")
# Twitter Decomposition Method
p2 <- lubridate_download_history %>%
time_decompose(count,
method = "twitter",
frequency = "1 week",
trend = "3 months") %>%
anomalize(remainder) %>%
plot_anomaly_decomposition() +
ggtitle("Twitter Decomposition")
# Show plots
p1
p2
## ---- fig.height=3, fig.width=5-----------------------------------------------
# Generate anomalies
set.seed(100)
x <- rnorm(100)
idx_outliers <- sample(100, size = 5)
x[idx_outliers] <- x[idx_outliers] + 10
# Visualize simulated anomalies
qplot(1:length(x), x,
main = "Simulated Anomalies",
xlab = "Index")
## ---- fig.show="hold", fig.width=5--------------------------------------------
# Analyze outliers: Outlier Report is available with verbose = TRUE
iqr_outliers <- iqr(x, alpha = 0.05, max_anoms = 0.2, verbose = TRUE)$outlier_report
gesd_outliers <- gesd(x, alpha = 0.05, max_anoms = 0.2, verbose = TRUE)$outlier_report
# Plotting function for anomaly plots: ranked values with outlier limits
# and index labels.
ggsetup <- function(data) {
  ggplot(data, aes(rank, value, color = outlier)) +
    geom_point() +
    geom_line(aes(y = limit_upper), color = "red", linetype = 2) +
    geom_line(aes(y = limit_lower), color = "red", linetype = 2) +
    geom_text(aes(label = index), vjust = -1.25) +
    theme_bw() +
    scale_color_manual(values = c("No" = "#2c3e50", "Yes" = "#e31a1c")) +
    expand_limits(y = 13) +
    theme(legend.position = "bottom")
}
# Visualize
p3 <- iqr_outliers %>%
ggsetup() +
ggtitle("IQR: Top outliers sorted by rank")
p4 <- gesd_outliers %>%
ggsetup() +
ggtitle("GESD: Top outliers sorted by rank")
# Show plots
p3
p4
| /inst/doc/anomalize_methods.R | no_license | cran/anomalize | R | false | false | 2,832 | r | ## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
warning = F,
fig.align = "center"
)
devtools::load_all()
library(tidyverse)
## ---- eval = F----------------------------------------------------------------
# library(tidyverse)
# library(anomalize)
## -----------------------------------------------------------------------------
# Data on `lubridate` package daily downloads
lubridate_download_history <- tidyverse_cran_downloads %>%
filter(package == "lubridate") %>%
ungroup()
# Output first 10 observations
lubridate_download_history %>%
head(10) %>%
knitr::kable()
## ---- fig.show='hold', fig.height=7, fig.align='default'----------------------
# STL Decomposition Method
p1 <- lubridate_download_history %>%
time_decompose(count,
method = "stl",
frequency = "1 week",
trend = "3 months") %>%
anomalize(remainder) %>%
plot_anomaly_decomposition() +
ggtitle("STL Decomposition")
# Twitter Decomposition Method
p2 <- lubridate_download_history %>%
time_decompose(count,
method = "twitter",
frequency = "1 week",
trend = "3 months") %>%
anomalize(remainder) %>%
plot_anomaly_decomposition() +
ggtitle("Twitter Decomposition")
# Show plots
p1
p2
## ---- fig.height=3, fig.width=5-----------------------------------------------
# Generate anomalies
set.seed(100)
x <- rnorm(100)
idx_outliers <- sample(100, size = 5)
x[idx_outliers] <- x[idx_outliers] + 10
# Visualize simulated anomalies
qplot(1:length(x), x,
main = "Simulated Anomalies",
xlab = "Index")
## ---- fig.show="hold", fig.width=5--------------------------------------------
# Analyze outliers: Outlier Report is available with verbose = TRUE
iqr_outliers <- iqr(x, alpha = 0.05, max_anoms = 0.2, verbose = TRUE)$outlier_report
gesd_outliers <- gesd(x, alpha = 0.05, max_anoms = 0.2, verbose = TRUE)$outlier_report
# plotting function for anomaly plots
ggsetup <- function(data) {
data %>%
ggplot(aes(rank, value, color = outlier)) +
geom_point() +
geom_line(aes(y = limit_upper), color = "red", linetype = 2) +
geom_line(aes(y = limit_lower), color = "red", linetype = 2) +
geom_text(aes(label = index), vjust = -1.25) +
theme_bw() +
scale_color_manual(values = c("No" = "#2c3e50", "Yes" = "#e31a1c")) +
expand_limits(y = 13) +
theme(legend.position = "bottom")
}
# Visualize
p3 <- iqr_outliers %>%
ggsetup() +
ggtitle("IQR: Top outliers sorted by rank")
p4 <- gesd_outliers %>%
ggsetup() +
ggtitle("GESD: Top outliers sorted by rank")
# Show plots
p3
p4
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_and_reporting.R
\name{umxMI}
\alias{umxMI}
\title{Report modifications which would improve fit.}
\usage{
umxMI(
model = NA,
matrices = NA,
full = TRUE,
numInd = NA,
typeToShow = "both",
decreasing = TRUE
)
}
\arguments{
\item{model}{An \code{\link[=mxModel]{mxModel()}} for which to report modification indices}
\item{matrices}{which matrices to test. The default (NA) will test A & S for RAM models}
\item{full}{Change in fit allowing all parameters to move. If FALSE only the parameter under test can move.}
\item{numInd}{How many modifications to report. Use -1 for all. Default (NA) will report all over 6.63 (p = .01)}
\item{typeToShow}{Whether to show additions or deletions (default = "both")}
\item{decreasing}{How to sort (default = TRUE, decreasing)}
}
\description{
This function uses the mechanical modification-indices approach to detect single paths which, if added
or dropped, would improve fit.
}
\details{
Notes:
\enumerate{
\item Runs much faster with full = FALSE (but this does not allow the model to re-fit around the newly-
freed parameter).
\item Compared to mxMI, this function returns top changes, and also suppresses the run message.
\item Finally, of course: see the requirements for (legitimate) post-hoc modeling in \code{\link[=mxMI]{mxMI()}}
You are almost certainly doing better science when testing competing models rather than modifying a model to fit.
}
}
\examples{
require(umx)
data(demoOneFactor)
manifests = names(demoOneFactor)
m1 = umxRAM("One Factor", data = demoOneFactor, type = "cov",
umxPath("G", to = manifests),
umxPath(var = manifests),
umxPath(var = "G", fixedAt = 1)
)
# umxMI(m1, full=FALSE)
}
\references{
\itemize{
\item \url{https://www.github.com/tbates/umx}
}
}
\seealso{
\itemize{
\item \code{\link[=mxMI]{mxMI()}}
}
Other Modify or Compare Models:
\code{\link{umxEquate}()},
\code{\link{umxFixAll}()},
\code{\link{umxModify}()},
\code{\link{umxSetParameters}()},
\code{\link{umxUnexplainedCausalNexus}()},
\code{\link{umx}}
}
\concept{Modify or Compare Models}
| /man/umxMI.Rd | no_license | Davidpjmp/umx | R | false | true | 2,125 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_and_reporting.R
\name{umxMI}
\alias{umxMI}
\title{Report modifications which would improve fit.}
\usage{
umxMI(
model = NA,
matrices = NA,
full = TRUE,
numInd = NA,
typeToShow = "both",
decreasing = TRUE
)
}
\arguments{
\item{model}{An \code{\link[=mxModel]{mxModel()}} for which to report modification indices}
\item{matrices}{which matrices to test. The default (NA) will test A & S for RAM models}
\item{full}{Change in fit allowing all parameters to move. If FALSE only the parameter under test can move.}
\item{numInd}{How many modifications to report. Use -1 for all. Default (NA) will report all over 6.63 (p = .01)}
\item{typeToShow}{Whether to show additions or deletions (default = "both")}
\item{decreasing}{How to sort (default = TRUE, decreasing)}
}
\description{
This function uses the mechanical modification-indices approach to detect single paths which, if added
or dropped, would improve fit.
}
\details{
Notes:
\enumerate{
\item Runs much faster with full = FALSE (but this does not allow the model to re-fit around the newly-
freed parameter).
\item Compared to mxMI, this function returns top changes, and also suppresses the run message.
\item Finally, of course: see the requirements for (legitimate) post-hoc modeling in \code{\link[=mxMI]{mxMI()}}
You are almost certainly doing better science when testing competing models rather than modifying a model to fit.
}
}
\examples{
require(umx)
data(demoOneFactor)
manifests = names(demoOneFactor)
m1 = umxRAM("One Factor", data = demoOneFactor, type = "cov",
umxPath("G", to = manifests),
umxPath(var = manifests),
umxPath(var = "G", fixedAt = 1)
)
# umxMI(m1, full=FALSE)
}
\references{
\itemize{
\item \url{https://www.github.com/tbates/umx}
}
}
\seealso{
\itemize{
\item \code{\link[=mxMI]{mxMI()}}
}
Other Modify or Compare Models:
\code{\link{umxEquate}()},
\code{\link{umxFixAll}()},
\code{\link{umxModify}()},
\code{\link{umxSetParameters}()},
\code{\link{umxUnexplainedCausalNexus}()},
\code{\link{umx}}
}
\concept{Modify or Compare Models}
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
library(dplyr, warn.conflicts = FALSE)
# randomize order of rows in test data
tbl <- slice_sample(example_data_for_sorting, prop = 1L)
test_that("sort(Scalar) is identity function", {
int <- Scalar$create(42L)
expect_equal(sort(int), int)
dbl <- Scalar$create(3.14)
expect_equal(sort(dbl), dbl)
chr <- Scalar$create("foo")
expect_equal(sort(chr), chr)
})
test_that("Array$SortIndices()", {
int <- tbl$int
# Remove ties because they could give non-deterministic sort indices, and this
# test compares sort indices. Other tests compare sorted values, which are
# deterministic in the case of ties.
int <- int[!duplicated(int)]
expect_equal(
Array$create(int)$SortIndices(),
Array$create(order(int) - 1L, type = uint64())
)
# TODO(ARROW-14085): remove workaround once NA behavior is supported
int <- na.omit(int)
expect_equal(
Array$create(int)$SortIndices(descending = TRUE),
Array$create(rev(order(int)) - 1, type = uint64())
)
})
test_that("ChunkedArray$SortIndices()", {
int <- tbl$int
# Remove ties because they could give non-deterministic sort indices, and this
# test compares sort indices. Other tests compare sorted values, which are
# deterministic in the case of ties.
int <- int[!duplicated(int)]
expect_equal(
ChunkedArray$create(int[1:4], int[5:length(int)])$SortIndices(),
Array$create(order(int) - 1L, type = uint64())
)
# TODO(ARROW-14085): remove workaround once NA behavior is supported
int <- na.omit(int)
expect_equal(
ChunkedArray$create(int[1:4], int[5:length(int)])$SortIndices(descending = TRUE),
Array$create(rev(order(int)) - 1, type = uint64())
)
})
test_that("sort(vector), sort(Array), sort(ChunkedArray) give equivalent results on integers", {
compare_expression(
sort(.input),
tbl$int
)
compare_expression(
sort(.input, na.last = NA),
tbl$int
)
compare_expression(
sort(.input, na.last = TRUE),
tbl$int
)
compare_expression(
sort(.input, na.last = FALSE),
tbl$int
)
compare_expression(
sort(.input, decreasing = TRUE),
tbl$int,
)
compare_expression(
sort(.input, decreasing = TRUE, na.last = TRUE),
tbl$int,
)
compare_expression(
sort(.input, decreasing = TRUE, na.last = FALSE),
tbl$int,
)
})
test_that("sort(vector), sort(Array), sort(ChunkedArray) give equivalent results on strings", {
compare_expression(
sort(.input, decreasing = TRUE, na.last = FALSE),
tbl$chr
)
compare_expression(
sort(.input, decreasing = TRUE, na.last = FALSE),
tbl$chr
)
})
test_that("sort(vector), sort(Array), sort(ChunkedArray) give equivalent results on floats", {
test_vec <- tbl$dbl
# Arrow sorts NA and NaN differently, but it's not important, so eliminate here
test_vec[is.nan(test_vec)] <- NA_real_
compare_expression(
sort(.input, decreasing = TRUE, na.last = TRUE),
test_vec
)
compare_expression(
sort(.input, decreasing = FALSE, na.last = TRUE),
test_vec
)
compare_expression(
sort(.input, decreasing = TRUE, na.last = NA),
test_vec
)
compare_expression(
sort(.input, decreasing = TRUE, na.last = FALSE),
test_vec,
)
compare_expression(
sort(.input, decreasing = FALSE, na.last = NA),
test_vec
)
compare_expression(
sort(.input, decreasing = FALSE, na.last = FALSE),
test_vec,
)
})
test_that("Table$SortIndices()", {
x <- Table$create(tbl)
expect_identical(
as.vector(x$Take(x$SortIndices("chr"))$chr),
sort(tbl$chr, na.last = TRUE)
)
expect_identical(
as.data.frame(x$Take(x$SortIndices(c("int", "dbl"), c(FALSE, FALSE)))),
tbl %>% arrange(int, dbl)
)
})
test_that("RecordBatch$SortIndices()", {
x <- record_batch(tbl)
expect_identical(
as.data.frame(x$Take(x$SortIndices(c("chr", "int", "dbl"), TRUE))),
tbl %>% arrange(desc(chr), desc(int), desc(dbl))
)
})
| /tests/testthat/test-compute-sort.R | no_license | cran/arrow | R | false | false | 4,692 | r | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
library(dplyr, warn.conflicts = FALSE)
# randomize order of rows in test data
tbl <- slice_sample(example_data_for_sorting, prop = 1L)
test_that("sort(Scalar) is identity function", {
int <- Scalar$create(42L)
expect_equal(sort(int), int)
dbl <- Scalar$create(3.14)
expect_equal(sort(dbl), dbl)
chr <- Scalar$create("foo")
expect_equal(sort(chr), chr)
})
test_that("Array$SortIndices()", {
int <- tbl$int
# Remove ties because they could give non-deterministic sort indices, and this
# test compares sort indices. Other tests compare sorted values, which are
# deterministic in the case of ties.
int <- int[!duplicated(int)]
expect_equal(
Array$create(int)$SortIndices(),
Array$create(order(int) - 1L, type = uint64())
)
# TODO(ARROW-14085): remove workaround once NA behavior is supported
int <- na.omit(int)
expect_equal(
Array$create(int)$SortIndices(descending = TRUE),
Array$create(rev(order(int)) - 1, type = uint64())
)
})
test_that("ChunkedArray$SortIndices()", {
int <- tbl$int
# Remove ties because they could give non-deterministic sort indices, and this
# test compares sort indices. Other tests compare sorted values, which are
# deterministic in the case of ties.
int <- int[!duplicated(int)]
expect_equal(
ChunkedArray$create(int[1:4], int[5:length(int)])$SortIndices(),
Array$create(order(int) - 1L, type = uint64())
)
# TODO(ARROW-14085): remove workaround once NA behavior is supported
int <- na.omit(int)
expect_equal(
ChunkedArray$create(int[1:4], int[5:length(int)])$SortIndices(descending = TRUE),
Array$create(rev(order(int)) - 1, type = uint64())
)
})
test_that("sort(vector), sort(Array), sort(ChunkedArray) give equivalent results on integers", {
compare_expression(
sort(.input),
tbl$int
)
compare_expression(
sort(.input, na.last = NA),
tbl$int
)
compare_expression(
sort(.input, na.last = TRUE),
tbl$int
)
compare_expression(
sort(.input, na.last = FALSE),
tbl$int
)
compare_expression(
sort(.input, decreasing = TRUE),
tbl$int,
)
compare_expression(
sort(.input, decreasing = TRUE, na.last = TRUE),
tbl$int,
)
compare_expression(
sort(.input, decreasing = TRUE, na.last = FALSE),
tbl$int,
)
})
test_that("sort(vector), sort(Array), sort(ChunkedArray) give equivalent results on strings", {
compare_expression(
sort(.input, decreasing = TRUE, na.last = FALSE),
tbl$chr
)
compare_expression(
sort(.input, decreasing = TRUE, na.last = FALSE),
tbl$chr
)
})
test_that("sort(vector), sort(Array), sort(ChunkedArray) give equivalent results on floats", {
test_vec <- tbl$dbl
# Arrow sorts NA and NaN differently, but it's not important, so eliminate here
test_vec[is.nan(test_vec)] <- NA_real_
compare_expression(
sort(.input, decreasing = TRUE, na.last = TRUE),
test_vec
)
compare_expression(
sort(.input, decreasing = FALSE, na.last = TRUE),
test_vec
)
compare_expression(
sort(.input, decreasing = TRUE, na.last = NA),
test_vec
)
compare_expression(
sort(.input, decreasing = TRUE, na.last = FALSE),
test_vec,
)
compare_expression(
sort(.input, decreasing = FALSE, na.last = NA),
test_vec
)
compare_expression(
sort(.input, decreasing = FALSE, na.last = FALSE),
test_vec,
)
})
test_that("Table$SortIndices()", {
x <- Table$create(tbl)
expect_identical(
as.vector(x$Take(x$SortIndices("chr"))$chr),
sort(tbl$chr, na.last = TRUE)
)
expect_identical(
as.data.frame(x$Take(x$SortIndices(c("int", "dbl"), c(FALSE, FALSE)))),
tbl %>% arrange(int, dbl)
)
})
test_that("RecordBatch$SortIndices()", {
x <- record_batch(tbl)
expect_identical(
as.data.frame(x$Take(x$SortIndices(c("chr", "int", "dbl"), TRUE))),
tbl %>% arrange(desc(chr), desc(int), desc(dbl))
)
})
|
set.seed(3)
# small circle sample
X <- tdaunif::sample_circle(n = 12L)
# random seed index: the landmark procedure starts from a random point
l <- landmarks_maxmin(X, seed_index = "random")
# plot landmark order at point positions
plot(X, asp = 1, pch = NA)
text(X, labels = order(l))
# minmax seed index
l <- landmarks_maxmin(X, seed_index = "minmax")
# plot landmark order at point positions
plot(X, asp = 1, pch = NA)
text(X, labels = order(l))
library(landmark)
## Iris data set example: 2-D PCA projection, 15 landmarks with cover sets
iris_pca <- prcomp(iris[,1:4], retx = TRUE, rank. = 2)
lm <- landmarks_maxmin(iris_pca$x, num = 15, cover = TRUE)
## Helper function: draw a circle outline with the given center and radius
## on the current plot; '...' is forwarded to lines() (e.g. 'col').
draw_circle <- function(center, radius, ...){
  ## Spell out 'length.out' -- the original relied on partial matching of
  ## 'length', which lintr flags and which breaks if seq() ever gains a
  ## 'length' argument.
  theta <- seq(0, 2 * pi, length.out = 200)
  lines(x = radius * cos(theta) + center[1], y = radius * sin(theta) + center[2], ...)
}
## Landmark colors (one distinct color per landmark, shuffled)
pt_colors <- sample(rainbow(15))
## Plot the points + landmarks
plot(iris_pca$x, asp = 1)
points(iris_pca$x[lm$landmark,], pch = 20, col = pt_colors)
## Draw colored balls around each landmark; the cover radius is stored as
## an attribute on the landmarks object.
## seq_along() replaces seq(length(...)), which misbehaves on length 0.
for (i in seq_along(lm$landmark)){
  lm_idx <- lm$landmark[i]
  draw_circle(iris_pca$x[lm_idx,], radius = attr(lm, "cover_radius"), col = pt_colors[i])
}
## Draw a segment from each covered point to its landmark
for (i in seq_along(lm$landmark)){
  cover <- lm$cover_set[[i]]
  for (pt_idx in cover){
    pt <- iris_pca$x[pt_idx,]
    landmark_pt <- iris_pca$x[lm$landmark[i],]
    segments(x0 = pt[1], x1 = landmark_pt[1], y0 = pt[2], y1 = landmark_pt[2],
             col = adjustcolor("gray", alpha.f = 0.40))
  }
}
## Re-draw the landmark points on top of the segments
points(iris_pca$x[lm$landmark,], pch = 20, col = pt_colors)
| /inst/examples/ex-landmarks-maxmin.r | no_license | peekxc/landmark | R | false | false | 1,536 | r | set.seed(3)
# small circle sample
X <- tdaunif::sample_circle(n = 12L)
# random seed index
l <- landmarks_maxmin(X, seed_index = "random")
# plot landmark order at point positions
plot(X, asp = 1, pch = NA)
text(X, labels = order(l))
# minmax seed index
l <- landmarks_maxmin(X, seed_index = "minmax")
# plot landmark order at point positions
plot(X, asp = 1, pch = NA)
text(X, labels = order(l))
library(landmark)
## Iris data set example
iris_pca <- prcomp(iris[,1:4], retx = TRUE, rank. = 2)
lm <- landmarks_maxmin(iris_pca$x, num = 15, cover = TRUE)
## Helper function
draw_circle <- function(center, radius, ...){
theta <- seq(0, 2 * pi, length = 200)
lines(x = radius * cos(theta) + center[1], y = radius * sin(theta) + center[2], ...)
}
## Landmark colors
pt_colors <- sample(rainbow(15))
## Plot the points + landmarks
plot(iris_pca$x, asp = 1)
points(iris_pca$x[lm$landmark,], pch = 20, col = pt_colors)
## Draw colored balls around each landmark
for (i in seq(length(lm$landmark))){
lm_idx <- lm$landmark[i]
draw_circle(iris_pca$x[lm_idx,], radius = attr(lm, "cover_radius"), col = pt_colors[i])
}
## Draw a segment moving from each point to its landmark
for (i in seq(length(lm$landmark))){
cover <- lm$cover_set[[i]]
for (pt_idx in cover){
pt <- iris_pca$x[pt_idx,]
landmark_pt <- iris_pca$x[lm$landmark[i],]
segments(x0 = pt[1], x1 = landmark_pt[1], y0 = pt[2], y1 = landmark_pt[2], col = adjustcolor("gray", alpha.f = 0.40))
}
}
points(iris_pca$x[lm$landmark,], pch = 20, col = pt_colors)
|
# Exercise 2: using built-in string functions
# Create a variable `lyric` that contains the text "I like to eat apples and
# bananas"
lyric <- "I like to eat apples and bananas"
# Use the `substr()` function to extract the 1st through 13th letters from the
# `lyric`, and store the result in a variable called `intro`
# Use `?substr` to see more about this function
?substr
intro <- substr(lyric, 1, 13)
# Use the `substr()` function to extract the 15th through the last letter of the
# `lyric`, and store the result in a variable called `fruits`
# Hint: use `nchar()` to determine how many total letters there are!
fruits <- substr(lyric, 15, nchar(lyric))
# Use the `gsub()` function to substitute all the "a"s in `fruits` with "ee".
# Store the result in a variable called `fruits_e`
# Hint: see http://www.endmemo.com/program/R/sub.php for a simpmle example (or
# use `?gsub`)
fruits_e <- gsub("a", "ee", fruits)
fruits_e
# Use the `gsub()` function to substitute all the "a"s in `fruits` with "o".
# Store the result in a variable called `fruits_o`
fruits_o <- gsub("a", "o", fruits)
fruits_o
# Create a new variable `lyric_e` that is the `intro` combined with the new
# `fruits_e` ending. Print out this variable
lyric_e <- paste(intro, fruits_e)
lyric_e
# Without making a new variable, print out the `intro` combined with the new
# `fruits_o` ending
print(paste(intro, fruits_o))
| /chapter-06-exercises/exercise-2/exercise.R | permissive | dano-uw/book-exercises | R | false | false | 1,402 | r | # Exercise 2: using built-in string functions
# Create a variable `lyric` that contains the text "I like to eat apples and
# bananas"
lyric <- "I like to eat apples and bananas"
# Use the `substr()` function to extract the 1st through 13th letters from the
# `lyric`, and store the result in a variable called `intro`
# Use `?substr` to see more about this function
?substr
intro <- substr(lyric, 1, 13)
# Use the `substr()` function to extract the 15th through the last letter of the
# `lyric`, and store the result in a variable called `fruits`
# Hint: use `nchar()` to determine how many total letters there are!
fruits <- substr(lyric, 15, nchar(lyric))
# Use the `gsub()` function to substitute all the "a"s in `fruits` with "ee".
# Store the result in a variable called `fruits_e`
# Hint: see http://www.endmemo.com/program/R/sub.php for a simpmle example (or
# use `?gsub`)
fruits_e <- gsub("a", "ee", fruits)
fruits_e
# Use the `gsub()` function to substitute all the "a"s in `fruits` with "o".
# Store the result in a variable called `fruits_o`
fruits_o <- gsub("a", "o", fruits)
fruits_o
# Create a new variable `lyric_e` that is the `intro` combined with the new
# `fruits_e` ending. Print out this variable
lyric_e <- paste(intro, fruits_e)
lyric_e
# Without making a new variable, print out the `intro` combined with the new
# `fruits_o` ending
print(paste(intro, fruits_o))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reporting-fun-files.R
\name{generate_files_opt_report}
\alias{generate_files_opt_report}
\title{Report possible optimizations in `.R` files.}
\usage{
generate_files_opt_report(files, optimizers = rco:::all_optimizers)
}
\arguments{
\item{files}{A character vector with paths to files to optimize.}
\item{optimizers}{A named list of optimizer functions.}
}
\description{
Report possible optimizations in `.R` files.
}
| /man/generate_files_opt_report.Rd | no_license | jcrodriguez1989/rco | R | false | true | 496 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reporting-fun-files.R
\name{generate_files_opt_report}
\alias{generate_files_opt_report}
\title{Report possible optimizations in `.R` files.}
\usage{
generate_files_opt_report(files, optimizers = rco:::all_optimizers)
}
\arguments{
\item{files}{A character vector with paths to files to optimize.}
\item{optimizers}{A named list of optimizer functions.}
}
\description{
Report possible optimizations in `.R` files.
}
|
test_process <- function(text, roccers = base_roccers()) {
rocblocks <- parse_block(text)
roxy_process(rocblocks, roccers)[[length(rocblocks)]]$roc
}
test_output <- function(text, roccers = base_roccers()) {
rocblocks <- parse_block(text)
rocblocks <- roxy_process(rocblocks, roccers)
out <- roxy_out(rocblocks, roccers)
roxy_postproc(out)
}
| /R/test.r | no_license | vspinu/roxygen3 | R | false | false | 358 | r | test_process <- function(text, roccers = base_roccers()) {
rocblocks <- parse_block(text)
roxy_process(rocblocks, roccers)[[length(rocblocks)]]$roc
}
test_output <- function(text, roccers = base_roccers()) {
rocblocks <- parse_block(text)
rocblocks <- roxy_process(rocblocks, roccers)
out <- roxy_out(rocblocks, roccers)
roxy_postproc(out)
}
|
library(readxl)
library(dplyr)
library(tidyr)
setwd("D:/ABCN/Github/MDv01sesion2_parte-practica/3 Tasa de empleo informal")
informal <- readxl::read_xlsx("peao-cuad-7.xlsx",skip = 17)
informal <- informal[complete.cases(informal$...2),]
names(informal)
names(informal)[2:12] <- c(2008:2018)
sapply(informal,class)
informal_segun_departamento<-informal %>%
pivot_longer(-Departamento, names_to = "Periodo", values_to = "Informalidad")
# pueden hacer lo mismo para area de residencia y region natural y graficar en ggplot2. Fijarse del ejemplo BD2 de sesion 2.
| /3 Tasa de empleo informal/1 analisis exploratorio.R | no_license | arturochian/MDv01sesion2_parte-practica | R | false | false | 566 | r | library(readxl)
library(dplyr)
library(tidyr)
setwd("D:/ABCN/Github/MDv01sesion2_parte-practica/3 Tasa de empleo informal")
informal <- readxl::read_xlsx("peao-cuad-7.xlsx",skip = 17)
informal <- informal[complete.cases(informal$...2),]
names(informal)
names(informal)[2:12] <- c(2008:2018)
sapply(informal,class)
informal_segun_departamento<-informal %>%
pivot_longer(-Departamento, names_to = "Periodo", values_to = "Informalidad")
# pueden hacer lo mismo para area de residencia y region natural y graficar en ggplot2. Fijarse del ejemplo BD2 de sesion 2.
|
#' @title Robust Effect Size
#' @description computes a Hodges-Lehmann estimator-based effect size
#' @param x numeric vector of length m
#' @param y numeric vector of length n
#' @param th numeric threshold value for the scale of the two distributions
#' to avoid unintentional large \eqn{\Delta} values when expression levels are near zero
#' @return \eqn{\Delta} numeric value
#' @details \eqn{\Delta} is defined as the Hodges-Lehmann two-sample estimator scaled by the median absolute deviation about the
#' two-sampleHodges-Lehmann estimator. Currently, only input vectors with maximum length 1024 are allowed for feasible computation.
#' If their size exceeds this limit, data can be downsampled using \code{\link{exprs2con}}.
#' @examples
#' \dontrun{
#' # without outliers, compare to Cohen's d
#' Cohen <- function(x,y) {
#' n <- length(x)-1
#' m <- length(y)-1
#' d <- mean(x)-mean(y)
#' pv <- (n*var(x)+m*var(y))/(n+m)
#' d/sqrt(pv)
#' }
#' x <- rnorm(512, 10, 2)
#' y <- rnorm(512, 14, 2)
#' Cohen(x, y)
#' robDelta(x, y)
#'
#' # with outliers
#' x <- c(rnorm(480, 10, 2), rnorm(32, 21, 2))
#' Cohen(x, y)
#' robDelta(x, y)
#' }
#' @references
#' P. Rousseeuw and C. Croux (1992), Explicit Scale Estimators with High Breakdown point, L1-Statistical Analysis and Related Methods
#' @seealso
#' \code{\link[stats]{median}}
#' \code{\link[stats]{mad}}
#' @rdname robDelta
#' @export
#' @importFrom stats median mad
robDelta <- function(x, y, th = 0) {
if (length(x) > 1024L || length(y) > 1024L) {
stop("too many observations to compute delta - use expr2con() for downsampling")
}
out <- outer(x, y, "-")
m <- stats::median(out) # HL
return(sqrt(2)*m/max(stats::mad(out), th))
}
| /NNhuMG/R/robDelta.R | no_license | steschlick/Boettcher_et_al_NN_2018 | R | false | false | 1,782 | r | #' @title Robust Effect Size
#' @description computes a Hodges-Lehmann estimator-based effect size
#' @param x numeric vector of length m
#' @param y numeric vector of length n
#' @param th numeric threshold value for the scale of the two distributions
#' to avoid unintentional large \eqn{\Delta} values when expression levels are near zero
#' @return \eqn{\Delta} numeric value
#' @details \eqn{\Delta} is defined as the Hodges-Lehmann two-sample estimator scaled by the median absolute deviation about the
#' two-sampleHodges-Lehmann estimator. Currently, only input vectors with maximum length 1024 are allowed for feasible computation.
#' If their size exceeds this limit, data can be downsampled using \code{\link{exprs2con}}.
#' @examples
#' \dontrun{
#' # without outliers, compare to Cohen's d
#' Cohen <- function(x,y) {
#' n <- length(x)-1
#' m <- length(y)-1
#' d <- mean(x)-mean(y)
#' pv <- (n*var(x)+m*var(y))/(n+m)
#' d/sqrt(pv)
#' }
#' x <- rnorm(512, 10, 2)
#' y <- rnorm(512, 14, 2)
#' Cohen(x, y)
#' robDelta(x, y)
#'
#' # with outliers
#' x <- c(rnorm(480, 10, 2), rnorm(32, 21, 2))
#' Cohen(x, y)
#' robDelta(x, y)
#' }
#' @references
#' P. Rousseeuw and C. Croux (1992), Explicit Scale Estimators with High Breakdown point, L1-Statistical Analysis and Related Methods
#' @seealso
#' \code{\link[stats]{median}}
#' \code{\link[stats]{mad}}
#' @rdname robDelta
#' @export
#' @importFrom stats median mad
robDelta <- function(x, y, th = 0) {
if (length(x) > 1024L || length(y) > 1024L) {
stop("too many observations to compute delta - use expr2con() for downsampling")
}
out <- outer(x, y, "-")
m <- stats::median(out) # HL
return(sqrt(2)*m/max(stats::mad(out), th))
}
|
manipulate <- function(raw) {
raw <- raw[raw[["Source"]] == "Osada et al. (2003) Forest Ecology and Management", ]
raw[["SLA"]] <- (1/raw[["SLA"]])*10000
raw[["LMA"]] <- mapply(function(x,y){sum(c(x,y),na.rm=TRUE)},raw[["SLA"]],raw[["LMA"]])
raw
}
| /data/Osada2003/dataManipulate.R | permissive | hrlai/baad | R | false | false | 272 | r | manipulate <- function(raw) {
raw <- raw[raw[["Source"]] == "Osada et al. (2003) Forest Ecology and Management", ]
raw[["SLA"]] <- (1/raw[["SLA"]])*10000
raw[["LMA"]] <- mapply(function(x,y){sum(c(x,y),na.rm=TRUE)},raw[["SLA"]],raw[["LMA"]])
raw
}
|
### Code for creating weighted ensembles
###############################################################################
###############################################################################
# Weighted ensemble method 1: 'Manual entry'
### Process text inputs for weights and return vector of weights
create_ens_weights_manual <- reactive({
preds.weights <- suppressWarnings(
esdm_parse_num(req(input$create_ens_weight_manual))
# as.numeric(unlist(strsplit(req(input$create_ens_weight_manual), ",")))
)
validate(
need(!anyNA(preds.weights),
paste("Error: One or more of the weights was not recognized as",
"a number; please ensure that all of the weights are numbers",
"separated by a comma and a space"))
)
if (input$create_ens_table_subset) {
models.num <- length(input$create_ens_datatable_rows_selected)
} else {
models.num <- length(vals$overlaid.models)
}
# Validate weights input
validate(
need(length(preds.weights) == models.num,
paste("Error: The number of entered weights does not",
"match the number of selected overlaid predictions"))
)
validate(
need(all(preds.weights > 0),
"Error: All entered weights must be greater than zero")
)
validate(
need(round(sum(preds.weights), 3) == 1,
"Error: The entered weights do not sum to 1")
)
preds.weights
})
###############################################################################
###############################################################################
# Weighted ensemble method 2: 'Evaluation metric'
### Table of selected metrics
create_ens_weights_metric_table <- reactive({
req(
input$create_ens_weights_metric,
all(create_ens_overlaid_idx() %in% vals$eval.models.idx[[2]])
)
# Get desired metric for desired overlaid models from eval metrics table
eval.metrics <- table_eval_metrics()
idx.col <- which(names(eval.metrics) == input$create_ens_weights_metric)
idx.row <- grep("Overlaid", eval.metrics$Predictions)
idx.row <- idx.row[vals$eval.models.idx[[2]] %in% create_ens_overlaid_idx()]
weights.table <- eval.metrics[idx.row, c(1, idx.col)]
# Prep for display
weights.table$R.weights <- weights.table[, 2] / sum(weights.table[, 2])
names(weights.table)[3] <- "Weights"
row.names(weights.table) <- 1:nrow(weights.table)
weights.table
})
### Return vector of weights based on evaluation metrics
create_ens_weights_metric <- reactive({
# Check that selected predictions have calculated metrics
validate(
need(all(create_ens_overlaid_idx() %in% vals$eval.models.idx[[2]]),
paste("Error: You must calculate at least one metric for all",
"selected overlaid predictions"))
)
create_ens_weights_metric_table()[, 3]
})
###############################################################################
###############################################################################
# Weighted ensemble method 3: 'Pixel-level spatial weights'
### Vector of idx of selected overlaid models that have spatial weights
create_ens_weights_pix_which <- reactive({
which(!is.na(vapply(vals$overlaid.specs, function(i) i["col_weight"], "1")))
})
### Generate data frame of pixel weights - fed into table and ensemble_create
create_ens_weights_pix <- reactive({
ens.which <- create_ens_overlaid_idx()
ens.which.spatial <- create_ens_weights_pix_which()
# Need validate() call here for ensemble function
validate(
need(any(ens.which.spatial %in% ens.which),
paste("Error: At least one of the selected overlaid predictions",
"must have pixel-level spatial weights"))
)
w.list <- lapply(ens.which, function(i, j, k) {
if (i %in% j) vals$overlaid.models[[i]]$Weight else rep(1, k)
}, j = ens.which.spatial, k = nrow(vals$overlaid.models[[1]]))
purrr::set_names(data.frame(w.list), paste0("w", seq_along(ens.which)))
})
### Table summarizing pixel-level spatial weights of selected overlaid preds
create_ens_weights_pix_table <- reactive({
ens.which <- create_ens_overlaid_idx()
ens.which.spatial <- create_ens_weights_pix_which()
# Before create_ens_weights_pix() call to avoid 'Error' validation
validate(
need(any(ens.which.spatial %in% ens.which),
paste("At least one of the selected overlaid predictions must have",
"pixel-level spatial weights to use this weighting method")),
errorClass = "validation2"
)
ens.pix.w <- create_ens_weights_pix()
data.frame(
Predictions = paste("Overlaid", ens.which),
Min = vapply(ens.pix.w, min, 1, na.rm = TRUE),
Median = vapply(ens.pix.w, median, 1, na.rm = TRUE),
Mean = vapply(ens.pix.w, mean, 1, na.rm = TRUE),
Max = vapply(ens.pix.w, max, 1, na.rm = TRUE),
NAs = vapply(ens.pix.w, function(i) sum(is.na(i)), 1)
)
})
###############################################################################
###############################################################################
# Weighted ensemble method 4: Weighting by the inverse of the variance
### Vector of idx of selected overlaid preds that have associated uncertainty
create_ens_weights_var_which <- reactive({
which(!is.na(vapply(vals$overlaid.specs, function(i) i["col_se"], "1")))
})
### Table summarizing variance values of selected overlaid preds
create_ens_weights_var_table <- reactive({
ens.which <- create_ens_overlaid_idx()
ens.which.var <- create_ens_weights_var_which()
# Need validate() call here for display in-app
validate(
need(all(ens.which %in% ens.which.var),
paste("All of the selected overlaid predictions must have",
"associated uncertainty values to use this weighting method")),
errorClass = "validation2"
)
ens.varvalue <- create_ens_data_rescale()[[2]]
data.frame(
Predictions = paste("Overlaid", ens.which),
Min = vapply(ens.varvalue, min, 1, na.rm = TRUE),
Median = vapply(ens.varvalue, median, 1, na.rm = TRUE),
Mean = vapply(ens.varvalue, mean, 1, na.rm = TRUE),
Max = vapply(ens.varvalue, max, 1, na.rm = TRUE),
NAs = vapply(ens.varvalue, function(i) sum(is.na(i)), 1)
)
})
### Create data frame of weights (1 / var)
# ensemble_create() will normalize each row so it sums to 1
create_ens_weights_var <- reactive({
ens.which <- create_ens_overlaid_idx()
ens.which.var <- create_ens_weights_var_which()
# Need validate() call here for ensemble function
validate(
need(all(ens.which %in% ens.which.var),
paste("Error: All of the selected overlaid predictions must have",
"associated uncertainty values to use this weighting method"))
)
purrr::set_names(
1 / create_ens_data_rescale()[[2]],
paste0("w", seq_along(ens.which))
)
})
###############################################################################
###############################################################################
| /inst/shiny/server_3_createEns/server_3_createEns_create_weighted.R | no_license | cran/eSDM | R | false | false | 7,171 | r | ### Code for creating weighted ensembles
###############################################################################
###############################################################################
# Weighted ensemble method 1: 'Manual entry'
### Process text inputs for weights and return vector of weights
create_ens_weights_manual <- reactive({
preds.weights <- suppressWarnings(
esdm_parse_num(req(input$create_ens_weight_manual))
# as.numeric(unlist(strsplit(req(input$create_ens_weight_manual), ",")))
)
validate(
need(!anyNA(preds.weights),
paste("Error: One or more of the weights was not recognized as",
"a number; please ensure that all of the weights are numbers",
"separated by a comma and a space"))
)
if (input$create_ens_table_subset) {
models.num <- length(input$create_ens_datatable_rows_selected)
} else {
models.num <- length(vals$overlaid.models)
}
# Validate weights input
validate(
need(length(preds.weights) == models.num,
paste("Error: The number of entered weights does not",
"match the number of selected overlaid predictions"))
)
validate(
need(all(preds.weights > 0),
"Error: All entered weights must be greater than zero")
)
validate(
need(round(sum(preds.weights), 3) == 1,
"Error: The entered weights do not sum to 1")
)
preds.weights
})
###############################################################################
###############################################################################
# Weighted ensemble method 2: 'Evaluation metric'
### Table of selected metrics
create_ens_weights_metric_table <- reactive({
req(
input$create_ens_weights_metric,
all(create_ens_overlaid_idx() %in% vals$eval.models.idx[[2]])
)
# Get desired metric for desired overlaid models from eval metrics table
eval.metrics <- table_eval_metrics()
idx.col <- which(names(eval.metrics) == input$create_ens_weights_metric)
idx.row <- grep("Overlaid", eval.metrics$Predictions)
idx.row <- idx.row[vals$eval.models.idx[[2]] %in% create_ens_overlaid_idx()]
weights.table <- eval.metrics[idx.row, c(1, idx.col)]
# Prep for display
weights.table$R.weights <- weights.table[, 2] / sum(weights.table[, 2])
names(weights.table)[3] <- "Weights"
row.names(weights.table) <- 1:nrow(weights.table)
weights.table
})
### Return vector of weights based on evaluation metrics
create_ens_weights_metric <- reactive({
# Check that selected predictions have calculated metrics
validate(
need(all(create_ens_overlaid_idx() %in% vals$eval.models.idx[[2]]),
paste("Error: You must calculate at least one metric for all",
"selected overlaid predictions"))
)
create_ens_weights_metric_table()[, 3]
})
###############################################################################
###############################################################################
# Weighted ensemble method 3: 'Pixel-level spatial weights'
### Vector of idx of selected overlaid models that have spatial weights
create_ens_weights_pix_which <- reactive({
which(!is.na(vapply(vals$overlaid.specs, function(i) i["col_weight"], "1")))
})
### Generate data frame of pixel weights - fed into table and ensemble_create
create_ens_weights_pix <- reactive({
ens.which <- create_ens_overlaid_idx()
ens.which.spatial <- create_ens_weights_pix_which()
# Need validate() call here for ensemble function
validate(
need(any(ens.which.spatial %in% ens.which),
paste("Error: At least one of the selected overlaid predictions",
"must have pixel-level spatial weights"))
)
w.list <- lapply(ens.which, function(i, j, k) {
if (i %in% j) vals$overlaid.models[[i]]$Weight else rep(1, k)
}, j = ens.which.spatial, k = nrow(vals$overlaid.models[[1]]))
purrr::set_names(data.frame(w.list), paste0("w", seq_along(ens.which)))
})
### Table summarizing pixel-level spatial weights of selected overlaid preds
create_ens_weights_pix_table <- reactive({
ens.which <- create_ens_overlaid_idx()
ens.which.spatial <- create_ens_weights_pix_which()
# Before create_ens_weights_pix() call to avoid 'Error' validation
validate(
need(any(ens.which.spatial %in% ens.which),
paste("At least one of the selected overlaid predictions must have",
"pixel-level spatial weights to use this weighting method")),
errorClass = "validation2"
)
ens.pix.w <- create_ens_weights_pix()
data.frame(
Predictions = paste("Overlaid", ens.which),
Min = vapply(ens.pix.w, min, 1, na.rm = TRUE),
Median = vapply(ens.pix.w, median, 1, na.rm = TRUE),
Mean = vapply(ens.pix.w, mean, 1, na.rm = TRUE),
Max = vapply(ens.pix.w, max, 1, na.rm = TRUE),
NAs = vapply(ens.pix.w, function(i) sum(is.na(i)), 1)
)
})
###############################################################################
###############################################################################
# Weighted ensemble method 4: Weighting by the inverse of the variance
### Vector of idx of selected overlaid preds that have associated uncertainty
create_ens_weights_var_which <- reactive({
which(!is.na(vapply(vals$overlaid.specs, function(i) i["col_se"], "1")))
})
### Table summarizing variance values of selected overlaid preds
create_ens_weights_var_table <- reactive({
ens.which <- create_ens_overlaid_idx()
ens.which.var <- create_ens_weights_var_which()
# Need validate() call here for display in-app
validate(
need(all(ens.which %in% ens.which.var),
paste("All of the selected overlaid predictions must have",
"associated uncertainty values to use this weighting method")),
errorClass = "validation2"
)
ens.varvalue <- create_ens_data_rescale()[[2]]
data.frame(
Predictions = paste("Overlaid", ens.which),
Min = vapply(ens.varvalue, min, 1, na.rm = TRUE),
Median = vapply(ens.varvalue, median, 1, na.rm = TRUE),
Mean = vapply(ens.varvalue, mean, 1, na.rm = TRUE),
Max = vapply(ens.varvalue, max, 1, na.rm = TRUE),
NAs = vapply(ens.varvalue, function(i) sum(is.na(i)), 1)
)
})
### Create data frame of weights (1 / var)
# ensemble_create() will normalize each row so it sums to 1
create_ens_weights_var <- reactive({
ens.which <- create_ens_overlaid_idx()
ens.which.var <- create_ens_weights_var_which()
# Need validate() call here for ensemble function
validate(
need(all(ens.which %in% ens.which.var),
paste("Error: All of the selected overlaid predictions must have",
"associated uncertainty values to use this weighting method"))
)
purrr::set_names(
1 / create_ens_data_rescale()[[2]],
paste0("w", seq_along(ens.which))
)
})
###############################################################################
###############################################################################
|
#' @title Return occurrences for taxa within the PBDB/Neotoma.
#' @description A wrapper for the Composite API, returning all records from both datasets.
#'
#' @importFrom jsonlite fromJSON
#' @importFrom httr content GET
#' @importFrom dplyr bind_rows
#' @param x A taxon name, at any level, may use wildcards. Taxonomy follows either Neotoma (morpho-type based) or Paleobiology DB taxonomy.
#' @param lower Include all taxa at an order below the focal taxon (default \code{TRUE}).
#' @param pattern Is the search string a pattern match i.e. a partial or wildcard search (default \code{TRUE})
#' @param ... Other parameters to be passed into the API, described at \url{https://training.paleobiodb.org/comp1.0}.
#'
#' @author Simon J. Goring \email{goring@@wisc.edu}
#' @return More details on the use of these parameters can be obtained from
#' \url{https://training.paleobiodb.org/comp1.0/}.
#'
#' A list of class `occurrence` and `list`. The list is composed of two elements:
#'
#' \item{ \code{records} }{The complete listing of taxon occurrences.}
#' \item{ \code{meta} }{Metadata for the search.}
#'
#' The \code{records} object is a \code{data.frame}
#' \item{ \code{collection_name} }{Site or collection unit name for the record.}
#' \item{ \code{lng} }{Collection site longitude.}
#' \item{ \code{lat} }{Collection site latitude.}
#' \item{ \code{accepted_name} }{The taxon name.}
#' \item{ \code{max_age} }{The oldest of all sample ages (in calendar years before present) in the dataset.}
#' \item{ \code{min_age} }{The youngest of all sample ages (in calendar years before present) in the dataset.}
#' \item{ \code{age_unit} }{The units for age (by default Mya).}
#' \item{ \code{database} }{The database from which the record was obtained.}
#' \item{ \code{occurrence_no} }{The numeric ID for the record within the parent database.}
#' \item{ \code{dataset_no} }{For records within Neotoma, the numeric ID of the dataset from which the sample was obtained.}
#' \item{ \code{accepted_no} }{The numeric identifier for the taxon name from the parent database.}
#' \item{ \code{collection_no} }{The numeric identifier for the collection within the parent database.}
#' \item{ \code{country} }{The country within which the sample is found (if known).}
#' \item{ \code{state} }{The state (when known) within the identified country.}
#'
#' @examples \dontrun{
#' # Search for sites with "Canis" fossils.
#' canis <- get_by_taxon("Canis")
#'
#' # Limit searches to North America (undocumented use of \code{bbox})
#' canis_na <- get_by_taxon("Canis", bbox = c(-180, 20, -20, 90))
#'
#' }
#'
#' @references
#' EarthLife Consortium: http://earthlifeconsortium.org/
#' API Reference: https://training.paleobiodb.org/comp1.0
#' @keywords IO connection
#' @export
#'
get_by_taxon <- function(x, lower = TRUE, pattern = TRUE, ...) {
UseMethod('get_by_taxon')
}
#' @export
get_by_taxon.default <- function(x, lower = TRUE, pattern = TRUE, ...) {
base_uri <- "https://training.paleobiodb.org/comp1.0/occs/list.json"
## Build the URL for the call. This assumes only taxon matching.
if (lower == TRUE) {
if (pattern == TRUE) {
params <- list(match_name = x, vocab = "com", datainfo = TRUE, show = "loc", ageunit = "ma")
} else {
params <- list(base_name = x, vocab = "com", datainfo = TRUE, show = "loc", ageunit = "ma")
}
} else {
params <- list(taxon_name = x, vocab = "com", datainfo = TRUE, show = "loc", ageunit = "ma")
}
api_content <- httr::content(httr::GET(base_uri, query = params))
records <- data.frame(do.call(dplyr::bind_rows, api_content$records))
if (nrow(records) == 0) {stop("The search returned no records.")}
colnames(records) <- earthlife:::record_cols$pbdb[match(colnames(records), record_cols$com)]
if ("dataset_no" %in% colnames(records)) {
# Resorting and excluding "record_type"
col_names <- c("collection_name", "lng", "lat", "accepted_name",
"max_age", "min_age", "age_unit",
"database", "occurrence_no", "dataset_no", "accepted_no",
"collection_no", "country", "state")
for (i in col_names[!"collection_name" %in% colnames(records)]) {
records[,i] <- NA
}
records <- records[, c("collection_name", "lng", "lat", "accepted_name",
"max_age", "min_age", "age_unit",
"database", "occurrence_no", "dataset_no", "accepted_no",
"collection_no", "country", "state")]
} else {
# This happens when only PBDB records are returned.
records <- records[, c("collection_name", "lng", "lat", "accepted_name",
"max_age", "min_age", "age_unit",
"database", "occurrence_no", "accepted_no",
"collection_no", "country", "state")]
}
occurrence <- list(records = records,
meta = list(title = api_content$title,
access = as.POSIXct(api_content$access_time,
format = "%a %F %T", tz = "GMT"),
doc_url = api_content$documentation_url,
call = api_content$data_url))
class(occurrence) <- c("occurrence", "list")
return(occurrence)
}
| /R/get_by_taxon.R | permissive | tylerhoecker/earthlife | R | false | false | 5,375 | r | #' @title Return occurrences for taxa within the PBDB/Neotoma.
#' @description A wrapper for the Composite API, returning all records from both datasets.
#'
#' @importFrom jsonlite fromJSON
#' @importFrom httr content GET
#' @importFrom dplyr bind_rows
#' @param x A taxon name, at any level, may use wildcards. Taxonomy follows either Neotoma (morpho-type based) or Paleobiology DB taxonomy.
#' @param lower Include all taxa at an order below the focal taxon (default \code{TRUE}).
#' @param pattern Is the search string a pattern match i.e. a partial or wildcard search (default \code{TRUE})
#' @param ... Other parameters to be passed into the API, described at \url{https://training.paleobiodb.org/comp1.0}.
#'
#' @author Simon J. Goring \email{goring@@wisc.edu}
#' @return More details on the use of these parameters can be obtained from
#' \url{https://training.paleobiodb.org/comp1.0/}.
#'
#' A list of class `occurrence` and `list`. The list is composed of two elements:
#'
#' \item{ \code{records} }{The complete listing of taxon occurrences.}
#' \item{ \code{meta} }{Metadata for the search.}
#'
#' The \code{records} object is a \code{data.frame}
#' \item{ \code{collection_name} }{Site or collection unit name for the record.}
#' \item{ \code{lng} }{Collection site longitude.}
#' \item{ \code{lat} }{Collection site latitude.}
#' \item{ \code{accepted_name} }{The taxon name.}
#' \item{ \code{max_age} }{The oldest of all sample ages (in calendar years before present) in the dataset.}
#' \item{ \code{min_age} }{The youngest of all sample ages (in calendar years before present) in the dataset.}
#' \item{ \code{age_unit} }{The units for age (by default Mya).}
#' \item{ \code{database} }{The database from which the record was obtained.}
#' \item{ \code{occurrence_no} }{The numeric ID for the record within the parent database.}
#' \item{ \code{dataset_no} }{For records within Neotoma, the numeric ID of the dataset from which the sample was obtained.}
#' \item{ \code{accepted_no} }{The numeric identifier for the taxon name from the parent database.}
#' \item{ \code{collection_no} }{The numeric identifier for the collection within the parent database.}
#' \item{ \code{country} }{The country within which the sample is found (if known).}
#' \item{ \code{state} }{The state (when known) within the identified country.}
#'
#' @examples \dontrun{
#' # Search for sites with "Canis" fossils.
#' canis <- get_by_taxon("Canis")
#'
#' # Limit searches to North America (undocumented use of \code{bbox})
#' canis_na <- get_by_taxon("Canis", bbox = c(-180, 20, -20, 90))
#'
#' }
#'
#' @references
#' EarthLife Consortium: http://earthlifeconsortium.org/
#' API Reference: https://training.paleobiodb.org/comp1.0
#' @keywords IO connection
#' @export
#'
get_by_taxon <- function(x, lower = TRUE, pattern = TRUE, ...) {
UseMethod('get_by_taxon')
}
#' @export
get_by_taxon.default <- function(x, lower = TRUE, pattern = TRUE, ...) {
base_uri <- "https://training.paleobiodb.org/comp1.0/occs/list.json"
## Build the URL for the call. This assumes only taxon matching.
if (lower == TRUE) {
if (pattern == TRUE) {
params <- list(match_name = x, vocab = "com", datainfo = TRUE, show = "loc", ageunit = "ma")
} else {
params <- list(base_name = x, vocab = "com", datainfo = TRUE, show = "loc", ageunit = "ma")
}
} else {
params <- list(taxon_name = x, vocab = "com", datainfo = TRUE, show = "loc", ageunit = "ma")
}
api_content <- httr::content(httr::GET(base_uri, query = params))
records <- data.frame(do.call(dplyr::bind_rows, api_content$records))
if (nrow(records) == 0) {stop("The search returned no records.")}
colnames(records) <- earthlife:::record_cols$pbdb[match(colnames(records), record_cols$com)]
if ("dataset_no" %in% colnames(records)) {
# Resorting and excluding "record_type"
col_names <- c("collection_name", "lng", "lat", "accepted_name",
"max_age", "min_age", "age_unit",
"database", "occurrence_no", "dataset_no", "accepted_no",
"collection_no", "country", "state")
for (i in col_names[!"collection_name" %in% colnames(records)]) {
records[,i] <- NA
}
records <- records[, c("collection_name", "lng", "lat", "accepted_name",
"max_age", "min_age", "age_unit",
"database", "occurrence_no", "dataset_no", "accepted_no",
"collection_no", "country", "state")]
} else {
# This happens when only PBDB records are returned.
records <- records[, c("collection_name", "lng", "lat", "accepted_name",
"max_age", "min_age", "age_unit",
"database", "occurrence_no", "accepted_no",
"collection_no", "country", "state")]
}
occurrence <- list(records = records,
meta = list(title = api_content$title,
access = as.POSIXct(api_content$access_time,
format = "%a %F %T", tz = "GMT"),
doc_url = api_content$documentation_url,
call = api_content$data_url))
class(occurrence) <- c("occurrence", "list")
return(occurrence)
}
|
# Load Packages #
library(readr)
library(readxl)
library(dplyr)
library(tidyr)
library(knitr)
library(mlr)
library(outliers)
#----------------------------------------------------------------------------------------------------------#
# Read Data #
# NOTE(review): setwd() with an absolute, machine-specific path makes this
# script non-portable; prefer relative paths (or an RStudio project) so the
# script runs on other machines.
setwd("C:\\Users\\marga\\OneDrive\\Documents\\Uni\\Machine Learning")
# Read the raw heart-disease data; one row per patient (the exploration
# below reports 14 columns x 303 rows).
heart <- read.csv("heart.csv")
#----------------------------------------------------------------------------------------------------------#
# Understand the Data #
# Quick structural exploration of the raw data frame.
head(heart)   # first 6 rows
tail(heart)   # last 6 rows
dim(heart)    # row and column counts
str(heart)    # column types
names(heart)  # raw column names
class(heart)  # container class
# Findings: 14 attributes for 303 observations
# Attributes:
# > 1. age
# > 2. sex
# > 3. chest pain type (4 values)
# > 4. resting blood pressure
# > 5. serum cholestoral in mg/dl
# > 6. fasting blood sugar > 120 mg/dl
# > 7. resting electrocardiographic results (values 0,1,2)
# > 8. maximum heart rate achieved
# > 9. exercise induced angina
# > 10. oldpeak = ST depression induced by exercise relative to rest
# > 11. the slope of the peak exercise ST segment
# > 12. number of major vessels (0-3) colored by flourosopy
# > 13. thal: 3 = normal; 6 = fixed defect; 7 = reversable defect
#       NOTE(review): 3/6/7 is the original UCI coding, but the recoding later
#       in this script uses levels 1-3 -- confirm which coding heart.csv uses.
# > 14. target (the 14th column, renamed 'Target' below; its coding is not
#       documented in this script)
# Ideas:
# - Change Column Names
# - Change Sex to M and F (factor)
# - Change chest pain type to ordered factors (names? mild to severe?)
# - Change "thal" numerical values to named factors normal, fixed defect, reversable defect
#----------------------------------------------------------------------------------------------------------#
# Tidy the Data #
colnames(heart) <- c('Age',
'Sex',
'Chest Pain',
'Rest. Blood Pressure',
'Cholestoral (mg/dl)',
'Fast. Blood Sugar (>120mg/dl)',
'Resting ECG',
'Max Heart Rate',
'Ex. Induced Angina',
'Old Peak',
'Slope',
'No. of Blood Vessels',
'Thalessemia',
'Target')
head(heart)
str(heart)
heart$Sex <- factor(heart$Sex,
levels = c(1,0),
labels = c("Male", "Female"))
heart$`Chest Pain` <- factor(heart$`Chest Pain` ,
levels = c(0,1,2,3),
labels = c("Typical Angina", "Atypical Angina", "Non-Anginal", "Asymptomatic"))
heart$`Fast. Blood Sugar (>120mg/dl)` <- factor(heart$`Fast. Blood Sugar (>120mg/dl)` ,
levels = c(0,1),
labels = c("FALSE", "TRUE"))
heart$`Resting ECG` <- factor(heart$`Resting ECG` ,
levels = c(0,1,2),
labels = c("Normal", "ST-T Abnormal", "Hypertrophy"))
heart$`Ex. Induced Angina` <- factor(heart$`Ex. Induced Angina` ,
levels = c(0,1),
labels = c("No", "Yes"))
heart$Slope <- factor(heart$Slope ,
levels = c(0,1,2),
labels = c("Upsloping", "Flat", "Downsloping"))
heart$`Thalessemia` <- factor(heart$`Thalessemia` ,
levels = c(1,2,3),
labels = c("Normal", "Fixed Defect", "Reversable Defect"))
heart$Target <- factor(heart$Target,
levels = c(0,1),
labels = c("No", "Yes"))
str(heart)
head(heart)
summary(heart)
#----------------------------------------------------------------------------------------------------------#
# Scan the Data #
# Missing Values #
which(is.na(heart))
colSums(is.na(heart))
rowSums(is.na(heart))
#Only a couple of NAs in the Thalessemia column. Can replace with mode value of Thalessemia.
summary(heart$`Thalessemia`)
#Mode value is "Fixed Defect".
heart$`Thalessemia`[is.na(heart$`Thalessemia`)] <- "Fixed Defect" #Look at relationships between thal and other variables to find best mode for NAs
which(is.na(heart))
summary(heart$`Thalessemia`)
#Check for special values
is.special <- function(x){
if (is.numeric(x)) !is.finite(x)
}
sapply(heart, is.special)
#None found.
# Outliers #
str(heart)
z.scores <- heart$`Rest. Blood Pressure` %>% scores(type = "z")
z.scores %>% summary()
which( abs(z.scores) >3 )
length (which( abs(z.scores) >3 ))
#2 Outliers for Resting Blood Pressure
z.scores <- heart$`Cholestoral (mg/dl)` %>% scores(type = "z")
z.scores %>% summary()
which( abs(z.scores) >3 )
length (which( abs(z.scores) >3 ))
#4 Outliers for Resting Blood Pressure
z.scores <- heart$`Max Heart Rate` %>% scores(type = "z")
z.scores %>% summary()
which( abs(z.scores) >3 )
length (which( abs(z.scores) >3 ))
#1 Outlier for Max Heart Rate
z.scores <- heart$`Old Peak` %>% scores(type = "z")
z.scores %>% summary()
which( abs(z.scores) >3 )
length (which( abs(z.scores) >3 ))
#2 Outliers for Old Peak
#Univariate Box Plots of Numeric Variables
boxplot(heart$`Rest. Blood Pressure`, main = "Resting Blood Pressure")
boxplot(heart$`Rest. Blood Pressure`, main = "Resting Blood Pressure")
boxplot(heart$`Cholestoral (mg/dl)`, main = "Cholestoral")
boxplot(heart$`Old Peak`, main = "Old Peak")
#Histograms
hist(heart$`Age`, main = "Sample Distribution of Age")
hist(heart$`No. of Blood Vessels`, main = "No. of Blood Vessels Coloured by Flourosopy")
| /ML_Assignment_R.R | no_license | ApacheStark/Machine-Learning-Assign-1 | R | false | false | 5,647 | r | # Load Packages #
library(readr)
library(readxl)
library(dplyr)
library(tidyr)
library(knitr)
library(mlr)
library(outliers)
#----------------------------------------------------------------------------------------------------------#
# Read Data #
# NOTE(review): hard-coded absolute path -- adjust (or switch to a relative path) before running elsewhere
setwd("C:\\Users\\marga\\OneDrive\\Documents\\Uni\\Machine Learning")
heart <- read.csv("heart.csv")
#----------------------------------------------------------------------------------------------------------#
# Understand the Data#
head(heart)
tail(heart)
dim(heart)
str(heart)
names(heart)
class(heart)
#Findings: 14 attributes for 303 observations
#Attributes:
# > 1. age
# > 2. sex
# > 3. chest pain type (4 values)
# > 4. resting blood pressure
# > 5. serum cholestoral in mg/dl
# > 6. fasting blood sugar > 120 mg/dl
# > 7. resting electrocardiographic results (values 0,1,2)
# > 8. maximum heart rate achieved
# > 9. exercise induced angina
# > 10. oldpeak = ST depression induced by exercise relative to rest
# > 11. the slope of the peak exercise ST segment
# > 12. number of major vessels (0-3) colored by flourosopy
# > 13. thal: 3 = normal; 6 = fixed defect; 7 = reversable defect
#Ideas:
# - Change Column Names
# - Change Sex to M and F (factor)
# - Change chest pain type to ordered factors (names? mild to severe?)
# - Change "thal" numerical values to named factors normal, fixed defect, reversable defect
#----------------------------------------------------------------------------------------------------------#
# Tidy the Data #
colnames(heart) <- c('Age',
                     'Sex',
                     'Chest Pain',
                     'Rest. Blood Pressure',
                     'Cholestoral (mg/dl)',
                     'Fast. Blood Sugar (>120mg/dl)',
                     'Resting ECG',
                     'Max Heart Rate',
                     'Ex. Induced Angina',
                     'Old Peak',
                     'Slope',
                     'No. of Blood Vessels',
                     'Thalessemia',
                     'Target')
head(heart)
str(heart)
heart$Sex <- factor(heart$Sex,
                    levels = c(1,0),
                    labels = c("Male", "Female"))
heart$`Chest Pain` <- factor(heart$`Chest Pain` ,
                             levels = c(0,1,2,3),
                             labels = c("Typical Angina", "Atypical Angina", "Non-Anginal", "Asymptomatic"))
heart$`Fast. Blood Sugar (>120mg/dl)` <- factor(heart$`Fast. Blood Sugar (>120mg/dl)` ,
                                                levels = c(0,1),
                                                labels = c("FALSE", "TRUE"))
heart$`Resting ECG` <- factor(heart$`Resting ECG` ,
                              levels = c(0,1,2),
                              labels = c("Normal", "ST-T Abnormal", "Hypertrophy"))
heart$`Ex. Induced Angina` <- factor(heart$`Ex. Induced Angina` ,
                                     levels = c(0,1),
                                     labels = c("No", "Yes"))
heart$Slope <- factor(heart$Slope ,
                      levels = c(0,1,2),
                      labels = c("Upsloping", "Flat", "Downsloping"))
heart$`Thalessemia` <- factor(heart$`Thalessemia` ,
                              levels = c(1,2,3),
                              labels = c("Normal", "Fixed Defect", "Reversable Defect"))
heart$Target <- factor(heart$Target,
                       levels = c(0,1),
                       labels = c("No", "Yes"))
str(heart)
head(heart)
summary(heart)
#----------------------------------------------------------------------------------------------------------#
# Scan the Data #
# Missing Values #
which(is.na(heart))
colSums(is.na(heart))
rowSums(is.na(heart))
#Only a couple of NAs in the Thalessemia column. Can replace with mode value of Thalessemia.
summary(heart$`Thalessemia`)
#Mode value is "Fixed Defect".
heart$`Thalessemia`[is.na(heart$`Thalessemia`)] <- "Fixed Defect" #Look at relationships between thal and other variables to find best mode for NAs
which(is.na(heart))
summary(heart$`Thalessemia`)
#Check for special values (Inf / NaN in numeric columns)
is.special <- function(x){
  # Return FALSE for every element of non-numeric columns so that
  # sapply() can simplify the result (the original returned NULL there)
  if (is.numeric(x)) !is.finite(x) else rep(FALSE, length(x))
}
sapply(heart, is.special)
#None found.
# Outliers #
str(heart)
z.scores <- heart$`Rest. Blood Pressure` %>% scores(type = "z")
z.scores %>% summary()
which( abs(z.scores) >3 )
length (which( abs(z.scores) >3 ))
#2 Outliers for Resting Blood Pressure
z.scores <- heart$`Cholestoral (mg/dl)` %>% scores(type = "z")
z.scores %>% summary()
which( abs(z.scores) >3 )
length (which( abs(z.scores) >3 ))
#4 Outliers for Cholestoral (comment fixed: this block scores Cholestoral, not Resting Blood Pressure)
z.scores <- heart$`Max Heart Rate` %>% scores(type = "z")
z.scores %>% summary()
which( abs(z.scores) >3 )
length (which( abs(z.scores) >3 ))
#1 Outlier for Max Heart Rate
z.scores <- heart$`Old Peak` %>% scores(type = "z")
z.scores %>% summary()
which( abs(z.scores) >3 )
length (which( abs(z.scores) >3 ))
#2 Outliers for Old Peak
#Univariate Box Plots of Numeric Variables
boxplot(heart$`Rest. Blood Pressure`, main = "Resting Blood Pressure")
boxplot(heart$`Max Heart Rate`, main = "Max Heart Rate") # was an accidental duplicate of the Resting Blood Pressure plot
boxplot(heart$`Cholestoral (mg/dl)`, main = "Cholestoral")
boxplot(heart$`Old Peak`, main = "Old Peak")
#Histograms
hist(heart$`Age`, main = "Sample Distribution of Age")
hist(heart$`No. of Blood Vessels`, main = "No. of Blood Vessels Coloured by Flourosopy")
|
# Definition of the basics() function
# basics <- function(x) {
# c(min = min(x), mean = mean(x), median = median(x), max = max(x))
# }
# Fix the error:
#vapply(temp, basics, numeric(3))
# temp is already available in the workspace
# Definition of the basics() function
basics <- function(x) {
  # Named summary of a numeric vector: min / mean / median / max
  stats <- c(min(x), mean(x), median(x), max(x))
  names(stats) <- c("min", "mean", "median", "max")
  stats
}
# Fix the error:
vapply(temp, basics, numeric(4)) | /Intermediate of R/5.0.11.r | no_license | TopicosSelectos/tutoriales-2019-2-al150422 | R | false | false | 439 | r | # Definition of the basics() function
# basics <- function(x) {
# c(min = min(x), mean = mean(x), median = median(x), max = max(x))
# }
# Fix the error:
#vapply(temp, basics, numeric(3))
# temp is already available in the workspace
# Definition of the basics() function
basics <- function(x) {
  # Named summary of a numeric vector: min / mean / median / max
  stats <- c(min(x), mean(x), median(x), max(x))
  names(stats) <- c("min", "mean", "median", "max")
  stats
}
# Fix the error:
vapply(temp, basics, numeric(4)) |
create_hrmap <- function(hr_dat, id_lst, col_lst = setdiff(names(hr_dat),id_lst)){
  # Build a long "ID -> attributes" mapping from an HR master file that
  # carries one column per source-system identifier.  Each id column is
  # upper-cased, rows with a missing/empty id are dropped, and the per-id
  # slices are stacked with the id column renamed to "ID".
  # hr_dat  : HR master file (assumed de-dupped to the latest record)
  # id_lst  : names of the system-id columns in hr_dat
  # col_lst : non-id columns kept in the output (defaults to all non-id cols)
  hr_dat <- as.data.frame(hr_dat)
  if (length(col_lst) < 1) {
    col_lst <- setdiff(names(hr_dat), id_lst)
  }
  pieces <- vector("list", length(id_lst))
  for (k in seq_along(id_lst)) {
    id_col <- id_lst[[k]]
    # normalise ids in place so later slices see the upper-cased values too
    hr_dat[[id_col]] <- toupper(hr_dat[[id_col]])
    keep <- !is.na(hr_dat[[id_col]]) & !(hr_dat[[id_col]] %in% c(NA, ""))
    slice <- hr_dat[keep, unique(c(id_col, col_lst))]
    names(slice) <- unique(c("ID", col_lst))
    pieces[[k]] <- slice
  }
  do.call(rbind, pieces)
}
#Merging different columns
condensed_merge <- function(t1,t2,t1key,t2key){
  # Left join t1 with t2, keeping only the columns of t2 that do not
  # already exist in t1 (so the merged table carries no duplicated columns).
  # t1, t2 : tables (coerced to data frames)
  # t1key  : join key column name in t1
  # t2key  : join key column name in t2 (must differ from t1key)
  # Returns a data frame with all rows of t1 (left join).
  t1 <- as.data.frame(t1)
  t2 <- as.data.frame(t2)
  Cols <- setdiff(names(t2), names(t1))
  Cols <- match(Cols, colnames(t2))
  # drop = FALSE: without it a single remaining column collapses to a
  # vector and merge() fails (bug in the original version)
  t2 <- t2[, Cols, drop = FALSE]
  tf <- merge.data.frame(t1, t2, by.x = t1key, by.y = t2key, all.x = TRUE)
  return(tf)
}
#month period that should load from Database
prev_ym_date <- function(t,n){
  # Year-month ("yyyymm") of the period n months before date t.
  # t : starting Date
  # n : number of months prior (>= 1)
  dates <- seq(t, by = "-1 month", length.out = n)
  format(dates[n] - 1, "%Y%m")
}
# # Test
# t <- Sys.Date()
# for (n in c(1:10)){
# print(n)
# nt <- prev_ym_date(t,n)
# print(nt)
# }
#calculating previous yyyymm
prev_ym <- function(t,n){
  # Year-month ("yyyymm" string) n months before period t.
  # t : starting period as a "yyyymm" string
  # n : number of months prior (positive integer)
  # Rewritten with a single 0-based month index: equivalent to the
  # original chain of modular-arithmetic special cases, and drops the
  # stringr::str_pad dependency in favour of sprintf().
  t1 <- as.numeric(as.character(substr(t,1,4)))   # year
  t2 <- as.numeric(as.character(substr(t,5,6)))   # month (1..12)
  idx <- t1 * 12 + (t2 - 1) - n                   # 0-based index of the target month
  paste0(idx %/% 12, sprintf("%02d", as.integer(idx %% 12) + 1))
}
# # Test
# t <- "202105"
# for (n in c(1:10)){
# print(n)
# nt <- prev_ym(t,n)
# print(nt)
# }
`%ni%` <- Negate(`%in%`)
#boolean dataframe function to identify NaNs
is.nan.data.frame <- function(x){
  # Column-wise NaN test for a data frame, returning a logical matrix of
  # the same shape (is.nan() itself has no data.frame method).
  col_flags <- lapply(x, is.nan)
  do.call(cbind, col_flags)
}
#clean and trim string (mainly used for conforming Product Names)
# Normalise a string (mainly product names) for matching: strip every
# non-alphanumeric character and lower-case the result.
# NOTE(review): the trailing "" lands in sub()'s ignore.case argument, and
# the lazy ".*? " pattern can never match once spaces are removed — the
# sub() wrapper looks like a leftover; confirm intent before touching.
# str_replace_all() comes from stringr, which must be attached.
conform_str <- function(x){ sub(".*? (.+)", "\\1",tolower(gsub(" ","",
                              str_replace_all(x,"[^[:alnum:]]", ""))),"")}
#rounding all numeric DEs in a dataframe
round_df <- function(df, digits) {
  # Round every numeric column of df to `digits` decimal places,
  # leaving non-numeric columns untouched.
  num_cols <- vapply(df, is.numeric, FUN.VALUE = logical(1))
  df[num_cols] <- lapply(df[num_cols], round, digits = digits)
  df
}
#getting months elapsed
elapsed_months <- function(end_date, start_date) {
  # Whole calendar months between start_date and end_date
  # (positive when end_date is later).
  end_lt <- as.POSIXlt(end_date)
  start_lt <- as.POSIXlt(start_date)
  year_part <- 12 * (end_lt$year - start_lt$year)
  year_part + (end_lt$mon - start_lt$mon)
}
#capitalize the first letter of each word, lower case for else in a string
#can be used using sapply
simple_cap <- function(x) {
  # Title-case a single string: capitalise the first letter of each
  # space-separated word and lower-case the rest.
  words <- strsplit(x, " ")[[1]]
  heads <- toupper(substring(words, 1, 1))
  tails <- tolower(substring(words, 2))
  paste(heads, tails, sep = "", collapse = " ")
}
#checking file dependancies are available
file_check = function(data_path,pattern_name,totalfile,filetype){
  # Check that a directory tree contains the expected files or folders.
  # data_path    : root path searched (with or without a trailing "/")
  # pattern_name : character vector of case-sensitive patterns
  #                (e.g. "ERROR-" is different from "error-")
  # totalfile    : minimum total number of matches expected
  # filetype     : "file" to search files recursively, "folder" to test
  #                that a sub-folder named after each pattern exists
  # Returns a human-readable status string.
  # Normalise the root path (strip a trailing slash if present)
  rootpath <- if (substr(data_path, nchar(data_path), nchar(data_path)) == "/") {
    substr(data_path, 1, nchar(data_path) - 1)
  } else {
    data_path
  }
  patterns <- pattern_name
  num <- totalfile
  if (filetype %in% c("file")) {
    file_num <- 0
    # walk the root folder and every sub-folder
    folder_list <- list.dirs(rootpath, full.names = TRUE, recursive = TRUE)
    for (i in folder_list) {
      for (m in patterns) {
        # files matching the pattern, excluding lock/backup copies whose
        # names contain the literal "~$" (fixed = TRUE: the original
        # regex "~$" wrongly matched names *ending* in "~")
        data_find <- grep(pattern = "~$", fixed = TRUE, invert = TRUE, value = TRUE,
                          x = list.files(i, pattern = m, all.files = FALSE,
                                         full.names = TRUE, ignore.case = FALSE))
        file_num <- file_num + length(data_find)
      }
    }
    if (file_num < num) {
      return("Missing file,please check")
    } else {
      return("Contain all files that want to search for")
    }
  } else if (filetype %in% c("folder")) {
    j <- 0
    for (m in patterns) {
      # file.path() on the normalised root works whether or not the caller
      # supplied a trailing slash (the original paste0(data_path, m)
      # silently failed when the slash was missing)
      if (dir.exists(file.path(rootpath, m))) j <- j + 1
    }
    if (j >= num) {
      return("All folders found under the path")
    } else {
      return("Folder is missing, please check")
    }
  } else {
    return("Please select a correct file type")
  }
}
| /helper_func.R | no_license | thelilianli/Lib_Helpers | R | false | false | 5,820 | r | create_hrmap <- function(hr_dat, id_lst, col_lst = setdiff(names(hr_dat),id_lst)){
# this function takes in the hr masterfile loops through each id and appends final list for full mapping
# function assumes latest record is dedupped
# hr_dat is hr masterfile
# id_lst - list of system id cols in hr masterfile
# col_lst - list of non-id columns to be kept in the final dataset, defaults to all non id cols
# the join keys must be named differently
hr_dat <- as.data.frame(hr_dat)
if(length(col_lst)<1){
col_lst <- setdiff(names(hr_dat),id_lst)
#print(col_lst)
}
datalist = list()
i <- 1
for (id_col in id_lst){
#print(id_col)
hr_dat[[id_col]]<- toupper(hr_dat[[id_col]])
hr_dat1 <- hr_dat[!is.na(hr_dat[[id_col]])
& !(hr_dat[[id_col]] %in% c(NA,"")),
unique(c(id_col,col_lst))]
names(hr_dat1) <- unique(c("ID",col_lst))
datalist[[i]] <- hr_dat1
# print(nrow(hr_dat1))
i <- i+1
}
hr_dat <- do.call(rbind, datalist)
return(hr_dat)
}
#Merging different columns
condensed_merge <- function(t1,t2,t1key,t2key){
  # Left join t1 with t2, keeping only the columns of t2 that do not
  # already exist in t1 (so the merged table carries no duplicated columns).
  # t1, t2 : tables (coerced to data frames)
  # t1key  : join key column name in t1
  # t2key  : join key column name in t2 (must differ from t1key)
  # Returns a data frame with all rows of t1 (left join).
  t1 <- as.data.frame(t1)
  t2 <- as.data.frame(t2)
  Cols <- setdiff(names(t2), names(t1))
  Cols <- match(Cols, colnames(t2))
  # drop = FALSE: without it a single remaining column collapses to a
  # vector and merge() fails (bug in the original version)
  t2 <- t2[, Cols, drop = FALSE]
  tf <- merge.data.frame(t1, t2, by.x = t1key, by.y = t2key, all.x = TRUE)
  return(tf)
}
#month period that should load from Database
prev_ym_date <- function(t,n){
  # Year-month ("yyyymm") of the period n months before date t.
  # t : starting Date
  # n : number of months prior (>= 1)
  dates <- seq(t, by = "-1 month", length.out = n)
  format(dates[n] - 1, "%Y%m")
}
# # Test
# t <- Sys.Date()
# for (n in c(1:10)){
# print(n)
# nt <- prev_ym_date(t,n)
# print(nt)
# }
#calculating previous yyyymm
prev_ym <- function(t,n){
  # Year-month ("yyyymm" string) n months before period t.
  # t : starting period as a "yyyymm" string
  # n : number of months prior (positive integer)
  # Rewritten with a single 0-based month index: equivalent to the
  # original chain of modular-arithmetic special cases, and drops the
  # stringr::str_pad dependency in favour of sprintf().
  t1 <- as.numeric(as.character(substr(t,1,4)))   # year
  t2 <- as.numeric(as.character(substr(t,5,6)))   # month (1..12)
  idx <- t1 * 12 + (t2 - 1) - n                   # 0-based index of the target month
  paste0(idx %/% 12, sprintf("%02d", as.integer(idx %% 12) + 1))
}
# # Test
# t <- "202105"
# for (n in c(1:10)){
# print(n)
# nt <- prev_ym(t,n)
# print(nt)
# }
`%ni%` <- Negate(`%in%`)
#boolean dataframe function to identify NaNs
is.nan.data.frame <- function(x){
  # Column-wise NaN test for a data frame, returning a logical matrix of
  # the same shape (is.nan() itself has no data.frame method).
  col_flags <- lapply(x, is.nan)
  do.call(cbind, col_flags)
}
#clean and trim string (mainly used for conforming Product Names)
# Normalise a string (mainly product names) for matching: strip every
# non-alphanumeric character and lower-case the result.
# NOTE(review): the trailing "" lands in sub()'s ignore.case argument, and
# the lazy ".*? " pattern can never match once spaces are removed — the
# sub() wrapper looks like a leftover; confirm intent before touching.
# str_replace_all() comes from stringr, which must be attached.
conform_str <- function(x){ sub(".*? (.+)", "\\1",tolower(gsub(" ","",
                              str_replace_all(x,"[^[:alnum:]]", ""))),"")}
#rounding all numeric DEs in a dataframe
round_df <- function(df, digits) {
  # Round every numeric column of df to `digits` decimal places,
  # leaving non-numeric columns untouched.
  num_cols <- vapply(df, is.numeric, FUN.VALUE = logical(1))
  df[num_cols] <- lapply(df[num_cols], round, digits = digits)
  df
}
#getting months elapsed
elapsed_months <- function(end_date, start_date) {
  # Whole calendar months between start_date and end_date
  # (positive when end_date is later).
  end_lt <- as.POSIXlt(end_date)
  start_lt <- as.POSIXlt(start_date)
  year_part <- 12 * (end_lt$year - start_lt$year)
  year_part + (end_lt$mon - start_lt$mon)
}
#capitalize the first letter of each word, lower case for else in a string
#can be used using sapply
simple_cap <- function(x) {
  # Title-case a single string: capitalise the first letter of each
  # space-separated word and lower-case the rest.
  words <- strsplit(x, " ")[[1]]
  heads <- toupper(substring(words, 1, 1))
  tails <- tolower(substring(words, 2))
  paste(heads, tails, sep = "", collapse = " ")
}
#checking file dependancies are available
file_check = function(data_path,pattern_name,totalfile,filetype){
  # Check that a directory tree contains the expected files or folders.
  # data_path    : root path searched (with or without a trailing "/")
  # pattern_name : character vector of case-sensitive patterns
  #                (e.g. "ERROR-" is different from "error-")
  # totalfile    : minimum total number of matches expected
  # filetype     : "file" to search files recursively, "folder" to test
  #                that a sub-folder named after each pattern exists
  # Returns a human-readable status string.
  # Normalise the root path (strip a trailing slash if present)
  rootpath <- if (substr(data_path, nchar(data_path), nchar(data_path)) == "/") {
    substr(data_path, 1, nchar(data_path) - 1)
  } else {
    data_path
  }
  patterns <- pattern_name
  num <- totalfile
  if (filetype %in% c("file")) {
    file_num <- 0
    # walk the root folder and every sub-folder
    folder_list <- list.dirs(rootpath, full.names = TRUE, recursive = TRUE)
    for (i in folder_list) {
      for (m in patterns) {
        # files matching the pattern, excluding lock/backup copies whose
        # names contain the literal "~$" (fixed = TRUE: the original
        # regex "~$" wrongly matched names *ending* in "~")
        data_find <- grep(pattern = "~$", fixed = TRUE, invert = TRUE, value = TRUE,
                          x = list.files(i, pattern = m, all.files = FALSE,
                                         full.names = TRUE, ignore.case = FALSE))
        file_num <- file_num + length(data_find)
      }
    }
    if (file_num < num) {
      return("Missing file,please check")
    } else {
      return("Contain all files that want to search for")
    }
  } else if (filetype %in% c("folder")) {
    j <- 0
    for (m in patterns) {
      # file.path() on the normalised root works whether or not the caller
      # supplied a trailing slash (the original paste0(data_path, m)
      # silently failed when the slash was missing)
      if (dir.exists(file.path(rootpath, m))) j <- j + 1
    }
    if (j >= num) {
      return("All folders found under the path")
    } else {
      return("Folder is missing, please check")
    }
  } else {
    return("Please select a correct file type")
  }
}
|
# Auto-generated fuzzing harness: feeds a 1x7 numeric matrix (containing
# subnormal doubles) to the internal borrowr:::matchesToCor routine.
testlist <- list(x = structure(c(5.56220507515428e-308, 7.00958014851408e-229, 0, 0, 0, 0, 0), .Dim = c(1L, 7L)))
result <- do.call(borrowr:::matchesToCor,testlist)
str(result) | /borrowr/inst/testfiles/matchesToCor/libFuzzer_matchesToCor/matchesToCor_valgrind_files/1609957859-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 177 | r | testlist <- list(x = structure(c(5.56220507515428e-308, 7.00958014851408e-229, 0, 0, 0, 0, 0), .Dim = c(1L, 7L)))
result <- do.call(borrowr:::matchesToCor,testlist)
str(result) |
# ID generateMetadata.R
# Copyright (C) 2017-2019 INRA
# Authors: D. Jacob
#
#' generateMetadata
#'
#' \code{generateMetadata} Generates the metadata from the list of raw spectra, namely the samples,
#' the experimental factors and the list of selected raw spectra. The metadata are either taken from
#' the supplied sample matrix or derived by scanning the raw spectra directory, depending on whether
#' the sample matrix is supplied as input or not.
#' @param RAWDIR The full path of the raw spectra directory on the disk
#' @param procParams the list of processing parameters. First initialize this list with the \code{Spec1r.Procpar.default} list, then modify parameters depending of your spectra set.
#' @param samples the samples matrix with the correspondence of the raw spectra
#'
#' @return
#' \code{generateMetadata} returns a list containing the following components:
#' \itemize{
#' \item \code{samples} : the samples matrix with the correspondence of the raw spectra, as well as the levels of the experimental factors if specified in the input.
#' \item \code{factors} : the factors matrix with the corresponding factor names. At minimum, the list contains the Samplecode label corresponding to the samples without their group level.
#' \item \code{rawids} : list of the full directories of the raw spectra (i.e. where the FID files are accessible)
#'}
#'
#' @examples
#' data_dir <- system.file("extra", package = "Rnmr1D")
#' samplefile <- file.path(data_dir, "Samples.txt")
#' samples <- read.table(samplefile, sep="\t", header=TRUE,stringsAsFactors=FALSE)
#' metadata <- generateMetadata(data_dir, procParams=Spec1rProcpar, samples)
#'
generateMetadata <- function(RAWDIR, procParams, samples=NULL)
{
   # Dispatch to the metadata builder matching the vendor / input signal:
   # - with a samples table, or for Varian/Jeol/nmrML data, metadata are
   #   derived through set_Metadata()
   # - otherwise the Bruker / RS2D directory layouts are scanned directly
   # Returns a list (samples, factors, rawids), or an empty list when the
   # vendor / signal combination is not handled.
   if (!is.null(samples)) {
      return(set_Metadata(RAWDIR, procParams, samples))
   }
   if (procParams$VENDOR == "bruker") {
      if (procParams$INPUT_SIGNAL == "fid") {
         return(generate_Metadata_Bruker_fid(RAWDIR, procParams))
      }
      if (procParams$INPUT_SIGNAL == "1r") {
         return(generate_Metadata_Bruker_1r(RAWDIR, procParams))
      }
      return(list())
   }
   if (procParams$VENDOR == "rs2d") {
      if (procParams$INPUT_SIGNAL == "fid") {
         return(generate_Metadata_RS2D_fid(RAWDIR, procParams))
      }
      if (procParams$INPUT_SIGNAL == "1r") {
         return(generate_Metadata_RS2D_1r(RAWDIR, procParams))
      }
      return(list())
   }
   # Varian, Jeol or nmrML without a samples table
   set_Metadata(RAWDIR, procParams, samples)
}
generate_Metadata_Bruker_fid <- function(RAWDIR, procParams)
{
   # Build the metadata (samples / rawids / factors) by scanning a Bruker
   # acquisition tree for raw "fid" files, when no samples table is given.
   # Expected layout: <RAWDIR>/[optional dirs]/<sample>/<expno>/fid
   # Returns NULL when no fid file is found, otherwise a list possibly
   # carrying $ERRORLIST (paths at an inconsistent depth) plus $samples,
   # $rawids, $factors.
   # NOTE(review): the levels() calls below only do something if the path
   # columns are factors (stringsAsFactors = TRUE, i.e. R < 4.0); under
   # R >= 4.0 as.data.frame keeps characters and levels() returns NULL --
   # verify against the package's supported R versions.
   metadata <- list()
   ERRORLIST <- c()
   OKRAW <- 1
   # single experimental factor: the sample code itself
   lstfac <- matrix(c(1,"Samplecode"), nrow=1)
   RAWPATH <- gsub("//", "/", RAWDIR)
   LIST <- gsub("//", "/", list.files(path = RAWPATH, pattern = "fid$", all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE))
   if ( "character" %in% class(LIST) && length(LIST)==0 ) return(NULL)
   # Split each path into components; a non-matrix result means the fid
   # files are not all at the same directory depth
   L <- simplify2array(strsplit(LIST,'/'))
   if (! "matrix" %in% class(L)) {
      # Keep only paths at the majority (mean) depth; report the rest
      L <- simplify2array(lapply(L, length))
      LM <- round(mean(L),0)
      RMLIST <- c()
      if (sum(L<LM)>0) {
         ERRORLIST <- c( ERRORLIST, LIST[ which(L < LM) ] )
         RMLIST <- c( RMLIST, which(L < LM) )
      }
      if (sum(L>LM)>0) {
         ERRORLIST <- c( ERRORLIST, LIST[ which(L > LM) ] )
         RMLIST <- c( RMLIST, which(L > LM) )
      }
      if (length(RMLIST)>0) LIST <- LIST[ -RMLIST ]
   }
   # Path components below RAWPATH, one row per fid file
   LIST <- as.data.frame(t(simplify2array(strsplit(LIST,'/'))))
   nDir <- dim(simplify2array(strsplit(RAWPATH,'/')))[1]
   LIST <- LIST[, c(-1:-nDir)]
   nc <- dim(LIST)[2]
   nr <- dim(LIST)[1]
   if (nc<3) {
      # fewer than <sample>/<expno>/fid components: duplicate the first
      # column so the triplet shape is always present
      LIST <- cbind(LIST[,1],LIST)
   }
   SL <- NULL
   # extra leading directories (depth > 3) are set aside in SL
   if (nc>3) { SL <- LIST[, c(1:(nc-3)) ]; LIST <- LIST[ , c((nc-2):nc) ]; }
   nc=3
   # When both sample and expno vary, keep only the first expno level
   # per sample (one spectrum per sample)
   if (length(levels(LIST[,1]))<nr && length(levels(LIST[,1]))>1 &&
       length(levels(LIST[,2]))<nr && length(levels(LIST[,2]))>1) {
      L <- levels(LIST[,1])
      LIST2 <- NULL
      for (i in 1:length(L)) {
         L2 <- LIST[ LIST[,1]==L[i], ]
         LIST2 <- rbind( LIST2, L2[ L2[,2]==levels(L2[,2])[1], ] )
      }
      LIST <- LIST2
   }
   nr <- dim(LIST)[1]
   MS <- as.matrix(LIST)
   if( !is.null(SL)) { LIST2 <- cbind(SL[c(1: dim(LIST)[1])], LIST); } else { LIST2 <- LIST; }
   # rawids rows: spectrum directory, expno, procno (0 for fid input)
   rawdir <- cbind( sapply(1:nr, function(x){ do.call( paste, c( RAWPATH, as.list(LIST2[x,c(1:(nc-1))]), sep="/")) }), MS[, 2], rep(0,nr) )
   if (length(levels(LIST[,1]))==nr) {
      # every sample directory is unique: use it as code and name
      M <- MS[, c(1,1) ]
   } else {
      M <- MS[, c(1,2) ]
   }
   # keep matrix shape when a single spectrum was found
   if (nr==1 && "character" %in% class(M)) M <- as.matrix(t(M))
   # collapse the second column when it carries a single repeated value
   if (nr>1 && length(unique(sort(M[,2])))==1) M[,2] <- M[,1]
   metadata$ERRORLIST <- ERRORLIST
   if (OKRAW==1) {
      metadata$samples <- M
      metadata$rawids <- gsub("//", "/", rawdir)
      metadata$factors <- lstfac
   }
   return(metadata)
}
generate_Metadata_Bruker_1r <- function(RAWDIR, procParams)
{
   # Build the metadata by scanning a Bruker tree for processed "1r"
   # spectra, when no samples table is given.
   # Expected layout: <RAWDIR>/[dirs]/<sample>/<expno>/pdata/<procno>/1r
   # Returns NULL when no 1r file is found, otherwise a list possibly
   # carrying $ERRORLIST plus $samples, $rawids, $factors.
   # NOTE(review): as in the fid variant, the levels() logic assumes
   # factor columns (stringsAsFactors = TRUE, R < 4.0) -- verify.
   metadata <- list()
   ERRORLIST <- c()
   OKRAW <- 1
   lstfac <- matrix(c(1,"Samplecode"), nrow=1)
   RAWPATH <- gsub("//", "/", RAWDIR)
   LIST <- gsub("//", "/", list.files(path = RAWPATH, pattern = "1r$", all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE))
   if ( "character" %in% class(LIST) && length(LIST)==0 ) return(NULL)
   # Detect paths at an inconsistent depth; keep the majority depth only
   L <- simplify2array(strsplit(LIST,'/'))
   if (! "matrix" %in% class(L)) {
      L <- simplify2array(lapply(L, length))
      LM <- round(mean(L),0)
      RMLIST <- c()
      if (sum(L<LM)>0) {
         ERRORLIST <- c( ERRORLIST, LIST[ which(L < LM) ] )
         RMLIST <- c( RMLIST, which(L < LM) )
      }
      if (sum(L>LM)>0) {
         ERRORLIST <- c( ERRORLIST, LIST[ which(L > LM) ] )
         RMLIST <- c( RMLIST, which(L > LM) )
      }
      if (length(RMLIST)>0) LIST <- LIST[ -RMLIST ]
   }
   LIST <- as.data.frame(t(simplify2array(strsplit(LIST,'/'))))
   # Check if we have a Bruker directory structure
   nDir <- dim(simplify2array(strsplit(RAWPATH,'/')))[1]
   LIST <- LIST[, c(-1:-nDir)]
   nc <- dim(LIST)[2]
   nr <- dim(LIST)[1]
   if (nc<5) {
      # fewer than <sample>/<expno>/pdata/<procno>/1r components:
      # duplicate the first column so the 5-column shape always holds
      LIST <- cbind(LIST[,1],LIST)
   }
   SL <- NULL
   # extra leading directories (depth > 5) are set aside in SL
   if (nc>5) { SL <- LIST[, c(1:(nc-5)) ]; LIST <- LIST[ , c((nc-4):nc) ]; }
   nc=5
   # several procnos: keep the first one only
   L <- levels(LIST[,4])
   if (length(L)>1) {
      LIST <- LIST[ LIST[,4]==L[1], ]
   }
   nr <- dim(LIST)[1]
   # When both sample and expno vary, keep only the first expno level
   # per sample
   if (length(levels(LIST[,1]))<nr && length(levels(LIST[,1]))>1 &&
       length(levels(LIST[,2]))<nr && length(levels(LIST[,2]))>1) {
      L <- levels(LIST[,1])
      LIST2 <- NULL
      for (i in 1:length(L)) {
         L2 <- LIST[ LIST[,1]==L[i], ]
         LIST2 <- rbind( LIST2, L2[ L2[,2]==levels(L2[,2])[1], ] )
      }
      LIST <- LIST2
   }
   nr <- dim(LIST)[1]
   MS <- as.matrix(LIST)
   if( !is.null(SL)) { LIST2 <- cbind(SL[c(1: dim(LIST)[1])], LIST); } else { LIST2 <- LIST; }
   nc <- dim(LIST2)[2]
   # rawids rows: spectrum directory (up to expno), expno, procno
   rawdir <- cbind( sapply(1:nr, function(x){ do.call( paste, c( RAWPATH, as.list(LIST2[x,c(1:(nc-3))]), sep="/")) }), MS[, 2], MS[, 4] )
   if (length(levels(LIST[,1]))==nr) {
      # every sample directory is unique: use it as code and name
      M <- MS[, c(1,1) ]
   } else {
      M <- MS[, c(1,2) ]
   }
   # keep matrix shape when a single spectrum was found
   if (nr==1 && "character" %in% class(M)) M <- as.matrix(t(M))
   metadata$ERRORLIST <- ERRORLIST
   if (OKRAW==1) {
      metadata$samples <- M
      metadata$rawids <- gsub("//", "/", rawdir)
      metadata$factors <- lstfac
   }
   return(metadata)
}
generate_Metadata_RS2D_fid <- function(RAWDIR, procParams)
{
   # Scan an RS2D acquisition tree for raw FIDs: every "data.dat" file
   # outside a "Proc" sub-directory is one spectrum; the name of its
   # parent directory is used as the sample code.
   # Returns a list with $samples (codes), $rawids (directories) and
   # $factors (a single "Samplecode" factor).
   metadata <- list()
   rawpath <- gsub("//", "/", RAWDIR)
   datafiles <- list.files(path = rawpath, pattern = "data.dat$", all.files = FALSE,
                           full.names = TRUE, recursive = TRUE,
                           ignore.case = FALSE, include.dirs = FALSE)
   datafiles <- gsub("//", "/", datafiles)
   # processed spectra live under ".../Proc/": keep acquisition data only
   datafiles <- grep(pattern = "/Proc/", datafiles, value = TRUE, invert = TRUE)
   specdirs <- dirname(datafiles)
   codes <- basename(specdirs)
   metadata$ERRORLIST <- c()   # no depth/error detection for this layout
   metadata$samples <- cbind(codes, codes, deparse.level = 0)
   # expno / procno are meaningless for fid input: filled with "0"
   metadata$rawids <- gsub("//", "/", cbind(specdirs, rep(0, length(datafiles)),
                                            rep(0, length(datafiles)), deparse.level = 0))
   metadata$factors <- matrix(c(1, "Samplecode"), nrow = 1)
   return(metadata)
}
generate_Metadata_RS2D_1r <- function(RAWDIR, procParams)
{
   # Build metadata from an RS2D tree of processed spectra: "data.dat"
   # files located under a ".../Proc/<procno>/" sub-directory.
   # NOTE(review): the levels() pruning below assumes factor columns
   # (stringsAsFactors = TRUE, R < 4.0); under R >= 4.0 levels() of a
   # character column is NULL and the branch is skipped -- verify.
   metadata <- list()
   ERRORLIST <- c()
   OKRAW <- 1
   lstfac <- matrix(c(1,"Samplecode"), nrow=1)
   RAWPATH <- gsub("//", "/", RAWDIR)
   LIST <- gsub("//", "/", list.files(path = RAWPATH, pattern = "data.dat$", all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE))
   # keep processed spectra only, and drop the trailing "data.dat"
   LIST <- dirname(grep(pattern = "/Proc/", LIST, value = TRUE, invert=FALSE))
   L <- simplify2array(strsplit(LIST,'/'))
   # one row per spectrum, path components below RAWPATH
   LIST <- as.data.frame(t(simplify2array(strsplit(LIST,'/'))))
   nDir <- dim(simplify2array(strsplit(RAWPATH,'/')))[1]
   LIST <- LIST[, c(-1:-nDir)]
   nr <- dim(LIST)[1]
   nc <- dim(LIST)[2]
   # column nc-2 is the sample directory (…/<sample>/Proc/<procno>):
   # when a sample has several procnos, keep the first occurrence only
   if (length(levels(LIST[,nc-2]))<nr && length(levels(LIST[,nc-2]))>1) {
      L <- levels(LIST[,nc-2])
      LIST2 <- NULL
      for (i in 1:length(L)) LIST2 <- rbind(LIST2, LIST[ LIST[,nc-2]==L[i], ][1,])
      LIST <- LIST2
   }
   nr <- dim(LIST)[1]
   nc <- dim(LIST)[2]
   MS <- as.matrix(LIST)
   # sample code used as both code and name
   M <- cbind( MS[,nc-2], MS[,nc-2] )
   # rawids rows: sample directory, procno, procno
   rawdir <- cbind( sapply(1:nr, function(x){ do.call( paste, c( RAWPATH, as.list(MS[x,1:(nc-2)]), sep="/")) }), MS[,nc], MS[,nc] )
   metadata$ERRORLIST <- ERRORLIST
   if (OKRAW==1) {
      metadata$samples <- M
      metadata$rawids <- gsub("//", "/", rawdir)
      metadata$factors <- lstfac
   }
   return(metadata)
}
set_Metadata <- function(RAWDIR, procParams, samples)
{
   # Route to the vendor-specific metadata builder.
   # Returns NULL for an unsupported vendor string.
   switch(procParams$VENDOR,
          bruker = .set_Metadata_Bruker(RAWDIR, procParams, samples),
          varian = .set_Metadata_Varian(RAWDIR, procParams, samples),
          nmrml  = .set_Metadata_nmrML(RAWDIR, procParams, samples),
          jeol   = .set_Metadata_Jeol(RAWDIR, procParams, samples),
          rs2d   = .set_Metadata_RS2D(RAWDIR, procParams, samples),
          NULL)
}
.set_Metadata_Bruker <- function(RAWDIR, procParams, samples)
{
   # Build metadata from a user-supplied samples table for Bruker data.
   # Expected samples columns: 1 = sample directory, 2 = sample code,
   # 3 = expno, 4 = procno, then optional factor columns.
   # Every declared spectrum must match exactly one fid/1r file on disk;
   # unmatched rows are collected in $ERRORLIST and, if any, the
   # samples/rawids/factors entries are withheld.
   metadata <- list()
   if (!is.null(samples)) {
      samplesize <- dim(samples)
      nraw <- samplesize[1]
      nbcol <- samplesize[2]
      lstfac <- matrix(c(1,"Samplecode"), nrow=1)
      rawdir <- NULL
      ERRORLIST <- c()
      OKRAW <- 1
      if (procParams$INPUT_SIGNAL == "fid") {
         # raw fids live outside "pdata"
         LIST <- gsub("//", "/", list.files(path = RAWDIR, pattern = "fid$", all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE))
         LIST <- grep(pattern = "pdata", LIST, value = TRUE, invert=TRUE)
      } else {
         # processed 1r spectra live under "pdata"
         LIST <- gsub("//", "/", list.files(path = RAWDIR, pattern = "1r$", all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE))
         LIST <- grep(pattern = "pdata", LIST, value = TRUE, invert=FALSE)
      }
      for (i in 1:nraw) {
         # relative path of the expected spectrum file for this row
         FileSpectrum <- paste(samples[i,1],samples[i,3], sep="/")
         if (procParams$INPUT_SIGNAL == "fid") {
            FileSpectrum <- paste(FileSpectrum, "fid", sep="/")
         } else {
            FileSpectrum <- paste(FileSpectrum, "pdata",samples[i,4], "1r", sep="/")
         }
         # 1 where the candidate path contains FileSpectrum (regexpr match)
         L <- sapply( LIST, function(x) as.numeric(regexpr(FileSpectrum, x)>0) )
         if ( sum(L)==1 ) {
            specdir <- dirname(LIST[which(L==1)])
            # for 1r, climb back from pdata/<procno> to the expno directory
            if (procParams$INPUT_SIGNAL == "1r") specdir <- dirname(dirname(specdir))
            rawdir <- rbind( rawdir, c( specdir, samples[i,3], samples[i,4] ) )
         } else {
            # no match or an ambiguous (multiple) match
            ERRORLIST <- c( ERRORLIST, paste0("Line ",i,": ",FileSpectrum ) )
            OKRAW <- 0
         }
      }
      if (nbcol>4) {
         # extra columns are experimental factors (expno/procno dropped)
         M <- samples[,c(-3:-4)]
         lstfac <- rbind( lstfac, cbind( c(2:(nbcol-3)), colnames(samples)[c(-1:-4)] ) )
      } else {
         M <- cbind(samples[,1], samples[,2])
      }
      metadata$ERRORLIST <- ERRORLIST
      if (OKRAW==1) {
         metadata$samples <- M
         metadata$rawids <- gsub("//", "/", rawdir)
         metadata$factors <- lstfac
      }
   }
   return(metadata)
}
.set_Metadata_RS2D <- function(RAWDIR, procParams, samples)
{
   # Build metadata from a user-supplied samples table for RS2D data.
   # Expected samples columns: 1 = sample directory, 2 = sample code,
   # 3 = procno (for 1r input), then optional factor columns.
   # Every declared spectrum must match exactly one data.dat file on
   # disk; unmatched rows are collected in $ERRORLIST.
   metadata <- list()
   if (!is.null(samples)) {
      samplesize <- dim(samples)
      nraw <- samplesize[1]
      nbcol <- samplesize[2]
      lstfac <- matrix(c(1,"Samplecode"), nrow=1)
      rawdir <- NULL
      ERRORLIST <- c()
      OKRAW <- 1
      LIST <- gsub("//", "/", list.files(path = RAWDIR, pattern = "data.dat$",
                all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE))
      if (procParams$INPUT_SIGNAL == "fid") {
         # raw acquisitions live outside "Proc"
         LIST <- grep(pattern = "/Proc/", LIST, value = TRUE, invert=TRUE)
      } else {
         # processed spectra live under "Proc"
         LIST <- grep(pattern = "/Proc/", LIST, value = TRUE, invert=FALSE)
      }
      for (i in 1:nraw) {
         # relative path of the expected spectrum file for this row
         if (procParams$INPUT_SIGNAL == "fid") {
            FileSpectrum <- paste(samples[i,1], "data.dat", sep="/")
         } else {
            FileSpectrum <- paste(samples[i,1],"Proc",samples[i,3], "data.dat", sep="/")
         }
         L <- sapply( LIST, function(x) as.numeric(regexpr(FileSpectrum, x)>0) )
         if ( sum(L)==1 ) {
            specdir <- dirname(LIST[which(L==1)])
            # for 1r, climb back from Proc/<procno> to the sample directory
            if (procParams$INPUT_SIGNAL == "1r") specdir <- dirname(dirname(specdir))
            # NOTE(review): samples[1,3] (row 1) looks like a typo for
            # samples[i,3] -- the Bruker variant uses per-row values here
            rawdir <- rbind( rawdir, c( specdir, samples[i,3], samples[1,3] ) )
         } else {
            ERRORLIST <- c( ERRORLIST, FileSpectrum )
            OKRAW <- 0
         }
      }
      if (nbcol>3) {
         # extra columns are experimental factors (procno column dropped)
         M <- samples[,-3]
         lstfac <- rbind( lstfac, cbind( c(2:(nbcol-2)), colnames(samples)[c(-1:-3)] ) )
      } else {
         M <- cbind(samples[,1], samples[,2])
      }
      metadata$ERRORLIST <- ERRORLIST
      if (OKRAW==1) {
         metadata$samples <- M
         metadata$rawids <- gsub("//", "/", rawdir)
         metadata$factors <- lstfac
      }
   }
   return(metadata)
}
# Build acquisition metadata for Varian/Agilent spectra (one 'fid' file per
# experiment directory).
#
# RAWDIR     : root directory scanned recursively for 'fid' files
# procParams : processing parameters (not used by this vendor handler)
# samples    : optional sample table: col 1 = experiment directory name,
#              col 2 = sample name, further columns = experimental factors.
#              When NULL, every fid found below RAWDIR becomes a sample named
#              after its parent directory.
#
# Returns 0 when no fid file is found (numeric sentinel kept for backward
# compatibility; the other handlers return a list), otherwise a metadata list.
.set_Metadata_Varian <- function(RAWDIR, procParams, samples)
{
   lstfac <- matrix(c(1,"Samplecode"), nrow=1)
   rawdir <- NULL
   metadata <- list()
   ERRORLIST <- c()
   OKRAW <- 1
   LIST <- gsub('//', '/', list.files(path = RAWDIR, pattern = "fid$", all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE))
   if ( "character" %in% class(LIST) && length(LIST)==0 ) return(0)
   if (!is.null(samples)) {
      samplesize <- dim(samples)
      nraw <- samplesize[1]
      nbcol <- samplesize[2]
      # split each path into its components (one row per file)
      LIST <- as.data.frame(t(simplify2array(strsplit(LIST,'/'))))
      # Check directory structure
      nDir <- dim(simplify2array(strsplit(RAWDIR,'/')))[1]
      nc <- dim(LIST)[2]
      if ((nc-nDir)>2) {
         # fid files sit more than one level below RAWDIR: descend into the
         # intermediate directories taken from the first file found
         RAWDIR <- do.call( paste, c( RAWDIR, as.list(LIST[1,c((nDir+1):(nc-2))]), sep="/"))
      }
      for (i in 1:nraw) {
         if ( file.exists( paste(RAWDIR, samples[i,1],"fid", sep="/")) ) {
            rawdir <- rbind( rawdir, c( paste(RAWDIR, samples[i,1], sep="/"), 0, 0 ) )
         } else {
            ERRORLIST <- c( ERRORLIST, paste(samples[i,1],"fid", sep="/") )
            OKRAW <- 0
         }
      }
      if (nbcol==2) {
         M <- cbind(samples[,1], samples[,2])
      }
      if (nbcol>2) {
         # extra columns are experimental factors
         M <- samples
         lstfac <- rbind( lstfac, cbind( c(2:(nbcol-1)), colnames(samples)[c(-1:-2)] ) )
      }
   } else {
      # no sample table: one sample per fid, named after its parent directory
      rawdir <- cbind( dirname(LIST), rep(0, length(LIST)), rep(0, length(LIST)) )
      M <- cbind( basename(dirname(LIST)), basename(dirname(LIST)) )
   }
   metadata$ERRORLIST <- ERRORLIST
   if (OKRAW==1) {
      metadata$samples <- M
      metadata$rawids <- gsub("//", "/", rawdir)
      metadata$factors <- lstfac
   }
   return(metadata)
}
# Metadata for nmrML spectra: delegates to the generic one-file-per-spectrum
# handler with the 'nmrML' extension.
.set_Metadata_nmrML <- function(RAWDIR, procParams, samples)
{
   return(.set_Metadata_ext(RAWDIR, procParams, samples, ext="nmrML"))
}
# Metadata for JEOL spectra: delegates to the generic one-file-per-spectrum
# handler with the 'jdf' extension.
.set_Metadata_Jeol <- function(RAWDIR, procParams, samples)
{
   return(.set_Metadata_ext(RAWDIR, procParams, samples, ext="jdf"))
}
# Build metadata for single-file-per-spectrum formats (nmrML, JEOL jdf).
#
# RAWDIR     : root directory scanned recursively for '<name>.<ext>' files
# procParams : processing parameters (unused here; kept for a uniform API)
# samples    : optional sample table; col 1 must hold the spectrum file name,
#              col 2 the sample name, further columns = experimental factors.
#              When NULL, every matching file found below RAWDIR is used.
# ext        : file extension identifying the spectra (without the dot)
#
# Returns 0 when no file matches (numeric sentinel kept for backward
# compatibility), otherwise a metadata list (ERRORLIST / samples / rawids /
# factors, the latter three only when every spectrum was found).
.set_Metadata_ext <- function(RAWDIR, procParams, samples, ext="nmrML")
{
   lstfac <- matrix(c(1,"Samplecode"), nrow=1)
   rawdir <- NULL
   metadata <- list()
   ERRORLIST <- c()
   OKRAW <- 1
   # fix: escape the dot so that only a literal '.<ext>' suffix matches;
   # the unescaped '.' matched any character (e.g. 'sXnmrML')
   pattern <- paste0('\\.',ext,'$')
   LIST <- gsub('//', '/', list.files(path = RAWDIR, pattern = pattern, all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE))
   if ( "character" %in% class(LIST) && length(LIST)==0 ) return(0)
   if (!is.null(samples)) {
      samplesize <- dim(samples)
      nraw <- samplesize[1]
      nbcol <- samplesize[2]
      # split each path into its components; last column = file name
      LIST2 <- as.data.frame(t(simplify2array(strsplit(LIST,'/'))))
      nc <- dim(LIST2)[2]
      for (i in 1:nraw) {
         # the declared file name must match exactly one file found on disk
         if (sum( samples[i,1] == LIST2[,nc]) == 1) {
            rawdir <- rbind( rawdir, c( LIST[ which(samples[i,1] == LIST2[,nc]) ], 0, 0) )
         } else {
            ERRORLIST <- c( ERRORLIST, samples[i,1] )
            OKRAW <- 0
         }
      }
      if (nbcol==2) {
         M <- cbind(samples[,1], samples[,2])
      }
      if (nbcol>2) {
         # extra columns are experimental factors
         M <- samples
         lstfac <- rbind( lstfac, cbind( c(2:(nbcol-1)), colnames(samples)[c(-1:-2)] ) )
      }
   } else {
      # no sample table: take every file; sample name = file name minus extension
      rawdir <- cbind( LIST, rep(0, length(LIST)), rep(0, length(LIST)) )
      M <- cbind( basename(LIST), gsub(pattern, "", basename(LIST)) )
   }
   metadata$ERRORLIST <- ERRORLIST
   if (OKRAW==1) {
      metadata$samples <- M
      metadata$rawids <- gsub("//", "/", rawdir)
      metadata$factors <- lstfac
   }
   return(metadata)
}
| /R/generateMetadata.R | no_license | jpgourdine/Rnmr1D | R | false | false | 18,574 | r | # ID generateMetadata.R
# Copyright (C) 2017-2019 INRA
# Authors: D. Jacob
#
#' generateMetadata
#'
#' \code{generateMetadata} Generate the metadata from the list of raw spectra namely the samples, the experimental factors and the list of selected raw spectra. Depending on whether the sample matrix is supplied as input or not,
#'
#' @param RAWDIR The full path of either the raw spectra directory on the disk
#' @param procParams the list of processing parameters. First initialize this list with the \code{Spec1r.Procpar.default} list, then modify parameters depending of your spectra set.
#' @param samples the samples matrix with the correspondence of the raw spectra
#'
#' @return
#' \code{generateMetadata} returns a list containing the following components:
#' \itemize{
#' \item \code{samples} : the samples matrix with the correspondence of the raw spectra, as well as the levels of the experimental factors if specified in the input.
#' \item \code{factors} : the factors matrix with the corresponding factor names. At minimum, the list contains the Samplecode label corresponding to the samples without their group level.
#' \item \code{rawids} : list of the full directories of the raw spectra (i.e. where the FID files are accessible)
#'}
#'
#' @examples
#' data_dir <- system.file("extra", package = "Rnmr1D")
#' samplefile <- file.path(data_dir, "Samples.txt")
#' samples <- read.table(samplefile, sep="\t", header=TRUE,stringsAsFactors=FALSE)
#' metadata <- generateMetadata(data_dir, procParams=Spec1rProcpar, samples)
#'
generateMetadata <- function(RAWDIR, procParams, samples=NULL)
{
   # With a user-supplied sample table, the vendor dispatcher handles
   # every format.
   if (!is.null(samples)) {
      return(set_Metadata(RAWDIR, procParams, samples))
   }
   # Without a sample table, Bruker and RS2D have dedicated directory
   # scanners, one per input signal type.
   if (procParams$VENDOR == "bruker") {
      if (procParams$INPUT_SIGNAL == "fid") {
         return(generate_Metadata_Bruker_fid(RAWDIR, procParams))
      }
      if (procParams$INPUT_SIGNAL == "1r") {
         return(generate_Metadata_Bruker_1r(RAWDIR, procParams))
      }
      return(list())  # unrecognised signal type: nothing to collect
   }
   if (procParams$VENDOR == "rs2d") {
      if (procParams$INPUT_SIGNAL == "fid") {
         return(generate_Metadata_RS2D_fid(RAWDIR, procParams))
      }
      if (procParams$INPUT_SIGNAL == "1r") {
         return(generate_Metadata_RS2D_1r(RAWDIR, procParams))
      }
      return(list())
   }
   # Varian, Jeol or nmrML: the dispatcher also covers the NULL-sample case.
   set_Metadata(RAWDIR, procParams, samples)
}
# Derive sample metadata for Bruker FIDs when no sample table is given:
# scan RAWDIR for 'fid' files and infer sample codes / experiment numbers
# from the directory layout (<sample>/<expno>/fid).
#
# NOTE(review): the levels() calls below assume the path columns are factors;
# since R 4.0 as.data.frame() produces character columns by default
# (stringsAsFactors = FALSE), for which levels() returns NULL -- confirm the
# intended R version or add an explicit factor conversion.
generate_Metadata_Bruker_fid <- function(RAWDIR, procParams)
{
   metadata <- list()
   ERRORLIST <- c()
   OKRAW <- 1
   lstfac <- matrix(c(1,"Samplecode"), nrow=1)
   RAWPATH <- gsub("//", "/", RAWDIR)
   LIST <- gsub("//", "/", list.files(path = RAWPATH, pattern = "fid$", all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE))
   if ( "character" %in% class(LIST) && length(LIST)==0 ) return(NULL)
   L <- simplify2array(strsplit(LIST,'/'))
   if (! "matrix" %in% class(L)) {
      # paths of unequal depth: keep only the (rounded) average depth and
      # report the rest as errors
      L <- simplify2array(lapply(L, length))
      LM <- round(mean(L),0)
      RMLIST <- c()
      if (sum(L<LM)>0) {
         ERRORLIST <- c( ERRORLIST, LIST[ which(L < LM) ] )
         RMLIST <- c( RMLIST, which(L < LM) )
      }
      if (sum(L>LM)>0) {
         ERRORLIST <- c( ERRORLIST, LIST[ which(L > LM) ] )
         RMLIST <- c( RMLIST, which(L > LM) )
      }
      if (length(RMLIST)>0) LIST <- LIST[ -RMLIST ]
   }
   # split paths into components and drop the components of RAWPATH itself
   LIST <- as.data.frame(t(simplify2array(strsplit(LIST,'/'))))
   nDir <- dim(simplify2array(strsplit(RAWPATH,'/')))[1]
   LIST <- LIST[, c(-1:-nDir)]
   nc <- dim(LIST)[2]
   nr <- dim(LIST)[1]
   if (nc<3) {
      # pad so that at least <sample>/<expno>/fid columns exist
      LIST <- cbind(LIST[,1],LIST)
   }
   SL <- NULL
   # extra leading directories are kept aside in SL
   if (nc>3) { SL <- LIST[, c(1:(nc-3)) ]; LIST <- LIST[ , c((nc-2):nc) ]; }
   nc=3
   if (length(levels(LIST[,1]))<nr && length(levels(LIST[,1]))>1 &&
       length(levels(LIST[,2]))<nr && length(levels(LIST[,2]))>1) {
      # several experiments per sample: keep only the first expno of each
      L <- levels(LIST[,1])
      LIST2 <- NULL
      for (i in 1:length(L)) {
         L2 <- LIST[ LIST[,1]==L[i], ]
         LIST2 <- rbind( LIST2, L2[ L2[,2]==levels(L2[,2])[1], ] )
      }
      LIST <- LIST2
   }
   nr <- dim(LIST)[1]
   MS <- as.matrix(LIST)
   if( !is.null(SL)) { LIST2 <- cbind(SL[c(1: dim(LIST)[1])], LIST); } else { LIST2 <- LIST; }
   nc <- dim(LIST2)[2]
   # rebuild the absolute experiment directory for each spectrum
   rawdir <- cbind( sapply(1:nr, function(x){ do.call( paste, c( RAWPATH, as.list(LIST2[x,c(1:(nc-1))]), sep="/")) }), MS[, 2], rep(0,nr) )
   if (length(levels(LIST[,1]))==nr) {
      M <- MS[, c(1,1) ]
   } else {
      M <- MS[, c(1,2) ]
   }
   # a single spectrum yields a vector: reshape it into a 1-row matrix
   if (nr==1 && "character" %in% class(M)) M <- as.matrix(t(M))
   # if all second components are identical they carry no information
   if (nr>1 && length(unique(sort(M[,2])))==1) M[,2] <- M[,1]
   metadata$ERRORLIST <- ERRORLIST
   if (OKRAW==1) {
      metadata$samples <- M
      metadata$rawids <- gsub("//", "/", rawdir)
      metadata$factors <- lstfac
   }
   return(metadata)
}
# Derive sample metadata for processed Bruker spectra (1r files) when no
# sample table is given: scan RAWDIR and infer sample codes / experiment /
# processing numbers from the layout <sample>/<expno>/pdata/<procno>/1r.
#
# NOTE(review): as for the fid variant, the levels() calls assume factor
# columns; since R 4.0 as.data.frame() yields character columns by default,
# for which levels() returns NULL -- confirm the intended R version.
generate_Metadata_Bruker_1r <- function(RAWDIR, procParams)
{
   metadata <- list()
   ERRORLIST <- c()
   OKRAW <- 1
   lstfac <- matrix(c(1,"Samplecode"), nrow=1)
   RAWPATH <- gsub("//", "/", RAWDIR)
   LIST <- gsub("//", "/", list.files(path = RAWPATH, pattern = "1r$", all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE))
   if ( "character" %in% class(LIST) && length(LIST)==0 ) return(NULL)
   L <- simplify2array(strsplit(LIST,'/'))
   if (! "matrix" %in% class(L)) {
      # paths of unequal depth: keep only the (rounded) average depth and
      # report the rest as errors
      L <- simplify2array(lapply(L, length))
      LM <- round(mean(L),0)
      RMLIST <- c()
      if (sum(L<LM)>0) {
         ERRORLIST <- c( ERRORLIST, LIST[ which(L < LM) ] )
         RMLIST <- c( RMLIST, which(L < LM) )
      }
      if (sum(L>LM)>0) {
         ERRORLIST <- c( ERRORLIST, LIST[ which(L > LM) ] )
         RMLIST <- c( RMLIST, which(L > LM) )
      }
      if (length(RMLIST)>0) LIST <- LIST[ -RMLIST ]
   }
   LIST <- as.data.frame(t(simplify2array(strsplit(LIST,'/'))))
   # Check if we have a Bruker directory structure
   nDir <- dim(simplify2array(strsplit(RAWPATH,'/')))[1]
   LIST <- LIST[, c(-1:-nDir)]
   nc <- dim(LIST)[2]
   nr <- dim(LIST)[1]
   if (nc<5) {
      # pad so that <sample>/<expno>/pdata/<procno>/1r columns exist
      LIST <- cbind(LIST[,1],LIST)
   }
   SL <- NULL
   # extra leading directories are kept aside in SL
   if (nc>5) { SL <- LIST[, c(1:(nc-5)) ]; LIST <- LIST[ , c((nc-4):nc) ]; }
   nc=5
   # several processing numbers: keep only the first one
   L <- levels(LIST[,4])
   if (length(L)>1) {
      LIST <- LIST[ LIST[,4]==L[1], ]
   }
   nr <- dim(LIST)[1]
   if (length(levels(LIST[,1]))<nr && length(levels(LIST[,1]))>1 &&
       length(levels(LIST[,2]))<nr && length(levels(LIST[,2]))>1) {
      # several experiments per sample: keep only the first expno of each
      L <- levels(LIST[,1])
      LIST2 <- NULL
      for (i in 1:length(L)) {
         L2 <- LIST[ LIST[,1]==L[i], ]
         LIST2 <- rbind( LIST2, L2[ L2[,2]==levels(L2[,2])[1], ] )
      }
      LIST <- LIST2
   }
   nr <- dim(LIST)[1]
   MS <- as.matrix(LIST)
   if( !is.null(SL)) { LIST2 <- cbind(SL[c(1: dim(LIST)[1])], LIST); } else { LIST2 <- LIST; }
   nc <- dim(LIST2)[2]
   # rebuild the absolute experiment directory; keep expno and procno columns
   rawdir <- cbind( sapply(1:nr, function(x){ do.call( paste, c( RAWPATH, as.list(LIST2[x,c(1:(nc-3))]), sep="/")) }), MS[, 2], MS[, 4] )
   if (length(levels(LIST[,1]))==nr) {
      M <- MS[, c(1,1) ]
   } else {
      M <- MS[, c(1,2) ]
   }
   # a single spectrum yields a vector: reshape it into a 1-row matrix
   if (nr==1 && "character" %in% class(M)) M <- as.matrix(t(M))
   metadata$ERRORLIST <- ERRORLIST
   if (OKRAW==1) {
      metadata$samples <- M
      metadata$rawids <- gsub("//", "/", rawdir)
      metadata$factors <- lstfac
   }
   return(metadata)
}
# Build metadata for RS2D raw FIDs when no sample table is provided:
# every 'data.dat' found below RAWDIR (outside a /Proc/ directory) is one
# spectrum, and its parent directory name serves as the sample code.
generate_Metadata_RS2D_fid <- function(RAWDIR, procParams)
{
   rawpath <- gsub("//", "/", RAWDIR)
   # acquisition files only: processed data live below /Proc/ and are skipped
   datafiles <- gsub("//", "/", list.files(path = rawpath, pattern = "data.dat$",
                     all.files = FALSE, full.names = TRUE, recursive = TRUE,
                     ignore.case = FALSE, include.dirs = FALSE))
   datafiles <- grep("/Proc/", datafiles, value = TRUE, invert = TRUE)
   nspec <- length(datafiles)
   # one row per spectrum: acquisition directory plus two placeholder ids
   rawtable <- cbind( dirname(datafiles), rep(0, nspec), rep(0, nspec) )
   sampletable <- cbind( basename(dirname(datafiles)), basename(dirname(datafiles)) )
   out <- list()
   out$ERRORLIST <- c()   # assigning c() (NULL): element only present on errors
   out$samples <- sampletable
   out$rawids <- gsub("//", "/", rawtable)
   out$factors <- matrix(c(1, "Samplecode"), nrow = 1)
   return(out)
}
# Build metadata for processed RS2D spectra when no sample table is provided:
# spectra are the 'data.dat' files below a /Proc/ directory
# (<experiment>/Proc/<procno>/data.dat).
#
# NOTE(review): the levels() call below assumes the path columns are factors;
# since R 4.0 as.data.frame() yields character columns by default, for which
# levels() returns NULL -- confirm the intended R version.
generate_Metadata_RS2D_1r <- function(RAWDIR, procParams)
{
   metadata <- list()
   ERRORLIST <- c()
   OKRAW <- 1
   lstfac <- matrix(c(1,"Samplecode"), nrow=1)
   RAWPATH <- gsub("//", "/", RAWDIR)
   LIST <- gsub("//", "/", list.files(path = RAWPATH, pattern = "data.dat$", all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE))
   # keep processed data only; LIST becomes the Proc/<procno> directories
   LIST <- dirname(grep(pattern = "/Proc/", LIST, value = TRUE, invert=FALSE))
   L <- simplify2array(strsplit(LIST,'/'))
   # split paths into components and drop the components of RAWPATH itself
   LIST <- as.data.frame(t(simplify2array(strsplit(LIST,'/'))))
   nDir <- dim(simplify2array(strsplit(RAWPATH,'/')))[1]
   LIST <- LIST[, c(-1:-nDir)]
   nr <- dim(LIST)[1]
   nc <- dim(LIST)[2]
   if (length(levels(LIST[,nc-2]))<nr && length(levels(LIST[,nc-2]))>1) {
      # several processings per experiment: keep only the first row of each
      L <- levels(LIST[,nc-2])
      LIST2 <- NULL
      for (i in 1:length(L)) LIST2 <- rbind(LIST2, LIST[ LIST[,nc-2]==L[i], ][1,])
      LIST <- LIST2
   }
   nr <- dim(LIST)[1]
   nc <- dim(LIST)[2]
   MS <- as.matrix(LIST)
   # sample code = experiment directory name (column nc-2)
   M <- cbind( MS[,nc-2], MS[,nc-2] )
   # rebuild the absolute experiment directory; keep the procno twice
   rawdir <- cbind( sapply(1:nr, function(x){ do.call( paste, c( RAWPATH, as.list(MS[x,1:(nc-2)]), sep="/")) }), MS[,nc], MS[,nc] )
   metadata$ERRORLIST <- ERRORLIST
   if (OKRAW==1) {
      metadata$samples <- M
      metadata$rawids <- gsub("//", "/", rawdir)
      metadata$factors <- lstfac
   }
   return(metadata)
}
# Dispatch metadata construction to the vendor-specific handler.
# Returns NULL when the vendor is unknown.
set_Metadata <- function(RAWDIR, procParams, samples)
{
   switch(procParams$VENDOR,
      "bruker" = .set_Metadata_Bruker(RAWDIR, procParams, samples),
      "varian" = .set_Metadata_Varian(RAWDIR, procParams, samples),
      "nmrml"  = .set_Metadata_nmrML(RAWDIR, procParams, samples),
      "jeol"   = .set_Metadata_Jeol(RAWDIR, procParams, samples),
      "rs2d"   = .set_Metadata_RS2D(RAWDIR, procParams, samples),
      NULL)
}
# Build acquisition metadata for Bruker spectra from a user-supplied sample table.
#
# RAWDIR     : root directory scanned recursively for the raw spectra
# procParams : processing parameters; INPUT_SIGNAL selects raw FIDs ("fid",
#              located outside 'pdata') or processed real spectra ("1r",
#              located inside 'pdata/<procno>/')
# samples    : sample table: col 1 = sample code (directory name),
#              col 2 = sample name, col 3 = experiment number (expno),
#              col 4 = processing number (procno, used for "1r"),
#              any further columns = experimental factors
#
# Returns a list; ERRORLIST is only present when some spectra could not be
# located (assigning c(), i.e. NULL, removes the element), and
# samples/rawids/factors are only set when every spectrum was found.
.set_Metadata_Bruker <- function(RAWDIR, procParams, samples)
{
   metadata <- list()
   if (!is.null(samples)) {
      samplesize <- dim(samples)
      nraw <- samplesize[1]
      nbcol <- samplesize[2]
      lstfac <- matrix(c(1,"Samplecode"), nrow=1)
      rawdir <- NULL
      ERRORLIST <- c()
      OKRAW <- 1
      # candidate spectrum files: FIDs live outside 'pdata', 1r files inside it
      if (procParams$INPUT_SIGNAL == "fid") {
         LIST <- gsub("//", "/", list.files(path = RAWDIR, pattern = "fid$", all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE))
         LIST <- grep(pattern = "pdata", LIST, value = TRUE, invert=TRUE)
      } else {
         LIST <- gsub("//", "/", list.files(path = RAWDIR, pattern = "1r$", all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE))
         LIST <- grep(pattern = "pdata", LIST, value = TRUE, invert=FALSE)
      }
      for (i in 1:nraw) {
         # expected relative path of the i-th spectrum
         FileSpectrum <- paste(samples[i,1],samples[i,3], sep="/")
         if (procParams$INPUT_SIGNAL == "fid") {
            FileSpectrum <- paste(FileSpectrum, "fid", sep="/")
         } else {
            FileSpectrum <- paste(FileSpectrum, "pdata",samples[i,4], "1r", sep="/")
         }
         # NOTE(review): regexpr() treats FileSpectrum as a regular expression
         # and matches anywhere in the path; sample codes containing regex
         # metacharacters, or contained within longer codes, may mismatch.
         L <- sapply( LIST, function(x) as.numeric(regexpr(FileSpectrum, x)>0) )
         if ( sum(L)==1 ) {
            # accept only an unambiguous (single) match
            specdir <- dirname(LIST[which(L==1)])
            # for 1r data, strip 'pdata/<procno>' to get the experiment directory
            if (procParams$INPUT_SIGNAL == "1r") specdir <- dirname(dirname(specdir))
            rawdir <- rbind( rawdir, c( specdir, samples[i,3], samples[i,4] ) )
         } else {
            ERRORLIST <- c( ERRORLIST, paste0("Line ",i,": ",FileSpectrum ) )
            OKRAW <- 0
         }
      }
      if (nbcol>4) {
         # drop expno/procno columns; remaining extra columns are factors
         M <- samples[,c(-3:-4)]
         lstfac <- rbind( lstfac, cbind( c(2:(nbcol-3)), colnames(samples)[c(-1:-4)] ) )
      } else {
         M <- cbind(samples[,1], samples[,2])
      }
      metadata$ERRORLIST <- ERRORLIST
      if (OKRAW==1) {
         metadata$samples <- M
         metadata$rawids <- gsub("//", "/", rawdir)
         metadata$factors <- lstfac
      }
   }
   return(metadata)
}
# Build acquisition metadata for RS2D spectra from a user-supplied sample table.
#
# RAWDIR     : root directory scanned recursively for 'data.dat' files
# procParams : processing parameters; INPUT_SIGNAL selects raw FIDs ("fid",
#              data.dat outside a /Proc/ directory) or processed spectra
#              ("1r", data.dat below Proc/<procno>/)
# samples    : sample table: col 1 = sample code (directory name),
#              col 2 = sample name, col 3 = processing number (used for "1r"),
#              any further columns = experimental factors
#
# Returns a list; ERRORLIST is only present when some spectra could not be
# located, and samples/rawids/factors are only set when every spectrum was found.
.set_Metadata_RS2D <- function(RAWDIR, procParams, samples)
{
   metadata <- list()
   if (!is.null(samples)) {
      samplesize <- dim(samples)
      nraw <- samplesize[1]
      nbcol <- samplesize[2]
      lstfac <- matrix(c(1,"Samplecode"), nrow=1)
      rawdir <- NULL
      ERRORLIST <- c()
      OKRAW <- 1
      LIST <- gsub("//", "/", list.files(path = RAWDIR, pattern = "data.dat$",
                all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE))
      # acquisition data live outside /Proc/, processed data below it
      if (procParams$INPUT_SIGNAL == "fid") {
         LIST <- grep(pattern = "/Proc/", LIST, value = TRUE, invert=TRUE)
      } else {
         LIST <- grep(pattern = "/Proc/", LIST, value = TRUE, invert=FALSE)
      }
      for (i in 1:nraw) {
         # expected relative path of the i-th spectrum
         if (procParams$INPUT_SIGNAL == "fid") {
            FileSpectrum <- paste(samples[i,1], "data.dat", sep="/")
         } else {
            FileSpectrum <- paste(samples[i,1],"Proc",samples[i,3], "data.dat", sep="/")
         }
         # flag list entries whose path contains the expected relative path
         L <- sapply( LIST, function(x) as.numeric(regexpr(FileSpectrum, x)>0) )
         if ( sum(L)==1 ) {
            specdir <- dirname(LIST[which(L==1)])
            # for processed data, strip 'Proc/<procno>' to get the experiment directory
            if (procParams$INPUT_SIGNAL == "1r") specdir <- dirname(dirname(specdir))
            # fix: use the current row's processing number; the original read
            # samples[1,3], repeating the first row's value for every spectrum
            rawdir <- rbind( rawdir, c( specdir, samples[i,3], samples[i,3] ) )
         } else {
            ERRORLIST <- c( ERRORLIST, FileSpectrum )
            OKRAW <- 0
         }
      }
      if (nbcol>3) {
         # drop the processing-number column; remaining extra columns are factors
         M <- samples[,-3]
         lstfac <- rbind( lstfac, cbind( c(2:(nbcol-2)), colnames(samples)[c(-1:-3)] ) )
      } else {
         M <- cbind(samples[,1], samples[,2])
      }
      metadata$ERRORLIST <- ERRORLIST
      if (OKRAW==1) {
         metadata$samples <- M
         metadata$rawids <- gsub("//", "/", rawdir)
         metadata$factors <- lstfac
      }
   }
   return(metadata)
}
# Build acquisition metadata for Varian/Agilent spectra (one 'fid' file per
# experiment directory).
#
# RAWDIR     : root directory scanned recursively for 'fid' files
# procParams : processing parameters (not used by this vendor handler)
# samples    : optional sample table: col 1 = experiment directory name,
#              col 2 = sample name, further columns = experimental factors.
#              When NULL, every fid found below RAWDIR becomes a sample named
#              after its parent directory.
#
# Returns 0 when no fid file is found (numeric sentinel kept for backward
# compatibility; the other handlers return a list), otherwise a metadata list.
.set_Metadata_Varian <- function(RAWDIR, procParams, samples)
{
   lstfac <- matrix(c(1,"Samplecode"), nrow=1)
   rawdir <- NULL
   metadata <- list()
   ERRORLIST <- c()
   OKRAW <- 1
   LIST <- gsub('//', '/', list.files(path = RAWDIR, pattern = "fid$", all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE))
   if ( "character" %in% class(LIST) && length(LIST)==0 ) return(0)
   if (!is.null(samples)) {
      samplesize <- dim(samples)
      nraw <- samplesize[1]
      nbcol <- samplesize[2]
      # split each path into its components (one row per file)
      LIST <- as.data.frame(t(simplify2array(strsplit(LIST,'/'))))
      # Check directory structure
      nDir <- dim(simplify2array(strsplit(RAWDIR,'/')))[1]
      nc <- dim(LIST)[2]
      if ((nc-nDir)>2) {
         # fid files sit more than one level below RAWDIR: descend into the
         # intermediate directories taken from the first file found
         RAWDIR <- do.call( paste, c( RAWDIR, as.list(LIST[1,c((nDir+1):(nc-2))]), sep="/"))
      }
      for (i in 1:nraw) {
         if ( file.exists( paste(RAWDIR, samples[i,1],"fid", sep="/")) ) {
            rawdir <- rbind( rawdir, c( paste(RAWDIR, samples[i,1], sep="/"), 0, 0 ) )
         } else {
            ERRORLIST <- c( ERRORLIST, paste(samples[i,1],"fid", sep="/") )
            OKRAW <- 0
         }
      }
      if (nbcol==2) {
         M <- cbind(samples[,1], samples[,2])
      }
      if (nbcol>2) {
         # extra columns are experimental factors
         M <- samples
         lstfac <- rbind( lstfac, cbind( c(2:(nbcol-1)), colnames(samples)[c(-1:-2)] ) )
      }
   } else {
      # no sample table: one sample per fid, named after its parent directory
      rawdir <- cbind( dirname(LIST), rep(0, length(LIST)), rep(0, length(LIST)) )
      M <- cbind( basename(dirname(LIST)), basename(dirname(LIST)) )
   }
   metadata$ERRORLIST <- ERRORLIST
   if (OKRAW==1) {
      metadata$samples <- M
      metadata$rawids <- gsub("//", "/", rawdir)
      metadata$factors <- lstfac
   }
   return(metadata)
}
# Metadata for nmrML spectra: delegates to the generic one-file-per-spectrum
# handler with the 'nmrML' extension.
.set_Metadata_nmrML <- function(RAWDIR, procParams, samples)
{
   return(.set_Metadata_ext(RAWDIR, procParams, samples, ext="nmrML"))
}
# Metadata for JEOL spectra: delegates to the generic one-file-per-spectrum
# handler with the 'jdf' extension.
.set_Metadata_Jeol <- function(RAWDIR, procParams, samples)
{
   return(.set_Metadata_ext(RAWDIR, procParams, samples, ext="jdf"))
}
# Build metadata for single-file-per-spectrum formats (nmrML, JEOL jdf).
#
# RAWDIR     : root directory scanned recursively for '<name>.<ext>' files
# procParams : processing parameters (unused here; kept for a uniform API)
# samples    : optional sample table; col 1 must hold the spectrum file name,
#              col 2 the sample name, further columns = experimental factors.
#              When NULL, every matching file found below RAWDIR is used.
# ext        : file extension identifying the spectra (without the dot)
#
# Returns 0 when no file matches (numeric sentinel kept for backward
# compatibility), otherwise a metadata list (ERRORLIST / samples / rawids /
# factors, the latter three only when every spectrum was found).
.set_Metadata_ext <- function(RAWDIR, procParams, samples, ext="nmrML")
{
   lstfac <- matrix(c(1,"Samplecode"), nrow=1)
   rawdir <- NULL
   metadata <- list()
   ERRORLIST <- c()
   OKRAW <- 1
   # fix: escape the dot so that only a literal '.<ext>' suffix matches;
   # the unescaped '.' matched any character (e.g. 'sXnmrML')
   pattern <- paste0('\\.',ext,'$')
   LIST <- gsub('//', '/', list.files(path = RAWDIR, pattern = pattern, all.files = FALSE, full.names = TRUE, recursive = TRUE, ignore.case = FALSE, include.dirs = FALSE))
   if ( "character" %in% class(LIST) && length(LIST)==0 ) return(0)
   if (!is.null(samples)) {
      samplesize <- dim(samples)
      nraw <- samplesize[1]
      nbcol <- samplesize[2]
      # split each path into its components; last column = file name
      LIST2 <- as.data.frame(t(simplify2array(strsplit(LIST,'/'))))
      nc <- dim(LIST2)[2]
      for (i in 1:nraw) {
         # the declared file name must match exactly one file found on disk
         if (sum( samples[i,1] == LIST2[,nc]) == 1) {
            rawdir <- rbind( rawdir, c( LIST[ which(samples[i,1] == LIST2[,nc]) ], 0, 0) )
         } else {
            ERRORLIST <- c( ERRORLIST, samples[i,1] )
            OKRAW <- 0
         }
      }
      if (nbcol==2) {
         M <- cbind(samples[,1], samples[,2])
      }
      if (nbcol>2) {
         # extra columns are experimental factors
         M <- samples
         lstfac <- rbind( lstfac, cbind( c(2:(nbcol-1)), colnames(samples)[c(-1:-2)] ) )
      }
   } else {
      # no sample table: take every file; sample name = file name minus extension
      rawdir <- cbind( LIST, rep(0, length(LIST)), rep(0, length(LIST)) )
      M <- cbind( basename(LIST), gsub(pattern, "", basename(LIST)) )
   }
   metadata$ERRORLIST <- ERRORLIST
   if (OKRAW==1) {
      metadata$samples <- M
      metadata$rawids <- gsub("//", "/", rawdir)
      metadata$factors <- lstfac
   }
   return(metadata)
}
|
# exploring the biases occurring when underreporting is ignored or
# multiplication factors are used.
# Johannes Bracher, johannes.bracher@uzh.ch
library(hhh4underreporting)
setwd("/home/johannes/Documents/underreporting/Article_hhh4/simulations/")
# number of simulation iterations:
n_sim <- 1000
# true parameters of the data-generating model
# (nu: endemic intercept, phi: autoregressive coefficient; kappa enters
# R_eff = phi/(1 - kappa) below, psi is presumably the NegBin1 dispersion
# parameter -- confirm in the hhh4underreporting documentation):
nu <- 15
phi <- 0.4
kappa <- 0.3
psi <- 0.1
vals_q <- c(1, 2.5, 5, 7.5, 10)/10 # five different values for reporting probability
lgt <- 8*52 # set length of simulated time series (8*52 = eight years of weekly data)
pars <- c("nu" = nu, "phi" = phi, "kappa" = kappa, "psi" = psi)
# Choose starting values for hhh4u() from moment estimators of the observed
# counts.
#
# sts : an sts object; only its @observed count matrix is used
# q   : assumed reporting probability passed to the moment inversion
#
# Returns a named numeric vector (log_lambda1, end.(Intercept),
# ar.(Intercept), log_kappa, log_psi), i.e. the log-scale starting values
# expected by hhh4u().
get_starting_values <- function(sts, q = 1){
  # extract counts:
  vect <- as.vector(sts@observed)
  # get empirical second-order properties (mean, variance, first two
  # autocorrelations; the decay ratio h is capped at 0.9):
  ac <- acf(vect, lag.max = 2, plot = FALSE)$acf[2:4]
  emp_sop <- list(mu = mean(vect), sigma2 = var(vect),
                  g = ac[1], h = min(ac[2]/ac[1], 0.9))
  # transform to parameter starting values:
  start0 <- hhh4underreporting:::recover_pars_homog(sop_list = emp_sop, q = q)
  # catch cases where moment estimators are ill-defined: fall back to
  # defaults for non-finite (NaN/NA/Inf; is.finite also catches NA and Inf,
  # which is.nan() did not) or negative values. '||' replaces '|' since
  # these are scalar conditions.
  if(!is.finite(start0$nu) || start0$nu < 0) start0$nu <- 5
  if(!is.finite(start0$phi) || start0$phi < 0) start0$phi <- max(ac[1], 0.1)
  if(!is.finite(start0$kappa) || start0$kappa < 0) start0$kappa <- 0.05
  if(!is.finite(start0$psi) || start0$psi < 0) start0$psi <- 0.01
  # format so that hhh4u can use them (all on log scale):
  start <- c("log_lambda1" = log(vect[1] + 1),
             "end.(Intercept)" = log(start0$nu),
             "ar.(Intercept)" = log(start0$phi),
             "log_kappa" = log(start0$kappa),
             "log_psi" = log(start0$psi))
  return(start)
}
# prepare lists of matrices to store results:
# one result matrix per reporting probability; one row per simulation run,
# one column per estimated parameter
estim0 <- matrix(ncol = 5, nrow = n_sim)
colnames(estim0) <- c("lambda1", "nu", "phi", "kappa", "psi")
estim <- Y <- list()
for(i in seq_along(vals_q)){
  estim[[i]] <- estim0
  Y[[i]] <- matrix(ncol = lgt, nrow = n_sim)
}
names(estim) <- names(Y) <- paste0("p", vals_q)
# estim: accounting for underreporting; estim_naive: q fixed at 1;
# estim_stretch: counts divided by q (multiplication factors) before fitting
estim_naive <- estim_stretch <- estim
# set seeds. We set the seed in each iteration to be able to get back to certain settings
# in case convergence issues occur.
set.seed(123)
seeds <- sample(1:10000, n_sim)
# run:
for(i in 1:n_sim){
  set.seed(seeds[i]) # set seed
  # simulate latent proces X:
  X_temp <- simulate_hhh4u_homog(nu = nu, phi = phi, kappa = kappa, psi = psi, q = 1, lgt = lgt)$X
  # run over different reporting probabilities:
  for(j in seq_along(vals_q)){
    # generate underreported counts (binomial thinning of the latent process):
    q_temp <- vals_q[j]
    Y_temp <- rbinom(lgt, X_temp, q_temp)
    Y[[j]][i, ] <- Y_temp
    sts_temp <- sts(observed = Y_temp)
    # fit model using our method (true reporting probability passed via q):
    start_fl <- get_starting_values(sts = sts_temp, q = q_temp)
    fl <- hhh4u(sts_temp, control = list(q = q_temp,
                                         family = "NegBin1",
                                         start = start_fl,
                                         return_se = FALSE))
    estim[[j]][i, ] <- exp(fl$coefficients) # keep parameter estimates (back-transformed from log scale)
    # fit model ignoring underreporting (q fixed at 1):
    start_fl_naive <- get_starting_values(sts = sts_temp, q = 1)
    fl_naive <- hhh4u(sts_temp, control = list(q = 1,
                                               family = "NegBin1",
                                               start = start_fl_naive,
                                               return_se = FALSE))
    estim_naive[[j]][i, ] <- exp(fl_naive$coefficients)
    # fit model using stretching approach (counts scaled up by 1/q, then q = 1):
    sts_stretched_temp <- sts(observed = round(Y_temp/q_temp))
    start_fl_stretched <- get_starting_values(sts = sts_stretched_temp, q = 1)
    fl_stretch <- hhh4u(sts_stretched_temp, control = list(q = 1,
                                                           family = "NegBin1",
                                                           start = start_fl_stretched,
                                                           return_se = FALSE))
    estim_stretch[[j]][i, ] <- exp(fl_stretch$coefficients)
  }
  print(i)
}
# save:
save(nu, phi, kappa, psi, vals_q, lgt, seeds, estim, estim_naive, estim_stretch,
     file = paste0("bias_variance/results_bias_variance_", lgt, "_", n_sim, "_general.rda"))
# load(paste0("bias_variance/results_bias_variance_", lgt, "_", n_sim, "_general.rda"))
# store as csv: flatten the three result lists into one wide data frame,
# prefixing each column with the method and the reporting probability
estim_frame <- data.frame(true_nu = rep(nu, n_sim),
                          true_phi = rep(phi, n_sim),
                          true_kappa = rep(kappa, n_sim),
                          true_psi = rep(psi, n_sim))
for(p in names(estim)){
  colnames(estim[[p]]) <- paste0("estim_", colnames(estim[[p]]), "_", p)
  colnames(estim_naive[[p]]) <- paste0("estim_naive_", colnames(estim_naive[[p]]), "_", p)
  colnames(estim_stretch[[p]]) <- paste0("estim_stretch_", colnames(estim_stretch[[p]]), "_", p)
  estim_frame <- cbind(estim_frame, estim[[p]], estim_naive[[p]], estim_stretch[[p]])
}
colnames(estim_frame)  # printed for a quick visual check of the column layout
write.csv(estim_frame, file = paste0("bias_variance/results_bias_variance_", lgt, "_", n_sim, "_general.csv"))
# get expectations for naive estimates:
# theoretical plug-in limits of the naive estimator over a fine grid of
# reporting probabilities: compute the true second-order properties under
# reporting probability q, then invert them pretending q = 1
vals_q_long <- 1:100/100
expec_naive <- matrix(nrow = 4, ncol = length(vals_q_long))
rownames(expec_naive) <- c("nu", "phi", "kappa", "psi")
colnames(expec_naive) <- vals_q_long
for(k in seq_along(vals_q_long)){
  true_sop <- hhh4underreporting:::compute_sop_homog(nu, phi, kappa, psi, vals_q_long[k])$X_tilde
  expec_naive[, k] <- unlist(hhh4underreporting:::recover_pars_homog(q = 1, sop_list = true_sop))[1:4]
}
# get expectations for estimates based on stretching with underreporting factors:
# stretching rescales mean by 1/q and variance by 1/q^2 before the inversion
expec_stretched <- NA*expec_naive
for(k in seq_along(vals_q_long)){
  true_sop <- hhh4underreporting:::compute_sop_homog(nu, phi, kappa, psi, vals_q_long[k])$X_tilde
  stretched_sop <- true_sop
  stretched_sop$mu <- true_sop$mu/vals_q_long[k]
  stretched_sop$sigma2 <- true_sop$sigma2/vals_q_long[k]^2
  expec_stretched[, k] <- unlist(hhh4underreporting:::recover_pars_homog(q = 1, sop_list = stretched_sop))[1:4]
}
# add estimates of effective reproduction numbers (Reff = phi/(1 - kappa)):
for(i in seq_along(vals_q)){
  estim[[i]] <- cbind(estim[[i]],
                      Reff = estim[[i]][, "phi"]/(1 - estim[[i]][, "kappa"]))
  estim_naive[[i]] <- cbind(estim_naive[[i]],
                            Reff = estim_naive[[i]][, "phi"]/
                              (1 - estim_naive[[i]][, "kappa"]))
  estim_stretch[[i]] <- cbind(estim_stretch[[i]],
                              Reff = estim_stretch[[i]][, "phi"]/
                                (1 - estim_stretch[[i]][, "kappa"]))
}
# helper functions for small boxplots:
# plot one small boxplot
# vect  : values to summarise
# at    : horizontal position (here: the reporting probability)
# shift : optional horizontal offset
# col   : box fill colour
# Draws onto an existing plot (add = TRUE); used for its side effect only.
bp <- function(vect, at, shift = 0, col = "white"){
  boxplot(vect, at = at + shift, add = TRUE, boxwex = 0.1, col = col, pch = 16, lty = 1,
          staplewex = 0, medlwd = 1, cex = 0.5)
}
# plot several small boxplots
# samples       : list of estimate matrices, one per reporting probability
# variable      : column name of the parameter to plot
# vals_q        : reporting probabilities (horizontal box positions)
# true_value    : true parameter value, drawn as a horizontal green line
# expected_bias : asymptotic estimator values over a fine grid of reporting
#                 probabilities, drawn as a red curve
# ...           : further arguments passed to plot() (e.g. ylab)
all_bps <- function(samples, variable, vals_q, col = "white",
                    true_value = NA, expected_bias = rep(NA, length(vals_q)),
                    xlim = 0:1, ylim = 0:1,
                    xlab = expression(true~pi), ...){
  plot(NULL, xlim = xlim, ylim = ylim, xlab = xlab, axes = FALSE, ...)
  axis(1, at = c(0, 0.1, 0.25, 0.5, 0.75, 1))
  abline(h = true_value, col = "chartreuse3")
  # red curve: grid index i mapped back to the probability i/length
  lines(seq_along(expected_bias)/length(expected_bias), expected_bias, col = "red")
  for(i in seq_along(vals_q)){
    bp(samples[[i]][, variable], at = vals_q[i], col = col)
  }
}
# plot: 5x3 panel -- rows = parameters (nu, phi, kappa, psi, Reff),
# columns = methods (a: accounting for underreporting, b: ignoring it,
# c: multiplication factors)
par(las = 1, mfcol = c(5, 3), mar = c(4, 4.5, 1.8, 1), font.main = 1, family = "serif")
# my method:
all_bps(samples = estim, variable = "nu", vals_q = vals_q, ylim = c(0, 60),
        ylab = expression(hat(nu)), true_value = nu)
mtext("(a) Accounting for underreporting", side = 3, at = 0.5, cex = 0.75, line = 0.9)
all_bps(samples = estim, variable = "phi", vals_q = vals_q,
        ylab = expression(hat(phi1)), true_value = phi)
all_bps(samples = estim, variable = "kappa", vals_q = vals_q,
        ylab = expression(hat(kappa)), true_value = kappa)
all_bps(samples = estim, variable = "psi", vals_q = vals_q, ylim = c(0, 0.25),
        ylab = expression(hat(psi)), true_value = psi)
all_bps(samples = estim, variable = "Reff", vals_q = vals_q, ylim = c(0, 1),
        ylab = expression(hat(R)[eff]), true_value = phi/(1 - kappa))
# naive: red curves show the theoretical asymptotic bias from expec_naive
all_bps(samples = estim_naive, variable = "nu", vals_q = vals_q, ylim = c(0, 60),
        ylab = expression(hat(nu)), expected_bias = expec_naive["nu", ], true_value = nu)
mtext("(b) Ignoring underreporting", side = 3, at = 0.5, cex = 0.75, line = 0.9)
all_bps(samples = estim_naive, variable = "phi", vals_q = vals_q,
        ylab = expression(hat(phi1)), expected_bias = expec_naive["phi", ], true_value = phi)
all_bps(samples = estim_naive, variable = "kappa", vals_q = vals_q,
        ylab = expression(hat(kappa)), expected_bias = expec_naive["kappa", ], true_value = kappa)
all_bps(samples = estim_naive, variable = "psi", vals_q = vals_q, ylim = c(0, 0.25),
        ylab = expression(hat(psi)), expected_bias = expec_naive["psi", ], true_value = psi)
all_bps(samples = estim_naive, variable = "Reff", vals_q = vals_q, ylim = c(0, 1),
        ylab = expression(hat(R)[eff]),
        expected_bias = expec_naive["phi", ]/(1 - expec_naive["kappa", ]), true_value = phi/(1 - kappa))
# stretched: red curves from expec_stretched (none available for nu)
all_bps(samples = estim_stretch, variable = "nu", vals_q = vals_q, ylim = c(0, 60),
        ylab = expression(hat(nu)), true_value = nu)
mtext("(c) Using multiplication factors", side = 3, at = 0.5, cex = 0.75, line = 0.9)
all_bps(samples = estim_stretch, variable = "phi", vals_q = vals_q,
        ylab = expression(hat(phi1)), expected_bias = expec_stretched["phi", ], true_value = phi)
all_bps(samples = estim_stretch, variable = "kappa", vals_q = vals_q,
        ylab = expression(hat(kappa)), expected_bias = expec_stretched["kappa", ], true_value = kappa)
all_bps(samples = estim_stretch, variable = "psi", vals_q = vals_q, ylim = c(0, 0.25),
        ylab = expression(hat(psi)), expected_bias = expec_stretched["psi", ], true_value = psi)
all_bps(samples = estim_stretch, variable = "Reff", vals_q = vals_q, ylim = c(0, 1),
        ylab = expression(hat(R)[eff]),
        expected_bias = expec_stretched["phi", ]/(1 - expec_stretched["kappa", ]), true_value = phi/(1 - kappa))
| /examples/simulations/bias_variance/bias_variance.R | no_license | jbracher/hhh4underreporting | R | false | false | 10,249 | r | # exploring the biases occurring when underreporting is ignored or
# multiplication factors are used.
# Johannes Bracher, johannes.bracher@uzh.ch
library(hhh4underreporting)
setwd("/home/johannes/Documents/underreporting/Article_hhh4/simulations/")
# number of simulation iterations:
n_sim <- 1000
# true parameters of the data-generating model
# (nu: endemic intercept, phi: autoregressive coefficient; kappa enters
# R_eff = phi/(1 - kappa) below, psi is presumably the NegBin1 dispersion
# parameter -- confirm in the hhh4underreporting documentation):
nu <- 15
phi <- 0.4
kappa <- 0.3
psi <- 0.1
vals_q <- c(1, 2.5, 5, 7.5, 10)/10 # five different values for reporting probability
lgt <- 8*52 # set length of simulated time series (8*52 = eight years of weekly data)
pars <- c("nu" = nu, "phi" = phi, "kappa" = kappa, "psi" = psi)
# Choose starting values for hhh4u() from moment estimators of the observed
# counts.
#
# sts : an sts object; only its @observed count matrix is used
# q   : assumed reporting probability passed to the moment inversion
#
# Returns a named numeric vector (log_lambda1, end.(Intercept),
# ar.(Intercept), log_kappa, log_psi), i.e. the log-scale starting values
# expected by hhh4u().
get_starting_values <- function(sts, q = 1){
  # extract counts:
  vect <- as.vector(sts@observed)
  # get empirical second-order properties (mean, variance, first two
  # autocorrelations; the decay ratio h is capped at 0.9):
  ac <- acf(vect, lag.max = 2, plot = FALSE)$acf[2:4]
  emp_sop <- list(mu = mean(vect), sigma2 = var(vect),
                  g = ac[1], h = min(ac[2]/ac[1], 0.9))
  # transform to parameter starting values:
  start0 <- hhh4underreporting:::recover_pars_homog(sop_list = emp_sop, q = q)
  # catch cases where moment estimators are ill-defined: fall back to
  # defaults for non-finite (NaN/NA/Inf; is.finite also catches NA and Inf,
  # which is.nan() did not) or negative values. '||' replaces '|' since
  # these are scalar conditions.
  if(!is.finite(start0$nu) || start0$nu < 0) start0$nu <- 5
  if(!is.finite(start0$phi) || start0$phi < 0) start0$phi <- max(ac[1], 0.1)
  if(!is.finite(start0$kappa) || start0$kappa < 0) start0$kappa <- 0.05
  if(!is.finite(start0$psi) || start0$psi < 0) start0$psi <- 0.01
  # format so that hhh4u can use them (all on log scale):
  start <- c("log_lambda1" = log(vect[1] + 1),
             "end.(Intercept)" = log(start0$nu),
             "ar.(Intercept)" = log(start0$phi),
             "log_kappa" = log(start0$kappa),
             "log_psi" = log(start0$psi))
  return(start)
}
# prepare lists of matrices to store results:
# one result matrix per reporting probability; one row per simulation run,
# one column per estimated parameter
estim0 <- matrix(ncol = 5, nrow = n_sim)
colnames(estim0) <- c("lambda1", "nu", "phi", "kappa", "psi")
estim <- Y <- list()
for(i in seq_along(vals_q)){
  estim[[i]] <- estim0
  Y[[i]] <- matrix(ncol = lgt, nrow = n_sim)
}
names(estim) <- names(Y) <- paste0("p", vals_q)
# estim: accounting for underreporting; estim_naive: q fixed at 1;
# estim_stretch: counts divided by q (multiplication factors) before fitting
estim_naive <- estim_stretch <- estim
# set seeds. We set the seed in each iteration to be able to get back to certain settings
# in case convergence issues occur.
set.seed(123)
seeds <- sample(1:10000, n_sim)
# run:
for(i in 1:n_sim){
  set.seed(seeds[i]) # set seed
  # simulate latent proces X:
  X_temp <- simulate_hhh4u_homog(nu = nu, phi = phi, kappa = kappa, psi = psi, q = 1, lgt = lgt)$X
  # run over different reporting probabilities:
  for(j in seq_along(vals_q)){
    # generate underreported counts (binomial thinning of the latent process):
    q_temp <- vals_q[j]
    Y_temp <- rbinom(lgt, X_temp, q_temp)
    Y[[j]][i, ] <- Y_temp
    sts_temp <- sts(observed = Y_temp)
    # fit model using our method (true reporting probability passed via q):
    start_fl <- get_starting_values(sts = sts_temp, q = q_temp)
    fl <- hhh4u(sts_temp, control = list(q = q_temp,
                                         family = "NegBin1",
                                         start = start_fl,
                                         return_se = FALSE))
    estim[[j]][i, ] <- exp(fl$coefficients) # keep parameter estimates (back-transformed from log scale)
    # fit model ignoring underreporting (q fixed at 1):
    start_fl_naive <- get_starting_values(sts = sts_temp, q = 1)
    fl_naive <- hhh4u(sts_temp, control = list(q = 1,
                                               family = "NegBin1",
                                               start = start_fl_naive,
                                               return_se = FALSE))
    estim_naive[[j]][i, ] <- exp(fl_naive$coefficients)
    # fit model using stretching approach (counts scaled up by 1/q, then q = 1):
    sts_stretched_temp <- sts(observed = round(Y_temp/q_temp))
    start_fl_stretched <- get_starting_values(sts = sts_stretched_temp, q = 1)
    fl_stretch <- hhh4u(sts_stretched_temp, control = list(q = 1,
                                                           family = "NegBin1",
                                                           start = start_fl_stretched,
                                                           return_se = FALSE))
    estim_stretch[[j]][i, ] <- exp(fl_stretch$coefficients)
  }
  print(i)
}
# save:
save(nu, phi, kappa, psi, vals_q, lgt, seeds, estim, estim_naive, estim_stretch,
     file = paste0("bias_variance/results_bias_variance_", lgt, "_", n_sim, "_general.rda"))
# load(paste0("bias_variance/results_bias_variance_", lgt, "_", n_sim, "_general.rda"))
# store as csv: one wide data frame, true values first, then per-method and
# per-reporting-probability estimate columns
estim_frame <- data.frame(true_nu = rep(nu, n_sim),
                          true_phi = rep(phi, n_sim),
                          true_kappa = rep(kappa, n_sim),
                          true_psi = rep(psi, n_sim))
for(p in names(estim)){
  # prefix column names with method and suffix with the reporting probability
  colnames(estim[[p]]) <- paste0("estim_", colnames(estim[[p]]), "_", p)
  colnames(estim_naive[[p]]) <- paste0("estim_naive_", colnames(estim_naive[[p]]), "_", p)
  colnames(estim_stretch[[p]]) <- paste0("estim_stretch_", colnames(estim_stretch[[p]]), "_", p)
  estim_frame <- cbind(estim_frame, estim[[p]], estim_naive[[p]], estim_stretch[[p]])
}
colnames(estim_frame)
write.csv(estim_frame, file = paste0("bias_variance/results_bias_variance_", lgt, "_", n_sim, "_general.csv"))
# get expectations for naive estimates:
# evaluated on a fine grid of reporting probabilities (plotted as red curves)
vals_q_long <- 1:100/100
expec_naive <- matrix(nrow = 4, ncol = length(vals_q_long))
rownames(expec_naive) <- c("nu", "phi", "kappa", "psi")
colnames(expec_naive) <- vals_q_long
for(k in seq_along(vals_q_long)){
  # second-order properties of the thinned process ...
  true_sop <- hhh4underreporting:::compute_sop_homog(nu, phi, kappa, psi, vals_q_long[k])$X_tilde
  # ... re-interpreted as if fully reported (q = 1)
  expec_naive[, k] <- unlist(hhh4underreporting:::recover_pars_homog(q = 1, sop_list = true_sop))[1:4]
}
# get expectations for estimates based on stretching with underreporting factors:
expec_stretched <- NA*expec_naive
for(k in seq_along(vals_q_long)){
  true_sop <- hhh4underreporting:::compute_sop_homog(nu, phi, kappa, psi, vals_q_long[k])$X_tilde
  stretched_sop <- true_sop
  # stretching rescales mean by 1/q and variance by 1/q^2
  stretched_sop$mu <- true_sop$mu/vals_q_long[k]
  stretched_sop$sigma2 <- true_sop$sigma2/vals_q_long[k]^2
  expec_stretched[, k] <- unlist(hhh4underreporting:::recover_pars_homog(q = 1, sop_list = stretched_sop))[1:4]
}
# add estimates of effective reproduction numbers: Reff = phi / (1 - kappa)
for(i in seq_along(vals_q)){
  estim[[i]] <- cbind(estim[[i]],
                      Reff = estim[[i]][, "phi"]/(1 - estim[[i]][, "kappa"]))
  estim_naive[[i]] <- cbind(estim_naive[[i]],
                            Reff = estim_naive[[i]][, "phi"]/
                              (1 - estim_naive[[i]][, "kappa"]))
  estim_stretch[[i]] <- cbind(estim_stretch[[i]],
                              Reff = estim_stretch[[i]][, "phi"]/
                                (1 - estim_stretch[[i]][, "kappa"]))
}
# helper functions for small boxplots:
# Draw one narrow boxplot on top of the current plot at horizontal position
# at + shift; returns the value of boxplot() (the five-number summary etc.).
bp <- function(vect, at, shift = 0, col = "white"){
  pos <- at + shift
  boxplot(vect,
          at = pos, add = TRUE,
          boxwex = 0.1, staplewex = 0,
          col = col, pch = 16, lty = 1,
          medlwd = 1, cex = 0.5)
}
# plot several small boxplots
# Draw one panel: empty frame, green horizontal reference line at the true
# parameter value, red curve of analytically expected (biased) estimates, and
# one small boxplot of samples[[i]][, variable] per reporting probability.
all_bps <- function(samples, variable, vals_q, col = "white",
                    true_value = NA, expected_bias = rep(NA, length(vals_q)),
                    xlim = 0:1, ylim = 0:1,
                    xlab = expression(true~pi), ...){
  # empty canvas; x-axis drawn manually with selected tick marks
  plot(NULL, xlim = xlim, ylim = ylim, xlab = xlab, axes = FALSE, ...)
  axis(1, at = c(0, 0.1, 0.25, 0.5, 0.75, 1))
  # reference line first so the boxplots are layered on top of it
  abline(h = true_value, col = "chartreuse3")
  # x grid assumes expected_bias is evaluated on an equally spaced grid ending at 1
  grid_x <- seq_along(expected_bias)/length(expected_bias)
  lines(grid_x, expected_bias, col = "red")
  for (idx in seq_along(vals_q)) {
    bp(samples[[idx]][, variable], at = vals_q[idx], col = col)
  }
}
# plot: 5 parameters (rows) x 3 estimation approaches (columns)
par(las = 1, mfcol = c(5, 3), mar = c(4, 4.5, 1.8, 1), font.main = 1, family = "serif")
# my method (accounting for underreporting):
all_bps(samples = estim, variable = "nu", vals_q = vals_q, ylim = c(0, 60),
        ylab = expression(hat(nu)), true_value = nu)
mtext("(a) Accounting for underreporting", side = 3, at = 0.5, cex = 0.75, line = 0.9)
all_bps(samples = estim, variable = "phi", vals_q = vals_q,
        ylab = expression(hat(phi1)), true_value = phi)
all_bps(samples = estim, variable = "kappa", vals_q = vals_q,
        ylab = expression(hat(kappa)), true_value = kappa)
all_bps(samples = estim, variable = "psi", vals_q = vals_q, ylim = c(0, 0.25),
        ylab = expression(hat(psi)), true_value = psi)
all_bps(samples = estim, variable = "Reff", vals_q = vals_q, ylim = c(0, 1),
        ylab = expression(hat(R)[eff]), true_value = phi/(1 - kappa))
# naive (expected-bias curves added for this approach):
all_bps(samples = estim_naive, variable = "nu", vals_q = vals_q, ylim = c(0, 60),
        ylab = expression(hat(nu)), expected_bias = expec_naive["nu", ], true_value = nu)
mtext("(b) Ignoring underreporting", side = 3, at = 0.5, cex = 0.75, line = 0.9)
all_bps(samples = estim_naive, variable = "phi", vals_q = vals_q,
        ylab = expression(hat(phi1)), expected_bias = expec_naive["phi", ], true_value = phi)
all_bps(samples = estim_naive, variable = "kappa", vals_q = vals_q,
        ylab = expression(hat(kappa)), expected_bias = expec_naive["kappa", ], true_value = kappa)
all_bps(samples = estim_naive, variable = "psi", vals_q = vals_q, ylim = c(0, 0.25),
        ylab = expression(hat(psi)), expected_bias = expec_naive["psi", ], true_value = psi)
all_bps(samples = estim_naive, variable = "Reff", vals_q = vals_q, ylim = c(0, 1),
        ylab = expression(hat(R)[eff]),
        expected_bias = expec_naive["phi", ]/(1 - expec_naive["kappa", ]), true_value = phi/(1 - kappa))
# stretched:
# NOTE(review): unlike panel (b), no expected_bias curve is passed for nu here
# -- presumably intentional, but worth confirming.
all_bps(samples = estim_stretch, variable = "nu", vals_q = vals_q, ylim = c(0, 60),
        ylab = expression(hat(nu)), true_value = nu)
mtext("(c) Using multiplication factors", side = 3, at = 0.5, cex = 0.75, line = 0.9)
all_bps(samples = estim_stretch, variable = "phi", vals_q = vals_q,
        ylab = expression(hat(phi1)), expected_bias = expec_stretched["phi", ], true_value = phi)
all_bps(samples = estim_stretch, variable = "kappa", vals_q = vals_q,
        ylab = expression(hat(kappa)), expected_bias = expec_stretched["kappa", ], true_value = kappa)
all_bps(samples = estim_stretch, variable = "psi", vals_q = vals_q, ylim = c(0, 0.25),
        ylab = expression(hat(psi)), expected_bias = expec_stretched["psi", ], true_value = psi)
all_bps(samples = estim_stretch, variable = "Reff", vals_q = vals_q, ylim = c(0, 1),
        ylab = expression(hat(R)[eff]),
        expected_bias = expec_stretched["phi", ]/(1 - expec_stretched["kappa", ]), true_value = phi/(1 - kappa))
|
# This script runs one instance of the hierarchical Bayesian model
# (suitable for parallelization on a computing cluster)
#
# Call: Rscript run_single_model.R lifestage species disp_mod err_mod
# - lifestage: seed, recruit or seedling
# - species: scientific name (w/ underscore betweeen words)
# - disp_mod: dispersal kernel (2Dt, exppow, invpow, lognorm or weibull)
# - err_mod: count model (nb or pois)
library(methods)
library(readr)
library(tidyr)
library(dplyr)
library(purrr)
library(rstan)
library(bayesplot) # for 'rhat' function
source("data_subset_funcs.R") # Load data and subsetting functions
source("calc_dists_weights_func.R") # Functions for edge-correction weights
# Get command-line arguments
argv <- commandArgs(trailingOnly=TRUE)
lifestage <- argv[1]
species <- gsub("_", " ", argv[2])  # underscores back to spaces
disp_mod <- argv[3]
err_mod <- argv[4]
# (interactive defaults, kept for debugging)
# lifestage <- "recruit"
# species <- "Beilschmiedia tovarensis"
# disp_mod <- "weibull"
# err_mod <- "nb"
print(paste(lifestage, species, disp_mod, err_mod))
# Fixed parameters
dist_trap_max <- 10 # For seedlings, max. distance from a seed trap
trap_area <- switch(lifestage, seed = 0.5, recruit = 1, seedling = 1)
# seedling_years is defined in the sourced helper scripts
years <- switch(lifestage, seed = 1988:2014, recruit = 1994:2014,
                seedling = seedling_years)
# Load priors and arrange into list
disp_priors <- read_csv("disp_priors.csv") %>%
    filter(model == disp_mod) %>%
    select(-model)
repr_priors <- read_csv("repr_priors.csv") %>%
    filter(stage == lifestage) %>%
    select(-stage)
priors <- bind_rows(disp_priors, repr_priors)
# overdispersion prior only used by the negative binomial count model
if (err_mod != "nb")
    priors <- filter(priors, param != "p_ri_theta")
# named list of c(mean, sd) pairs, one entry per prior parameter
priors_list <- setNames(map2(priors$prior_mean, priors$prior_sd, c),
                        priors$param)
# Iterations and number of chains for Stan
n_warmup <- 600
n_iter <- 1600 # includes warmup
n_chains <- 2
# Extract tree data, create tree size x year matrix
tree_data <- map_dfr(set_names(years), ~ subset_trees(species, .), .id = "year")
tree_data <- spread(tree_data, key = "year", value = "size", fill = 0)
tree_size <- as.matrix(select(tree_data, -id, -x, -y))
# Extract seed, recruit or seedling data
subset_func <- switch(lifestage,
    seed = subset_seeds,
    recruit = subset_recruits,
    seedling = subset_seedlings
)
seed_data <- map_dfr(set_names(years), ~ subset_func(species, .), .id = "year")
# Reshape seed_data differently for seedlings vs. seeds or recruits
if (lifestage == "seedling") {
    seed_data <- spread(seed_data, key = "year", value = "seeds")
    # Only keep within dist_trap_max of a seed trap
    seed_traps <- read_csv("data/seed_traps.csv")
    dist_trap <- sqrt(outer(seed_data$x, seed_traps$X, "-")^2 +
                          outer(seed_data$y, seed_traps$Y, "-")^2)
    seed_data <- seed_data[apply(dist_trap, 1, min) < dist_trap_max, ]
    seed_data$itrap <- 1:nrow(seed_data)
    # Scale by dt to get seedlings by year (seedling_dt from sourced helpers)
    nseed <- as.matrix(select(seed_data, -itrap, -x, -y))
    nseed <- scale(nseed, center = FALSE, scale = seedling_dt)
} else {
    # Seed and recruit data is kept in "sparse matrix" format
    # due to change in number of traps over years
    seed_data <- mutate(seed_data, iyear = as.integer(as.factor(year)),
                        itrap = as.integer(as.factor(trap)))
    nseed <- seed_data$seeds
}
# Round fractions up or down randomly (with p = fraction)
# Stochastic rounding: each value's fractional part is the probability of
# rounding up, so the rounding is unbiased in expectation.
round_frac <- function(x) {
  frac_part <- x %% 1
  go_up <- runif(length(x)) < frac_part
  ifelse(go_up, ceiling(x), floor(x))
}
# only non-integer entries need stochastic rounding
nseed[nseed %% 1 > 0] <- round_frac(nseed[nseed %% 1 > 0])
# Calculate tree-trap distance matrix, maximum radius in plot and edge-correction weights
traps <- distinct(seed_data, itrap, x, y)
dist_weights <- calc_dists_weights(traps, tree_data)
# Other input variables for model
total_size <- colSums(tree_size)
# xmax/xmin/ymax/ymin: plot boundaries, defined in the sourced helper scripts
plot_area <- (xmax - xmin) * (ymax - ymin)
size_density <- total_size / plot_area
# Create data list for Stan (matrix layout for seedlings, sparse for others)
if (lifestage == "seedling") {
    data_list <- lst(trap_area, nyear = ncol(tree_size), ntree = nrow(tree_size),
                     ntrap = nrow(traps), tree_size, size_density,
                     nseed = as.vector(nseed))
} else {
    data_list <- lst(trap_area, nyear = ncol(tree_size), ntree = nrow(tree_size),
                     ntrap = nrow(traps), tree_size, size_density,
                     iyear = seed_data$iyear, itrap = seed_data$itrap,
                     nobs = length(nseed), nseed)
}
data_list <- c(data_list, dist_weights, priors_list)
# Check for missing data (Stan rejects NA inputs)
if (any(is.na(unlist(data_list)))) stop("Missing values in data.")
# Run Stan model; file name encodes kernel, count model and data layout
model_file <- paste("stan_models/disp", disp_mod, err_mod,
                    ifelse(lifestage == "seedling", "mat.stan", "sparse.stan"),
                    sep = "_")
res <- stan(model_file, data = data_list, chains = n_chains,
            warmup = n_warmup, iter = n_iter, cores = 2)
# Export diagnostics, LOO results and parameter samples
pars_keep <- c("alpha|inv_k|k_real|mu_disp|sd_disp|mu_beta|sd_beta|ri_theta")
diags <- list(ndiv = get_num_divergent(res),
              max_tree = get_num_max_treedepth(res),
              bfmi = min(get_bfmi(res)),
              max_rhat = max(rhat(res, regex_pars = pars_keep)))
loo_res <- loo(res)
samples <- extract(res)
# drop per-observation log-likelihood draws to keep the output small
samples <- samples[!(names(samples) == "log_lik")]
res_sum <- list(samples = samples, diags = diags, loo = loo_res)
saveRDS(res_sum, paste0("results/", lifestage, "_", species, "_",
disp_mod, "_", err_mod, ".rds")) | /run_single_model.R | permissive | pmarchand1/seed-seedling-kernels | R | false | false | 5,517 | r | # This script runs one instance of the hierarchical Bayesian model
# (suitable for parallelization on a computing cluster)
#
# Call: Rscript run_single_model.R lifestage species disp_mod err_mod
# - lifestage: seed, recruit or seedling
# - species: scientific name (w/ underscore betweeen words)
# - disp_mod: dispersal kernel (2Dt, exppow, invpow, lognorm or weibull)
# - err_mod: count model (nb or pois)
library(methods)
library(readr)
library(tidyr)
library(dplyr)
library(purrr)
library(rstan)
library(bayesplot) # for 'rhat' function
source("data_subset_funcs.R") # Load data and subsetting functions
source("calc_dists_weights_func.R") # Functions for edge-correction weights
# Get command-line arguments
argv <- commandArgs(trailingOnly=TRUE)
lifestage <- argv[1]
species <- gsub("_", " ", argv[2])  # underscores back to spaces
disp_mod <- argv[3]
err_mod <- argv[4]
# (interactive defaults, kept for debugging)
# lifestage <- "recruit"
# species <- "Beilschmiedia tovarensis"
# disp_mod <- "weibull"
# err_mod <- "nb"
print(paste(lifestage, species, disp_mod, err_mod))
# Fixed parameters
dist_trap_max <- 10 # For seedlings, max. distance from a seed trap
trap_area <- switch(lifestage, seed = 0.5, recruit = 1, seedling = 1)
# seedling_years is defined in the sourced helper scripts
years <- switch(lifestage, seed = 1988:2014, recruit = 1994:2014,
                seedling = seedling_years)
# Load priors and arrange into list
disp_priors <- read_csv("disp_priors.csv") %>%
    filter(model == disp_mod) %>%
    select(-model)
repr_priors <- read_csv("repr_priors.csv") %>%
    filter(stage == lifestage) %>%
    select(-stage)
priors <- bind_rows(disp_priors, repr_priors)
# overdispersion prior only used by the negative binomial count model
if (err_mod != "nb")
    priors <- filter(priors, param != "p_ri_theta")
# named list of c(mean, sd) pairs, one entry per prior parameter
priors_list <- setNames(map2(priors$prior_mean, priors$prior_sd, c),
                        priors$param)
# Iterations and number of chains for Stan
n_warmup <- 600
n_iter <- 1600 # includes warmup
n_chains <- 2
# Extract tree data, create tree size x year matrix
tree_data <- map_dfr(set_names(years), ~ subset_trees(species, .), .id = "year")
tree_data <- spread(tree_data, key = "year", value = "size", fill = 0)
tree_size <- as.matrix(select(tree_data, -id, -x, -y))
# Extract seed, recruit or seedling data
subset_func <- switch(lifestage,
    seed = subset_seeds,
    recruit = subset_recruits,
    seedling = subset_seedlings
)
seed_data <- map_dfr(set_names(years), ~ subset_func(species, .), .id = "year")
# Reshape seed_data differently for seedlings vs. seeds or recruits
if (lifestage == "seedling") {
    seed_data <- spread(seed_data, key = "year", value = "seeds")
    # Only keep within dist_trap_max of a seed trap
    seed_traps <- read_csv("data/seed_traps.csv")
    dist_trap <- sqrt(outer(seed_data$x, seed_traps$X, "-")^2 +
                          outer(seed_data$y, seed_traps$Y, "-")^2)
    seed_data <- seed_data[apply(dist_trap, 1, min) < dist_trap_max, ]
    seed_data$itrap <- 1:nrow(seed_data)
    # Scale by dt to get seedlings by year (seedling_dt from sourced helpers)
    nseed <- as.matrix(select(seed_data, -itrap, -x, -y))
    nseed <- scale(nseed, center = FALSE, scale = seedling_dt)
} else {
    # Seed and recruit data is kept in "sparse matrix" format
    # due to change in number of traps over years
    seed_data <- mutate(seed_data, iyear = as.integer(as.factor(year)),
                        itrap = as.integer(as.factor(trap)))
    nseed <- seed_data$seeds
}
# Round fractions up or down randomly (with p = fraction)
# Stochastic rounding: each value's fractional part is the probability of
# rounding up, so the rounding is unbiased in expectation.
round_frac <- function(x) {
  frac_part <- x %% 1
  go_up <- runif(length(x)) < frac_part
  ifelse(go_up, ceiling(x), floor(x))
}
# only non-integer entries need stochastic rounding
nseed[nseed %% 1 > 0] <- round_frac(nseed[nseed %% 1 > 0])
# Calculate tree-trap distance matrix, maximum radius in plot and edge-correction weights
traps <- distinct(seed_data, itrap, x, y)
dist_weights <- calc_dists_weights(traps, tree_data)
# Other input variables for model
total_size <- colSums(tree_size)
# xmax/xmin/ymax/ymin: plot boundaries, defined in the sourced helper scripts
plot_area <- (xmax - xmin) * (ymax - ymin)
size_density <- total_size / plot_area
# Create data list for Stan (matrix layout for seedlings, sparse for others)
if (lifestage == "seedling") {
    data_list <- lst(trap_area, nyear = ncol(tree_size), ntree = nrow(tree_size),
                     ntrap = nrow(traps), tree_size, size_density,
                     nseed = as.vector(nseed))
} else {
    data_list <- lst(trap_area, nyear = ncol(tree_size), ntree = nrow(tree_size),
                     ntrap = nrow(traps), tree_size, size_density,
                     iyear = seed_data$iyear, itrap = seed_data$itrap,
                     nobs = length(nseed), nseed)
}
data_list <- c(data_list, dist_weights, priors_list)
# Check for missing data (Stan rejects NA inputs)
if (any(is.na(unlist(data_list)))) stop("Missing values in data.")
# Run Stan model; file name encodes kernel, count model and data layout
model_file <- paste("stan_models/disp", disp_mod, err_mod,
                    ifelse(lifestage == "seedling", "mat.stan", "sparse.stan"),
                    sep = "_")
res <- stan(model_file, data = data_list, chains = n_chains,
            warmup = n_warmup, iter = n_iter, cores = 2)
# Export diagnostics, LOO results and parameter samples
pars_keep <- c("alpha|inv_k|k_real|mu_disp|sd_disp|mu_beta|sd_beta|ri_theta")
diags <- list(ndiv = get_num_divergent(res),
              max_tree = get_num_max_treedepth(res),
              bfmi = min(get_bfmi(res)),
              max_rhat = max(rhat(res, regex_pars = pars_keep)))
loo_res <- loo(res)
samples <- extract(res)
# drop per-observation log-likelihood draws to keep the output small
samples <- samples[!(names(samples) == "log_lik")]
res_sum <- list(samples = samples, diags = diags, loo = loo_res)
saveRDS(res_sum, paste0("results/", lifestage, "_", species, "_",
disp_mod, "_", err_mod, ".rds")) |
library(plyr)
library(dplyr)
library(caret)
library(caTools)
library(stats)
library(MASS)
library(pROC)
library(brglm2)
library(leaps)
source("~/Documents/Data_Science/projects/adni/sc_linear/linear_regression_functions.R")
#loading data-----------------------------------------------------------------------------------------
# full master file plus an analysis subset with the outcome, demographics and
# psychosocial predictors used below
df_full <- read.csv("ADNID_Baseline_MasterFile_clean.csv", stringsAsFactors = FALSE)
df_clean <- df_full %>% dplyr::select(b_ECOG_Total_Avg, b_Informant_ECOG_Total_Avg, b_Age, BiRace, Gender,
                                      EducationYears, ECOG_Imp, INFORMECOG_Imp, CogScore, b_HAMD17Total,
                                      b_GAD7_Total, b_PSS_Total, b_DSSI_Total, b_GSIS_Total)
#data transformation----------------------------------------------------------------------------------
#transforming variables in clean df
df_clean_tran <- transform_clean_df(df_clean)
plot_histogram(df_clean_tran)
#transforming variables in full df (for ecog and cognition subdomain analyses)
# BUG FIX: the original called transform_full_df(df_cfull); `df_cfull` is
# never defined anywhere, so the script aborted here. The object loaded
# above is `df_full`.
df_full_tran <- transform_full_df(df_full)
plot_histogram(df_full_tran)
#univariate analyses----------------------------------------------------------------------------------
# simple linear regressions of informant-reported ECOG total on each candidate
# predictor separately; inline comments record observed significance
##race
mod1b <- lm(b_Informant_ECOG_Total_Avg ~ BiRace, data = df_clean_tran)
summary(mod1b) # sig
##gender
mod2b <- lm(b_Informant_ECOG_Total_Avg ~ Gender, data = df_clean_tran)
summary(mod2b) #not sig
##education
mod3b <- lm(b_Informant_ECOG_Total_Avg ~ EducationYears, data = df_clean_tran)
summary(mod3b) #not sig
##age
mod4b <- lm(b_Informant_ECOG_Total_Avg ~ b_Age, data = df_clean_tran)
summary(mod4b) #not sig
##HAMD
mod5b <- lm(b_Informant_ECOG_Total_Avg ~ b_HAMD17Total, data = df_clean_tran)
summary(mod5b) #not sig
##GSIS
mod6b <- lm(b_Informant_ECOG_Total_Avg ~ b_GSIS_Total, data = df_clean_tran)
summary(mod6b) #not significant
##GAD
mod7b <- lm(b_Informant_ECOG_Total_Avg ~ b_GAD7_Total, data = df_clean_tran)
summary(mod7b) #not sig
##DSSI
mod8b <- lm(b_Informant_ECOG_Total_Avg ~ b_DSSI_Total, data = df_clean_tran)
summary(mod8b) #not sig
##PSS
mod9b <- lm(b_Informant_ECOG_Total_Avg ~ b_PSS_Total, data = df_clean_tran)
summary(mod9b) #significant
##CogScore
mod10b <- lm(b_Informant_ECOG_Total_Avg ~ CogScore, data = df_clean_tran)
summary(mod10b) #not significant
#stepwise regression----------------------------------------------------------------------------------
# best-subset search (up to 6 predictors, sequential replacement)
set.seed(101)
models_inf <- regsubsets(b_Informant_ECOG_Total_Avg ~ b_GAD7_Total + b_PSS_Total +
                             b_GSIS_Total + b_DSSI_Total +b_HAMD17Total + BiRace +
                             EducationYears + b_Age + Gender, data = df_clean_tran, nvmax = 6,
                         method = "seqrep")
summary(models_inf)
# pss --> pss, race --> pss dssi race --> race pss gender dssi --> dssi pss hamd race gender -->
#pss gsis dssi race edu gender
#multiple regression----------------------------------------------------------------------------------
# candidate models suggested by the subset search above
#w/ pss race
mr1b <- lm(b_Informant_ECOG_Total_Avg ~ b_PSS_Total + BiRace, data = df_clean_tran)
summary(mr1b)
plot(mr1b)
#w/ pss dssi race
mr2b <- lm(b_Informant_ECOG_Total_Avg ~ b_PSS_Total + b_DSSI_Total + BiRace, data = df_clean_tran)
summary(mr2b)
plot(mr2b)
#w/ race pss gender dssi (BEST)
mr3b <- lm(b_Informant_ECOG_Total_Avg ~ BiRace + b_DSSI_Total +
               Gender+ b_PSS_Total, data = df_clean_tran)
summary(mr3b)
plot(mr3b)
#w/ dssi pss hamd race gender
# NOTE(review): label above does not match the formula, which uses
# GAD7 + HAMD + GSIS + DSSI -- confirm which model was intended.
mr4b <- lm(b_Informant_ECOG_Total_Avg ~ b_GAD7_Total + b_HAMD17Total +
               b_GSIS_Total + b_DSSI_Total, data = df_clean_tran)
summary(mr4b)
plot(mr4b)
#w/ pss gsis dssi race edu gender
# NOTE(review): label above does not match the formula, which uses
# GAD7 + PSS + BiRace + HAMD + Education -- confirm which model was intended.
mr5b <- lm(b_Informant_ECOG_Total_Avg ~ b_GAD7_Total + b_PSS_Total + BiRace +
               b_HAMD17Total + EducationYears, data = df_clean_tran)
summary(mr5b)
plot(mr5b)
#selected regression model with ecog subdomains-------------------------------------------------------
# refit the selected predictor set (PSS + DSSI + race) against each informant
# ECOG subdomain score
#use df_full data frame
#memory
sd1b <- lm(b_Informant_ECOG_Memory_Avg ~ b_PSS_Total + b_DSSI_Total + BiRace, data = df_full_tran)
summary(sd1b) #sig
plot(sd1b)
#language
sd2b <- lm(b_Informant_ECOG_Language_Avg ~ b_PSS_Total + b_DSSI_Total + BiRace, data = df_full_tran)
summary(sd2b) #sig
plot(sd2b)
#visuospatial
sd3b <- lm(b_Informant_ECOG_Visuospatial_Avg ~ b_PSS_Total + b_DSSI_Total + BiRace, data = df_full_tran)
summary(sd3b) #not sig
plot(sd3b)
#executive planning
sd4b <- lm(b_Informant_ECOG_ExecPlanning_Avg ~ b_PSS_Total + b_DSSI_Total + BiRace, data = df_full_tran)
summary(sd4b)
plot(sd4b)
#executive organization
sd5b <- lm(b_Informant_ECOG_ExecOrganization_Avg ~ b_PSS_Total + b_DSSI_Total + BiRace, data = df_full_tran)
summary(sd5b) #not sig
plot(sd5b)
#executive attention
sd6b <- lm(b_Informant_ECOG_ExecAttention_Avg ~ b_PSS_Total + b_DSSI_Total + BiRace, data = df_full_tran)
summary(sd6b)
plot(sd6b)
#executive total
sd7b <- lm(b_Informant_ECOG_ExecTotal_Avg ~ b_PSS_Total + b_DSSI_Total + BiRace, data = df_full_tran)
summary(sd7b)
plot(sd7b)
#ecog score predicted by cognitive subdomain----------------------------------------------------------
# informant ECOG executive total regressed on objective test scores, one
# cognitive domain at a time
#use df_full data frame
#executive functioning
cs1b <- lm(b_Informant_ECOG_ExecTotal_Avg ~ b_Heaton_FAS_SS_imp2 + b_Stroop_CW_SSImp2 + b_Heaton_TMTB_SS_imp2, data = df_full_tran)
summary(cs1b) #not sig
#visual perception functioning
cs2b <- lm(b_Informant_ECOG_ExecTotal_Avg ~ b_BentonJLO_SSImp2, data = df_full_tran)
summary(cs2b) #not sig
#visual learning & memory
cs3b <- lm(b_Informant_ECOG_ExecTotal_Avg ~ b_BVMT_TotalRecall_SSImp2 + b_BVMT_DelayedRecall_SSImp2, data = df_full_tran)
summary(cs3b) #not sig
#verbal learning & memory
cs4b <- lm(b_Informant_ECOG_ExecTotal_Avg ~ b_LM_Immediate_SSImp2 + b_LM_Delayed_SSImp2 +
               b_HVLT_TotalRecall_SSImp2 + b_HVLT_DelayedRecall_SSImp2, data = df_full_tran)
summary(cs4b) #not sig
#working memory (model fails as digit span has no impaired scores)
cs5b <- lm(b_Informant_ECOG_ExecTotal_Avg ~ b_DigitSpan_Total_SSImp2, data = df_full_tran)
summary(cs5b) #not sig
#language ability
cs6b <- lm(b_Informant_ECOG_ExecTotal_Avg ~ b_Heaton_BNT_SS_imp2, data = df_full_tran)
summary(cs6b) #not sig
#info processing speed
cs7b <- lm(b_Informant_ECOG_ExecTotal_Avg ~ b_DigitSymbol_SSImp2 + b_Heaton_TMTA_SS_imp2, data = df_full_tran)
summary(cs7b) #not sig
| /informant_linear_regression.R | no_license | emiburns/subjective-cognition-in-late-life-depression | R | false | false | 6,323 | r | library(plyr)
library(dplyr)
library(caret)
library(caTools)
library(stats)
library(MASS)
library(pROC)
library(brglm2)
library(leaps)
source("~/Documents/Data_Science/projects/adni/sc_linear/linear_regression_functions.R")
#loading data-----------------------------------------------------------------------------------------
# full master file plus an analysis subset with the outcome, demographics and
# psychosocial predictors used below
df_full <- read.csv("ADNID_Baseline_MasterFile_clean.csv", stringsAsFactors = FALSE)
df_clean <- df_full %>% dplyr::select(b_ECOG_Total_Avg, b_Informant_ECOG_Total_Avg, b_Age, BiRace, Gender,
                                      EducationYears, ECOG_Imp, INFORMECOG_Imp, CogScore, b_HAMD17Total,
                                      b_GAD7_Total, b_PSS_Total, b_DSSI_Total, b_GSIS_Total)
#data transformation----------------------------------------------------------------------------------
#transforming variables in clean df
df_clean_tran <- transform_clean_df(df_clean)
plot_histogram(df_clean_tran)
#transforming variables in full df (for ecog and cognition subdomain analyses)
# BUG FIX: the original called transform_full_df(df_cfull); `df_cfull` is
# never defined anywhere, so the script aborted here. The object loaded
# above is `df_full`.
df_full_tran <- transform_full_df(df_full)
plot_histogram(df_full_tran)
#univariate analyses----------------------------------------------------------------------------------
# simple linear regressions of informant-reported ECOG total on each candidate
# predictor separately; inline comments record observed significance
##race
mod1b <- lm(b_Informant_ECOG_Total_Avg ~ BiRace, data = df_clean_tran)
summary(mod1b) # sig
##gender
mod2b <- lm(b_Informant_ECOG_Total_Avg ~ Gender, data = df_clean_tran)
summary(mod2b) #not sig
##education
mod3b <- lm(b_Informant_ECOG_Total_Avg ~ EducationYears, data = df_clean_tran)
summary(mod3b) #not sig
##age
mod4b <- lm(b_Informant_ECOG_Total_Avg ~ b_Age, data = df_clean_tran)
summary(mod4b) #not sig
##HAMD
mod5b <- lm(b_Informant_ECOG_Total_Avg ~ b_HAMD17Total, data = df_clean_tran)
summary(mod5b) #not sig
##GSIS
mod6b <- lm(b_Informant_ECOG_Total_Avg ~ b_GSIS_Total, data = df_clean_tran)
summary(mod6b) #not significant
##GAD
mod7b <- lm(b_Informant_ECOG_Total_Avg ~ b_GAD7_Total, data = df_clean_tran)
summary(mod7b) #not sig
##DSSI
mod8b <- lm(b_Informant_ECOG_Total_Avg ~ b_DSSI_Total, data = df_clean_tran)
summary(mod8b) #not sig
##PSS
mod9b <- lm(b_Informant_ECOG_Total_Avg ~ b_PSS_Total, data = df_clean_tran)
summary(mod9b) #significant
##CogScore
mod10b <- lm(b_Informant_ECOG_Total_Avg ~ CogScore, data = df_clean_tran)
summary(mod10b) #not significant
#stepwise regression----------------------------------------------------------------------------------
# best-subset search (up to 6 predictors, sequential replacement)
set.seed(101)
models_inf <- regsubsets(b_Informant_ECOG_Total_Avg ~ b_GAD7_Total + b_PSS_Total +
                             b_GSIS_Total + b_DSSI_Total +b_HAMD17Total + BiRace +
                             EducationYears + b_Age + Gender, data = df_clean_tran, nvmax = 6,
                         method = "seqrep")
summary(models_inf)
# pss --> pss, race --> pss dssi race --> race pss gender dssi --> dssi pss hamd race gender -->
#pss gsis dssi race edu gender
#multiple regression----------------------------------------------------------------------------------
# candidate models suggested by the subset search above
#w/ pss race
mr1b <- lm(b_Informant_ECOG_Total_Avg ~ b_PSS_Total + BiRace, data = df_clean_tran)
summary(mr1b)
plot(mr1b)
#w/ pss dssi race
mr2b <- lm(b_Informant_ECOG_Total_Avg ~ b_PSS_Total + b_DSSI_Total + BiRace, data = df_clean_tran)
summary(mr2b)
plot(mr2b)
#w/ race pss gender dssi (BEST)
mr3b <- lm(b_Informant_ECOG_Total_Avg ~ BiRace + b_DSSI_Total +
               Gender+ b_PSS_Total, data = df_clean_tran)
summary(mr3b)
plot(mr3b)
#w/ dssi pss hamd race gender
# NOTE(review): label above does not match the formula, which uses
# GAD7 + HAMD + GSIS + DSSI -- confirm which model was intended.
mr4b <- lm(b_Informant_ECOG_Total_Avg ~ b_GAD7_Total + b_HAMD17Total +
               b_GSIS_Total + b_DSSI_Total, data = df_clean_tran)
summary(mr4b)
plot(mr4b)
#w/ pss gsis dssi race edu gender
# NOTE(review): label above does not match the formula, which uses
# GAD7 + PSS + BiRace + HAMD + Education -- confirm which model was intended.
mr5b <- lm(b_Informant_ECOG_Total_Avg ~ b_GAD7_Total + b_PSS_Total + BiRace +
               b_HAMD17Total + EducationYears, data = df_clean_tran)
summary(mr5b)
plot(mr5b)
#selected regression model with ecog subdomains-------------------------------------------------------
# refit the selected predictor set (PSS + DSSI + race) against each informant
# ECOG subdomain score
#use df_full data frame
#memory
sd1b <- lm(b_Informant_ECOG_Memory_Avg ~ b_PSS_Total + b_DSSI_Total + BiRace, data = df_full_tran)
summary(sd1b) #sig
plot(sd1b)
#language
sd2b <- lm(b_Informant_ECOG_Language_Avg ~ b_PSS_Total + b_DSSI_Total + BiRace, data = df_full_tran)
summary(sd2b) #sig
plot(sd2b)
#visuospatial
sd3b <- lm(b_Informant_ECOG_Visuospatial_Avg ~ b_PSS_Total + b_DSSI_Total + BiRace, data = df_full_tran)
summary(sd3b) #not sig
plot(sd3b)
#executive planning
sd4b <- lm(b_Informant_ECOG_ExecPlanning_Avg ~ b_PSS_Total + b_DSSI_Total + BiRace, data = df_full_tran)
summary(sd4b)
plot(sd4b)
#executive organization
sd5b <- lm(b_Informant_ECOG_ExecOrganization_Avg ~ b_PSS_Total + b_DSSI_Total + BiRace, data = df_full_tran)
summary(sd5b) #not sig
plot(sd5b)
#executive attention
sd6b <- lm(b_Informant_ECOG_ExecAttention_Avg ~ b_PSS_Total + b_DSSI_Total + BiRace, data = df_full_tran)
summary(sd6b)
plot(sd6b)
#executive total
sd7b <- lm(b_Informant_ECOG_ExecTotal_Avg ~ b_PSS_Total + b_DSSI_Total + BiRace, data = df_full_tran)
summary(sd7b)
plot(sd7b)
#ecog score predicted by cognitive subdomain----------------------------------------------------------
# informant ECOG executive total regressed on objective test scores, one
# cognitive domain at a time
#use df_full data frame
#executive functioning
cs1b <- lm(b_Informant_ECOG_ExecTotal_Avg ~ b_Heaton_FAS_SS_imp2 + b_Stroop_CW_SSImp2 + b_Heaton_TMTB_SS_imp2, data = df_full_tran)
summary(cs1b) #not sig
#visual perception functioning
cs2b <- lm(b_Informant_ECOG_ExecTotal_Avg ~ b_BentonJLO_SSImp2, data = df_full_tran)
summary(cs2b) #not sig
#visual learning & memory
cs3b <- lm(b_Informant_ECOG_ExecTotal_Avg ~ b_BVMT_TotalRecall_SSImp2 + b_BVMT_DelayedRecall_SSImp2, data = df_full_tran)
summary(cs3b) #not sig
#verbal learning & memory
cs4b <- lm(b_Informant_ECOG_ExecTotal_Avg ~ b_LM_Immediate_SSImp2 + b_LM_Delayed_SSImp2 +
               b_HVLT_TotalRecall_SSImp2 + b_HVLT_DelayedRecall_SSImp2, data = df_full_tran)
summary(cs4b) #not sig
#working memory (model fails as digit span has no impaired scores)
cs5b <- lm(b_Informant_ECOG_ExecTotal_Avg ~ b_DigitSpan_Total_SSImp2, data = df_full_tran)
summary(cs5b) #not sig
#language ability
cs6b <- lm(b_Informant_ECOG_ExecTotal_Avg ~ b_Heaton_BNT_SS_imp2, data = df_full_tran)
summary(cs6b) #not sig
#info processing speed
cs7b <- lm(b_Informant_ECOG_ExecTotal_Avg ~ b_DigitSymbol_SSImp2 + b_Heaton_TMTA_SS_imp2, data = df_full_tran)
summary(cs7b) #not sig
|
# NOTE(review): setwd() with a machine-specific relative path makes this
# script non-portable; prefer project-relative paths.
setwd("Desktop/MiriamProgramming/r/eitc")
# read raw fixed-width CPS March files (kept as character via as.is=T)
cps85 = read.table("cpsmar85.dat", as.is=T)
cps86 = read.table("cpsmar86.dat", as.is=T)
cps87 = read.table("cpsmar87.dat", as.is=T)
# NOTE(review): the following exploratory lines reference objects (h_seq,
# is_household, hnumfam, hnumfam2) that are not defined at this point in the
# file -- they only exist inside the reader functions defined below. Sourcing
# this script top-to-bottom will fail here.
h_seq[1:20]
is_household[1:20]
cps85[1:10,]
cps87[1:20,]
table(hnumfam)
table(hnumfam2)
#paste(substr(cps85[1:20,], 10, 10), is_household)
small = head(cps85, 10000)
# NOTE(review): read.cps85.take2() is defined further down in this file; this
# call only works in an interactive session where the function already exists.
t85 = read.cps85.take2(cps85)
# Parse a raw 1985 CPS March file (one fixed-width record per row) into a
# person-level data frame, carrying down the most recent household and family
# record onto each person record.
# NOTE(review): record-type codes here (0 = household, 1-40 = person,
# >40 = family) CONTRADICT read.cps() below, which treats 1-39 as family and
# 41-79 as person records -- verify against the CPS data dictionary.
# NOTE(review): all substr() column positions are unverified here (e.g. a_age
# reads a single character at position 110, which cannot hold two-digit ages);
# confirm against the 1985 codebook.
read.cps85.take2 = function(cps) {
  # record type taken from characters 7-8 of each raw line
  is_household = as.numeric(substr(cps[,1], 7, 8))
  # rows assumed to be person records; p sizes the preallocated output vectors
  plist = cps[which(is_household>0 & is_household<41),]
  p = NROW(plist)
  # most recently seen household / family record (carried down to persons)
  current_h = NULL
  current_f = NULL
  h_seq = rep(NA, p)
  hnumfam = rep(NA, p)
  htotval = rep(NA, p)
  ffpos = rep(NA, p)
  f_seq_pos = rep(NA, p)
  fkind = rep(NA, p)
  ftype = rep(NA, p)
  frelu18 = rep(NA, p)
  ftotval = rep(NA, p)
  pppos = rep(NA, p)
  a_maritl = rep(NA, p)
  a_sex = rep(NA, p)
  a_race = rep(NA, p)
  a_age = rep(NA, p)
  a_hga = rep(NA, p)
  a_hgc = rep(NA, p)
  marsupwt = rep(NA, p)
  wkswork = rep(NA, p)
  hrswk = rep(NA, p)
  rsnnotw = rep(NA, p)
  wsal_val = rep(NA, p)
  semp_val = rep(NA, p)
  frse_val = rep(NA, p)
  ss_val = rep(NA, p)
  ssi_val = rep(NA, p)
  paw_val = rep(NA, p)
  vet_val = rep(NA, p)
  int_val = rep(NA, p)
  ptotval = rep(NA, p)
  pearnval = rep(NA, p)
  pothval = rep(NA, p)
  counter = 0
  # single pass over raw records, dispatching on record type
  for (row in 1:nrow(cps)) {
    if (is_household[row]==0) {
      current_h = cps[row,]
    } else if (is_household[row]>40) {
      current_f = cps[row,]
    } else {
      # person record: fill output slot `counter` from the person line plus
      # the carried-down household/family lines
      current_p = cps[row,]
      counter = counter + 1
      h_seq[counter] = substr(current_h, 1, 6)
      hnumfam[counter] = as.numeric(substr(current_h, 11, 12))
      htotval[counter] = as.numeric(substr(current_h, 72, 80))
      ffpos[counter] = as.numeric(substr(current_f, 7, 8))
      f_seq_pos[counter] = substr(current_f, 1, 8)
      fkind[counter] = as.numeric(substr(current_f, 9, 9))
      ftype[counter] = as.numeric(substr(current_f, 10, 10))
      frelu18[counter] = as.numeric(substr(current_f, 151, 151))
      ftotval[counter] = as.numeric(substr(current_f, 105, 113))
      pppos[counter] = as.numeric(substr(current_p, 7, 8))
      a_maritl[counter] = as.numeric(substr(current_p, 107, 107))
      a_sex[counter] = as.numeric(substr(current_p, 108, 108))
      a_race[counter] = as.numeric(substr(current_p, 109, 109))
      a_age[counter] = as.numeric(substr(current_p, 110, 110))
      a_hga[counter] = as.numeric(substr(current_p, 115, 116))
      a_hgc[counter] = as.numeric(substr(current_p, 117, 117))
      marsupwt[counter] = as.numeric(substr(current_p, 118, 128))
      wkswork[counter] = as.numeric(substr(current_p, 134, 135))
      hrswk[counter] = as.numeric(substr(current_p, 136, 137))
      rsnnotw[counter] = as.numeric(substr(current_p, 19, 20))
      wsal_val[counter] = as.numeric(substr(current_p, 191, 195))
      semp_val[counter] = as.numeric(substr(current_p, 196, 201))
      frse_val[counter] = as.numeric(substr(current_p, 202, 207))
      ss_val[counter] = as.numeric(substr(current_p, 208, 212))
      ssi_val[counter] = as.numeric(substr(current_p, 213, 216))
      paw_val[counter] = as.numeric(substr(current_p, 217, 221))
      vet_val[counter] = as.numeric(substr(current_p, 233, 237))
      int_val[counter] = as.numeric(substr(current_p, 222, 226))
      ptotval[counter] = as.numeric(substr(current_p, 248, 254))
      pearnval[counter] = as.numeric(substr(current_p, 255, 261))
      pothval[counter] = as.numeric(substr(current_p, 262, 268))
    }
  }
  # assemble one row per person record
  t = data.frame(h_seq, hnumfam, htotval, ffpos, f_seq_pos, fkind, ftype, frelu18, ftotval, pppos, a_maritl, a_sex, a_race, a_age,
                 a_hga, a_hgc, marsupwt, wkswork, hrswk, rsnnotw, wsal_val, semp_val, frse_val, ss_val, ssi_val, paw_val, vet_val,
                 int_val, ptotval, pearnval, pothval)
  return(t)
}
# Parse a fixed-width CPS March extract (one record per physical line, held in
# cps[, 1]) into household (h), family (f) and person (p) data frames, then
# merge them into a single person-level data frame.
#
# Record type is encoded in columns 7-8 of each line:
# 0 = household record, 1-39 = family record, 41-79 = person record.
#
# NOTE(review): this function appears superseded by read.cps85.take2() and is
# currently broken -- see the notes on a_maritl and on the final
# data.frame(pppos, p_f_pos, ...) call below.
read.cps = function(cps) {
# record-type code for every line of the file
is_household = as.numeric(substr(cps[,1], 7, 8))
# --- household-level fields (record code 0) ---
h_seq = substr(cps[which(is_household==0),], 1, 6)
hnumfam = as.numeric(substr(cps[which(is_household==0),], 11, 12))
#hg_st60 = as.numeric(substr(cps[which(is_household==0),], 40, 41))
#hccc_r = as.numeric(substr(cps[which(is_household==0),], 58, 58))
htotval = as.numeric(substr(cps[which(is_household==0),], 72, 80))
h = data.frame(h_seq, hnumfam, htotval)
# --- family-level fields (record codes 1-39) ---
f_seq = substr(cps[which(is_household>0 & is_household<40),], 1, 6)
ffpos = as.numeric(substr(cps[which(is_household>0 & is_household<40),], 7, 8))
f_seq_pos = substr(cps[which(is_household>0 & is_household<40),], 1, 8)
fkind = as.numeric(substr(cps[which(is_household>0 & is_household<40),], 9, 9))
ftype = as.numeric(substr(cps[which(is_household>0 & is_household<40),], 10, 10))
frelu18 = as.numeric(substr(cps[which(is_household>0 & is_household<40),], 151, 151))
#frelu6 = as.numeric(substr(cps[which(is_household>0 & is_household<40),], 28, 28))
ftotval = as.numeric(substr(cps[which(is_household>0 & is_household<40),], 105, 113))
f = data.frame(f_seq, ffpos, f_seq_pos, fkind, ftype, frelu18, ftotval)
# rename so the final merge with h can join on the household sequence number
colnames(f)[1] = "h_seq"
# --- person-level fields (record codes 41-79) ---
p_seq = substr(cps[which(is_household>40 & is_household<80),], 1, 6)
pppos = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 7, 8))
#p_f_pos = paste(p_seq, substr(cps[which(is_household>40 & is_household<80),], 44, 45), sep="") #105?
#a_exprrp = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 13, 14))
#a_famtyp = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 31, 31))
#a_famnum = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 29, 30))
#a_famrel = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 32, 32))
# NOTE(review): this line filters *family* records (0 < code < 40) while every
# other person-level field below uses 40 < code < 80, so its length will
# generally differ from the other person vectors -- almost certainly a bug.
a_maritl = as.numeric(substr(cps[which(is_household>0 & is_household<40),], 107, 107))
a_sex = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 108, 108))
a_race = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 109, 109))
a_age = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 110, 110))
#a_reorgn = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 27, 28))
a_hga = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 115, 116))
a_hgc = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 117, 117))
marsupwt = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 118, 128))
wkswork = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 134, 135))
hrswk = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 136, 137))
rsnnotw = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 19, 20))
wsal_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 191, 195))
semp_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 196, 201))
frse_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 202, 207))
#uc_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 278, 282))
#wc_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 285, 289))
ss_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 208, 212))
ssi_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 213, 216))
paw_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 217, 221))
vet_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 233, 237))
#srvs_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 337, 342))
#dsab_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 360, 365))
#rtm_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 379, 384))
int_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 222, 226))
#div_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 393, 397))
#rnt_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 399, 403))
ptotval = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 248, 254))
pearnval = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 255, 261))
pothval = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 262, 268))
# NOTE(review): p_f_pos, a_exprrp, a_famtyp, a_famnum, a_famrel, a_reorgn,
# uc_val, wc_val, srvs_val, dsab_val, rtm_val and div_val are all commented
# out above, so this call fails with "object not found" when run.
p = data.frame(pppos, p_f_pos, a_exprrp, a_famtyp, a_famnum, a_famrel, a_maritl, a_sex, a_race, a_age, a_reorgn,
               a_hga, a_hgc, marsupwt, wkswork, hrswk, rsnnotw, wsal_val, semp_val, frse_val, uc_val, wc_val,
               ss_val, ssi_val, paw_val, vet_val, srvs_val, dsab_val, rtm_val, int_val, div_val, ptotval, pearnval, pothval)
#colnames(p)[1] = "h_seq"
# assumes column 2 of p is the family-position key (p_f_pos; see NOTE above)
colnames(p)[2] = "f_seq_pos"
# sanity-check dimensions before merging
print(dim(h))
print(dim(f))
print(dim(p))
# person -> family on the combined sequence+position key, then -> household
first = merge(p, f, by="f_seq_pos")
print(dim(first))
second = merge(first, h, by="h_seq")
print(dim(second))
return(second)
}
# Load the raw fixed-width CPS March extracts; as.is keeps strings unconverted.
# NOTE(review): prefer as.is=TRUE over as.is=T (T can be reassigned).
cps85 = read.table("cpsmar85.dat", as.is=T)
cps86 = read.table("cpsmar86.dat", as.is=T)
cps87 = read.table("cpsmar87.dat", as.is=T)
# NOTE(review): h_seq, is_household, hnumfam and hnumfam2 are not defined at
# the top level of this file -- the lines below look like leftover console
# history from interactive exploration.
h_seq[1:20]
is_household[1:20]
cps85[1:10,]
cps87[1:20,]
table(hnumfam)
table(hnumfam2)
#paste(substr(cps85[1:20,], 10, 10), is_household)
# small subset for quick experiments; full parse via read.cps85.take2 below
small = head(cps85, 10000)
t85 = read.cps85.take2(cps85)
read.cps85.take2 = function(cps) {
  # Parse a fixed-width CPS March 1985 extract (one record per line, held in
  # cps[, 1]) into one output row per person record, carrying along the
  # fields of the most recently seen household and family records.
  #
  # Record type lives in columns 7-8 of each line:
  #   0       -> household record
  #   > 40    -> family record
  #   1 .. 40 -> person record
  rec_code <- as.numeric(substr(cps[, 1], 7, 8))
  n_persons <- NROW(cps[which(rec_code > 0 & rec_code < 41), ])

  # Numeric fixed-width fields as (start, stop) column pairs for each record
  # type. The string keys h_seq and f_seq_pos are handled separately below.
  h_fields <- list(hnumfam = c(11, 12), htotval = c(72, 80))
  f_fields <- list(ffpos = c(7, 8), fkind = c(9, 9), ftype = c(10, 10),
                   frelu18 = c(151, 151), ftotval = c(105, 113))
  p_fields <- list(pppos = c(7, 8), a_maritl = c(107, 107),
                   a_sex = c(108, 108), a_race = c(109, 109),
                   a_age = c(110, 110), a_hga = c(115, 116),
                   a_hgc = c(117, 117), marsupwt = c(118, 128),
                   wkswork = c(134, 135), hrswk = c(136, 137),
                   rsnnotw = c(19, 20), wsal_val = c(191, 195),
                   semp_val = c(196, 201), frse_val = c(202, 207),
                   ss_val = c(208, 212), ssi_val = c(213, 216),
                   paw_val = c(217, 221), vet_val = c(233, 237),
                   int_val = c(222, 226), ptotval = c(248, 254),
                   pearnval = c(255, 261), pothval = c(262, 268))

  # Output columns in the same order as the original data frame.
  col_names <- c("h_seq", names(h_fields), "ffpos", "f_seq_pos",
                 names(f_fields)[-1], names(p_fields))
  out <- vector("list", length(col_names))
  names(out) <- col_names
  for (nm in col_names) out[[nm]] <- rep(NA, n_persons)

  # Extract one numeric fixed-width field from a single record line.
  grab <- function(record, span) as.numeric(substr(record, span[1], span[2]))

  current_h <- NULL
  current_f <- NULL
  n_filled <- 0
  for (i in seq_len(nrow(cps))) {
    record <- cps[i, ]
    if (rec_code[i] == 0) {
      current_h <- record    # new household: remember it for later persons
    } else if (rec_code[i] > 40) {
      current_f <- record    # new family: remember it for later persons
    } else {
      # person record: emit one row combining household + family + person
      n_filled <- n_filled + 1
      out$h_seq[n_filled] <- substr(current_h, 1, 6)
      out$f_seq_pos[n_filled] <- substr(current_f, 1, 8)
      for (nm in names(h_fields))
        out[[nm]][n_filled] <- grab(current_h, h_fields[[nm]])
      for (nm in names(f_fields))
        out[[nm]][n_filled] <- grab(current_f, f_fields[[nm]])
      for (nm in names(p_fields))
        out[[nm]][n_filled] <- grab(record, p_fields[[nm]])
    }
  }

  do.call(data.frame, out)
}
# Parse a fixed-width CPS March extract (one record per physical line, held in
# cps[, 1]) into household (h), family (f) and person (p) data frames, then
# merge them into a single person-level data frame.
#
# Record type is encoded in columns 7-8 of each line:
# 0 = household record, 1-39 = family record, 41-79 = person record.
#
# NOTE(review): this function appears superseded by read.cps85.take2() and is
# currently broken -- see the notes on a_maritl and on the final
# data.frame(pppos, p_f_pos, ...) call below.
read.cps = function(cps) {
# record-type code for every line of the file
is_household = as.numeric(substr(cps[,1], 7, 8))
# --- household-level fields (record code 0) ---
h_seq = substr(cps[which(is_household==0),], 1, 6)
hnumfam = as.numeric(substr(cps[which(is_household==0),], 11, 12))
#hg_st60 = as.numeric(substr(cps[which(is_household==0),], 40, 41))
#hccc_r = as.numeric(substr(cps[which(is_household==0),], 58, 58))
htotval = as.numeric(substr(cps[which(is_household==0),], 72, 80))
h = data.frame(h_seq, hnumfam, htotval)
# --- family-level fields (record codes 1-39) ---
f_seq = substr(cps[which(is_household>0 & is_household<40),], 1, 6)
ffpos = as.numeric(substr(cps[which(is_household>0 & is_household<40),], 7, 8))
f_seq_pos = substr(cps[which(is_household>0 & is_household<40),], 1, 8)
fkind = as.numeric(substr(cps[which(is_household>0 & is_household<40),], 9, 9))
ftype = as.numeric(substr(cps[which(is_household>0 & is_household<40),], 10, 10))
frelu18 = as.numeric(substr(cps[which(is_household>0 & is_household<40),], 151, 151))
#frelu6 = as.numeric(substr(cps[which(is_household>0 & is_household<40),], 28, 28))
ftotval = as.numeric(substr(cps[which(is_household>0 & is_household<40),], 105, 113))
f = data.frame(f_seq, ffpos, f_seq_pos, fkind, ftype, frelu18, ftotval)
# rename so the final merge with h can join on the household sequence number
colnames(f)[1] = "h_seq"
# --- person-level fields (record codes 41-79) ---
p_seq = substr(cps[which(is_household>40 & is_household<80),], 1, 6)
pppos = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 7, 8))
#p_f_pos = paste(p_seq, substr(cps[which(is_household>40 & is_household<80),], 44, 45), sep="") #105?
#a_exprrp = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 13, 14))
#a_famtyp = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 31, 31))
#a_famnum = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 29, 30))
#a_famrel = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 32, 32))
# NOTE(review): this line filters *family* records (0 < code < 40) while every
# other person-level field below uses 40 < code < 80, so its length will
# generally differ from the other person vectors -- almost certainly a bug.
a_maritl = as.numeric(substr(cps[which(is_household>0 & is_household<40),], 107, 107))
a_sex = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 108, 108))
a_race = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 109, 109))
a_age = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 110, 110))
#a_reorgn = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 27, 28))
a_hga = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 115, 116))
a_hgc = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 117, 117))
marsupwt = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 118, 128))
wkswork = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 134, 135))
hrswk = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 136, 137))
rsnnotw = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 19, 20))
wsal_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 191, 195))
semp_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 196, 201))
frse_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 202, 207))
#uc_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 278, 282))
#wc_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 285, 289))
ss_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 208, 212))
ssi_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 213, 216))
paw_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 217, 221))
vet_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 233, 237))
#srvs_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 337, 342))
#dsab_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 360, 365))
#rtm_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 379, 384))
int_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 222, 226))
#div_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 393, 397))
#rnt_val = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 399, 403))
ptotval = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 248, 254))
pearnval = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 255, 261))
pothval = as.numeric(substr(cps[which(is_household>40 & is_household<80),], 262, 268))
# NOTE(review): p_f_pos, a_exprrp, a_famtyp, a_famnum, a_famrel, a_reorgn,
# uc_val, wc_val, srvs_val, dsab_val, rtm_val and div_val are all commented
# out above, so this call fails with "object not found" when run.
p = data.frame(pppos, p_f_pos, a_exprrp, a_famtyp, a_famnum, a_famrel, a_maritl, a_sex, a_race, a_age, a_reorgn,
               a_hga, a_hgc, marsupwt, wkswork, hrswk, rsnnotw, wsal_val, semp_val, frse_val, uc_val, wc_val,
               ss_val, ssi_val, paw_val, vet_val, srvs_val, dsab_val, rtm_val, int_val, div_val, ptotval, pearnval, pothval)
#colnames(p)[1] = "h_seq"
# assumes column 2 of p is the family-position key (p_f_pos; see NOTE above)
colnames(p)[2] = "f_seq_pos"
# sanity-check dimensions before merging
print(dim(h))
print(dim(f))
print(dim(p))
# person -> family on the combined sequence+position key, then -> household
first = merge(p, f, by="f_seq_pos")
print(dim(first))
second = merge(first, h, by="h_seq")
print(dim(second))
return(second)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot2_themes.r
\name{theme_pinkygray}
\alias{theme_pinkygray}
\title{Pinky gray ggplot2 theme}
\usage{
theme_pinkygray(font_size = 12, lines = TRUE, legend = TRUE, ...)
}
\arguments{
\item{font_size}{Base font size, in points. Defaults to 12.}
\item{lines}{Logical; whether to draw panel grid lines. Defaults to TRUE.}
\item{legend}{Logical; whether to display the legend. Defaults to TRUE.}
\item{...}{Further arguments passed on to the underlying theme function.}
}
\value{
A ggplot2 theme object.
}
\description{
Pinky gray ggplot2 theme
}
| /man/theme_pinkygray.Rd | no_license | peterdalle/surveyutils | R | false | true | 330 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot2_themes.r
\name{theme_pinkygray}
\alias{theme_pinkygray}
\title{Pinky gray ggplot2 theme}
\usage{
theme_pinkygray(font_size = 12, lines = TRUE, legend = TRUE, ...)
}
\arguments{
\item{font_size}{Base font size, in points. Defaults to 12.}
\item{lines}{Logical; whether to draw panel grid lines. Defaults to TRUE.}
\item{legend}{Logical; whether to display the legend. Defaults to TRUE.}
\item{...}{Further arguments passed on to the underlying theme function.}
}
\value{
A ggplot2 theme object.
}
\description{
Pinky gray ggplot2 theme
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/getFeatureSet.R
\name{getDefaultFeatureFunArgs}
\alias{getDefaultFeatureFunArgs}
\title{Returns a list of parameter defaults for feature computation.}
\usage{
getDefaultFeatureFunArgs()
}
\value{
[\code{list}]
}
\description{
Returns a list of parameter defaults for feature computation.
}
| /man/getDefaultFeatureFunArgs.Rd | no_license | MartinWolke/salesperson | R | false | false | 375 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/getFeatureSet.R
\name{getDefaultFeatureFunArgs}
\alias{getDefaultFeatureFunArgs}
\title{Returns list of parameters defaults for feature computation.}
\usage{
getDefaultFeatureFunArgs()
}
\value{
[\code{list}]
}
\description{
Returns list of parameters defaults for feature computation.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modules_fn.R
\name{use_module_test}
\alias{use_module_test}
\title{Add a test file for a module}
\usage{
use_module_test(name, pkg = get_golem_wd(), open = TRUE)
}
\arguments{
\item{name}{The name of the module.}
\item{pkg}{Path to the root of the package. Default is \code{get_golem_wd()}.}
\item{open}{Should the created file be opened?}
}
\value{
Used for side effect. Returns the path invisibly.
}
\description{
Add a test file for a module, using the new testServer structure.
}
| /man/use_module_test.Rd | permissive | ThinkR-open/golem | R | false | true | 564 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modules_fn.R
\name{use_module_test}
\alias{use_module_test}
\title{Add a test file for a module}
\usage{
use_module_test(name, pkg = get_golem_wd(), open = TRUE)
}
\arguments{
\item{name}{The name of the module.}
\item{pkg}{Path to the root of the package. Default is \code{get_golem_wd()}.}
\item{open}{Should the created file be opened?}
}
\value{
Used for side effect. Returns the path invisibly.
}
\description{
Add a test file for a module, using the new testServer structure.
}
|
# Auto-generated (AFL-derived) fuzz-test input: calls the internal
# multivariance:::match_rows() with a fixed argument list and prints the
# structure of whatever it returns.
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 1.64958500504479e-220, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
# Auto-generated fuzz-test driver; `testlist` is defined immediately above.
result <- do.call(multivariance:::match_rows,testlist)
str(result)
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -5.89333382920197e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
# Auto-generated fuzz-test driver: feeds the fixed `testlist` defined above
# to DLMtool::LBSPRgen and prints the structure of the result.
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
-8.48833146280612e+43, 2.71954993859316e-126 ))
# Auto-generated fuzz-test driver: feeds the fixed `testlist` defined above
# to DLMtool::LBSPRgen and prints the structure of the result.
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
# /usr/bin/rscript
## Coursera Getting and Cleaning Data
## by Jeff Leek, PhD, Roger D. Peng, PhD, Brian Caffo, PhD
## Johns Hopkins University
##
## Question 2
##
## The sqldf package allows for execution of SQL commands on R data frames. We will
## use the sqldf package to practice the queries we might send with the dbSendQuery
## command in RMySQL. Download the American Community Survey data and load it into
## an R object called
## acs
##
## https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv
##
## Which of the following commands will select only the data for the probability
## weights pwgtp1 with ages less than 50?
##
## sqldf("select * from acs where AGEP 50")
## sqldf("select * from acs where AGEP 50 and pwgtp1")
## sqldf("select pwgtp1 from acs where AGEP 50")
## sqldf("select pwgtp1 from acs")
##
## Description:
## This script attempts to answer the above question.
##
## Author:
## Min Wang (min.wang@depi.vic.gov.au)
##
## Date Created:
## 17 June 2015
##
## Date modified and reason:
##
## Execution:
## Rscript <MODULE_NAME>
##
## Answer:
## sqldf("select pwgtp1 from acs where AGEP < 50")
library(sqldf)

# Download the ACS extract once, then load it into a data frame.
# Fix: download.file() returns an integer status code (0 on success), not the
# downloaded data, so its return value must not be assigned to `acs`.
raw_file <- "ques2.rawfile.csv"
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv",
              destfile = raw_file, method = "curl")
acs <- read.table(raw_file, sep = ",", header = TRUE)

# Probability weights (pwgtp1) for respondents under age 50.
sqldf("select pwgtp1 from acs where AGEP < 50")
| /03.GettingAndCleaningData/quizzes/quiz2/ques2.R | no_license | minw2828/datasciencecoursera | R | false | false | 1,403 | r | # /usr/bin/rscript
## Coursera Getting and Cleaning Data
## by Jeff Leek, PhD, Roger D. Peng, PhD, Brian Caffo, PhD
## Johns Hopkins University
##
## Question 2
##
## The sqldf package allows for execution of SQL commands on R data frames. We will
## use the sqldf package to practice the queries we might send with the dbSendQuery
## command in RMySQL. Download the American Community Survey data and load it into
## an R object called
## acs
##
## https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv
##
## Which of the following commands will select only the data for the probability
## weights pwgtp1 with ages less than 50?
##
## sqldf("select * from acs where AGEP 50")
## sqldf("select * from acs where AGEP 50 and pwgtp1")
## sqldf("select pwgtp1 from acs where AGEP 50")
## sqldf("select pwgtp1 from acs")
##
## Description:
## This script attempts to answer the above question.
##
## Author:
## Min Wang (min.wang@depi.vic.gov.au)
##
## Date Created:
## 17 June 2015
##
## Date modified and reason:
##
## Execution:
## Rscript <MODULE_NAME>
##
## Answer:
## sqldf("select pwgtp1 from acs where AGEP < 50")
library(sqldf)

# Download the ACS extract once, then load it into a data frame.
# Fix: download.file() returns an integer status code (0 on success), not the
# downloaded data, so its return value must not be assigned to `acs`.
raw_file <- "ques2.rawfile.csv"
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv",
              destfile = raw_file, method = "curl")
acs <- read.table(raw_file, sep = ",", header = TRUE)

# Probability weights (pwgtp1) for respondents under age 50.
sqldf("select pwgtp1 from acs where AGEP < 50")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pm10.R
\name{pm10}
\alias{pm10}
\title{Sub-index of a PM10}
\usage{
pm10(x)
}
\arguments{
\item{x}{The numeric value in concentration(ug/m^3)}
}
\value{
The index value of parameter PM10
}
\description{
Takes a concentration value (ug/m^3) and returns the corresponding PM10 sub-index value.
}
| /man/pm10.Rd | no_license | mth3012/aqi | R | false | true | 345 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pm10.R
\name{pm10}
\alias{pm10}
\title{Sub-index of a PM10}
\usage{
pm10(x)
}
\arguments{
\item{x}{The numeric value in concentration(ug/m^3)}
}
\value{
The index value of parameter PM10
}
\description{
Takes a concentration value (ug/m^3) and returns the corresponding PM10 sub-index value.
}
|
## National-park road trips: query pairwise driving distances from the Google
## Distance Matrix API (via gmapsdistance) and order the parks with a
## nearest-neighbour travelling-salesman heuristic (via the TSP package).
# install.packages(c("gmapsdistance", "TSP"))  # one-time setup, run manually
library(gmapsdistance)
library(TSP)

# SECURITY(review): the original script hard-coded a Google API key here.
# Keys must never be committed to source control -- load it from the
# environment instead (set GMAPS_API_KEY before running).
set.api.key(Sys.getenv("GMAPS_API_KEY"))

# Park names are URL-encoded ('+' for spaces) as gmapsdistance expects.
west_parks <- c("Yosemite+National+Park", "Grand+Canyon",
                "Redwood+National+Park", "Gardiner+MT", "Bryce+Canyon",
                "Glacier+National+Park", "Badlands+National+Park",
                "Arches+National+Park", "Zion+National+Park",
                "Monument+valley+national+park")
east_parks <- c("Acadia+national+park", "Congaree+National+Park",
                "Voyageurs+national+park", "Cuyahoga+Valley+national+park",
                "Great+Smoky+Mountains+National+Park",
                "Shenandoah+national+park", "Mammoth+Cave+national+park",
                "Cape+Cod+national+park", "Isle+Royale+national+park",
                "Dry+Tortugas+national+park")
all_parks <- c(west_parks, east_parks)

# Query the pairwise driving-distance matrix for `parks`, convert metres to
# miles, save the matrix to `csv_file`, print the nearest-neighbour tour
# length (as the original script did), and return the tour as a data frame.
plan_tour <- function(parks, csv_file) {
  res <- gmapsdistance(origin = parks, destination = parks, mode = "driving")
  # column 1 of res$Distance holds origin labels; the rest is the matrix
  miles <- round(res$Distance[, seq_along(parks) + 1] * 0.000621371192, 2)
  row.names(miles) <- colnames(miles)
  write.csv(miles, file = csv_file)
  tour <- solve_TSP(as.ATSP(as.matrix(miles)), "nn")
  print(tour_length(tour))
  as.data.frame(tour)
}

## Overall tour ##
tour <- plan_tour(all_parks, "distance.csv")

## Tour of the West ##
tourWest <- plan_tour(west_parks, "Westdistance.csv")

## Tour of the East ##
tourEast <- plan_tour(east_parks, "Eastdistance.csv")
| /Rstudio final project.R | no_license | braggjn/National-Parks | R | false | false | 3,701 | r | ## Overall Tour ##
## National-park road trips: query pairwise driving distances from the Google
## Distance Matrix API (via gmapsdistance) and order the parks with a
## nearest-neighbour travelling-salesman heuristic (via the TSP package).
# install.packages(c("gmapsdistance", "TSP"))  # one-time setup, run manually
library(gmapsdistance)
library(TSP)

# SECURITY(review): the original script hard-coded a Google API key here.
# Keys must never be committed to source control -- load it from the
# environment instead (set GMAPS_API_KEY before running).
set.api.key(Sys.getenv("GMAPS_API_KEY"))

# Park names are URL-encoded ('+' for spaces) as gmapsdistance expects.
west_parks <- c("Yosemite+National+Park", "Grand+Canyon",
                "Redwood+National+Park", "Gardiner+MT", "Bryce+Canyon",
                "Glacier+National+Park", "Badlands+National+Park",
                "Arches+National+Park", "Zion+National+Park",
                "Monument+valley+national+park")
east_parks <- c("Acadia+national+park", "Congaree+National+Park",
                "Voyageurs+national+park", "Cuyahoga+Valley+national+park",
                "Great+Smoky+Mountains+National+Park",
                "Shenandoah+national+park", "Mammoth+Cave+national+park",
                "Cape+Cod+national+park", "Isle+Royale+national+park",
                "Dry+Tortugas+national+park")
all_parks <- c(west_parks, east_parks)

# Query the pairwise driving-distance matrix for `parks`, convert metres to
# miles, save the matrix to `csv_file`, print the nearest-neighbour tour
# length (as the original script did), and return the tour as a data frame.
plan_tour <- function(parks, csv_file) {
  res <- gmapsdistance(origin = parks, destination = parks, mode = "driving")
  # column 1 of res$Distance holds origin labels; the rest is the matrix
  miles <- round(res$Distance[, seq_along(parks) + 1] * 0.000621371192, 2)
  row.names(miles) <- colnames(miles)
  write.csv(miles, file = csv_file)
  tour <- solve_TSP(as.ATSP(as.matrix(miles)), "nn")
  print(tour_length(tour))
  as.data.frame(tour)
}

## Overall tour ##
tour <- plan_tour(all_parks, "distance.csv")

## Tour of the West ##
tourWest <- plan_tour(west_parks, "Westdistance.csv")

## Tour of the East ##
tourEast <- plan_tour(east_parks, "Eastdistance.csv")
|
context("calculate")
# Tests for calculate(): deterministic evaluation of greta arrays given
# fixed values for the variables they depend on.
test_that("calculate works with correct lists", {
  skip_if_not(check_tf_version())
  source("helpers.R")
  # unknown variable: fixing the free variable `a` makes y = a * x deterministic
  x <- as_data(c(1, 2))
  a <- normal(0, 1)
  y <- a * x
  y_value <- calculate(y, list(a = 3))
  expect_equal(y_value, matrix(c(3, 6)))
  # unknown variable and new data: data greta arrays can be overridden too
  x <- as_data(c(1, 2))
  a <- normal(0, 1)
  y <- a * x
  y_value <- calculate(y, list(a = 6, x = c(2, 1)))
  expect_equal(y_value, matrix(c(12, 6)))
})
test_that("calculate works with mcmc.list objects", {
  skip_if_not(check_tf_version())
  source("helpers.R")
  samples <- 10
  x <- as_data(c(1, 2))
  a <- normal(0, 1)
  y <- a * x
  m <- model(y)
  # warmup = 0 keeps the test fast; we only need draws to feed calculate()
  draws <- mcmc(m, warmup = 0, n_samples = samples, verbose = FALSE)
  # with an existing greta array
  y_values <- calculate(y, draws)
  # correct class
  expect_s3_class(y_values, "mcmc.list")
  # correct dimensions (one row per sample, one column per element of y)
  expect_equal(dim(y_values[[1]]), c(10, 2))
  # all valid values
  expect_true(all(is.finite(as.vector(y_values[[1]]))))
  # with a new greta array, based on a different element in the model
  new_values <- calculate(a ^ 2, draws)
  # correct class
  expect_s3_class(new_values, "mcmc.list")
  # correct dimensions
  expect_equal(dim(new_values[[1]]), c(10, 1))
  # all valid values
  expect_true(all(is.finite(as.vector(new_values[[1]]))))
})
test_that("calculate errors nicely if mcmc.list objects missing info", {
  skip_if_not(check_tf_version())
  source("helpers.R")

  x <- as_data(c(1, 2))
  a <- normal(0, 1)
  y <- a * x
  m <- model(y)
  chains <- mcmc(m, warmup = 0, n_samples = 10, verbose = FALSE)

  # strip the attribute that links the draws back to their greta model
  attr(chains, "model_info") <- NULL

  # without the model info, calculate() should refuse with a helpful message
  expect_error(calculate(y, chains),
               "perhaps it wasn't created by greta")
})
test_that("calculate errors nicely if not all required values are passed", {
  skip_if_not(check_tf_version())
  source("helpers.R")

  x <- as_data(c(1, 2))
  a <- normal(0, 1)
  y <- a * x

  # supplying x alone leaves a undetermined, so calculate() must refuse
  expected_msg <- paste("values have not been provided for all greta arrays on",
                        "which the target depends. Please provide values for the",
                        "greta array: a")
  expect_error(calculate(y, list(x = c(2, 1))), expected_msg)
})
test_that("calculate errors nicely if values have incorrect dimensions", {
  skip_if_not(check_tf_version())
  source("helpers.R")

  x <- as_data(c(1, 2))
  a <- normal(0, 1)
  y <- a * x

  # a is scalar, so a length-two replacement value cannot match its shape
  expect_error(calculate(y, list(a = c(1, 1))),
               "different number of elements than the greta array")
})
test_that("calculate errors nicely if not used on a greta array", {
  skip_if_not(check_tf_version())
  source("helpers.R")

  x <- as_data(c(1, 2))
  a <- normal(0, 1)
  y <- a * x

  # a plain R vector is not a valid target and should be rejected up front
  z <- 1:5
  expect_error(calculate(z, list(a = c(1, 1))),
               "target' is not a greta array")
})
| /tests/testthat/test_calculate.R | permissive | Rosenguyen0411/greta | R | false | false | 2,952 | r | context("calculate")
# NOTE(review): this section is a second, byte-identical copy of the
# calculate() test suite (the file content appears twice in this dump).
# The tests exercise greta's calculate() with fixed value lists, with
# mcmc.list posterior draws, and along each of its error paths.

test_that("calculate works with correct lists", {
  skip_if_not(check_tf_version())
  source("helpers.R")
  # unknown variable
  x <- as_data(c(1, 2))
  a <- normal(0, 1)
  y <- a * x
  y_value <- calculate(y, list(a = 3))
  expect_equal(y_value, matrix(c(3, 6)))
  # unknown variable and new data
  x <- as_data(c(1, 2))
  a <- normal(0, 1)
  y <- a * x
  y_value <- calculate(y, list(a = 6, x = c(2, 1)))
  expect_equal(y_value, matrix(c(12, 6)))
})

test_that("calculate works with mcmc.list objects", {
  skip_if_not(check_tf_version())
  source("helpers.R")
  samples <- 10
  x <- as_data(c(1, 2))
  a <- normal(0, 1)
  y <- a * x
  m <- model(y)
  draws <- mcmc(m, warmup = 0, n_samples = samples, verbose = FALSE)
  # with an existing greta array
  y_values <- calculate(y, draws)
  # correct class
  expect_s3_class(y_values, "mcmc.list")
  # correct dimensions
  expect_equal(dim(y_values[[1]]), c(10, 2))
  # all valid values
  expect_true(all(is.finite(as.vector(y_values[[1]]))))
  # with a new greta array, based on a different element in the model
  new_values <- calculate(a ^ 2, draws)
  # correct class
  expect_s3_class(new_values, "mcmc.list")
  # correct dimensions
  expect_equal(dim(new_values[[1]]), c(10, 1))
  # all valid values
  expect_true(all(is.finite(as.vector(new_values[[1]]))))
})

test_that("calculate errors nicely if mcmc.list objects missing info", {
  skip_if_not(check_tf_version())
  source("helpers.R")
  samples <- 10
  x <- as_data(c(1, 2))
  a <- normal(0, 1)
  y <- a * x
  m <- model(y)
  draws <- mcmc(m, warmup = 0, n_samples = samples, verbose = FALSE)
  # scrub the model info (simulates draws not produced by greta)
  attr(draws, "model_info") <- NULL
  # it should error nicely
  expect_error(calculate(y, draws),
               "perhaps it wasn't created by greta")
})

test_that("calculate errors nicely if not all required values are passed", {
  skip_if_not(check_tf_version())
  source("helpers.R")
  x <- as_data(c(1, 2))
  a <- normal(0, 1)
  y <- a * x
  # it should error nicely: a value for x alone leaves a undetermined
  expect_error(calculate(y, list(x = c(2, 1))),
               paste("values have not been provided for all greta arrays on",
                     "which the target depends. Please provide values for the",
                     "greta array: a"))
})

test_that("calculate errors nicely if values have incorrect dimensions", {
  skip_if_not(check_tf_version())
  source("helpers.R")
  x <- as_data(c(1, 2))
  a <- normal(0, 1)
  y <- a * x
  # it should error nicely: a is scalar but a length-two value is given
  expect_error(calculate(y, list(a = c(1, 1))),
               "different number of elements than the greta array")
})

test_that("calculate errors nicely if not used on a greta array", {
  skip_if_not(check_tf_version())
  source("helpers.R")
  x <- as_data(c(1, 2))
  a <- normal(0, 1)
  y <- a * x
  z <- 1:5
  # it should error nicely: z is a plain R vector, not a greta array
  expect_error(calculate(z, list(a = c(1, 1))),
               "target' is not a greta array")
})
context("log0const")

# log0const() is an optimised but less transparent version of
# log0const_slow(); both must agree on a vector that mixes positive
# values with exact zeros.
vals <- c(runif(100), 0, 0, 0)
shift <- -log(10)
slow_result <- log0const_slow(vals, shift)
fast_result <- log0const(vals, shift)
test_that("log0const slow = log0const fast", {
  testthat::expect_equal(slow_result, fast_result)
})
context("Empirical c.d.f.")

# Compare the package's three empirical-c.d.f. implementations (ecdf1,
# ecdf2, ecdf3) against stats::ecdf(), evaluated at the disjoint block
# maxima of the newlyn data.
# (Fix: the original assigned x <- newlyn first, but that value was
# immediately overwritten by temp$x, so the dead assignment is removed.)
temp <- disjoint_maxima(newlyn)
y <- temp$y
x <- temp$x
test0 <- stats::ecdf(x)(y)  # reference values
test1 <- ecdf1(x, y, lenx = length(x))
test2 <- ecdf2(x, y)
test3 <- ecdf3(x, y)
test_that("ecdf1 = stats::ecdf", {
  testthat::expect_equal(test0, test1)
})
test_that("ecdf2 = stats::ecdf", {
  testthat::expect_equal(test0, test2)
})
test_that("ecdf3 = stats::ecdf", {
  testthat::expect_equal(test0, test3)
})
| /fuzzedpackages/exdex/tests/testthat/test-misc.R | no_license | akhikolla/testpackages | R | false | false | 774 | r | context("log0const")
# NOTE(review): second, byte-identical copy of the test-misc.R content
# above (the file appears twice in this dump).
# Test that log0const(), a quick but less transparent version of
# log0const_slow() gives the correct answers
x <- c(runif(100), 0, 0, 0)  # positive values plus exact zeros
const <- -log(10)
test1 <- log0const_slow(x, const)
test2 <- log0const(x, const)
test_that("log0const slow = log0const fast", {
  testthat::expect_equal(test1, test2)
})

context("Empirical c.d.f.")
# Compare three ecdf implementations against stats::ecdf() on block
# maxima of the newlyn data.
x <- newlyn  # NOTE(review): dead assignment -- overwritten by temp$x below
temp <- disjoint_maxima(newlyn)
y <- temp$y
x <- temp$x
test0 <- stats::ecdf(x)(y)  # reference values
test1 <- ecdf1(x, y, lenx = length(x))
test2 <- ecdf2(x, y)
test3 <- ecdf3(x, y)
test_that("ecdf1 = stats::ecdf", {
  testthat::expect_equal(test0, test1)
})
test_that("ecdf2 = stats::ecdf", {
  testthat::expect_equal(test0, test2)
})
test_that("ecdf3 = stats::ecdf", {
  testthat::expect_equal(test0, test3)
})
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.