content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grass7_r_out_vtk.R
\name{grass7_r_out_vtk}
\alias{grass7_r_out_vtk}
\title{QGIS algorithm r.out.vtk}
\usage{
grass7_r_out_vtk(
input = qgisprocess::qgis_default_value(),
elevation = qgisprocess::qgis_default_value(),
null = qgisprocess::qgis_default_value(),
z = qgisprocess::qgis_default_value(),
rgbmaps = qgisprocess::qgis_default_value(),
vectormaps = qgisprocess::qgis_default_value(),
zscale = qgisprocess::qgis_default_value(),
precision = qgisprocess::qgis_default_value(),
.p = qgisprocess::qgis_default_value(),
.s = qgisprocess::qgis_default_value(),
.t = qgisprocess::qgis_default_value(),
.v = qgisprocess::qgis_default_value(),
.o = qgisprocess::qgis_default_value(),
.c = qgisprocess::qgis_default_value(),
output = qgisprocess::qgis_default_value(),
GRASS_REGION_PARAMETER = qgisprocess::qgis_default_value(),
GRASS_REGION_CELLSIZE_PARAMETER = qgisprocess::qgis_default_value(),
...,
.complete_output = TRUE
)
}
\arguments{
\item{input}{\code{multilayer} - Input raster. .}
\item{elevation}{\code{raster} - Input elevation raster map. Path to a raster layer.}
\item{null}{\code{number} - Value to represent no data cell. A numeric value.}
\item{z}{\code{number} - Constant elevation (if no elevation map is specified). A numeric value.}
\item{rgbmaps}{\code{multilayer} - Three (r,g,b) raster maps to create RGB values. .}
\item{vectormaps}{\code{multilayer} - Three (x,y,z) raster maps to create vector values. .}
\item{zscale}{\code{number} - Scale factor for elevation. A numeric value.}
\item{precision}{\code{number} - Number of significant digits. A numeric value.}
\item{.p}{\code{boolean} - Create VTK point data instead of VTK cell data. 1 for true/yes. 0 for false/no. Original algorithm parameter name: -p.}
\item{.s}{\code{boolean} - Use structured grid for elevation (not recommended). 1 for true/yes. 0 for false/no. Original algorithm parameter name: -s.}
\item{.t}{\code{boolean} - Use polydata-trianglestrips for elevation grid creation. 1 for true/yes. 0 for false/no. Original algorithm parameter name: -t.}
\item{.v}{\code{boolean} - Use polydata-vertices for elevation grid creation. 1 for true/yes. 0 for false/no. Original algorithm parameter name: -v.}
\item{.o}{\code{boolean} - Scale factor affects the origin (if no elevation map is given). 1 for true/yes. 0 for false/no. Original algorithm parameter name: -o.}
\item{.c}{\code{boolean} - Correct the coordinates to match the VTK-OpenGL precision. 1 for true/yes. 0 for false/no. Original algorithm parameter name: -c.}
\item{output}{\code{fileDestination} - VTK File. Path for new file.}
\item{GRASS_REGION_PARAMETER}{\code{extent} - GRASS GIS 7 region extent. A comma delimited string of x min, x max, y min, y max. E.g. '4,10,101,105'. Path to a layer. The extent of the layer is used..}
\item{GRASS_REGION_CELLSIZE_PARAMETER}{\code{number} - GRASS GIS 7 region cellsize (leave 0 for default). A numeric value.}
\item{...}{further parameters passed to \code{qgisprocess::qgis_run_algorithm()}}
\item{.complete_output}{logical specifying whether the complete output of \code{qgisprocess::qgis_run_algorithm()} should be used (\code{TRUE}) or only the first output (most likely the main one) should be read (\code{FALSE}). Default value is \code{TRUE}.}
}
\description{
QGIS Algorithm provided by GRASS r.out.vtk (grass7:r.out.vtk)
}
\details{
\subsection{Outputs description}{
\itemize{
\item output - outputFile - VTK File
}
}
}
| /man/grass7_r_out_vtk.Rd | permissive | VB6Hobbyst7/r_package_qgis | R | false | true | 3,531 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grass7_r_out_vtk.R
\name{grass7_r_out_vtk}
\alias{grass7_r_out_vtk}
\title{QGIS algorithm r.out.vtk}
\usage{
grass7_r_out_vtk(
input = qgisprocess::qgis_default_value(),
elevation = qgisprocess::qgis_default_value(),
null = qgisprocess::qgis_default_value(),
z = qgisprocess::qgis_default_value(),
rgbmaps = qgisprocess::qgis_default_value(),
vectormaps = qgisprocess::qgis_default_value(),
zscale = qgisprocess::qgis_default_value(),
precision = qgisprocess::qgis_default_value(),
.p = qgisprocess::qgis_default_value(),
.s = qgisprocess::qgis_default_value(),
.t = qgisprocess::qgis_default_value(),
.v = qgisprocess::qgis_default_value(),
.o = qgisprocess::qgis_default_value(),
.c = qgisprocess::qgis_default_value(),
output = qgisprocess::qgis_default_value(),
GRASS_REGION_PARAMETER = qgisprocess::qgis_default_value(),
GRASS_REGION_CELLSIZE_PARAMETER = qgisprocess::qgis_default_value(),
...,
.complete_output = TRUE
)
}
\arguments{
\item{input}{\code{multilayer} - Input raster. .}
\item{elevation}{\code{raster} - Input elevation raster map. Path to a raster layer.}
\item{null}{\code{number} - Value to represent no data cell. A numeric value.}
\item{z}{\code{number} - Constant elevation (if no elevation map is specified). A numeric value.}
\item{rgbmaps}{\code{multilayer} - Three (r,g,b) raster maps to create RGB values. .}
\item{vectormaps}{\code{multilayer} - Three (x,y,z) raster maps to create vector values. .}
\item{zscale}{\code{number} - Scale factor for elevation. A numeric value.}
\item{precision}{\code{number} - Number of significant digits. A numeric value.}
\item{.p}{\code{boolean} - Create VTK point data instead of VTK cell data. 1 for true/yes. 0 for false/no. Original algorithm parameter name: -p.}
\item{.s}{\code{boolean} - Use structured grid for elevation (not recommended). 1 for true/yes. 0 for false/no. Original algorithm parameter name: -s.}
\item{.t}{\code{boolean} - Use polydata-trianglestrips for elevation grid creation. 1 for true/yes. 0 for false/no. Original algorithm parameter name: -t.}
\item{.v}{\code{boolean} - Use polydata-vertices for elevation grid creation. 1 for true/yes. 0 for false/no. Original algorithm parameter name: -v.}
\item{.o}{\code{boolean} - Scale factor affects the origin (if no elevation map is given). 1 for true/yes. 0 for false/no. Original algorithm parameter name: -o.}
\item{.c}{\code{boolean} - Correct the coordinates to match the VTK-OpenGL precision. 1 for true/yes. 0 for false/no. Original algorithm parameter name: -c.}
\item{output}{\code{fileDestination} - VTK File. Path for new file.}
\item{GRASS_REGION_PARAMETER}{\code{extent} - GRASS GIS 7 region extent. A comma delimited string of x min, x max, y min, y max. E.g. '4,10,101,105'. Path to a layer. The extent of the layer is used..}
\item{GRASS_REGION_CELLSIZE_PARAMETER}{\code{number} - GRASS GIS 7 region cellsize (leave 0 for default). A numeric value.}
\item{...}{further parameters passed to \code{qgisprocess::qgis_run_algorithm()}}
\item{.complete_output}{logical specifying whether the complete output of \code{qgisprocess::qgis_run_algorithm()} should be used (\code{TRUE}) or only the first output (most likely the main one) should be read (\code{FALSE}). Default value is \code{TRUE}.}
}
\description{
QGIS Algorithm provided by GRASS r.out.vtk (grass7:r.out.vtk)
}
\details{
\subsection{Outputs description}{
\itemize{
\item output - outputFile - VTK File
}
}
}
|
#' Localization distances
#'
#' Compute the localization distances of order k of the curve \code{y0}.
#'
#' @param y matrix p by n, being n the number of functions and p the number of grid points.
#' @param y0 focal curve (index or character name).
#' @return a vector of length (n-1), being the localization distance of its corresponding order.
#'
#' @examples
#' localizationDistances_1 <- localizationDistances(exampleData, y0 = "1")
#'
#' @references Elías, Antonio, Jiménez, Raúl and Yukich, Joe (2020). Localization processes for functional data analysis (submitted).
#'
#' @export
localizationDistances <- function(y, y0){
  # Localization processes of the focal curve; the `lc` component is a
  # p x (n - 1) matrix whose k-th column is the localization process of
  # order k (see localizationProcesses()).
  lc_focal <- localizationProcesses(y, y0)$lc
  focal_curve <- y[, y0]
  # Localization distance of each order: mean absolute deviation between
  # the focal curve and the corresponding localization process.
  apply(lc_focal, 2, function(proc) mean(abs(focal_curve - proc)))
}
| /R/LocalizationDistances.R | no_license | cran/localFDA | R | false | false | 841 | r | #' Localization distances
#'
#' Compute the localization distances of order k of the curve \code{y0}.
#'
#' @param y matrix p by n, being n the number of functions and p the number of grid points.
#' @param y0 focal curve (index or character name).
#' @return a vector of length (n-1), being the localization distance of its corresponding order.
#'
#' @examples
#' localizationDistances_1 <- localizationDistances(exampleData, y0 = "1")
#'
#' @references Elías, Antonio, Jiménez, Raúl and Yukich, Joe (2020). Localization processes for functional data analysis (submitted).
#'
#' @export
localizationDistances <- function(y, y0){
  # Localization processes of the focal curve; the `lc` component is a
  # p x (n - 1) matrix whose k-th column is the localization process of
  # order k (see localizationProcesses()).
  lc_focal <- localizationProcesses(y, y0)$lc
  focal_curve <- y[, y0]
  # Localization distance of each order: mean absolute deviation between
  # the focal curve and the corresponding localization process.
  apply(lc_focal, 2, function(proc) mean(abs(focal_curve - proc)))
}
|
list.dirs <- function(path=".", pattern=NULL, all.dirs=FALSE, full.names=FALSE, ignore.case=FALSE) {
  # List the folders (directories) directly inside `path` (non-recursive).
  # `all.dirs` is forwarded to list.files() as `all.files`, so hidden
  # directories are included only when it is TRUE.
  # Returns full paths when `full.names` is TRUE, otherwise just the
  # directory names.
  entries <- list.files(path, pattern, all.files = all.dirs,
                        full.names = TRUE, recursive = FALSE,
                        ignore.case = ignore.case)
  # Keep only the entries that are actually directories.
  dirs <- entries[file.info(entries)$isdir]
  if (isTRUE(full.names)) {
    dirs
  } else {
    basename(dirs)
  }
}
/Users/Kevin/Fieldwork-vmj/_exp_data/NPron
NPron.list[names(NPron.list)==fn] | /Temp.R | no_license | klp3hills/prosodypro-R | R | false | false | 597 | r | list.dirs <- function(path=".", pattern=NULL, all.dirs=FALSE, full.names=FALSE, ignore.case=FALSE) {
# Use this function to get a list of folders (directories) in
# a specified folder (directory)
# use full.names=TRUE to pass to file.info
all <- list.files(path, pattern, all.dirs, full.names=TRUE, recursive=FALSE, ignore.case)
dirs <- all[file.info(all)$isdir] # determine whether to return full names or just dir names
if(isTRUE(full.names))
return(dirs)
else
return(basename(dirs))
}
/Users/Kevin/Fieldwork-vmj/_exp_data/NPron
NPron.list[names(NPron.list)==fn] |
# TextMining: build a word-frequency table and a word cloud from a speech
# transcript using the tm text-mining pipeline.
# NOTE(review): interactive demo script — it installs packages, prompts for
# a file, and downloads from the web on every run.
# Install required packages (one-time setup; as written, re-installs each run)
install.packages("tm") # for text mining
install.packages("SnowballC") # for text stemming
install.packages("wordcloud") # word-cloud generator
install.packages("RColorBrewer") # color palettes
# Load the packages
library("tm")
library("SnowballC")
library("wordcloud")
library("RColorBrewer")
# Interactively choose a local text file.
# NOTE(review): this value is immediately overwritten by the URL download
# below — keep only one of the two input methods.
text <- readLines(file.choose())
# Read the text file from the internet (MLK "I Have a Dream" speech)
filePath <- "http://www.sthda.com/sthda/RDoc/example-files/martin-luther-king-i-have-a-dream-speech.txt"
text <- readLines(filePath)
text
# Load the data as a corpus (one document per line of text)
docs <- Corpus(VectorSource(text))
inspect(docs)
# Replace special characters with spaces so they do not glue words together
toSpace <- content_transformer(function (x , pattern ) gsub(pattern, " ", x))
docs <- tm_map(docs, toSpace, "/")
docs <- tm_map(docs, toSpace, "@")
docs <- tm_map(docs, toSpace, "\\|")
# Convert the text to lower case
docs <- tm_map(docs, content_transformer(tolower))
# Remove numbers
docs <- tm_map(docs, removeNumbers)
# Remove common English stopwords
docs <- tm_map(docs, removeWords, stopwords("english"))
# Term-document matrix: rows = terms, columns = documents
dtm <- TermDocumentMatrix(docs)
m <- as.matrix(dtm)
# Total frequency of each term, most frequent first
v <- sort(rowSums(m),decreasing=TRUE)
d <- data.frame(word = names(v),freq=v)
head(d, 10)
# Generate the word cloud (seed fixed so the layout is reproducible)
set.seed(1234)
wordcloud(words = d$word, freq = d$freq, min.freq = 1,
          max.words=200, random.order=FALSE, rot.per=0.35,
          colors=brewer.pal(8, "Dark2"))
| /DataMining.R | no_license | ShreyasRB/Analytics | R | false | false | 1,357 | r | #TextMining
# Install required packages (one-time setup; as written, re-installs each run)
install.packages("tm") # for text mining
install.packages("SnowballC") # for text stemming
install.packages("wordcloud") # word-cloud generator
install.packages("RColorBrewer") # color palettes
# Load the packages
library("tm")
library("SnowballC")
library("wordcloud")
library("RColorBrewer")
# Interactively choose a local text file.
# NOTE(review): this value is immediately overwritten by the URL download
# below — keep only one of the two input methods.
text <- readLines(file.choose())
# Read the text file from the internet (MLK "I Have a Dream" speech)
filePath <- "http://www.sthda.com/sthda/RDoc/example-files/martin-luther-king-i-have-a-dream-speech.txt"
text <- readLines(filePath)
text
# Load the data as a corpus (one document per line of text)
docs <- Corpus(VectorSource(text))
inspect(docs)
# Replace special characters with spaces so they do not glue words together
toSpace <- content_transformer(function (x , pattern ) gsub(pattern, " ", x))
docs <- tm_map(docs, toSpace, "/")
docs <- tm_map(docs, toSpace, "@")
docs <- tm_map(docs, toSpace, "\\|")
# Convert the text to lower case
docs <- tm_map(docs, content_transformer(tolower))
# Remove numbers
docs <- tm_map(docs, removeNumbers)
# Remove common English stopwords
docs <- tm_map(docs, removeWords, stopwords("english"))
# Term-document matrix: rows = terms, columns = documents
dtm <- TermDocumentMatrix(docs)
m <- as.matrix(dtm)
# Total frequency of each term, most frequent first
v <- sort(rowSums(m),decreasing=TRUE)
d <- data.frame(word = names(v),freq=v)
head(d, 10)
# Generate the word cloud (seed fixed so the layout is reproducible)
set.seed(1234)
wordcloud(words = d$word, freq = d$freq, min.freq = 1,
          max.words=200, random.order=FALSE, rot.per=0.35,
          colors=brewer.pal(8, "Dark2"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/frequency_analysis.R
\name{daily_frequency_table}
\alias{daily_frequency_table}
\title{Daily frequency table}
\usage{
daily_frequency_table(gw_level_dv, date_col, value_col, approved_col)
}
\arguments{
\item{gw_level_dv}{daily groundwater level data
from readNWISdv}
\item{date_col}{the heading of the date column.}
\item{value_col}{name of value column.}
\item{approved_col}{name of column to get provisional/approved status.}
}
\value{
a data frame giving the max, mean, min, and number of available
days of data for each day of the year.
}
\description{
Give the historical max, mean, minimum, and number of available points
for each day of the year
}
\examples{
# site <- "263819081585801"
p_code_dv <- "62610"
statCd <- "00001"
# gw_level_dv <- dataRetrieval::readNWISdv(site, p_code_dv, statCd = statCd)
gw_level_dv <- L2701_example_data$Daily
daily_frequency_table(gw_level_dv,
date_col = "Date",
value_col = "X_62610_00001",
approved_col = "X_62610_00001_cd")
}
| /man/daily_frequency_table.Rd | permissive | PatrickEslick/HASP | R | false | true | 1,119 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/frequency_analysis.R
\name{daily_frequency_table}
\alias{daily_frequency_table}
\title{Daily frequency table}
\usage{
daily_frequency_table(gw_level_dv, date_col, value_col, approved_col)
}
\arguments{
\item{gw_level_dv}{daily groundwater level data
from readNWISdv}
\item{date_col}{the heading of the date column.}
\item{value_col}{name of value column.}
\item{approved_col}{name of column to get provisional/approved status.}
}
\value{
a data frame giving the max, mean, min, and number of available
days of data for each day of the year.
}
\description{
Give the historical max, mean, minimum, and number of available points
for each day of the year
}
\examples{
# site <- "263819081585801"
p_code_dv <- "62610"
statCd <- "00001"
# gw_level_dv <- dataRetrieval::readNWISdv(site, p_code_dv, statCd = statCd)
gw_level_dv <- L2701_example_data$Daily
daily_frequency_table(gw_level_dv,
date_col = "Date",
value_col = "X_62610_00001",
approved_col = "X_62610_00001_cd")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlayByPlayBoxScore.R
\name{simple_boxscore}
\alias{simple_boxscore}
\title{Simple Game Boxscore}
\usage{
simple_boxscore(GameID, home = TRUE)
}
\arguments{
\item{GameID}{(character or numeric) A 10 digit game ID associated with a
given NFL game.}
\item{home}{(boolean): home = TRUE will pull home stats,
home = FALSE pulls away stats}
}
\value{
A list of player statistics including passing, rushing, receiving,
defense, kicking, kick return, and punt return statistics for the specified
game.
}
\description{
This function pulls data from an NFL URL and constructs it into a formatted
boxscore.
}
\examples{
# Parsed drive summaries of final game in 2015 NFL season
nfl2015.finalregseasongame.gameID <- "2016010310"
simple_boxscore(nfl2015.finalregseasongame.gameID, home = TRUE)
}
| /man/simple_boxscore.Rd | no_license | ryurko/nflscrapR | R | false | true | 867 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlayByPlayBoxScore.R
\name{simple_boxscore}
\alias{simple_boxscore}
\title{Simple Game Boxscore}
\usage{
simple_boxscore(GameID, home = TRUE)
}
\arguments{
\item{GameID}{(character or numeric) A 10 digit game ID associated with a
given NFL game.}
\item{home}{(boolean): home = TRUE will pull home stats,
home = FALSE pulls away stats}
}
\value{
A list of player statistics including passing, rushing, receiving,
defense, kicking, kick return, and punt return statistics for the specified
game.
}
\description{
This function pulls data from an NFL URL and constructs it into a formatted
boxscore.
}
\examples{
# Parsed drive summaries of final game in 2015 NFL season
nfl2015.finalregseasongame.gameID <- "2016010310"
simple_boxscore(nfl2015.finalregseasongame.gameID, home = TRUE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formatting.R
\docType{data}
\name{custom_colors}
\alias{custom_colors}
\title{Default Color Scheme}
\format{An object of class \code{list} of length 19.}
\usage{
custom_colors
}
\description{
List of default custom colors
}
\details{
Convenience list of colors in #RGB format. Just type
names(custom_colors) to see the list of colors included.
}
\examples{
\dontrun{ggplot() + geom_line(aes(x=-5:5, y=(-5:5)^2), colour=custom_colors$gold)}
}
\keyword{datasets}
| /man/custom_colors.Rd | permissive | problemofpoints/reservetestr | R | false | true | 542 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formatting.R
\docType{data}
\name{custom_colors}
\alias{custom_colors}
\title{Default Color Scheme}
\format{An object of class \code{list} of length 19.}
\usage{
custom_colors
}
\description{
List of default custom colors
}
\details{
Convenience list of colors in #RGB format. Just type
names(custom_colors) to see the list of colors included.
}
\examples{
\dontrun{ggplot() + geom_line(aes(x=-5:5, y=(-5:5)^2), colour=custom_colors$gold)}
}
\keyword{datasets}
|
# Chapter 4 plotting script (part 1): generates Examples 4.2–4.10.
# NOTE(review): relies on objects loaded from FlowBookCorpus.rdata (`corpora`)
# and on sourced plotting helpers (PlotVerse, PlotSet, AddLine); `%>%` and
# `mod` come from packages attached in loadLibraries.r. quartz()/quartz.save()
# are macOS-only graphics devices — confirm before running elsewhere.
source("Scripts/loadLibraries.r")
source("Scripts/PlottingScripts/PlotVerse.r")
load("DerivedData/FlowBookCorpus.rdata")
meta = read.delim("SourceData/verseMetadata.txt",header=T)
# 4.1 ---------------------------------------------------------------
# text, see Word file.
# 4.2., Lose Yourself demo, transcription -----------------------------------
load("DerivedData/FlowBookCorpus.rdata")
PlotVerse("loseYourselfDemo1",m.range=-1:7,meas.scale = .8, Width=3.5,plot.rhymeClasses = T)
quartz.save(file="Examples/Chapter 4/Example 4.2 (bottom).pdf",type="pdf");dev.off()
PlotVerse("loseYourselfDemo1",m.range=8:15,meas.scale = .8, Width=3.5,plot.rhymeClasses = T)
quartz.save(file="Examples/Chapter 4/Example 4.2 (top).pdf",type="pdf");dev.off()
# 4.3a and 3b. accents on each position of "Lose Yourself" demo ------------
# Metric positions (beatIndex mod 4) of all accented syllables in the verse.
x = corpora %>% filter(verse=="loseYourselfDemo1",accent==1) %>%
  .[["beatIndex"]] %>% mod(4)
# Pad with one of each of the 16 positions so table() covers every bin,
# then subtract the padding back out.
a = table(c(x,seq(0,3.75,.25)))-1
names(a) = 0:15
# Distribution of accent counts across positions (same padding trick).
b = table(c(a,0:16))-1
quartz(width=4.55,height=2)
par(mfrow=c(1,2),mar = c(3.5,3.5,0,0),mgp = c(2.5,1,0),las=1,bty='n',
    family="Times New Roman",cex=.65)
barplot(a,
        mgp = c(2,1,0),family="Times New Roman",
        col = rep(c("black",rep("gray",3)),4),cex.names = .5,
        xlab = "Metric position",ylab="Count of accents")
#par(fig = c(0,1,0,.5),new=T)
barplot(b,cex.names = .5,mgp = c(2,1,0),family="Times New Roman",
        xlab = "Count of accents",ylab="Number of positions")
quartz.save(file="Examples/Chapter 4/Example 4.3.pdf",type="pdf");dev.off()
remove(list=c("x","a","b"))
# 4.4. Lose Yourself, released version, transcription -----------------------
PlotVerse("loseYourself1",m.range=-1:7,meas.scale = .8, Width=3.5,plot.rhymeClasses = T)
quartz.save(file="Examples/Chapter 4/Example 4.4 (bottom).pdf",type="pdf");dev.off()
PlotVerse("loseYourself1",m.range=8:15,meas.scale = .8, Width=3.5,plot.rhymeClasses = T)
quartz.save(file="Examples/Chapter 4/Example 4.4 (top).pdf",type="pdf");dev.off()
# 4.5a and 5b (compare to Example 3) --------------------------------
# Same accent-distribution plots as 4.3, split into the first half of the
# verse (beatIndex < 32) and the second half (beatIndex >= 32).
x1 = corpora %>%
  filter(verse=="loseYourself1",accent==1,beatIndex<32) %>%
  select(syllable,beatIndex) %>%
  .[["beatIndex"]] %>% mod(4)
x2 = corpora %>%
  filter(verse=="loseYourself1",accent==1,beatIndex>=32) %>%
  select(syllable,beatIndex) %>%
  .[["beatIndex"]] %>% mod(4)
a1 = table(c(x1,seq(0,3.75,.25)))-1
names(a1) = 0:15
b1 = table(c(a1,0:8))-1
a2 = table(c(x2,seq(0,3.75,.25)))-1
names(a2) = 0:15
b2 = table(c(a2,0:8))-1
quartz(width=4.55,height=4.5)
par(mfrow=c(2,2),mar = c(3.5,3.5,0,0),mgp = c(2.5,1,0),cex = .65,las=1,bty='n',
    family="Times New Roman")
barplot(a1,las=1,cex.names=.5,family="Times New Roman",
        col = rep(c("black",rep("gray",3)),4),
        xlab = "Metric position",ylab="Count of accents")
barplot(b1,las=1,cex.names = .5,family="Times New Roman",
        xlab = "Count of accents",ylab="Number of positions")
barplot(a2,las=1,cex.names=.5,family="Times New Roman",
        col = rep(c("black",rep("gray",3)),4),
        xlab = "Metric position",ylab="Count of accents")
barplot(b2,las=1,cex.names = .5,family="Times New Roman",
        xlab = "Count of accents",ylab="Number of positions")
quartz.save(file="Examples/Chapter 4/Example 4.5.pdf",type="pdf");dev.off()
remove(list=c("x1","x2","a1","a2","b1","b2"))
# 4.6. Temperley reprint ----------------------------------------------------
# 4.7. 8 Mile, verse 3, mm. 21–24 -----------------------------------
load("DerivedData/FlowBookCorpus.rdata")
PlotVerse(verseTitle = "8mile3", row.index = 270:307)
quartz.save(file="Examples/Chapter 4/Example 4.7.pdf",type="pdf");dev.off()
# 4.8. Two grooves plotted around the circle ------------------------
source("Scripts/PlottingScripts/PlotSet.r")
quartz(width=4.55,height=2.225)
par(mfrow=c(1,2),mar=rep(0,4),mgp = c(2.5,1,0),cex = .65,las=1,bty='n',
    family="Times New Roman")
PlotSet(c(2,4,7,10,12,15),16,F)
PlotSet(c(0,3,5,8,11,13),16,F)
quartz.save(file="Examples/Chapter 4/Example 4.8.pdf",type="pdf");dev.off()
# 4.9. All intervals of 333322 -------------------------------------------------
# Plot <333322> with all pairwise intervals drawn as chords of the circle.
set = c(0,3,6,9,12,14)
card = 16
PlotSet(set,card,launch.quartz = T,width=2)
apply(combn(set,2),2,function(i) AddLine(i,16))
# Interval content: fold intervals larger than half the cycle (8) to their
# complement, then sort.
ints = apply(combn(set,2),2,diff) %>% sort
ints[ints>8] = 16 - ints[ints>8]
ints = sort(ints)
quartz.save(file="Examples/Chapter 4/Example 4.9.pdf",type="pdf");dev.off()
remove(list=c("AddLine","PlotSet","card","ints","set"))
# 4.10. ICH of 333322 --------------------------------------------------------
# Interval-content histogram of `set` within a cycle of length `card`:
# for every pair of onsets, take the shorter distance around the circle,
# and tabulate how often each interval size 1..ceiling(card/2) occurs.
GetIntervalContentHistogram = function(set,card){
  pairs = combn(set, 2)
  # Ordered differences in both directions around the cycle.
  fwd = apply(pairs, 2, diff)
  bwd = mod(apply(pairs[2:1, ], 2, diff), card)
  # Shorter of the two directions for each pair.
  shortest = pmin(fwd, bwd)
  # Pad with one of each possible interval so table() covers every bin,
  # then subtract the padding back out.
  table(c(shortest, 1:(ceiling(card/2)))) - 1
}
# Build the interval-content histogram of <333322> and plot it (Example 4.10).
x = GetIntervalContentHistogram(c(0,3,6,9,12,14),16)
quartz(width=2,height=2)
par(mar=c(3,3,1,0),mgp = c(2,1,0), cex.axis = .65,cex.lab=.65)
barplot(x,cex.names = .75,xlab="Duration",mgp = c(2,1,0),ylab="Count",las=1,family="Times New Roman")
quartz.save(file="Examples/Chapter 4/Example 4.10.pdf",type="pdf");dev.off()
remove(list=c("GetIntervalContentHistogram","x"))
# 4.11. (See Word document) -------------------------------------------------
# 4.12. (Reprint of Pressing) -----------------------------------------------
# 4.13. complexity of different rotations of groove classes ------------------
# `vals` is created by the sourced analysis script — one list element per
# groove class; each panel plots the rotations of one class on a number line.
source("Scripts/AnalysisScripts/Chapter 4 Analysis Script (excerpt).r")
r = range(vals)
quartz(width=4.5,height=2.5)
par(mfcol = c(7,1),mar=c(0,0,0,0),cex = .65)
llply(1:7,function(i){
  plot(vals[[i]],rep(0,length(vals[[i]])),xlim = r,xaxt="n",pch=20,bty="n",
       yaxt="n")
  text(vals[[i]],rep(0,length(vals[[i]])),0:(length(vals[[i]])-1),pos=1,family="Times New Roman",font=3)
  points(0:4,rep(0,5),pch="|")
  lines(r,rep(0,2))
  text(0.25,0,names(vals)[i],pos = 3,family="Times New Roman",cex=1.25)
  # NOTE(review): `2.64 %in% i` is TRUE only if i equals 2.64, which never
  # happens for integer i, so this axis labeling never fires — confirm
  # whether it was meant to trigger for panels 2 and 6 (e.g. `i %in% c(2,6)`).
  if(2.64 %in% i){text(1:4,rep(0,4),1:4,pos=1)}
})
quartz.save(file="Examples/Chapter 4/Example 4.13.pdf",type="pdf")
quartz.save(file="Examples/Chapter 6/Example 6.2.pdf",type="pdf");dev.off()
remove(list=c("groove.classes","r","vals"))
# 4.14: Three segments with <332 2222> --------------------------------------
PlotVerse("astonMartin",row.index = 17:64,plot.3limit = F)
quartz.save(file="Examples/Chapter 4/Example 4.14a.pdf",type="pdf");dev.off()
PlotVerse("goToSleep1",m.range = 2:5,plot.3limit = F)
quartz.save(file="Examples/Chapter 4/Example 4.14b.pdf",type="pdf");dev.off()
PlotVerse("theWayIAm",m.range = 15:18,plot.3limit = F)
quartz.save(file="Examples/Chapter 4/Example 4.14c.pdf",type="pdf");dev.off()
# 15: Drug Ballad, verse 1, mm. 1–4 ---------------------------------------
PlotVerse("drugBallad1",m.range=0:3)
quartz.save(file="Examples/Chapter 4/Example 4.15.pdf",type="pdf");dev.off()
# 16: Renegade, verse two, mm. 5-8 ----------------------------------------
PlotVerse("renegade2",m.range=4:7,plot.rhymeClasses = T)
quartz.save(file="Examples/Chapter 4/Example 4.16.pdf",type="pdf");dev.off()
# 17 Possible grooves beginning at 0 of m. 5 ------------------------------
grooves = fread("DerivedData/corpus_grooves.txt")
grooves = tbl_df(grooves)
# One representative groove per adjusted length, ordered by class.
grooves %>% filter(verse=="renegade2", start==64,adj.length>=16) %>%
  select(class,rotation,effort,rate,length,adj.length) %>%
  group_by(adj.length) %>% slice(1) %>% arrange(class,desc(adj.length)) %>%
  ungroup %>% mutate(index=(dim(.)[1]):1) -> x
quartz(width=4.55,height=2)
par(mar = c(0,0,0,0),cex=.65)
x1 = -35  # x-position of the left-margin row labels
plot(0,0,col="white",xlim=c(x1,64),ylim=c(dim(x)[1],0),yaxt="n",ylab="",xaxt="n",xlab="",bty="n")
for(i in 1:dim(x)[1]){
  # Gray segment = full length, black = adjusted length; lty encodes class.
  lines(c(0,x$length[i]),rep(i,2),lty=match(x$class[i],unique(x$class)),lwd=2,col=gray(.7))
  lines(c(0,x$adj.length[i]),rep(i,2),lty=match(x$class[i],unique(x$class)),lwd=2)
  text(x1,i,
       paste(i,". ",x$class[i],"-",x$rotation[i]," (",x$length[i],",",x$effort[i],")",sep=""),
       font=3,pos=4,family="Times New Roman")
}
text(seq(0,64,16),rep(0,5),c("m. 5","m. 6","m. 7","m.8","m. 9"),font=3,family="Times New Roman",cex=.65)
quartz.save(file="Examples/Chapter 4/Example 4.17.pdf",type="pdf");dev.off()
remove(list=c("x","x1","i","grooves"))
# 18 Segmentation of "Renegade" --------------------------------------------
source("Scripts/PlottingScripts/PlotGrooveSegmentation.r")
segments = fread("DerivedData/corpus_groove_segments.txt",header=T)
PlotGrooveSegmentation("renegade2")
quartz.save(file="Examples/Chapter 4/Example 4.18.pdf",type="pdf");dev.off()
remove(list=c("segments","GrooveSwapDistance","GrooveSwapDistanceVerse","RotateGroovePF","PlotGrooveSegmentation"))
# Example 6.13 Groove segmentations of verses of Soldier ------------------
# Note: this needs to be rotated on the page.
source("Scripts/PlottingScripts/PlotGrooveSegmentation.r")
quartz(width=4.5,height=2.5)
par(mfcol = c(2,2),mar=c(0,0,1,0),cex = .65,las=1,bty='n',family="Times New Roman")
llply(c("loseYourselfDemo1","loseYourself1"),PlotGrooveSegmentation,
      r.limit=c(0,.125),new.quartz=F,plot.all.costs=F,
      x.max=1.2)
quartz.save(file="Examples/Chapter 4/Example 4.19.pdf",type="pdf");dev.off()
| /Scripts/PlottingScripts/Chapter 4 Plotting Script.r | no_license | mohriner/flowBook | R | false | false | 9,242 | r | source("Scripts/loadLibraries.r")
source("Scripts/PlottingScripts/PlotVerse.r")
load("DerivedData/FlowBookCorpus.rdata")
meta = read.delim("SourceData/verseMetadata.txt",header=T)
# 4.1 ---------------------------------------------------------------
# text, see Word file.
# 4.2., Lose Yourself demo, transcription -----------------------------------
load("DerivedData/FlowBookCorpus.rdata")
PlotVerse("loseYourselfDemo1",m.range=-1:7,meas.scale = .8, Width=3.5,plot.rhymeClasses = T)
quartz.save(file="Examples/Chapter 4/Example 4.2 (bottom).pdf",type="pdf");dev.off()
PlotVerse("loseYourselfDemo1",m.range=8:15,meas.scale = .8, Width=3.5,plot.rhymeClasses = T)
quartz.save(file="Examples/Chapter 4/Example 4.2 (top).pdf",type="pdf");dev.off()
# 4.3a and 3b. accents on each position of “Lose Yourself” demo ------------
x = corpora %>% filter(verse=="loseYourselfDemo1",accent==1) %>%
.[["beatIndex"]] %>% mod(4)
a = table(c(x,seq(0,3.75,.25)))-1
names(a) = 0:15
b = table(c(a,0:16))-1
quartz(width=4.55,height=2)
par(mfrow=c(1,2),mar = c(3.5,3.5,0,0),mgp = c(2.5,1,0),las=1,bty='n',
family="Times New Roman",cex=.65)
barplot(a,
mgp = c(2,1,0),family="Times New Roman",
col = rep(c("black",rep("gray",3)),4),cex.names = .5,
xlab = "Metric position",ylab="Count of accents")
#par(fig = c(0,1,0,.5),new=T)
barplot(b,cex.names = .5,mgp = c(2,1,0),family="Times New Roman",
xlab = "Count of accents",ylab="Number of positions")
quartz.save(file="Examples/Chapter 4/Example 4.3.pdf",type="pdf");dev.off()
remove(list=c("x","a","b"))
# 4.4. Lose Yourself, released version, transcription -----------------------
PlotVerse("loseYourself1",m.range=-1:7,meas.scale = .8, Width=3.5,plot.rhymeClasses = T)
quartz.save(file="Examples/Chapter 4/Example 4.4 (bottom).pdf",type="pdf");dev.off()
PlotVerse("loseYourself1",m.range=8:15,meas.scale = .8, Width=3.5,plot.rhymeClasses = T)
quartz.save(file="Examples/Chapter 4/Example 4.4 (top).pdf",type="pdf");dev.off()
# 4.5a and 5b (compare to Example 3) --------------------------------
x1 = corpora %>%
filter(verse=="loseYourself1",accent==1,beatIndex<32) %>%
select(syllable,beatIndex) %>%
.[["beatIndex"]] %>% mod(4)
x2 = corpora %>%
filter(verse=="loseYourself1",accent==1,beatIndex>=32) %>%
select(syllable,beatIndex) %>%
.[["beatIndex"]] %>% mod(4)
a1 = table(c(x1,seq(0,3.75,.25)))-1
names(a1) = 0:15
b1 = table(c(a1,0:8))-1
a2 = table(c(x2,seq(0,3.75,.25)))-1
names(a2) = 0:15
b2 = table(c(a2,0:8))-1
quartz(width=4.55,height=4.5)
par(mfrow=c(2,2),mar = c(3.5,3.5,0,0),mgp = c(2.5,1,0),cex = .65,las=1,bty='n',
family="Times New Roman")
barplot(a1,las=1,cex.names=.5,family="Times New Roman",
col = rep(c("black",rep("gray",3)),4),
xlab = "Metric position",ylab="Count of accents")
barplot(b1,las=1,cex.names = .5,family="Times New Roman",
xlab = "Count of accents",ylab="Number of positions")
barplot(a2,las=1,cex.names=.5,family="Times New Roman",
col = rep(c("black",rep("gray",3)),4),
xlab = "Metric position",ylab="Count of accents")
barplot(b2,las=1,cex.names = .5,family="Times New Roman",
xlab = "Count of accents",ylab="Number of positions")
quartz.save(file="Examples/Chapter 4/Example 4.5.pdf",type="pdf");dev.off()
remove(list=c("x1","x2","a1","a2","b1","b2"))
# 4.6. Temperley reprint ----------------------------------------------------
# 4.7. 8 Mile, verse 3, mm. 21–24 -----------------------------------
load("DerivedData/FlowBookCorpus.rdata")
PlotVerse(verseTitle = "8mile3", row.index = 270:307)
quartz.save(file="Examples/Chapter 4/Example 4.7.pdf",type="pdf");dev.off()
# 4.8. Two grooves plotted around the circle ------------------------
source("Scripts/PlottingScripts/PlotSet.r")
quartz(width=4.55,height=2.225)
par(mfrow=c(1,2),mar=rep(0,4),mgp = c(2.5,1,0),cex = .65,las=1,bty='n',
family="Times New Roman")
PlotSet(c(2,4,7,10,12,15),16,F)
PlotSet(c(0,3,5,8,11,13),16,F)
quartz.save(file="Examples/Chapter 4/Example 4.8.pdf",type="pdf");dev.off()
# 4.9. All intervals of 333322 -------------------------------------------------
# Plot <333322> with all intervals
set = c(0,3,6,9,12,14)
card = 16
PlotSet(set,card,launch.quartz = T,width=2)
apply(combn(set,2),2,function(i) AddLine(i,16))
ints = apply(combn(set,2),2,diff) %>% sort
ints[ints>8] = 16 - ints[ints>8]
ints = sort(ints)
quartz.save(file="Examples/Chapter 4/Example 4.9.pdf",type="pdf");dev.off()
remove(list=c("AddLine","PlotSet","card","ints","set"))
# 4.10. ICH of 333322 --------------------------------------------------------
# Interval-content histogram of a cyclic set (cf. pitch-class set theory).
#
# For every unordered pair of positions in `set`, the interval class is the
# shorter of the clockwise and counter-clockwise distances around a cycle of
# `card` positions. Returns a count table over interval classes
# 1:ceiling(card / 2), padded with zeros for classes that never occur.
#
# Improvements over the original: combn(set, 2) is computed once instead of
# four times, and the body uses only base R (`%%`, pmin) instead of the
# magrittr pipe and the non-base mod(), so the function is self-contained.
#
# Args:
#   set:  integer vector of positions on the cycle (e.g. onsets in a groove).
#   card: cardinality of the cycle (number of metric positions, e.g. 16).
# Returns:
#   A named count table indexed by interval class.
GetIntervalContentHistogram <- function(set, card) {
  pairs <- combn(set, 2)                    # each column is one unordered pair
  fwd <- (pairs[2, ] - pairs[1, ]) %% card  # clockwise distance per pair
  ic <- pmin(fwd, (card - fwd) %% card)     # interval class = shorter way round
  # Append one dummy occurrence of every possible class so table() covers all
  # bins, then subtract the dummies back out (same trick as the original).
  table(c(ic, seq_len(ceiling(card / 2)))) - 1
}
x = GetIntervalContentHistogram(c(0,3,6,9,12,14),16)
quartz(width=2,height=2)
par(mar=c(3,3,1,0),mgp = c(2,1,0), cex.axis = .65,cex.lab=.65)
barplot(x,cex.names = .75,xlab="Duration",mgp = c(2,1,0),ylab="Count",las=1,family="Times New Roman")
quartz.save(file="Examples/Chapter 4/Example 4.10.pdf",type="pdf");dev.off()
remove(list=c("GetIntervalContentHistogram","x"))
# 4.11. (See Word document) -------------------------------------------------
# 4.12. (Reprint of Pressing) -----------------------------------------------
# 4.13. complexity of different rotations of groove classes ------------------
source("Scripts/AnalysisScripts/Chapter 4 Analysis Script (excerpt).r")
r = range(vals)
quartz(width=4.5,height=2.5)
par(mfcol = c(7,1),mar=c(0,0,0,0),cex = .65)
# Fig. 4.13 / 6.2: one horizontal strip per groove class (7 rows), marking the
# complexity value of each rotation of that class along a shared x-range `r`.
# NOTE(review): `2.64 %in% i` is always FALSE for i in 1:7, so the final
# text() call (labels 1:4) is dead code -- possibly meant `i %in% c(2,6,4)`
# or similar; confirm intent before relying on it.
llply(1:7,function(i){
# dots: complexity values of each rotation, drawn on a bare axis-free strip
plot(vals[[i]],rep(0,length(vals[[i]])),xlim = r,xaxt="n",pch=20,bty="n",
yaxt="n")
# label each dot with its 0-based rotation index, italic serif
text(vals[[i]],rep(0,length(vals[[i]])),0:(length(vals[[i]])-1),pos=1,family="Times New Roman",font=3)
points(0:4,rep(0,5),pch="|")  # tick marks at integer complexity values 0..4
lines(r,rep(0,2))  # baseline spanning the shared x-range
text(0.25,0,names(vals)[i],pos = 3,family="Times New Roman",cex=1.25)  # row label
if(2.64 %in% i){text(1:4,rep(0,4),1:4,pos=1)}  # dead branch: see NOTE above
})
quartz.save(file="Examples/Chapter 4/Example 4.13.pdf",type="pdf")
quartz.save(file="Examples/Chapter 6/Example 6.2.pdf",type="pdf");dev.off()
remove(list=c("groove.classes","r","vals"))
# 4.14: Three segments with <332 2222> --------------------------------------
PlotVerse("astonMartin",row.index = 17:64,plot.3limit = F)
quartz.save(file="Examples/Chapter 4/Example 4.14a.pdf",type="pdf");dev.off()
PlotVerse("goToSleep1",m.range = 2:5,plot.3limit = F)
quartz.save(file="Examples/Chapter 4/Example 4.14b.pdf",type="pdf");dev.off()
PlotVerse("theWayIAm",m.range = 15:18,plot.3limit = F)
quartz.save(file="Examples/Chapter 4/Example 4.14c.pdf",type="pdf");dev.off()
# 15: Drug Ballad, verse 1, mm. 1–4 ---------------------------------------
PlotVerse("drugBallad1",m.range=0:3)
quartz.save(file="Examples/Chapter 4/Example 4.15.pdf",type="pdf");dev.off()
# 16: Renegade, verse two, mm. 5-8 ----------------------------------------
PlotVerse("renegade2",m.range=4:7,plot.rhymeClasses = T)
quartz.save(file="Examples/Chapter 4/Example 4.16.pdf",type="pdf");dev.off()
# 17 Possible grooves beginning at 0 of m. 5 ------------------------------
grooves = fread("DerivedData/corpus_grooves.txt")
grooves = tbl_df(grooves)
grooves %>% filter(verse=="renegade2", start==64,adj.length>=16) %>%
select(class,rotation,effort,rate,length,adj.length) %>%
group_by(adj.length) %>% slice(1) %>% arrange(class,desc(adj.length)) %>%
ungroup %>% mutate(index=(dim(.)[1]):1) -> x
quartz(width=4.55,height=2)
par(mar = c(0,0,0,0),cex=.65)
x1 = -35
plot(0,0,col="white",xlim=c(x1,64),ylim=c(dim(x)[1],0),yaxt="n",ylab="",xaxt="n",xlab="",bty="n")
for(i in 1:dim(x)[1]){
lines(c(0,x$length[i]),rep(i,2),lty=match(x$class[i],unique(x$class)),lwd=2,col=gray(.7))
lines(c(0,x$adj.length[i]),rep(i,2),lty=match(x$class[i],unique(x$class)),lwd=2)
text(x1,i,
paste(i,". ",x$class[i],"-",x$rotation[i]," (",x$length[i],",",x$effort[i],")",sep=""),
font=3,pos=4,family="Times New Roman")
}
text(seq(0,64,16),rep(0,5),c("m. 5","m. 6","m. 7","m.8","m. 9"),font=3,family="Times New Roman",cex=.65)
quartz.save(file="Examples/Chapter 4/Example 4.17.pdf",type="pdf");dev.off()
remove(list=c("x","x1","i","grooves"))
# 18 Segmentation of “Renegade --------------------------------------------
source("Scripts/PlottingScripts/PlotGrooveSegmentation.r")
segments = fread("DerivedData/corpus_groove_segments.txt",header=T)
PlotGrooveSegmentation("renegade2")
quartz.save(file="Examples/Chapter 4/Example 4.18.pdf",type="pdf");dev.off()
remove(list=c("segments","GrooveSwapDistance","GrooveSwapDistanceVerse","RotateGroovePF","PlotGrooveSegmentation"))
# Example 6.13 Groove segmentations of verses of Soldier ------------------
# Note: this needs to be rotated on the page.
source("Scripts/PlottingScripts/PlotGrooveSegmentation.r")
quartz(width=4.5,height=2.5)
par(mfcol = c(2,2),mar=c(0,0,1,0),cex = .65,las=1,bty='n',family="Times New Roman")
llply(c("loseYourselfDemo1","loseYourself1"),PlotGrooveSegmentation,
r.limit=c(0,.125),new.quartz=F,plot.all.costs=F,
x.max=1.2)
quartz.save(file="Examples/Chapter 4/Example 4.19.pdf",type="pdf");dev.off()
|
library(tidyverse)
library(scales)
library(sf)
library(svglite)
# import the csv
raw <- read_csv('/Users/chriswhong/Sites/data-stories-scripts/nyc-capital-projects/scraper/csv/combined.csv')
# hand-jam a dataframe of borough populations so we can normalize things if we want to later
# Hand-entered 2019 borough population estimates, used later to normalize
# spending per capita. frame_data() is the deprecated name for
# tibble::tribble() (tidyverse is loaded above), so use tribble() directly.
population <- tribble(
  ~borough,    ~pop_estimate_2019,
  "BRONX",     1418207,
  "BROOKLYN",  2559903,
  "MANHATTAN", 1628706,
  "QUEENS",    2253858,
  "RICHMOND",  476143,
  "CITYWIDE",  8336817
)
# calculate per-capita totals
boroughs <- group_by(raw, borough) %>% summarize(prior_actuals = sum(combined_prior_actuals), planned_spending = sum(combined_total)) %>%
left_join(population, by='borough') %>%
mutate(
prior_actuals_per_capita = (prior_actuals / pop_estimate_2019),
planned_spending_per_capita = (planned_spending / pop_estimate_2019)
)
# totals for the whole dataset
grand_total <- sum(raw$combined_total) #105.8 Billion
grand_prior_actuals <- sum(raw$combined_prior_actuals) #40.9 Billion
grand_planned_spending <- grand_total - grand_prior_actuals #64.9 Billion
# breakdown for each borough + citywide
by_borough <- group_by(raw, borough) %>% summarize(
combined_total = sum(combined_total),
combined_prior_actuals = sum(combined_prior_actuals)
) %>% mutate(combined_planned_spending = combined_total - combined_prior_actuals) %>%
mutate(
combined_prior_actuals = paste('$', label_number_si(accuracy=0.1)(combined_prior_actuals * 1000), sep=''),
combined_planned_spending = paste('$', label_number_si(accuracy=0.1)(combined_planned_spending * 1000), sep=''),
combined_total = paste('$', label_number_si(accuracy=0.1)(combined_total * 1000), sep='')
)
# a list of excluded community districts (I think these mean boroughwide, or citywide for 000 and 099)
excludedCommunityDistricts = c("000", "099", "100", "199", "200", "299", "300", "399", "400", "499", "500")
# given a space-delimited community districts string, determine if there is at least one local district
# Decide whether a project serves at least one *local* community district.
#
# `communityBoardsServed` is a single space-delimited string of 3-digit
# community-district codes; codes in `excluded` denote borough-wide or
# city-wide sentinel entries rather than a specific district.
#
# Args:
#   communityBoardsServed: scalar character, e.g. "306 307 300", or NA.
#   excluded: codes treated as non-local. Defaults to the same sentinel list
#             as the script-level `excludedCommunityDistricts`, so existing
#             one-argument calls behave identically.
# Returns:
#   TRUE if at least one served code is a real (non-excluded) district.
#
# Fixes vs. the original:
#   * NA is tested before `==` (NA == "" yields NA, not FALSE).
#   * Excluded codes are counted token-wise with %in%. The old
#     str_detect(string, excluded) matched each excluded *pattern* at most
#     once, so repeated excluded codes were under-counted (e.g. "100 100"
#     was wrongly classified as local) and substrings could match across
#     token boundaries.
#   * Returns TRUE/FALSE rather than the reassignable shorthand F.
#   * Debug print() calls removed (they fired once per row inside mutate()).
checkIfLocal <- function(communityBoardsServed,
                         excluded = c("000", "099", "100", "199", "200",
                                      "299", "300", "399", "400", "499",
                                      "500")) {
  if (is.na(communityBoardsServed) || communityBoardsServed == "") {
    return(FALSE)
  }
  parts <- strsplit(communityBoardsServed, " ", fixed = TRUE)[[1]]
  parts <- parts[nzchar(parts)]  # ignore empty tokens from doubled spaces
  any(!(parts %in% excluded))
}
# necessary to use the custom function above in mutate()
v_checkIfLocal <- Vectorize(checkIfLocal)
# to get a rough idea of boroughwide vs community-specific, call any project with at least one CD identified in community_board_served as not boroughwide
boroughs <- filter(raw, borough != 'CITYWIDE') %>%
mutate(
is_local = v_checkIfLocal(community_boards_served)
) %>%
group_by(borough, is_local) %>% summarize(
combined_total = sum(combined_total),
combined_prior_actuals = sum(combined_prior_actuals)
) %>% mutate(combined_planned_spending = combined_total - combined_prior_actuals) %>%
mutate(
combined_prior_actuals = combined_prior_actuals * 1000,
combined_planned_spending = combined_planned_spending * 1000,
combined_total = combined_total * 1000
) %>% pivot_wider(
id_cols=borough,
names_from = is_local,
values_from = combined_total
) %>% rename(c("boroughwide"="FALSE", "local"="TRUE"))
write_csv(boroughs, '/Users/chriswhong/Desktop/boroughwide.csv')
# prettify by multiplying by 1000 and using label_number_si to abbreviate
pretty_boroughs <- mutate(
boroughs,
prior_actuals=label_number_si(accuracy=0.1)(prior_actuals * 1000),
planned_spending=label_number_si(accuracy=0.1)(planned_spending * 1000),
prior_actuals_per_capita=label_number_si(accuracy=0.1)(prior_actuals_per_capita * 1000),
planned_spending_per_capita=label_number_si(accuracy=0.1)(planned_spending_per_capita * 1000)
)
# analysis of one community district (mine), Brooklyn 6
cd6 <- filter(raw, grepl("306",community_boards_served))
write_csv(cd6, '/Users/chriswhong/Desktop/cd6.csv')
cd6_total <- sum(cd6$combined_total) # 480.9M
# analysis of one community district 208
bx8 <- filter(raw, grepl("208",community_boards_served)) %>% select('project_description', 'combined_total')
# let's just get combined_total for every community district in NYC
# caveat - this will not split costs for projects with more than one CD served
# first we need to split every row with multiple projects into multiple rows with a single project
separated = separate_rows(raw, community_boards_served, sep = " ")
# group by and summarize
community_board_summary <- group_by(separated, community_boards_served) %>%
summarize(num_projects=n(), combined_total=sum(combined_total) * 1000 ) %>%
filter(!(community_boards_served %in% excludedCommunityDistricts), !is.na(community_boards_served))
write_csv(community_board_summary, '/Users/chriswhong/Desktop/capitalprojects/community_board_summary.csv')
# group by and summarize on community board and total spend
community_board_category_summary <- group_by(separated, community_boards_served, ten_year_plan_category, .drop=FALSE) %>%
summarize(num_projects=n(), combined_total=sum(combined_total) * 1000 ) %>%
filter(!(community_boards_served %in% excludedCommunityDistricts), !is.na(community_boards_served)) %>%
ungroup()
# get the totals for each category, which we will use in labeling the facets in the facet_wrap
category_totals <- group_by(community_board_category_summary, ten_year_plan_category) %>%
summarize(category_total=sum(combined_total) ,category_total_label = paste('$', label_number_si(accuracy=0.1)(sum(combined_total)), sep = "")) %>%
mutate(
ten_year_plan_category_label = paste(ten_year_plan_category, ' | ',category_total_label, sep='')
)
# use complete() to fill in each combination, so that every district is mappable for every category whether it has data or not
all_combinations <- complete(community_board_category_summary, community_boards_served, ten_year_plan_category)
# thanks to https://timogrossenbacher.ch/2016/12/beautiful-thematic-maps-with-ggplot2-only/#a-better-color-scale
# for a wonderful example
labels <- c('< $1M', '$1M-10M', '$10M-100M' ,'$100M-500M', '> $500M')
pretty_breaks <- c(0, 1000000,10000000,100000000,500000000, max(all_combinations$combined_total, na.rm = T))
# cut the dataframe into breaks
all_combinations$breaks <- cut(all_combinations$combined_total,
breaks = pretty_breaks,
include.lowest = TRUE,
labels = labels)
# small multiples for each ten_year_plan_category
cds <- read_sf('/Users/chriswhong/Sites/data-stories-scripts/nyc-capital-projects/analysis/data/community_districts/community_districts.shp') %>%
mutate(boro_cd_string = toString(boro_cd)) %>%
left_join(all_combinations, by=c('borocdstr' = 'community_boards_served'))
# join with the totals for labeling
cds <- left_join(cds, category_totals, by='ten_year_plan_category')
# limit to smaller number of categories for testing, because doing the whole thing takes a long time
cds <- filter(
cds,
ten_year_plan_category %in% c(
'PROGRAMMATIC RESPONSE TO REGULATORY MANDATES',
'LAND ACQUISITION AND TREE PLANTING',
'REHABILITATION OF CITY-OWNED OFFICE SPACE',
'POLICE FACILITIES')
)
# facet wrap of little adorable choropleth maps
cds %>%
ggplot() +
geom_sf(aes(fill = breaks), color = "#cccccc", size = 0.05) +
theme_void() +
facet_wrap(facets=~fct_reorder(ten_year_plan_category_label, category_total, .desc = TRUE), labeller = label_wrap_gen(width=20), ncol=8) +
scale_fill_manual(
breaks=levels(cds$breaks),
values=c('#bdd7e7', '#9ecae1', '#6baed6', '#3182bd', '#2171b5'),
na.value = "#f7f7f7"
) +
theme(strip.text.x = element_text(size = 3.5))
ggsave("/Users/chriswhong/Desktop/facet.pdf", width = 12, height = 12)
| /nyc-capital-projects/analysis/script.R | no_license | qri-io/data-stories-scripts | R | false | false | 7,880 | r | library(tidyverse)
library(scales)
library(sf)
library(svglite)
# import the csv
raw <- read_csv('/Users/chriswhong/Sites/data-stories-scripts/nyc-capital-projects/scraper/csv/combined.csv')
# hand-jam a dataframe of borough populations so we can normalize things if we want to later
# Hand-entered 2019 borough population estimates, used later to normalize
# spending per capita. frame_data() is the deprecated name for
# tibble::tribble() (tidyverse is loaded above), so use tribble() directly.
population <- tribble(
  ~borough,    ~pop_estimate_2019,
  "BRONX",     1418207,
  "BROOKLYN",  2559903,
  "MANHATTAN", 1628706,
  "QUEENS",    2253858,
  "RICHMOND",  476143,
  "CITYWIDE",  8336817
)
# calculate per-capita totals
boroughs <- group_by(raw, borough) %>% summarize(prior_actuals = sum(combined_prior_actuals), planned_spending = sum(combined_total)) %>%
left_join(population, by='borough') %>%
mutate(
prior_actuals_per_capita = (prior_actuals / pop_estimate_2019),
planned_spending_per_capita = (planned_spending / pop_estimate_2019)
)
# totals for the whole dataset
grand_total <- sum(raw$combined_total) #105.8 Billion
grand_prior_actuals <- sum(raw$combined_prior_actuals) #40.9 Billion
grand_planned_spending <- grand_total - grand_prior_actuals #64.9 Billion
# breakdown for each borough + citywide
by_borough <- group_by(raw, borough) %>% summarize(
combined_total = sum(combined_total),
combined_prior_actuals = sum(combined_prior_actuals)
) %>% mutate(combined_planned_spending = combined_total - combined_prior_actuals) %>%
mutate(
combined_prior_actuals = paste('$', label_number_si(accuracy=0.1)(combined_prior_actuals * 1000), sep=''),
combined_planned_spending = paste('$', label_number_si(accuracy=0.1)(combined_planned_spending * 1000), sep=''),
combined_total = paste('$', label_number_si(accuracy=0.1)(combined_total * 1000), sep='')
)
# a list of excluded community districts (I think these mean boroughwide, or citywide for 000 and 099)
excludedCommunityDistricts = c("000", "099", "100", "199", "200", "299", "300", "399", "400", "499", "500")
# given a space-delimited community districts string, determine if there is at least one local district
# Decide whether a project serves at least one *local* community district.
#
# `communityBoardsServed` is a single space-delimited string of 3-digit
# community-district codes; codes in `excluded` denote borough-wide or
# city-wide sentinel entries rather than a specific district.
#
# Args:
#   communityBoardsServed: scalar character, e.g. "306 307 300", or NA.
#   excluded: codes treated as non-local. Defaults to the same sentinel list
#             as the script-level `excludedCommunityDistricts`, so existing
#             one-argument calls behave identically.
# Returns:
#   TRUE if at least one served code is a real (non-excluded) district.
#
# Fixes vs. the original:
#   * NA is tested before `==` (NA == "" yields NA, not FALSE).
#   * Excluded codes are counted token-wise with %in%. The old
#     str_detect(string, excluded) matched each excluded *pattern* at most
#     once, so repeated excluded codes were under-counted (e.g. "100 100"
#     was wrongly classified as local) and substrings could match across
#     token boundaries.
#   * Returns TRUE/FALSE rather than the reassignable shorthand F.
#   * Debug print() calls removed (they fired once per row inside mutate()).
checkIfLocal <- function(communityBoardsServed,
                         excluded = c("000", "099", "100", "199", "200",
                                      "299", "300", "399", "400", "499",
                                      "500")) {
  if (is.na(communityBoardsServed) || communityBoardsServed == "") {
    return(FALSE)
  }
  parts <- strsplit(communityBoardsServed, " ", fixed = TRUE)[[1]]
  parts <- parts[nzchar(parts)]  # ignore empty tokens from doubled spaces
  any(!(parts %in% excluded))
}
# necessary to use the custom function above in mutate()
v_checkIfLocal <- Vectorize(checkIfLocal)
# to get a rough idea of boroughwide vs community-specific, call any project with at least one CD identified in community_board_served as not boroughwide
boroughs <- filter(raw, borough != 'CITYWIDE') %>%
mutate(
is_local = v_checkIfLocal(community_boards_served)
) %>%
group_by(borough, is_local) %>% summarize(
combined_total = sum(combined_total),
combined_prior_actuals = sum(combined_prior_actuals)
) %>% mutate(combined_planned_spending = combined_total - combined_prior_actuals) %>%
mutate(
combined_prior_actuals = combined_prior_actuals * 1000,
combined_planned_spending = combined_planned_spending * 1000,
combined_total = combined_total * 1000
) %>% pivot_wider(
id_cols=borough,
names_from = is_local,
values_from = combined_total
) %>% rename(c("boroughwide"="FALSE", "local"="TRUE"))
write_csv(boroughs, '/Users/chriswhong/Desktop/boroughwide.csv')
# prettify by multiplying by 1000 and using label_number_si to abbreviate
pretty_boroughs <- mutate(
boroughs,
prior_actuals=label_number_si(accuracy=0.1)(prior_actuals * 1000),
planned_spending=label_number_si(accuracy=0.1)(planned_spending * 1000),
prior_actuals_per_capita=label_number_si(accuracy=0.1)(prior_actuals_per_capita * 1000),
planned_spending_per_capita=label_number_si(accuracy=0.1)(planned_spending_per_capita * 1000)
)
# analysis of one community district (mine), Brooklyn 6
cd6 <- filter(raw, grepl("306",community_boards_served))
write_csv(cd6, '/Users/chriswhong/Desktop/cd6.csv')
cd6_total <- sum(cd6$combined_total) # 480.9M
# analysis of one community district 208
bx8 <- filter(raw, grepl("208",community_boards_served)) %>% select('project_description', 'combined_total')
# let's just get combined_total for every community district in NYC
# caveat - this will not split costs for projects with more than one CD served
# first we need to split every row with multiple projects into multiple rows with a single project
separated = separate_rows(raw, community_boards_served, sep = " ")
# group by and summarize
community_board_summary <- group_by(separated, community_boards_served) %>%
summarize(num_projects=n(), combined_total=sum(combined_total) * 1000 ) %>%
filter(!(community_boards_served %in% excludedCommunityDistricts), !is.na(community_boards_served))
write_csv(community_board_summary, '/Users/chriswhong/Desktop/capitalprojects/community_board_summary.csv')
# group by and summarize on community board and total spend
community_board_category_summary <- group_by(separated, community_boards_served, ten_year_plan_category, .drop=FALSE) %>%
summarize(num_projects=n(), combined_total=sum(combined_total) * 1000 ) %>%
filter(!(community_boards_served %in% excludedCommunityDistricts), !is.na(community_boards_served)) %>%
ungroup()
# get the totals for each category, which we will use in labeling the facets in the facet_wrap
category_totals <- group_by(community_board_category_summary, ten_year_plan_category) %>%
summarize(category_total=sum(combined_total) ,category_total_label = paste('$', label_number_si(accuracy=0.1)(sum(combined_total)), sep = "")) %>%
mutate(
ten_year_plan_category_label = paste(ten_year_plan_category, ' | ',category_total_label, sep='')
)
# use complete() to fill in each combination, so that every district is mappable for every category whether it has data or not
all_combinations <- complete(community_board_category_summary, community_boards_served, ten_year_plan_category)
# thanks to https://timogrossenbacher.ch/2016/12/beautiful-thematic-maps-with-ggplot2-only/#a-better-color-scale
# for a wonderful example
labels <- c('< $1M', '$1M-10M', '$10M-100M' ,'$100M-500M', '> $500M')
pretty_breaks <- c(0, 1000000,10000000,100000000,500000000, max(all_combinations$combined_total, na.rm = T))
# cut the dataframe into breaks
all_combinations$breaks <- cut(all_combinations$combined_total,
breaks = pretty_breaks,
include.lowest = TRUE,
labels = labels)
# small multiples for each ten_year_plan_category
cds <- read_sf('/Users/chriswhong/Sites/data-stories-scripts/nyc-capital-projects/analysis/data/community_districts/community_districts.shp') %>%
mutate(boro_cd_string = toString(boro_cd)) %>%
left_join(all_combinations, by=c('borocdstr' = 'community_boards_served'))
# join with the totals for labeling
cds <- left_join(cds, category_totals, by='ten_year_plan_category')
# limit to smaller number of categories for testing, because doing the whole thing takes a long time
cds <- filter(
cds,
ten_year_plan_category %in% c(
'PROGRAMMATIC RESPONSE TO REGULATORY MANDATES',
'LAND ACQUISITION AND TREE PLANTING',
'REHABILITATION OF CITY-OWNED OFFICE SPACE',
'POLICE FACILITIES')
)
# facet wrap of little adorable choropleth maps
cds %>%
ggplot() +
geom_sf(aes(fill = breaks), color = "#cccccc", size = 0.05) +
theme_void() +
facet_wrap(facets=~fct_reorder(ten_year_plan_category_label, category_total, .desc = TRUE), labeller = label_wrap_gen(width=20), ncol=8) +
scale_fill_manual(
breaks=levels(cds$breaks),
values=c('#bdd7e7', '#9ecae1', '#6baed6', '#3182bd', '#2171b5'),
na.value = "#f7f7f7"
) +
theme(strip.text.x = element_text(size = 3.5))
ggsave("/Users/chriswhong/Desktop/facet.pdf", width = 12, height = 12)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hypothesistest.R
\name{ttest_chi}
\alias{ttest_chi}
\title{Provides the t-test and chi-square p value and statistic for binary targets, returned as a dataframe, for a set of columns or for a whole dataframe. In the case of ANOVA, provides summaries of all possible tests}
\usage{
ttest_chi(data, type_of_test, User_BinaryTarget = NULL,
User_Variable = NULL, filename = NULL)
}
\arguments{
\item{For}{Hyp_test()----->data,type_of_test,User_BinaryTarget(optional),User_Variable(optional)}
}
\description{
1.Provides the t-test and chi square's p value and statistic and provides it in a dataframe for a set of columns or for a whole dataframe.
2.In case of ANOVA() provides p values in form of a dataframe
Assumption: No individual columns are provided for function Hyp_test().In such case a normal one on one t-test or chi would be better.
}
\examples{
ttest_chi(df,"chi",c(1,2),c(3,4)),ttest_chi(df,"ttest",filename="Apple")
}
| /man/ttest_chi.Rd | no_license | prasannakumartn/hypothesistest | R | false | true | 1,011 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hypothesistest.R
\name{ttest_chi}
\alias{ttest_chi}
\title{Provides the t-test and chi square's p value and statistic for binary targets and provides it in a dataframe for a set of columns or for a whole dataframe.In case of ANOVA provides all possible tests's summary}
\usage{
ttest_chi(data, type_of_test, User_BinaryTarget = NULL,
User_Variable = NULL, filename = NULL)
}
\arguments{
\item{For}{Hyp_test()----->data,type_of_test,User_BinaryTarget(optional),User_Variable(optional)}
}
\description{
1.Provides the t-test and chi square's p value and statistic and provides it in a dataframe for a set of columns or for a whole dataframe.
2.In case of ANOVA() provides p values in form of a dataframe
Assumption: No individual columns are provided for function Hyp_test().In such case a normal one on one t-test or chi would be better.
}
\examples{
ttest_chi(df,"chi",c(1,2),c(3,4)),ttest_chi(df,"ttest",filename="Apple")
}
|
##Mecklenburg County####
###Below code Cleans and Analysis the data to visualize it into the RShiny Dashboard####
##This is just the demonstration with 5 Insights, we can analysis more parameter
###and also create a dynamic dashboard where user can input the property information to get the insights and trends in a particular county###
### By Pallavi Varandani
# 1. Library
library(shiny)
library(shinythemes)
library(dplyr)
library(readr)
library(RMySQL)
library(data.table)
library(stringi)
library(ggplot2)
require(scales)
# 2. Read data from db
mydb <- dbConnect(MySQL(), user = 'root', password = 'Vansh@1234',
dbname = 'Landis', host = 'localhost', port = 3306)
df <- dbReadTable(mydb,"Mecklenburg")
#3. Cleaning and Transforming the dataset
cols <- names(df)
df[df == "''"] <- NA # replacing blanks with NA
df[df == "'-'"] <- NA # replacing "-" with NA
setDT(df)[, (cols) := lapply(.SD, function(x) type.convert(stri_sub(x, 2, -2)))] #Removing the extra Quotes
#Changing the datatype as per requirements####
df$ParcelID = as.factor(df$ParcelID)
df$AccountNo = as.factor(df$AccountNo)
df$LastSaleDate = as.Date.character(df$LastSaleDate)
df$LastSalePrice = substring(df$LastSalePrice,2)
df$LastSalePrice = as.numeric(gsub(",","",df$LastSalePrice))
df$LandValue = substring(df$LandValue,2)
df$LandValue = as.numeric(gsub(",","",df$LandValue))
df$BuildingValue = substring(df$BuildingValue,2)
df$BuildingValue = as.numeric(gsub(",","",df$BuildingValue))
df$Features = substring(df$Features,2)
df$Features = as.numeric(gsub(",","",df$Features))
df$TotalAppraisedValue = substring(df$TotalAppraisedValue,2)
df$TotalAppraisedValue = as.numeric(gsub(",","",df$TotalAppraisedValue))
df$HeatedArea = as.numeric(gsub(",","",df$HeatedArea))
df$TotalSqFt = as.numeric(gsub(",","",df$TotalSqFt))
df$Bedrooms = as.factor(df$Bedrooms)
#str(df)
#4. Defining the User interface function of the Rshiny Dashboard
ui <- fluidPage(theme = shinytheme("superhero"),
titlePanel(title=h1("Mecklenburg County", align="center")),
sidebarPanel(
selectInput("Type", label = h3("Select the Graph:"),
choices = c("Heat VS LastSalePrice VS Bedroom VS Fuel","Story VS LastSalePrice VS Foundation",
"BuildingValue VS LastSalePrice","Externalwall VS TotalAppraisedValue VS Heat VS Fuel",
"TotalAppraisedValue VS LastSalePrice")),
actionButton(inputId = "go", label = "RUN")
),
mainPanel(
uiOutput("type")
)
)
#5. defining the backend server function of the R Shiny Dashboard
server <- shinyServer(function(input, output){
#Defining the layout for each output
output$type <- renderUI({
check1 <- input$Type == "Heat VS LastSalePrice VS Bedroom VS Fuel"
check2 <- input$Type == "Story VS LastSalePrice VS Foundation"
check3 <- input$Type == "BuildingValue VS LastSalePrice"
check4 <- input$Type == "Externalwall VS TotalAppraisedValue VS Heat VS Fuel"
check5 <- input$Type == "TotalAppraisedValue VS LastSalePrice"
if (check1){
tabsetPanel(tabPanel("Heat VS LastSalePrice VS Bedroom VS Fuel",plotOutput(outputId = "plot1"),textOutput(outputId = 'text1')))
}
else if (check2){
tabsetPanel(tabPanel("Story VS LastSalePrice VS Foundation",plotOutput(outputId = "plot2"),textOutput(outputId = 'text2')))
}
else if (check3){
tabsetPanel(tabPanel("BuildingValue VS LastSalePrice",plotOutput(outputId = "plot3"),textOutput(outputId = 'text3')))
}
else if (check4){
tabsetPanel(tabPanel("Externalwall VS TotalAppraisedValue VS Heat VS Fuel",plotOutput(outputId = "plot4"),textOutput(outputId = 'text4')))
}
else if (check5){
tabsetPanel(tabPanel("TotalAppraisedValue VS LastSalePrice",plotOutput(outputId = "plot5"),textOutput(outputId = 'text5')))
}
else{
print("Not Applicable")
}
})
#plot1
plot1 <- eventReactive(input$go,{
###Heat,LastSalePrice,Bedroom,Fuel###
ggplot(df, aes(x=Heat, y=LastSalePrice, shape=Bedrooms, color=Fuel)) +
geom_point()
})
#plot2
plot2 <- eventReactive(input$go,{
###Story,LastSalePrice,Foundation####
ggplot(df, aes(x=Story, y=LastSalePrice, shape=Foundation, color=Foundation)) +
geom_point()
})
#plot3
plot3 <- eventReactive(input$go,{
###BuildingValue,LastSalePrice####
ggplot(df, aes(x=BuildingValue, y=LastSalePrice))+geom_point()+scale_x_continuous(labels = comma)
})
#plot4
plot4 <- eventReactive(input$go,{
####Externalwall, TotalAppraisedValue,Heat,Fuel#####
ggplot(df, aes(x=ExternalWall,y=TotalAppraisedValue, shape=Heat, color=Fuel))+geom_point()
})
#plot 5
plot5 <- eventReactive(input$go,{
####TotalAppraisedValue,LastSalePrice####
ggplot(df, aes(x=TotalAppraisedValue, y=LastSalePrice))+geom_point()+scale_x_continuous(labels = comma)
})
#Insight 1
text1 <- eventReactive(input$go,{
print("For Mecklenburg County, A 4 Bedroom property with Heat type Forced Air Ducted and Fuel Type Gas has the highest Last Sale Price, however, the Least Last Price is for Baseboard Heat with Electric Fuel 3 Bedroom Property")
})
#Insight 2
text2 <- eventReactive(input$go,{
print("For Mecklenburg County, A Two Story property with Foundation type Crawl Space has the highest Last Sale Price, whereas, the Least Last Sale Price is for a Bi-Level Story Property with Foundation Crawl Space.")
})
#Insight 3
text3 <- eventReactive(input$go,{
print("For Mecklenburg County, There is positive correlation between Last Sale Price and Building Value, i.e. the higher the Building Value, higher is the Last Sale Price")
})
#Insight 4
text4 <- eventReactive(input$go,{
print("For Mecklenburg County, A property with Heat type Forced Air Ducted,Fuel Type Gas and Face Brick External wall has the highest Last Sale Price, however, the Least Last Price is for Forced Air Ducted Heat with Gas Fuel and Interior Plywood External Wall")
})
#Insight 5
text5 <-eventReactive(input$go,{
print("For Mecklenburg County, There is positive correlation between Last Sale Price and Total Appraised Value, i.e. the higher the Total Appraised Value, higher is the Last Sale Price. However, there are exception when Total Appraised Value is low and still the Last Sale Prices are high.")
})
##Defining the outpusa to display
output$plot1 <- renderPlot({plot1()})
output$plot2 <- renderPlot({plot2()})
output$plot3 <- renderPlot({plot3()})
output$plot4 <- renderPlot({plot4()})
output$plot5 <- renderPlot({plot5()})
output$text1 <- renderPrint({text1()})
output$text2 <- renderPrint({text2()})
output$text3 <- renderPrint({text3()})
output$text4 <- renderPrint({text4()})
output$text5 <- renderPrint({text5()})
})
##Calling the Rshiny Dashboard function
shinyApp(ui = ui , server = server) | /Mecklenburg-Dashboard.R | no_license | PallaviVarandani/Real-Estate-Projects | R | false | false | 7,083 | r | ##Mecklenburg County####
###Below code Cleans and Analysis the data to visualize it into the RShiny Dashboard####
##This is just the demonstration with 5 Insights, we can analysis more parameter
###and also create a dynamic dashboard where user can input the property information to get the insights and trends in a particular county###
### By Pallavi Varandani
# 1. Library
library(shiny)
library(shinythemes)
library(dplyr)
library(readr)
library(RMySQL)
library(data.table)
library(stringi)
library(ggplot2)
require(scales)
# 2. Read data from db
mydb <- dbConnect(MySQL(), user = 'root', password = 'Vansh@1234',
dbname = 'Landis', host = 'localhost', port = 3306)
df <- dbReadTable(mydb,"Mecklenburg")
#3. Cleaning and Transforming the dataset
cols <- names(df)
df[df == "''"] <- NA # replacing blanks with NA
df[df == "'-'"] <- NA # replacing "-" with NA
# Strip the surrounding quote characters from every column, then let
# type.convert() pick an initial type for each column.
setDT(df)[, (cols) := lapply(.SD, function(x) type.convert(stri_sub(x, 2, -2)))] #Removing the extra Quotes
#Changing the datatype as per requirements####
# Helper: parse a currency-formatted value such as "$1,234,500" into a number
# by dropping the leading "$" and removing the thousands separators.
parse_currency <- function(v) as.numeric(gsub(",", "", substring(v, 2)))
# All dollar-valued columns share the same cleaning rule.
for (money_col in c("LastSalePrice", "LandValue", "BuildingValue", "Features", "TotalAppraisedValue")) {
  df[[money_col]] <- parse_currency(df[[money_col]])
}
# Identifier-like and categorical columns become factors.
df$ParcelID <- as.factor(df$ParcelID)
df$AccountNo <- as.factor(df$AccountNo)
df$Bedrooms <- as.factor(df$Bedrooms)
# NOTE(review): no format string is supplied, so this assumes the DB stores
# dates as "YYYY-MM-DD" -- confirm against the Mecklenburg table.
df$LastSaleDate <- as.Date.character(df$LastSaleDate)
# Area columns only need the thousands separators removed (no "$" prefix).
df$HeatedArea <- as.numeric(gsub(",", "", df$HeatedArea))
df$TotalSqFt <- as.numeric(gsub(",", "", df$TotalSqFt))
#str(df)
#4. Defining the User interface function of the Rshiny Dashboard
# The sidebar offers a single graph selector plus a RUN button; the main panel
# is a placeholder (uiOutput) that the server fills with the matching tab set.
graph_choices <- c(
  "Heat VS LastSalePrice VS Bedroom VS Fuel",
  "Story VS LastSalePrice VS Foundation",
  "BuildingValue VS LastSalePrice",
  "Externalwall VS TotalAppraisedValue VS Heat VS Fuel",
  "TotalAppraisedValue VS LastSalePrice"
)
ui <- fluidPage(
  theme = shinytheme("superhero"),
  titlePanel(title = h1("Mecklenburg County", align = "center")),
  sidebarPanel(
    selectInput("Type", label = h3("Select the Graph:"), choices = graph_choices),
    actionButton(inputId = "go", label = "RUN")
  ),
  mainPanel(
    uiOutput("type")
  )
)
#5. defining the backend server function of the R Shiny Dashboard
# Server logic: output$type swaps in the tabsetPanel matching the sidebar
# selection, and every plot/insight below is wrapped in
# eventReactive(input$go, ...) so nothing is computed until RUN is pressed.
server <- shinyServer(function(input, output){
#Defining the layout for each output
# Map the selected graph name to the tab holding its plot and insight text.
output$type <- renderUI({
check1 <- input$Type == "Heat VS LastSalePrice VS Bedroom VS Fuel"
check2 <- input$Type == "Story VS LastSalePrice VS Foundation"
check3 <- input$Type == "BuildingValue VS LastSalePrice"
check4 <- input$Type == "Externalwall VS TotalAppraisedValue VS Heat VS Fuel"
check5 <- input$Type == "TotalAppraisedValue VS LastSalePrice"
if (check1){
tabsetPanel(tabPanel("Heat VS LastSalePrice VS Bedroom VS Fuel",plotOutput(outputId = "plot1"),textOutput(outputId = 'text1')))
}
else if (check2){
tabsetPanel(tabPanel("Story VS LastSalePrice VS Foundation",plotOutput(outputId = "plot2"),textOutput(outputId = 'text2')))
}
else if (check3){
tabsetPanel(tabPanel("BuildingValue VS LastSalePrice",plotOutput(outputId = "plot3"),textOutput(outputId = 'text3')))
}
else if (check4){
tabsetPanel(tabPanel("Externalwall VS TotalAppraisedValue VS Heat VS Fuel",plotOutput(outputId = "plot4"),textOutput(outputId = 'text4')))
}
else if (check5){
tabsetPanel(tabPanel("TotalAppraisedValue VS LastSalePrice",plotOutput(outputId = "plot5"),textOutput(outputId = 'text5')))
}
else{
print("Not Applicable")
}
})
#plot1
# Scatter: LastSalePrice vs Heat, shaped by Bedrooms, coloured by Fuel.
plot1 <- eventReactive(input$go,{
###Heat,LastSalePrice,Bedroom,Fuel###
ggplot(df, aes(x=Heat, y=LastSalePrice, shape=Bedrooms, color=Fuel)) +
geom_point()
})
#plot2
# Scatter: LastSalePrice vs Story, shape and colour both mapped to Foundation.
plot2 <- eventReactive(input$go,{
###Story,LastSalePrice,Foundation####
ggplot(df, aes(x=Story, y=LastSalePrice, shape=Foundation, color=Foundation)) +
geom_point()
})
#plot3
# Scatter: LastSalePrice vs BuildingValue; comma axis labels come from scales.
plot3 <- eventReactive(input$go,{
###BuildingValue,LastSalePrice####
ggplot(df, aes(x=BuildingValue, y=LastSalePrice))+geom_point()+scale_x_continuous(labels = comma)
})
#plot4
# Scatter: TotalAppraisedValue vs ExternalWall, shaped by Heat, coloured by Fuel.
plot4 <- eventReactive(input$go,{
####Externalwall, TotalAppraisedValue,Heat,Fuel#####
ggplot(df, aes(x=ExternalWall,y=TotalAppraisedValue, shape=Heat, color=Fuel))+geom_point()
})
#plot 5
# Scatter: LastSalePrice vs TotalAppraisedValue with comma axis labels.
plot5 <- eventReactive(input$go,{
####TotalAppraisedValue,LastSalePrice####
ggplot(df, aes(x=TotalAppraisedValue, y=LastSalePrice))+geom_point()+scale_x_continuous(labels = comma)
})
# Each textN reactive just emits a canned, hand-written insight string that is
# captured by the matching renderPrint below.
#Insight 1
text1 <- eventReactive(input$go,{
print("For Mecklenburg County, A 4 Bedroom property with Heat type Forced Air Ducted and Fuel Type Gas has the highest Last Sale Price, however, the Least Last Price is for Baseboard Heat with Electric Fuel 3 Bedroom Property")
})
#Insight 2
text2 <- eventReactive(input$go,{
print("For Mecklenburg County, A Two Story property with Foundation type Crawl Space has the highest Last Sale Price, whereas, the Least Last Sale Price is for a Bi-Level Story Property with Foundation Crawl Space.")
})
#Insight 3
text3 <- eventReactive(input$go,{
print("For Mecklenburg County, There is positive correlation between Last Sale Price and Building Value, i.e. the higher the Building Value, higher is the Last Sale Price")
})
#Insight 4
text4 <- eventReactive(input$go,{
print("For Mecklenburg County, A property with Heat type Forced Air Ducted,Fuel Type Gas and Face Brick External wall has the highest Last Sale Price, however, the Least Last Price is for Forced Air Ducted Heat with Gas Fuel and Interior Plywood External Wall")
})
#Insight 5
text5 <-eventReactive(input$go,{
print("For Mecklenburg County, There is positive correlation between Last Sale Price and Total Appraised Value, i.e. the higher the Total Appraised Value, higher is the Last Sale Price. However, there are exception when Total Appraised Value is low and still the Last Sale Prices are high.")
})
##Defining the outputs to display
output$plot1 <- renderPlot({plot1()})
output$plot2 <- renderPlot({plot2()})
output$plot3 <- renderPlot({plot3()})
output$plot4 <- renderPlot({plot4()})
output$plot5 <- renderPlot({plot5()})
output$text1 <- renderPrint({text1()})
output$text2 <- renderPrint({text2()})
output$text3 <- renderPrint({text3()})
output$text4 <- renderPrint({text4()})
output$text5 <- renderPrint({text5()})
})
##Calling the Rshiny Dashboard function
shinyApp(ui = ui , server = server) |
require(mzkit);
imports ["GCxGC", "mzweb"] from "mzkit";
imports "visual" from "mzplot";
inputfile = ?"--2d_cdf" || stop("no source data!");
image_TIC = `${dirname(inputfile)}/${basename(inputfile)}.png`;
image_TIC1D = `${dirname(inputfile)}/${basename(inputfile)}_1D.png`;
gcxgc = read.cdf(inputfile);
# gcxgc = GCxGC::extract_2D_peaks(raw)
plt = plot(gcxgc, size = [5000,3300], padding = "padding: 250px 600px 300px 350px;", TrIQ = 1, colorSet = "viridis:turbo");
bitmap(plt, file = image_TIC);
tic = GCxGC::TIC1D(gcxgc);
plt = plot(tic);
bitmap(plt, file = image_TIC1D ); | /dist/Rscript/GCMS/GCxGC_TopMS/plot.R | permissive | xieguigang/mzkit | R | false | false | 583 | r | require(mzkit);
imports ["GCxGC", "mzweb"] from "mzkit";
imports "visual" from "mzplot";
inputfile = ?"--2d_cdf" || stop("no source data!");
image_TIC = `${dirname(inputfile)}/${basename(inputfile)}.png`;
image_TIC1D = `${dirname(inputfile)}/${basename(inputfile)}_1D.png`;
gcxgc = read.cdf(inputfile);
# gcxgc = GCxGC::extract_2D_peaks(raw)
plt = plot(gcxgc, size = [5000,3300], padding = "padding: 250px 600px 300px 350px;", TrIQ = 1, colorSet = "viridis:turbo");
bitmap(plt, file = image_TIC);
tic = GCxGC::TIC1D(gcxgc);
plt = plot(tic);
bitmap(plt, file = image_TIC1D ); |
\name{Pines}
\alias{Pines}
\docType{data}
\title{Measurements of Pine Tree Seedlings}
\description{
Data from pine seedlings planted in 1990
}
\format{
A dataset with 1000 observations on the following 15 variables.
\tabular{rl}{
\code{Row} \tab {Row number in pine plantation}\cr
\code{Col} \tab {Column number in pine plantation}\cr
\code{Hgt90} \tab {Tree height at time of planting (cm)}\cr
\code{Hgt96} \tab {Tree height in September 1996 (cm)}\cr
\code{Diam96} \tab {Tree trunk diameter in September 1996 (cm)}\cr
\code{Grow96} \tab {Leader growth during 1996 (cm)}\cr
\code{Hgt97} \tab {Tree height in September 1997 (cm)}\cr
\code{Diam97} \tab {Tree trunk diameter in September 1997 (cm)}\cr
\code{Spread97} \tab {Widest lateral spread in September 1997 (cm)}\cr
\code{Needles97} \tab {Needle length in September 1997 (mm)}\cr
\code{Deer95} \tab {Type of deer damage in September 1995: 0 = none, 1 = browsed}\cr
\code{Deer97} \tab {Type of deer damage in September 1997: 0 = none, 1 = browsed}\cr
\code{Cover95} \tab {Thorny cover in September 1995: 0 = none; 1 = some; 2 = moderate; 3 = lots}\cr
\code{Fert} \tab {Indicator for fertilizer: 0 = no, 1 = yes}\cr
\code{Spacing} \tab {Distance (in feet) between trees (10 or 15)}\cr
}
}
\details{
This dataset contains data from an experiment conducted by the Department
of Biology at Kenyon College at a site near the campus in Gambier, Ohio. In April 1990, student
and faculty volunteers planted 1000 white pine (Pinus strobus) seedlings at the Brown Family Environmental
Center. These seedlings were planted in two grids, distinguished by 10- and 15-foot
spacings between the seedlings. Several variables were measured and recorded
for each seedling over time (in 1990, 1996, and 1997).
}
\source{
Thanks to the Kenyon College Department of Biology for sharing these data.
}
\keyword{datasets}
| /man/Pines.Rd | no_license | cran/Stat2Data | R | false | false | 1,959 | rd | \name{Pines}
\alias{Pines}
\docType{data}
\title{Measurements of Pine Tree Seedlings}
\description{
Data from pine seedlings planted in 1990
}
\format{
A dataset with 1000 observations on the following 15 variables.
\tabular{rl}{
\code{Row} \tab {Row number in pine plantation}\cr
\code{Col} \tab {Column number in pine plantation}\cr
\code{Hgt90} \tab {Tree height at time of planting (cm)}\cr
\code{Hgt96} \tab {Tree height in September 1996 (cm)}\cr
\code{Diam96} \tab {Tree trunk diameter in September 1996 (cm)}\cr
\code{Grow96} \tab {Leader growth during 1996 (cm)}\cr
\code{Hgt97} \tab {Tree height in September 1997 (cm)}\cr
\code{Diam97} \tab {Tree trunk diameter in September 1997 (cm)}\cr
\code{Spread97} \tab {Widest lateral spread in September 1997 (cm)}\cr
\code{Needles97} \tab {Needle length in September 1997 (mm)}\cr
\code{Deer95} \tab {Type of deer damage in September 1995: 0 = none, 1 = browsed}\cr
\code{Deer97} \tab {Type of deer damage in September 1997: 0 = none, 1 = browsed}\cr
\code{Cover95} \tab {Thorny cover in September 1995: 0 = none; 1 = some; 2 = moderate; 3 = lots}\cr
\code{Fert} \tab {Indicator for fertilizer: 0 = no, 1 = yes}\cr
\code{Spacing} \tab {Distance (in feet) between trees (10 or 15)}\cr
}
}
\details{
This dataset contains data from an experiment conducted by the Department
of Biology at Kenyon College at a site near the campus in Gambier, Ohio. In April 1990, student
and faculty volunteers planted 1000 white pine (Pinus strobus) seedlings at the Brown Family Environmental
Center. These seedlings were planted in two grids, distinguished by 10- and 15-foot
spacings between the seedlings. Several variables were measured and recorded
for each seedling over time (in 1990, 1996, and 1997).
}
\source{
Thanks to the Kenyon College Department of Biology for sharing these data.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotMCMCAlgorithmObject.R
\name{plot.Rcpp_MCMCAlgorithm}
\alias{plot.Rcpp_MCMCAlgorithm}
\title{Plot MCMC algorithm}
\usage{
\method{plot}{Rcpp_MCMCAlgorithm}(x, what = "LogPosterior", zoom.window = NULL, ...)
}
\arguments{
\item{x}{An Rcpp_MCMC object initialized with \code{initializeMCMCObject}.}
\item{what}{character defining if log(Posterior) (Default) or log(Likelihood)
options are: LogPosterior or logLikelihood}
\item{zoom.window}{A vector describing the start and end of the zoom window.}
\item{...}{Arguments to be passed to methods, such as graphical parameters.}
}
\value{
This function has no return value.
}
\description{
This function will plot the logLikelihood trace, and if the Hmisc package is installed, it will
plot a subplot of the logLikelihood trace with the first few samples removed.
}
| /fuzzedpackages/AnaCoDa/man/plot.Rcpp_MCMCAlgorithm.Rd | no_license | akhikolla/testpackages | R | false | true | 898 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotMCMCAlgorithmObject.R
\name{plot.Rcpp_MCMCAlgorithm}
\alias{plot.Rcpp_MCMCAlgorithm}
\title{Plot MCMC algorithm}
\usage{
\method{plot}{Rcpp_MCMCAlgorithm}(x, what = "LogPosterior", zoom.window = NULL, ...)
}
\arguments{
\item{x}{An Rcpp_MCMC object initialized with \code{initializeMCMCObject}.}
\item{what}{character defining if log(Posterior) (Default) or log(Likelihood)
options are: LogPosterior or logLikelihood}
\item{zoom.window}{A vector describing the start and end of the zoom window.}
\item{...}{Arguments to be passed to methods, such as graphical parameters.}
}
\value{
This function has no return value.
}
\description{
This function will plot the logLikelihood trace, and if the Hmisc package is installed, it will
plot a subplot of the logLikelihood trace with the first few samples removed.
}
|
### Baxter-King filter
# Baxter-King symmetric band-pass filter: extracts the cyclical component of a
# series by keeping oscillations with periods between pl and pu.
#
# Args:
#   x     numeric vector or ts object to filter.
#   pl    minimum period kept (default 2, or trunc(1.5*frequency) for ts data).
#   pu    maximum period kept (default trunc(8*frequency)); must exceed pl.
#   nfix  lead/lag length of the moving average (default 3*frequency); the
#         first and last nfix cycle values are undefined and returned as NA.
#   type  "fixed" applies the same window everywhere; "variable" shrinks the
#         window near the sample edges.
#   drift if TRUE, remove a linear drift first via undrift() (defined
#         elsewhere in this package).
#
# Returns: a list of class "mFilter" with cycle, trend, the filter matrix
# (fmatrix) and call metadata.
bkfilter <- function(x,pl=NULL,pu=NULL,nfix=NULL,type=c("fixed","variable"),drift=FALSE)
{
if(is.null(drift)) drift <- FALSE
xname=deparse(substitute(x))
type = match.arg(type)
if(is.null(type)) type <- "fixed"
if(is.ts(x))
freq=frequency(x)
else
freq=1
# Default band edges scale with the sampling frequency.
if(is.null(pl))
{
if(freq > 1)
pl=trunc(freq*1.5)
else
pl=2
}
if(is.null(pu))
pu=trunc(freq*8)
# Band edges expressed in radians.
b = 2*pi/pl
a = 2*pi/pu
n = length(x)
if(n<5)
warning("# of observations in Baxter-King filter < 5")
if(pu<=pl)
stop("pu must be larger than pl")
if(pl<2)
{
warning("in Baxter-King filter, pl less than 2 , reset to 2")
pl = 2
}
if(is.null(nfix))
nfix = freq*3
if(nfix>=n/2)
stop("fixed lag length must be < n/2")
# Ideal band-pass weights; B[1] is the zero-lag weight, B[j+1] the lag-j weight.
j = 1:(2*n)
B = as.matrix(c((b-a)/pi,(sin(j*b)-sin(j*a))/(j*pi)))
AA = matrix(0,n,n)
if(type=="fixed")
{
# One symmetric window of 2*nfix+1 truncated weights, re-centred so the
# weights sum to zero, applied to every interior observation.
bb = matrix(0,2*nfix+1,1)
bb[(nfix+1):(2*nfix+1)] = B[1:(nfix+1)]
bb[nfix:1] = B[2:(nfix+1)]
bb = bb-sum(bb)/(2*nfix+1)
for(i in (nfix+1):(n-nfix))
AA[i,(i-nfix):(i+nfix)] = t(bb)
}
if(type=="variable")
{
# Window half-width shrinks to the distance from the nearer sample edge.
for(i in (nfix+1):(n-nfix))
{
j=min(c(i-1,n-i))
bb=matrix(0,2*j+1,1)
bb[(j+1):(2*j+1)] = B[1:(j+1)]
bb[j:1] = B[2:(j+1)]
bb = bb-sum(bb)/(2*j+1)
AA[i,(i-j):(i+j)] = t(bb)
}
}
xo = x
x = as.matrix(x)
if(drift)
x = undrift(x)
x.cycle = AA%*%as.matrix(x)
# The filter cannot be evaluated within nfix observations of either end.
x.cycle[c(1:nfix,(n-nfix+1):n)] = NA
x.trend = x-x.cycle
if(is.ts(xo))
{
# Restore the time-series attributes of the original input.
tsp.x = tsp(xo)
x.cycle=ts(x.cycle,start=tsp.x[1],frequency=tsp.x[3])
x.trend=ts(x.trend,start=tsp.x[1],frequency=tsp.x[3])
x=ts(x,start=tsp.x[1],frequency=tsp.x[3])
}
res <- list(cycle=x.cycle,trend=x.trend,fmatrix=AA,title="Baxter-King Filter",
xname=xname,call=as.call(match.call()),
type=type,pl=pl,pu=pu,nfix=nfix,method="bkfilter",x=x)
return(structure(res,class="mFilter"))
}
| /R/bkfilter.R | no_license | cran/mFilter | R | false | false | 2,213 | r | ### Baxter-King filter
# Baxter-King symmetric band-pass filter: extracts the cyclical component of a
# series by keeping oscillations with periods between pl and pu.
#
# Args:
#   x     numeric vector or ts object to filter.
#   pl    minimum period kept (default 2, or trunc(1.5*frequency) for ts data).
#   pu    maximum period kept (default trunc(8*frequency)); must exceed pl.
#   nfix  lead/lag length of the moving average (default 3*frequency); the
#         first and last nfix cycle values are undefined and returned as NA.
#   type  "fixed" applies the same window everywhere; "variable" shrinks the
#         window near the sample edges.
#   drift if TRUE, remove a linear drift first via undrift() (defined
#         elsewhere in this package).
#
# Returns: a list of class "mFilter" with cycle, trend, the filter matrix
# (fmatrix) and call metadata.
bkfilter <- function(x,pl=NULL,pu=NULL,nfix=NULL,type=c("fixed","variable"),drift=FALSE)
{
if(is.null(drift)) drift <- FALSE
xname=deparse(substitute(x))
type = match.arg(type)
if(is.null(type)) type <- "fixed"
if(is.ts(x))
freq=frequency(x)
else
freq=1
# Default band edges scale with the sampling frequency.
if(is.null(pl))
{
if(freq > 1)
pl=trunc(freq*1.5)
else
pl=2
}
if(is.null(pu))
pu=trunc(freq*8)
# Band edges expressed in radians.
b = 2*pi/pl
a = 2*pi/pu
n = length(x)
if(n<5)
warning("# of observations in Baxter-King filter < 5")
if(pu<=pl)
stop("pu must be larger than pl")
if(pl<2)
{
warning("in Baxter-King filter, pl less than 2 , reset to 2")
pl = 2
}
if(is.null(nfix))
nfix = freq*3
if(nfix>=n/2)
stop("fixed lag length must be < n/2")
# Ideal band-pass weights; B[1] is the zero-lag weight, B[j+1] the lag-j weight.
j = 1:(2*n)
B = as.matrix(c((b-a)/pi,(sin(j*b)-sin(j*a))/(j*pi)))
AA = matrix(0,n,n)
if(type=="fixed")
{
# One symmetric window of 2*nfix+1 truncated weights, re-centred so the
# weights sum to zero, applied to every interior observation.
bb = matrix(0,2*nfix+1,1)
bb[(nfix+1):(2*nfix+1)] = B[1:(nfix+1)]
bb[nfix:1] = B[2:(nfix+1)]
bb = bb-sum(bb)/(2*nfix+1)
for(i in (nfix+1):(n-nfix))
AA[i,(i-nfix):(i+nfix)] = t(bb)
}
if(type=="variable")
{
# Window half-width shrinks to the distance from the nearer sample edge.
for(i in (nfix+1):(n-nfix))
{
j=min(c(i-1,n-i))
bb=matrix(0,2*j+1,1)
bb[(j+1):(2*j+1)] = B[1:(j+1)]
bb[j:1] = B[2:(j+1)]
bb = bb-sum(bb)/(2*j+1)
AA[i,(i-j):(i+j)] = t(bb)
}
}
xo = x
x = as.matrix(x)
if(drift)
x = undrift(x)
x.cycle = AA%*%as.matrix(x)
# The filter cannot be evaluated within nfix observations of either end.
x.cycle[c(1:nfix,(n-nfix+1):n)] = NA
x.trend = x-x.cycle
if(is.ts(xo))
{
# Restore the time-series attributes of the original input.
tsp.x = tsp(xo)
x.cycle=ts(x.cycle,start=tsp.x[1],frequency=tsp.x[3])
x.trend=ts(x.trend,start=tsp.x[1],frequency=tsp.x[3])
x=ts(x,start=tsp.x[1],frequency=tsp.x[3])
}
res <- list(cycle=x.cycle,trend=x.trend,fmatrix=AA,title="Baxter-King Filter",
xname=xname,call=as.call(match.call()),
type=type,pl=pl,pu=pu,nfix=nfix,method="bkfilter",x=x)
return(structure(res,class="mFilter"))
}
|
library(testthat)
library(boot.heterogeneity)
test_check("boot.heterogeneity")
| /tests/testthat.R | no_license | cran/boot.heterogeneity | R | false | false | 80 | r | library(testthat)
library(boot.heterogeneity)
test_check("boot.heterogeneity")
|
#### This is plot2.R my attempt at making the second .PNG plot
#### Setting up some preliminary stuff
# Read the full UCI household-power-consumption file as character so the "?"
# missing-value marker is mapped to NA via na.strings.
tempset <- read.csv("household_power_consumption.txt", sep = ";", colClasses = "character", na.strings = "?")
# Keep only rows 66638-69517 -- presumably the 1-2 Feb 2007 window; this
# assumes the source file's row order is fixed (TODO confirm).
dataset <- tempset[66638:69517,]
# Drop the full table to free memory.
tempset <- NULL
#### Naming some variables for an awesome pinball game
GORGAR <- as.numeric(dataset$Global_active_power) # global active power (kilowatts)
BEAT <- paste(dataset$Date,dataset$Time) # combined date-time strings
YOU <- strptime(BEAT, "%d/%m/%Y %H:%M:%S") # parsed POSIXlt timestamps
#### Making the .PNG file
png("plot2.png")
plot(YOU, GORGAR, type="l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off() | /plot2.R | no_license | ejnolaniv/ExData_Plotting1 | R | false | false | 598 | r | #### This is plot2.R my attempt at making the second .PNG plot
#### Setting up some preliminary stuff
tempset <- read.csv("household_power_consumption.txt", sep = ";", colClasses = "character", na.strings = "?")
dataset <- tempset[66638:69517,]
tempset <- NULL
#### Naming some variables for an awesome pinball game
GORGAR <- as.numeric(dataset$Global_active_power)
BEAT <- paste(dataset$Date,dataset$Time)
YOU <- strptime(BEAT, "%d/%m/%Y %H:%M:%S")
#### Making the .PNG file
png("plot2.png")
plot(YOU, GORGAR, type="l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.omicsData.R
\name{as.seqData}
\alias{as.seqData}
\title{Create pmartR Object of Class seqData}
\usage{
as.seqData(
e_data,
f_data,
e_meta = NULL,
edata_cname,
fdata_cname,
emeta_cname = NULL,
techrep_cname = NULL,
...
)
}
\arguments{
\item{e_data}{a \eqn{p \times n + 1} data frame of expression data, where
\eqn{p} is the number of RNA transcripts observed and \eqn{n} is the number
of samples (an additional transcript identifier/name column should also be
present somewhere in the data frame). Each row corresponds to data for one
transcript. One column specifying a unique identifier for each transcript
(row) must be present. All counts are required to be raw for processing.}
\item{f_data}{a data frame with \eqn{n} rows. Each row corresponds to a
sample with one column giving the unique sample identifiers found in e_data
column names and other columns providing qualitative and/or quantitative
traits of each sample. For library size normalization, this can be provided as part of f_data
or calculated from columns in e_data.}
\item{e_meta}{an optional data frame with at least \eqn{p} rows. Each row
corresponds to a transcript with one column giving transcript names (must be
named the same as the column in \code{e_data}) and other columns giving biomolecule
meta information (e.g. mappings of transcripts to genes or proteins). Can be the same as edata_cname, if desired.}
\item{edata_cname}{character string specifying the name of the column
containing the transcript identifiers in \code{e_data} and \code{e_meta}
(if applicable).}
\item{fdata_cname}{character string specifying the name of the column
containing the sample identifiers in \code{f_data}.}
\item{emeta_cname}{character string specifying the name of the column
containing the gene identifiers (or other mapping variable) in
\code{e_meta} (if applicable). Defaults to NULL. If \code{e_meta} is NULL,
then either do not specify \code{emeta_cname} or specify it as NULL.}
\item{techrep_cname}{character string specifying the name of the column in
\code{f_data} that specifies which samples are technical replicates. This column is used to
collapse the data when \code{combine_techreps} is called on this object.
Defaults to NULL (no technical replicates).}
\item{...}{further arguments}
}
\value{
Object of class seqData
}
\description{
Converts several data frames of RNA-seq transcript data to
an object of the class 'seqData'. Objects of the class 'seqData' are lists
with two obligatory components, \code{e_data} and \code{f_data}. An optional
list component, \code{e_meta}, is used if analysis or visualization at other
levels (e.g. gene, protein, pathway) is also desired.
}
\details{
Objects of class 'seqData' contain some attributes that are
referenced by downstream functions. These attributes can be changed from
their default value by manual specification. A list of these attributes as
well as their default values are as follows: \tabular{ll}{ data_scale \tab
Scale of the data provided in \code{e_data}. Only 'counts' is valid for
'seqData'. \cr \tab \cr is_normalized \tab A logical argument, specifying
whether the data has been normalized or not. Default value is FALSE. \cr
\tab \cr norm_info \tab Default value is an empty list, which will be
populated with a single named element \code{is_normalized = is_normalized}.
\cr \tab \cr data_types \tab Character string describing the type of data, most commonly used for lipidomic data (lipidData objects) or NMR data (nmrData objects) but available for other data classes as well. Default value is NULL. \cr } Computed
values included in the \code{data_info} attribute are as follows:
\tabular{ll}{ num_edata \tab The number of unique \code{edata_cname}
entries.\cr \tab \cr num_zero_obs \tab The number of zero-value
observations.\cr \tab \cr num_emeta \tab The number of unique
\code{emeta_cname} entries. \cr \tab \cr prop_missing \tab The proportion
of \code{e_data} values that are NA. \cr \tab \cr num_samps \tab The number
of samples that make up the columns of \code{e_data}.\cr \tab \cr meta_info
\tab A logical argument, specifying whether \code{e_meta} is provided.\cr
\tab \cr }
}
\examples{
library(pmartRdata)
myseq <- as.seqData(
e_data = rnaseq_edata,
e_meta = rnaseq_emeta,
f_data = rnaseq_fdata,
edata_cname = "Transcript",
fdata_cname = "SampleName",
emeta_cname = "Transcript"
)
}
\seealso{
\code{\link{as.proData}}
\code{\link{as.pepData}}
\code{\link{as.lipidData}}
\code{\link{as.metabData}}
\code{\link{as.nmrData}}
}
\author{
Rachel Richardson, Kelly Stratton, Lisa Bramer
}
| /man/as.seqData.Rd | permissive | pmartR/pmartR | R | false | true | 4,700 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.omicsData.R
\name{as.seqData}
\alias{as.seqData}
\title{Create pmartR Object of Class seqData}
\usage{
as.seqData(
e_data,
f_data,
e_meta = NULL,
edata_cname,
fdata_cname,
emeta_cname = NULL,
techrep_cname = NULL,
...
)
}
\arguments{
\item{e_data}{a \eqn{p \times n + 1} data frame of expression data, where
\eqn{p} is the number of RNA transcripts observed and \eqn{n} is the number
of samples (an additional transcript identifier/name column should also be
present somewhere in the data frame). Each row corresponds to data for one
transcript. One column specifying a unique identifier for each transcript
(row) must be present. All counts are required to be raw for processing.}
\item{f_data}{a data frame with \eqn{n} rows. Each row corresponds to a
sample with one column giving the unique sample identifiers found in e_data
column names and other columns providing qualitative and/or quantitative
traits of each sample. For library size normalization, this can be provided as part of f_data
or calculated from columns in e_data.}
\item{e_meta}{an optional data frame with at least \eqn{p} rows. Each row
corresponds to a transcript with one column giving transcript names (must be
named the same as the column in \code{e_data}) and other columns giving biomolecule
meta information (e.g. mappings of transcripts to genes or proteins). Can be the same as edata_cname, if desired.}
\item{edata_cname}{character string specifying the name of the column
containing the transcript identifiers in \code{e_data} and \code{e_meta}
(if applicable).}
\item{fdata_cname}{character string specifying the name of the column
containing the sample identifiers in \code{f_data}.}
\item{emeta_cname}{character string specifying the name of the column
containing the gene identifiers (or other mapping variable) in
\code{e_meta} (if applicable). Defaults to NULL. If \code{e_meta} is NULL,
then either do not specify \code{emeta_cname} or specify it as NULL.}
\item{techrep_cname}{character string specifying the name of the column in
\code{f_data} that specifies which samples are technical replicates. This column is used to
collapse the data when \code{combine_techreps} is called on this object.
Defaults to NULL (no technical replicates).}
\item{...}{further arguments}
}
\value{
Object of class seqData
}
\description{
Converts several data frames of RNA-seq transcript data to
an object of the class 'seqData'. Objects of the class 'seqData' are lists
with two obligatory components, \code{e_data} and \code{f_data}. An optional
list component, \code{e_meta}, is used if analysis or visualization at other
levels (e.g. gene, protein, pathway) is also desired.
}
\details{
Objects of class 'seqData' contain some attributes that are
referenced by downstream functions. These attributes can be changed from
their default value by manual specification. A list of these attributes as
well as their default values are as follows: \tabular{ll}{ data_scale \tab
Scale of the data provided in \code{e_data}. Only 'counts' is valid for
'seqData'. \cr \tab \cr is_normalized \tab A logical argument, specifying
whether the data has been normalized or not. Default value is FALSE. \cr
\tab \cr norm_info \tab Default value is an empty list, which will be
populated with a single named element \code{is_normalized = is_normalized}.
\cr \tab \cr data_types \tab Character string describing the type of data, most commonly used for lipidomic data (lipidData objects) or NMR data (nmrData objects) but available for other data classes as well. Default value is NULL. \cr } Computed
values included in the \code{data_info} attribute are as follows:
\tabular{ll}{ num_edata \tab The number of unique \code{edata_cname}
entries.\cr \tab \cr num_zero_obs \tab The number of zero-value
observations.\cr \tab \cr num_emeta \tab The number of unique
\code{emeta_cname} entries. \cr \tab \cr prop_missing \tab The proportion
of \code{e_data} values that are NA. \cr \tab \cr num_samps \tab The number
of samples that make up the columns of \code{e_data}.\cr \tab \cr meta_info
\tab A logical argument, specifying whether \code{e_meta} is provided.\cr
\tab \cr }
}
\examples{
library(pmartRdata)
myseq <- as.seqData(
e_data = rnaseq_edata,
e_meta = rnaseq_emeta,
f_data = rnaseq_fdata,
edata_cname = "Transcript",
fdata_cname = "SampleName",
emeta_cname = "Transcript"
)
}
\seealso{
\code{\link{as.proData}}
\code{\link{as.pepData}}
\code{\link{as.lipidData}}
\code{\link{as.metabData}}
\code{\link{as.nmrData}}
}
\author{
Rachel Richardson, Kelly Stratton, Lisa Bramer
}
|
#GET LDA TOPICS
# Fit a 4-topic LDA model over the sentences of the article at `url` and
# return a ggplot of the top 5 terms (by beta) per topic.
# Relies on project/session dependencies: articlewords() (project helper),
# stop_words / reorder_within / scale_x_reordered (tidytext -- assumed
# attached by the calling session), dplyr, tidy() from broom/tidytext.
showTopics <- function(url) {
  article_words <- articlewords(url)
  # Per-sentence word counts with stop words removed.
  word_summary <- article_words %>%
    anti_join(stop_words) %>%
    count(sentence_id, word, sort = TRUE)
  # Topic Modelling #######
  library(topicmodels)
  library(SnowballC)
  # Cast the counts to a document-term matrix (one "document" per sentence).
  sentence_dtm <- word_summary %>%
    cast_dtm(sentence_id, word, n )
  # Fixed seed keeps the topic assignment reproducible.
  sentence_lda <- LDA(sentence_dtm, k = 4, control = list(seed = 1234))
  # Per-topic word probabilities (beta matrix) in tidy form.
  sentence_topics <- tidy(sentence_lda, matrix = "beta")
  top_terms <- sentence_topics %>%
    group_by(topic) %>%
    top_n(5, beta) %>%
    ungroup() %>%
    arrange(topic, -beta)
  library(ggplot2)
  # One facet per topic, bars ordered within each facet by beta.
  plot <- top_terms %>%
    mutate(term = reorder_within(term, beta, topic)) %>%
    ggplot(aes(term, beta, fill = factor(topic))) +
    geom_col(show.legend = FALSE) +
    facet_wrap(~ topic, scales = "free") +
    coord_flip() +
    ggtitle("LDA based topics in the speech") +
    scale_x_reordered()
  return(plot)
}
| /topicmodeling.R | no_license | rishkum/shinyTextAnal | R | false | false | 951 | r | #GET LDA TOPICS
showTopics <- function(url) {
article_words <- articlewords(url)
# Find the uniquess in the document
word_summary <- article_words %>%
anti_join(stop_words) %>%
count(sentence_id, word, sort =T)
# Topic Modellinh #######
library(topicmodels)
library(SnowballC)
sentence_dtm <- word_summary %>%
cast_dtm(sentence_id, word, n )
sentence_lda <- LDA(sentence_dtm, k = 4, control = list(seed = 1234))
sentence_lda
sentence_topics <- tidy(sentence_lda, matrix = "beta")
sentence_topics
top_terms <- sentence_topics %>%
group_by(topic) %>%
top_n(5, beta) %>%
ungroup() %>%
arrange(topic, -beta)
library(ggplot2)
plot <- top_terms %>%
mutate(term = reorder_within(term, beta, topic)) %>%
ggplot(aes(term, beta, fill = factor(topic))) +
geom_col(show.legend = FALSE) +
facet_wrap(~ topic, scales = "free") +
coord_flip() +
ggtitle("LDA based topics in the speech") +
scale_x_reordered()
return(plot)
}
|
%% File Name: md.pattern.sirt.Rd
%% File Version: 0.13
%% File Last Change: 2017-01-18 18:08:39
\name{md.pattern.sirt}
\alias{md.pattern.sirt}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Response Pattern in a Binary Matrix
}
\description{
Computes different statistics of the response pattern in a binary
matrix.
}
\usage{
md.pattern.sirt(dat)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dat}{
A binary data matrix
}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
A list with following entries:
\item{dat}{Original dataset}
\item{dat.resp1}{Indices for responses of 1's}
\item{dat.resp0}{Indices for responses of 0's}
\item{resp_patt}{Vector of response patterns}
\item{unique_resp_patt}{Unique response patterns}
\item{unique_resp_patt_freq}{Frequencies of unique response patterns}
\item{unique_resp_patt_firstobs}{First observation in original dataset
\code{dat} of a unique response pattern}
\item{freq1}{Frequencies of 1's}
\item{freq0}{Frequencies of 0's}
\item{dat.ordered}{Dataset according to response patterns}
}
%\references{
%% ~put references to the literature/web site here ~
%}
\author{
Alexander Robitzsch
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
See also the \code{md.pattern} function in the \pkg{mice} package.
}
\examples{
#############################################################################
# EXAMPLE 1: Response patterns
#############################################################################
set.seed(7654)
N <- 21 # number of rows
I <- 4 # number of columns
dat <- matrix( 1*( stats::runif(N*I) > .3 ) , N, I )
res <- sirt::md.pattern.sirt(dat)
# plot of response patterns
res$dat.ordered
image( z=t(res$dat.ordered) , y =1:N , x=1:I , xlab="Items" , ylab="Persons")
# 0's are yellow and 1's are red
#############################################################################
# EXAMPLE 2: Item response patterns for dataset data.read
#############################################################################
data(data.read)
dat <- data.read ; N <- nrow(dat) ; I <- ncol(dat)
# order items according to p values
dat <- dat[ , order(colMeans(dat , na.rm=TRUE )) ]
# analyzing response pattern
res <- sirt::md.pattern.sirt(dat)
res$dat.ordered
image( z=t(res$dat.ordered) , y =1:N , x=1:I , xlab="Items" , ylab="Persons")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Utilities}
%%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/md.pattern.sirt.Rd | no_license | SanVerhavert/sirt | R | false | false | 2,643 | rd | %% File Name: md.pattern.sirt.Rd
%% File Version: 0.13
%% File Last Change: 2017-01-18 18:08:39
\name{md.pattern.sirt}
\alias{md.pattern.sirt}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Response Pattern in a Binary Matrix
}
\description{
Computes different statistics of the response pattern in a binary
matrix.
}
\usage{
md.pattern.sirt(dat)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dat}{
A binary data matrix
}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
A list with following entries:
\item{dat}{Original dataset}
\item{dat.resp1}{Indices for responses of 1's}
\item{dat.resp0}{Indices for responses of 0's}
\item{resp_patt}{Vector of response patterns}
\item{unique_resp_patt}{Unique response patterns}
\item{unique_resp_patt_freq}{Frequencies of unique response patterns}
\item{unique_resp_patt_firstobs}{First observation in original dataset
\code{dat} of a unique response pattern}
\item{freq1}{Frequencies of 1's}
\item{freq0}{Frequencies of 0's}
\item{dat.ordered}{Dataset according to response patterns}
}
%\references{
%% ~put references to the literature/web site here ~
%}
\author{
Alexander Robitzsch
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
See also the \code{md.pattern} function in the \pkg{mice} package.
}
\examples{
#############################################################################
# EXAMPLE 1: Response patterns
#############################################################################
set.seed(7654)
N <- 21 # number of rows
I <- 4 # number of columns
dat <- matrix( 1*( stats::runif(N*I) > .3 ) , N, I )
res <- sirt::md.pattern.sirt(dat)
# plot of response patterns
res$dat.ordered
image( z=t(res$dat.ordered) , y =1:N , x=1:I , xlab="Items" , ylab="Persons")
# 0's are yellow and 1's are red
#############################################################################
# EXAMPLE 2: Item response patterns for dataset data.read
#############################################################################
data(data.read)
dat <- data.read ; N <- nrow(dat) ; I <- ncol(dat)
# order items according to p values
dat <- dat[ , order(colMeans(dat , na.rm=TRUE )) ]
# analyzing response pattern
res <- sirt::md.pattern.sirt(dat)
res$dat.ordered
image( z=t(res$dat.ordered) , y =1:N , x=1:I , xlab="Items" , ylab="Persons")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Utilities}
%%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# NOTE(review): hard-coded absolute Windows path -- consider parameterizing.
setwd("D:\\newFo\\SRR8570506\\job4")
DATA_FILE <- 'completeMADS.txt'
X <- read.table(DATA_FILE, header=TRUE, check.names=FALSE)
library(MASS)
# Draw one log2-log2 TPM comparison scatter into a PDF: equal-scale axes
# (MASS::eqscplot) with the y = x identity line in red.
plot_log2_comparison <- function(pdf_file, x_vals, y_vals, xlab, ylab, main) {
  pdf(pdf_file)
  eqscplot(log2(x_vals), log2(y_vals), xlab = xlab, ylab = ylab, main = main)
  abline(0, 1, col = "red")
  dev.off()
}
# Salmon TPM versus Sailfish TPM (log scale)
plot_log2_comparison('MADSsalmon-vs-sailfish-log-scale.pdf', X$Sailfish, X$Salmon,
                     "log2(Sailfish TPM)", "log2(Salmon TPM)",
                     "MADS Salmon versus Sailfish(TPM in log scale)")
# Salmon TPM versus STAR TPM (log scale)
plot_log2_comparison('MADSsalmon-vs-star-log-scale.pdf', X$STAR, X$Salmon,
                     "log2(STAR-HTSeq TPM)", "log2(Salmon TPM)",
                     "MADS Salmon versus STAR-HTSeq(TPM in log scale)")
# Sailfish TPM versus STAR TPM (log scale)
plot_log2_comparison('MADSsailfish-vs-star-log-scale.pdf', X$STAR, X$Sailfish,
                     "log2(STAR-HTSeq TPM)", "log2(Sailfish TPM)",
                     "MADS Sailfish versus STAR-HTSeq(TPM in log scale)")
| /SRR8570506/job4/compareSalmonSailfishSTAR.R | no_license | Acero522/Salmon-Sailfish-and-STAR-HTSeq-File | R | false | false | 1,033 | r | setwd("D:\\newFo\\SRR8570506\\job4")
# Duplicate copy of the Salmon/Sailfish/STAR comparison script (the leading
# setwd() call sits on the preceding metadata line of this dump).
# Produces three pairwise log2(TPM) scatter plots, each with a y = x
# reference line in red.
DATA_FILE <- 'completeMADS.txt'
X <- read.table(DATA_FILE, header=TRUE, check.names=FALSE)
library(MASS)  # for eqscplot(): equal-scale scatter plot
# Salmon TPM versus Sailfish TPM (log scale)
pdf('MADSsalmon-vs-sailfish-log-scale.pdf')
y <- log2(X$Salmon)
x <- log2(X$Sailfish)
eqscplot(x, y, xlab="log2(Sailfish TPM)", ylab="log2(Salmon TPM)", main="MADS Salmon versus Sailfish(TPM in log scale)")
abline(0, 1, col="red")  # y = x: perfect agreement
dev.off()
# Salmon TPM versus STAR TPM (log scale)
pdf('MADSsalmon-vs-star-log-scale.pdf')
y <- log2(X$Salmon)
x <- log2(X$STAR)
eqscplot(x, y, xlab="log2(STAR-HTSeq TPM)", ylab="log2(Salmon TPM)", main="MADS Salmon versus STAR-HTSeq(TPM in log scale)")
abline(0, 1, col="red")  # y = x: perfect agreement
dev.off()
#Sailfish TPM versus STAR TPM(log scale)
pdf('MADSsailfish-vs-star-log-scale.pdf')
y <- log2(X$Sailfish)
x <- log2(X$STAR)
eqscplot(x, y, xlab="log2(STAR-HTSeq TPM)", ylab="log2(Sailfish TPM)", main="MADS Sailfish versus STAR-HTSeq(TPM in log scale)")
abline(0, 1, col="red")  # y = x: perfect agreement
dev.off()
|
#' @title check_wilcoxon
#' @description Calculate Wilcoxon test (with BH correction) for two-group comparison
#' @param x \code{\link{phyloseq-class}} object or a data matrix
#' (features x samples; eg. HITChip taxa vs. samples)
#' @param group Vector specifying the groups, or (for a phyloseq object)
#' the name of a single sample metadata variable defining the two groups
#' @param p.adjust.method p-value correction method for p.adjust function
#' (default 'BH'). For other options, see ?p.adjust
#' @param sort sort the results by increasing adjusted p-value
#' @param paired Paired comparison (Default: FALSE)
#' @return data.frame with one row per feature, containing the corrected
#' p-values and log10 fold changes for the two-group comparison.
#' @examples
#' #pseq <- download_microbiome("peerj32")$physeq
#' #pval <- check_wilcoxon(pseq, "gender")
#' @export
#' @references See citation('microbiome')
#' @author Contact: Leo Lahti \email{microbiome-admin@@googlegroups.com}
#' @keywords utilities
check_wilcoxon <- function (x, group, p.adjust.method = "BH", sort = FALSE, paired = FALSE) {

  # Log10 fold changes are reported next to the p-values in the result.
  fc <- check_foldchange(x, group, paired = paired)

  # When `group` is a single name, pick the grouping variable from the
  # phyloseq sample metadata; otherwise `group` is used directly as the
  # grouping vector.
  g <- group
  if (length(g) == 1) {
    g <- sample_data(x)[[g]]
    if (!is.factor(g)) {
      warning(paste("Converting the grouping variable", group, "into a factor."))
      g <- as.factor(g)
    }
    g <- droplevels(g)
    # nlevels(g) != 2 replaces the original `!length(levels(g)) == 2`,
    # which relied on `==` binding tighter than `!` -- correct but obscure.
    if (nlevels(g) != 2) {
      stop(paste("check_wilcoxon is valid only for two-group comparisons. The selected variable", group, "has", length(unique(g)), "levels: ", paste(unique(g), collapse = "/")))
    }
  }

  # inherits() is robust when class(x) has length > 1, unlike class(x) == ...
  if (inherits(x, "phyloseq")) {
    # Work on log10 abundances from the OTU table.
    x <- log10(otu_table(x)@.Data)
  }

  # Row-wise two-sample Wilcoxon tests; warnings about ties are suppressed.
  pval <- suppressWarnings(apply(x, 1, function (xi) {wilcox.test(xi ~ g, paired = paired)$p.value}))

  # Multiple testing correction (default Benjamini-Hochberg).
  pval <- p.adjust(pval, method = p.adjust.method)

  # Optionally order by increasing adjusted p-value.
  if (sort) {
    pval <- sort(pval)
  }

  data.frame(list(p.value = pval, fold.change.log10 = fc[names(pval)]))
}
| /R/wilcoxon.R | no_license | TTloveTT/microbiome | R | false | false | 1,988 | r | #' @title check_wilcoxon
#' @description Calculate Wilcoxon test (with BH correction) for two-group comparison
#' @param x \code{\link{phyloseq-class}} object or a data matrix
#' (features x samples; eg. HITChip taxa vs. samples)
#' @param group Vector with specifying the groups
#' @param p.adjust.method p-value correction method for p.adjust function
#' (default 'BH'). For other options, see ?p.adjust
#' @param sort sort the results
#' @param paired Paired comparison (Default: FALSE)
#' @return Corrected p-values for two-group comparison.
#' @examples
#' #pseq <- download_microbiome("peerj32")$physeq
#' #pval <- check_wilcoxon(pseq, "gender")
#' @export
#' @references See citation('microbiome')
#' @author Contact: Leo Lahti \email{microbiome-admin@@googlegroups.com}
#' @keywords utilities
check_wilcoxon <- function (x, group, p.adjust.method = "BH", sort = FALSE, paired = FALSE) {
  # Log10 fold changes are reported next to the p-values in the result.
  fc <- check_foldchange(x, group, paired = paired)
  # When `group` is a single name, pick the grouping variable from the
  # phyloseq sample metadata; otherwise it is used as the grouping vector.
  g <- group
  if (length(g) == 1) {
    g <- sample_data(x)[[g]]
    if (!is.factor(g)) {
      warning(paste("Converting the grouping variable", group, "into a factor."))
      g <- as.factor(g)
    }
    g <- droplevels(g)
    # NOTE(review): `!length(levels(g)) == 2` parses as
    # `!(length(levels(g)) == 2)` because `==` binds tighter than `!`,
    # so this is correct -- but `nlevels(g) != 2` would be clearer.
    if (!length(levels(g)) == 2) {
      stop(paste("check_wilcoxon is valid only for two-group comparisons. The selected variable", group, "has", length(unique(g)), "levels: ", paste(unique(g), collapse = "/")))
    }
  }
  # NOTE(review): inherits(x, "phyloseq") would be more robust than comparing
  # class(x) directly (class() can return a vector of length > 1).
  if (class(x) == "phyloseq") {
    # Work on log10 abundances from the OTU table.
    x <- log10(otu_table(x)@.Data)
  }
  # Row-wise two-sample Wilcoxon tests; warnings about ties are suppressed.
  pval <- suppressWarnings(apply(x, 1, function (xi) {wilcox.test(xi ~ g, paired = paired)$p.value}))
  # Multiple testing correction (default Benjamini-Hochberg).
  pval <- p.adjust(pval, method = p.adjust.method)
  # Optionally order by increasing adjusted p-value.
  if (sort) {
    pval <- sort(pval)
  }
  data.frame(list(p.value = pval, fold.change.log10 = fc[names(pval)]))
}
|
## Purled R code from the "ode" vignette of the `calculus` package
## (generated by knitr; the "## ----" lines are chunk markers -- keep them).

## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
knitr::opts_knit$set(
  global.par = TRUE
)

## ----setup--------------------------------------------------------------------
library(calculus)

## ---- echo=FALSE--------------------------------------------------------------
par(mar = c(4, 4, 1, 1))

## -----------------------------------------------------------------------------
# ODE given as a character expression: dx/dt = x with x(0) = 1.
f <- "x"
var <- c(x=1)
times <- seq(0, 2*pi, by=0.001)
x <- ode(f = f, var = var, times = times)
plot(times, x, type = "l")

## -----------------------------------------------------------------------------
# Time-dependent right-hand side: dx/dt = cos(t), x(0) = 0; the time
# variable name is passed via `timevar`.
f <- "cos(t)"
var <- c(x=0)
times <- seq(0, 2*pi, by=0.001)
x <- ode(f = f, var = var, times = times, timevar = "t")
plot(times, x, type = "l")

## -----------------------------------------------------------------------------
# System of two equations given as character expressions, one per variable.
f <- c("x", "x*(1+cos(10*t))")
var <- c(x=1, y=1)
times <- seq(0, 2*pi, by=0.001)
x <- ode(f = f, var = var, times = times, timevar = "t")
matplot(times, x, type = "l", lty = 1, col = 1:2)

## -----------------------------------------------------------------------------
# Right-hand side given as an R function of the named state variables.
f <- function(x, y) c(x, y)
var <- c(x=1, y=2)
times <- seq(0, 2*pi, by=0.001)
x <- ode(f = f, var = var, times = times)
matplot(times, x, type = "l", lty = 1, col = 1:2)

## -----------------------------------------------------------------------------
# Right-hand side as a function of the state vector and the time variable.
f <- function(x, t) c(x[1], x[2], x[2]*(1+cos(10*t)))
var <- c(1,2,2)
times <- seq(0, 2*pi, by=0.001)
x <- ode(f = f, var = var, times = times, timevar = "t")
matplot(times, x, type = "l", lty = 1, col = 1:3)
| /inst/doc/ode.R | no_license | cran/calculus | R | false | false | 1,641 | r | ## ---- include = FALSE---------------------------------------------------------
# Duplicate copy of the purled `calculus` "ode" vignette code (the opening
# "## ---- include = FALSE" chunk marker sits on the preceding metadata line).
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
knitr::opts_knit$set(
  global.par = TRUE
)

## ----setup--------------------------------------------------------------------
library(calculus)

## ---- echo=FALSE--------------------------------------------------------------
par(mar = c(4, 4, 1, 1))

## -----------------------------------------------------------------------------
# dx/dt = x with x(0) = 1, given as a character expression.
f <- "x"
var <- c(x=1)
times <- seq(0, 2*pi, by=0.001)
x <- ode(f = f, var = var, times = times)
plot(times, x, type = "l")

## -----------------------------------------------------------------------------
# Time-dependent RHS: dx/dt = cos(t), x(0) = 0; time variable named via `timevar`.
f <- "cos(t)"
var <- c(x=0)
times <- seq(0, 2*pi, by=0.001)
x <- ode(f = f, var = var, times = times, timevar = "t")
plot(times, x, type = "l")

## -----------------------------------------------------------------------------
# System of two equations, one character expression per variable.
f <- c("x", "x*(1+cos(10*t))")
var <- c(x=1, y=1)
times <- seq(0, 2*pi, by=0.001)
x <- ode(f = f, var = var, times = times, timevar = "t")
matplot(times, x, type = "l", lty = 1, col = 1:2)

## -----------------------------------------------------------------------------
# RHS as an R function of the named state variables.
f <- function(x, y) c(x, y)
var <- c(x=1, y=2)
times <- seq(0, 2*pi, by=0.001)
x <- ode(f = f, var = var, times = times)
matplot(times, x, type = "l", lty = 1, col = 1:2)

## -----------------------------------------------------------------------------
# RHS as a function of the state vector and the time variable.
f <- function(x, t) c(x[1], x[2], x[2]*(1+cos(10*t)))
var <- c(1,2,2)
times <- seq(0, 2*pi, by=0.001)
x <- ode(f = f, var = var, times = times, timevar = "t")
matplot(times, x, type = "l", lty = 1, col = 1:3)
# Plot 4 of the Exploratory Data Analysis course project: a 2x2 panel of
# household power-consumption plots for 2007-02-01 and 2007-02-02.
#
# Rewritten to fix two defects in the original:
#   * `png(df1, filename = "plot4.png", ...)` passed the data frame
#     positionally into `filename` while ALSO naming filename= -- R errors
#     with "formal argument matched by multiple actual arguments".
#   * The original re-opened the data file once per matching row and grew
#     the data frame with rbind() inside a loop (O(n^2) and very slow);
#     the file is now scanned once and the matching rows parsed in a
#     single read.csv() call.

# Locate the rows for 1 Feb 2007 and 2 Feb 2007 (dates are d/m/Y).
all_lines <- readLines("household_power_consumption.txt")
wanted <- grep('^1/2/2007|^2/2/2007', all_lines)
# Parse header + matching rows in one go; "?" marks missing values.
df <- read.csv(text = c(all_lines[1], all_lines[wanted]),
               sep = ";", header = TRUE, check.names = FALSE,
               na.strings = "?")
write.table(df, file = "revised_data_orig_filtered.txt", row.names = FALSE, sep = ";")
# End of common reading code

df1 <- read.csv("revised_data_orig_filtered.txt", sep = ";", header = TRUE)

# Combine the Date and Time columns into a single POSIX timestamp.
df1$datetime <- strptime(paste(df1$Date, df1$Time), "%d/%m/%Y %H:%M:%S")

png(filename = "plot4.png", width = 480, height = 480, units = "px")
par(mfrow = c(2, 2))

# Panel 1: global active power over time.
plot(df1$datetime, df1$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")

# Panel 2: voltage over time.
plot(df1$datetime, df1$Voltage, type = "l", ylab = "Voltage", xlab = "datetime")

# Panel 3: the three sub-metering series overlaid, with a legend.
plot(df1$datetime, df1$Sub_metering_1, type = "l",
     ylab = "Energy sub metering", xlab = "")
lines(df1$datetime, df1$Sub_metering_2, col = "red")
lines(df1$datetime, df1$Sub_metering_3, col = "blue")
legend('topright', c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c('black', 'red', 'blue'), bty = 'o', cex = .75)

# Panel 4: global reactive power over time.
plot(df1$datetime, df1$Global_reactive_power, type = "l",
     ylab = "Global_reactive_power", xlab = "datetime")

dev.off()
dev.off() | /plot4.R | no_license | rravishankar/ExData_Plotting1 | R | false | false | 2,644 | r | #Note - the code takes a LONG time to run and produce results
#but it works. Please be patient if you try to run this.
# Duplicate copy of plot4.R (its first comment line sits on the preceding
# metadata line of this dump).
# NOTE(review): re-reading the file once per matching row and growing `df`
# with rbind() in a loop is O(n^2) -- a single filtered read would be fast.
#Find out all days for which we need data
corrected_all_days <- grep('^1/2/2007|^2/2/2007',readLines("household_power_consumption.txt"))
df <- read.csv("household_power_consumption.txt", sep=";", nrows=1, skip=0, header=TRUE)
#Get the column names
data_col_names <- colnames(df)
#Now read in the rows which you need to and add them to the dataframe
for (i in corrected_all_days) {
  #print(i)
  new_row = read.csv("household_power_consumption.txt", sep=";", nrows=1, skip=i-1, header=FALSE, check.names = FALSE, col.names = data_col_names, na.strings="?")
  df <- rbind(df, new_row)
}
# Now delete the first row which we read in first and does not belong to the pattern
df <- df[-c(1),]
write.table(df, file="revised_data_orig_filtered.txt", row.names = FALSE, sep=";")
#End of common reading code
df1 <-read.csv("revised_data_orig_filtered.txt", sep=";", header=TRUE)
# NOTE(review): passing df1 positionally here matches png()'s `filename`
# argument, which is ALSO given by name -- this call errors with
# "formal argument matched by multiple actual arguments"; drop `df1`.
png(df1, filename="plot4.png",width = 480, height = 480, units = "px")
par(mfrow = c(2,2))
#hist(df1$Global_active_power, xlab = "Global Active Power (kilowatts)", main = "Global Active Power", col = "red")
# df1[,1]<- as.Date(df1[, 1])
# df1[,2]<- as.Date(df1[, 2])
df1[,1]<- as.character(df1[, 1])
df1[,2]<- as.character(df1[, 2])
df1["datetime"] <- NA # That creates the new column named "datetime" filled with "NA"
#df1$datetime <- df1$Date + df1$Time # As an example, the new column receives the result of C - D
df1$datetime <- paste(df1$Date, df1$Time) # As an example, the new column receives the result of C - D
df1$datetime <- strptime(df1$datetime, "%d/%m/%Y %H:%M:%S")
#Plot # 1
plot(df1$datetime, df1$Global_active_power, type = "l",ylab = "Global Active Power (kilowatts)", xlab = "")
#End of plot #1
#Plot #2
plot(df1$datetime, df1$Voltage , type = "l",ylab = "Voltage", xlab = "datetime")
#End of plot #2
#Plot #3
plot(df1$datetime, df1$Sub_metering_1, type="l", ylab = "Energy sub metering", xlab = "")
#plot(df1$datetime, df1$Sub_metering_2, type="l", ylab = "Energy sub metering", xlab = "", col = "red")
lines(df1$datetime, df1$Sub_metering_2, ylab = "Energy sub metering", xlab = "", col = "red")
lines(df1$datetime, df1$Sub_metering_3, ylab = "Energy sub metering", xlab = "", col = "blue")
#legend("topright", legend ="Data")
legend('topright', names(df1)[c(7,8,9)] , lty=1, col=c('black', 'red', 'blue'), bty='o', cex=.75)
#End of plot #3
#Plot #4
plot(df1$datetime, df1$Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime")
#End of plot #4
dev.off()
\name{targetGene.tss}
\alias{targetGene.tss}
\docType{data}
\title{
targetGene.tss
}
\description{
A single numeric value (e.g. 88904257); appears to store the transcription
start site (TSS) coordinate of the target gene. TODO: confirm, and document
the genome build and chromosome this coordinate refers to.
}
\usage{data("targetGene.tss")}
\format{
The format is:
num 88904257
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
data(targetGene.tss)
## maybe str(targetGene.tss) ; plot(targetGene.tss) ...
}
\keyword{datasets}
| /mef2c/SingleGeneAnalyzer/man-hidden/targetGene.tss.Rd | permissive | PriceLab/eqtlTrenaNotebooks | R | false | false | 590 | rd | \name{targetGene.tss}
\alias{targetGene.tss}
\docType{data}
\title{
targetGene.tss
}
\description{
A single numeric value (e.g. 88904257); appears to store the transcription
start site (TSS) coordinate of the target gene. TODO: confirm, and document
the genome build and chromosome this coordinate refers to.
}
\usage{data("targetGene.tss")}
\format{
The format is:
num 88904257
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
data(targetGene.tss)
## maybe str(targetGene.tss) ; plot(targetGene.tss) ...
}
\keyword{datasets}
|
## Rank hospitals in every state for a given 30-day mortality outcome.
##
## Args:
##   outcome: one of "heart attack", "heart failure", "pneumonia"
##   num:     rank to extract per state -- a positive integer, "best" or
##            "worst" (default "best"); an integer larger than the number
##            of hospitals in a state yields NA for that state
## Returns: a data.frame with one row per state and columns "hospital"
##          and "state" (row names are the state codes).
rankall <- function(outcome, num = "best") {
    ## Validate arguments before doing any (slow) file I/O.
    if (!outcome %in% c("heart attack", "heart failure", "pneumonia")) {
        stop('invalid outcome')
    }
    if (!is.numeric(num) && !num %in% c("best", "worst")) {
        stop('invalid num')
    }
    ## Read outcome data; keep hospital, state and the three mortality columns.
    data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
    fd <- as.data.frame(cbind(data[, 2],   # hospital
                              data[, 7],   # state
                              data[, 11],  # heart attack
                              data[, 17],  # heart failure
                              data[, 23]), # pneumonia
                        stringsAsFactors = FALSE)
    colnames(fd) <- c("hospital", "state", "heart attack", "heart failure", "pneumonia")
    fd[, outcome] <- as.numeric(fd[, outcome])
    by_state <- with(fd, split(fd, state))
    if (is.numeric(num)) {
        ## Integer rank: sort ascending by outcome then hospital, pick row num.
        ordered <- list()
        for (i in seq_along(by_state)) {
            by_state[[i]] <- by_state[[i]][order(by_state[[i]][, outcome],
                                                 by_state[[i]][, "hospital"]), ]
            ordered[[i]] <- c(by_state[[i]][num, "hospital"], by_state[[i]][, "state"][1])
        }
        result <- do.call(rbind, ordered)
        output <- as.data.frame(result, row.names = result[, 2], stringsAsFactors = FALSE)
        names(output) <- c("hospital", "state")
    } else {
        ## "best"/"worst": one shared branch; only the sort direction differs.
        decreasing <- identical(num, "worst")
        ordered <- list()
        for (i in seq_along(by_state)) {
            by_state[[i]] <- by_state[[i]][order(by_state[[i]][, outcome],
                                                 by_state[[i]][, "hospital"],
                                                 decreasing = decreasing), ]
            ordered[[i]] <- c(by_state[[i]][1, c("hospital", "state")])
        }
        result <- do.call(rbind, ordered)
        output <- as.data.frame(result, stringsAsFactors = FALSE)
        rownames(output) <- output[, 2]
    }
    return(output)
}
# Demo: show the #20-ranked hospital for "heart attack" in the first 10 states.
# NOTE(review): machine-specific absolute path -- adjust before running.
# To put the path where the file 'outcome-of-care-measures.csv' is located
path <- "/home/sebastian/Documentos/CURSOS/Cursos-Coursera/R-Programming/Week-4/Programming Assignment 3/rprog_data_ProgAssignment3-data"
setwd(path)
getwd()
head(rankall("heart attack", 20), 10)
| /Week-4/Programming Assignment 3/rankall.R | permissive | JohamSMC/R-Programming-Course | R | false | false | 2,767 | r | rankall <- function(outcome, num = "best"){
    ## Duplicate copy of rankall(); the `rankall <- function(...)` header sits
    ## on the preceding metadata line of this dump.
    ## Read outcome data
    data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
    fd <- as.data.frame(cbind(data[, 2], # hospital
                              data[, 7], # state
                              data[, 11], # heart attack
                              data[, 17], # heart failure
                              data[, 23]), # pneumonia
                        stringsAsFactors = FALSE)
    colnames(fd) <- c("hospital", "state", "heart attack", "heart failure", "pneumonia")
    ## NOTE(review): eval() of a character string just returns the string,
    ## so eval(outcome) is equivalent to outcome here.
    fd[, eval(outcome)] <- as.numeric(fd[, eval(outcome)])
    ## Check that state and outcome are valid
    if (!outcome %in% c("heart attack", "heart failure", "pneumonia")){
        stop('invalid outcome')
    } else if (is.numeric(num)) {
        ## Integer rank: sort each state ascending and pick row `num`
        ## (NA when the state has fewer hospitals than `num`).
        by_state <- with(fd, split(fd, state))
        ordered <- list()
        for (i in seq_along(by_state)){
            by_state[[i]] <- by_state[[i]][order(by_state[[i]][, eval(outcome)],
                                                 by_state[[i]][, "hospital"]), ]
            ordered[[i]] <- c(by_state[[i]][num, "hospital"], by_state[[i]][, "state"][1])
        }
        result <- do.call(rbind, ordered)
        output <- as.data.frame(result, row.names = result[, 2], stringsAsFactors = FALSE)
        names(output) <- c("hospital", "state")
    } else if (!is.numeric(num)) {
        if (num == "best") {
            ## "best": ascending sort, take the first row of each state.
            by_state <- with(fd, split(fd, state))
            ordered <- list()
            for (i in seq_along(by_state)){
                by_state[[i]] <- by_state[[i]][order(by_state[[i]][, eval(outcome)],
                                                     by_state[[i]][, "hospital"]), ]
                ordered[[i]] <- c(by_state[[i]][1, c("hospital", "state")])
            }
            result <- do.call(rbind, ordered)
            output <- as.data.frame(result, stringsAsFactors = FALSE)
            rownames(output) <- output[, 2]
        } else if (num == "worst") {
            ## "worst": identical to "best" except the sort is descending.
            by_state <- with(fd, split(fd, state))
            ordered <- list()
            for (i in seq_along(by_state)){
                by_state[[i]] <- by_state[[i]][order(by_state[[i]][, eval(outcome)],
                                                     by_state[[i]][, "hospital"],
                                                     decreasing = TRUE), ]
                ordered[[i]] <- c(by_state[[i]][1, c("hospital", "state")])
            }
            result <- do.call(rbind, ordered)
            output <- as.data.frame(result, stringsAsFactors = FALSE)
            rownames(output) <- output[, 2]
        } else {
            stop('invalid num')
        }
    }
    return(output)
}
# To put the path where the file 'outcome-of-care-measures.csv' is located
# NOTE(review): machine-specific absolute path; `path <- ...` would be the
# idiomatic assignment form.
path = "/home/sebastian/Documentos/CURSOS/Cursos-Coursera/R-Programming/Week-4/Programming Assignment 3/rprog_data_ProgAssignment3-data"
setwd(path)
getwd()
# Show the #20-ranked hospital for "heart attack" in the first 10 states.
head(rankall("heart attack", 20), 10)
|
#' @title
#' Create ChemiDPlus Schema If Not Exist
#'
#' @description
#' Creates the six ChemiDPlus staging tables (classification,
#' links_to_resources, names_and_synonyms, registry_number_log,
#' registry_numbers, rn_url_validity) if they do not already exist.
#' NOTE(review): despite the title, no CREATE SCHEMA is issued -- the
#' `chemidplus` schema itself is presumably expected to exist already;
#' confirm against the calling code.
#'
#' @param conn database connection, passed through to pg13::send
#' @param verbose passed through to pg13::send
#' @param render_sql passed through to pg13::send
#' @return whatever pg13::send returns (presumably invisible; TODO confirm)
#'
#' @importFrom pg13 send
#' @export
start_cdp <-
  function(conn,
           verbose = TRUE,
           render_sql = TRUE) {
    # The DDL below is sent as a single batch of idempotent statements.
    pg13::send(conn = conn,
               sql_statement =
                 "
                        CREATE TABLE IF NOT EXISTS chemidplus.classification (
                            c_datetime timestamp without time zone,
                            substance_classification character varying(255),
                            rn_url character varying(255)
                        );
                        CREATE TABLE IF NOT EXISTS chemidplus.links_to_resources (
                            ltr_datetime timestamp without time zone,
                            resource_agency character varying(255),
                            resource_link character varying(255),
                            rn_url character varying(255)
                        );
                        CREATE TABLE IF NOT EXISTS chemidplus.names_and_synonyms (
                            nas_datetime timestamp without time zone,
                            rn_url character varying(255),
                            substance_synonym_type character varying(255),
                            substance_synonym text
                        );
                        CREATE TABLE IF NOT EXISTS chemidplus.registry_number_log (
                            rnl_datetime timestamp without time zone,
                            raw_search_term character varying(255),
                            processed_search_term character varying(255),
                            search_type character varying(255),
                            url character varying(255),
                            response_received character varying(255),
                            no_record character varying(255),
                            response_recorded character varying(255),
                            compound_match character varying(255),
                            rn character varying(255),
                            rn_url character varying(255)
                        );
                        CREATE TABLE IF NOT EXISTS chemidplus.registry_numbers (
                            rn_datetime timestamp without time zone,
                            rn_url character varying(255),
                            registry_number_type character varying(255),
                            registry_number character varying(255)
                        );
                        CREATE TABLE IF NOT EXISTS chemidplus.rn_url_validity (
                            rnuv_datetime timestamp without time zone,
                            rn_url character varying(255),
                            is_404 character varying(255)
                        );
                        ",
               verbose = verbose,
               render_sql = render_sql)
  }
#' List the tables currently present in the `chemidplus` schema.
#'
#' @param conn database connection, passed through to pg13::lsTables
#' @param verbose passed through to pg13::lsTables
#' @param render_sql passed through to pg13::lsTables
#' @export
list_cdp_tables <- function(conn, verbose = TRUE, render_sql = TRUE) {
  # Thin wrapper around pg13::lsTables, fixed to the "chemidplus" schema.
  pg13::lsTables(
    conn = conn,
    schema = "chemidplus",
    verbose = verbose,
    render_sql = render_sql
  )
}
#' Read every table in the `chemidplus` schema into a named list.
#'
#' Lists the schema's tables via list_cdp_tables() and reads each one with
#' pg13::readTable; rubix::map_names_set presumably names the resulting
#' list elements by table name -- TODO confirm against the rubix docs.
#' NOTE(review): relies on `%>%` being available; no magrittr/dplyr pipe
#' import is visible in this file.
#'
#' @param conn database connection
#' @param verbose passed through to the pg13 calls
#' @param render_sql passed through to the pg13 calls
#' @export
read_cdp_tables <-
  function(conn,
           verbose = TRUE,
           render_sql = TRUE) {
    list_cdp_tables(conn = conn,
                    verbose = verbose,
                    render_sql = render_sql) %>%
      rubix::map_names_set(function(x)
        pg13::readTable(conn = conn,
                        schema = "chemidplus",
                        tableName = x,
                        verbose = verbose,
                        render_sql = render_sql))
  }
#' Extract the registry number (final URL path component) from a URL column.
#'
#' The given column is consumed and replaced by a new `rn` column holding
#' everything after the last "/" in each URL.
#'
#' @param data data frame containing the URL column
#' @param col column to parse (default `rn_url`); tidy-evaluated
#' @export
rn_url_to_rn <- function(data, col = rn_url) {
  # "^.*[/]{1}(.*$)": the greedy ".*" runs up to the LAST slash, so the
  # capture group is the final path component.
  tidyr::extract(
    data,
    col = {{ col }},
    into = "rn",
    regex = "^.*[/]{1}(.*$)"
  )
}
| /R/cdp-utils2.R | no_license | meerapatelmd/skyscraper | R | false | false | 4,103 | r | #' @title
#' Create ChemiDPlus Schema If Not Exist
#'
#' Duplicate copy of start_cdp() (its `#' @title` line sits on the preceding
#' metadata line of this dump). Creates six ChemiDPlus staging tables with
#' idempotent CREATE TABLE IF NOT EXISTS statements; note no CREATE SCHEMA
#' is issued, so the `chemidplus` schema presumably must already exist --
#' TODO confirm.
#'
#' @importFrom pg13 send
#' @export
start_cdp <-
  function(conn,
           verbose = TRUE,
           render_sql = TRUE) {
    pg13::send(conn = conn,
               sql_statement =
                 "
                        CREATE TABLE IF NOT EXISTS chemidplus.classification (
                            c_datetime timestamp without time zone,
                            substance_classification character varying(255),
                            rn_url character varying(255)
                        );
                        CREATE TABLE IF NOT EXISTS chemidplus.links_to_resources (
                            ltr_datetime timestamp without time zone,
                            resource_agency character varying(255),
                            resource_link character varying(255),
                            rn_url character varying(255)
                        );
                        CREATE TABLE IF NOT EXISTS chemidplus.names_and_synonyms (
                            nas_datetime timestamp without time zone,
                            rn_url character varying(255),
                            substance_synonym_type character varying(255),
                            substance_synonym text
                        );
                        CREATE TABLE IF NOT EXISTS chemidplus.registry_number_log (
                            rnl_datetime timestamp without time zone,
                            raw_search_term character varying(255),
                            processed_search_term character varying(255),
                            search_type character varying(255),
                            url character varying(255),
                            response_received character varying(255),
                            no_record character varying(255),
                            response_recorded character varying(255),
                            compound_match character varying(255),
                            rn character varying(255),
                            rn_url character varying(255)
                        );
                        CREATE TABLE IF NOT EXISTS chemidplus.registry_numbers (
                            rn_datetime timestamp without time zone,
                            rn_url character varying(255),
                            registry_number_type character varying(255),
                            registry_number character varying(255)
                        );
                        CREATE TABLE IF NOT EXISTS chemidplus.rn_url_validity (
                            rnuv_datetime timestamp without time zone,
                            rn_url character varying(255),
                            is_404 character varying(255)
                        );
                        ",
               verbose = verbose,
               render_sql = render_sql)
  }
#' List the tables currently present in the `chemidplus` schema
#' (thin wrapper around pg13::lsTables).
#' @export
list_cdp_tables <-
  function(conn,
           verbose = TRUE,
           render_sql = TRUE) {
    pg13::lsTables(conn = conn,
                   schema = "chemidplus",
                   verbose = verbose,
                   render_sql = render_sql)
  }
#' Read every table in the `chemidplus` schema into a named list.
#' rubix::map_names_set presumably names the list by table name -- TODO
#' confirm. NOTE(review): relies on `%>%` being available; no pipe import
#' is visible in this file.
#' @export
read_cdp_tables <-
  function(conn,
           verbose = TRUE,
           render_sql = TRUE) {
    list_cdp_tables(conn = conn,
                    verbose = verbose,
                    render_sql = render_sql) %>%
      rubix::map_names_set(function(x)
        pg13::readTable(conn = conn,
                        schema = "chemidplus",
                        tableName = x,
                        verbose = verbose,
                        render_sql = render_sql))
  }
#' Replace the URL column with an `rn` column holding the final path
#' component (everything after the last "/") of each URL.
#' @export
rn_url_to_rn <-
  function(data,
           col = rn_url) {
    # Greedy ".*" before "[/]{1}" runs to the LAST slash, so the capture
    # group is the final path component.
    data %>%
      tidyr::extract(col = {{ col }},
                     into = "rn",
                     regex = "^.*[/]{1}(.*$)")
  }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ume.network.R
\name{ume.network.run}
\alias{ume.network.run}
\title{Run the model using the network object}
\usage{
ume.network.run(
network,
inits = NULL,
n.chains = 3,
max.run = 1e+05,
setsize = 10000,
n.run = 50000,
conv.limit = 1.05,
extra.pars.save = NULL
)
}
\arguments{
\item{network}{network object created from \code{\link{ume.network.data}} function}
\item{inits}{Initial values for the parameters being sampled. If left unspecified, program will generate reasonable initial values.}
\item{n.chains}{Number of chains to run}
\item{max.run}{Maximum number of iterations that user is willing to run. If the algorithm is not converging, it will run up to \code{max.run} iterations before printing a message that it did not converge}
\item{setsize}{Number of iterations that are run between convergence checks. If the algorithm converges quickly, a large setsize is not needed. The number printed at each convergence check is the Gelman-Rubin diagnostic, which should fall below the conv.limit the user specifies.}
\item{n.run}{Final number of iterations that the user wants to store. If, after the algorithm converges, the user wants fewer iterations than were run, the sequence is thinned; if the user wants more iterations, extra iterations are run to reach the specified number of runs}
\item{conv.limit}{Convergence limit for Gelman and Rubin's convergence diagnostic. Point estimate is used to test convergence of parameters for study effect (eta), relative effect (d), and heterogeneity (log variance (logvar)).}
\item{extra.pars.save}{Parameters that user wants to save besides the default parameters saved. See code using \code{cat(network$code)} to see which parameters can be saved.}
}
\value{
\item{data_rjags}{Data that is put into rjags function jags.model}
\item{inits}{Initial values that are either specified by the user or generated as a default}
\item{pars.save}{Parameters that are saved. Add more parameters in extra.pars.save if other variables are desired}
\item{burnin}{Half of the converged sequence is thrown out as a burnin}
\item{n.thin}{If the number of iterations user wants (n.run) is less than the number of converged sequence after burnin, we thin the sequence and store the thinning interval}
\item{samples}{MCMC samples stored using jags. The returned samples have the form of mcmc.list and can be directly applied to coda functions}
\item{max.gelman}{Maximum Gelman and Rubin's convergence diagnostic calculated for the final sample}
\item{deviance}{Contains deviance statistics such as pD (effective number of parameters) and DIC (Deviance Information Criterion)}
}
\description{
This is similar to the function \code{\link{network.run}}, except this is used for the unrelated mean effects model.
}
\examples{
network <- with(thrombolytic, {
ume.network.data(Outcomes, Study, Treat, N = N, response = "binomial")
})
\donttest{
result <- ume.network.run(network, n.run = 10000)
}
}
| /man/ume.network.run.Rd | no_license | MikeJSeo/bnma | R | false | true | 3,037 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ume.network.R
\name{ume.network.run}
\alias{ume.network.run}
\title{Run the model using the network object}
\usage{
ume.network.run(
network,
inits = NULL,
n.chains = 3,
max.run = 1e+05,
setsize = 10000,
n.run = 50000,
conv.limit = 1.05,
extra.pars.save = NULL
)
}
\arguments{
\item{network}{network object created from \code{\link{ume.network.data}} function}
\item{inits}{Initial values for the parameters being sampled. If left unspecified, program will generate reasonable initial values.}
\item{n.chains}{Number of chains to run}
\item{max.run}{Maximum number of iterations that user is willing to run. If the algorithm is not converging, it will run up to \code{max.run} iterations before printing a message that it did not converge}
\item{setsize}{Number of iterations that are run between convergence checks. If the algorithm converges quickly, a large setsize is not needed. The number printed at each convergence check is the Gelman-Rubin diagnostic, which should fall below the conv.limit the user specifies.}
\item{n.run}{Final number of iterations that the user wants to store. If, after the algorithm converges, the user wants fewer iterations than were run, the sequence is thinned; if the user wants more iterations, extra iterations are run to reach the specified number of runs}
\item{conv.limit}{Convergence limit for Gelman and Rubin's convergence diagnostic. Point estimate is used to test convergence of parameters for study effect (eta), relative effect (d), and heterogeneity (log variance (logvar)).}
\item{extra.pars.save}{Parameters that user wants to save besides the default parameters saved. See code using \code{cat(network$code)} to see which parameters can be saved.}
}
\value{
\item{data_rjags}{Data that is put into rjags function jags.model}
\item{inits}{Initial values that are either specified by the user or generated as a default}
\item{pars.save}{Parameters that are saved. Add more parameters in extra.pars.save if other variables are desired}
\item{burnin}{Half of the converged sequence is thrown out as a burnin}
\item{n.thin}{If the number of iterations user wants (n.run) is less than the number of converged sequence after burnin, we thin the sequence and store the thinning interval}
\item{samples}{MCMC samples stored using jags. The returned samples have the form of mcmc.list and can be directly applied to coda functions}
\item{max.gelman}{Maximum Gelman and Rubin's convergence diagnostic calculated for the final sample}
\item{deviance}{Contains deviance statistics such as pD (effective number of parameters) and DIC (Deviance Information Criterion)}
}
\description{
This is similar to the function \code{\link{network.run}}, except this is used for the unrelated mean effects model.
}
\examples{
network <- with(thrombolytic, {
ume.network.data(Outcomes, Study, Treat, N = N, response = "binomial")
})
\donttest{
result <- ume.network.run(network, n.run = 10000)
}
}
|
# Auto-generated fuzz-test fixture: calls the internal bama:::rand_igamma
# with a degenerate parameter list (scale = 0, shape ~ a subnormal double)
# and prints the structure of whatever comes back.
testlist <- list(scale = 0, shape = 5.09420361733571e-312)
result <- do.call(bama:::rand_igamma,testlist)
str(result)
# Duplicate of the fuzz fixture; its `testlist <- ...` definition sits on
# the preceding metadata line of this dump.
result <- do.call(bama:::rand_igamma,testlist)
str(result)
#' @importFrom dplyr arrange
#' @importFrom dplyr do
#' @importFrom dplyr filter
#' @importFrom dplyr group_by
#' @importFrom dplyr select
#' @importFrom dplyr summarise
#' @importFrom dplyr n
#' @importFrom dplyr mutate
#' @importFrom dplyr %>%
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 aes_string
#' @importFrom ggplot2 coord_flip
#' @importFrom ggplot2 coord_cartesian
#' @importFrom ggplot2 element_blank
#' @importFrom ggplot2 element_rect
#' @importFrom ggplot2 element_text
#' @importFrom ggplot2 facet_wrap
#' @importFrom ggplot2 geom_bar
#' @importFrom ggplot2 geom_boxplot
#' @importFrom ggplot2 geom_density
#' @importFrom ggplot2 geom_histogram
#' @importFrom ggplot2 geom_line
#' @importFrom ggplot2 geom_linerange
#' @importFrom ggplot2 geom_hline
#' @importFrom ggplot2 geom_vline
#' @importFrom ggplot2 geom_path
#' @importFrom ggplot2 geom_point
#' @importFrom ggplot2 geom_polygon
#' @importFrom ggplot2 geom_smooth
#' @importFrom ggplot2 geom_text
#' @importFrom ggplot2 geom_tile
#' @importFrom ggplot2 geom_violin
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 ggtitle
#' @importFrom ggplot2 guides
#' @importFrom ggplot2 guide_legend
#' @importFrom ggplot2 labs
#' @importFrom ggplot2 position_jitter
#' @importFrom ggplot2 scale_alpha_continuous
#' @importFrom ggplot2 scale_colour_gradient2
#' @importFrom ggplot2 scale_x_log10
#' @importFrom ggplot2 scale_y_log10
#' @importFrom ggplot2 scale_fill_gradientn
#' @importFrom ggplot2 scale_fill_gradient
#' @importFrom ggplot2 scale_x_discrete
#' @importFrom ggplot2 scale_x_continuous
#' @importFrom ggplot2 scale_y_discrete
#' @importFrom ggplot2 scale_y_continuous
#' @importFrom ggplot2 scale_size_manual
#' @importFrom ggplot2 scale_size
#' @importFrom ggplot2 scale_color_manual
#' @importFrom ggplot2 stat_density2d
#' @importFrom ggplot2 theme_set
#' @importFrom ggplot2 theme_bw
#' @importFrom ggplot2 theme
#' @importFrom ggplot2 xlab
#' @importFrom ggplot2 ylab
#' @importFrom phyloseq distance
#' @importFrom phyloseq estimate_richness
#' @importFrom phyloseq get_taxa
#' @importFrom phyloseq get_variable
#' @importFrom phyloseq merge_phyloseq
#' @importFrom phyloseq nsamples
#' @importFrom phyloseq ntaxa
#' @importFrom phyloseq ordinate
#' @importFrom phyloseq otu_table
#' @importFrom phyloseq otu_table<-
#' @importFrom phyloseq phyloseq
#' @importFrom phyloseq psmelt
#' @importFrom phyloseq prune_samples
#' @importFrom phyloseq prune_taxa
#' @importFrom phyloseq sample_data
#' @importFrom phyloseq sample_data<-
#' @importFrom phyloseq sample_names
#' @importFrom phyloseq taxa_are_rows
#' @importFrom phyloseq taxa_names
#' @importFrom phyloseq tax_glom
#' @importFrom phyloseq tax_table
#' @importFrom phyloseq tax_table<-
#' @importFrom phyloseq import_biom
#' @importFrom phyloseq parse_taxonomy_default
#' @importFrom reshape2 melt
#' @importFrom stats aggregate
#' @importFrom stats as.dist
#' @importFrom stats coef
#' @importFrom stats cor
#' @importFrom stats cor.test
#' @importFrom stats density
#' @importFrom stats dist
#' @importFrom stats dnorm
#' @importFrom stats hclust
#' @importFrom stats kernel
#' @importFrom stats lm
#' @importFrom stats loess
#' @importFrom stats loess.control
#' @importFrom stats median
#' @importFrom stats na.fail
#' @importFrom stats na.omit
#' @importFrom stats p.adjust
#' @importFrom stats pnorm
#' @importFrom stats predict
#' @importFrom stats quantile
#' @importFrom stats rnorm
#' @importFrom stats sd
#' @importFrom stats time
#' @importFrom stats frequency
#' @importFrom tidyr gather
#' @importFrom tidyr separate
#' @importFrom utils capture.output
#' @importFrom utils flush.console
#' @importFrom utils head
#' @importFrom utils read.csv
#' @importFrom utils read.table
#' @importFrom utils tail
#' @importFrom utils write.csv
#' @importFrom vegan decostand
#' @importFrom vegan fisher.alpha
#' @importFrom vegan metaMDS
#' @importFrom vegan scores
#' @importFrom vegan vegdist
#' @importFrom vegan wascores
# Attach hook: print the package startup banner.
# `lib` and `pkg` are supplied by R's attach machinery and are unused here;
# packageStartupMessage() (rather than message()) lets users suppress it
# with suppressPackageStartupMessages().
.onAttach <- function(lib, pkg) {
packageStartupMessage("\nmicrobiome R package (microbiome.github.com)
\n\n\n Copyright (C) 2011-2017 Leo Lahti et al. <microbiome.github.io>\n")
}
# As far as I understand the problem, running into this error / limit is _not_
# the fault of the user. Instead, I'd argue that it is the responsibility of
# package developers to make sure to unregister any registered DLLs of theirs
# when the package is unloaded. A developer can do this by adding the following
# to their package: .onUnload <- function(libpath) {
# library.dynam.unload(utils::packageName(), libpath) }
| /R/firstlib.R | no_license | rpatil8/microbiome | R | false | false | 4,590 | r | #' @importFrom dplyr arrange
#' @importFrom dplyr do
#' @importFrom dplyr filter
#' @importFrom dplyr group_by
#' @importFrom dplyr select
#' @importFrom dplyr summarise
#' @importFrom dplyr n
#' @importFrom dplyr mutate
#' @importFrom dplyr %>%
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 aes_string
#' @importFrom ggplot2 coord_flip
#' @importFrom ggplot2 coord_cartesian
#' @importFrom ggplot2 element_blank
#' @importFrom ggplot2 element_rect
#' @importFrom ggplot2 element_text
#' @importFrom ggplot2 facet_wrap
#' @importFrom ggplot2 geom_bar
#' @importFrom ggplot2 geom_boxplot
#' @importFrom ggplot2 geom_density
#' @importFrom ggplot2 geom_histogram
#' @importFrom ggplot2 geom_line
#' @importFrom ggplot2 geom_linerange
#' @importFrom ggplot2 geom_hline
#' @importFrom ggplot2 geom_vline
#' @importFrom ggplot2 geom_path
#' @importFrom ggplot2 geom_point
#' @importFrom ggplot2 geom_polygon
#' @importFrom ggplot2 geom_smooth
#' @importFrom ggplot2 geom_text
#' @importFrom ggplot2 geom_tile
#' @importFrom ggplot2 geom_violin
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 ggtitle
#' @importFrom ggplot2 guides
#' @importFrom ggplot2 guide_legend
#' @importFrom ggplot2 labs
#' @importFrom ggplot2 position_jitter
#' @importFrom ggplot2 scale_alpha_continuous
#' @importFrom ggplot2 scale_colour_gradient2
#' @importFrom ggplot2 scale_x_log10
#' @importFrom ggplot2 scale_y_log10
#' @importFrom ggplot2 scale_fill_gradientn
#' @importFrom ggplot2 scale_fill_gradient
#' @importFrom ggplot2 scale_x_discrete
#' @importFrom ggplot2 scale_x_continuous
#' @importFrom ggplot2 scale_y_discrete
#' @importFrom ggplot2 scale_y_continuous
#' @importFrom ggplot2 scale_size_manual
#' @importFrom ggplot2 scale_size
#' @importFrom ggplot2 scale_color_manual
#' @importFrom ggplot2 stat_density2d
#' @importFrom ggplot2 theme_set
#' @importFrom ggplot2 theme_bw
#' @importFrom ggplot2 theme
#' @importFrom ggplot2 xlab
#' @importFrom ggplot2 ylab
#' @importFrom phyloseq distance
#' @importFrom phyloseq estimate_richness
#' @importFrom phyloseq get_taxa
#' @importFrom phyloseq get_variable
#' @importFrom phyloseq merge_phyloseq
#' @importFrom phyloseq nsamples
#' @importFrom phyloseq ntaxa
#' @importFrom phyloseq ordinate
#' @importFrom phyloseq otu_table
#' @importFrom phyloseq otu_table<-
#' @importFrom phyloseq phyloseq
#' @importFrom phyloseq psmelt
#' @importFrom phyloseq prune_samples
#' @importFrom phyloseq prune_taxa
#' @importFrom phyloseq sample_data
#' @importFrom phyloseq sample_data<-
#' @importFrom phyloseq sample_names
#' @importFrom phyloseq taxa_are_rows
#' @importFrom phyloseq taxa_names
#' @importFrom phyloseq tax_glom
#' @importFrom phyloseq tax_table
#' @importFrom phyloseq tax_table<-
#' @importFrom phyloseq import_biom
#' @importFrom phyloseq parse_taxonomy_default
#' @importFrom reshape2 melt
#' @importFrom stats aggregate
#' @importFrom stats as.dist
#' @importFrom stats coef
#' @importFrom stats cor
#' @importFrom stats cor.test
#' @importFrom stats density
#' @importFrom stats dist
#' @importFrom stats dnorm
#' @importFrom stats hclust
#' @importFrom stats kernel
#' @importFrom stats lm
#' @importFrom stats loess
#' @importFrom stats loess.control
#' @importFrom stats median
#' @importFrom stats na.fail
#' @importFrom stats na.omit
#' @importFrom stats p.adjust
#' @importFrom stats pnorm
#' @importFrom stats predict
#' @importFrom stats quantile
#' @importFrom stats rnorm
#' @importFrom stats sd
#' @importFrom stats time
#' @importFrom stats frequency
#' @importFrom tidyr gather
#' @importFrom tidyr separate
#' @importFrom utils capture.output
#' @importFrom utils flush.console
#' @importFrom utils head
#' @importFrom utils read.csv
#' @importFrom utils read.table
#' @importFrom utils tail
#' @importFrom utils write.csv
#' @importFrom vegan decostand
#' @importFrom vegan fisher.alpha
#' @importFrom vegan metaMDS
#' @importFrom vegan scores
#' @importFrom vegan vegdist
#' @importFrom vegan wascores
# Attach hook: print the package startup banner.
# `lib` and `pkg` are supplied by R's attach machinery and are unused here;
# packageStartupMessage() (rather than message()) lets users suppress it
# with suppressPackageStartupMessages().
.onAttach <- function(lib, pkg) {
packageStartupMessage("\nmicrobiome R package (microbiome.github.com)
\n\n\n Copyright (C) 2011-2017 Leo Lahti et al. <microbiome.github.io>\n")
}
# As far as I understand the problem, running into this error / limit is _not_
# the fault of the user. Instead, I'd argue that it is the responsibility of
# package developers to make sure to unregister any registered DLLs of theirs
# when the package is unloaded. A developer can do this by adding the following
# to their package: .onUnload <- function(libpath) {
# library.dynam.unload(utils::packageName(), libpath) }
|
# A.Ritz
# 6/2016
source("PredictionFuncs.R")
textModel<-readRDS("textModel.RDS")
# Shiny server: predict the next word from the text the user has typed.
# Relies on `textModel` and guessWord() loaded/sourced at the top of this file.
shinyServer(
function(input,output)
{
# Best single next-word prediction, re-rendered whenever input$text changes.
output$nextWord<-renderText({
if (input$text == "")
{
# Empty input: fall back to a default word with no alternative candidates.
word <- "the"
nextBest = character()
}
else
{
# guessWord() returns the top prediction (bestWord) plus runner-up
# candidates (nextBest).
res<-guessWord(textModel, input$text)
word<-res$bestWord
nextBest<-res$nextBest
}
# NOTE(review): registering output$nextBest inside another render block is
# unconventional (it re-registers the output on every invalidation), though
# it does work in practice.
output$nextBest<-renderText(paste0(nextBest, collapse=", "))
word
})
})
| /Shiny/server.R | no_license | drewCo2/DS-Capstone | R | false | false | 507 | r | # A.Ritz
# 6/2016
source("PredictionFuncs.R")
textModel<-readRDS("textModel.RDS")
# Shiny server: predict the next word from the text the user has typed.
# Relies on `textModel` and guessWord() loaded/sourced at the top of this file.
shinyServer(
function(input,output)
{
# Best single next-word prediction, re-rendered whenever input$text changes.
output$nextWord<-renderText({
if (input$text == "")
{
# Empty input: fall back to a default word with no alternative candidates.
word <- "the"
nextBest = character()
}
else
{
# guessWord() returns the top prediction (bestWord) plus runner-up
# candidates (nextBest).
res<-guessWord(textModel, input$text)
word<-res$bestWord
nextBest<-res$nextBest
}
# NOTE(review): registering output$nextBest inside another render block is
# unconventional (it re-registers the output on every invalidation), though
# it does work in practice.
output$nextBest<-renderText(paste0(nextBest, collapse=", "))
word
})
})
|
# Align daily CHIRPS rainfall (CHP) with station observations by calendar
# date. Column names are Indonesian: Tahun/Bulan/Tanggal = year/month/day.
csx = read.csv2("OUTPUT/Harian/CHP/DAILY_CHP_GABUNG.csv", stringsAsFactors = F)
DEYS = data.frame(as.matrix(read_xlsx("VALIDASI_joz.xlsx", sheet = "DAILY")), stringsAsFactors = F)
# Zero-pad day and month to two digits so dates can be assembled as strings.
# NOTE(review): assigning padded strings into the column immediately coerces
# it back to character, undoing the as.numeric() conversion -- works, but
# sprintf("%02d", ...) would be cleaner.
DEYS$Tanggal = as.numeric(DEYS$Tanggal)
DEYS$Tanggal[nchar(DEYS$Tanggal) == 1] = paste0("0", DEYS$Tanggal[nchar(DEYS$Tanggal) == 1])
DEYS$Bulan = as.numeric(DEYS$Bulan)
DEYS$Bulan[nchar(DEYS$Bulan) == 1] = paste0("0", DEYS$Bulan[nchar(DEYS$Bulan) == 1])
# Build POSIXct dates (UTC) for both the CHIRPS table and the observations.
CHP_DATE = as.POSIXct(paste0(csx$Tahun, "-", csx$Bulan, "-", csx$Tanggal), tz = "UTC")
#ini_ilang_30 = which(OBS_DATE == "2019-09-30")
OBS_DATE = as.POSIXct(paste0(DEYS$Tahun, "-", DEYS$Bulan, "-", DEYS$Tanggal), tz = "UTC")
# Test for a zero-length character vector (the character analogue of the
# classic is.integer0 idiom).
#
# @param x Object to test.
# @return TRUE iff `x` is a character vector of length 0.
# Consistency fix: use `<-` for top-level assignment like the sibling
# helpers in this file (the original used `=`).
is.character0 <- function(x)
{
  is.character(x) && length(x) == 0L
}
# Returns TRUE exactly when `x` is an integer vector with no elements.
is.integer0 <- function(x) {
  length(x) == 0L && is.integer(x)
}
# Returns TRUE exactly when `x` is a numeric (double or integer) vector
# with no elements.
is.numeric0 <- function(x) {
  length(x) == 0L && is.numeric(x)
}
# Compare dates as plain "YYYY-MM-DD" strings for membership tests.
CHP_DATE = as.character(CHP_DATE)
OBS_DATE = as.character(OBS_DATE)
# Indices of CHIRPS rows that do / do not have a matching observation date.
# (The extra as.character() calls are redundant -- both vectors are already
# character at this point.)
ready = which(as.character(CHP_DATE) %in% as.character(OBS_DATE))
minready = which(!as.character(CHP_DATE) %in% as.character(OBS_DATE))
# NOTE(review): this result is discarded in a script run (printed only when
# evaluated interactively) -- inspects unmatched dates on the 1st of a month.
CHP_DATE[minready][substr(CHP_DATE[minready], 9, 10) == "01"]
# Matched subset of the CHIRPS table, written out by the final write.csv2().
aout = data.frame(STA =csx$STA[ready],
Tahun = csx$Tahun[ready],
Bulan = csx$Bulan[ready],
Tanggal = csx$Tanggal[ready],
CH = csx$CH[ready], stringsAsFactors = F)
write.csv2(aout, file = "OUTPUT/Harian/CHP/REV_DAILY_CHP_GABUNG.csv", row.names = F, na = "") | /PET.R | permissive | yosiknorman/ekstrak_chirps_day | R | false | false | 1,436 | r | csx = read.csv2("OUTPUT/Harian/CHP/DAILY_CHP_GABUNG.csv", stringsAsFactors = F)
# Station observations sheet; column names are Indonesian:
# Tahun/Bulan/Tanggal = year/month/day.
DEYS = data.frame(as.matrix(read_xlsx("VALIDASI_joz.xlsx", sheet = "DAILY")), stringsAsFactors = F)
# Zero-pad day and month to two digits so dates can be assembled as strings.
# NOTE(review): assigning padded strings into the column immediately coerces
# it back to character, undoing the as.numeric() conversion -- works, but
# sprintf("%02d", ...) would be cleaner.
DEYS$Tanggal = as.numeric(DEYS$Tanggal)
DEYS$Tanggal[nchar(DEYS$Tanggal) == 1] = paste0("0", DEYS$Tanggal[nchar(DEYS$Tanggal) == 1])
DEYS$Bulan = as.numeric(DEYS$Bulan)
DEYS$Bulan[nchar(DEYS$Bulan) == 1] = paste0("0", DEYS$Bulan[nchar(DEYS$Bulan) == 1])
# Build POSIXct dates (UTC) for both the CHIRPS table and the observations.
CHP_DATE = as.POSIXct(paste0(csx$Tahun, "-", csx$Bulan, "-", csx$Tanggal), tz = "UTC")
#ini_ilang_30 = which(OBS_DATE == "2019-09-30")
OBS_DATE = as.POSIXct(paste0(DEYS$Tahun, "-", DEYS$Bulan, "-", DEYS$Tanggal), tz = "UTC")
# Test for a zero-length character vector (the character analogue of the
# classic is.integer0 idiom).
#
# @param x Object to test.
# @return TRUE iff `x` is a character vector of length 0.
# Consistency fix: use `<-` for top-level assignment like the sibling
# helpers in this file (the original used `=`).
is.character0 <- function(x)
{
  is.character(x) && length(x) == 0L
}
# Returns TRUE exactly when `x` is an integer vector with no elements.
is.integer0 <- function(x) {
  length(x) == 0L && is.integer(x)
}
# Returns TRUE exactly when `x` is a numeric (double or integer) vector
# with no elements.
is.numeric0 <- function(x) {
  length(x) == 0L && is.numeric(x)
}
# Compare dates as plain "YYYY-MM-DD" strings for membership tests.
CHP_DATE = as.character(CHP_DATE)
OBS_DATE = as.character(OBS_DATE)
# Indices of CHIRPS rows that do / do not have a matching observation date.
# (The extra as.character() calls are redundant -- both vectors are already
# character at this point.)
ready = which(as.character(CHP_DATE) %in% as.character(OBS_DATE))
minready = which(!as.character(CHP_DATE) %in% as.character(OBS_DATE))
# NOTE(review): this result is discarded in a script run (printed only when
# evaluated interactively) -- inspects unmatched dates on the 1st of a month.
CHP_DATE[minready][substr(CHP_DATE[minready], 9, 10) == "01"]
# Matched subset of the CHIRPS table, written out by the final write.csv2().
aout = data.frame(STA =csx$STA[ready],
Tahun = csx$Tahun[ready],
Bulan = csx$Bulan[ready],
Tanggal = csx$Tanggal[ready],
CH = csx$CH[ready], stringsAsFactors = F)
write.csv2(aout, file = "OUTPUT/Harian/CHP/REV_DAILY_CHP_GABUNG.csv", row.names = F, na = "") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/intestinalDataSmall.R
\docType{data}
\name{intestinalDataSmall}
\alias{intestinalDataSmall}
\title{Single-cell transcriptome data of intestinal epithelial cells}
\format{
A sparse matrix (using the \pkg{Matrix}) with cells as columns and genes as rows. Entries are raw transcript counts.
}
\usage{
intestinalDataSmall
}
\value{
None
}
\description{
This dataset is a smaller subset of the original dataset, which contains gene expression values, i.e., transcript counts, of 278 intestinal epithelial cells.
The dataset is included for quick testing and examples. Only cells with >10,000 transcripts per cell and only genes with >20 transcript counts in >10 cells were retained.
}
\references{
Grün et al. (2016) Cell Stem Cell 19(2): 266-77 <DOI:10.1016/j.stem.2016.05.010>
(\href{https://pubmed.ncbi.nlm.nih.gov/27345837/}{PubMed})
}
\keyword{datasets}
| /man/intestinalDataSmall.Rd | no_license | dgrun/RaceID3_StemID2_package | R | false | true | 933 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/intestinalDataSmall.R
\docType{data}
\name{intestinalDataSmall}
\alias{intestinalDataSmall}
\title{Single-cell transcriptome data of intestinal epithelial cells}
\format{
A sparse matrix (using the \pkg{Matrix}) with cells as columns and genes as rows. Entries are raw transcript counts.
}
\usage{
intestinalDataSmall
}
\value{
None
}
\description{
This dataset is a smaller subset of the original dataset, which contains gene expression values, i.e., transcript counts, of 278 intestinal epithelial cells.
The dataset is included for quick testing and examples. Only cells with >10,000 transcripts per cell and only genes with >20 transcript counts in >10 cells were retained.
}
\references{
Grün et al. (2016) Cell Stem Cell 19(2): 266-77 <DOI:10.1016/j.stem.2016.05.010>
(\href{https://pubmed.ncbi.nlm.nih.gov/27345837/}{PubMed})
}
\keyword{datasets}
|
## Put comments here that give an overall description of what your
## functions do
# Create a 'special' matrix as a wrapper around the actual matrix x (and
# its inverse x_inv). Keep track of change in value of x (hence x_inv). To
# do this, prohibit direct access to x or x_inv by constructing the
# special matrix as an encapsulation of accessor functions of x and x_inv,
# only through which one may get/ set them. x and x_inv are referred from
# the closures of these accessor methods.
# When x is initialized or set to a new value, x_inv is set to NULL.
# Depending on whether x_inv is null or not at the time of inverse
# computation, the inverse will be newly computed or the precomputed
# (cached) value of inverse returned.
## Write a short comment describing this function
# makeCacheMatrix() takes in an actual matrix x and constructs a list of
# getter and setter methods for x and its inverse x_inv. elements of the
# list returned are pointers to these methods, thus containing the
# reference of their respective closures. it is in these closures that x
# and x_inv are preserved.
# Construct a cache-aware wrapper around the matrix `x`.
#
# The returned object is a list of four accessor closures sharing one
# enclosing environment that holds the matrix and its (lazily computed)
# inverse. Replacing the matrix via set() invalidates the cached inverse.
#
# @param x A matrix to wrap (assumed invertible when used with cacheSolve).
# @return A list with elements get, set, getinverse, setinverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    get = function() x,
    set = function(y) {
      # `<<-` writes into the enclosing environment, so the new matrix and
      # the cleared cache persist across subsequent accessor calls.
      x <<- y
      cached_inverse <<- NULL
    },
    getinverse = function() cached_inverse,
    setinverse = function(inv) cached_inverse <<- inv
  )
}
## Write a short comment describing this function
# cacheSolve() computes inverse of special matrix x only if the value of
# x_inv in it is NULL. otherwise, returns the preserved inverse value. the
# value of x_inv is looked up in the closure of methods contained in x.
# the extra args in the function are passed on to the solve() function.
# Return the inverse of the special matrix `x`, computing it at most once.
#
# On a cache hit the stored inverse is returned immediately; otherwise the
# inverse is computed with solve(), stored back into `x` for future calls,
# and returned.
#
# @param x A cache-aware matrix created by makeCacheMatrix().
# @param ... Extra arguments forwarded to solve().
# @return The matrix inverse of x$get().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    message("calculating new inverse")
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached inverse")
  }
  cached
}
| /cachematrix.R | no_license | mayukh42/ProgrammingAssignment2 | R | false | false | 2,981 | r | ## Put comments here that give an overall description of what your
## functions do
# Create a 'special' matrix as a wrapper around the actual matrix x (and
# its inverse x_inv). Keep track of change in value of x (hence x_inv). To
# do this, prohibit direct access to x or x_inv by constructing the
# special matrix as an encapsulation of accessor functions of x and x_inv,
# only through which one may get/ set them. x and x_inv are referred from
# the closures of these accessor methods.
# When x is initialized or set to a new value, x_inv is set to NULL.
# Depending on whether x_inv is null or not at the time of inverse
# computation, the inverse will be newly computed or the precomputed
# (cached) value of inverse returned.
## Write a short comment describing this function
# makeCacheMatrix() takes in an actual matrix x and constructs a list of
# getter and setter methods for x and its inverse x_inv. elements of the
# list returned are pointers to these methods, thus containing the
# reference of their respective closures. it is in these closures that x
# and x_inv are preserved.
# Construct a cache-aware wrapper around the matrix `x`.
#
# The returned object is a list of four accessor closures sharing one
# enclosing environment that holds the matrix and its (lazily computed)
# inverse. Replacing the matrix via set() invalidates the cached inverse.
#
# @param x A matrix to wrap (assumed invertible when used with cacheSolve).
# @return A list with elements get, set, getinverse, setinverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    get = function() x,
    set = function(y) {
      # `<<-` writes into the enclosing environment, so the new matrix and
      # the cleared cache persist across subsequent accessor calls.
      x <<- y
      cached_inverse <<- NULL
    },
    getinverse = function() cached_inverse,
    setinverse = function(inv) cached_inverse <<- inv
  )
}
## Write a short comment describing this function
# cacheSolve() computes inverse of special matrix x only if the value of
# x_inv in it is NULL. otherwise, returns the preserved inverse value. the
# value of x_inv is looked up in the closure of methods contained in x.
# the extra args in the function are passed on to the solve() function.
# Return the inverse of the special matrix `x`, computing it at most once.
#
# On a cache hit the stored inverse is returned immediately; otherwise the
# inverse is computed with solve(), stored back into `x` for future calls,
# and returned.
#
# @param x A cache-aware matrix created by makeCacheMatrix().
# @param ... Extra arguments forwarded to solve().
# @return The matrix inverse of x$get().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    message("calculating new inverse")
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached inverse")
  }
  cached
}
|
library(tidyverse)
library(sf)
library(rnaturalearth)
library(janitor)
# TidyTuesday 2020-11-24: Washington hiking trails over a state outline.
hike_data <- readr::read_rds(url('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-11-24/hike_data.rds'))

# Trail geometries; keep only the name (join key) and geometry columns.
trails_shp <- read_sf(here::here("2020-week48", "data", "WA_RCO_Trails_2017-shp", "WA_RCO_Trails_2017.shp")) %>%
  clean_names() %>%
  select(name = tr_nm, geometry)

# Washington state outline for the base map.
wa <- ne_states(iso_a2 = "US", returnclass = "sf") %>%
  filter(name == "Washington")

# Attach geometries to the hike metadata; drop hikes with no matching trail.
hike_shp <- hike_data %>%
  left_join(trails_shp) %>%
  filter(lengths(geometry) > 0)

# Build the plot first, then save it explicitly. The original chained
# `+ ggsave(...)` onto the plot, which only worked by accident in old
# ggplot2 versions (ggsave() saved last_plot() and returned invisibly);
# with current ggsave() it saves the wrong/absent plot and the returned
# filename cannot be added to a ggplot.
p <- ggplot(hike_shp) +
  geom_sf(data = wa) +
  geom_sf(aes(geometry = geometry), size = 0.15) +
  theme_void()

ggsave(here::here("temp", paste0("washington-hiking-", format(Sys.time(), "%Y%m%d_%H%M%S"), ".png")), plot = p, dpi = 320)
| /2020-week48/deprecated/washington-hiking-sf.R | permissive | FBaudron/tidytuesday | R | false | false | 786 | r | library(tidyverse)
library(sf)
library(rnaturalearth)
library(janitor)
# TidyTuesday 2020-11-24: Washington hiking trails over a state outline.
hike_data <- readr::read_rds(url('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-11-24/hike_data.rds'))

# Trail geometries; keep only the name (join key) and geometry columns.
trails_shp <- read_sf(here::here("2020-week48", "data", "WA_RCO_Trails_2017-shp", "WA_RCO_Trails_2017.shp")) %>%
  clean_names() %>%
  select(name = tr_nm, geometry)

# Washington state outline for the base map.
wa <- ne_states(iso_a2 = "US", returnclass = "sf") %>%
  filter(name == "Washington")

# Attach geometries to the hike metadata; drop hikes with no matching trail.
hike_shp <- hike_data %>%
  left_join(trails_shp) %>%
  filter(lengths(geometry) > 0)

# Build the plot first, then save it explicitly. The original chained
# `+ ggsave(...)` onto the plot, which only worked by accident in old
# ggplot2 versions (ggsave() saved last_plot() and returned invisibly);
# with current ggsave() it saves the wrong/absent plot and the returned
# filename cannot be added to a ggplot.
p <- ggplot(hike_shp) +
  geom_sf(data = wa) +
  geom_sf(aes(geometry = geometry), size = 0.15) +
  theme_void()

ggsave(here::here("temp", paste0("washington-hiking-", format(Sys.time(), "%Y%m%d_%H%M%S"), ".png")), plot = p, dpi = 320)
|
# Scratch read of whatever is on the macOS clipboard (pbpaste).
# NOTE(review): the very next line wipes the workspace, so `y` is discarded.
y <- read.table(pipe("pbpaste"))
# NOTE(review): rm(list = ls()) in a script is an anti-pattern -- it clears
# the user's whole workspace.
rm(list = ls())
# Global loop counter consumed by ask_for_values() below -- TODO confirm;
# better initialized inside that function.
i <- 1
# One-column numeric matrix of blood-gas parameters, addressed by rowname.
blood_values <- matrix(
c(rep(0, 8)),
dimnames = list(c("Hb", "fO2art", "fO2mven", "PaO2",
"PaCO2", "PvO2", "fiO2", "Patm")))
# Prompt the user for each blood-gas parameter in `blood_values`.
#
# Walks over the rows of the one-column matrix, asking for a numeric value
# for each named parameter until valid (non-NA) input is entered.
#
# @param blood_values One-column numeric matrix whose rownames are the
#   parameter labels (e.g. "Hb", "PaO2", ...).
# @return The same matrix with every row filled in from user input.
ask_for_values <- function(blood_values){
  # Iterate with a local index instead of relying on the global counter `i`
  # that the original read from the calling environment.
  for (i in seq_len(nrow(blood_values))) {
    input <- NA
    while (is.na(input)) {
      # Use rownames() for the prompt label: `names(blood_values[i, ])` is
      # NULL for a one-column matrix (row extraction drops to an unnamed
      # scalar), so the original prompt showed only "is: ".
      input <- as.numeric(readline(cat(rownames(blood_values)[i], "is: ")))
    }
    blood_values[i, ] <- input
  }
  blood_values
}
# Remember to assign the function's return value -- R functions cannot
# modify their arguments in place, so the filled-in matrix must be captured.
blood_gas <- ask_for_values(blood_values)
# Partial alveolar O2 pressure (alveolar gas equation):
#   PAO2 = FiO2 * (Patm - PH2O) - PaCO2 / RQ
# with water vapour pressure PH2O taken as 46 mmHg and RQ = 0.8.
PAO2 <- as.numeric(blood_gas["fiO2",]*(blood_gas["Patm",]-46)-(blood_gas["PaCO2",]/0.8))
# End-capillary oxygen content: CcO2 = Hb * 1.39 + 0.0031 * PAO2
# (no saturation factor -- end-capillary blood treated as fully saturated).
Cco2 <- as.numeric(blood_gas["Hb",]*1.39)+(0.0031*PAO2)
# Mixed venous oxygen content.
# NOTE(review): this uses the *arterial* saturation (fO2art) and the
# *alveolar* PO2 (PAO2); a mixed venous content would normally use fO2mven
# and PvO2 -- confirm against the intended shunt formula.
Cvo2 <- as.numeric(blood_gas["Hb",]*1.39*(blood_gas["fO2art",]/100))+(0.0031*PAO2)
# --- Scratch exploration of random-number generators below (unrelated to
# the shunt calculation above). ---
runif(100, 0,1) # uniform draws on [0, 1]
rnorm(100, 0,1) # standard normal draws (bell-shaped)
rbinom(100, 5, .9) # 100 binomial draws of size 5 with success prob 0.9
?rbinom
# NOTE(review): `x` is never defined in this script; these lines only work
# with leftover objects from an interactive session.
table(x)
xy <- rbinom(100, 1, .6)
table(xy)
# NOTE(review): `a`, `low` and `high` are likewise undefined here, and
# ggplot2 is never loaded in this file.
ggplot(x, aes(a)) +
geom_histogram(binwidth = .1)+
geom_vline(aes(xintercept = low, col ="red"))+
geom_vline(aes(xintercept = high, col ="red"))
| /SHUNT_CALC V2.R | no_license | GrigorijSchleifer/Pulmonary_shunt | R | false | false | 1,480 | r | y <- read.table(pipe("pbpaste"))
# NOTE(review): rm(list = ls()) in a script is an anti-pattern -- it clears
# the user's whole workspace.
rm(list = ls())
# Global loop counter consumed by ask_for_values() below -- TODO confirm;
# better initialized inside that function.
i <- 1
# One-column numeric matrix of blood-gas parameters, addressed by rowname.
blood_values <- matrix(
c(rep(0, 8)),
dimnames = list(c("Hb", "fO2art", "fO2mven", "PaO2",
"PaCO2", "PvO2", "fiO2", "Patm")))
# Prompt the user for each blood-gas parameter in `blood_values`.
#
# Walks over the rows of the one-column matrix, asking for a numeric value
# for each named parameter until valid (non-NA) input is entered.
#
# @param blood_values One-column numeric matrix whose rownames are the
#   parameter labels (e.g. "Hb", "PaO2", ...).
# @return The same matrix with every row filled in from user input.
ask_for_values <- function(blood_values){
  # Iterate with a local index instead of relying on the global counter `i`
  # that the original read from the calling environment.
  for (i in seq_len(nrow(blood_values))) {
    input <- NA
    while (is.na(input)) {
      # Use rownames() for the prompt label: `names(blood_values[i, ])` is
      # NULL for a one-column matrix (row extraction drops to an unnamed
      # scalar), so the original prompt showed only "is: ".
      input <- as.numeric(readline(cat(rownames(blood_values)[i], "is: ")))
    }
    blood_values[i, ] <- input
  }
  blood_values
}
# Remember to assign the function's return value -- R functions cannot
# modify their arguments in place, so the filled-in matrix must be captured.
blood_gas <- ask_for_values(blood_values)
# Partial alveolar O2 pressure (alveolar gas equation):
#   PAO2 = FiO2 * (Patm - PH2O) - PaCO2 / RQ
# with water vapour pressure PH2O taken as 46 mmHg and RQ = 0.8.
PAO2 <- as.numeric(blood_gas["fiO2",]*(blood_gas["Patm",]-46)-(blood_gas["PaCO2",]/0.8))
# End-capillary oxygen content: CcO2 = Hb * 1.39 + 0.0031 * PAO2
# (no saturation factor -- end-capillary blood treated as fully saturated).
Cco2 <- as.numeric(blood_gas["Hb",]*1.39)+(0.0031*PAO2)
# Mixed venous oxygen content.
# NOTE(review): this uses the *arterial* saturation (fO2art) and the
# *alveolar* PO2 (PAO2); a mixed venous content would normally use fO2mven
# and PvO2 -- confirm against the intended shunt formula.
Cvo2 <- as.numeric(blood_gas["Hb",]*1.39*(blood_gas["fO2art",]/100))+(0.0031*PAO2)
# --- Scratch exploration of random-number generators below (unrelated to
# the shunt calculation above). ---
runif(100, 0,1) # uniform draws on [0, 1]
rnorm(100, 0,1) # standard normal draws (bell-shaped)
rbinom(100, 5, .9) # 100 binomial draws of size 5 with success prob 0.9
?rbinom
# NOTE(review): `x` is never defined in this script; these lines only work
# with leftover objects from an interactive session.
table(x)
xy <- rbinom(100, 1, .6)
table(xy)
# NOTE(review): `a`, `low` and `high` are likewise undefined here, and
# ggplot2 is never loaded in this file.
ggplot(x, aes(a)) +
geom_histogram(binwidth = .1)+
geom_vline(aes(xintercept = low, col ="red"))+
geom_vline(aes(xintercept = high, col ="red"))
|
# ABC-SMC parameter inference for the CHESS spatial tumour model, fit to
# single-cell organoid phylogenies (branch-length, "BL", variant).
library(CHESS)
library(ape)
library(data.table)
library(transport)
library(magrittr)
# Command line: patient index, particles per round (N), number of SMC
# rounds, cores, and simulation grid side length.
args <- commandArgs(trailingOnly = TRUE)
#args <- c(1, 200, 20, 8, 200)
patient <- as.numeric(args[1])
N <- as.numeric(args[2])
rounds <- as.numeric(args[3])
ncores <- as.numeric(args[4])
grid <- as.numeric(args[5])
# path.to.data <- 'data/supp/inference_sc_organoids/target_data'
# path.to.output <- 'data/supp/inference_sc_organoids/inferred_params/BT/'
path.to.data <- '../data/target_data/'
path.to.output <- '../data/inferred_params/BL/'
# Target phylogeny for this patient; drop the 'normal' tip and every tip
# whose 4th label character is 'N' (normal-sample tips -- TODO confirm).
target.tree <- read.nexus(paste0(
path.to.data,'/pt',patient,'.subs.csv.nex.tre'))
target.tree <- drop.tip(
target.tree,
c('normal',
target.tree$tip.label[substr(target.tree$tip.label, 4, 4) == 'N']))
target.tree.size <- length(target.tree$tip.label)
# Prior ranges for the model parameters; fix = TRUE pins a parameter to a
# previously inferred value instead of sampling it.
params <- data.table(
name = c('mu', 't', 's', 'd', 'a'),
min = c(1, 4, 1, 0, .1),
max = c(3000, 50, 4.5, .9, 1),
fix = c(T, T, F, F, F))
params.to.rec <- 's_d_a'
# vectors of inferred posterior mode values. Set the corresponding fix=T
# in the params table above and rerun the inference to recover
# the remaining parameters:
mu.rls <- c(1096, 308, 137)
t.rls <- c(12, 19, 27)
d.rls <- c(.2, .4, .5)
# NOTE(review): c() is NULL, so s.rls[patient] / a.rls[patient] below
# evaluate to NULL -- presumably intentional while those parameters are
# being inferred rather than fixed.
s.rls <- c()
a.rls <- c()
# Run the ABC-SMC fit; target.popsize is half the area of a disc inscribed
# in the square grid (pi approximated as 3.14).
ABCSMCwithTreeSamplesBL(
sim.id = patient,
N = N,
rounds = rounds,
ncores = ncores,
grid = grid,
params = params,
nsamples = target.tree.size,
target.tree = target.tree,
target.popsize = .5*3.14*((grid/2)^2),
params.to.rec = params.to.rec,
mu.rl = mu.rls[patient],
s.rl = s.rls[patient],
t.rl = t.rls[patient],
d.rl = d.rls[patient],
a.rl = a.rls[patient],
output.dir = path.to.output)
| /scripts/inference/run_inference_sc_organoids.R | no_license | kchkhaidze/chkhaidze_et_al_2019_figures | R | false | false | 1,655 | r | library(CHESS)
# ABC-SMC parameter inference for the CHESS spatial tumour model, fit to
# single-cell organoid phylogenies (branch-length, "BL", variant).
library(ape)
library(data.table)
library(transport)
library(magrittr)
# Command line: patient index, particles per round (N), number of SMC
# rounds, cores, and simulation grid side length.
args <- commandArgs(trailingOnly = TRUE)
#args <- c(1, 200, 20, 8, 200)
patient <- as.numeric(args[1])
N <- as.numeric(args[2])
rounds <- as.numeric(args[3])
ncores <- as.numeric(args[4])
grid <- as.numeric(args[5])
# path.to.data <- 'data/supp/inference_sc_organoids/target_data'
# path.to.output <- 'data/supp/inference_sc_organoids/inferred_params/BT/'
path.to.data <- '../data/target_data/'
path.to.output <- '../data/inferred_params/BL/'
# Target phylogeny for this patient; drop the 'normal' tip and every tip
# whose 4th label character is 'N' (normal-sample tips -- TODO confirm).
target.tree <- read.nexus(paste0(
path.to.data,'/pt',patient,'.subs.csv.nex.tre'))
target.tree <- drop.tip(
target.tree,
c('normal',
target.tree$tip.label[substr(target.tree$tip.label, 4, 4) == 'N']))
target.tree.size <- length(target.tree$tip.label)
# Prior ranges for the model parameters; fix = TRUE pins a parameter to a
# previously inferred value instead of sampling it.
params <- data.table(
name = c('mu', 't', 's', 'd', 'a'),
min = c(1, 4, 1, 0, .1),
max = c(3000, 50, 4.5, .9, 1),
fix = c(T, T, F, F, F))
params.to.rec <- 's_d_a'
# vectors of inferred posterior mode values. Set the corresponding fix=T
# in the params table above and rerun the inference to recover
# the remaining parameters:
mu.rls <- c(1096, 308, 137)
t.rls <- c(12, 19, 27)
d.rls <- c(.2, .4, .5)
# NOTE(review): c() is NULL, so s.rls[patient] / a.rls[patient] below
# evaluate to NULL -- presumably intentional while those parameters are
# being inferred rather than fixed.
s.rls <- c()
a.rls <- c()
# Run the ABC-SMC fit; target.popsize is half the area of a disc inscribed
# in the square grid (pi approximated as 3.14).
ABCSMCwithTreeSamplesBL(
sim.id = patient,
N = N,
rounds = rounds,
ncores = ncores,
grid = grid,
params = params,
nsamples = target.tree.size,
target.tree = target.tree,
target.popsize = .5*3.14*((grid/2)^2),
params.to.rec = params.to.rec,
mu.rl = mu.rls[patient],
s.rl = s.rls[patient],
t.rl = t.rls[patient],
d.rl = d.rls[patient],
a.rl = a.rls[patient],
output.dir = path.to.output)
|
# NOTE(review): machine-specific absolute path plus setwd() is fragile in
# shared scripts; prefer relative paths or here::here().
working_dir="/home/theuer/Dropbox/Studium Informatik/10. Semester/KaHyParMaxFlow"
#working_dir="C:\\Users\\tobia\\Dropbox\\Studium Informatik\\10. Semester\\KaHyParMaxFlow\\experiments"
setwd(working_dir)
# Shared helpers (graphclass(), theme_complete_bw(), DB utilities, ...).
source(paste(working_dir, "experiments/flow_network_functions.R", sep="/"))
# Collapse repeated experiment runs into a single row of summary values:
# means of the structural/quality columns plus the minimum observed timings.
# Right-hand column names refer to the raw experiment table.
aggreg <- function(df) {
  data.frame(
    avg_hn_degree = mean(df$avgHypernodeDegree),
    avg_he_size = mean(df$avgHyperedgeSize),
    avg_num_nodes = mean(df$flow_network_num_nodes),
    avg_num_edges = mean(df$flow_network_num_edges),
    avg_cut = mean(df$cut),
    avg_max_flow = mean(df$max_flow),
    min_network_build_time = min(df$min_network_build_time),
    avg_network_build_time = mean(df$avg_network_build_time),
    min_max_flow_time = min(df$min_max_flow_time),
    avg_max_flow_time = mean(df$avg_max_flow_time)
  )
}
# Load all experiment rows from the SQLite results database.
db_name=paste(working_dir,"experiments/flow_network_experiment/global_relabeling_test.db",sep="/")
flow_network_db = dbGetQuery(dbConnect(SQLite(), dbname=db_name), "select * from experiments")
# Drop empty runs and bucket hypergraph sizes into the 10000 / 25000 bins
# used for faceting later.
flow_network_db <- flow_network_db[flow_network_db$num_hypernodes != 0,]
flow_network_db$num_hypernodes <- ifelse(flow_network_db$num_hypernodes > 10000, 25000, flow_network_db$num_hypernodes)
flow_network_db$num_hypernodes <- ifelse(flow_network_db$num_hypernodes > 5000 & flow_network_db$num_hypernodes <= 10000, 10000, flow_network_db$num_hypernodes)
# Aggregate repeated runs per configuration (aggreg defined above) and tag
# each row with its graph class via graphclass() from the sourced helpers.
flow_network_db <- ddply(flow_network_db, c("hypergraph","flow_network","flow_algorithm","num_hypernodes","global_relabeling"), aggreg)
flow_network_db$type <- as.factor(apply(flow_network_db, 1, function(x) graphclass(x)))
# The columns come back as text after the row-wise apply(); coerce the
# numeric ones back via as.character() -> as.numeric().
flow_network_db$avg_cut <- as.numeric(as.character(flow_network_db$avg_cut))
flow_network_db$avg_max_flow <- as.numeric(as.character(flow_network_db$avg_max_flow))
flow_network_db$num_hypernodes <- as.numeric(as.character(flow_network_db$num_hypernodes))
flow_network_db$avg_num_nodes <- as.numeric(as.character(flow_network_db$avg_num_nodes))
flow_network_db$avg_num_edges <- as.numeric(as.character(flow_network_db$avg_num_edges))
flow_network_db$min_network_build_time <- as.numeric(as.character(flow_network_db$min_network_build_time))
flow_network_db$avg_network_build_time <- as.numeric(as.character(flow_network_db$avg_network_build_time))
flow_network_db$min_max_flow_time <- as.numeric(as.character(flow_network_db$min_max_flow_time))
flow_network_db$avg_max_flow_time <- as.numeric(as.character(flow_network_db$avg_max_flow_time))
flow_network_db$global_relabeling <- as.numeric(as.character(flow_network_db$global_relabeling))
# Categorical columns become factors for plotting/faceting.
flow_network_db$num_hypernodes <- factor(flow_network_db$num_hypernodes)
flow_network_db$type <- factor(flow_network_db$type)
flow_network_db$flow_network <- factor(flow_network_db$flow_network)
flow_network_db$flow_algorithm <- factor(flow_network_db$flow_algorithm)
# Hand-picked ordering of the graph-class facet rows.
flow_network_db$type <- factor(flow_network_db$type, levels = levels(factor(flow_network_db$type))[c(1,3,2,5,4,6)])
##############################################################################################################################
# Convert mean max-flow running times into relative speed-ups (percent)
# against the global_relabeling == 1 baseline of each
# (num_hypernodes, type) group. Baseline rows therefore become 0.
#
# @param db Aggregated experiment table (one row per configuration/run set).
# @return Data frame with columns num_hypernodes, type, global_relabeling,
#   avg_max_flow_time (now a speed-up percentage).
speedup <- function(db) {
  summarise_time <- function(chunk) {
    data.frame(avg_max_flow_time = mean(chunk$avg_max_flow_time))
  }
  res <- ddply(db, c("num_hypernodes", "type", "global_relabeling"), summarise_time)
  for (hn in levels(factor(res$num_hypernodes))) {
    for (tp in levels(factor(res$type))) {
      group <- res$num_hypernodes == hn & res$type == tp
      baseline <- res$avg_max_flow_time[group & res$global_relabeling == 1]
      res$avg_max_flow_time[group] <-
        (baseline / res$avg_max_flow_time[group] - 1.0) * 100.0
    }
  }
  res
}
# Bar chart of relative max-flow speed-ups per global-relabeling setting,
# faceted by graph class (rows) and hypergraph size (columns).
#
# @param db Aggregated experiment table; passed through speedup().
# @param title Plot title (empty by default).
# @return A ggplot object; print/plot it to render.
max_flow_time_plot <- function(db, title = "") {
  speedups <- speedup(db)
  ggplot(speedups, aes(x = global_relabeling, y = avg_max_flow_time)) +
    geom_bar(stat = "identity", position = "dodge") +
    facet_grid(type ~ num_hypernodes) +
    ggtitle(title) +
    ylab("Speed-Up in t[%]") +
    xlab("Global Relabeling Parameter") +
    theme_complete_bw()
}
##############################################################################################################################
plot(max_flow_time_plot(flow_network_db))
| /experiments/flow_network_experiment/global_relabeling_stat.R | no_license | kittobi1992/KaHyParMaxFlow | R | false | false | 4,405 | r | working_dir="/home/theuer/Dropbox/Studium Informatik/10. Semester/KaHyParMaxFlow"
#working_dir="C:\\Users\\tobia\\Dropbox\\Studium Informatik\\10. Semester\\KaHyParMaxFlow\\experiments"
setwd(working_dir)
source(paste(working_dir, "experiments/flow_network_functions.R", sep="/"))
aggreg = function(df) data.frame(avg_hn_degree=mean(df$avgHypernodeDegree),
avg_he_size=mean(df$avgHyperedgeSize),
avg_num_nodes=mean(df$flow_network_num_nodes),
avg_num_edges=mean(df$flow_network_num_edges),
avg_cut=mean(df$cut),
avg_max_flow=mean(df$max_flow),
min_network_build_time=min(df$min_network_build_time),
avg_network_build_time=mean(df$avg_network_build_time),
min_max_flow_time=min(df$min_max_flow_time),
avg_max_flow_time=mean(df$avg_max_flow_time))
db_name=paste(working_dir,"experiments/flow_network_experiment/global_relabeling_test.db",sep="/")
flow_network_db = dbGetQuery(dbConnect(SQLite(), dbname=db_name), "select * from experiments")
flow_network_db <- flow_network_db[flow_network_db$num_hypernodes != 0,]
flow_network_db$num_hypernodes <- ifelse(flow_network_db$num_hypernodes > 10000, 25000, flow_network_db$num_hypernodes)
flow_network_db$num_hypernodes <- ifelse(flow_network_db$num_hypernodes > 5000 & flow_network_db$num_hypernodes <= 10000, 10000, flow_network_db$num_hypernodes)
flow_network_db <- ddply(flow_network_db, c("hypergraph","flow_network","flow_algorithm","num_hypernodes","global_relabeling"), aggreg)
flow_network_db$type <- as.factor(apply(flow_network_db, 1, function(x) graphclass(x)))
flow_network_db$avg_cut <- as.numeric(as.character(flow_network_db$avg_cut))
flow_network_db$avg_max_flow <- as.numeric(as.character(flow_network_db$avg_max_flow))
flow_network_db$num_hypernodes <- as.numeric(as.character(flow_network_db$num_hypernodes))
flow_network_db$avg_num_nodes <- as.numeric(as.character(flow_network_db$avg_num_nodes))
flow_network_db$avg_num_edges <- as.numeric(as.character(flow_network_db$avg_num_edges))
flow_network_db$min_network_build_time <- as.numeric(as.character(flow_network_db$min_network_build_time))
flow_network_db$avg_network_build_time <- as.numeric(as.character(flow_network_db$avg_network_build_time))
flow_network_db$min_max_flow_time <- as.numeric(as.character(flow_network_db$min_max_flow_time))
flow_network_db$avg_max_flow_time <- as.numeric(as.character(flow_network_db$avg_max_flow_time))
flow_network_db$global_relabeling <- as.numeric(as.character(flow_network_db$global_relabeling))
flow_network_db$num_hypernodes <- factor(flow_network_db$num_hypernodes)
flow_network_db$type <- factor(flow_network_db$type)
flow_network_db$flow_network <- factor(flow_network_db$flow_network)
flow_network_db$flow_algorithm <- factor(flow_network_db$flow_algorithm)
flow_network_db$type <- factor(flow_network_db$type, levels = levels(factor(flow_network_db$type))[c(1,3,2,5,4,6)])
##############################################################################################################################
# Convert absolute max-flow run times into relative speed-ups (in percent).
#
# The timings are first averaged per (num_hypernodes, type, global_relabeling)
# cell; then, within each (num_hypernodes, type) group, the run with
# global_relabeling == 1 serves as baseline and every time t is replaced by
# (t_baseline / t - 1) * 100. The baseline row itself becomes 0.
#
# Args:
#   db: data frame with columns num_hypernodes, type, global_relabeling and
#       avg_max_flow_time (as produced by the aggregation above).
# Returns:
#   Data frame of grouped means with avg_max_flow_time rewritten in place as
#   a percentage speed-up relative to the global_relabeling == 1 baseline.
speedup <- function(db) {
# Per-cell mean of the average max-flow time.
aggreg <- function(df) data.frame(avg_max_flow_time=mean(df$avg_max_flow_time))
df <- ddply(db, c("num_hypernodes", "type", "global_relabeling"), aggreg)
for( num_hn in levels(factor(df$num_hypernodes)) ) {
for( type in levels(factor(df$type))) {
# NOTE: the baseline value (global_relabeling == 1) is read on the
# right-hand side of this single assignment, so the whole group is
# rescaled consistently BEFORE the baseline row itself is overwritten.
# Do not split this statement.
df[df$num_hypernodes == num_hn & df$type == type,]$avg_max_flow_time <- (df[df$num_hypernodes == num_hn & df$type == type & df$global_relabeling == 1,]$avg_max_flow_time /
df[df$num_hypernodes == num_hn & df$type == type,]$avg_max_flow_time - 1.0) * 100.0
}
}
return(df)
}
# Faceted bar chart of the percentage speed-up (see speedup()) against the
# global relabeling parameter, one panel per (type, num_hypernodes) pair.
#
# Args:
#   db:    raw timing data frame handed to speedup().
#   title: optional plot title (default: none).
# Returns:
#   A ggplot object; the caller decides when to print it.
max_flow_time_plot <- function(db, title="") {
speedup_df <- speedup(db)
p <- ggplot(speedup_df, aes(x = global_relabeling, y = avg_max_flow_time))
p <- p + geom_bar(stat = "identity", position = "dodge")
p <- p + facet_grid(type ~ num_hypernodes)
p <- p + ggtitle(title)
p <- p + ylab("Speed-Up in t[%]")
p <- p + xlab("Global Relabeling Parameter")
p + theme_complete_bw()
}
##############################################################################################################################
plot(max_flow_time_plot(flow_network_db))
|
#################################################################################
# escrever uma função para saber quantas partes de cada tipo (hole ou não-hole) há em cada multi-poligono
# o output é uma lista com duas componentes
# $positivos: vector de número de partes em que hole=FALSE
# $negativos: vector de número de partes em que hole=TRUE
# Count, for every (multi-)polygon in an sp-style object, how many of its
# parts are plain rings (hole = FALSE) and how many are holes (hole = TRUE).
#
# Args:
#   spdf: object with an S4 slot @polygons; each element carries a list of
#         parts in its @Polygons slot, and each part has a logical @hole slot
#         (the sp SpatialPolygons layout).
# Returns:
#   A list with two numeric vectors, one entry per polygon:
#     $positivos: number of parts with hole = FALSE
#     $negativos: number of parts with hole = TRUE
tipo.partes <- function(spdf)
{
  # Logical hole flag for every part of one polygon.
  hole.flags <- function(poly) vapply(poly@Polygons, function(p) p@hole, logical(1))
  # Preallocated vapply replaces the original grow-by-c() loops (O(n^2));
  # this also handles an empty @polygons list gracefully (returns numeric(0)).
  numero.partes.hole.false <- vapply(spdf@polygons,
                                     function(poly) sum(!hole.flags(poly)),
                                     numeric(1))
  numero.partes.hole.true <- vapply(spdf@polygons,
                                    function(poly) sum(hole.flags(poly)),
                                    numeric(1))
  return(list(positivos = numero.partes.hole.false, negativos = numero.partes.hole.true))
}
tipo.partes(icnf)
| /function.tipo.partes.r | no_license | manuelcampagnolo/vector_datasets_R | R | false | false | 985 | r | #################################################################################
# escrever uma função para saber quantas partes de cada tipo (hole ou não-hole) há em cada multi-poligono
# o output é uma lista com duas componentes
# $positivos: vector de número de partes em que hole=FALSE
# $negativos: vector de número de partes em que hole=TRUE
# Count, for every (multi-)polygon in an sp-style object, how many of its
# parts are plain rings (hole = FALSE) and how many are holes (hole = TRUE).
#
# Args:
#   spdf: object with an S4 slot @polygons; each element carries a list of
#         parts in its @Polygons slot, and each part has a logical @hole slot
#         (the sp SpatialPolygons layout).
# Returns:
#   A list with two numeric vectors, one entry per polygon:
#     $positivos: number of parts with hole = FALSE
#     $negativos: number of parts with hole = TRUE
tipo.partes <- function(spdf)
{
  # Logical hole flag for every part of one polygon.
  hole.flags <- function(poly) vapply(poly@Polygons, function(p) p@hole, logical(1))
  # Preallocated vapply replaces the original grow-by-c() loops (O(n^2));
  # this also handles an empty @polygons list gracefully (returns numeric(0)).
  numero.partes.hole.false <- vapply(spdf@polygons,
                                     function(poly) sum(!hole.flags(poly)),
                                     numeric(1))
  numero.partes.hole.true <- vapply(spdf@polygons,
                                    function(poly) sum(hole.flags(poly)),
                                    numeric(1))
  return(list(positivos = numero.partes.hole.false, negativos = numero.partes.hole.true))
}
tipo.partes(icnf)
|
### =========================================================================
### Set operations
### -------------------------------------------------------------------------
###
### The methods below are endomorphisms with respect to their first argument
### 'x'. They propagate the names and metadata columns.
###
### S3/S4 combo for union.Vector
### union(): concatenate both vectors and drop duplicates; keeps the first
### occurrence of each element, so metadata comes from the earlier argument.
setMethod("union", c("Vector", "Vector"), function(x, y) unique(c(x, y)))
union.Vector <- function(x, y, ...) union(x, y, ...)
### S3/S4 combo for intersect.Vector
### intersect(): unique elements of 'x' that also occur in 'y'
### (order and metadata taken from 'x').
setMethod("intersect", c("Vector", "Vector"),
function(x, y) unique(x[x %in% y]))
intersect.Vector <- function(x, y, ...) intersect(x, y, ...)
### S3/S4 combo for setdiff.Vector
### setdiff(): unique elements of 'x' not present in 'y'.
setMethod("setdiff", c("Vector", "Vector"),
function(x, y) unique(x[!(x %in% y)]))
setdiff.Vector <- function(x, y, ...) setdiff(x, y, ...)
### S3/S4 combo for setequal.Vector
### setequal(): TRUE iff 'x' and 'y' contain the same set of elements
### (mutual %in%), ignoring order and multiplicity. Returns a logical scalar,
### not an endomorphism like the three methods above.
setMethod("setequal", c("Vector", "Vector"),
function(x, y) all(x %in% y) && all(y %in% x))
setequal.Vector <- function(x, y, ...) setequal(x, y, ...)
| /R/Vector-setops.R | no_license | Bioconductor/S4Vectors | R | false | false | 1,053 | r | ### =========================================================================
### Set operations
### -------------------------------------------------------------------------
###
### The methods below are endomorphisms with respect to their first argument
### 'x'. They propagates the names and metadata columns.
###
### S3/S4 combo for union.Vector
### union(): concatenate both vectors and drop duplicates; keeps the first
### occurrence of each element, so metadata comes from the earlier argument.
setMethod("union", c("Vector", "Vector"), function(x, y) unique(c(x, y)))
union.Vector <- function(x, y, ...) union(x, y, ...)
### S3/S4 combo for intersect.Vector
### intersect(): unique elements of 'x' that also occur in 'y'
### (order and metadata taken from 'x').
setMethod("intersect", c("Vector", "Vector"),
function(x, y) unique(x[x %in% y]))
intersect.Vector <- function(x, y, ...) intersect(x, y, ...)
### S3/S4 combo for setdiff.Vector
### setdiff(): unique elements of 'x' not present in 'y'.
setMethod("setdiff", c("Vector", "Vector"),
function(x, y) unique(x[!(x %in% y)]))
setdiff.Vector <- function(x, y, ...) setdiff(x, y, ...)
### S3/S4 combo for setequal.Vector
### setequal(): TRUE iff 'x' and 'y' contain the same set of elements
### (mutual %in%), ignoring order and multiplicity. Returns a logical scalar,
### not an endomorphism like the three methods above.
setMethod("setequal", c("Vector", "Vector"),
function(x, y) all(x %in% y) && all(y %in% x))
setequal.Vector <- function(x, y, ...) setequal(x, y, ...)
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## This function takes a matrix argument and return a list with
## get and set matrix and get and set inverse matrix
# Create a matrix wrapper that can cache its inverse.
#
# Returns a list of four closures sharing one environment:
#   $set(y)          store a new matrix and invalidate any cached inverse
#   $get()           return the stored matrix
#   $setinverse(inv) store a computed inverse
#   $getinverse()    return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(y) {
    # New data invalidates the previously cached inverse.
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cached_inv <<- inverse
  getinverse <- function() cached_inv
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## It is to check if inverse has been ready. If so, get inverse from cache
## otherwise calculate inverse
# Return the inverse of the special "matrix" created by makeCacheMatrix().
# The inverse is computed at most once: later calls return the cached value
# and announce the cache hit via message().
#
# Args:
#   x:   list produced by makeCacheMatrix()
#   ...: extra arguments forwarded to solve()
# Returns:
#   A matrix that is the inverse of x$get().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  # Cache hit: nothing to compute.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert, remember, return.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
| /cachematrix.R | no_license | Judyzh/ProgrammingAssignment2 | R | false | false | 1,131 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## This function takes a matrix argument and return a list with
## get and set matrix and get and set inverse matrix
# Create a matrix wrapper that can cache its inverse.
#
# Returns a list of four closures sharing one environment:
#   $set(y)          store a new matrix and invalidate any cached inverse
#   $get()           return the stored matrix
#   $setinverse(inv) store a computed inverse
#   $getinverse()    return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(y) {
    # New data invalidates the previously cached inverse.
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cached_inv <<- inverse
  getinverse <- function() cached_inv
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## It is to check if inverse has been ready. If so, get inverse from cache
## otherwise calculate inverse
# Return the inverse of the special "matrix" created by makeCacheMatrix().
# The inverse is computed at most once: later calls return the cached value
# and announce the cache hit via message().
#
# Args:
#   x:   list produced by makeCacheMatrix()
#   ...: extra arguments forwarded to solve()
# Returns:
#   A matrix that is the inverse of x$get().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  # Cache hit: nothing to compute.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert, remember, return.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
############### load data #############
hit <- "ORMDL3"
nearby <- "ERBB2"
load("data/CCLE/CCLE_Expression_clean.RData")
load("data/GARP/garp.score.RData")
rm(expr.ccle.raw, garp.data.raw)
############### clean data ##############
### ccle expression data
expr.ccle <- expr.ccle[,grep("BREAST", colnames(expr.ccle))]
colnames(expr.ccle) <- gsub("_BREAST", "", colnames(expr.ccle))
expr.ccle <- expr.ccle[,sort(colnames(expr.ccle))]
### grap score data
colnames(garp.score) <- toupper(gsub("[.]", "", colnames(garp.score)))
### garp cancer cancer cell lines
garp.cellLines$Cell.line <- toupper(gsub("-| ", "", garp.cellLines$Cell.line))
row.names(garp.cellLines) <- garp.cellLines$Cell.line; garp.cellLines$Cell.line <- NULL
### select breast cancer cell lines
brca.cell <- row.names(garp.cellLines)[garp.cellLines$Tumor.type == "Breast"]
brca.cell <- intersect(brca.cell, colnames(expr.ccle))
expr.ccle <- as.matrix(expr.ccle[,brca.cell])
garp.score <- as.matrix(garp.score[,brca.cell])
garp.cellLines <- garp.cellLines[brca.cell,]
############ gene essentiality and expression #############
### all subtypes
pdf("~/project/ORMDL3/metabric/trimodal/figures/gene.essentiality.pdf", width=5, height=5)
par(mfrow = c(2,2))
# Scatter plot of GARP gene-essentiality score against CCLE expression for one
# gene across the breast cancer cell lines, with an OLS fit line and the
# Pearson correlation annotated in the lower-left corner.
#
# Reads the globals expr.ccle, garp.score and garp.cellLines built above.
#
# Args:
#   gene:    gene symbol (row name of expr.ccle / garp.score).
#   correct: if TRUE (default), regress the cell-line subtype out of both
#            axes, i.e. plot subtype-adjusted residuals.
plot_garp <- function(gene, correct = TRUE) {  # was `correct = T`; T is reassignable
  x <- expr.ccle[gene, ]
  y <- garp.score[gene, ]
  if (isTRUE(correct)) {
    # Remove per-subtype means so the correlation is within-subtype.
    x <- x - predict(lm(x ~ factor(garp.cellLines$Subtype)))
    y <- y - predict(lm(y ~ factor(garp.cellLines$Subtype)))
  }
  plot(x, y, main = gene, xlab = "expression", ylab = "Gene essentiality",
       pch = 19, col = "gold")
  lines(x[order(x)], predict(lm(y ~ x))[order(x)], lwd = 3, col = "purple")
  # was `d = 2`: spell out `digits` instead of relying on partial matching
  text(min(x) * 0.95 + max(x) * 0.05, min(y) * 0.95 + max(y) * 0.05,
       paste("Pear. corr.=", round(cor(x, y), digits = 2)),
       col = "purple", pos = 4)
}
plot_garp(nearby)
plot_garp(hit)
plot_garp("MYC")
plot_garp("CCND1")
plot_garp("FAM83B")
plot_garp("TP53")
plot_garp("BRCA1")
plot_garp("BRCA2")
plot_garp("PTEN")
plot_garp("ATM")
plot_garp("RAD50")
dev.off()
### Luminal
pdf("~/project/ORMDL3/metabric/trimodal/figures/gene.essentiality.Luminal.pdf", width=5, height=5)
par(mfrow = c(2,2))
# Scatter plot of GARP gene-essentiality score against CCLE expression for one
# gene, restricted to the cell lines of a single subtype, with an OLS fit
# line and the Pearson correlation annotated.
#
# Reads the globals expr.ccle, garp.score and garp.cellLines built above.
#
# Args:
#   gene:    gene symbol (row name of expr.ccle / garp.score).
#   subtype: subtype label matched against garp.cellLines$Subtype.
plot_garp_subtype <- function(gene, subtype) {
  # Select the matching cell lines once (the subsetting was duplicated).
  cells <- row.names(garp.cellLines)[garp.cellLines$Subtype == subtype]
  x <- expr.ccle[gene, cells]
  y <- garp.score[gene, cells]
  plot(x, y, main = gene, xlab = paste("expression", subtype),
       ylab = "Gene essentiality", pch = 19, col = "gold")
  lines(x[order(x)], predict(lm(y ~ x))[order(x)], lwd = 3, col = "purple")
  # was `d = 2`: spell out `digits` instead of relying on partial matching
  text(min(x) * 0.95 + max(x) * 0.05, min(y) * 0.95 + max(y) * 0.05,
       paste("Pear. corr.=", round(cor(x, y), digits = 2)),
       col = "purple", pos = 4)
}
plot_garp_subtype(nearby, "Luminal")
plot_garp_subtype(hit, "Luminal")
plot_garp_subtype("MYC", "Luminal")
plot_garp_subtype("CCND1", "Luminal")
plot_garp_subtype("FAM83B", "Luminal")
plot_garp_subtype("TP53", "Luminal")
plot_garp_subtype("BRCA1", "Luminal")
plot_garp_subtype("BRCA2", "Luminal")
plot_garp_subtype("PTEN", "Luminal")
plot_garp_subtype("ATM", "Luminal")
plot_garp_subtype("RAD50", "Luminal")
dev.off()
### BasalA/B
garp.cellLines$Subtype[garp.cellLines$Subtype %in% c("Basal A", "Basal B")] <- "BasalA/B"
pdf("~/project/ORMDL3/metabric/trimodal/figures/gene.essentiality.Basal.pdf", width=5, height=5)
par(mfrow = c(2,2))
plot_garp_subtype(nearby, "BasalA/B")
plot_garp_subtype(hit, "BasalA/B")
plot_garp_subtype("MYC", "BasalA/B")
plot_garp_subtype("CCND1", "BasalA/B")
plot_garp_subtype("FAM83B", "BasalA/B")
plot_garp_subtype("TP53", "BasalA/B")
plot_garp_subtype("BRCA1", "BasalA/B")
plot_garp_subtype("BRCA2", "BasalA/B")
plot_garp_subtype("PTEN", "BasalA/B")
plot_garp_subtype("ATM", "BasalA/B")
plot_garp_subtype("RAD50", "BasalA/B")
dev.off()
### HER2
# NOTE(review): this recodes "Basal A"/"Basal B" to "HER2", but those levels
# were already collapsed to "BasalA/B" in the Basal section above, so this
# line is a no-op here and the HER2 plots below gain no cell lines from it.
# The %in% set looks copy-pasted from the Basal section -- confirm which
# subtype labels were actually meant to map to "HER2".
garp.cellLines$Subtype[garp.cellLines$Subtype %in% c("Basal A", "Basal B")] <- "HER2"
pdf("~/project/ORMDL3/metabric/trimodal/figures/gene.essentiality.HER2.pdf", width=5, height=5)
par(mfrow = c(2,2))
plot_garp_subtype(nearby, "HER2")
plot_garp_subtype(hit, "HER2")
plot_garp_subtype("MYC", "HER2")
plot_garp_subtype("CCND1", "HER2")
plot_garp_subtype("FAM83B", "HER2")
plot_garp_subtype("TP53", "HER2")
plot_garp_subtype("BRCA1", "HER2")
plot_garp_subtype("BRCA2", "HER2")
plot_garp_subtype("PTEN", "HER2")
plot_garp_subtype("ATM", "HER2")
plot_garp_subtype("RAD50", "HER2")
dev.off()
| /code/esstiality/essentiality.R | no_license | Minzhe/trimodal | R | false | false | 4,400 | r | ############### load data #############
hit <- "ORMDL3"
nearby <- "ERBB2"
load("data/CCLE/CCLE_Expression_clean.RData")
load("data/GARP/garp.score.RData")
rm(expr.ccle.raw, garp.data.raw)
############### clean data ##############
### ccle expression data
expr.ccle <- expr.ccle[,grep("BREAST", colnames(expr.ccle))]
colnames(expr.ccle) <- gsub("_BREAST", "", colnames(expr.ccle))
expr.ccle <- expr.ccle[,sort(colnames(expr.ccle))]
### grap score data
colnames(garp.score) <- toupper(gsub("[.]", "", colnames(garp.score)))
### garp cancer cancer cell lines
garp.cellLines$Cell.line <- toupper(gsub("-| ", "", garp.cellLines$Cell.line))
row.names(garp.cellLines) <- garp.cellLines$Cell.line; garp.cellLines$Cell.line <- NULL
### select breast cancer cell lines
brca.cell <- row.names(garp.cellLines)[garp.cellLines$Tumor.type == "Breast"]
brca.cell <- intersect(brca.cell, colnames(expr.ccle))
expr.ccle <- as.matrix(expr.ccle[,brca.cell])
garp.score <- as.matrix(garp.score[,brca.cell])
garp.cellLines <- garp.cellLines[brca.cell,]
############ gene essentiality and expression #############
### all subtypes
pdf("~/project/ORMDL3/metabric/trimodal/figures/gene.essentiality.pdf", width=5, height=5)
par(mfrow = c(2,2))
# Scatter plot of GARP gene-essentiality score against CCLE expression for one
# gene across the breast cancer cell lines, with an OLS fit line and the
# Pearson correlation annotated in the lower-left corner.
#
# Reads the globals expr.ccle, garp.score and garp.cellLines built above.
#
# Args:
#   gene:    gene symbol (row name of expr.ccle / garp.score).
#   correct: if TRUE (default), regress the cell-line subtype out of both
#            axes, i.e. plot subtype-adjusted residuals.
plot_garp <- function(gene, correct = TRUE) {  # was `correct = T`; T is reassignable
  x <- expr.ccle[gene, ]
  y <- garp.score[gene, ]
  if (isTRUE(correct)) {
    # Remove per-subtype means so the correlation is within-subtype.
    x <- x - predict(lm(x ~ factor(garp.cellLines$Subtype)))
    y <- y - predict(lm(y ~ factor(garp.cellLines$Subtype)))
  }
  plot(x, y, main = gene, xlab = "expression", ylab = "Gene essentiality",
       pch = 19, col = "gold")
  lines(x[order(x)], predict(lm(y ~ x))[order(x)], lwd = 3, col = "purple")
  # was `d = 2`: spell out `digits` instead of relying on partial matching
  text(min(x) * 0.95 + max(x) * 0.05, min(y) * 0.95 + max(y) * 0.05,
       paste("Pear. corr.=", round(cor(x, y), digits = 2)),
       col = "purple", pos = 4)
}
plot_garp(nearby)
plot_garp(hit)
plot_garp("MYC")
plot_garp("CCND1")
plot_garp("FAM83B")
plot_garp("TP53")
plot_garp("BRCA1")
plot_garp("BRCA2")
plot_garp("PTEN")
plot_garp("ATM")
plot_garp("RAD50")
dev.off()
### Luminal
pdf("~/project/ORMDL3/metabric/trimodal/figures/gene.essentiality.Luminal.pdf", width=5, height=5)
par(mfrow = c(2,2))
# Scatter plot of GARP gene-essentiality score against CCLE expression for one
# gene, restricted to the cell lines of a single subtype, with an OLS fit
# line and the Pearson correlation annotated.
#
# Reads the globals expr.ccle, garp.score and garp.cellLines built above.
#
# Args:
#   gene:    gene symbol (row name of expr.ccle / garp.score).
#   subtype: subtype label matched against garp.cellLines$Subtype.
plot_garp_subtype <- function(gene, subtype) {
  # Select the matching cell lines once (the subsetting was duplicated).
  cells <- row.names(garp.cellLines)[garp.cellLines$Subtype == subtype]
  x <- expr.ccle[gene, cells]
  y <- garp.score[gene, cells]
  plot(x, y, main = gene, xlab = paste("expression", subtype),
       ylab = "Gene essentiality", pch = 19, col = "gold")
  lines(x[order(x)], predict(lm(y ~ x))[order(x)], lwd = 3, col = "purple")
  # was `d = 2`: spell out `digits` instead of relying on partial matching
  text(min(x) * 0.95 + max(x) * 0.05, min(y) * 0.95 + max(y) * 0.05,
       paste("Pear. corr.=", round(cor(x, y), digits = 2)),
       col = "purple", pos = 4)
}
plot_garp_subtype(nearby, "Luminal")
plot_garp_subtype(hit, "Luminal")
plot_garp_subtype("MYC", "Luminal")
plot_garp_subtype("CCND1", "Luminal")
plot_garp_subtype("FAM83B", "Luminal")
plot_garp_subtype("TP53", "Luminal")
plot_garp_subtype("BRCA1", "Luminal")
plot_garp_subtype("BRCA2", "Luminal")
plot_garp_subtype("PTEN", "Luminal")
plot_garp_subtype("ATM", "Luminal")
plot_garp_subtype("RAD50", "Luminal")
dev.off()
### BasalA/B
garp.cellLines$Subtype[garp.cellLines$Subtype %in% c("Basal A", "Basal B")] <- "BasalA/B"
pdf("~/project/ORMDL3/metabric/trimodal/figures/gene.essentiality.Basal.pdf", width=5, height=5)
par(mfrow = c(2,2))
plot_garp_subtype(nearby, "BasalA/B")
plot_garp_subtype(hit, "BasalA/B")
plot_garp_subtype("MYC", "BasalA/B")
plot_garp_subtype("CCND1", "BasalA/B")
plot_garp_subtype("FAM83B", "BasalA/B")
plot_garp_subtype("TP53", "BasalA/B")
plot_garp_subtype("BRCA1", "BasalA/B")
plot_garp_subtype("BRCA2", "BasalA/B")
plot_garp_subtype("PTEN", "BasalA/B")
plot_garp_subtype("ATM", "BasalA/B")
plot_garp_subtype("RAD50", "BasalA/B")
dev.off()
### HER2
# NOTE(review): this recodes "Basal A"/"Basal B" to "HER2", but those levels
# were already collapsed to "BasalA/B" in the Basal section above, so this
# line is a no-op here and the HER2 plots below gain no cell lines from it.
# The %in% set looks copy-pasted from the Basal section -- confirm which
# subtype labels were actually meant to map to "HER2".
garp.cellLines$Subtype[garp.cellLines$Subtype %in% c("Basal A", "Basal B")] <- "HER2"
pdf("~/project/ORMDL3/metabric/trimodal/figures/gene.essentiality.HER2.pdf", width=5, height=5)
par(mfrow = c(2,2))
plot_garp_subtype(nearby, "HER2")
plot_garp_subtype(hit, "HER2")
plot_garp_subtype("MYC", "HER2")
plot_garp_subtype("CCND1", "HER2")
plot_garp_subtype("FAM83B", "HER2")
plot_garp_subtype("TP53", "HER2")
plot_garp_subtype("BRCA1", "HER2")
plot_garp_subtype("BRCA2", "HER2")
plot_garp_subtype("PTEN", "HER2")
plot_garp_subtype("ATM", "HER2")
plot_garp_subtype("RAD50", "HER2")
dev.off()
|
#
# CatterPlots
#
# Copyright (c) 2016 David L Gibbs
# email: gibbsdavidl@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Package load hook: greet the user when CatterPlots is attached.
# Uses packageStartupMessage() so the greeting can be silenced with
# suppressPackageStartupMessages().
.onAttach <- function(libname = find.package("CatterPlots"), pkgname = "CatterPlots") {
  greeting <- "\nWelcome to CatterPlots.\n"
  packageStartupMessage(greeting)
}
| /R/zzz.R | permissive | Gibbsdavidl/CatterPlots | R | false | false | 775 | r | #
# CatterPlots
#
# Copyright (c) 2016 David L Gibbs
# email: gibbsdavidl@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Package load hook: greet the user when CatterPlots is attached.
# Uses packageStartupMessage() so the greeting can be silenced with
# suppressPackageStartupMessages().
.onAttach <- function(libname = find.package("CatterPlots"), pkgname = "CatterPlots") {
  greeting <- "\nWelcome to CatterPlots.\n"
  packageStartupMessage(greeting)
}
|
pelanggan <- read.csv("customer_segments.txt", sep = "\t")
pelanggan[c("Jenis.Kelamin", "Umur", "Profesi", "Tipe.Residen")] | /Data Science In Marketing_Customer Segmentation/Membaca data dengan fungsi read_csv.R | no_license | rhedi/Data_Science | R | false | false | 124 | r | pelanggan <- read.csv("customer_segments.txt", sep = "\t")
pelanggan[c("Jenis.Kelamin", "Umur", "Profesi", "Tipe.Residen")] |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scale-pattern.R
\name{scale_pattern_scale_continuous}
\alias{scale_pattern_scale_continuous}
\alias{scale_pattern_scale_discrete}
\title{Scales for pattern_scale}
\usage{
scale_pattern_scale_continuous(
name = waiver(),
breaks = waiver(),
labels = waiver(),
limits = NULL,
range = c(0.5, 2),
trans = "identity",
guide = "legend"
)
scale_pattern_scale_discrete(..., range = c(0.5, 2))
}
\arguments{
\item{name, breaks, labels, limits, range, trans, guide, ...}{See
\code{ggplot2} documentation for more information on scales.}
}
\description{
Scales for pattern_scale
}
| /man/scale_pattern_scale_continuous.Rd | permissive | idavydov/ggpattern | R | false | true | 662 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scale-pattern.R
\name{scale_pattern_scale_continuous}
\alias{scale_pattern_scale_continuous}
\alias{scale_pattern_scale_discrete}
\title{Scales for pattern_scale}
\usage{
scale_pattern_scale_continuous(
name = waiver(),
breaks = waiver(),
labels = waiver(),
limits = NULL,
range = c(0.5, 2),
trans = "identity",
guide = "legend"
)
scale_pattern_scale_discrete(..., range = c(0.5, 2))
}
\arguments{
\item{name, breaks, labels, limits, range, trans, guide, ...}{See
\code{ggplot2} documentation for more information on scales.}
}
\description{
Scales for pattern_scale
}
|
#----------------------------------------------------------------------------
#' Sample the (common) stochastic volatility parameters
#'
#' Compute one draw for each of the parameters in the stochastic volatility model
#' where each component of omega_{j,t} has a common dynamic variance.
#'
#' @param omega \code{T x m} matrix of residuals
#' @param svParams list of parameters to be updated (see Value below)
#' @param prior_phi the parameters of the prior for the log-volatilty AR(1) coefficient \code{h_phi};
#' either \code{NULL} for uniform on [-1,1] or a 2-dimensional vector of (shape1, shape2) for a Beta prior
#' on \code{[(h_phi + 1)/2]}
#' @return List of relevant components:
#' \itemize{
#' \item the \code{T x 1} error standard deviations \code{sigma_et},
#' \item the \code{T x 1} log-volatility \code{ht},
#' \item the log-vol unconditional mean \code{h_mu},
#' \item the log-vol AR(1) coefficient \code{h_phi},
#' \item the log-vol innovation standard deviation \code{h_sigma_eta}
#' }
#'
#' @import truncdist
#' @export
# One Gibbs sweep over the common-SV parameters, in four conditional steps:
#   (1) log-vols ht | rest          -- AWOL sampler (sampleCommonLogVols)
#   (2) AR(1) coef h_phi | rest     -- sampleAR1 (defined elsewhere in the
#       package; assumed to accept centered log-vols -- TODO confirm)
#   (3) innovation SD h_sigma_eta | rest -- truncated gamma on the precision
#       (equivalent to the stated uniform prior on the SD, truncated so that
#        h_sigma_eta <= 100 via the lower bound a = 1/100^2 on the precision)
#   (4) unconditional mean h_mu | rest  -- conjugate normal, prior N(-10, 100)
sampleCommonSV = function(omega, svParams, prior_phi = c(20,1.5)){
# Store the SV parameters locally:
ht = svParams$ht; h_mu = svParams$h_mu; h_phi = svParams$h_phi; h_sigma_eta = svParams$h_sigma_eta;
# "Local" number of time points
ht = as.matrix(ht)
n = nrow(ht); m = ncol(ht)
# Step (1): sample the log-volatilities using AWOL sampler
ht = sampleCommonLogVols(h_y = omega, h_prev = ht, h_mu = h_mu, h_phi=h_phi, h_sigma_eta = h_sigma_eta)
# Compute centered log-vols for the samplers below:
ht_tilde = ht - h_mu
# Step (2): sample AR(1) parameters
h_phi = sampleAR1(h_yc = ht_tilde, h_phi = h_phi, h_sigma_eta_t = matrix(rep(h_sigma_eta, n-1)), prior_dhs_phi = prior_phi)
# Step (3): sample evolution error: uniform prior on standard deviation
eta_t = ht_tilde[-1] - h_phi*ht_tilde[-n] # Residuals
#h_sigma_eta = 1/sqrt(rgamma(n = 1, shape = 0.01 + length(eta_t)/2, rate = 0.01 + sum(eta_t^2)/2))
h_sigma_eta = 1/sqrt(truncdist::rtrunc(n = 1,
'gamma',
a = 1/100^2, # Lower interval
b = Inf, # Upper interval
shape = length(eta_t)/2 - 1/2,
rate = sum(eta_t^2)/2))
# Step (4): sample the unconditional mean:
# Prior mean: h_mu ~ N(-10, 100)
y_mu = (ht[-1] - h_phi*ht[-n])/h_sigma_eta
x_mu = (1 - h_phi)/h_sigma_eta
postSD = 1/sqrt((n-1)*x_mu^2 + 1/100)
postMean = (sum(x_mu*y_mu) + -10/100)*postSD^2
h_mu = rnorm(n = 1, mean = postMean, sd = postSD)
# Evolution error SD:
sigma_et = exp(ht/2)
# Note: if we have enormous SDs, reduce (cap at 1000 for numerical safety):
sigma_et[which(sigma_et > 10^3, arr.ind = TRUE)] = 10^3
# Return the same list, but with the new values
list(sigma_et = sigma_et, ht = ht, h_mu = h_mu, h_phi = h_phi, h_sigma_eta = h_sigma_eta)
}
#----------------------------------------------------------------------------
#' Sample the latent log-volatilities, common to m-dimensional time series
#'
#' Compute one draw of the log-volatilities using a discrete mixture of Gaussians
#' approximation to the likelihood (see Omori, Chib, Shephard, and Nakajima, 2007)
#' where the log-vols are assumed to follow an AR(1) model. The model assumes that
#' the volatility are common to an m-dimensional time series.
#'
#' @param h_y the \code{T x m} matrix of data, which follows one join SV model
#' @param h_prev the \code{T x 1} matrix of the previous log-vols
#' @param h_mu the log-vol unconditional mean
#' @param h_phi the log-vol AR(1) coefficient
#' @param h_sigma_eta the log-vol innovation standard deviation
#'
#' @return \code{T x 1} matrix of simulated log-vols
#' @import Matrix
#' @export
sampleCommonLogVols = function(h_y, h_prev, h_mu, h_phi, h_sigma_eta){
  # Dimensions: n time points, m series that share the common volatility path.
  h_y = as.matrix(h_y) # Just to be sure (T x m)
  n = nrow(h_y); m = ncol(h_y)
  h_prev = as.matrix(h_prev)
  # Omori, Chib, Shephard, Nakajima (2007) 10-component Gaussian mixture
  # approximation to the log-chi^2_1 distribution: means, variances, weights.
  m_st = c(1.92677, 1.34744, 0.73504, 0.02266, -0.85173, -1.97278, -3.46788, -5.55246, -8.68384, -14.65000)
  v_st2 = c(0.11265, 0.17788, 0.26768, 0.40611, 0.62699, 0.98583, 1.57469, 2.54498, 4.16591, 7.33342)
  q = c(0.00609, 0.04775, 0.13057, 0.20674, 0.22715, 0.18842, 0.12047, 0.05591, 0.01575, 0.00115)
  # Small offset guards against log(0) when any observation is exactly zero.
  yoffset = any(h_y==0)*sd(h_y)/10000
  # Linearized observation equation: log(y^2) = h_t + log(chi^2_1) noise
  ystar = log(h_y^2 + yoffset)
  # Sample the mixture-component indicators from the residuals ystar - h_prev.
  #z = draw.indicators(res = ystar - matrix(rep(h_prev, m), nr = n), nmix = list(m = m_st, v = v_st2, p = q))
  # BUG FIX: the residual was previously written as
  #   ystar - ystar - matrix(rep(h_prev, m), nr = n)   (== -h_prev),
  # which cancels the data entirely; the commented-out draw.indicators()
  # call above shows the intended residual ystar - h_prev.
  z = sapply(ystar - matrix(rep(h_prev, m), nr = n), ncind, m_st, sqrt(v_st2), q)
  # Means/variances of the sampled mixture components; (n x m) matrices
  m_st_all = matrix(m_st[z], nr=n); v_st2_all = matrix(v_st2[z], nr=n)
  # Evolution error variance (scalar SD recycled, or a time-varying vector):
  if(length(h_sigma_eta) == 1){
    sigmat2 = rep(h_sigma_eta^2, n);
    # for simplicity, ignore the stationary variance at t = 1:
    #sigmat2[1] = h_sigma_eta^2/(1 - h_phi^2)
  } else sigmat2 = h_sigma_eta^2
  # Tridiagonal posterior precision of (h_1,...,h_n): "all without a loop"
  QHt.Matrix = bandSparse(n, k = c(0,1),
                          diag = list(rowSums(1/v_st2_all) + 1/sigmat2 + c(h_phi^2/sigmat2[-1], 0),
                                      -h_phi/sigmat2[-1]), symm = TRUE)
  chQht_Matrix = Matrix::chol(QHt.Matrix)
  # Linear term of the Gaussian full conditional:
  linht = matrix(rowSums((ystar - m_st_all - h_mu)/v_st2_all))
  # One joint draw of the log-vols via the banded Cholesky factor:
  hsamp = h_mu + matrix(Matrix::solve(chQht_Matrix,Matrix::solve(Matrix::t(chQht_Matrix), linht) + rnorm(n)), nr = n)
  # Return the (uncentered) log-vols
  hsamp
}
#----------------------------------------------------------------------------
#' Initialize the (common) SV parameters
#'
#' Compute initial values for common stochastic volatility parameters
#'
#' @param omega \code{T x m} matrix of residuals
#' @return List of relevant components:
#' \itemize{
#' \item the \code{T x 1} error standard deviations \code{sigma_et},
#' \item the \code{T x 1} log-volatility \code{ht},
#' \item the log-vol unconditional mean \code{h_mu},
#' \item the log-vol AR(1) coefficient \code{h_phi},
#' \item the log-vol innovation standard deviation \code{h_sigma_eta}
#' }
#' @export
# Initialize the common stochastic-volatility state from a matrix of residuals.
#
# Args:
#   omega: T x m matrix of residuals sharing one common SV process.
# Returns:
#   List with sigma_et (T x 1 error SDs), ht (T x 1 log-vols), h_mu
#   (unconditional mean), h_phi (AR(1) coefficient) and h_sigma_eta
#   (innovation SD) -- the same components sampleCommonSV updates.
initCommonSV = function(omega){
  omega <- as.matrix(omega)
  n_obs <- nrow(omega)
  # Crude log-volatility estimate: log of (offset) squared residuals,
  # averaged across the series.
  ht <- rowMeans(log(omega^2 + 0.0001))
  # Fit an AR(1) with intercept to the rough log-vols.
  ar_coefs <- arima(ht, c(1, 0, 0))$coef
  h_phi <- ar_coefs[1]   # AR(1) coefficient
  h_mu <- ar_coefs[2]    # unconditional mean ("intercept")
  # Innovation SD from the AR(1) residuals of the centered series.
  centered <- ht - h_mu
  h_sigma_eta <- sd(centered[-1] - h_phi * centered[-n_obs])
  list(sigma_et = exp(ht / 2),
       ht = ht,
       h_mu = h_mu,
       h_phi = h_phi,
       h_sigma_eta = h_sigma_eta)
}
| /R/commonSV_source.R | no_license | drkowal/FDLM | R | false | false | 7,428 | r | #----------------------------------------------------------------------------
#' Sample the (common) stochastic volatility parameters
#'
#' Compute one draw for each of the parameters in the stochastic volatility model
#' where each component of omega_{j,t} has a common dynamic variance.
#'
#' @param omega \code{T x m} matrix of residuals
#' @param svParams list of parameters to be updated (see Value below)
#' @param prior_phi the parameters of the prior for the log-volatilty AR(1) coefficient \code{h_phi};
#' either \code{NULL} for uniform on [-1,1] or a 2-dimensional vector of (shape1, shape2) for a Beta prior
#' on \code{[(h_phi + 1)/2]}
#' @return List of relevant components:
#' \itemize{
#' \item the \code{T x 1} error standard deviations \code{sigma_et},
#' \item the \code{T x 1} log-volatility \code{ht},
#' \item the log-vol unconditional mean \code{h_mu},
#' \item the log-vol AR(1) coefficient \code{h_phi},
#' \item the log-vol innovation standard deviation \code{h_sigma_eta}
#' }
#'
#' @import truncdist
#' @export
# One Gibbs sweep over the common-SV parameters, in four conditional steps:
#   (1) log-vols ht | rest          -- AWOL sampler (sampleCommonLogVols)
#   (2) AR(1) coef h_phi | rest     -- sampleAR1 (defined elsewhere in the
#       package; assumed to accept centered log-vols -- TODO confirm)
#   (3) innovation SD h_sigma_eta | rest -- truncated gamma on the precision
#       (equivalent to the stated uniform prior on the SD, truncated so that
#        h_sigma_eta <= 100 via the lower bound a = 1/100^2 on the precision)
#   (4) unconditional mean h_mu | rest  -- conjugate normal, prior N(-10, 100)
sampleCommonSV = function(omega, svParams, prior_phi = c(20,1.5)){
# Store the SV parameters locally:
ht = svParams$ht; h_mu = svParams$h_mu; h_phi = svParams$h_phi; h_sigma_eta = svParams$h_sigma_eta;
# "Local" number of time points
ht = as.matrix(ht)
n = nrow(ht); m = ncol(ht)
# Step (1): sample the log-volatilities using AWOL sampler
ht = sampleCommonLogVols(h_y = omega, h_prev = ht, h_mu = h_mu, h_phi=h_phi, h_sigma_eta = h_sigma_eta)
# Compute centered log-vols for the samplers below:
ht_tilde = ht - h_mu
# Step (2): sample AR(1) parameters
h_phi = sampleAR1(h_yc = ht_tilde, h_phi = h_phi, h_sigma_eta_t = matrix(rep(h_sigma_eta, n-1)), prior_dhs_phi = prior_phi)
# Step (3): sample evolution error: uniform prior on standard deviation
eta_t = ht_tilde[-1] - h_phi*ht_tilde[-n] # Residuals
#h_sigma_eta = 1/sqrt(rgamma(n = 1, shape = 0.01 + length(eta_t)/2, rate = 0.01 + sum(eta_t^2)/2))
h_sigma_eta = 1/sqrt(truncdist::rtrunc(n = 1,
'gamma',
a = 1/100^2, # Lower interval
b = Inf, # Upper interval
shape = length(eta_t)/2 - 1/2,
rate = sum(eta_t^2)/2))
# Step (4): sample the unconditional mean:
# Prior mean: h_mu ~ N(-10, 100)
y_mu = (ht[-1] - h_phi*ht[-n])/h_sigma_eta
x_mu = (1 - h_phi)/h_sigma_eta
postSD = 1/sqrt((n-1)*x_mu^2 + 1/100)
postMean = (sum(x_mu*y_mu) + -10/100)*postSD^2
h_mu = rnorm(n = 1, mean = postMean, sd = postSD)
# Evolution error SD:
sigma_et = exp(ht/2)
# Note: if we have enormous SDs, reduce (cap at 1000 for numerical safety):
sigma_et[which(sigma_et > 10^3, arr.ind = TRUE)] = 10^3
# Return the same list, but with the new values
list(sigma_et = sigma_et, ht = ht, h_mu = h_mu, h_phi = h_phi, h_sigma_eta = h_sigma_eta)
}
#----------------------------------------------------------------------------
#' Sample the latent log-volatilities, common to m-dimensional time series
#'
#' Compute one draw of the log-volatilities using a discrete mixture of Gaussians
#' approximation to the likelihood (see Omori, Chib, Shephard, and Nakajima, 2007)
#' where the log-vols are assumed to follow an AR(1) model. The model assumes that
#' the volatility are common to an m-dimensional time series.
#'
#' @param h_y the \code{T x m} matrix of data, which follows one join SV model
#' @param h_prev the \code{T x 1} matrix of the previous log-vols
#' @param h_mu the log-vol unconditional mean
#' @param h_phi the log-vol AR(1) coefficient
#' @param h_sigma_eta the log-vol innovation standard deviation
#'
#' @return \code{T x 1} matrix of simulated log-vols
#' @import Matrix
#' @export
sampleCommonLogVols = function(h_y, h_prev, h_mu, h_phi, h_sigma_eta){
  # Dimensions: n time points, m series that share the common volatility path.
  h_y = as.matrix(h_y) # Just to be sure (T x m)
  n = nrow(h_y); m = ncol(h_y)
  h_prev = as.matrix(h_prev)
  # Omori, Chib, Shephard, Nakajima (2007) 10-component Gaussian mixture
  # approximation to the log-chi^2_1 distribution: means, variances, weights.
  m_st = c(1.92677, 1.34744, 0.73504, 0.02266, -0.85173, -1.97278, -3.46788, -5.55246, -8.68384, -14.65000)
  v_st2 = c(0.11265, 0.17788, 0.26768, 0.40611, 0.62699, 0.98583, 1.57469, 2.54498, 4.16591, 7.33342)
  q = c(0.00609, 0.04775, 0.13057, 0.20674, 0.22715, 0.18842, 0.12047, 0.05591, 0.01575, 0.00115)
  # Small offset guards against log(0) when any observation is exactly zero.
  yoffset = any(h_y==0)*sd(h_y)/10000
  # Linearized observation equation: log(y^2) = h_t + log(chi^2_1) noise
  ystar = log(h_y^2 + yoffset)
  # Sample the mixture-component indicators from the residuals ystar - h_prev.
  #z = draw.indicators(res = ystar - matrix(rep(h_prev, m), nr = n), nmix = list(m = m_st, v = v_st2, p = q))
  # BUG FIX: the residual was previously written as
  #   ystar - ystar - matrix(rep(h_prev, m), nr = n)   (== -h_prev),
  # which cancels the data entirely; the commented-out draw.indicators()
  # call above shows the intended residual ystar - h_prev.
  z = sapply(ystar - matrix(rep(h_prev, m), nr = n), ncind, m_st, sqrt(v_st2), q)
  # Means/variances of the sampled mixture components; (n x m) matrices
  m_st_all = matrix(m_st[z], nr=n); v_st2_all = matrix(v_st2[z], nr=n)
  # Evolution error variance (scalar SD recycled, or a time-varying vector):
  if(length(h_sigma_eta) == 1){
    sigmat2 = rep(h_sigma_eta^2, n);
    # for simplicity, ignore the stationary variance at t = 1:
    #sigmat2[1] = h_sigma_eta^2/(1 - h_phi^2)
  } else sigmat2 = h_sigma_eta^2
  # Tridiagonal posterior precision of (h_1,...,h_n): "all without a loop"
  QHt.Matrix = bandSparse(n, k = c(0,1),
                          diag = list(rowSums(1/v_st2_all) + 1/sigmat2 + c(h_phi^2/sigmat2[-1], 0),
                                      -h_phi/sigmat2[-1]), symm = TRUE)
  chQht_Matrix = Matrix::chol(QHt.Matrix)
  # Linear term of the Gaussian full conditional:
  linht = matrix(rowSums((ystar - m_st_all - h_mu)/v_st2_all))
  # One joint draw of the log-vols via the banded Cholesky factor:
  hsamp = h_mu + matrix(Matrix::solve(chQht_Matrix,Matrix::solve(Matrix::t(chQht_Matrix), linht) + rnorm(n)), nr = n)
  # Return the (uncentered) log-vols
  hsamp
}
#----------------------------------------------------------------------------
#' Initialize the (common) SV parameters
#'
#' Compute initial values for common stochastic volatility parameters
#'
#' @param omega \code{T x m} matrix of residuals
#' @return List of relevant components:
#' \itemize{
#' \item the \code{T x 1} error standard deviations \code{sigma_et},
#' \item the \code{T x 1} log-volatility \code{ht},
#' \item the log-vol unconditional mean \code{h_mu},
#' \item the log-vol AR(1) coefficient \code{h_phi},
#' \item the log-vol innovation standard deviation \code{h_sigma_eta}
#' }
#' @export
initCommonSV = function(omega){
  # Initialize the common stochastic-volatility parameters from a T x m
  # matrix of residuals. Returns sigma_et, ht, h_mu, h_phi, h_sigma_eta.
  omega = as.matrix(omega)
  num_t = nrow(omega); num_series = ncol(omega)

  # Crude log-volatility estimate: average of log squared residuals
  # (small offset avoids log(0))
  ht = rowMeans(log(omega^2 + 0.0001))

  # Fit an AR(1) with intercept; coef() returns c(ar1, intercept)
  ar_fit = arima(ht, c(1, 0, 0))
  est = coef(ar_fit)
  h_mu = est[2]; h_phi = est[1]

  # Innovation SD from the empirical residuals of the centered AR(1)
  ht_centered = ht - h_mu
  h_sigma_eta = sd(ht_centered[-1] - h_phi*ht_centered[-num_t])

  # Implied evolution-error SDs
  sigma_et = exp(ht/2)

  list(sigma_et = sigma_et, ht = ht, h_mu = h_mu, h_phi = h_phi, h_sigma_eta = h_sigma_eta)
}
|
\name{mongo.drop.database}
\alias{mongo.drop.database}
\title{Drop a database from a MongoDB server}
\usage{
mongo.drop.database(mongo, db)
}
\arguments{
\item{mongo}{(\link{mongo}) A mongo connection object.}
\item{db}{(string) The name of the database to drop.}
}
\value{
(Logical) TRUE if successful; otherwise, FALSE
}
\description{
Drop a database from MongoDB server. Removes the entire
database and all collections in it.
}
\details{
Obviously, care should be taken when using this command.
}
\examples{
mongo <- mongo.create()
if (mongo.is.connected(mongo)) {
print(mongo.drop.database(mongo, "test"))
mongo.destroy(mongo)
}
}
\seealso{
\code{\link{mongo.drop}},\cr
\code{\link{mongo.command}},\cr
\code{\link{mongo.rename}},\cr
\code{\link{mongo.count}},\cr \link{mongo}.
}
| /man/mongo.drop.database.Rd | no_license | dtenenba/rmongodb | R | false | false | 817 | rd | \name{mongo.drop.database}
\alias{mongo.drop.database}
\title{Drop a database from a MongoDB server}
\usage{
mongo.drop.database(mongo, db)
}
\arguments{
\item{mongo}{(\link{mongo}) A mongo connection object.}
\item{db}{(string) The name of the database to drop.}
}
\value{
(Logical) TRUE if successful; otherwise, FALSE
}
\description{
Drop a database from MongoDB server. Removes the entire
database and all collections in it.
}
\details{
Obviously, care should be taken when using this command.
}
\examples{
mongo <- mongo.create()
if (mongo.is.connected(mongo)) {
print(mongo.drop.database(mongo, "test"))
mongo.destroy(mongo)
}
}
\seealso{
\code{\link{mongo.drop}},\cr
\code{\link{mongo.command}},\cr
\code{\link{mongo.rename}},\cr
\code{\link{mongo.count}},\cr \link{mongo}.
}
|
#' Compare Simulation Results
#'
#' Generic function to compare simulation results in \pkg{lsbclust}.
#'
#' @inheritParams cfsim.lsbclust
#' @seealso \code{\link{cfsim.lsbclust}}, \code{\link{cfsim.T3Clusf}}
#' @export
cfsim <- function(fitted, actual, method = c("diag", "cRand")) {
  # S3 generic: dispatch on the class of 'fitted' (the fitted model object);
  # methods (e.g. cfsim.lsbclust, cfsim.T3Clusf) do the actual comparison.
  UseMethod("cfsim", fitted)
}
| /lsbclust/R/cfsim.R | no_license | akhikolla/InformationHouse | R | false | false | 317 | r | #' Compare Simulation Results
#'
#' Generic function to compare simulation results in \pkg{lsbclust}.
#'
#' @inheritParams cfsim.lsbclust
#' @seealso \code{\link{cfsim.lsbclust}}, \code{\link{cfsim.T3Clusf}}
#' @export
cfsim <- function(fitted, actual, method = c("diag", "cRand")) {
  # S3 generic: dispatch on the class of 'fitted' (the fitted model object);
  # methods (e.g. cfsim.lsbclust, cfsim.T3Clusf) do the actual comparison.
  UseMethod("cfsim", fitted)
}
|
library(ape)
# Read the phylogenetic tree from its Newick-format file
testtree <- read.tree("10845_0.txt")
# Remove the root, producing an unrooted tree
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10845_0_unrooted.txt") | /codeml_files/newick_trees_processed_and_cleaned/10845_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
# Read the phylogenetic tree from its Newick-format file (uses ape, loaded above)
testtree <- read.tree("10845_0.txt")
# Remove the root, producing an unrooted tree
unrooted_tr <- unroot(testtree)
# Write the unrooted tree back out in Newick format
write.tree(unrooted_tr, file="10845_0_unrooted.txt")
# Shiny UI: sidebar of input controls plus a main panel holding the results table.
fluidPage(
  titlePanel("Folds: Hyperparameters, Sample Sizes and Prediction Performances"),
  sidebarLayout(
    sidebarPanel(
      helpText("Explore the hyperparameters values, the performances and the sample sizes for each model."),
      # Drop-downs selecting the phenotype and the predictor set;
      # analyses_labels / predictors_labels are defined elsewhere in the app.
      selectInput("analysis", label = "Select the predicted phenotype:", choices = c(Choose = '', analyses_labels), selectize = TRUE, selected=analyses_labels[1]),
      selectInput("predictors", label = "Select the predictors:", choices = c(Choose = '', predictors_labels), selectize = TRUE, selected="Mixed Predictors + Demographics"),
      # Algorithm selector rendered server-side
      uiOutput("algos"),
      # Which data split(s) to display
      radioButtons("set", "Display the results for the datasets:", choices = c("Training and Testing", "Training", "Testing"), selected = "Training and Testing"),
      # Toggles controlling which pieces of information appear
      checkboxInput("display_performances", "Display the performances", value = T),
      uiOutput("display_performances_CI"),
      checkboxInput("display_sample_sizes", "Display the sample sizes", value = T),
      checkboxInput("display_hyperparameters", "Display the hyperparameters values", value = T),
      uiOutput("help_text_list_performances"),
      uiOutput("list_performances")
    ),
    # Main panel: the results table; the inline CSS hides Shiny error output
    # (presumably to suppress transient errors while inputs initialize)
    mainPanel(dataTableOutput("table_Hyperparameters"),
              tags$style(type="text/css",
                         ".shiny-output-error { visibility: hidden; }",
                         ".shiny-output-error:before { visibility: hidden; }"))
  )
)
| /analysis_pipeline/scripts_shiny_app/uis/Hyperparameters_ui.R | no_license | chiragjp/metagenome_ml | R | false | false | 1,422 | r | fluidPage(
titlePanel("Folds: Hyperparameters, Sample Sizes and Prediction Performances"),
sidebarLayout(
sidebarPanel(
helpText("Explore the hyperparameters values, the performances and the sample sizes for each model."),
selectInput("analysis", label = "Select the predicted phenotype:", choices = c(Choose = '', analyses_labels), selectize = TRUE, selected=analyses_labels[1]),
selectInput("predictors", label = "Select the predictors:", choices = c(Choose = '', predictors_labels), selectize = TRUE, selected="Mixed Predictors + Demographics"),
uiOutput("algos"),
radioButtons("set", "Display the results for the datasets:", choices = c("Training and Testing", "Training", "Testing"), selected = "Training and Testing"),
checkboxInput("display_performances", "Display the performances", value = T),
uiOutput("display_performances_CI"),
checkboxInput("display_sample_sizes", "Display the sample sizes", value = T),
checkboxInput("display_hyperparameters", "Display the hyperparameters values", value = T),
uiOutput("help_text_list_performances"),
uiOutput("list_performances")
),
mainPanel(dataTableOutput("table_Hyperparameters"),
tags$style(type="text/css",
".shiny-output-error { visibility: hidden; }",
".shiny-output-error:before { visibility: hidden; }"))
)
)
|
########################################
# Introduction to Bayesian Computation #
########################################
######################
# Rejection Sampling
######################
# library(LearnBayes)
# data(cancermortality)
# Previously, we were able to produce
# simulated samples directly from the
# posterior distribution since the
# distributions were familiar functional forms.
# So we could obtain Monte Carlo estimates
# of the posterior mean for any function
# of the parameters of interest.
# But in other situations, such as the
# beta-binomial example today, the
# posterior does not have a familiar form
# and so we need to use an alternative
# algorithm for producing a simulated sample.
# A general-purpose algorithm for
# simulating random draws from a given
# probability distribution is rejection sampling.
# Suppose we wish to produce an independent
# sample from a posterior density g(theta|y)
# where the normalizing constant may not be known.
# The first step in rejection sampling
# is to find another probability density
# p(theta) such that:
# - It is easy to simulate draws from p.
# - The density p resembles the posterior
# density of interest g in terms of
# location and spread.
# - For all theta and a constant c,
# g(theta|y) l.t.e.t. cp(theta).
# Suppose we are able to find a density p
# with these properties. Then one obtains
# draws from g using the following
# accept/reject algorithm:
# 1. Independently simulate theta from p and
# a uniform random variable U on the
# unit interval.
# 2. If U l.t.e.t. g(theta|y)/(cp(theta)), then accept theta
# as a draw from the density g;
# otherwise reject theta.
# 3. Continue steps 1 and 2 of the algorithm
# until one has collected a sufficient
# number of "accepted" theta's.
# We return to the beta-binomial example.
# We want to find a proposal density of
# a simple functional form that, when
# multiplied by an appropriate constant,
# covers the posterior density of interest.
# One choice for p would be a multivariate
# t density with mean and scale matrix
# chosen to match the posterior density.
# We use a multivariate t density with
# location fit$mode, scale matrix
# 2 fit$var, and 4 dof.
# We write a new function betabinT()
# with two inputs, the parameter
# theta and a list datapar with
# components data, the data matrix,
# and par, a list with the parameters
# of the t proposal density (mean,
# scale matrix, and degrees of freedom)
fit=laplace(betabinexch,
c(-7,6),
cancermortality)
fit
betabinT = function(theta, datapar)
{
  # Log ratio of the beta-binomial exchangeable posterior to the
  # multivariate-t proposal density, evaluated at theta.
  # datapar$data holds the data matrix; datapar$par holds the t
  # parameters (m = location, var = scale matrix, df = degrees of freedom).
  y_data = datapar$data
  t_par = datapar$par
  log_post = betabinexch(theta, y_data)
  log_prop = dmt(theta, mean = c(t_par$m), S = t_par$var, df = t_par$df, log = TRUE)
  log_post - log_prop
}
# We define parameters of t proposal density
# and the list datapar:
tpar=list(m=fit$mode,var=2*fit$var,df=4)
tpar
datapar=list(data=cancermortality,par=tpar)
datapar
# We run laplace() with the above
# function and an "intelligent"
# starting value
start=c(-6.9,12.4)
fit1=laplace(betabinT,start,datapar)
fit1$mode
# We find that the maximum value d
# occurs at the value theta = (-6.889, 12.422).
# The value of d is found by evaluating
# the function at the modal value.
betabinT(fit1$mode,datapar)
# We use rejectsampling() using
# constant value of d and simulate
# 10,000 draws from proposal density
theta=rejectsampling(betabinexch,tpar,
-569.2813,10000,
cancermortality)
dim(theta)
# theta has 2406 rows so acceptance
# rate is 2406/10000 = .24
# We plot simulated draws from rejection
# sampling on contour plot of previous
# log posterior density plot. Most
# of the draws are within the inner
# contour of the exact density
mycontour(betabinexch,c(-8,-4.5,3,16.5),
cancermortality, xlab="logit eta",
ylab="log K")
points(theta[,1],theta[,2])
###########################
### Importance Sampling ###
###########################
### see slides 6-9 for mathematical details ###
# As in rejection sampling, the issue in
# designing a good importance sampling
# estimate is finding a suitable sampling
# density p. This density should be of a
# familiar functional form so simulated
# draws are available.
# The density should mimic the posterior
# density g and have relatively flat tails
# so that the weight function w(theta) is
# bounded from above. One can monitor the
# choice of p by inspecting the values of
# the simulated weights w(thetaj). If there
# are no unusually large weights, then it
# is likely that the weight function is bounded
# and the importance sampler is providing a
# suitable estimate.
# library(LearnBayes)
data(cancermortality)
fit=laplace(betabinexch,c(-7,6),
cancermortality)
fit
# We write betabinexch.cond() this posterior
# density conditional on the value of theta1
# -6.819. The function allows the input of
# the vector of values theta2 = log K. The
# function returns the value of the density
# (not log density).
betabinexch.cond = function (log.K, data, logit.eta = -6.818793)
{
  # Conditional posterior density of log K in the beta-binomial exchangeable
  # model, with logit(eta) fixed (default -6.818793, the posterior mode for
  # the cancer mortality data). Vectorized over log.K.
  #
  # Args:
  #   log.K: numeric vector of log K values at which to evaluate the density.
  #   data: matrix with deaths y in column 1 and exposures n in column 2.
  #   logit.eta: fixed value of logit(eta); default preserves the original
  #     hard-coded behavior.
  # Returns: the density values scaled so the maximum equals 1 (not normalized).
  eta = exp(logit.eta)/(1 + exp(logit.eta))
  K = exp(log.K)
  y = data[, 1]; n = data[, 2]
  logf = 0*log.K  # accumulator with the same length as log.K
  # Beta-binomial log-likelihood contribution of each observation
  # (seq_along is safe for zero-row data, unlike 1:length(y))
  for (j in seq_along(y))
    logf = logf + lbeta(K * eta + y[j], K * (1 - eta) + n[j] - y[j]) - lbeta(K * eta, K * (1 - eta))
  # Add the log prior term for K and rescale by the maximum for stability
  val = logf + log.K - 2 * log(1 + K)
  exp(val - max(val))
}
# To compute the mean of logK for the cancer
# mortality data, suppose we let the proposal
# density p be normal with mean 8 and standard
# deviation 2.
# In this R code, we use the integrate()
# to find the normalizing constant of the
# posterior density of logK.
# Then, using the curve function, we display
# the conditional posterior density of logK
# and the normal proposal density in the top
# left graph. The top right graph displays the
# weight function, the ratio of the posterior
# density to the proposal density.
I=integrate(betabinexch.cond,2,16,cancermortality)
I
par(mfrow=c(2,2))
curve(betabinexch.cond(x,cancermortality)/I$value,
from=3,to=16, ylab="Density",
xlab="log K",lwd=3, main="Densities")
curve(dnorm(x,8,2),add=TRUE)
legend("topright",legend=c("Exact","Normal"),lwd=c(3,1))
curve(betabinexch.cond(x,cancermortality)/I$value/dnorm(x,8,2),
from=3,to=16, ylab="Weight",xlab="log K",
main="Weight = g/p")
# Although the normal proposal density
# resembles the posterior density with
# respect to location and spread, the
# posterior density has a flatter right
# tail than the proposal and the weight
# function is unbounded for large logK.
# Suppose instead that we let the proposal
# density have the t functional form with
# location 8, scale 2, and 2 degrees of
# freedom. Using more R commands, the
# bottom graphs display the posterior and
# proposal densities and the weight function.
# Here the t proposal density has flatter
# tails than the posterior density and the
# weight function is bounded. Here the t
# functional form is a better proposal
# for importance sampling.
curve(betabinexch.cond(x,cancermortality)/I$value,
from=3,to=16, ylab="Density", xlab="log K",
lwd=3, main="Densities")
curve(1/2*dt(x-8,df=2),add=TRUE)
legend("topright",legend=c("Exact","T(2)"),lwd=c(3,1))
curve(betabinexch.cond(x,cancermortality)/I$value/
(1/2*dt(x-8,df=2)),from=3,to=16,
ylab="Weight",xlab="log K",
main="Weight = g/p")
##############################
### Using a Multivariate t ###
### as a Proposal Density ###
##############################
# For a posterior density of a vector of
# real-valued parameters, a convenient
# choice of sampler p is a multivariate
# t density. The R function impsampling
# implements importance sampling for an
# arbitrary posterior density when p is
# a t density.
# There are five inputs to this function:
# - logf() is the function defining the
# logarithm of the posterior,
# - tpar() is a list of parameter values of
# the t density,
# - h() is a function defining the function
# h(theta) of interest,
# - n is the size of the simulated sample, and
# - data is the vector or list used in the
# definition of logf().
# In the function impsampling(), the functions
# rmt() and dmt() from the mnormt library
# are used to simulate and compute values
# of the t density.
# In this R code from impsampling(), we
# simulate draws from the sampling density,
# compute values of the log sampling density
# and the log posterior density at the
# simulated draws, and compute the weights
# and importance sampler estimate.
# theta = rmt(n, mean = c(tpar$m), S = tpar$var, df = tpar$df)
# lf = matrix(0, c(dim(theta)[1], 1))
# lp = dmt(theta, mean = c(tpar$m), S = tpar$var, df = tpar$df,
# log = TRUE)
# md = max(lf - lp)
# wt = exp(lf - lp - md)
# est = sum(wt * H)/sum(wt)
# Note that the value md is the maximum
# value of the difference of logs of the
# posterior and proposal density - this value
# is used in the computation of the weights
# to prevent possible overflow.
# The output of impsampling() is a list with
# four components:
# - est is the importance sampling estimate,
# - se is the corresponding simulation standard
# error,
# - theta is a matrix of simulated draws from
# the proposal density p, and
# - wt is a vector of the corresponding
# weights.
# To illustrate importance sampling, we
# return to our beta-binomial example and
# consider the problem of estimating the
# posterior mean of logK.
# The proposal density used in the development
# of a rejection algorithm seems to be a
# good choice for importance sampling.
# We choose a t density where the location
# is the posterior mode (found from laplace),
# the scale matrix is twice the estimated
# variance-covariance matrix, and the number
# of degrees of freedom is 4. This choice for
# p will resemble the posterior density and
# have flat tails that we hope will result
# in bounded weights.
# We define myfunc() to compute function h().
# Since we are interested in the posterior
# mean of logK, we define the function to
# be the second component of the vector theta.
tpar=list(m=fit$mode,var=2*fit$var,df=4)
tpar
myfunc = function(theta) {
  # h(theta) for importance sampling: extract the second component (log K).
  theta[2]
}
s=impsampling(betabinexch,tpar,
myfunc,10000,
cancermortality)
s
cbind(s$est,s$se)
# We see from the output that the importance
# sampling estimate of the mean of logK
# is 7.918 with an associated standard
# error of 0.0184.
##############################################
# Section 5.10 Sampling Importance Resampling
##############################################
library(LearnBayes)
data(cancermortality)
fit=laplace(betabinexch,c(-7,6),cancermortality)
tpar=list(m=fit$mode,var=2*fit$var,df=4)
theta.s=sir(betabinexch,tpar,10000,cancermortality)
S=bayes.influence(theta.s,cancermortality)
plot(c(0,0,0),S$summary,type="b",
lwd=3,xlim=c(-1,21),
ylim=c(5,11),
xlab="Observation removed",
ylab="log K")
for (i in 1:20)
lines(c(i,i,i),S$summary.obs[i,],type="b")
| /Materials- 7. Rejection and Importance Sampling/Session 7 R Script/S7_Intro_Bayesian_Compute.R | no_license | thecodemasterk/bayesian | R | false | false | 11,568 | r | ########################################
# Introduction to Bayesian Computation #
########################################
######################
# Rejection Sampling
######################
# library(LearnBayes)
# data(cancermortality)
# Previously, we were able to produce
# simulated samples directly from the
# posterior distribution since the
# distributions were familiar functional forms.
# So we could obtain Monte Carlo estimates
# of the posterior mean for any function
# of the parameters of interest.
# But in other situations, such as the
# beta-binomial example today, the
# posterior does not have a familiar form
# and so we need to use an alternative
# algorithm for producing a simulated sample.
# A general-purpose algorithm for
# simulating random draws from a given
# probability distribution is rejection sampling.
# Suppose we wish to produce an independent
# sample from a posterior density g(theta|y)
# where the normalizing constant may not be known.
# The first step in rejection sampling
# is to find another probability density
# p(theta) such that:
# - It is easy to simulate draws from p.
# - The density p resembles the posterior
# density of interest g in terms of
# location and spread.
# - For all theta and a constant c,
# g(theta|y) l.t.e.t. cp(theta).
# Suppose we are able to find a density p
# with these properties. Then one obtains
# draws from g using the following
# accept/reject algorithm:
# 1. Independently simulate theta from p and
# a uniform random variable U on the
# unit interval.
# 2. If U l.t.e.t. g(theta|y)/(cp(theta)), then accept theta
# as a draw from the density g;
# otherwise reject theta.
# 3. Continue steps 1 and 2 of the algorithm
# until one has collected a sufficient
# number of "accepted" theta's.
# We return to the beta-binomial example.
# We want to find a proposal density of
# a simple functional form that, when
# multiplied by an appropriate constant,
# covers the posterior density of interest.
# One choice for p would be a multivariate
# t density with mean and scale matrix
# chosen to match the posterior density.
# We use a multivariate t density with
# location fit$mode, scale matrix
# 2 fit$var, and 4 dof.
# We write a new function betabinT()
# with two inputs, the parameter
# theta and a list datapar with
# components data, the data matrix,
# and par, a list with the parameters
# of the t proposal density (mean,
# scale matrix, and degrees of freedom)
fit=laplace(betabinexch,
c(-7,6),
cancermortality)
fit
betabinT = function(theta, datapar)
{
  # Log ratio of the beta-binomial exchangeable posterior to the
  # multivariate-t proposal density, evaluated at theta.
  # datapar$data holds the data matrix; datapar$par holds the t
  # parameters (m = location, var = scale matrix, df = degrees of freedom).
  y_data = datapar$data
  t_par = datapar$par
  log_post = betabinexch(theta, y_data)
  log_prop = dmt(theta, mean = c(t_par$m), S = t_par$var, df = t_par$df, log = TRUE)
  log_post - log_prop
}
# We define parameters of t proposal density
# and the list datapar:
tpar=list(m=fit$mode,var=2*fit$var,df=4)
tpar
datapar=list(data=cancermortality,par=tpar)
datapar
# We run laplace() with the above
# function and an "intelligent"
# starting value
start=c(-6.9,12.4)
fit1=laplace(betabinT,start,datapar)
fit1$mode
# We find that the maximum value d
# occurs at the value theta = (-6.889, 12.422).
# The value of d is found by evaluating
# the function at the modal value.
betabinT(fit1$mode,datapar)
# We use rejectsampling() using
# constant value of d and simulate
# 10,000 draws from proposal density
theta=rejectsampling(betabinexch,tpar,
-569.2813,10000,
cancermortality)
dim(theta)
# theta has 2406 rows so acceptance
# rate is 2406/10000 = .24
# We plot simulated draws from rejection
# sampling on contour plot of previous
# log posterior density plot. Most
# of the draws are within the inner
# contour of the exact density
mycontour(betabinexch,c(-8,-4.5,3,16.5),
cancermortality, xlab="logit eta",
ylab="log K")
points(theta[,1],theta[,2])
###########################
### Importance Sampling ###
###########################
### see slides 6-9 for mathematical details ###
# As in rejection sampling, the issue in
# designing a good importance sampling
# estimate is finding a suitable sampling
# density p. This density should be of a
# familiar functional form so simulated
# draws are available.
# The density should mimic the posterior
# density g and have relatively flat tails
# so that the weight function w(theta) is
# bounded from above. One can monitor the
# choice of p by inspecting the values of
# the simulated weights w(thetaj). If there
# are no unusually large weights, then it
# is likely that the weight function is bounded
# and the importance sampler is providing a
# suitable estimate.
# library(LearnBayes)
data(cancermortality)
fit=laplace(betabinexch,c(-7,6),
cancermortality)
fit
# We write betabinexch.cond() this posterior
# density conditional on the value of theta1
# -6.819. The function allows the input of
# the vector of values theta2 = log K. The
# function returns the value of the density
# (not log density).
betabinexch.cond = function (log.K, data, logit.eta = -6.818793)
{
  # Conditional posterior density of log K in the beta-binomial exchangeable
  # model, with logit(eta) fixed (default -6.818793, the posterior mode for
  # the cancer mortality data). Vectorized over log.K.
  #
  # Args:
  #   log.K: numeric vector of log K values at which to evaluate the density.
  #   data: matrix with deaths y in column 1 and exposures n in column 2.
  #   logit.eta: fixed value of logit(eta); default preserves the original
  #     hard-coded behavior.
  # Returns: the density values scaled so the maximum equals 1 (not normalized).
  eta = exp(logit.eta)/(1 + exp(logit.eta))
  K = exp(log.K)
  y = data[, 1]; n = data[, 2]
  logf = 0*log.K  # accumulator with the same length as log.K
  # Beta-binomial log-likelihood contribution of each observation
  # (seq_along is safe for zero-row data, unlike 1:length(y))
  for (j in seq_along(y))
    logf = logf + lbeta(K * eta + y[j], K * (1 - eta) + n[j] - y[j]) - lbeta(K * eta, K * (1 - eta))
  # Add the log prior term for K and rescale by the maximum for stability
  val = logf + log.K - 2 * log(1 + K)
  exp(val - max(val))
}
# To compute the mean of logK for the cancer
# mortality data, suppose we let the proposal
# density p be normal with mean 8 and standard
# deviation 2.
# In this R code, we use the integrate()
# to find the normalizing constant of the
# posterior density of logK.
# Then, using the curve function, we display
# the conditional posterior density of logK
# and the normal proposal density in the top
# left graph. The top right graph displays the
# weight function, the ratio of the posterior
# density to the proposal density.
I=integrate(betabinexch.cond,2,16,cancermortality)
I
par(mfrow=c(2,2))
curve(betabinexch.cond(x,cancermortality)/I$value,
from=3,to=16, ylab="Density",
xlab="log K",lwd=3, main="Densities")
curve(dnorm(x,8,2),add=TRUE)
legend("topright",legend=c("Exact","Normal"),lwd=c(3,1))
curve(betabinexch.cond(x,cancermortality)/I$value/dnorm(x,8,2),
from=3,to=16, ylab="Weight",xlab="log K",
main="Weight = g/p")
# Although the normal proposal density
# resembles the posterior density with
# respect to location and spread, the
# posterior density has a flatter right
# tail than the proposal and the weight
# function is unbounded for large logK.
# Suppose instead that we let the proposal
# density have the t functional form with
# location 8, scale 2, and 2 degrees of
# freedom. Using more R commands, the
# bottom graphs display the posterior and
# proposal densities and the weight function.
# Here the t proposal density has flatter
# tails than the posterior density and the
# weight function is bounded. Here the t
# functional form is a better proposal
# for importance sampling.
curve(betabinexch.cond(x,cancermortality)/I$value,
from=3,to=16, ylab="Density", xlab="log K",
lwd=3, main="Densities")
curve(1/2*dt(x-8,df=2),add=TRUE)
legend("topright",legend=c("Exact","T(2)"),lwd=c(3,1))
curve(betabinexch.cond(x,cancermortality)/I$value/
(1/2*dt(x-8,df=2)),from=3,to=16,
ylab="Weight",xlab="log K",
main="Weight = g/p")
##############################
### Using a Multivariate t ###
### as a Proposal Density ###
##############################
# For a posterior density of a vector of
# real-valued parameters, a convenient
# choice of sampler p is a multivariate
# t density. The R function impsampling
# implements importance sampling for an
# arbitrary posterior density when p is
# a t density.
# There are five inputs to this function:
# - logf() is the function defining the
# logarithm of the posterior,
# - tpar() is a list of parameter values of
# the t density,
# - h() is a function defining the function
# h(theta) of interest,
# - n is the size of the simulated sample, and
# - data is the vector or list used in the
# definition of logf().
# In the function impsampling(), the functions
# rmt() and dmt() from the mnormt library
# are used to simulate and compute values
# of the t density.
# In this R code from impsampling(), we
# simulate draws from the sampling density,
# compute values of the log sampling density
# and the log posterior density at the
# simulated draws, and compute the weights
# and importance sampler estimate.
# theta = rmt(n, mean = c(tpar$m), S = tpar$var, df = tpar$df)
# lf = matrix(0, c(dim(theta)[1], 1))
# lp = dmt(theta, mean = c(tpar$m), S = tpar$var, df = tpar$df,
# log = TRUE)
# md = max(lf - lp)
# wt = exp(lf - lp - md)
# est = sum(wt * H)/sum(wt)
# Note that the value md is the maximum
# value of the difference of logs of the
# posterior and proposal density - this value
# is used in the computation of the weights
# to prevent possible overflow.
# The output of impsampling() is a list with
# four components:
# - est is the importance sampling estimate,
# - se is the corresponding simulation standard
# error,
# - theta is a matrix of simulated draws from
# the proposal density p, and
# - wt is a vector of the corresponding
# weights.
# To illustrate importance sampling, we
# return to our beta-binomial example and
# consider the problem of estimating the
# posterior mean of logK.
# The proposal density used in the development
# of a rejection algorithm seems to be a
# good choice for importance sampling.
# We choose a t density where the location
# is the posterior mode (found from laplace),
# the scale matrix is twice the estimated
# variance-covariance matrix, and the number
# of degrees of freedom is 4. This choice for
# p will resemble the posterior density and
# have flat tails that we hope will result
# in bounded weights.
# We define myfunc() to compute function h().
# Since we are interested in the posterior
# mean of logK, we define the function to
# be the second component of the vector theta.
tpar=list(m=fit$mode,var=2*fit$var,df=4)
tpar
myfunc = function(theta) {
  # h(theta) for importance sampling: extract the second component (log K).
  theta[2]
}
s=impsampling(betabinexch,tpar,
myfunc,10000,
cancermortality)
s
cbind(s$est,s$se)
# We see from the output that the importance
# sampling estimate of the mean of logK
# is 7.918 with an associated standard
# error of 0.0184.
##############################################
# Section 5.10 Sampling Importance Resampling
##############################################
library(LearnBayes)
data(cancermortality)
fit=laplace(betabinexch,c(-7,6),cancermortality)
tpar=list(m=fit$mode,var=2*fit$var,df=4)
theta.s=sir(betabinexch,tpar,10000,cancermortality)
S=bayes.influence(theta.s,cancermortality)
plot(c(0,0,0),S$summary,type="b",
lwd=3,xlim=c(-1,21),
ylim=c(5,11),
xlab="Observation removed",
ylab="log K")
for (i in 1:20)
lines(c(i,i,i),S$summary.obs[i,],type="b")
|
# Exploratory script: download a table from Google Drive and summarize it.
install.packages("googledrive")
library("googledrive")
# List up to 20 files visible to the authenticated Drive account
archivos = drive_find(n_max=20)
archivos$name
# Download the target sheet export as CSV into the working directory
drive_download("Base de Prueba_Página 1_Tabla", type = "csv")
library(readxl)
datos = read.csv("Base de Prueba_Página 1_Tabla.csv", sep = ",")
head(datos)
# Frequency tables for the categorical columns
table(datos$Event.Name)
table(datos$Country)
table(datos$Mobile.Brand.Name)
# Correlation between event counts and unique users
cor(datos$Event.Count, datos$Unique.Users)
library(ggplot2)
# Boxplots of event counts by event name
ggplot(datos, aes(x = Event.Name, y = Event.Count, fill=Event.Name)) +
geom_boxplot() + coord_flip()
# Boxplots of event counts by device brand
ggplot(datos, aes(x = Mobile.Brand.Name, y = Event.Count, fill=Event.Name)) +
geom_boxplot() + coord_flip()
#Lamentablemente los gráficos no aportan mucho como información | /Test1.R | no_license | EliasMind/Test-Tarea-1 | R | false | false | 784 | r | install.packages("googledrive")
# Exploratory script: download a table from Google Drive and summarize it.
library("googledrive")
# List up to 20 files visible to the authenticated Drive account
archivos = drive_find(n_max=20)
archivos$name
# Download the target sheet export as CSV into the working directory
drive_download("Base de Prueba_Página 1_Tabla", type = "csv")
library(readxl)
datos = read.csv("Base de Prueba_Página 1_Tabla.csv", sep = ",")
head(datos)
# Frequency tables for the categorical columns
table(datos$Event.Name)
table(datos$Country)
table(datos$Mobile.Brand.Name)
# Correlation between event counts and unique users
cor(datos$Event.Count, datos$Unique.Users)
library(ggplot2)
# Boxplots of event counts by event name, device brand and country
ggplot(datos, aes(x = Event.Name, y = Event.Count, fill=Event.Name)) +
geom_boxplot() + coord_flip()
ggplot(datos, aes(x = Mobile.Brand.Name, y = Event.Count, fill=Event.Name)) +
geom_boxplot() + coord_flip()
ggplot(datos, aes(x = Country, y = Event.Count, fill=Event.Name)) +
geom_boxplot() + coord_flip()
# Unfortunately the plots do not contribute much information
#Lamentablemente los gráficos no aportan mucho como información |
\name{mixor-deprecated}
\alias{mixor-deprecated}
\docType{package}
\title{
Deprecated Functions in Package mixor
}
\description{
These functions are provided for compatibility with older versions
of \code{mixor} only, and will be defunct at the next release.
}
\details{
The following functions are deprecated and will be made defunct; use
the replacement indicated below:
\itemize{
\item{mixord: \code{\link{mixor}}}
}
}
\author{
Kellie J. Archer, Donald Hedeker, Rachel Nordgren, Robert D. Gibbons,
Maintainer: Kellie J. Archer <kjarcher@vcu.edu>
}
| /man/Mixor-deprecated.Rd | no_license | cran/mixor | R | false | false | 589 | rd | \name{mixor-deprecated}
\alias{mixor-deprecated}
\docType{package}
\title{
Deprecated Functions in Package mixor
}
\description{
These functions are provided for compatibility with older versions
of \code{mixor} only, and will be defunct at the next release.
}
\details{
The following functions are deprecated and will be made defunct; use
the replacement indicated below:
\itemize{
\item{mixord: \code{\link{mixor}}}
}
}
\author{
Kellie J. Archer, Donald Hedeker, Rachel Nordgren, Robert D. Gibbons,
Maintainer: Kellie J. Archer <kjarcher@vcu.edu>
}
|
# Read data from text file into a data frame. Specify '?' character is na
df<-read.table("household_power_consumption.txt",header=TRUE,sep=";",na.strings="?")
#Subset data for 2007-02-01 and 2007-02-02 dates
df<-df[df$Date=='1/2/2007' | df$Date=='2/2/2007',]
# Convert to Date and Time fields to Date-Time class
df$Time<-strptime(paste(df$Date,df$Time),"%d/%m/%Y %H:%M:%S")
#Create plot2.png in working directory
png(filename="plot2.png",width = 480, height = 480)
#Create a plot and send it to the png file
plot(df$Time,df$Global_active_power,ylab="Global Active Power (kilowatts)",xlab="",type="l")
#Close the png file device
dev.off()
| /plot2.R | no_license | ekumar0987/ExData_Plotting1 | R | false | false | 664 | r | # Read data from text file into a data frame. Specify '?' character is na
df<-read.table("household_power_consumption.txt",header=TRUE,sep=";",na.strings="?")
#Subset data for 2007-02-01 and 2007-02-02 dates
df<-df[df$Date=='1/2/2007' | df$Date=='2/2/2007',]
# Convert to Date and Time fields to Date-Time class
df$Time<-strptime(paste(df$Date,df$Time),"%d/%m/%Y %H:%M:%S")
#Create plot2.png in working directory
png(filename="plot2.png",width = 480, height = 480)
#Create a plot and send it to the png file
plot(df$Time,df$Global_active_power,ylab="Global Active Power (kilowatts)",xlab="",type="l")
#Close the png file device
dev.off()
|
dew <-
function(x, r, y, te, s0, sigma, skew, kurt)
{
v = sqrt(exp(sigma^2 * te) - 1)
m = log(s0) + (r - y - (sigma^2)/2) * te
skew.lognorm = 3 * v + v^3
kurt.lognorm = 16 * v^2 + 15 * v^4 + 6 * v^6 + v^8
cumul.lognorm = (s0 * exp((r-y) * te) * v)^2
density.lognorm = dlnorm(x = x, meanlog = log(s0) + (r - y - (sigma^2)/2)*te, sdlog = sigma * sqrt(te), log = FALSE)
frst.der.lognorm = -1 * ( 1 + (log(x) - m)/(te * sigma^2) ) * density.lognorm/x
scnd.der.lognorm = -1 * ( 2 + (log(x) - m)/(te * sigma^2) ) * frst.der.lognorm / x - density.lognorm/(x^2 * sigma^2)
thrd.der.lognorm = -1 * ( 3 + (log(x) - m)/(te * sigma^2) ) * scnd.der.lognorm / x - 2 * frst.der.lognorm/(x^2 * sigma^2) + density.lognorm/(x^3 * sigma^2)
frth.der.lognorm = -1 * ( 4 + (log(x) - m)/(te * sigma^2) ) * thrd.der.lognorm / x - 3 * scnd.der.lognorm/(x^2 * sigma^2) +
3 * frst.der.lognorm/(x^3 * sigma^2) - 2 * density.lognorm/(x^4 * sigma^2)
out = density.lognorm - (skew - skew.lognorm) * ((cumul.lognorm)^(1.5))*thrd.der.lognorm/6 + (kurt - kurt.lognorm)*((cumul.lognorm)^2)*frth.der.lognorm/24
}
| /R/dew.R | no_license | freephys/RND | R | false | false | 1,206 | r | dew <-
function(x, r, y, te, s0, sigma, skew, kurt)
{
v = sqrt(exp(sigma^2 * te) - 1)
m = log(s0) + (r - y - (sigma^2)/2) * te
skew.lognorm = 3 * v + v^3
kurt.lognorm = 16 * v^2 + 15 * v^4 + 6 * v^6 + v^8
cumul.lognorm = (s0 * exp((r-y) * te) * v)^2
density.lognorm = dlnorm(x = x, meanlog = log(s0) + (r - y - (sigma^2)/2)*te, sdlog = sigma * sqrt(te), log = FALSE)
frst.der.lognorm = -1 * ( 1 + (log(x) - m)/(te * sigma^2) ) * density.lognorm/x
scnd.der.lognorm = -1 * ( 2 + (log(x) - m)/(te * sigma^2) ) * frst.der.lognorm / x - density.lognorm/(x^2 * sigma^2)
thrd.der.lognorm = -1 * ( 3 + (log(x) - m)/(te * sigma^2) ) * scnd.der.lognorm / x - 2 * frst.der.lognorm/(x^2 * sigma^2) + density.lognorm/(x^3 * sigma^2)
frth.der.lognorm = -1 * ( 4 + (log(x) - m)/(te * sigma^2) ) * thrd.der.lognorm / x - 3 * scnd.der.lognorm/(x^2 * sigma^2) +
3 * frst.der.lognorm/(x^3 * sigma^2) - 2 * density.lognorm/(x^4 * sigma^2)
out = density.lognorm - (skew - skew.lognorm) * ((cumul.lognorm)^(1.5))*thrd.der.lognorm/6 + (kurt - kurt.lognorm)*((cumul.lognorm)^2)*frth.der.lognorm/24
}
|
#' @import stats
#'
#' @title Quantile residual tests for GMAR, StMAR , and G-StMAR models
#'
#' @description \code{quantile_residual_tests} performs quantile residual tests for GMAR, StMAR,
#' and G-StMAR models, testing normality, autocorrelation, and conditional heteroscedasticity
#' of the quantile residuals.
#'
#' @inheritParams add_data
#' @param lags_ac a numeric vector of positive integers specifying the lags for which autocorrelation is tested.
#' @param lags_ch a numeric vector of positive integers specifying the lags for which conditional heteroscedasticity
#' is tested.
#' @param nsimu a positive integer specifying to how many simulated observations the covariance matrix Omega
#' (see Kalliovirta (2012)) should be based on. If smaller than data size, then omega will be based on the
#' given data and not on simulated data. Having the covariance matrix omega based on a large simulated sample
#' might improve the tests size properties.
#' @param print_res a logical argument defining whether the results should be printed or not.
#' @details
#' For a correctly specified GSMAR model employing the maximum likelihood estimator, the quantile residuals
#' are asymptotically independent with standard normal distribution. They can hence be used in a similar
#' manner to conventional Pearson's residuals. For more details about quantile residual based diagnostics,
#' and in particular, about the quantile residual tests, see the cited article by \emph{Kalliovirta (2012)}.
#' @return Returns an object of class \code{'qrtest'} containing the test results in data frames. In the cases
#' of autocorrelation and conditional heteroscedasticity tests, the returned object also contains the
#' associated individual statistics and their standard errors, discussed in \emph{Kalliovirta (2012)} at
#' the pages 369-370.
#' @inherit quantile_residuals_int references
#' @seealso \code{\link{profile_logliks}}, \code{\link{fitGSMAR}}, \code{\link{GSMAR}}, \code{\link{diagnostic_plot}},
#' \code{\link{predict.gsmar}}, \code{\link{get_test_Omega}},
#' @examples
#' \donttest{
#' ## The below examples take approximately 30 seconds to run.
#'
#' # G-StMAR model with one GMAR type and one StMAR type regime
#' fit42gs <- fitGSMAR(data=M10Y1Y, p=4, M=c(1, 1), model="G-StMAR",
#' ncalls=1, seeds=4)
#'
#' # Tests based on the observed data (without simulation procedure) with the
#' # default lags:
#' qrt1 <- quantile_residual_tests(fit42gs)
#'
#' # Tests based on the simulation procedure using sample size 10000 and with
#' # the lags specified by hand:
#' set.seed(1)
#' qrt2 <- quantile_residual_tests(fit42gs, lags_ac=c(1, 6), nsimu=10000)
#'
#' # GMAR model
#' fit12 <- fitGSMAR(data=simudata, p=1, M=2, model="GMAR", ncalls=1, seeds=1)
#' qrt3 <- quantile_residual_tests(fit12, lags_ac=c(1, 5, 10, 15))
#' }
#' @export
quantile_residual_tests <- function(gsmar, lags_ac=c(1, 3, 6, 12), lags_ch=lags_ac, nsimu=1, print_res=TRUE) {
# Checks + collect the relevant statistics etc
check_gsmar(gsmar)
check_data(gsmar)
data <- gsmar$data
p <- gsmar$model$p
M <- gsmar$model$M
params <- gsmar$params
model <- gsmar$model$model
restricted <- gsmar$model$restricted
constraints <- gsmar$model$constraints
parametrization <- gsmar$model$parametrization
T_obs <- length(data) - p
if(max(c(lags_ac, lags_ch)) >= T_obs) stop("The lags are too large compared to the data size")
# Obtain the quantile residuals
qresiduals <- quantile_residuals_int(data=data, p=p, M=M, params=params, model=model, restricted=restricted,
constraints=constraints, parametrization=parametrization)
# Sample used in calculation of Omega: either simulated or the data
if(nsimu > length(data)) {
omegaData <- as.matrix(simulate.gsmar(gsmar, nsim=nsimu)$sample)
} else {
omegaData <- data
}
# Function to calculate the Omega matrix (that is presented in Kalliovirta 2012, Lemma 2.2); calls the function get_test_Omega
try_to_get_omega <- function(g, dim_g, which_test, which_lag=NA) {
print_message <- function(because_of) {
if(which_test == "norm") {
message(paste("Can't perform normality test", because_of))
} else if(which_test == "ac") {
message(paste("Can't perform autocorrelation test for lag", which_lag, because_of))
} else if(which_test == "ch") {
message(paste("Can't perform conditional heteroskedasticity test for lag", which_lag, because_of))
}
}
num_string <- "because of numerical problems."
omg <- tryCatch(get_test_Omega(data=omegaData, p=p, M=M, params=params, model=model, restricted=restricted,
constraints=constraints, parametrization=parametrization, g=g, dim_g=dim_g),
error=function(e) {
if(model %in% c("StMAR", "G-StMAR")) {
dfs <- pick_dfs(p=p, M=M, params=params, model=model)
if(any(dfs > 100)) {
print_message("- possibly because some degrees of freedom parameter is very large. Consider changing the corresponding StMAR type regimes into GMAR type with the function 'stmar_to_gstmar'.")
} else {
print_message(num_string)
}
} else {
print_message(num_string)
}
return(NA)
})
if(is.matrix(omg) & anyNA(omg)) {
print_message("- possibly because the model fits too poorly")
} else if(length(omg) == 1) {
if(is.na(omg)) return(matrix(NA, nrow=dim_g, ncol=dim_g))
}
omg
}
# Functions to format values and print the results
format_value0 <- format_valuef(0)
format_value3 <- format_valuef(3)
print_resf <- function(lag, p_val) {
sep <- ifelse(lag < 10, " | ", "| ")
cat(" ", format_value0(lag), sep, format_value3(p_val), "\n")
}
####################
## Test normality ## (Kalliovirta 2012, Section 3.3)
####################
# The function 'g' of Kalliovirta 2012, Section 3.3
g <- function(r) {
cbind(r^2 - 1, r^3, r^4 - 3)
}
dim_g <- 3
# Omega (Kalliovirta 2012, Lemma 2.2)
Omega <- try_to_get_omega(g=g, dim_g=dim_g, which_test="norm", which_lag=NA)
# Test statistics and p-value
sumg <- as.matrix(colSums(g(qresiduals)))
N <- crossprod(sumg, solve(Omega, sumg))/T_obs
pvalue <- 1 - pchisq(N, df=dim_g)
# Results
if(print_res) cat(paste0("Normality test p-value: ", format_value3(pvalue)), "\n\n")
norm_res <- data.frame(testStat=N, df=dim_g, pvalue=pvalue, row.names=NULL)
#############################################################
## Test autocorrelation and conditional heteroscedasticity ## (Kalliovirta 2012, Sections 3.1 and 3.2)
#############################################################
# Storage for the results
tmp <- rep(NA, length(lags_ac))
ac_res <- data.frame(lags=lags_ac, testStat=tmp, df=tmp, pvalue=tmp, indStat=tmp, stdError=tmp)
tmp <- rep(NA, length(lags_ch))
ch_res <- data.frame(lags=lags_ch, testStat=tmp, df=tmp, pvalue=tmp, indStat=tmp, stdError=tmp)
ret <- list(norm_res=norm_res,
ac_res=ac_res,
ch_res=ch_res)
# Returns the function 'g' for a given lag and function FUN to be applied (see Kalliovirta 2012, Sections 3.1 and 3.2 for the 'g').
get_g <- function(lag, FUN) {
FUN <- match.fun(FUN)
function(r) { # Takes in quantile residuals vector r, returns a (T - lag x lag) matrix. (lag = dim_g)
res <- vapply((1 + lag):length(r), function(i1) vapply(1:lag, function(i2) FUN(r, i1, i2), numeric(1)), numeric(lag))
if(lag == 1) {
return(as.matrix(res))
} else {
return(t(res))
}
}
}
# Apart from the function 'g', the test statistics are similar for AC and CH tests.
# Function to calculate ac and ch tests
test_ac_or_ch <- function(which_test=c("ac", "ch")) {
which_test <- match.arg(which_test)
# Which lags to go through
if(which_test == "ac") {
lags_to_loop <- lags_ac
} else {
lags_to_loop <- lags_ch
}
j1 <- 1 # Count iterations
for(lag in lags_to_loop) {
# The function 'g'
if(which_test == "ac") {
g <- get_g(lag, FUN=function(r, i1, i2) r[i1]*r[i1 - i2]) # FUN = r[i1]*r[i1 - i2]
} else { # to_test == "ch"
g <- get_g(lag, FUN=function(r, i1, i2) (r[i1]^2 - 1)*r[i1 - i2]^2) # FUN = (r[i1]^2 - 1)*r[i1 - i2]^2
}
# Omega (Kalliovirta 2012, Lemma 2.2)
Omega <- try_to_get_omega(g=g, dim_g=lag, which_test=which_test, which_lag=lag)
# Test statistics: sample autocorrelation c_k/h.sked statistic d_k for of the current lag, standard error, and p-value
sumg <- as.matrix(colSums(g(qresiduals)))
AorH <- crossprod(sumg, solve(Omega, sumg))/(T_obs - lag) # See A ("ac") and H ("ch") test statistics in Kalliovirta 2012, pp.369-370
indStat <- sumg[lag]/(T_obs - lag) # c_k ("ac") or d_k ("ch") of Kalliovirta 2012
stdError <- sqrt(Omega[lag, lag]/T_obs) # See Kalliovirta 2012, pp.369-370
pvalue <- 1 - pchisq(AorH, df=lag)
# Store the results
if(print_res) print_resf(lag=lag, p_val=pvalue)
index <- ifelse(which_test == "ac", 2, 3) # Index in the list 'ret'
ret[[index]]$testStat[j1] <<- AorH
ret[[index]]$df[j1] <<- lag
ret[[index]]$pvalue[j1] <<- pvalue
ret[[index]]$indStat[j1] <<- indStat
ret[[index]]$stdError[j1] <<- stdError
j1 <- j1 + 1
}
}
# Calculate the tests and print the results
if(print_res) cat("Autocorrelation tests:\nlags | p-value\n")
test_ac_or_ch("ac")
if(print_res) cat("\nConditional heteroskedasticity tests:\nlags | p-value\n")
test_ac_or_ch("ch")
structure(ret, class="qrtest")
}
| /R/quantileResidualTests.R | no_license | saviviro/uGMAR | R | false | false | 9,898 | r | #' @import stats
#'
#' @title Quantile residual tests for GMAR, StMAR , and G-StMAR models
#'
#' @description \code{quantile_residual_tests} performs quantile residual tests for GMAR, StMAR,
#' and G-StMAR models, testing normality, autocorrelation, and conditional heteroscedasticity
#' of the quantile residuals.
#'
#' @inheritParams add_data
#' @param lags_ac a numeric vector of positive integers specifying the lags for which autocorrelation is tested.
#' @param lags_ch a numeric vector of positive integers specifying the lags for which conditional heteroscedasticity
#' is tested.
#' @param nsimu a positive integer specifying to how many simulated observations the covariance matrix Omega
#' (see Kalliovirta (2012)) should be based on. If smaller than data size, then omega will be based on the
#' given data and not on simulated data. Having the covariance matrix omega based on a large simulated sample
#' might improve the tests size properties.
#' @param print_res a logical argument defining whether the results should be printed or not.
#' @details
#' For a correctly specified GSMAR model employing the maximum likelihood estimator, the quantile residuals
#' are asymptotically independent with standard normal distribution. They can hence be used in a similar
#' manner to conventional Pearson's residuals. For more details about quantile residual based diagnostics,
#' and in particular, about the quantile residual tests, see the cited article by \emph{Kalliovirta (2012)}.
#' @return Returns an object of class \code{'qrtest'} containing the test results in data frames. In the cases
#' of autocorrelation and conditional heteroscedasticity tests, the returned object also contains the
#' associated individual statistics and their standard errors, discussed in \emph{Kalliovirta (2012)} at
#' the pages 369-370.
#' @inherit quantile_residuals_int references
#' @seealso \code{\link{profile_logliks}}, \code{\link{fitGSMAR}}, \code{\link{GSMAR}}, \code{\link{diagnostic_plot}},
#' \code{\link{predict.gsmar}}, \code{\link{get_test_Omega}},
#' @examples
#' \donttest{
#' ## The below examples take approximately 30 seconds to run.
#'
#' # G-StMAR model with one GMAR type and one StMAR type regime
#' fit42gs <- fitGSMAR(data=M10Y1Y, p=4, M=c(1, 1), model="G-StMAR",
#' ncalls=1, seeds=4)
#'
#' # Tests based on the observed data (without simulation procedure) with the
#' # default lags:
#' qrt1 <- quantile_residual_tests(fit42gs)
#'
#' # Tests based on the simulation procedure using sample size 10000 and with
#' # the lags specified by hand:
#' set.seed(1)
#' qrt2 <- quantile_residual_tests(fit42gs, lags_ac=c(1, 6), nsimu=10000)
#'
#' # GMAR model
#' fit12 <- fitGSMAR(data=simudata, p=1, M=2, model="GMAR", ncalls=1, seeds=1)
#' qrt3 <- quantile_residual_tests(fit12, lags_ac=c(1, 5, 10, 15))
#' }
#' @export
quantile_residual_tests <- function(gsmar, lags_ac=c(1, 3, 6, 12), lags_ch=lags_ac, nsimu=1, print_res=TRUE) {
# Checks + collect the relevant statistics etc
check_gsmar(gsmar)
check_data(gsmar)
data <- gsmar$data
p <- gsmar$model$p
M <- gsmar$model$M
params <- gsmar$params
model <- gsmar$model$model
restricted <- gsmar$model$restricted
constraints <- gsmar$model$constraints
parametrization <- gsmar$model$parametrization
T_obs <- length(data) - p
if(max(c(lags_ac, lags_ch)) >= T_obs) stop("The lags are too large compared to the data size")
# Obtain the quantile residuals
qresiduals <- quantile_residuals_int(data=data, p=p, M=M, params=params, model=model, restricted=restricted,
constraints=constraints, parametrization=parametrization)
# Sample used in calculation of Omega: either simulated or the data
if(nsimu > length(data)) {
omegaData <- as.matrix(simulate.gsmar(gsmar, nsim=nsimu)$sample)
} else {
omegaData <- data
}
# Function to calculate the Omega matrix (that is presented in Kalliovirta 2012, Lemma 2.2); calls the function get_test_Omega
try_to_get_omega <- function(g, dim_g, which_test, which_lag=NA) {
print_message <- function(because_of) {
if(which_test == "norm") {
message(paste("Can't perform normality test", because_of))
} else if(which_test == "ac") {
message(paste("Can't perform autocorrelation test for lag", which_lag, because_of))
} else if(which_test == "ch") {
message(paste("Can't perform conditional heteroskedasticity test for lag", which_lag, because_of))
}
}
num_string <- "because of numerical problems."
omg <- tryCatch(get_test_Omega(data=omegaData, p=p, M=M, params=params, model=model, restricted=restricted,
constraints=constraints, parametrization=parametrization, g=g, dim_g=dim_g),
error=function(e) {
if(model %in% c("StMAR", "G-StMAR")) {
dfs <- pick_dfs(p=p, M=M, params=params, model=model)
if(any(dfs > 100)) {
print_message("- possibly because some degrees of freedom parameter is very large. Consider changing the corresponding StMAR type regimes into GMAR type with the function 'stmar_to_gstmar'.")
} else {
print_message(num_string)
}
} else {
print_message(num_string)
}
return(NA)
})
if(is.matrix(omg) & anyNA(omg)) {
print_message("- possibly because the model fits too poorly")
} else if(length(omg) == 1) {
if(is.na(omg)) return(matrix(NA, nrow=dim_g, ncol=dim_g))
}
omg
}
# Functions to format values and print the results
format_value0 <- format_valuef(0)
format_value3 <- format_valuef(3)
print_resf <- function(lag, p_val) {
sep <- ifelse(lag < 10, " | ", "| ")
cat(" ", format_value0(lag), sep, format_value3(p_val), "\n")
}
####################
## Test normality ## (Kalliovirta 2012, Section 3.3)
####################
# The function 'g' of Kalliovirta 2012, Section 3.3
g <- function(r) {
cbind(r^2 - 1, r^3, r^4 - 3)
}
dim_g <- 3
# Omega (Kalliovirta 2012, Lemma 2.2)
Omega <- try_to_get_omega(g=g, dim_g=dim_g, which_test="norm", which_lag=NA)
# Test statistics and p-value
sumg <- as.matrix(colSums(g(qresiduals)))
N <- crossprod(sumg, solve(Omega, sumg))/T_obs
pvalue <- 1 - pchisq(N, df=dim_g)
# Results
if(print_res) cat(paste0("Normality test p-value: ", format_value3(pvalue)), "\n\n")
norm_res <- data.frame(testStat=N, df=dim_g, pvalue=pvalue, row.names=NULL)
#############################################################
## Test autocorrelation and conditional heteroscedasticity ## (Kalliovirta 2012, Sections 3.1 and 3.2)
#############################################################
# Storage for the results
tmp <- rep(NA, length(lags_ac))
ac_res <- data.frame(lags=lags_ac, testStat=tmp, df=tmp, pvalue=tmp, indStat=tmp, stdError=tmp)
tmp <- rep(NA, length(lags_ch))
ch_res <- data.frame(lags=lags_ch, testStat=tmp, df=tmp, pvalue=tmp, indStat=tmp, stdError=tmp)
ret <- list(norm_res=norm_res,
ac_res=ac_res,
ch_res=ch_res)
# Returns the function 'g' for a given lag and function FUN to be applied (see Kalliovirta 2012, Sections 3.1 and 3.2 for the 'g').
get_g <- function(lag, FUN) {
FUN <- match.fun(FUN)
function(r) { # Takes in quantile residuals vector r, returns a (T - lag x lag) matrix. (lag = dim_g)
res <- vapply((1 + lag):length(r), function(i1) vapply(1:lag, function(i2) FUN(r, i1, i2), numeric(1)), numeric(lag))
if(lag == 1) {
return(as.matrix(res))
} else {
return(t(res))
}
}
}
# Apart from the function 'g', the test statistics are similar for AC and CH tests.
# Function to calculate ac and ch tests
test_ac_or_ch <- function(which_test=c("ac", "ch")) {
which_test <- match.arg(which_test)
# Which lags to go through
if(which_test == "ac") {
lags_to_loop <- lags_ac
} else {
lags_to_loop <- lags_ch
}
j1 <- 1 # Count iterations
for(lag in lags_to_loop) {
# The function 'g'
if(which_test == "ac") {
g <- get_g(lag, FUN=function(r, i1, i2) r[i1]*r[i1 - i2]) # FUN = r[i1]*r[i1 - i2]
} else { # to_test == "ch"
g <- get_g(lag, FUN=function(r, i1, i2) (r[i1]^2 - 1)*r[i1 - i2]^2) # FUN = (r[i1]^2 - 1)*r[i1 - i2]^2
}
# Omega (Kalliovirta 2012, Lemma 2.2)
Omega <- try_to_get_omega(g=g, dim_g=lag, which_test=which_test, which_lag=lag)
# Test statistics: sample autocorrelation c_k/h.sked statistic d_k for of the current lag, standard error, and p-value
sumg <- as.matrix(colSums(g(qresiduals)))
AorH <- crossprod(sumg, solve(Omega, sumg))/(T_obs - lag) # See A ("ac") and H ("ch") test statistics in Kalliovirta 2012, pp.369-370
indStat <- sumg[lag]/(T_obs - lag) # c_k ("ac") or d_k ("ch") of Kalliovirta 2012
stdError <- sqrt(Omega[lag, lag]/T_obs) # See Kalliovirta 2012, pp.369-370
pvalue <- 1 - pchisq(AorH, df=lag)
# Store the results
if(print_res) print_resf(lag=lag, p_val=pvalue)
index <- ifelse(which_test == "ac", 2, 3) # Index in the list 'ret'
ret[[index]]$testStat[j1] <<- AorH
ret[[index]]$df[j1] <<- lag
ret[[index]]$pvalue[j1] <<- pvalue
ret[[index]]$indStat[j1] <<- indStat
ret[[index]]$stdError[j1] <<- stdError
j1 <- j1 + 1
}
}
# Calculate the tests and print the results
if(print_res) cat("Autocorrelation tests:\nlags | p-value\n")
test_ac_or_ch("ac")
if(print_res) cat("\nConditional heteroskedasticity tests:\nlags | p-value\n")
test_ac_or_ch("ch")
structure(ret, class="qrtest")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lineup.R
\name{buildLineUp}
\alias{buildLineUp}
\title{lineup - factory for LineUp HTMLWidget}
\usage{
buildLineUp(
x,
width = "100\%",
height = NULL,
elementId = NULL,
dependencies = crosstalk::crosstalkLibs()
)
}
\arguments{
\item{x}{LineUpBuilder object}
\item{width}{width of the element}
\item{height}{height of the element}
\item{elementId}{unique element id}
\item{dependencies}{include crosstalk dependencies}
}
\value{
html lineup widget
}
\description{
lineup - factory for LineUp HTMLWidget
}
| /man/buildLineUp.Rd | permissive | lenamax2355/lineup_htmlwidget | R | false | true | 596 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lineup.R
\name{buildLineUp}
\alias{buildLineUp}
\title{lineup - factory for LineUp HTMLWidget}
\usage{
buildLineUp(
x,
width = "100\%",
height = NULL,
elementId = NULL,
dependencies = crosstalk::crosstalkLibs()
)
}
\arguments{
\item{x}{LineUpBuilder object}
\item{width}{width of the element}
\item{height}{height of the element}
\item{elementId}{unique element id}
\item{dependencies}{include crosstalk dependencies}
}
\value{
html lineup widget
}
\description{
lineup - factory for LineUp HTMLWidget
}
|
library(stringr)
source("../../R/SurveyMap.R")
context("survey-map")
survey_values <- c('18-25','26-35','36-45','46-55','56-65','66-75','76-90')
popn_values <- c('18-35','18-35','36-55','36-55','56-65','66+','66+')
survey_values2 <- c('M','F')
popn_values2 <- c('Male','Female')
test_that("create SurveyMap, no args", {
smap = SurveyMap$new()
expect_output(smap$print(),"empty mapping")
})
test_that("create SurveyMap, 1 mapping", {
smap <- SurveyMap$new("age_s", "age_p", survey_values, popn_values)
expect_output(smap$print(),"age_s = age_p")
expect_output(smap$print(),"18-25 = 18-35")
})
test_that("add mapping", {
smap <- SurveyMap$new("age_s", "age_p", survey_values, popn_values)
smap$add("sex_s", "sex_p", survey_values2, popn_values2)
expect_output(smap$print(),"M = Male")
})
test_that("create SurveyMap, 2 mappings", {
smap2 = SurveyMap$new(c("age_s", "sex_s"),
c("age_p", "sex_p"),
list(survey_values, survey_values2),
list(popn_values, popn_values2))
expect_output(smap2$print(),"age_s = age_p")
expect_output(smap2$print(),"M = Male")
})
test_that("delete mapping", {
smap2 = SurveyMap$new(c("age_s", "sex_s"),
c("age_p", "sex_p"),
list(survey_values, survey_values2),
list(popn_values, popn_values2))
smap2$delete("age_s")
s = capture_output({
smap2$print()
})
expect_equal(1, str_count(s, "--------------"))
})
test_that("rename mapping", {
smap3 = SurveyMap$new(c("age_s", "sex_s"),
c("age_p", "sex_p"),
list(survey_values, survey_values2),
list(popn_values, popn_values2))
survey_values3 <- c('H','F')
popn_values3 <- c('Homme','Femme')
smap3$replace("sex_s", "sex_s", "sex_p", survey_values3, popn_values3)
expect_output(smap3$print(),"age_s = age_p")
expect_output(smap3$print(),"H = Homme")
})
| /tests/testthat/test_surveymap.R | permissive | yajuansi-sophie/mrp-kit | R | false | false | 2,057 | r | library(stringr)
source("../../R/SurveyMap.R")
context("survey-map")
survey_values <- c('18-25','26-35','36-45','46-55','56-65','66-75','76-90')
popn_values <- c('18-35','18-35','36-55','36-55','56-65','66+','66+')
survey_values2 <- c('M','F')
popn_values2 <- c('Male','Female')
test_that("create SurveyMap, no args", {
smap = SurveyMap$new()
expect_output(smap$print(),"empty mapping")
})
test_that("create SurveyMap, 1 mapping", {
smap <- SurveyMap$new("age_s", "age_p", survey_values, popn_values)
expect_output(smap$print(),"age_s = age_p")
expect_output(smap$print(),"18-25 = 18-35")
})
test_that("add mapping", {
smap <- SurveyMap$new("age_s", "age_p", survey_values, popn_values)
smap$add("sex_s", "sex_p", survey_values2, popn_values2)
expect_output(smap$print(),"M = Male")
})
test_that("create SurveyMap, 2 mappings", {
smap2 = SurveyMap$new(c("age_s", "sex_s"),
c("age_p", "sex_p"),
list(survey_values, survey_values2),
list(popn_values, popn_values2))
expect_output(smap2$print(),"age_s = age_p")
expect_output(smap2$print(),"M = Male")
})
test_that("delete mapping", {
smap2 = SurveyMap$new(c("age_s", "sex_s"),
c("age_p", "sex_p"),
list(survey_values, survey_values2),
list(popn_values, popn_values2))
smap2$delete("age_s")
s = capture_output({
smap2$print()
})
expect_equal(1, str_count(s, "--------------"))
})
test_that("rename mapping", {
smap3 = SurveyMap$new(c("age_s", "sex_s"),
c("age_p", "sex_p"),
list(survey_values, survey_values2),
list(popn_values, popn_values2))
survey_values3 <- c('H','F')
popn_values3 <- c('Homme','Femme')
smap3$replace("sex_s", "sex_s", "sex_p", survey_values3, popn_values3)
expect_output(smap3$print(),"age_s = age_p")
expect_output(smap3$print(),"H = Homme")
})
|
\name{GQD.plot}
\alias{GQD.plot}
\title{
Quick Plots for DiffusionRgqd Objects
}
\description{
\code{GQD.plot()} recognizes output objects calculated using routines from the \bold{DiffusionRgqd} package and subsequently constructs an appropriate plot, for example a perspective plot of a transition density.
}
\usage{
GQD.plot(x, thin = 1, burns, h = FALSE, palette = "mono")
}
\arguments{
\item{x}{
Generic GQD-objects, i.e. \code{res = GQD.density()}.
}
\item{thin}{
Thinning interval for \code{.mcmc} objects.
}
\item{burns}{
Number of parameter draws to discard for \code{.mcmc} objects.
}
\item{h}{
if \code{TRUE} a histogram is drawn i.s.o. a trace plot.
}
\item{palette}{Colour palette for drawing trace plots. Default \code{palette = 'mono'}, otherwise a qualitative palette will be used.}
}
\value{Varies in accordance with input type.
}
\author{
Etienne A.D. Pienaar: \email{etiannead@gmail.com}
}
\references{
Updates available on GitHub at \url{https://github.com/eta21}.
}
\seealso{
\code{\link{GQD.mcmc}}, \code{\link{GQD.mle}}, \code{\link{GQD.density}}, \code{\link{BiGQD.density}} etc.
}
\examples{
\donttest{
# Remove any existing coefficients
GQD.remove()
# Define drift Coefficients. Note that the limiting mean is sinusoidal.
G0 <- function(t){2*(10+sin(2*pi*(t-0.5)))}
G1 <- function(t){-2}
# Define sinusoidal diffusion coefficient with `faster' oscillation.
Q1 <- function(t){0.25*(1+0.75*(sin(4*pi*t)))}
states <- seq(5,15,1/10) # State values
initial <- 8 # Starting value of the process
Tmax <- 5 # Time horizon
Tstart <- 1 # Time starts at 1
increment <- 1/100 # Incremental time steps
# Generate the transitional density
M <- GQD.density(Xs=initial,Xt=states,s=Tstart,t=Tmax,delt=increment)
GQD.plot(M)
}
}
\keyword{plot}
| /man/GQD.plot.Rd | no_license | cran/DiffusionRgqd | R | false | false | 1,968 | rd | \name{GQD.plot}
\alias{GQD.plot}
\title{
Quick Plots for DiffusionRgqd Objects
}
\description{
\code{GQD.plot()} recognizes output objects calculated using routines from the \bold{DiffusionRgqd} package and subsequently constructs an appropriate plot, for example a perspective plot of a transition density.
}
\usage{
GQD.plot(x, thin = 1, burns, h = FALSE, palette = "mono")
}
\arguments{
\item{x}{
Generic GQD-objects, i.e. \code{res = GQD.density()}.
}
\item{thin}{
Thinning interval for \code{.mcmc} objects.
}
\item{burns}{
Number of parameter draws to discard for \code{.mcmc} objects.
}
\item{h}{
if \code{TRUE} a histogram is drawn i.s.o. a trace plot.
}
\item{palette}{Colour palette for drawing trace plots. Default \code{palette = 'mono'}, otherwise a qualitative palette will be used.}
}
\value{Varies in accordance with input type.
}
\author{
Etienne A.D. Pienaar: \email{etiannead@gmail.com}
}
\references{
Updates available on GitHub at \url{https://github.com/eta21}.
}
\seealso{
\code{\link{GQD.mcmc}}, \code{\link{GQD.mle}}, \code{\link{GQD.density}}, \code{\link{BiGQD.density}} etc.
}
\examples{
\donttest{
# Remove any existing coefficients
GQD.remove()
# Define drift Coefficients. Note that the limiting mean is sinusoidal.
G0 <- function(t){2*(10+sin(2*pi*(t-0.5)))}
G1 <- function(t){-2}
# Define sinusoidal diffusion coefficient with `faster' oscillation.
Q1 <- function(t){0.25*(1+0.75*(sin(4*pi*t)))}
states <- seq(5,15,1/10) # State values
initial <- 8 # Starting value of the process
Tmax <- 5 # Time horizon
Tstart <- 1 # Time starts at 1
increment <- 1/100 # Incremental time steps
# Generate the transitional density
M <- GQD.density(Xs=initial,Xt=states,s=Tstart,t=Tmax,delt=increment)
GQD.plot(M)
}
}
\keyword{plot}
|
statedat <- read.csv("StateSAT.csv", header=TRUE, sep=';')
statedat
summary(statedat)
is.na(statedat$teacherpay)
ls()
class(statedat)
nrow(statedat)
ncol(statedat)
dim(statedat)
names(statedat)
colnames(statedat)
head(statedat)
tail(statedat)
length(statedat$states)
sort(statedat$population, decreasing = TRUE)
mean(statedat$population)
round(mean(statedat$population),digits=3)
fivenum(statedat$satmath)
hist(statedat$population, breaks= 25, xlab = 'State Populations', col='yellow')
plot(statedat, main="Plot of all items")
cor(statedat$satmath, statedat$satverbal)
cor.test(statedat$satmath, statedat$satverbal)
| /Intro.R | no_license | KoPra-Tech/R | R | false | false | 680 | r |
# Load the state SAT dataset; note the semicolon field separator.
# NOTE(review): read.csv() with sep=';' keeps dec='.'; if the file uses
# European decimal commas, read.csv2() would be needed instead - confirm.
statedat <- read.csv("StateSAT.csv", header=TRUE, sep=';')
# Print the full data frame.
statedat
# Per-column summary statistics.
summary(statedat)
# Flag missing values in the teacher-pay column.
is.na(statedat$teacherpay)
# List objects currently in the workspace.
ls()
# Basic structure queries on the data frame.
class(statedat)
nrow(statedat)
ncol(statedat)
dim(statedat)
names(statedat)
colnames(statedat)
# Peek at the first and last rows.
head(statedat)
tail(statedat)
length(statedat$states)
# State populations from largest to smallest.
sort(statedat$population, decreasing = TRUE)
mean(statedat$population)
round(mean(statedat$population),digits=3)
# Tukey five-number summary of the SAT math scores.
fivenum(statedat$satmath)
# Distribution of state populations.
hist(statedat$population, breaks= 25, xlab = 'State Populations', col='yellow')
# Scatterplot matrix of all variables.
plot(statedat, main="Plot of all items")
# Correlation between math and verbal SAT scores, with significance test.
cor(statedat$satmath, statedat$satverbal)
cor.test(statedat$satmath, statedat$satverbal)
|
library(magrittr)
library(ggplot2)
library(Hmisc)
# Container for all metadata-analysis inputs, output paths and results.
# 'exp_groups' lists the categorical columns used to split samples.
metadata_analysis <- list('metadata' = qiime2R::read_q2metadata(file = '../qiime_metadata.tsv'),
'dir' = 'metadata',
'distribution' = 'metadata/dist',
'exp_groups' = c('group', 'mother')
)
# Long format: one row per (sample, numeric variable) pair, for per-variable plotting.
metadata_analysis$metadata_tidy <- tidyr::pivot_longer(
data = metadata_analysis$metadata,
cols = where(is.numeric),
names_to = 'variable')
# Treat the experimental-group identifiers as plain character labels.
metadata_analysis$metadata_tidy$group <- as.character(metadata_analysis$metadata_tidy$group)
metadata_analysis$metadata_tidy$mother <- as.character(metadata_analysis$metadata_tidy$mother)
# Output directories (dir.create() warns if they already exist).
dir.create(metadata_analysis$dir)
dir.create(metadata_analysis$distribution)
##########################
### SEPARATE VARIABLES ###
# For every experimental grouping column and every numeric variable, save:
#   - a histogram + density plot over all samples,
#   - a boxplot split by the grouping column,
#   - a histogram + density plot restricted to each group level.
purrr::walk(.x = metadata_analysis$exp_groups, .f = function(exp_group){
  purrr::walk(
    .x = unique(metadata_analysis$metadata_tidy$variable),
    .f = function(col_name){
      plot_data <- metadata_analysis$metadata_tidy %>%
        dplyr::filter(variable == col_name)
      plot_ <- ggplot(plot_data, aes(x = value)) + geom_histogram() + geom_density(color = 'red')
      ggsave(filename = paste0(metadata_analysis$distribution, '/', col_name, '.png'), plot = plot_)
      # Reference the grouping column by its (string) name via the .data
      # pronoun instead of the eval(parse(text = ...)) anti-pattern.
      boxplot_ <- ggplot(plot_data, aes(x = .data[[exp_group]], y = value, color = .data[[exp_group]])) +
        geom_boxplot() +
        theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
      ggsave(filename = paste0(metadata_analysis$distribution, '/', exp_group, 'boxplot_', col_name, '.png'), plot = boxplot_)
      purrr::walk(
        .x = unique(plot_data[[exp_group]]),
        .f = function(group_){
          dir_name <- paste0(metadata_analysis$distribution, '/', group_)
          dir.create(dir_name)
          # BUG FIX: the original filtered on the hard-coded `group` column even
          # when exp_group was 'mother', yielding empty/incorrect subsets;
          # filter on the grouping column actually being iterated.
          plot_data_group <- plot_data %>%
            dplyr::filter(.data[[exp_group]] == group_)
          plot_group <- ggplot(plot_data_group, aes(x = value)) + geom_histogram() + geom_density(color = 'red')
          ggsave(filename = paste0(dir_name, '/', exp_group, '_', col_name, group_, '.png'), plot = plot_group)
        })
    })
})
### SEPARATE VARIABLES ###
##########################
# Pearson will be good - group count is to small to see normality, but I think it should be normal, cause why not
########################
### PREPARE DATASETS ###
# Copy of the metadata with identifier/grouping columns removed, then each
# numeric column centred and scaled (z-scores) so correlations and PCA operate
# on comparable units.
metadata_analysis$metadata_for_corr_all <- metadata_analysis$metadata
metadata_analysis$metadata_for_corr_all$group <- NULL
metadata_analysis$metadata_for_corr_all$mother <- NULL
rownames(metadata_analysis$metadata_for_corr_all) <- metadata_analysis$metadata_for_corr_all$SampleID
metadata_analysis$metadata_for_corr_all$SampleID <- NULL
# NOTE(review): scale() returns a one-column matrix, so each data-frame column
# becomes a matrix after this loop; downstream code appears to tolerate that,
# but scale(df) on the whole frame would be simpler - confirm before changing.
for (col in colnames(metadata_analysis$metadata_for_corr_all)) {
metadata_analysis$metadata_for_corr_all[[col]] <- scale(
metadata_analysis$metadata_for_corr_all[[col]],
center = TRUE,
scale = TRUE)
}
# Dataset variants for correlation/PCA. Each $data element is built first, then
# calculations_of_metadata_plots() (project helper, defined elsewhere) attaches
# correlation/PCA results and output paths under the same list entry.
# NOTE(review): [-1,-1] drops the first row and first column - presumably a
# sample/column with missing data; confirm the intent.
metadata_analysis$corr$no_nas$data <- metadata_analysis$metadata_for_corr_all[-1,-1]
metadata_analysis$corr$no_nas <- calculations_of_metadata_plots(metadata_analysis$corr$no_nas$data, dataset_name = 'no_nas', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
# Samples with fecal iron measurements, minus reticulocyte/RBC columns.
metadata_analysis$corr$fecal$data <- metadata_analysis$metadata_for_corr_all[!is.na(metadata_analysis$metadata_for_corr_all$`non-heme_fe_faces_mg_fe_kg`),] %>%
dplyr::select(-ret_k_uL, -`ret-he_pg`, -`rbc-he_pg`)
metadata_analysis$corr$fecal <- calculations_of_metadata_plots(metadata_analysis$corr$fecal$data, dataset_name = 'fecal', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
# Same fecal samples, but excluding the fecal iron column itself.
metadata_analysis$corr$fecal_samples_but_no_fecal_data$data <- metadata_analysis$metadata_for_corr_all[!is.na(metadata_analysis$metadata_for_corr_all$`non-heme_fe_faces_mg_fe_kg`),] %>%
dplyr::select(-ret_k_uL, -`ret-he_pg`, -`rbc-he_pg`, -`non-heme_fe_faces_mg_fe_kg`)
metadata_analysis$corr$fecal_samples_but_no_fecal_data <- calculations_of_metadata_plots(metadata_analysis$corr$fecal_samples_but_no_fecal_data$data, dataset_name = 'fecal_samples_but_no_fecal_data', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
# As above but keeping the reticulocyte/RBC columns, restricted to samples
# where ret_k_uL is available.
metadata_analysis$corr$fecal_samples_but_no_fecal_data_with_more_variables$data <- metadata_analysis$metadata_for_corr_all[!is.na(metadata_analysis$metadata_for_corr_all$`non-heme_fe_faces_mg_fe_kg`),] %>%
dplyr::select(-`non-heme_fe_faces_mg_fe_kg`)
metadata_analysis$corr$fecal_samples_but_no_fecal_data_with_more_variables$data <- subset(x = metadata_analysis$corr$fecal_samples_but_no_fecal_data_with_more_variables$data, subset = !is.na(metadata_analysis$corr$fecal_samples_but_no_fecal_data_with_more_variables$data$ret_k_uL))
metadata_analysis$corr$fecal_samples_but_no_fecal_data_with_more_variables <- calculations_of_metadata_plots(metadata_analysis$corr$fecal_samples_but_no_fecal_data_with_more_variables$data, dataset_name = 'fecal_samples_but_no_fecal_data_with_more_variables', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
# Composite-variable variants: merge_measures() (project helper) collapses a
# set of related columns into a single merged column.
# 'rbc_morphology' combines the red-cell morphology measures.
metadata_analysis$corr$no_nas_rbc_morph$data <- merge_measures(
prepared_metadata = metadata_analysis$corr$no_nas$data,
cols_to_merge_char_vec = c('mcv_fL', 'mch_pg', 'rbc-he_pg'),
merged_col_name = 'rbc_morphology')
metadata_analysis$corr$no_nas_rbc_morph <- calculations_of_metadata_plots(metadata_analysis$corr$no_nas_rbc_morph$data, dataset_name = 'no_nas_rbc_morph', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
# Fecal-sample subset with the same morphology composite.
metadata_analysis$corr$fecal_rbc_morph$data <- metadata_analysis$metadata_for_corr_all[!is.na(metadata_analysis$metadata_for_corr_all$`non-heme_fe_faces_mg_fe_kg`),] %>%
merge_measures(
cols_to_merge_char_vec = c('mcv_fL', 'mch_pg', 'rbc-he_pg'),
merged_col_name = 'rbc_morphology') %>%
dplyr::select(-ret_k_uL, -`ret-he_pg`)
metadata_analysis$corr$fecal_rbc_morph$data <- subset(x = metadata_analysis$corr$fecal_rbc_morph$data, subset = !is.na(metadata_analysis$corr$fecal_rbc_morph$data[['rbc_morphology']]))
metadata_analysis$corr$fecal_rbc_morph <- calculations_of_metadata_plots(metadata_analysis$corr$fecal_rbc_morph$data, dataset_name = 'fecal_rbc_morph', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
# 'rbc_all' additionally folds hemoglobin and red-cell count into the composite.
metadata_analysis$corr$no_nas_rbc_all$data <- merge_measures(
prepared_metadata = metadata_analysis$corr$no_nas$data,
cols_to_merge_char_vec = c('mcv_fL', 'mch_pg', 'rbc-he_pg', 'hgb_g_dL', 'rbc_m_uL'),
merged_col_name = 'rbc_all')
metadata_analysis$corr$no_nas_rbc_all <- calculations_of_metadata_plots(metadata_analysis$corr$no_nas_rbc_all$data, dataset_name = 'no_nas_rbc_all', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
metadata_analysis$corr$fecal_rbc_all$data <- metadata_analysis$metadata_for_corr_all[!is.na(metadata_analysis$metadata_for_corr_all$`non-heme_fe_faces_mg_fe_kg`),] %>%
merge_measures(
cols_to_merge_char_vec = c('mcv_fL', 'mch_pg', 'rbc-he_pg', 'hgb_g_dL', 'rbc_m_uL'),
merged_col_name = 'rbc_all') %>%
dplyr::select(-ret_k_uL, -`ret-he_pg`)
metadata_analysis$corr$fecal_rbc_all$data <- subset(x = metadata_analysis$corr$fecal_rbc_all$data, subset = !is.na(metadata_analysis$corr$fecal_rbc_all$data[['rbc_all']]))
metadata_analysis$corr$fecal_rbc_all <- calculations_of_metadata_plots(metadata_analysis$corr$fecal_rbc_all$data, dataset_name = 'fecal_rbc_all', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
# Two-composite variant: separate 'rbc_morphology' and 'rbc_quant'
# (quantity: hemoglobin + red-cell count) merged columns.
metadata_analysis$corr$no_nas_rbc_2$data <-
merge_measures(
prepared_metadata = metadata_analysis$corr$no_nas$data,
cols_to_merge_char_vec = c('mcv_fL', 'mch_pg', 'rbc-he_pg'),
merged_col_name = 'rbc_morphology') %>%
merge_measures(
cols_to_merge_char_vec = c('hgb_g_dL', 'rbc_m_uL'),
merged_col_name = 'rbc_quant')
metadata_analysis$corr$no_nas_rbc_2 <- calculations_of_metadata_plots(metadata_analysis$corr$no_nas_rbc_2$data, dataset_name = 'no_nas_rbc_2', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
# Same two composites restricted to the fecal-sample subset.
metadata_analysis$corr$fecal_rbc_2$data <-
metadata_analysis$metadata_for_corr_all[!is.na(metadata_analysis$metadata_for_corr_all$`non-heme_fe_faces_mg_fe_kg`),] %>%
merge_measures(
cols_to_merge_char_vec = c('mcv_fL', 'mch_pg', 'rbc-he_pg'),
merged_col_name = 'rbc_morphology') %>%
merge_measures(
cols_to_merge_char_vec = c('hgb_g_dL', 'rbc_m_uL'),
merged_col_name = 'rbc_quant') %>%
dplyr::select(-ret_k_uL, -`ret-he_pg`)
# Drop rows where either composite could not be computed.
metadata_analysis$corr$fecal_rbc_2$data <- subset(x = metadata_analysis$corr$fecal_rbc_2$data, subset = !is.na(metadata_analysis$corr$fecal_rbc_2$data[['rbc_morphology']]))
metadata_analysis$corr$fecal_rbc_2$data <- subset(x = metadata_analysis$corr$fecal_rbc_2$data, subset = !is.na(metadata_analysis$corr$fecal_rbc_2$data[['rbc_quant']]))
metadata_analysis$corr$fecal_rbc_2 <- calculations_of_metadata_plots(
metadata_analysis$corr$fecal_rbc_2$data,
dataset_name = 'fecal_rbc_2',
full_metadata = metadata_analysis$metadata,
full_metadata_groupid_col = 'group')
### PREPARE DATASETS ###
########################
##############################
### VISUALIZE CORRELATIONS ###
# One annotated correlation plot and one pairs panel per prepared dataset.
purrr::walk2(
.x = metadata_analysis$corr,
.y = names(metadata_analysis$corr),
.f = function(dataset, dataset_name){
# NOTE(review): ggcorrplot::cor_pmat() expects the raw data, not an already
# computed correlation matrix - passing dataset$cor$r here likely produces
# wrong p-values; cor_pmat(dataset$data) is probably what was intended.
cor_plot <- ggcorrplot::ggcorrplot(dataset$cor$r,
hc.order = TRUE,
type = "lower",
lab = TRUE,
p.mat = ggcorrplot::cor_pmat(dataset$cor$r))
ggsave(filename = paste0(dataset$dir, dataset_name, '_correlation_metadata.png'), plot = cor_plot, dpi = 250)
# NOTE(review): psych::pairs.panels() draws to the active device and does not
# return a ggplot object, so ggsave(plot = ...) probably does not save the
# intended figure - verify the output file.
ggsave(filename = paste0(dataset$dir, dataset_name, '_correlation_metadata_2.png'), plot = psych::pairs.panels(dataset$data), dpi = 250)
})
# Clustered heatmap per prepared dataset, written as PNG via the base device.
for (dataset_name in names(metadata_analysis$corr)) {
png(filename = paste0(metadata_analysis$corr[[dataset_name]]$dir, dataset_name, '_heatmap_metadata.png'), width = 1600, height = 900)
# NOTE(review): `z_standarized` is not defined anywhere in this script, so each
# iteration plots the same external object (or errors). This was probably meant
# to be as.matrix(metadata_analysis$corr[[dataset_name]]$data) - confirm & fix.
gplots::heatmap.2(x = as.matrix(z_standarized), trace = 'none', margins = c(15, 5))
dev.off()
}
### VISUALIZE CORRELATIONS ###
##############################
###########
### PCA ###
# Save an individuals PCA plot for every prepared dataset, plus a biplot
# coloured by experimental group for all datasets except 'no_nas_pca_fil'.
for (ds_name in names(metadata_analysis$corr)) {
  ds <- metadata_analysis$corr[[ds_name]]
  ggsave(
    filename = paste0(ds$dir, ds_name, '_pca_metadata.png'),
    plot = factoextra::fviz_pca_ind(ds$pca_scaled, repel = T),
    dpi = 250)
  if (ds_name == 'no_nas_pca_fil') next
  ggsave(
    filename = paste0(ds$dir, ds_name, '_pca_metadata_samples.png'),
    plot = factoextra::fviz_pca_biplot(
      ds$pca_scaled_samples,
      repel = T,
      habillage = ds$habillage),
    dpi = 250)
}
### PCA ###
###########
#######################
### ADD COMPARISONS ###
# Build pairwise-comparison columns and composite z-scores, then export the
# enhanced metadata both as a plain TSV and in QIIME2 metadata format.
metadata_analysis$metadata_enhanced <- metadata_analysis$metadata
# Binary contrast: 'anemia' vs every other group collapsed to 'treatment'.
metadata_analysis$metadata_enhanced$anemia_v_treatment <- as.character(metadata_analysis$metadata_enhanced$group)
metadata_analysis$metadata_enhanced$anemia_v_treatment[metadata_analysis$metadata_enhanced$anemia_v_treatment != 'anemia'] <- 'treatment'
# Contrast: 'fe_dextran_muscle' vs the remaining treatments; anemia samples are
# blanked out ('' marks a missing value for the QIIME export below).
metadata_analysis$metadata_enhanced$dex_muscle_v_treatment <- as.character(metadata_analysis$metadata_enhanced$group)
metadata_analysis$metadata_enhanced$dex_muscle_v_treatment[metadata_analysis$metadata_enhanced$dex_muscle_v_treatment == 'anemia'] <- ''
# %nin% is Hmisc's negated %in% (library(Hmisc) at the top of the script).
metadata_analysis$metadata_enhanced$dex_muscle_v_treatment[metadata_analysis$metadata_enhanced$dex_muscle_v_treatment %nin% c('fe_dextran_muscle', '')] <- 'non_dextran_muscle_treatment'
# Normalize column names (no dashes, all lower case) for downstream tools.
colnames(metadata_analysis$metadata_enhanced) <- stringr::str_replace_all(
string = colnames(metadata_analysis$metadata_enhanced),
pattern = '-',
replacement = '_')
colnames(metadata_analysis$metadata_enhanced) <- tolower(colnames(metadata_analysis$metadata_enhanced))
# Composite z-score columns (merge_measures is a project helper; note the
# column names are the lower-cased variants here).
metadata_analysis$metadata_enhanced <- merge_measures(
prepared_metadata = metadata_analysis$metadata_enhanced,
cols_to_merge_char_vec = c('mcv_fl', 'mch_pg', 'rbc_he_pg'),
merged_col_name = 'rbc_morphology_z_score',
scale_before_merging = T) %>%
merge_measures(
cols_to_merge_char_vec = c('hgb_g_dl', 'rbc_m_ul'),
merged_col_name = 'rbc_quant_z_score',
scale_before_merging = T)
# Coerce everything to character and use '' for missing values (TSV export).
for (col in colnames(metadata_analysis$metadata_enhanced)) {
metadata_analysis$metadata_enhanced[[col]] <- as.character(metadata_analysis$metadata_enhanced[[col]] )}
metadata_analysis$metadata_enhanced[is.na(metadata_analysis$metadata_enhanced)] <- ''
# Drop sample F5 - presumably an excluded/outlier sample; TODO confirm why.
metadata_analysis$metadata_enhanced <- subset(x = metadata_analysis$metadata_enhanced, subset = metadata_analysis$metadata_enhanced$sampleid != 'F5')
readr::write_tsv(x = metadata_analysis$metadata_enhanced, file = '../metadata_enhanced.tsv')
# QIIME2 metadata needs a '#q2:types' row declaring each column's type; the
# order here must match the column order of metadata_enhanced - verify.
metadata_analysis$qiime_column_types <- c('#q2:types', 'categorical', 'categorical', 'numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'categorical', 'categorical', 'numeric', 'numeric')
metadata_analysis$metadata_enhanced_phylo <- metadata_analysis$metadata_enhanced
metadata_analysis$metadata_enhanced_phylo[metadata_analysis$metadata_enhanced_phylo == ''] <- NA
metadata_analysis$metadata_enhanced_phylo <- rbind(metadata_analysis$qiime_column_types, metadata_analysis$metadata_enhanced_phylo)
write.table(x = metadata_analysis$metadata_enhanced_phylo, file = '/home/adrian/Desktop/qiime/metadata_enhanced_phylo.tsv', sep = '\t', quote = F, row.names = F)
### ADD COMPARISONS ###
#######################
| /metadata.R | no_license | AdrianS85/16S-R | R | false | false | 13,858 | r | library(magrittr)
library(ggplot2)
library(Hmisc)
metadata_analysis <- list('metadata' = qiime2R::read_q2metadata(file = '../qiime_metadata.tsv'),
'dir' = 'metadata',
'distribution' = 'metadata/dist',
'exp_groups' = c('group', 'mother')
)
metadata_analysis$metadata_tidy <- tidyr::pivot_longer(
data = metadata_analysis$metadata,
cols = where(is.numeric),
names_to = 'variable')
metadata_analysis$metadata_tidy$group <- as.character(metadata_analysis$metadata_tidy$group)
metadata_analysis$metadata_tidy$mother <- as.character(metadata_analysis$metadata_tidy$mother)
dir.create(metadata_analysis$dir)
dir.create(metadata_analysis$distribution)
##########################
### SEPARATE VARIABLES ###
# For every experimental grouping column and every numeric variable, save:
#   - a histogram + density plot over all samples,
#   - a boxplot split by the grouping column,
#   - a histogram + density plot restricted to each group level.
purrr::walk(.x = metadata_analysis$exp_groups, .f = function(exp_group){
  purrr::walk(
    .x = unique(metadata_analysis$metadata_tidy$variable),
    .f = function(col_name){
      plot_data <- metadata_analysis$metadata_tidy %>%
        dplyr::filter(variable == col_name)
      plot_ <- ggplot(plot_data, aes(x = value)) + geom_histogram() + geom_density(color = 'red')
      ggsave(filename = paste0(metadata_analysis$distribution, '/', col_name, '.png'), plot = plot_)
      # Reference the grouping column by its (string) name via the .data
      # pronoun instead of the eval(parse(text = ...)) anti-pattern.
      boxplot_ <- ggplot(plot_data, aes(x = .data[[exp_group]], y = value, color = .data[[exp_group]])) +
        geom_boxplot() +
        theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
      ggsave(filename = paste0(metadata_analysis$distribution, '/', exp_group, 'boxplot_', col_name, '.png'), plot = boxplot_)
      purrr::walk(
        .x = unique(plot_data[[exp_group]]),
        .f = function(group_){
          dir_name <- paste0(metadata_analysis$distribution, '/', group_)
          dir.create(dir_name)
          # BUG FIX: the original filtered on the hard-coded `group` column even
          # when exp_group was 'mother', yielding empty/incorrect subsets;
          # filter on the grouping column actually being iterated.
          plot_data_group <- plot_data %>%
            dplyr::filter(.data[[exp_group]] == group_)
          plot_group <- ggplot(plot_data_group, aes(x = value)) + geom_histogram() + geom_density(color = 'red')
          ggsave(filename = paste0(dir_name, '/', exp_group, '_', col_name, group_, '.png'), plot = plot_group)
        })
    })
})
### SEPARATE VARIABLES ###
##########################
# Pearson will be good - group count is to small to see normality, but I think it should be normal, cause why not
########################
### PREPARE DATASETS ###
metadata_analysis$metadata_for_corr_all <- metadata_analysis$metadata
metadata_analysis$metadata_for_corr_all$group <- NULL
metadata_analysis$metadata_for_corr_all$mother <- NULL
rownames(metadata_analysis$metadata_for_corr_all) <- metadata_analysis$metadata_for_corr_all$SampleID
metadata_analysis$metadata_for_corr_all$SampleID <- NULL
for (col in colnames(metadata_analysis$metadata_for_corr_all)) {
metadata_analysis$metadata_for_corr_all[[col]] <- scale(
metadata_analysis$metadata_for_corr_all[[col]],
center = TRUE,
scale = TRUE)
}
metadata_analysis$corr$no_nas$data <- metadata_analysis$metadata_for_corr_all[-1,-1]
metadata_analysis$corr$no_nas <- calculations_of_metadata_plots(metadata_analysis$corr$no_nas$data, dataset_name = 'no_nas', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
metadata_analysis$corr$fecal$data <- metadata_analysis$metadata_for_corr_all[!is.na(metadata_analysis$metadata_for_corr_all$`non-heme_fe_faces_mg_fe_kg`),] %>%
dplyr::select(-ret_k_uL, -`ret-he_pg`, -`rbc-he_pg`)
metadata_analysis$corr$fecal <- calculations_of_metadata_plots(metadata_analysis$corr$fecal$data, dataset_name = 'fecal', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
metadata_analysis$corr$fecal_samples_but_no_fecal_data$data <- metadata_analysis$metadata_for_corr_all[!is.na(metadata_analysis$metadata_for_corr_all$`non-heme_fe_faces_mg_fe_kg`),] %>%
dplyr::select(-ret_k_uL, -`ret-he_pg`, -`rbc-he_pg`, -`non-heme_fe_faces_mg_fe_kg`)
metadata_analysis$corr$fecal_samples_but_no_fecal_data <- calculations_of_metadata_plots(metadata_analysis$corr$fecal_samples_but_no_fecal_data$data, dataset_name = 'fecal_samples_but_no_fecal_data', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
metadata_analysis$corr$fecal_samples_but_no_fecal_data_with_more_variables$data <- metadata_analysis$metadata_for_corr_all[!is.na(metadata_analysis$metadata_for_corr_all$`non-heme_fe_faces_mg_fe_kg`),] %>%
dplyr::select(-`non-heme_fe_faces_mg_fe_kg`)
metadata_analysis$corr$fecal_samples_but_no_fecal_data_with_more_variables$data <- subset(x = metadata_analysis$corr$fecal_samples_but_no_fecal_data_with_more_variables$data, subset = !is.na(metadata_analysis$corr$fecal_samples_but_no_fecal_data_with_more_variables$data$ret_k_uL))
metadata_analysis$corr$fecal_samples_but_no_fecal_data_with_more_variables <- calculations_of_metadata_plots(metadata_analysis$corr$fecal_samples_but_no_fecal_data_with_more_variables$data, dataset_name = 'fecal_samples_but_no_fecal_data_with_more_variables', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
metadata_analysis$corr$no_nas_rbc_morph$data <- merge_measures(
prepared_metadata = metadata_analysis$corr$no_nas$data,
cols_to_merge_char_vec = c('mcv_fL', 'mch_pg', 'rbc-he_pg'),
merged_col_name = 'rbc_morphology')
metadata_analysis$corr$no_nas_rbc_morph <- calculations_of_metadata_plots(metadata_analysis$corr$no_nas_rbc_morph$data, dataset_name = 'no_nas_rbc_morph', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
metadata_analysis$corr$fecal_rbc_morph$data <- metadata_analysis$metadata_for_corr_all[!is.na(metadata_analysis$metadata_for_corr_all$`non-heme_fe_faces_mg_fe_kg`),] %>%
merge_measures(
cols_to_merge_char_vec = c('mcv_fL', 'mch_pg', 'rbc-he_pg'),
merged_col_name = 'rbc_morphology') %>%
dplyr::select(-ret_k_uL, -`ret-he_pg`)
metadata_analysis$corr$fecal_rbc_morph$data <- subset(x = metadata_analysis$corr$fecal_rbc_morph$data, subset = !is.na(metadata_analysis$corr$fecal_rbc_morph$data[['rbc_morphology']]))
metadata_analysis$corr$fecal_rbc_morph <- calculations_of_metadata_plots(metadata_analysis$corr$fecal_rbc_morph$data, dataset_name = 'fecal_rbc_morph', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
metadata_analysis$corr$no_nas_rbc_all$data <- merge_measures(
prepared_metadata = metadata_analysis$corr$no_nas$data,
cols_to_merge_char_vec = c('mcv_fL', 'mch_pg', 'rbc-he_pg', 'hgb_g_dL', 'rbc_m_uL'),
merged_col_name = 'rbc_all')
metadata_analysis$corr$no_nas_rbc_all <- calculations_of_metadata_plots(metadata_analysis$corr$no_nas_rbc_all$data, dataset_name = 'no_nas_rbc_all', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
metadata_analysis$corr$fecal_rbc_all$data <- metadata_analysis$metadata_for_corr_all[!is.na(metadata_analysis$metadata_for_corr_all$`non-heme_fe_faces_mg_fe_kg`),] %>%
merge_measures(
cols_to_merge_char_vec = c('mcv_fL', 'mch_pg', 'rbc-he_pg', 'hgb_g_dL', 'rbc_m_uL'),
merged_col_name = 'rbc_all') %>%
dplyr::select(-ret_k_uL, -`ret-he_pg`)
metadata_analysis$corr$fecal_rbc_all$data <- subset(x = metadata_analysis$corr$fecal_rbc_all$data, subset = !is.na(metadata_analysis$corr$fecal_rbc_all$data[['rbc_all']]))
metadata_analysis$corr$fecal_rbc_all <- calculations_of_metadata_plots(metadata_analysis$corr$fecal_rbc_all$data, dataset_name = 'fecal_rbc_all', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
metadata_analysis$corr$no_nas_rbc_2$data <-
merge_measures(
prepared_metadata = metadata_analysis$corr$no_nas$data,
cols_to_merge_char_vec = c('mcv_fL', 'mch_pg', 'rbc-he_pg'),
merged_col_name = 'rbc_morphology') %>%
merge_measures(
cols_to_merge_char_vec = c('hgb_g_dL', 'rbc_m_uL'),
merged_col_name = 'rbc_quant')
metadata_analysis$corr$no_nas_rbc_2 <- calculations_of_metadata_plots(metadata_analysis$corr$no_nas_rbc_2$data, dataset_name = 'no_nas_rbc_2', full_metadata = metadata_analysis$metadata, full_metadata_groupid_col = 'group')
metadata_analysis$corr$fecal_rbc_2$data <-
metadata_analysis$metadata_for_corr_all[!is.na(metadata_analysis$metadata_for_corr_all$`non-heme_fe_faces_mg_fe_kg`),] %>%
merge_measures(
cols_to_merge_char_vec = c('mcv_fL', 'mch_pg', 'rbc-he_pg'),
merged_col_name = 'rbc_morphology') %>%
merge_measures(
cols_to_merge_char_vec = c('hgb_g_dL', 'rbc_m_uL'),
merged_col_name = 'rbc_quant') %>%
dplyr::select(-ret_k_uL, -`ret-he_pg`)
metadata_analysis$corr$fecal_rbc_2$data <- subset(x = metadata_analysis$corr$fecal_rbc_2$data, subset = !is.na(metadata_analysis$corr$fecal_rbc_2$data[['rbc_morphology']]))
metadata_analysis$corr$fecal_rbc_2$data <- subset(x = metadata_analysis$corr$fecal_rbc_2$data, subset = !is.na(metadata_analysis$corr$fecal_rbc_2$data[['rbc_quant']]))
metadata_analysis$corr$fecal_rbc_2 <- calculations_of_metadata_plots(
metadata_analysis$corr$fecal_rbc_2$data,
dataset_name = 'fecal_rbc_2',
full_metadata = metadata_analysis$metadata,
full_metadata_groupid_col = 'group')
### PREPARE DATASETS ###
########################
##############################
### VISUALIZE CORRELATIONS ###
purrr::walk2(
.x = metadata_analysis$corr,
.y = names(metadata_analysis$corr),
.f = function(dataset, dataset_name){
cor_plot <- ggcorrplot::ggcorrplot(dataset$cor$r,
hc.order = TRUE,
type = "lower",
lab = TRUE,
p.mat = ggcorrplot::cor_pmat(dataset$cor$r))
ggsave(filename = paste0(dataset$dir, dataset_name, '_correlation_metadata.png'), plot = cor_plot, dpi = 250)
ggsave(filename = paste0(dataset$dir, dataset_name, '_correlation_metadata_2.png'), plot = psych::pairs.panels(dataset$data), dpi = 250)
})
# Clustered heatmap per prepared dataset, written as PNG via the base device.
for (dataset_name in names(metadata_analysis$corr)) {
png(filename = paste0(metadata_analysis$corr[[dataset_name]]$dir, dataset_name, '_heatmap_metadata.png'), width = 1600, height = 900)
# NOTE(review): `z_standarized` is not defined anywhere in this script, so each
# iteration plots the same external object (or errors). This was probably meant
# to be as.matrix(metadata_analysis$corr[[dataset_name]]$data) - confirm & fix.
gplots::heatmap.2(x = as.matrix(z_standarized), trace = 'none', margins = c(15, 5))
dev.off()
}
### VISUALIZE CORRELATIONS ###
##############################
###########
### PCA ###
purrr::walk2(
.x = metadata_analysis$corr,
.y = names(metadata_analysis$corr),
.f = function(dataset, dataset_name){
ggsave(
filename = paste0(dataset$dir, dataset_name, '_pca_metadata.png'),
plot = factoextra::fviz_pca_ind(dataset$pca_scaled, repel = T),
dpi = 250)
if (dataset_name != 'no_nas_pca_fil') {
ggsave(
filename = paste0(dataset$dir, dataset_name, '_pca_metadata_samples.png'),
plot = factoextra::fviz_pca_biplot(
dataset$pca_scaled_samples,
repel = T,
habillage = dataset$habillage),
dpi = 250)
}
})
### PCA ###
###########
#######################
### ADD COMPARISONS ###
metadata_analysis$metadata_enhanced <- metadata_analysis$metadata
metadata_analysis$metadata_enhanced$anemia_v_treatment <- as.character(metadata_analysis$metadata_enhanced$group)
metadata_analysis$metadata_enhanced$anemia_v_treatment[metadata_analysis$metadata_enhanced$anemia_v_treatment != 'anemia'] <- 'treatment'
metadata_analysis$metadata_enhanced$dex_muscle_v_treatment <- as.character(metadata_analysis$metadata_enhanced$group)
metadata_analysis$metadata_enhanced$dex_muscle_v_treatment[metadata_analysis$metadata_enhanced$dex_muscle_v_treatment == 'anemia'] <- ''
metadata_analysis$metadata_enhanced$dex_muscle_v_treatment[metadata_analysis$metadata_enhanced$dex_muscle_v_treatment %nin% c('fe_dextran_muscle', '')] <- 'non_dextran_muscle_treatment'
colnames(metadata_analysis$metadata_enhanced) <- stringr::str_replace_all(
string = colnames(metadata_analysis$metadata_enhanced),
pattern = '-',
replacement = '_')
colnames(metadata_analysis$metadata_enhanced) <- tolower(colnames(metadata_analysis$metadata_enhanced))
metadata_analysis$metadata_enhanced <- merge_measures(
prepared_metadata = metadata_analysis$metadata_enhanced,
cols_to_merge_char_vec = c('mcv_fl', 'mch_pg', 'rbc_he_pg'),
merged_col_name = 'rbc_morphology_z_score',
scale_before_merging = T) %>%
merge_measures(
cols_to_merge_char_vec = c('hgb_g_dl', 'rbc_m_ul'),
merged_col_name = 'rbc_quant_z_score',
scale_before_merging = T)
for (col in colnames(metadata_analysis$metadata_enhanced)) {
metadata_analysis$metadata_enhanced[[col]] <- as.character(metadata_analysis$metadata_enhanced[[col]] )}
metadata_analysis$metadata_enhanced[is.na(metadata_analysis$metadata_enhanced)] <- ''
metadata_analysis$metadata_enhanced <- subset(x = metadata_analysis$metadata_enhanced, subset = metadata_analysis$metadata_enhanced$sampleid != 'F5')
readr::write_tsv(x = metadata_analysis$metadata_enhanced, file = '../metadata_enhanced.tsv')
metadata_analysis$qiime_column_types <- c('#q2:types', 'categorical', 'categorical', 'numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'categorical', 'categorical', 'numeric', 'numeric')
metadata_analysis$metadata_enhanced_phylo <- metadata_analysis$metadata_enhanced
metadata_analysis$metadata_enhanced_phylo[metadata_analysis$metadata_enhanced_phylo == ''] <- NA
metadata_analysis$metadata_enhanced_phylo <- rbind(metadata_analysis$qiime_column_types, metadata_analysis$metadata_enhanced_phylo)
write.table(x = metadata_analysis$metadata_enhanced_phylo, file = '/home/adrian/Desktop/qiime/metadata_enhanced_phylo.tsv', sep = '\t', quote = F, row.names = F)
### ADD COMPARISONS ###
#######################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doc_specifying_columns.R
\name{specifying_columns}
\alias{specifying_columns}
\title{Specifying hierarchical columns with arguments \code{pattern} or \code{by}}
\description{
Within the \code{hmatch_} group of functions, there are three ways to specify the
hierarchical columns to be matched.
In all cases, it is assumed that matched columns are already correctly
ordered, with the first matched column reflecting the broadest hierarchical
level (lowest-resolution, e.g. country) and the last column reflecting the
finest level (highest-resolution, e.g. township).
}
\section{(1) All column names common to \code{raw} and \code{ref}}{
If neither \code{pattern} nor \code{by} are specified (the default), then the
hierarchical columns are assumed to be all column names that are common to
both \code{raw} and \code{ref}.
}
\section{(2) Regex pattern}{
Arguments \code{pattern} and \code{pattern_ref} take regex patterns to match the
hierarchical columns in \code{raw} and \code{ref}, respectively. Argument \code{pattern_ref}
only needs to be specified if it's different from \code{pattern} (i.e. if the
hierarchical columns have different names in \code{raw} vs. \code{ref}).
For example, if the hierarchical columns in \code{raw} are "ADM_1", "ADM_2", and
"ADM_3", which correspond respectively to columns within \code{ref} named
"REF_ADM_1", "REF_ADM_2", and "REF_ADM_3", then the pattern arguments can be
specified as:
\itemize{
\item \code{pattern = "^ADM_[[:digit:]]"}
\item \code{pattern_ref = "^REF_ADM_[[:digit:]]"}
}
Alternatively, because \code{pattern_ref} defaults to the same value as
\code{pattern} (unless otherwise specified), one could specify a single regex pattern
that matches the hierarchical columns in both \code{raw} and \code{ref}, e.g.
\itemize{
\item \code{pattern = "ADM_[[:digit:]]"}
}
However, the user should exercise care to ensure that there are no
non-hierarchical columns within \code{raw} or \code{ref} that may inadvertently be
matched by the given pattern.
}
\section{(3) Vector of column names}{
If the hierarchical columns cannot easily be matched with a regex pattern,
one can specify the relevant column names in vector form using arguments \code{by}
and \code{by_ref}. As with \code{pattern_ref}, argument \code{by_ref} only needs to be
specified if it's different from \code{by} (i.e. if the hierarchical columns have
different names in \code{raw} vs. \code{ref}).
For example, if the hierarchical columns in \code{raw} are "state", "county", and
"township", which correspond respectively to columns within \code{ref} named
"admin1", "admin2", and "admin3", then the \code{by} arguments can be specified
with:
\itemize{
\item \code{by = c("state", "county", "township")}
\item \code{by_ref = c("admin1", "admin2", "admin3")}
}
}
| /man/specifying_columns.Rd | no_license | ntncmch/hmatch | R | false | true | 2,864 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doc_specifying_columns.R
\name{specifying_columns}
\alias{specifying_columns}
\title{Specifying hierarchical columns with arguments \code{pattern} or \code{by}}
\description{
Within the \code{hmatch_} group of functions, there are three ways to specify the
hierarchical columns to be matched.
In all cases, it is assumed that matched columns are already correctly
ordered, with the first matched column reflecting the broadest hierarchical
level (lowest-resolution, e.g. country) and the last column reflecting the
finest level (highest-resolution, e.g. township).
}
\section{(1) All column names common to \code{raw} and \code{ref}}{
If neither \code{pattern} nor \code{by} are specified (the default), then the
hierarchical columns are assumed to be all column names that are common to
both \code{raw} and \code{ref}.
}
\section{(2) Regex pattern}{
Arguments \code{pattern} and \code{pattern_ref} take regex patterns to match the
hierarchical columns in \code{raw} and \code{ref}, respectively. Argument \code{pattern_ref}
only needs to be specified if it's different from \code{pattern} (i.e. if the
hierarchical columns have different names in \code{raw} vs. \code{ref}).
For example, if the hierarchical columns in \code{raw} are "ADM_1", "ADM_2", and
"ADM_3", which correspond respectively to columns within \code{ref} named
"REF_ADM_1", "REF_ADM_2", and "REF_ADM_3", then the pattern arguments can be
specified as:
\itemize{
\item \code{pattern = "^ADM_[[:digit:]]"}
\item \code{pattern_ref = "^REF_ADM_[[:digit:]]"}
}
Alternatively, because \code{pattern_ref} defaults to the same value as
\code{pattern} (unless otherwise specified), one could specify a single regex pattern
that matches the hierarchical columns in both \code{raw} and \code{ref}, e.g.
\itemize{
\item \code{pattern = "ADM_[[:digit:]]"}
}
However, the user should exercise care to ensure that there are no
non-hierarchical columns within \code{raw} or \code{ref} that may inadvertently be
matched by the given pattern.
}
\section{(3) Vector of column names}{
If the hierarchical columns cannot easily be matched with a regex pattern,
one can specify the relevant column names in vector form using arguments \code{by}
and \code{by_ref}. As with \code{pattern_ref}, argument \code{by_ref} only needs to be
specified if it's different from \code{by} (i.e. if the hierarchical columns have
different names in \code{raw} vs. \code{ref}).
For example, if the hierarchical columns in \code{raw} are "state", "county", and
"township", which correspond respectively to columns within \code{ref} named
"admin1", "admin2", and "admin3", then the\code{by} arguments can be specified
with:
\itemize{
\item \code{by = c("state", "county", "township")}
\item \code{by_ref = c("admin1", "admin2", "admin3")}
}
}
|
library(rms)
library(gplots)
# Draw a symmetric correlation heatmap with a blue-red palette scaled to the
# observed correlation range.
#
# Args:
#   mycor: square correlation matrix (values in [-1, 1]).
#   main: plot title.
#   dend: which dendrograms to draw ("none" or "both").
#   Colv: retained for interface compatibility only - see NOTE below.
#   withlabels: if TRUE, use large labels/margins suitable for huge PDFs.
plot_heatmap <- function(mycor, main, dend = "none", Colv = FALSE, withlabels = FALSE) {
  # NOTE(review): `Colv` is immediately overwritten from `dend`, so the
  # parameter has no effect; overwrite kept to preserve existing behavior.
  Colv <- (dend == "both")
  Rowv <- (dend == "both")
  if (withlabels == TRUE) {
    textsize <- 46
    marginsize <- 400
  } else {
    textsize <- .2
    marginsize <- .5
  }
  # Map the correlation range [-1, 1] onto indices 1..32 of the bluered palette
  # so the color scale spans exactly the observed correlations.
  colorRange <- round(range(mycor, na.rm = TRUE) * 15) + 16
  colorChoices <- bluered(32)[colorRange[1]:colorRange[2]]
  heatmap.2(mycor, col = colorChoices, symm = TRUE, cexRow = textsize, cexCol = textsize,
            dend = dend, Colv = Colv, Rowv = Rowv,
            lmat = rbind(c(0, 3), c(2, 1), c(0, 4)), lhei = c(0.1, 2, 0.1),
            trace = "none", margins = c(marginsize, marginsize), key = FALSE, keysize = 0.1, main = main)
}
# Compute pairwise Pearson correlations for `dat` and save the resulting
# heatmap as "heatmap_altmetrics_<journal><year><main>.pdf".
#
# Args:
#   dat:        data frame of the columns to correlate.
#   journal:    journal label used in the file name and title.
#   year:       year label used in the file name and title.
#   dend:       dendrogram option forwarded to plot_heatmap ("none"/"both").
#   withlabels: forwarded to plot_heatmap (label and margin sizing).
#   main:       extra tag for the file name and plot title.
save_correlation_heatmap = function(dat, journal, year, dend="none", withlabels=F, main=""){
  mycor = calc.correlations(dat, "pairwise.complete.obs", "pearson")
  pdf(paste("heatmap_altmetrics_", journal, year, main, ".pdf", sep=""), width=200, height=200)
  # Close the device even if plotting fails; the original called dev.off()
  # only on the success path, leaking an open PDF device on error.
  on.exit(dev.off(), add = TRUE)
  plot_heatmap(mycor, "", dend=dend, withlabels=withlabels)
  title(paste("\n", main, year, journal))
}
# for different journals, different years
# Write a family of correlation heatmaps: one overall, one per year,
# one per journal (all years pooled), and one per journal-year slice
# that has more than 50 articles.
# NOTE(review): relies on `dat` having `year` and `journal.x` columns.
lots_of_correlations = function(dat, corrColumns){
# Overall heatmap across every journal and year.
save_correlation_heatmap(dat[,corrColumns], "all", "all")
years = 2010:2004
journals = names(table(dat$journal.x))
#quartz()
for (year in years) {
inYear = which(dat$year == year)
# Per-year heatmap across all journals.
save_correlation_heatmap(dat[inYear,corrColumns], "all", year)
for (journal in journals) {
inJournal = which(dat$journal.x == journal)
# save one for all years; will be overwritten but that is ok
save_correlation_heatmap(dat[inJournal,corrColumns], journal, "all")
# now save a different one per journal, per year
#print(year); print(journal)
dat.subset = dat[intersect(inYear,inJournal),corrColumns]
# Only plot journal-year slices with enough data (> 50 articles).
if (nrow(dat.subset) > 50) {
#print(dim(dat.subset))
save_correlation_heatmap(dat.subset, journal, year) # , "n=", nrow(dat.subset)
}
}
}
}
# Heatmap of a random subsample of 1000 articles (columns, after the
# transpose) against the altmetrics variables for one publication year.
#
# Args:
#   dat:         article-level data frame (must contain a `year` column).
#   corrColumns: names of the altmetric columns to plot.
#   year:        publication year to subsample (default 2008).
heatmap_of_articles = function(dat, corrColumns, year=2008) {
  # Now a heatmap with a subsample of articles and the variables.
  # Fixed seed keeps the random subsample reproducible across runs.
  set.seed(42)
  inYear = which(dat$year == year)
  dat.tosample = dat[inYear, ]
  # Sample 1000 rows with replacement; seq_len() is safe even for an
  # empty subset, unlike the original 1:dim(...)[1] (which yields c(1, 0)).
  dat.subsample = as.matrix(dat.tosample[sample(seq_len(nrow(dat.tosample)), 1000, TRUE), corrColumns])
  m = 200
  pdf(paste("heatmap_articles_vs_altmetrics_", year, ".pdf", sep = ""))
  # Close the PDF device even if heatmap.2() errors; the original left
  # the device open on failure.
  on.exit(dev.off(), add = TRUE)
  heatmap.2(t(dat.subsample), col = bluered(m * 2)[1:(m * 2 - 1)],
    cexRow = 1, cexCol = .1, dend = "both", trace = "none",
    lmat = rbind(c(0, 3), c(2, 1), c(0, 4)), lhei = c(1.5, 4, 2),
    margins = c(1, 10), key = FALSE, keysize = 0.1, scale = "row", symbreaks = TRUE)
  title(paste("\narticles vs altmetrics", year))
}
# Render a colour palette as a single horizontal strip with no axes,
# useful for eyeballing the palettes fed to heatmap.2.
showpanel <- function(column) {
  strip <- matrix(seq_len(100), ncol = 1)
  image(z = strip, col = column, xaxt = "n", yaxt = "n")
}
#quartz()
#showpanel(colorChoices)
#showpanel(bluered(m*2)[1:(m*2-1)])
# Altmetric / citation-count columns used throughout the correlation plots.
corrColumns = c(
"wosCount",
"pdfDownloadsCount",
"htmlDownloadsCount",
"mendeleyReadersCount",
"almCiteULikeCount",
"deliciousCount",
"almBlogsCount",
"backtweetsCount",
"wikipediaCites",
"f1000Factor",
"plosCommentCount",
"plosCommentResponsesCount",
"facebookCommentCount"
)
library(altmetrics.analysis)
# NOTE(review): data() loads `dat_research_norm`, but the code below reads
# `dat.research.norm` -- confirm the package stores the object under the
# dotted name.
data(dat_research_norm)
# Do transformation
dat.research.norm.transform = dat.research.norm
dat.research.norm.transform[, altmetricsColumns] = transformation_function(dat.research.norm[, altmetricsColumns])
mycor = calc.correlations(dat.research.norm.transform[, altmetricsColumns], "pairwise.complete.obs", "pearson")
# main one, with labels
save_correlation_heatmap(dat.research.norm.transform[, corrColumns], "all", "all", dend="none", withlabels=T, main="labels")
# subdivisions
lots_of_correlations(dat.research.norm.transform, corrColumns)
# now with dendrograms
# NOTE(review): this call subsets by altmetricsColumns while the labelled
# plot above uses corrColumns -- confirm the asymmetry is intentional.
save_correlation_heatmap(dat.research.norm.transform[, altmetricsColumns], "all", "all", dend="both", withlabels=T, main="dend")
heatmap_of_articles(dat.research.norm.transform, corrColumns)
# source("altmetrics.analysis/inst/doc_src/correlation/do_correlations_viz.R") | /stats/scripts/altmetrics.analysis/inst/doc_src/correlation/do_correlations_viz.R | permissive | neostoic/plos_altmetrics_study | R | false | false | 4,167 | r |
library(rms)
library(gplots)
# Draw a symmetric correlation heatmap with gplots::heatmap.2.
#
# Args:
#   mycor:      square correlation matrix (values in [-1, 1]).
#   main:       plot title passed through to heatmap.2.
#   dend:       "both" to draw row/column dendrograms, anything else for none.
#   Colv:       NOTE(review): this argument is ignored -- it is overwritten
#               from `dend` on the first line of the body; confirm intent.
#   withlabels: TRUE uses large text/margins (sized for very large PDFs),
#               FALSE shrinks both for label-free output.
plot_heatmap = function (mycor, main, dend="none", Colv=F, withlabels=F) {
# Dendrograms are drawn on both axes only when dend == "both".
Colv = (dend=="both")
Rowv = (dend=="both")
if (withlabels==TRUE) {
textsize=46
marginsize=400
} else {
textsize=.2
marginsize=.5
}
# Map the observed correlation range [-1, 1] onto indices 1..31 of a
# 32-colour blue-red palette, so the colours span only the observed range.
colorRange = round(range(mycor, na.rm=T) * 15) + 16
colorChoices = bluered(32)[colorRange[1]:colorRange[2]]
# `dend` relies on partial matching of heatmap.2's `dendrogram` argument.
heatmap.2(mycor, col=colorChoices, symm = TRUE, cexRow=textsize, cexCol = textsize,
#dend = "both", Colv=T,
dend = dend, Colv=Colv, Rowv=Rowv,
lmat=rbind( c(0, 3), c(2,1), c(0,4) ), lhei=c(0.1, 2, 0.1),
trace = "none", margins=c(marginsize, marginsize), key=FALSE, keysize=0.1, main=main)
}
# Compute pairwise Pearson correlations for `dat` and save the resulting
# heatmap as "heatmap_altmetrics_<journal><year><main>.pdf".
#
# Args:
#   dat:        data frame of the columns to correlate.
#   journal:    journal label used in the file name and title.
#   year:       year label used in the file name and title.
#   dend:       dendrogram option forwarded to plot_heatmap ("none"/"both").
#   withlabels: forwarded to plot_heatmap (label and margin sizing).
#   main:       extra tag for the file name and plot title.
save_correlation_heatmap = function(dat, journal, year, dend="none", withlabels=F, main=""){
  mycor = calc.correlations(dat, "pairwise.complete.obs", "pearson")
  pdf(paste("heatmap_altmetrics_", journal, year, main, ".pdf", sep=""), width=200, height=200)
  # Close the device even if plotting fails; the original called dev.off()
  # only on the success path, leaking an open PDF device on error.
  on.exit(dev.off(), add = TRUE)
  plot_heatmap(mycor, "", dend=dend, withlabels=withlabels)
  title(paste("\n", main, year, journal))
}
# for different journals, different years
# Write a family of correlation heatmaps: one overall, one per year,
# one per journal (all years pooled), and one per journal-year slice
# that has more than 50 articles.
# NOTE(review): relies on `dat` having `year` and `journal.x` columns.
lots_of_correlations = function(dat, corrColumns){
# Overall heatmap across every journal and year.
save_correlation_heatmap(dat[,corrColumns], "all", "all")
years = 2010:2004
journals = names(table(dat$journal.x))
#quartz()
for (year in years) {
inYear = which(dat$year == year)
# Per-year heatmap across all journals.
save_correlation_heatmap(dat[inYear,corrColumns], "all", year)
for (journal in journals) {
inJournal = which(dat$journal.x == journal)
# save one for all years; will be overwritten but that is ok
save_correlation_heatmap(dat[inJournal,corrColumns], journal, "all")
# now save a different one per journal, per year
#print(year); print(journal)
dat.subset = dat[intersect(inYear,inJournal),corrColumns]
# Only plot journal-year slices with enough data (> 50 articles).
if (nrow(dat.subset) > 50) {
#print(dim(dat.subset))
save_correlation_heatmap(dat.subset, journal, year) # , "n=", nrow(dat.subset)
}
}
}
}
# Heatmap of a random subsample of 1000 articles (columns, after the
# transpose) against the altmetrics variables for one publication year.
#
# Args:
#   dat:         article-level data frame (must contain a `year` column).
#   corrColumns: names of the altmetric columns to plot.
#   year:        publication year to subsample (default 2008).
heatmap_of_articles = function(dat, corrColumns, year=2008) {
  # Now a heatmap with a subsample of articles and the variables.
  # Fixed seed keeps the random subsample reproducible across runs.
  set.seed(42)
  inYear = which(dat$year == year)
  dat.tosample = dat[inYear, ]
  # Sample 1000 rows with replacement; seq_len() is safe even for an
  # empty subset, unlike the original 1:dim(...)[1] (which yields c(1, 0)).
  dat.subsample = as.matrix(dat.tosample[sample(seq_len(nrow(dat.tosample)), 1000, TRUE), corrColumns])
  m = 200
  pdf(paste("heatmap_articles_vs_altmetrics_", year, ".pdf", sep = ""))
  # Close the PDF device even if heatmap.2() errors; the original left
  # the device open on failure.
  on.exit(dev.off(), add = TRUE)
  heatmap.2(t(dat.subsample), col = bluered(m * 2)[1:(m * 2 - 1)],
    cexRow = 1, cexCol = .1, dend = "both", trace = "none",
    lmat = rbind(c(0, 3), c(2, 1), c(0, 4)), lhei = c(1.5, 4, 2),
    margins = c(1, 10), key = FALSE, keysize = 0.1, scale = "row", symbreaks = TRUE)
  title(paste("\narticles vs altmetrics", year))
}
# Render a colour palette as a single horizontal strip with no axes,
# useful for eyeballing the palettes fed to heatmap.2.
showpanel <- function(column) {
  strip <- matrix(seq_len(100), ncol = 1)
  image(z = strip, col = column, xaxt = "n", yaxt = "n")
}
#quartz()
#showpanel(colorChoices)
#showpanel(bluered(m*2)[1:(m*2-1)])
corrColumns = c(
"wosCount",
"pdfDownloadsCount",
"htmlDownloadsCount",
"mendeleyReadersCount",
"almCiteULikeCount",
"deliciousCount",
"almBlogsCount",
"backtweetsCount",
"wikipediaCites",
"f1000Factor",
"plosCommentCount",
"plosCommentResponsesCount",
"facebookCommentCount"
)
library(altmetrics.analysis)
data(dat_research_norm)
# Do transformation
dat.research.norm.transform = dat.research.norm
dat.research.norm.transform[, altmetricsColumns] = transformation_function(dat.research.norm[, altmetricsColumns])
mycor = calc.correlations(dat.research.norm.transform[, altmetricsColumns], "pairwise.complete.obs", "pearson")
# main one, with labels
save_correlation_heatmap(dat.research.norm.transform[, corrColumns], "all", "all", dend="none", withlabels=T, main="labels")
# subdivisions
lots_of_correlations(dat.research.norm.transform, corrColumns)
# now with dendrograms
save_correlation_heatmap(dat.research.norm.transform[, altmetricsColumns], "all", "all", dend="both", withlabels=T, main="dend")
heatmap_of_articles(dat.research.norm.transform, corrColumns)
# source("altmetrics.analysis/inst/doc_src/correlation/do_correlations_viz.R") |
library(pollimetry)
### Name: tonguelength
### Title: Converts ITD (cm) to tongue length for bees.
### Aliases: tonguelength
### ** Examples
# Two example bees: ITD values (cm, column IT) and their families.
example=cbind.data.frame(IT=c(1.3,2.3),
Family=c("Andrenidae","Apidae"))
# Estimate tongue length for all mouthpart types.
tonguelength(example,mouthpart="all")
| /data/genthat_extracted_code/pollimetry/examples/tonguelength.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 284 | r | library(pollimetry)
### Name: tonguelength
### Title: Converts ITD (cm) to tongue length for bees.
### Aliases: tonguelength
### ** Examples
# Two example bees: ITD values (cm, column IT) and their families.
example=cbind.data.frame(IT=c(1.3,2.3),
Family=c("Andrenidae","Apidae"))
# Estimate tongue length for all mouthpart types.
tonguelength(example,mouthpart="all")
|
#Set Directory
getwd()
setwd("D:/R files/PM-Project")
cellphone=read.csv("Cellphone.csv", sep=",", header=TRUE)
#Understand Data
dim(cellphone)
names(cellphone)
str(cellphone)
cellphone$Churn=as.factor(cellphone$Churn)
cellphone$ContractRenewal=as.factor(cellphone$ContractRenewal)
cellphone$DataPlan=as.factor(cellphone$DataPlan)
summary(cellphone)
View(cellphone)
# Checking null data
# Count missing values per column of the loaded data frame. Fixed: the
# original wrote sapply(data, ...), which referenced base R's data()
# function instead of the `cellphone` data frame read above.
sapply(cellphone, function(x) sum(is.na(x)))
#univariate analysis
boxplot(cellphone$AccountWeeks)
boxplot(cellphone$DayMins)
boxplot(cellphone$DayCalls)
boxplot(cellphone$MonthlyCharge)
boxplot(cellphone$OverageFee)
boxplot(cellphone$RoamMins)
library(ggplot2)
ggplot(cellphone, aes(x=Churn,
y=..count../sum(..count..)))+geom_bar()+
labs(x="Churn", y="Percent", title="Customer Churn")+scale_y_continuous(labels = scales::percent)
ggplot(cellphone, aes(x=ContractRenewal,
y=..count../sum(..count..)))+geom_bar()+
labs(x="Contract Renewal", y="Percent")+scale_y_continuous(labels = scales::percent)
ggplot(cellphone, aes(x=DataPlan,
y=..count../sum(..count..)))+geom_bar()+
labs(x="Data Plan", y="Percent")+scale_y_continuous(labels = scales::percent)
ggplot(cellphone, aes(x=MonthlyCharge, y=DayMins))+geom_point()
ggplot(cellphone, aes(x=MonthlyCharge, y=DataPlan))+geom_point()
ggplot(cellphone, aes(x=DataUsage, y=MonthlyCharge))+geom_point()
install.packages("corrplot")
library(corrplot)
cellphone.cor=cor(cellphone[,-c(1,3,4)])
cellphone.cor
corrplot(cellphone.cor)
pairs(cellphone[,-c(1,3,4)])
palette = colorRampPalette(c("green", "white", "red")) (20)
heatmap(x = cellphone.cor, col = palette, symm = TRUE)
library(caTools)
split <- sample.split(cellphone$Churn, SplitRatio = 0.7)
#we are splitting the data such that we have 70% of the data is Train Data and 30% of the data is my Test Data
train<- subset(cellphone, split == TRUE)
test<- subset( cellphone, split == FALSE)
table(train$Churn)
table(test$Churn)
model1=glm(Churn~., data=train, family="binomial")
model1
summary(model1)
# Check for multicollinearity
# via variance inflation factors. Fixed: vif() is exported by the `car`
# package; `carData` only ships datasets (car attaches carData itself).
library(car)
vif(model1)
model2=glm(Churn~., data=train[,-9], family=binomial(link="logit"))
summary(model2)
vif(model2)
model3=glm(Churn~., data=train[,-c(5,9)], family="binomial")
summary(model3)
vif(model3)
model4=glm(Churn~., data=train[,-c(2,5,9)], family="binomial")
summary(model4)
vif(model4)
library(blorr)
#AIC=Alkaline information criteria
blr_step_aic_both(model1, details = TRUE)
final_model=glm(Churn~CustServCalls+ContractRenewal+DayMins+DataPlan+OverageFee+RoamMins, data=train, family="binomial")
summary(final_model)
predtrain_log=predict(final_model, data=train, type="response")
predtest_log=predict(final_model, newdata=test, type="response")
table(train$Churn, predtrain_log>0.5)
Accuracy=(1944+64)/2333
Accuracy
sensitivity=64/(64+274)
sensitivity
specificity=1944/(1944+51)
specificity
table(test$Churn, predtest_log>0.5)
Accuracy=(832+31)/1000
Accuracy
sensitivity=31/(31+114)
sensitivity
specificity=832/(832+23)
specificity
library(ROCR)
library(ineq)
library(InformationValue)
install.packages("InformationValue")
predobjtrain = prediction (predtrain_log, train$Churn)
perftrain = performance(predobjtrain, "tpr", "fpr")
plot(perftrain)#ROC curve
preobjtest=prediction(predtest_log,test$Churn)
preftest=performance(preobjtest,"tpr","fpr")
plot(preftest)
auc = performance(predobjtrain, "auc")
auc = as.numeric(auc@y.values)
auc
auctest=performance(preobjtest,"auc")
auctest=as.numeric(auctest@y.values)
auctest
KStrain=max(perftrain@y.values[[1]]-perftrain@x.values[[1]])
KStrain
KStest=max(preftest@y.values[[1]]-preftest@x.values[[1]])
KStest#same?
Ginitrain=ineq(predtrain_log, "gini")
Ginitrain
Ginitest=ineq(predtest_log, "gini")
Ginitest
############KNN##############
#Normalising data
# Min-max scale a numeric vector onto [0, 1].
normalize <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
norm_data=as.data.frame(lapply(cellphone[,-c(1,3,4)], normalize))
summary(norm_data)
usable_data = cbind(cellphone[,c(1,3,4)], norm_data)
View(usable_data)
summary(usable_data)
library(caTools)
set.seed(10)
split <- sample.split(usable_data$Churn, SplitRatio = 0.7)
#we are splitting the data such that we have 70% of the data is Train Data and 30% of the data is my Test Data
norm_train<- subset(usable_data, split == TRUE)
norm_test<- subset(usable_data, split == FALSE)
dim(norm_train)
dim(norm_test)
library(class)
predKNN=knn(norm_train[,-1], norm_test[,-1], norm_train[,1], k=48)
table.knn1=table(norm_test[,1],predKNN)
sum(diag(table.knn1)/sum(table.knn1))
predKNN2=knn(norm_train[,-1], norm_test[,-1], norm_train[,1], k=35)
table.knn2=table(norm_test[,1],predKNN2)
sum(diag(table.knn2)/sum(table.knn2))
predKNN3=knn(norm_train[,-1], norm_test[,-1], norm_train[,1], k=30)
table.knn3=table(norm_test[,1],predKNN3)
sum(diag(table.knn3)/sum(table.knn3))
predKNN4=knn(norm_train[,-1], norm_test[,-1], norm_train[,1], k=10)
table.knn4=table(norm_test[,1],predKNN4)
sum(diag(table.knn4)/sum(table.knn4))
table.knn4
Accuracy_KNN=sum(diag(table.knn4)/sum(table.knn4))
Accuracy_KNN
sensitivity_KNN=61/(61+84)
sensitivity_KNN
specificity_KNN=846/(846+9)
specificity_KNN
#######NAIVES BAYES############
install.packages("e1071")
library(e1071)
NB=naiveBayes(Churn~., data=norm_train)
predNB=predict(NB, norm_test,type="class")
tab.NB=table(norm_test[,1],predNB)
tab.NB
Accuracy_NB=sum(diag(tab.NB)/sum(tab.NB))
Accuracy_NB
sensitivity_KNN=48/(48+97)
sensitivity_KNN
specificity_KNN=832/(832+23)
specificity_KNN
| /practice.R | no_license | GarimaGugnani/customer_churn | R | false | false | 5,744 | r | #Set Directory
getwd()
setwd("D:/R files/PM-Project")
cellphone=read.csv("Cellphone.csv", sep=",", header=TRUE)
#Understand Data
dim(cellphone)
names(cellphone)
str(cellphone)
cellphone$Churn=as.factor(cellphone$Churn)
cellphone$ContractRenewal=as.factor(cellphone$ContractRenewal)
cellphone$DataPlan=as.factor(cellphone$DataPlan)
summary(cellphone)
View(cellphone)
# Checking null data
# Count missing values per column of the loaded data frame. Fixed: the
# original wrote sapply(data, ...), which referenced base R's data()
# function instead of the `cellphone` data frame read above.
sapply(cellphone, function(x) sum(is.na(x)))
#univariate analysis
boxplot(cellphone$AccountWeeks)
boxplot(cellphone$DayMins)
boxplot(cellphone$DayCalls)
boxplot(cellphone$MonthlyCharge)
boxplot(cellphone$OverageFee)
boxplot(cellphone$RoamMins)
library(ggplot2)
ggplot(cellphone, aes(x=Churn,
y=..count../sum(..count..)))+geom_bar()+
labs(x="Churn", y="Percent", title="Customer Churn")+scale_y_continuous(labels = scales::percent)
ggplot(cellphone, aes(x=ContractRenewal,
y=..count../sum(..count..)))+geom_bar()+
labs(x="Contract Renewal", y="Percent")+scale_y_continuous(labels = scales::percent)
ggplot(cellphone, aes(x=DataPlan,
y=..count../sum(..count..)))+geom_bar()+
labs(x="Data Plan", y="Percent")+scale_y_continuous(labels = scales::percent)
ggplot(cellphone, aes(x=MonthlyCharge, y=DayMins))+geom_point()
ggplot(cellphone, aes(x=MonthlyCharge, y=DataPlan))+geom_point()
ggplot(cellphone, aes(x=DataUsage, y=MonthlyCharge))+geom_point()
install.packages("corrplot")
library(corrplot)
cellphone.cor=cor(cellphone[,-c(1,3,4)])
cellphone.cor
corrplot(cellphone.cor)
pairs(cellphone[,-c(1,3,4)])
palette = colorRampPalette(c("green", "white", "red")) (20)
heatmap(x = cellphone.cor, col = palette, symm = TRUE)
library(caTools)
split <- sample.split(cellphone$Churn, SplitRatio = 0.7)
#we are splitting the data such that we have 70% of the data is Train Data and 30% of the data is my Test Data
train<- subset(cellphone, split == TRUE)
test<- subset( cellphone, split == FALSE)
table(train$Churn)
table(test$Churn)
model1=glm(Churn~., data=train, family="binomial")
model1
summary(model1)
# Check for multicollinearity
# via variance inflation factors. Fixed: vif() is exported by the `car`
# package; `carData` only ships datasets (car attaches carData itself).
library(car)
vif(model1)
model2=glm(Churn~., data=train[,-9], family=binomial(link="logit"))
summary(model2)
vif(model2)
model3=glm(Churn~., data=train[,-c(5,9)], family="binomial")
summary(model3)
vif(model3)
model4=glm(Churn~., data=train[,-c(2,5,9)], family="binomial")
summary(model4)
vif(model4)
library(blorr)
#AIC=Alkaline information criteria
blr_step_aic_both(model1, details = TRUE)
final_model=glm(Churn~CustServCalls+ContractRenewal+DayMins+DataPlan+OverageFee+RoamMins, data=train, family="binomial")
summary(final_model)
predtrain_log=predict(final_model, data=train, type="response")
predtest_log=predict(final_model, newdata=test, type="response")
table(train$Churn, predtrain_log>0.5)
Accuracy=(1944+64)/2333
Accuracy
sensitivity=64/(64+274)
sensitivity
specificity=1944/(1944+51)
specificity
table(test$Churn, predtest_log>0.5)
Accuracy=(832+31)/1000
Accuracy
sensitivity=31/(31+114)
sensitivity
specificity=832/(832+23)
specificity
library(ROCR)
library(ineq)
library(InformationValue)
install.packages("InformationValue")
predobjtrain = prediction (predtrain_log, train$Churn)
perftrain = performance(predobjtrain, "tpr", "fpr")
plot(perftrain)#ROC curve
preobjtest=prediction(predtest_log,test$Churn)
preftest=performance(preobjtest,"tpr","fpr")
plot(preftest)
auc = performance(predobjtrain, "auc")
auc = as.numeric(auc@y.values)
auc
auctest=performance(preobjtest,"auc")
auctest=as.numeric(auctest@y.values)
auctest
KStrain=max(perftrain@y.values[[1]]-perftrain@x.values[[1]])
KStrain
KStest=max(preftest@y.values[[1]]-preftest@x.values[[1]])
KStest#same?
Ginitrain=ineq(predtrain_log, "gini")
Ginitrain
Ginitest=ineq(predtest_log, "gini")
Ginitest
############KNN##############
#Normalising data
# Min-max scale a numeric vector onto [0, 1].
normalize <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
norm_data=as.data.frame(lapply(cellphone[,-c(1,3,4)], normalize))
summary(norm_data)
usable_data = cbind(cellphone[,c(1,3,4)], norm_data)
View(usable_data)
summary(usable_data)
library(caTools)
set.seed(10)
split <- sample.split(usable_data$Churn, SplitRatio = 0.7)
#we are splitting the data such that we have 70% of the data is Train Data and 30% of the data is my Test Data
norm_train<- subset(usable_data, split == TRUE)
norm_test<- subset(usable_data, split == FALSE)
dim(norm_train)
dim(norm_test)
library(class)
predKNN=knn(norm_train[,-1], norm_test[,-1], norm_train[,1], k=48)
table.knn1=table(norm_test[,1],predKNN)
sum(diag(table.knn1)/sum(table.knn1))
predKNN2=knn(norm_train[,-1], norm_test[,-1], norm_train[,1], k=35)
table.knn2=table(norm_test[,1],predKNN2)
sum(diag(table.knn2)/sum(table.knn2))
predKNN3=knn(norm_train[,-1], norm_test[,-1], norm_train[,1], k=30)
table.knn3=table(norm_test[,1],predKNN3)
sum(diag(table.knn3)/sum(table.knn3))
predKNN4=knn(norm_train[,-1], norm_test[,-1], norm_train[,1], k=10)
table.knn4=table(norm_test[,1],predKNN4)
sum(diag(table.knn4)/sum(table.knn4))
table.knn4
Accuracy_KNN=sum(diag(table.knn4)/sum(table.knn4))
Accuracy_KNN
sensitivity_KNN=61/(61+84)
sensitivity_KNN
specificity_KNN=846/(846+9)
specificity_KNN
#######NAIVES BAYES############
install.packages("e1071")
library(e1071)
NB=naiveBayes(Churn~., data=norm_train)
predNB=predict(NB, norm_test,type="class")
tab.NB=table(norm_test[,1],predNB)
tab.NB
Accuracy_NB=sum(diag(tab.NB)/sum(tab.NB))
Accuracy_NB
sensitivity_KNN=48/(48+97)
sensitivity_KNN
specificity_KNN=832/(832+23)
specificity_KNN
|
# Using sieve of Eratosthene
# > Rscript 3_higher_prime_factor.r
# Return all primes <= num via the sieve of Eratosthenes.
#
# Fixes over the original:
#  - the original iterated `for (i in prime:sqrt(num))` while mutating
#    `prime` inside the loop; that loop runs backwards for num < 4 and
#    only coincidentally performed enough passes otherwise;
#  - min(which(...)) could warn and yield Inf once no TRUE value remained.
#
# Args:
#   num: upper bound, inclusive (non-integers are truncated).
# Returns:
#   Integer vector of all primes <= num (empty if num < 2).
sieveOfEratosthenes <- function(num) {
  num <- floor(num)
  if (num < 2) {
    return(integer(0))
  }
  is_prime <- rep(TRUE, num)
  is_prime[1] <- FALSE
  p <- 2
  while (p * p <= num) {
    if (is_prime[p]) {
      # Every multiple of p from p^2 upward is composite.
      is_prime[seq.int(p * p, num, p)] <- FALSE
    }
    p <- p + 1
  }
  which(is_prime)
}
# Return the prime factorisation of num (with multiplicity), smallest first.
#
# Fixes over the original:
#  - the sieve is built once, not on every pass of an outer while loop;
#  - each prime is divided out repeatedly, so repeated factors
#    (e.g. 12 = 2*2*3) are all reported;
#  - a leftover cofactor > sqrt(num) is appended, so prime inputs
#    terminate instead of looping forever.
#
# Args:
#   num: whole number >= 2 to factorise.
# Returns:
#   Numeric vector of prime factors whose product equals num.
primeFactors <- function(num) {
  factors <- NULL
  rest <- num
  for (p in sieveOfEratosthenes(sqrt(num))) {
    if (rest == 1) {
      break
    }
    while (rest %% p == 0) {
      factors <- c(factors, p)
      rest <- rest %/% p
    }
  }
  if (rest > 1) {
    # The remainder is a single prime factor larger than sqrt(num).
    factors <- c(factors, rest)
  }
  factors
}
# Time the factorisation of the Project Euler #3 target number.
system.time( result <- primeFactors(600851475143) )
print("Using sieve of eratosthenes")
tail(result, n=1) | /R/3_higher_prime_factor.r | no_license | dam/project-euler | R | false | false | 760 | r | # Using sieve of Eratosthene
# > Rscript 3_higher_prime_factor.r
# Return all primes <= num via the sieve of Eratosthenes.
#
# Fixes over the original:
#  - the original iterated `for (i in prime:sqrt(num))` while mutating
#    `prime` inside the loop; that loop runs backwards for num < 4 and
#    only coincidentally performed enough passes otherwise;
#  - min(which(...)) could warn and yield Inf once no TRUE value remained.
#
# Args:
#   num: upper bound, inclusive (non-integers are truncated).
# Returns:
#   Integer vector of all primes <= num (empty if num < 2).
sieveOfEratosthenes <- function(num) {
  num <- floor(num)
  if (num < 2) {
    return(integer(0))
  }
  is_prime <- rep(TRUE, num)
  is_prime[1] <- FALSE
  p <- 2
  while (p * p <= num) {
    if (is_prime[p]) {
      # Every multiple of p from p^2 upward is composite.
      is_prime[seq.int(p * p, num, p)] <- FALSE
    }
    p <- p + 1
  }
  which(is_prime)
}
# Return the prime factorisation of num (with multiplicity), smallest first.
#
# Fixes over the original:
#  - the sieve is built once, not on every pass of an outer while loop;
#  - each prime is divided out repeatedly, so repeated factors
#    (e.g. 12 = 2*2*3) are all reported;
#  - a leftover cofactor > sqrt(num) is appended, so prime inputs
#    terminate instead of looping forever.
#
# Args:
#   num: whole number >= 2 to factorise.
# Returns:
#   Numeric vector of prime factors whose product equals num.
primeFactors <- function(num) {
  factors <- NULL
  rest <- num
  for (p in sieveOfEratosthenes(sqrt(num))) {
    if (rest == 1) {
      break
    }
    while (rest %% p == 0) {
      factors <- c(factors, p)
      rest <- rest %/% p
    }
  }
  if (rest > 1) {
    # The remainder is a single prime factor larger than sqrt(num).
    factors <- c(factors, rest)
  }
  factors
}
# Time the factorisation of the Project Euler #3 target number and show
# its largest prime factor (the final, largest element of the result).
system.time( result <- primeFactors(600851475143) )
print("Using sieve of eratosthenes")
tail(result, n=1)
# library(tensorflow)
# library(keras)
library(R2deepR)
library(reticulate)
# Source the Python helper; it is expected to expose `model`, `dataset`
# and `lr_range_test` to the R session -- TODO confirm.
source_python('misc/R-load-model-lr-range-test.py')
# Run the learning-rate range test and persist its parameters to disk.
params <- lr_range_test(model, dataset)
filenm <- 'params.txt'
write(params, filenm)
write(params, filenm) | /misc/lr-range-test-trump-tweeter.R | no_license | ifrit98/trump-change | R | false | false | 216 | r | # library(tensorflow)
# library(keras)
library(R2deepR)
library(reticulate)
# Source the Python helper; it is expected to expose `model`, `dataset`
# and `lr_range_test` to the R session -- TODO confirm.
source_python('misc/R-load-model-lr-range-test.py')
# Run the learning-rate range test and persist its parameters to disk.
params <- lr_range_test(model, dataset)
filenm <- 'params.txt'
write(params, filenm)
# clear global environment and load libraries
rm(list=ls())
library(rvest)
library(jsonlite)
library(data.table)
library(tidyverse)
library(ggplot2)
library(dplyr)
# add link for simplejob.com
website <- "https://simplejob.com/search/all?"
# save html
t <- read_html(website)
write_html(t, "t.html")
# get the JSON
job_list <- fromJSON(
t %>%
html_nodes(xpath = "//script[@type='application/json']") %>%
html_text())
# unbox the JSON
toJSON(job_list, auto_unbox = T)
# load data into dataset
job_df <- job_list$props$pageProps$jobsPageData$data$positions
company_df <- job_list$props$pageProps$jobs$company
###### some cleaning in job_df:
# Split multi-valued url_slug entries into one row per slug, promote the
# slug to a "Category" column, then drop the raw url_slug/type columns.
# (TRUE written out instead of the reassignable shorthand T; the three
# assignments are collapsed into a single pipeline.)
job_df <- job_df %>%
  separate_rows(url_slug, convert = TRUE) %>%
  mutate(Category = url_slug) %>%
  select(-c(url_slug, type))
#drop unnassacry categories:
job_df <- job_df %>% subset(Category != "allasok" & Category != "munkas" & Category != "konyhai" & Category != "center" & Category != "ugyintezo" & Category != "munkak" & Category != "munkatars" & Category != "szelli" & Category != "munka" & Category != "allas" & Category != "allasok" & Category != "egyeb" & Category != "instruktor" & Category != "fizikai")
# rename some categories
job_df$Category <- gsub("kisegito", "konyhai kisegito", job_df$Category)
job_df$Category <- gsub("call", "call-center", job_df$Category)
job_df$Category <- gsub("logisztikai", "logisztikai ugyintezo", job_df$Category)
job_df$Category <- gsub("raktarvezeto", "raktarvezeto helyettes", job_df$Category)
job_df$Category <- gsub("szakmunkak", "egyeb szakmunkak", job_df$Category)
job_df$Category <- gsub("fitness", "fitness instruktor", job_df$Category)
job_df$Category <- gsub("konnyu", "konnyu fizikai", job_df$Category)
# View(job_df)
####### some cleaning in company_df
# selecting necassary columns:
company_df <- company_df %>% select(c(company_id_ai, active_followers, name, introduction, motto, web, facebook, instagram, video_url, count_active_job_offers, url_slug))
company_df <- company_df %>% distinct()
# View(company_df)
### Analysis of job_df:
# select the highest number of listings in categories:
frequency_job <- job_df %>% group_by(Category) %>% count(Category)
# sum_job <- job_df %>% count(name)
# select categories where the frequency is higher than 1
frequency_job <- filter(frequency_job, n > 1 )
# total number of offerings:
# num_off <- c("Total number of offerings: ", cbind(sum(sum_job$n)))
# View(frequency_job)
# visualize the frequencies
ggplot(frequency_job, aes(x = reorder(Category, -n), y = n)) +
geom_bar(stat = 'identity', fill = 'red') +
labs(x = "Category Name", y = "Frequency") +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
#### Analysis of company_df
summary_comp_foll <- company_df %>% select(c(active_followers, name, count_active_job_offers))
# View(summary_comp_foll)
# select most nessacary columns for visualization and exclude companies with 0 followers
company_active_foll <- summary_comp_foll %>% filter(active_followers > 0)
# visualization of company and their number of followers
ggplot(company_active_foll, aes(x = reorder(name, -active_followers), y = active_followers)) +
geom_bar(stat = 'identity', fill = 'Orange') +
labs(x = "Company Name", y = "Number of active followers") +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
# visualization of actibe job offers by companies
ggplot(company_active_foll, aes(x = reorder(name, -count_active_job_offers), y = count_active_job_offers)) +
geom_bar(stat = 'identity', fill = 'gold') +
labs(x = "Company name", y = "Number of active job offerings") +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
| /Assignemnt2/SimpleJob_DiamantEszter.R | no_license | DiamantEszter97/WebScraping | R | false | false | 3,704 | r | # clear global environment and load libraries
rm(list=ls())
library(rvest)
library(jsonlite)
library(data.table)
library(tidyverse)
library(ggplot2)
library(dplyr)
# add link for simplejob.com
website <- "https://simplejob.com/search/all?"
# save html
t <- read_html(website)
write_html(t, "t.html")
# get the JSON
job_list <- fromJSON(
t %>%
html_nodes(xpath = "//script[@type='application/json']") %>%
html_text())
# unbox the JSON
toJSON(job_list, auto_unbox = T)
# load data into dataset
job_df <- job_list$props$pageProps$jobsPageData$data$positions
company_df <- job_list$props$pageProps$jobs$company
###### some cleaning in job_df:
# Split multi-valued url_slug entries into one row per slug, promote the
# slug to a "Category" column, then drop the raw url_slug/type columns.
# (TRUE written out instead of the reassignable shorthand T; the three
# assignments are collapsed into a single pipeline.)
job_df <- job_df %>%
  separate_rows(url_slug, convert = TRUE) %>%
  mutate(Category = url_slug) %>%
  select(-c(url_slug, type))
#drop unnassacry categories:
job_df <- job_df %>% subset(Category != "allasok" & Category != "munkas" & Category != "konyhai" & Category != "center" & Category != "ugyintezo" & Category != "munkak" & Category != "munkatars" & Category != "szelli" & Category != "munka" & Category != "allas" & Category != "allasok" & Category != "egyeb" & Category != "instruktor" & Category != "fizikai")
# rename some categories
job_df$Category <- gsub("kisegito", "konyhai kisegito", job_df$Category)
job_df$Category <- gsub("call", "call-center", job_df$Category)
job_df$Category <- gsub("logisztikai", "logisztikai ugyintezo", job_df$Category)
job_df$Category <- gsub("raktarvezeto", "raktarvezeto helyettes", job_df$Category)
job_df$Category <- gsub("szakmunkak", "egyeb szakmunkak", job_df$Category)
job_df$Category <- gsub("fitness", "fitness instruktor", job_df$Category)
job_df$Category <- gsub("konnyu", "konnyu fizikai", job_df$Category)
# View(job_df)
####### some cleaning in company_df
# selecting necassary columns:
company_df <- company_df %>% select(c(company_id_ai, active_followers, name, introduction, motto, web, facebook, instagram, video_url, count_active_job_offers, url_slug))
company_df <- company_df %>% distinct()
# View(company_df)
### Analysis of job_df:
# select the highest number of listings in categories:
frequency_job <- job_df %>% group_by(Category) %>% count(Category)
# sum_job <- job_df %>% count(name)
# select categories where the frequency is higher than 1
frequency_job <- filter(frequency_job, n > 1 )
# total number of offerings:
# num_off <- c("Total number of offerings: ", cbind(sum(sum_job$n)))
# View(frequency_job)
# visualize the frequencies
ggplot(frequency_job, aes(x = reorder(Category, -n), y = n)) +
geom_bar(stat = 'identity', fill = 'red') +
labs(x = "Category Name", y = "Frequency") +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
#### Analysis of company_df
summary_comp_foll <- company_df %>% select(c(active_followers, name, count_active_job_offers))
# View(summary_comp_foll)
# select most nessacary columns for visualization and exclude companies with 0 followers
company_active_foll <- summary_comp_foll %>% filter(active_followers > 0)
# visualization of company and their number of followers
ggplot(company_active_foll, aes(x = reorder(name, -active_followers), y = active_followers)) +
geom_bar(stat = 'identity', fill = 'Orange') +
labs(x = "Company Name", y = "Number of active followers") +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
# visualization of actibe job offers by companies
ggplot(company_active_foll, aes(x = reorder(name, -count_active_job_offers), y = count_active_job_offers)) +
geom_bar(stat = 'identity', fill = 'gold') +
labs(x = "Company name", y = "Number of active job offerings") +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
|
\name{GetThruput}
\alias{GetThruput}
\title{ Get the system throughput }
\description{
Determine the system throughput for the specified workload.
}
\usage{
GetThruput(class, wname)
}
\arguments{
\item{class}{ TRANS, TERM, or BATCH type. }
\item{wname}{ Character string containing the name of the workload. }
}
\details{
The classes of workloads are:
\itemize{
\item{ TRANS }{ a workload that is defined by arrival rate, not think time; only valid for an open circuit }
\item{ TERM }{ a workload with non-zero think time: there will be \code{think} delay before requests re-enter the system; only valid for a closed circuit }
\item{ BATCH }{ a workload with no think time: requests immediately re-enter the system; only valid for a closed circuit }
}
}
\value{
System throughput as a decimal number.
}
\author{
Neil J. Gunther
}
\references{
Gunther, N. J. (2011) \emph{Analyzing computer systems performance with PERL::PDQ}, 2nd edn., Heidelberg, Germany, Springer-Verlag. \url{http://www.perfdynamics.com/iBook/ppa_new.html}
}
\examples{
library(pdq)
Init("GetThruput Example")
CreateClosed("DB Users", TERM, 10.0, 30.5)
CreateNode("DB Server", CEN, FCFS)
SetDemand("DB Server", "DB Users", 1.0)
Solve(EXACT)
tp <- GetThruput(TRANS, "DB Users")
tp
}
| /interfaces/R/pdq/man/GetThruput.Rd | permissive | gitathrun/pdq-qnm-pkg | R | false | false | 1,259 | rd | \name{GetThruput}
\alias{GetThruput}
\title{ Get the system throughput }
\description{
Determine the system throughput for the specified workload.
}
\usage{
GetThruput(class, wname)
}
\arguments{
\item{class}{ TRANS, TERM, or BATCH type. }
\item{wname}{ Character string containing the name of the workload. }
}
\details{
The classes of workloads are:
\itemize{
\item{ TRANS }{ a workload that is defined by arrival rate, not think time; only valid for an open circuit }
\item{ TERM }{ a workload with non-zero think time: there will be \code{think} delay before requests re-enter the system; only valid for a closed circuit }
\item{ BATCH }{ a workload with no think time: requests immediately re-enter the system; only valid for a closed circuit }
}
}
\value{
System throughput as a decimal number.
}
\author{
Neil J. Gunther
}
\references{
Gunther, N. J. (2011) \emph{Analyzing computer systems performance with PERL::PDQ}, 2nd edn., Heidelberg, Germany, Springer-Verlag. \url{http://www.perfdynamics.com/iBook/ppa_new.html}
}
\examples{
library(pdq)
Init("GetThruput Example")
CreateClosed("DB Users", TERM, 10.0, 30.5)
CreateNode("DB Server", CEN, FCFS)
SetDemand("DB Server", "DB Users", 1.0)
Solve(EXACT)
tp <- GetThruput(TRANS, "DB Users")
tp
}
|
\name{pwr-package}
\alias{pwr-package}
\alias{pwr}
\docType{package}
\title{
Basic Functions for Power Analysis
pwr
}
\description{
Power calculations along the lines of Cohen (1988)
using in particular the same notations for effect sizes.
Examples from the book are given.
}
\details{
\tabular{ll}{
Package: \tab pwr\cr
Type: \tab Package\cr
Version: \tab 1.1-3\cr
Date: \tab 2015-08-18\cr
License: \tab GPL (>= 3) \cr
}
This package contains functions for basic power calculations using effect sizes and
notations from Cohen (1988) :
pwr.p.test: test for one proportion (ES=h)
pwr.2p.test: test for two proportions (ES=h)
pwr.2p2n.test: test for two proportions (ES=h, unequal sample sizes)
pwr.t.test: one sample and two samples (equal sizes) t tests for means (ES=d)
pwr.t2n.test: two samples (different sizes) t test for means (ES=d)
pwr.anova.test: test for one-way balanced anova (ES=f)
pwr.r.test: correlation test (ES=r)
pwr.chisq.test: chi-squared test (ES=w)
pwr.f2.test: test for the general linear model (ES=f2)
ES.h: computing effect size h for proportions tests
ES.w1: computing effect size w for the goodness of fit chi-squared test
ES.w2: computing effect size w for the association chi-squared test
cohen.ES: computing effect sizes for all the previous tests corresponding to conventional effect sizes (small, medium, large)
}
\author{
Stephane Champely, based on previous works by Claus Ekstrom and Peter Dalgaard,
with contributions of Jeffrey Gill and Jan Wunder.
Maintainer: Helios De Rosario-Martinez <helios.derosario@gmail.com>
}
\references{Cohen, J. (1988). Statistical power analysis for the
behavioral sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum.}
\keyword{ package }
\keyword{htest}
\seealso{power.t.test,power.prop.test,power.anova.test}
\examples{
## Exercise 8.1 P. 357 from Cohen (1988)
pwr.anova.test(f=0.28,k=4,n=20,sig.level=0.05)
## Exercise 6.1 p. 198 from Cohen (1988)
pwr.2p.test(h=0.3,n=80,sig.level=0.05,alternative="greater")
## Exercise 7.3 p. 251
pwr.chisq.test(w=0.346,df=(2-1)*(3-1),N=140,sig.level=0.01)
## Exercise 6.5 p. 203 from Cohen (1988)
pwr.p.test(h=0.2,n=60,sig.level=0.05,alternative="two.sided")
}
| /man/pwr-package.Rd | no_license | aanandku/pwr | R | false | false | 2,190 | rd | \name{pwr-package}
\alias{pwr-package}
\alias{pwr}
\docType{package}
\title{
Basic Functions for Power Analysis
pwr
}
\description{
Power calculations along the lines of Cohen (1988)
using in particular the same notations for effect sizes.
Examples from the book are given.
}
\details{
\tabular{ll}{
Package: \tab pwr\cr
Type: \tab Package\cr
Version: \tab 1.1-3\cr
Date: \tab 2015-08-18\cr
License: \tab GPL (>= 3) \cr
}
This package contains functions for basic power calculations using effect sizes and
notations from Cohen (1988) :
pwr.p.test: test for one proportion (ES=h)
pwr.2p.test: test for two proportions (ES=h)
pwr.2p2n.test: test for two proportions (ES=h, unequal sample sizes)
pwr.t.test: one sample and two samples (equal sizes) t tests for means (ES=d)
pwr.t2n.test: two samples (different sizes) t test for means (ES=d)
pwr.anova.test: test for one-way balanced anova (ES=f)
pwr.r.test: correlation test (ES=r)
pwr.chisq.test: chi-squared test (ES=w)
pwr.f2.test: test for the general linear model (ES=f2)
ES.h: computing effect size h for proportions tests
ES.w1: computing effect size w for the goodness of fit chi-squared test
ES.w2: computing effect size w for the association chi-squared test
cohen.ES: computing effect sizes for all the previous tests corresponding to conventional effect sizes (small, medium, large)
}
\author{
Stephane Champely, based on previous works by Claus Ekstrom and Peter Dalgaard,
with contributions of Jeffrey Gill and Jan Wunder.
Maintainer: Helios De Rosario-Martinez <helios.derosario@gmail.com>
}
\references{Cohen, J. (1988). Statistical power analysis for the
behavioral sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum.}
\keyword{ package }
\keyword{htest}
\seealso{power.t.test,power.prop.test,power.anova.test}
\examples{
## Exercise 8.1 P. 357 from Cohen (1988)
pwr.anova.test(f=0.28,k=4,n=20,sig.level=0.05)
## Exercise 6.1 p. 198 from Cohen (1988)
pwr.2p.test(h=0.3,n=80,sig.level=0.05,alternative="greater")
## Exercise 7.3 p. 251
pwr.chisq.test(w=0.346,df=(2-1)*(3-1),N=140,sig.level=0.01)
## Exercise 6.5 p. 203 from Cohen (1988)
pwr.p.test(h=0.2,n=60,sig.level=0.05,alternative="two.sided")
}
|
# Build the "toronto3" ranking tables: align guide-level rankings with
# screen log-fold-changes, then split by essential / non-essential gene
# sets and save each subset.
library(crisprDesignData)
library(S4Vectors)
library(dplyr)
library(pbapply)
data(txdb_human)
load("processed/guideMap.rda")  # loads `guideMap` (has columns `name` and `lfc`)
rankings <- readRDS("../../processingRankings/objects/rankings.rds")
# Restrict both tables to guides present in both, in matched row order.
common <- intersect(guideMap$name,rankings$name)
guideMap <- guideMap[match(common, guideMap$name),]
rankings <- rankings[match(common, rankings$name),]
# Only taking in common
good <- which(complete.cases(rankings))  # drop guides with any missing ranking
rankings <- rankings[good,]
guideMap <- guideMap[good,]
# Ready for analysis:
df <- rankings
df$lfc <- guideMap$lfc  # safe row-wise copy: both tables were aligned by `name` above
# Getting essential genes:
load("../../data/egs.rda")   # loads `egs` (essential gene symbols)
load("../../data/negs.rda")  # loads `negs` (non-essential gene symbols)
# Map gene symbols to gene IDs via the human CDS annotation.
key <- mcols(txdb_human$cds)[, c("gene_symbol", "gene_id")]
key <- as.data.frame(key)
key <- key[!duplicated(key),]
egs <- unique(key$gene_id[key$gene_symbol %in% egs])
negs <- unique(key$gene_id[key$gene_symbol %in% negs])
rankings_toronto3 <- df[df$ensembl_id %in% egs,]
save(rankings_toronto3,
file="../objects/rankings_toronto3.rda")
rankings_toronto3_neg <- df[df$ensembl_id %in% negs,]
save(rankings_toronto3_neg,
file="../objects/rankings_toronto3_neg.rda")
| /analyses/rankings/processingDatasets/torontov3/process.R | no_license | crisprVerse/crisprVersePaper | R | false | false | 1,092 | r | library(crisprDesignData)
library(S4Vectors)
library(dplyr)
library(pbapply)
data(txdb_human)
load("processed/guideMap.rda")
rankings <- readRDS("../../processingRankings/objects/rankings.rds")
common <- intersect(guideMap$name,rankings$name)
guideMap <- guideMap[match(common, guideMap$name),]
rankings <- rankings[match(common, rankings$name),]
# Only taking in common
good <- which(complete.cases(rankings))
rankings <- rankings[good,]
guideMap <- guideMap[good,]
# Ready for analysis:
df <- rankings
df$lfc <- guideMap$lfc
# Getting essential genes:
load("../../data/egs.rda")
load("../../data/negs.rda")
key <- mcols(txdb_human$cds)[, c("gene_symbol", "gene_id")]
key <- as.data.frame(key)
key <- key[!duplicated(key),]
egs <- unique(key$gene_id[key$gene_symbol %in% egs])
negs <- unique(key$gene_id[key$gene_symbol %in% negs])
rankings_toronto3 <- df[df$ensembl_id %in% egs,]
save(rankings_toronto3,
file="../objects/rankings_toronto3.rda")
rankings_toronto3_neg <- df[df$ensembl_id %in% negs,]
save(rankings_toronto3_neg,
file="../objects/rankings_toronto3_neg.rda")
|
#' P-values for n = 7
#'
#' Precomputed p-value dataset for sample size n = 7, shipped with the
#' package. Created by \code{data_raw/p_values_7.R}.
#'
#' @name p_values_7
#' @export
#' @examples
#' head(p_values_7)
NULL
| /wrswoR.benchmark/R/p_values_7.R | no_license | ingted/R-Examples | R | false | false | 141 | r | #' P-values for n = 7
#'
#' Created by \code{data_raw/p_values_7.R}.
#'
#' @name p_values_7
#' @export
#' @examples
#' head(p_values_7)
NULL
|
# Helper
# List of synoptic-chart images offered by the app.
# Names are the UI display labels (accented characters written as
# unicode escapes); values are the corresponding PNG file names.
sinls <- c(
'Agua precipitable' = 'Agua_precipitable.png',
'Presi\u00f3n reducida nivel del mar' = 'presionsuperficial.png',
'L\u00edneas de corriente 200 hPa' = 'streamlines.png',
'Divergencia 850 hPa' = 'div_850.png',
'Divergencia 200 hPa' = 'div_200.png',
'Velocidad vertical 800 hPa' = 'Vel_vert_800.png',
'Velocidad vertical 500 hPa' = 'Vel_vert_500.png',
'Velocidad vertical 300 hPa' = 'Vel_vert_300.png',
'\u00cdndice CAPE' = 'cape.png',
'\u00cdndice GDI' = 'GDI.png',
'\u00cdndice K' = 'IndiceK.png'
)
# Convert raw image counts to physical units for a given ABI band.
# Bands 1-2 are scaled (reflectance percent); bands 7-15 are converted
# linearly and shifted from Kelvin to degrees Celsius.
# Band numbers without an entry below leave `dat` unchanged, matching
# the original if/else chain which had no final else branch.
fact <- function(dat, band){
  # Per-band (scale, offset) calibration coefficients.
  coefs <- list(
    `1`  = c(0.0003175 * 100, 0),
    `2`  = c(0.0002442 * 100, 0),
    `7`  = c(0.0130962, 197.31 - 273.15),
    `8`  = c(0.0422499, 138.05 - 273.15),
    `9`  = c(0.0423391, 137.70 - 273.15),
    `10` = c(0.0498892, 126.91 - 273.15),
    `12` = c(0.0472703, 117.49 - 273.15),
    `13` = c(0.0614533, 89.62 - 273.15),
    `14` = c(0.0598507, 96.19 - 273.15),
    `15` = c(0.0595608, 97.38 - 273.15)
  )
  cf <- coefs[[as.character(band)]]
  if (!is.null(cf)) {
    dat <- dat * cf[1] + cf[2]
  }
  dat
}
# Compute a display height (px) for an image box, preserving the
# 1200 x 926 aspect ratio of the source images after reserving 150 px
# and splitting the remaining width across two boxes.
# NOTE(review): JS('window.innerWidth') only tags a string for
# client-side evaluation; splitting that literal and coercing it with
# as.integer() yields NA when run in R, so this function likely never
# returns a usable number server-side — confirm how it is wired into
# the UI before relying on it.
alto_box <- function(){
ancho <- 1200  # native image width (px)
alto <- 926  # native image height (px)
window_width <- as.integer(unlist(strsplit(JS('window.innerWidth'), 'p'))[1])
window_width <- (window_width - 150)/2  # half of the width left after a 150 px margin
box_height <- round(alto*window_width/ancho, 0)  # value of this last assignment is returned (invisibly)
}
# Map from city display name to its pre-rendered chart PNG.
# File names encode the region (Centro / Norte / Sur) and a panel index.
cap <- c(
"Huaraz" = "Centro_grafica-1.png",
"Lima" = "Centro_grafica-2.png",
"Ica" = "Centro_grafica-3.png",
"Hu\u00e1nuco" = "Centro_grafica-4.png",
"Cerro de Pasco" = "Centro_grafica-5.png",
"Huancayo" = "Centro_grafica-6.png",
"Huancavelica" = "Centro_grafica-7.png",
"Pucallpa" = "Centro_grafica-8.png",
"Tumbes" = "Norte_grafica-1.png",
"Piura" = "Norte_grafica-2.png",
"Chiclayo" = "Norte_grafica-3.png",
"Trujillo" = "Norte_grafica-4.png",
"Cajamarca" = "Norte_grafica-5.png",
"Chachapoyas" = "Norte_grafica-6.png",
"Tarapoto" = "Norte_grafica-7.png",
"Iquitos" = "Norte_grafica-8.png",
"Arequipa" = "Sur_grafica-1.png",
"Ilo" = "Sur_grafica-2.png",
"Tacna" = "Sur_grafica-3.png",
"Ayacucho" = "Sur_grafica-4.png",
"Abancay" = "Sur_grafica-5.png",
"Cusco" = "Sur_grafica-6.png",
"Puno" = "Sur_grafica-7.png",
"Puerto Maldonado" = "Sur_grafica-8.png"
)
# Sample GOES-16 ABI L2 CMIPF GeoTIFF files, keyed by the UI label of
# the spectral band each one contains (band number is encoded in the
# "-M3Cxx" part of the file name).
sel_implot <- c(
"Banda 2" = "PE_OR_ABI-L2-CMIPF-M3C02_G16_s20183251615368_e20183251626135_c20183251626212-114300_0.nc.tiff",
"Banda 7" = "PE_OR_ABI-L2-CMIPF-M3C07_G16_s20180102000410_e20180102011188_c20180102011243.nc.tiff",
"Banda 8" = "PE_OR_ABI-L2-CMIPF-M3C08_G16_s20180101930410_e20180101941177_c20180101941249.nc.tiff",
"Banda 9" = "PE_OR_ABI-L2-CMIPF-M3C09_G16_s20183251615368_e20183251626140_c20183251626217.nc.tiff",
"Banda 13" = "PE_OR_ABI-L2-CMIPF-M3C13_G16_s20183251615368_e20183251626146_c20183251626219.nc.tiff",
"Banda 14" = "PE_OR_ABI-L2-CMIPF-M3C14_G16_s20180101930410_e20180101941177_c20180101941267.nc.tiff",
"Banda 15" = "PE_OR_ABI-L2-CMIPF-M3C15_G16_s20180102000410_e20180102011183_c20180102011270.nc.tiff"
)
# Climatology tables used by the app --------------------------------------
# `meteo` holds monthly station records; the column names used below
# (Anio = year, Mes = month, Departamento, Tmaxp, Tminp, Precs) define
# the expected schema.
meteo <- read.csv("www/para_app.csv")

# Attach a mid-month Date plus an abbreviated-month factor and label.
prueba <- mutate(meteo, Fecha = paste(Anio, Mes, 15, sep = '-'))
prueba <- mutate(prueba, Fecha = as.Date(Fecha))
prueba$Ch_Mes <- factor(prueba$Mes)
levels(prueba$Ch_Mes) <- c("ENE", "FEB", "MAR", "ABR", "MAY", "JUN", "JUL", "AGO", "SEP", "OCT", "NOV", "DEC")
prueba$char <- as.character(prueba$Ch_Mes)

# Monthly climatology per department: mean +/- 1 sd envelopes for the
# maximum (Txclim/High/Low) and minimum (Tnclim/alto/bajo) temperature.
cristina <- group_by(prueba, Departamento, Mes)
cristina <- summarise(cristina,
                      Txclim = mean(Tmaxp, na.rm = TRUE),
                      Ds = sd(Tmaxp, na.rm = TRUE),
                      Tnclim = mean(Tminp, na.rm = TRUE),
                      de = sd(Tminp, na.rm = TRUE))
cristina <- mutate(cristina,
                   High = Txclim + Ds, Low = Txclim - Ds,
                   alto = Tnclim + de, bajo = Tnclim - de)
cristina$Ch_Mes <- factor(cristina$Mes)
levels(cristina$Ch_Mes) <- c("ENE", "FEB", "MAR", "ABR", "MAY", "JUN", "JUL", "AGO", "SEP", "OCT", "NOV", "DEC")
cristina$char <- as.character(cristina$Ch_Mes)

# Per-department descriptive statistics for one variable.
# `grouped` is `prueba` grouped by Departamento, `col` the column name
# as a string and `label` the human-readable variable name; replaces
# three near-identical copy-pasted summarise() blocks.
resumen_variable <- function(grouped, col, label) {
  out <- summarise(grouped,
                   Media = mean(.data[[col]], na.rm = TRUE),
                   Max = max(.data[[col]], na.rm = TRUE),
                   Min = min(.data[[col]], na.rm = TRUE),
                   Mediana = median(.data[[col]], na.rm = TRUE),
                   Desv.est = sd(.data[[col]], na.rm = TRUE),
                   Q1 = quantile(.data[[col]], 0.25, na.rm = TRUE),
                   Q3 = quantile(.data[[col]], 0.75, na.rm = TRUE))
  out$Variable <- label  # recycled to every row, one per department
  select(out, Variable, Departamento, Media, Max, Min, Mediana, Desv.est, Q1, Q3)
}

var_ko <- group_by(prueba, Departamento)
var_fe <- resumen_variable(var_ko, "Tmaxp", "Temperatura maxima")
var_fi <- resumen_variable(var_ko, "Tminp", "Temperatura minima")
var_vi <- resumen_variable(var_ko, "Precs", "Precipitacion")

# UI choice lists and station metadata.
stn <- c("Junin", "Huanuco", "Ancash", "Lima", "Ucayali")
Graf <- c("Temperatura", "Precipitacion")
adriana <- read.csv("www/metadata.csv")
| /helper.r | no_license | brunorcs/OpenCloud | R | false | false | 6,534 | r | # Helper
# Lista de imágenes sinópticas
sinls <- c(
'Agua precipitable' = 'Agua_precipitable.png',
'Presi\u00f3n reducida nivel del mar' = 'presionsuperficial.png',
'L\u00edneas de corriente 200 hPa' = 'streamlines.png',
'Divergencia 850 hPa' = 'div_850.png',
'Divergencia 200 hPa' = 'div_200.png',
'Velocidad vertical 800 hPa' = 'Vel_vert_800.png',
'Velocidad vertical 500 hPa' = 'Vel_vert_500.png',
'Velocidad vertical 300 hPa' = 'Vel_vert_300.png',
'\u00cdndice CAPE' = 'cape.png',
'\u00cdndice GDI' = 'GDI.png',
'\u00cdndice K' = 'IndiceK.png'
)
# Convert raw image counts to physical units for a given ABI band.
# Bands 1-2 are scaled (reflectance percent); bands 7-15 are converted
# linearly and shifted from Kelvin (-273.15) to degrees Celsius.
# Band numbers with no matching branch return `dat` unchanged.
fact <- function(dat, band){
if(band == 1){dat <- dat*0.0003175*100}
else if(band == 2){dat <- dat*0.0002442*100}
else if(band == 7){dat <- dat*0.0130962 + 197.31 - 273.15}
else if(band == 8){dat <- dat*0.0422499 + 138.05 - 273.15}
else if(band == 9){dat <- dat*0.0423391 + 137.70 - 273.15}
else if(band == 10){dat <- dat*0.0498892 + 126.91 - 273.15}
else if(band == 12){dat <- dat*0.0472703 + 117.49 - 273.15}
else if(band == 13){dat <- dat*0.0614533 + 89.62 - 273.15}
else if(band == 14){dat <- dat*0.0598507 + 96.19 - 273.15}
else if(band == 15){dat <- dat*0.0595608 + 97.38 - 273.15}
return(dat)
}
# Compute a display height (px) for an image box, preserving the
# 1200 x 926 aspect ratio of the source images.
# NOTE(review): JS('window.innerWidth') only tags a string for
# client-side evaluation; coercing it with as.integer() after strsplit
# yields NA when run in R, so this likely never returns a usable
# number server-side — confirm how it is wired into the UI.
alto_box <- function(){
ancho <- 1200  # native image width (px)
alto <- 926  # native image height (px)
window_width <- as.integer(unlist(strsplit(JS('window.innerWidth'), 'p'))[1])
window_width <- (window_width - 150)/2  # half of the width left after a 150 px margin
box_height <- round(alto*window_width/ancho, 0)  # value of this last assignment is returned (invisibly)
}
cap <- c(
"Huaraz" = "Centro_grafica-1.png",
"Lima" = "Centro_grafica-2.png",
"Ica" = "Centro_grafica-3.png",
"Hu\u00e1nuco" = "Centro_grafica-4.png",
"Cerro de Pasco" = "Centro_grafica-5.png",
"Huancayo" = "Centro_grafica-6.png",
"Huancavelica" = "Centro_grafica-7.png",
"Pucallpa" = "Centro_grafica-8.png",
"Tumbes" = "Norte_grafica-1.png",
"Piura" = "Norte_grafica-2.png",
"Chiclayo" = "Norte_grafica-3.png",
"Trujillo" = "Norte_grafica-4.png",
"Cajamarca" = "Norte_grafica-5.png",
"Chachapoyas" = "Norte_grafica-6.png",
"Tarapoto" = "Norte_grafica-7.png",
"Iquitos" = "Norte_grafica-8.png",
"Arequipa" = "Sur_grafica-1.png",
"Ilo" = "Sur_grafica-2.png",
"Tacna" = "Sur_grafica-3.png",
"Ayacucho" = "Sur_grafica-4.png",
"Abancay" = "Sur_grafica-5.png",
"Cusco" = "Sur_grafica-6.png",
"Puno" = "Sur_grafica-7.png",
"Puerto Maldonado" = "Sur_grafica-8.png"
)
sel_implot <- c(
"Banda 2" = "PE_OR_ABI-L2-CMIPF-M3C02_G16_s20183251615368_e20183251626135_c20183251626212-114300_0.nc.tiff",
"Banda 7" = "PE_OR_ABI-L2-CMIPF-M3C07_G16_s20180102000410_e20180102011188_c20180102011243.nc.tiff",
"Banda 8" = "PE_OR_ABI-L2-CMIPF-M3C08_G16_s20180101930410_e20180101941177_c20180101941249.nc.tiff",
"Banda 9" = "PE_OR_ABI-L2-CMIPF-M3C09_G16_s20183251615368_e20183251626140_c20183251626217.nc.tiff",
"Banda 13" = "PE_OR_ABI-L2-CMIPF-M3C13_G16_s20183251615368_e20183251626146_c20183251626219.nc.tiff",
"Banda 14" = "PE_OR_ABI-L2-CMIPF-M3C14_G16_s20180101930410_e20180101941177_c20180101941267.nc.tiff",
"Banda 15" = "PE_OR_ABI-L2-CMIPF-M3C15_G16_s20180102000410_e20180102011183_c20180102011270.nc.tiff"
)
#Climatologia
#Para temperatura maxima
meteo = read.csv("www/para_app.csv")
prueba <- mutate(meteo, Fecha = paste(Anio, Mes, 15, sep = '-'))
prueba = mutate(prueba, Fecha = as.Date(Fecha))
prueba$Ch_Mes <- factor(prueba$Mes)
levels(prueba$Ch_Mes) <- c("ENE", "FEB", "MAR", "ABR", "MAY", "JUN", "JUL", "AGO", "SEP", "OCT", "NOV", "DEC")
prueba$char = as.character(prueba$Ch_Mes)
cristina = group_by(prueba,Departamento ,Mes)
cristina = summarise(cristina, Txclim = mean(Tmaxp, na.rm = T), Ds = sd(Tmaxp, na.rm = T), Tnclim = mean(Tminp, na.rm = T), de = sd(Tminp, na.rm = T))
cristina = mutate(cristina, High = Txclim + Ds, Low = Txclim - Ds, alto = Tnclim + de , bajo = Tnclim - de)
cristina$Ch_Mes <- factor(cristina$Mes)
levels(cristina$Ch_Mes) <- c("ENE", "FEB", "MAR", "ABR", "MAY", "JUN", "JUL", "AGO", "SEP", "OCT", "NOV", "DEC")
cristina$char = as.character(cristina$Ch_Mes)
var_ko = group_by(prueba,Departamento)
var_fe = summarise(var_ko,
Media = mean(Tmaxp, na.rm = T),
Max = max(Tmaxp, na.rm = T),
Min = min(Tmaxp, na.rm = T),
Mediana = median(Tmaxp, na.rm = T),
Desv.est = sd(Tmaxp,na.rm = T),
Q1 = quantile(Tmaxp,0.25, na.rm = T),
Q3 = quantile(Tmaxp, 0.75, na.rm = T),
numtotal = length(Tmaxp),
numNA = sum(is.na(Tmaxp)),
Porc_na = (numNA/numtotal)*100)
din = rep('Temperatura maxima',each = 5)
var_fe$Variable = paste0(din)
var_fe = select(var_fe, Variable,Departamento,Media, Max, Min, Mediana, Desv.est, Q1, Q3)
var_fi = summarise(var_ko,
Media = mean(Tminp, na.rm = T),
Max = max(Tminp, na.rm = T),
Min = min(Tminp, na.rm = T),
Mediana = median(Tminp, na.rm = T),
Desv.est = sd(Tminp,na.rm = T),
Q1 = quantile(Tminp,0.25, na.rm = T),
Q3 = quantile(Tminp, 0.75, na.rm = T),
numtotal = length(Tminp),
numNA = sum(is.na(Tminp)),
Porc_na = (numNA/numtotal)*100)
dina = rep('Temperatura minima',each = 5)
var_fi$Variable = paste0(dina)
var_fi = select(var_fi, Variable,Departamento,Media, Max, Min, Mediana, Desv.est, Q1, Q3)
var_vi = summarise(var_ko,
Media = mean(Precs, na.rm = T),
Max = max(Precs, na.rm = T),
Min = min(Precs, na.rm = T),
Mediana = median(Precs, na.rm = T),
Desv.est = sd(Precs,na.rm = T),
Q1 = quantile(Precs,0.25, na.rm = T),
Q3 = quantile(Precs, 0.75, na.rm = T),
numtotal = length(Precs),
numNA = sum(is.na(Precs)),
Porc_na = (numNA/numtotal)*100)
dinm = rep('Precipitacion',each = 5)
var_vi$Variable = paste0(dinm)
var_vi = select(var_vi,Variable,Departamento, Media, Max, Min, Mediana, Desv.est, Q1, Q3)
stn = c("Junin", "Huanuco", "Ancash", "Lima" , "Ucayali")
Graf = c("Temperatura", "Precipitacion")
adriana = read.csv("www/metadata.csv")
|
# Cross-lagged / latent-growth analysis of repeated measurements
# (insulin INS*, glucose BZ*, glucagon GLUK*, free fatty acids FFAMI*
# at 0/30/60/90/120 min — names suggest a tolerance-test time course;
# confirm), fitted with lavaan.
library(lavaan)
library(xlsx)
library(semPlot)
library(GGally)
library(tidyverse)
# NOTE(review): hard-coded absolute setwd() makes the script
# machine-specific; prefer relative paths.
setwd("C:/glucagon_robert/")
glucagon<- read.xlsx('glucagon_cross_legged.xlsx','Sheet 1')
#logtransform and normalize data
dat.glucagon<-log(glucagon[,9:27])  # columns 9:27 hold the measurements
dat.glucagon<-scale(dat.glucagon)  # z-score each column
hist(dat.glucagon)  # quick distribution check of the transformed values
n.glucagon <-cbind(glucagon[,1:8],dat.glucagon)  # re-attach columns 1:8 (IDs/covariates — presumably; confirm)
#visualize correlation matrix
# ----- Define a function for plotting a matrix ----- #
# Draw a matrix `x` as a heat map (blue = low, yellow = high) with a
# color-scale strip on the right.  Optional named arguments via `...`:
#   zlim    - length-2 numeric, overrides the color range
#   xLabels - column labels (default: colnames(x))
#   yLabels - row labels (default: rownames(x))
#   title   - main title
# Side effects only: modifies layout() and par(mar) without restoring
# them afterwards.
myImagePlot <- function(x, ...){
min <- min(x)
max <- max(x)
yLabels <- rownames(x)
xLabels <- colnames(x)
title <-c()
# check for additional function arguments
if( length(list(...)) ){
Lst <- list(...)
if( !is.null(Lst$zlim) ){
min <- Lst$zlim[1]
max <- Lst$zlim[2]
}
if( !is.null(Lst$yLabels) ){
yLabels <- c(Lst$yLabels)
}
if( !is.null(Lst$xLabels) ){
xLabels <- c(Lst$xLabels)
}
if( !is.null(Lst$title) ){
title <- Lst$title
}
}
# check for null values
if( is.null(xLabels) ){
xLabels <- c(1:ncol(x))
}
if( is.null(yLabels) ){
yLabels <- c(1:nrow(x))
}
# Split the device: wide panel for the data map, narrow one for the scale.
layout(matrix(data=c(1,2), nrow=1, ncol=2), widths=c(4,1), heights=c(1,1))
# Red and green range from 0 to 1 while Blue ranges from 1 to 0
ColorRamp <- rgb( seq(0,1,length=256), # Red
seq(0,1,length=256), # Green
seq(1,0,length=256)) # Blue
ColorLevels <- seq(min, max, length=length(ColorRamp))
# Reverse Y axis
# so the first row of `x` appears at the top of the plot
reverse <- nrow(x) : 1
yLabels <- yLabels[reverse]
x <- x[reverse,]
# Data Map
par(mar = c(3,5,2.5,2))
image(1:length(xLabels), 1:length(yLabels), t(x), col=ColorRamp, xlab="",
ylab="", axes=FALSE, zlim=c(min,max))
if( !is.null(title) ){
title(main=title)
}
axis(BELOW<-1, at=1:length(xLabels), labels=xLabels, cex.axis=0.7)
axis(LEFT <-2, at=1:length(yLabels), labels=yLabels, las= HORIZONTAL<-1,
cex.axis=0.7)
# Color Scale
par(mar = c(3,2.5,2.5,2))
image(1, ColorLevels,
matrix(data=ColorLevels, ncol=length(ColorLevels),nrow=1),
col=ColorRamp,
xlab="",ylab="",
xaxt="n")
layout(1)
}
# ----- END plot function ----- #
# Correlation matrix of all measurements (complete rows only), shown
# as a heat map.
corrMat <-cor(dat.glucagon,use = "complete.obs")
myImagePlot(corrMat)
#try growth model
# Latent growth curve for insulin over the five waves: intercept `i`
# (all loadings 1) and linear slope `s` (loadings 0..4), both
# regressed on fchglucagon (presumably a glucagon-change covariate --
# confirm) and AGE.
model <- '
i =~ 1*INS0 + 1*INS30 + 1*INS60 + 1*INS90 + 1*INS120
s =~ 0*INS0 + 1*INS30 + 2*INS60 + 3*INS90 + 4*INS120
# regressions
i ~ fchglucagon + AGE
s ~ fchglucagon + AGE
GLUK0 ~~ GLUK0
# time-varying covariates
#INS0 ~ c1
#INS30 ~ c2
#INS60 ~ c3
#INS90 ~ c4
#INS120 ~ C5
'
fit <- growth(model, data=n.glucagon)
summary(fit)
semPaths(fit,what='std',layout='tree')  # standardized path diagram
coef(fit)
#try simple autoregression model
# One latent factor per analyte across all its time points, with all
# pairwise covariances and mutual regressions among the three factors.
model2 <-'
INS =~ INS0 + INS30 + INS60 + INS90 + INS120
BZ =~ BZN0 + BZ30 + BZ60 + BZ90 + BZ120
GLUK =~ GLUK0 + GLUK30 + GLUK120
INS~~BZ
INS~~GLUK
GLUK~~BZ
#autoregression
INS ~ BZ + GLUK
BZ ~ INS + GLUK
GLUK ~ INS + BZ
'
fit2 <- sem(model2, data=n.glucagon)
summary(fit2)
semPaths(fit2,what='std',layout='tree2')
coef(fit2)
#CLPM INS ~ BZ auto regression
# Cross-lagged panel model (no random intercepts) between glucose
# (single-indicator latents corr*) and insulin (LINS*).  Repeated
# labels constrain paths equal across lags: v1/v2 autoregressions,
# v3/v4 cross-lags, v6 within-wave correlations.
model3 <-'
corr0 =~ BZN0
corr30 =~ BZ30
corr60=~ BZ60
corr90=~ BZ90
corr120 =~ BZ120
#LATENT VARIABLE DEFINITION
LINS0 =~ INS0
LINS30 =~ INS30
LINS60 =~ INS60
LINS90 =~ INS90
LINS120 =~ INS120
#LATENT FACTORS COVARIANCES @0
corr30 ~~ 0*corr0
corr60 ~~ 0*corr0
corr90 ~~ 0*corr0
corr120 ~~ 0*corr0
LINS30 ~~ 0*LINS0
LINS60 ~~ 0*LINS0
LINS90 ~~ 0*LINS0
LINS120 ~~ 0*LINS0
#LAGGED EFFECTS
LINS30 ~ v1*LINS0
LINS60 ~ v1*LINS30
LINS90 ~ v1*LINS60
LINS120 ~ v1*LINS90
corr30 ~ v2*corr0
corr60 ~ v2*corr30
corr90 ~ v2*corr60
corr120 ~ v2*corr90
LINS30 ~ v3*corr0
LINS60 ~ v3*corr30
LINS90 ~ v3*corr60
LINS120 ~ v3*corr90
corr30 ~ v4*LINS0
corr60 ~ v4*LINS30
corr90 ~ v4*LINS60
corr120 ~ v4*LINS90
# CORRELATIONS
LINS0 ~~ v5*corr0
LINS30 ~~ v6*corr30
LINS60 ~~ v6*corr60
LINS90 ~~ v6*corr90
LINS120 ~~ v6*corr120
'
fit3 <- sem(model3, data=n.glucagon,missing='ml')  # missing='ml' = full-information ML
summary(fit3)
semPaths(fit3,what='std',layout='tree2')
coef(fit3)
#RICLPM model INS ~ BZ unconstrained
# Random-intercept cross-lagged panel model between insulin (LINS*)
# and glucose (LBZ*), with lag-specific (unconstrained) parameters:
# alpha/beta = insulin outcome, gamma/delta = glucose outcome,
# u/v = wave residual variances, b = within-wave correlations.
# FIX: the BZ90 regression previously reused the label delta5 (already
# used on the BZ120 line), silently forcing the lag-3 and lag-4 glucose
# autoregressive paths to be equal in an otherwise unconstrained model;
# it now gets its own label delta4.
# NOTE(review): the glucose outcomes in the lagged regressions are the
# observed variables (BZ30..BZ120), not the latents (LBZ*), unlike the
# insulin side — confirm this asymmetry is intentional.
model4 <-'
#LATENT VARIABLE DEFINITION
LBZ0 =~ 1*BZN0
LBZ30 =~ 1*BZ30
LBZ60 =~ 1*BZ60
LBZ90 =~ 1*BZ90
LBZ120 =~ 1*BZ120
LINS0 =~ 1*INS0
LINS30 =~ 1*INS30
LINS60 =~ 1*INS60
LINS90 =~ 1*INS90
LINS120 =~ 1*INS120
#Latent mean Structure with intercepts
kappa =~ 1*BZN0 + 1*BZ30 + 1*BZ60 + 1*BZ90 + 1*BZ120
omega =~ 1*INS0 + 1*INS30 + 1*INS60 + 1*INS90 + 1*INS120
#intercepts
#mu: group mean per wave covariants
BZN0 ~ mu1*1
BZ30 ~ mu2*1
BZ60 ~ mu3*1
BZ90 ~ mu4*1
BZ120 ~ mu5*1
#pi: group mean per wave insulin
INS0 ~ pi1*1
INS30 ~ pi2*1
INS60 ~ pi3*1
INS90 ~ pi4*1
INS120 ~ pi5*1
#kappa: random intercepts for covariants
#omega: random intercepts for Inuslin
kappa ~~ kappa #variance
omega ~~ omega #variance
kappa ~~ omega #covariance
#LATENT FACTORS COVARIANCES @0
LBZ30 ~~ 0*LBZ0
LBZ60 ~~ 0*LBZ0
LBZ90 ~~ 0*LBZ0
LBZ120 ~~ 0*LBZ0
LINS30 ~~ 0*LINS0
LINS60 ~~ 0*LINS0
LINS90 ~~ 0*LINS0
LINS120 ~~ 0*LINS0
#LAGGED EFFECTS
#effects to be the same across both lags.
LINS120 ~ alpha5*LINS90 + beta5*LBZ90
LINS90 ~ alpha4*LINS60 + beta4*LBZ60
LINS60 ~ alpha3*LINS30 + beta3*LBZ30
LINS30 ~ alpha2*LINS0 + beta2*LBZ0
BZ120 ~ gamma5*LINS90 + delta5*LBZ90
BZ90 ~ gamma4*LINS60 + delta4*LBZ60
BZ60 ~ gamma3*LINS30 + delta3*LBZ30
BZ30 ~ gamma2*LINS0 + delta2*LBZ0
#variance
LINS0 ~~ LINS0
LINS30 ~~ u2*LINS30
LINS60 ~~ u3*LINS60
LINS90 ~~ u4*LINS90
LINS120 ~~ u5*LINS120
#variance
LBZ0 ~~ LBZ0
LBZ30 ~~ v2*LBZ30
LBZ60 ~~ v3*LBZ60
LBZ90 ~~ v4*LBZ90
LBZ120 ~~ v5*LBZ120
# LBZELATIONS
LINS0 ~~ b0*LBZ0
LINS30 ~~ b2*LBZ30
LINS60 ~~ b3*LBZ60
LINS90 ~~ b4*LBZ90
LINS120 ~~ b5*LBZ120
'
# lavaan() with all auto.* switches off: only the parameters written
# above are estimated; missing='ml' uses full-information ML.
fit4 <- lavaan(model4, data=n.glucagon,missing='ml',
int.ov.free = FALSE,
int.lv.free = FALSE,
auto.fix.first = FALSE,
auto.fix.single = FALSE,
auto.cov.lv.x = FALSE,
auto.cov.y = FALSE,
auto.var = FALSE)
summary(fit4,standardized = TRUE)
Estimates<-parameterEstimates(fit4,standardized=TRUE)
# Print standardized estimates and p-values for the two-character
# labels only (u*/v* residual variances and b* correlations).
print(Estimates[nchar(Estimates[,"label"])==2,"std.all"])
print(Estimates[nchar(Estimates[,"label"])==2,"pvalue"])
semPaths(fit4,what='std',layout='tree2')
coef(fit4)
#CLPM model
# Same layout as model4, but kappa/omega variances and their
# covariance are fixed to 0, reducing the RICLPM to a plain
# cross-lagged panel model (no random intercepts).
# FIX: the BZ90 regression previously reused the label delta5 (already
# used on the BZ120 line), silently equality-constraining two glucose
# autoregressive paths; it now gets its own label delta4, matching the
# lag-specific pattern of every other parameter here.
model5 <-'
#LATENT VARIABLE DEFINITION
LBZ0 =~ 1*BZN0
LBZ30 =~ 1*BZ30
LBZ60 =~ 1*BZ60
LBZ90 =~ 1*BZ90
LBZ120 =~ 1*BZ120
LINS0 =~ 1*INS0
LINS30 =~ 1*INS30
LINS60 =~ 1*INS60
LINS90 =~ 1*INS90
LINS120 =~ 1*INS120
#Latent mean Structure with intercepts
kappa =~ 1*BZN0 + 1*BZ30 + 1*BZ60 + 1*BZ90 + 1*BZ120
omega =~ 1*INS0 + 1*INS30 + 1*INS60 + 1*INS90 + 1*INS120
#intercepts
#mu: group mean per wave covariants
BZN0 ~ mu1*1
BZ30 ~ mu2*1
BZ60 ~ mu3*1
BZ90 ~ mu4*1
BZ120 ~ mu5*1
#pi: group mean per wave insulin
INS0 ~ pi1*1
INS30 ~ pi2*1
INS60 ~ pi3*1
INS90 ~ pi4*1
INS120 ~ pi5*1
#kappa: random intercepts for covariants
#omega: random intercepts for Inuslin
kappa ~~ 0*kappa #variance
omega ~~ 0*omega #variance
kappa ~~ 0*omega #covariance
#LATENT FACTORS COVARIANCES @0
LBZ30 ~~ 0*LBZ0
LBZ60 ~~ 0*LBZ0
LBZ90 ~~ 0*LBZ0
LBZ120 ~~ 0*LBZ0
LINS30 ~~ 0*LINS0
LINS60 ~~ 0*LINS0
LINS90 ~~ 0*LINS0
LINS120 ~~ 0*LINS0
#LAGGED EFFECTS
#effects to be the same across both lags.
LINS120 ~ alpha5*LINS90 + beta5*LBZ90
LINS90 ~ alpha4*LINS60 + beta4*LBZ60
LINS60 ~ alpha3*LINS30 + beta3*LBZ30
LINS30 ~ alpha2*LINS0 + beta2*LBZ0
BZ120 ~ gamma5*LINS90 + delta5*LBZ90
BZ90 ~ gamma4*LINS60 + delta4*LBZ60
BZ60 ~ gamma3*LINS30 + delta3*LBZ30
BZ30 ~ gamma2*LINS0 + delta2*LBZ0
#variance
LINS0 ~~ LINS0
LINS30 ~~ u2*LINS30
LINS60 ~~ u3*LINS60
LINS90 ~~ u4*LINS90
LINS120 ~~ u5*LINS120
#variance
LBZ0 ~~ LBZ0
LBZ30 ~~ v2*LBZ30
LBZ60 ~~ v3*LBZ60
LBZ90 ~~ v4*LBZ90
LBZ120 ~~ v5*LBZ120
# LBZELATIONS
LINS0 ~~ LBZ0
LINS30 ~~ LBZ30
LINS60 ~~ LBZ60
LINS90 ~~ LBZ90
LINS120 ~~ LBZ120
'
# lavaan() with all auto.* switches off: only the parameters written
# above are estimated; missing='ml' uses full-information ML.
fit5 <- lavaan(model5, data=n.glucagon,missing='ml',
int.ov.free = FALSE,
int.lv.free = FALSE,
auto.fix.first = FALSE,
auto.fix.single = FALSE,
auto.cov.lv.x = FALSE,
auto.cov.y = FALSE,
auto.var = FALSE)
summary(fit5,standardized = TRUE)
semPaths(fit5,what='std',layout='tree')
coef(fit5)
#RICLPM model AR structure
# RICLPM variant of model4 with all lagged effects, residual variances
# and within-wave correlations constrained equal across lags (single
# shared labels alpha/beta/gamma/delta/u/v/b).
model6 <-'
#LATENT VARIABLE DEFINITION
LBZ0 =~ 1*BZN0
LBZ30 =~ 1*BZ30
LBZ60 =~ 1*BZ60
LBZ90 =~ 1*BZ90
LBZ120 =~ 1*BZ120
LINS0 =~ 1*INS0
LINS30 =~ 1*INS30
LINS60 =~ 1*INS60
LINS90 =~ 1*INS90
LINS120 =~ 1*INS120
#Latent mean Structure with intercepts
kappa =~ 1*BZN0 + 1*BZ30 + 1*BZ60 + 1*BZ90 + 1*BZ120
omega =~ 1*INS0 + 1*INS30 + 1*INS60 + 1*INS90 + 1*INS120
#intercepts
#mu: group mean per wave covariants
BZN0 ~ mu1*1
BZ30 ~ mu2*1
BZ60 ~ mu3*1
BZ90 ~ mu4*1
BZ120 ~ mu5*1
#pi: group mean per wave insulin
INS0 ~ pi1*1
INS30 ~ pi2*1
INS60 ~ pi3*1
INS90 ~ pi4*1
INS120 ~ pi5*1
#kappa: random intercepts for covariants
#omega: random intercepts for Inuslin
kappa ~~ kappa #variance
omega ~~ omega #variance
kappa ~~ omega #covariance
#LATENT FACTORS COVARIANCES @0
LBZ30 ~~ 0*LBZ0
LBZ60 ~~ 0*LBZ0
LBZ90 ~~ 0*LBZ0
LBZ120 ~~ 0*LBZ0
LINS30 ~~ 0*LINS0
LINS60 ~~ 0*LINS0
LINS90 ~~ 0*LINS0
LINS120 ~~ 0*LINS0
#LAGGED EFFECTS
#effects to be the same across both lags.
LINS120 ~ alpha*LINS90 + beta*LBZ90
LINS90 ~ alpha*LINS60 + beta*LBZ60
LINS60 ~ alpha*LINS30 + beta*LBZ30
LINS30 ~ alpha*LINS0 + beta*LBZ0
BZ120 ~ gamma*LINS90 + delta*LBZ90
BZ90 ~ gamma*LINS60 + delta*LBZ60
BZ60 ~ gamma*LINS30 + delta*LBZ30
BZ30 ~ gamma*LINS0 + delta*LBZ0
#variance
LINS0 ~~ LINS0
LINS30 ~~ u*LINS30
LINS60 ~~ u*LINS60
LINS90 ~~ u*LINS90
LINS120 ~~ u*LINS120
#variance
LBZ0 ~~ LBZ0
LBZ30 ~~ v*LBZ30
LBZ60 ~~ v*LBZ60
LBZ90 ~~ v*LBZ90
LBZ120 ~~ v*LBZ120
# CORRELATIONS
LINS0 ~~ LBZ0
LINS30 ~~ b*LBZ30
LINS60 ~~ b*LBZ60
LINS90 ~~ b*LBZ90
LINS120 ~~ b*LBZ120
'
# lavaan() with all auto.* switches off: only the parameters written
# above are estimated; missing='ml' uses full-information ML.
fit6 <- lavaan(model6, data=n.glucagon,missing='ml',
int.ov.free = F,
int.lv.free = F,
auto.fix.first = F,
auto.fix.single = F,
auto.cov.lv.x = F,
auto.cov.y = F,
auto.var = F)
summary(fit6,standardized = T)
semPaths(fit6,what='std',layout='tree')
coef(fit6)
#compare un, non-RI & AR models
# Chi-square difference test between the unconstrained RICLPM (fit4),
# the no-random-intercept CLPM (fit5) and the lag-constrained RICLPM
# (fit6).
anova(fit4,fit5,fit6)#unconstrained model has the best fit
#plot correlations between latent variables
#plot prediction
# Pairwise scatter matrix of the factor scores predicted by fit4,
# excluding the random intercepts kappa/omega.
predict(fit4) %>%
as.data.frame %>%
select(-kappa, -omega) %>%
ggpairs(lower = list(continuous = wrap(ggally_smooth, alpha = .5))) +
theme_classic()
#plot raw
# Same scatter matrix on the raw (log-scaled) measurements.
dat.glucagon %>%
as.data.frame %>%
ggpairs(lower = list(continuous = wrap(ggally_smooth, alpha = .5))) +
theme_classic()
#RICLPM glucagon
# RICLPM between insulin and glucagon, restricted to the 0/30/120-min
# waves because glucagon (GLUK*) is only measured at those three
# time points.  Labels are lag-specific (unconstrained).
model7 <-'
#LATENT VARIABLE DEFINITION
LGLUK0 =~ 1*GLUK0
LGLUK30 =~ 1*GLUK30
LGLUK120 =~ 1*GLUK120
LINS0 =~ 1*INS0
LINS30 =~ 1*INS30
LINS120 =~ 1*INS120
#Latent mean Structure with intercepts
kappa =~ 1*GLUK0 + 1*GLUK30 + 1*GLUK120
omega =~ 1*INS0 + 1*INS30 + 1*INS120
#intercepts
#mu: group mean per wave covariants
GLUK0 ~ mu1*1
GLUK30 ~ mu2*1
GLUK120 ~ mu5*1
#pi: group mean per wave insulin
INS0 ~ pi1*1
INS30 ~ pi2*1
INS120 ~ pi5*1
#kappa: random intercepts for covariants
#omega: random intercepts for Inuslin
kappa ~~ kappa #variance
omega ~~ omega #variance
kappa ~~ omega #covariance
#LATENT FACTORS COVARIANCES @0
LGLUK30 ~~ 0*LGLUK0
LGLUK120 ~~ 0*LGLUK0
LINS30 ~~ 0*LINS0
LINS120 ~~ 0*LINS0
#LAGGED EFFECTS
#effects to be the same across both lags.
LINS120 ~ alpha3*LINS30 + beta3*LGLUK30
LINS30 ~ alpha2*LINS0 + beta2*LGLUK0
GLUK120 ~ gamma3*LINS30 + delta3*LGLUK30
GLUK30 ~ gamma2*LINS0 + delta2*LGLUK0
#variance
LINS0 ~~ LINS0
LINS30 ~~ u2*LINS30
LINS120 ~~ u5*LINS120
#variance
LGLUK0 ~~ LGLUK0
LGLUK30 ~~ v2*LGLUK30
LGLUK120 ~~ v5*LGLUK120
# CORRELATIONS
LINS0 ~~ b0*LGLUK0
LINS30 ~~ b2*LGLUK30
LINS120 ~~ b5*LGLUK120
'
# lavaan() with all auto.* switches off: only the parameters written
# above are estimated; missing='ml' uses full-information ML.
fit7 <- lavaan(model7, data=n.glucagon,missing='ml',
int.ov.free = F,
int.lv.free = F,
auto.fix.first = F,
auto.fix.single = F,
auto.cov.lv.x = F,
auto.cov.y = F,
auto.var = F)
summary(fit7,standardized = T)
Estimates<-parameterEstimates(fit7,standardized=T)
# Standardized estimates / p-values for the two-character labels only
# (u*/v* residual variances and b* correlations).
print(Estimates[nchar(Estimates[,"label"])==2,"std.all"])
print(Estimates[nchar(Estimates[,"label"])==2,"pvalue"])
coef(fit7)
#RICLPM model INS ~ INS + BZ + GLUK + FFAMI
model8 <-'
#LATENT VARIABLE DEFINITION
LBZ0 =~ 1*BZN0
LBZ30 =~ 1*BZ30
LBZ60 =~ 1*BZ60
LBZ90 =~ 1*BZ90
LBZ120 =~ 1*BZ120
LGLUK0 =~ 1*GLUK0
LGLUK30 =~ 1*GLUK30
LGLUK120 =~ 1*GLUK120
LFF0 =~ 1*FFAMI0
LFF30 =~ 1*FFAMI30
LFF60 =~ 1*FFAMI60
LFF90 =~ 1*FFAMI90
LFF120 =~ 1*FFAMI120
LINS0 =~ 1*INS0
LINS30 =~ 1*INS30
LINS60 =~ 1*INS60
LINS90 =~ 1*INS90
LINS120 =~ 1*INS120
#Latent mean Structure with intercepts
kappa =~ 1*BZN0 + 1*BZ30 + 1*BZ60 + 1*BZ90 + 1*BZ120
omega =~ 1*INS0 + 1*INS30 + 1*INS60 + 1*INS90 + 1*INS120
lambda =~ 1*GLUK0 + 1*GLUK30 + 1*GLUK120
iota =~ 1*FFAMI0 + 1*FFAMI30 + 1*FFAMI60 + 1*FFAMI90 + 1*FFAMI120
#intercepts
#mu: group mean per wave covariants
BZN0 ~ mu1*1
BZ30 ~ mu2*1
BZ60 ~ mu3*1
BZ90 ~ mu4*1
BZ120 ~ mu5*1
#pi: group mean per wave insulin
INS0 ~ pi1*1
INS30 ~ pi2*1
INS60 ~ pi3*1
INS90 ~ pi4*1
INS120 ~ pi5*1
#zeta: group mean per wave glucagon
GLUK0 ~ zeta1*1
GLUK30 ~ zeta2*1
GLUK120 ~ zeta5*1
#eta: group mean per wave FFAMI
FFAMI0 ~ eta1*1
FFAMI30 ~ eta2*1
FFAMI60 ~ eta3*1
FFAMI90 ~ eta4*1
FFAMI120 ~ eta5*1
#kappa: random intercepts for covariants
#omega: random intercepts for Inuslin
#kappa ~~ kappa #variance
#omega ~~ omega #variance
#lambda ~~ lambda
#iota ~~ iota
kappa ~~ omega #covariance
kappa ~~ lambda
kappa ~~ iota
omega ~~ lambda
omega ~~ iota
lambda ~~ iota
#LATENT FACTORS COVARIANCES @0
LBZ30 ~~ 0*LBZ0
LBZ60 ~~ 0*LBZ0
LBZ90 ~~ 0*LBZ0
LBZ120 ~~ 0*LBZ0
LINS30 ~~ 0*LINS0
LINS60 ~~ 0*LINS0
LINS90 ~~ 0*LINS0
LINS120 ~~ 0*LINS0
LGLUK30 ~~ 0*LGLUK0
LGLUK120 ~~ 0*LGLUK0
LFF30 ~~ 0*LFF0
LFF60 ~~ 0*LFF0
LFF90 ~~ 0*LFF0
LFF120 ~~ 0*LFF0
#LAGGED EFFECTS
#effects to be the same across both lags.
LINS120 ~ alpha5*LINS90 + beta5*LBZ90 + delta5*LFF90
LINS90 ~ alpha4*LINS60 + beta4*LBZ60 + delta4*LFF60
LINS60 ~ alpha3*LINS30 + beta3*LBZ30 + gamma3*LGLUK30 + delta3*LFF30
LINS30 ~ alpha2*LINS0 + beta2*LBZ0 + gamma2*LGLUK0 + delta2*LFF0
#variance
LINS0 ~~ LINS0
LINS30 ~~ u2*LINS30
LINS60 ~~ u3*LINS60
LINS90 ~~ u4*LINS90
LINS120 ~~ u5*LINS120
#variance
LBZ0 ~~ LBZ0
LBZ30 ~~ v2*LBZ30
LBZ60 ~~ v3*LBZ60
LBZ90 ~~ v4*LBZ90
LBZ120 ~~ v5*LBZ120
#variance
LGLUK0 ~~ LGLUK0
LGLUK30 ~~ w2*LGLUK30
LGLUK120 ~~ w5*LGLUK120
#variance
LFF0 ~~ LFF0
LFF30 ~~ x2*LFF30
LFF60 ~~ x3*LFF60
LFF90 ~~ x4*LFF90
LFF120 ~~ x5*LFF120
# CORRELATIONS
LINS0 ~~ a0*LBZ0
LINS30 ~~ a2*LBZ30
LINS60 ~~ a3*LBZ60
LINS90 ~~ a4*LBZ90
LINS120 ~~ a5*LBZ120
LINS0 ~~ b0*LGLUK0
LINS30 ~~ b2*LGLUK30
LINS120 ~~ b5*LGLUK120
LINS0 ~~ c0*LFF0
LINS30 ~~ c2*LFF30
LINS60 ~~ c3*LFF60
LINS90 ~~ c4*LFF90
LINS120 ~~ c5*LFF120
'
# Fit the RI-CLPM specified in model8 with lavaan. Every auto.* default is
# switched off because all intercepts, (co)variances and regressions are
# written out explicitly in the model syntax; missing='ml' requests
# full-information maximum likelihood for missing data.
# NOTE(review): `F`/`T` are used for FALSE/TRUE throughout — works, but the
# full words are safer since T/F can be reassigned.
fit8 <- lavaan(model8, data=n.glucagon,missing='ml',
int.ov.free = F,
int.lv.free = F,
auto.fix.first = F,
auto.fix.single = F,
auto.cov.lv.x = F,
auto.cov.y = F,
auto.var = F)
# Model-implied covariance matrix of the latent variables
lavInspect(fit8, "cov.lv")
summary(fit8,standardized = T)
# Standardized estimates and p-values for the two-character labels
# (a0..a5, b0..b5, c0..c5, u2..u5, v2..v5, w2/w5, x2..x5), i.e. the
# within-wave correlation and residual-variance parameters of model8.
Estimates<-parameterEstimates(fit8,standardized=T)
print(Estimates[nchar(Estimates[,"label"])==2,"std.all"])
print(Estimates[nchar(Estimates[,"label"])==2,"pvalue"])
coef(fit8)
#plot prediction
# Factor scores for all latent variables, dropping the random intercepts
# (kappa, omega), shown as a scatter-plot matrix with smoothers (GGally).
predict(fit8) %>%
as.data.frame %>%
select(-kappa, -omega) %>%
ggpairs(lower = list(continuous = wrap(ggally_smooth, alpha = .5))) +
theme_classic()
#RICLPM model unconstrained
#INS ~ INS + BZ + GLUK + FFAMI
#GLUK ~ INS + BZ + GLUK + FFAMI
model9 <-'
#LATENT VARIABLE DEFINITION
LBZ0 =~ 1*BZN0
LBZ30 =~ 1*BZ30
LBZ60 =~ 1*BZ60
LBZ90 =~ 1*BZ90
LBZ120 =~ 1*BZ120
LGLUK0 =~ 1*GLUK0
LGLUK30 =~ 1*GLUK30
LGLUK120 =~ 1*GLUK120
LFF0 =~ 1*FFAMI0
LFF30 =~ 1*FFAMI30
LFF60 =~ 1*FFAMI60
LFF90 =~ 1*FFAMI90
LFF120 =~ 1*FFAMI120
LINS0 =~ 1*INS0
LINS30 =~ 1*INS30
LINS60 =~ 1*INS60
LINS90 =~ 1*INS90
LINS120 =~ 1*INS120
#Latent mean Structure with intercepts
kappa =~ 1*BZN0 + 1*BZ30 + 1*BZ60 + 1*BZ90 + 1*BZ120
omega =~ 1*INS0 + 1*INS30 + 1*INS60 + 1*INS90 + 1*INS120
lambda =~ 1*GLUK0 + 1*GLUK30 + 1*GLUK120
iota =~ 1*FFAMI0 + 1*FFAMI30 + 1*FFAMI60 + 1*FFAMI90 + 1*FFAMI120
#intercepts
#mu: group mean per wave covariants
BZN0 ~ mu1*1
BZ30 ~ mu2*1
BZ60 ~ mu3*1
BZ90 ~ mu4*1
BZ120 ~ mu5*1
#pi: group mean per wave insulin
INS0 ~ pi1*1
INS30 ~ pi2*1
INS60 ~ pi3*1
INS90 ~ pi4*1
INS120 ~ pi5*1
#zeta: group mean per wave glucagon
GLUK0 ~ zeta1*1
GLUK30 ~ zeta2*1
GLUK120 ~ zeta5*1
#eta: group mean per wave FFAMI
FFAMI0 ~ eta1*1
FFAMI30 ~ eta2*1
FFAMI60 ~ eta3*1
FFAMI90 ~ eta4*1
FFAMI120 ~ eta5*1
#kappa: random intercepts for covariants
#omega: random intercepts for Inuslin
#kappa ~~ kappa #variance
#omega ~~ omega #variance
#lambda ~~ lambda
#iota ~~ iota
#kappa ~~ omega #covariance
#kappa ~~ lambda
#kappa ~~ iota
#omega ~~ lambda
#omega ~~ iota
#lambda ~~ iota
#LATENT FACTORS COVARIANCES @0
LBZ30 ~~ 0*LBZ0
LBZ60 ~~ 0*LBZ0
LBZ90 ~~ 0*LBZ0
LBZ120 ~~ 0*LBZ0
LINS30 ~~ 0*LINS0
LINS60 ~~ 0*LINS0
LINS90 ~~ 0*LINS0
LINS120 ~~ 0*LINS0
LGLUK30 ~~ 0*LGLUK0
LGLUK120 ~~ 0*LGLUK0
LFF30 ~~ 0*LFF0
LFF60 ~~ 0*LFF0
LFF90 ~~ 0*LFF0
LFF120 ~~ 0*LFF0
#LAGGED EFFECTS
#effects to be the same across both lags.
LINS120 ~ alpha5*LINS90 + beta5*LBZ90 + delta5*LFF90
LINS90 ~ alpha4*LINS60 + beta4*LBZ60 + delta4*LFF60
LINS60 ~ alpha3*LINS30 + beta3*LBZ30 + gamma3*LGLUK30 + delta3*LFF30
LINS30 ~ alpha2*LINS0 + beta2*LBZ0 + gamma2*LGLUK0 + delta2*LFF0
LGLUK120 ~ agluk5*LINS90 + bgluk5*LBZ90 + dgluk5*LFF90
LGLUK30 ~ agluk2*LINS0 + bgluk2*LBZ0 + ggluk2*LGLUK0 + dgluk2*LFF0
#variance
LINS0 ~~ LINS0
LINS30 ~~ u2*LINS30
LINS60 ~~ u3*LINS60
LINS90 ~~ u4*LINS90
LINS120 ~~ u5*LINS120
#variance
LBZ0 ~~ LBZ0
LBZ30 ~~ v2*LBZ30
LBZ60 ~~ v3*LBZ60
LBZ90 ~~ v4*LBZ90
LBZ120 ~~ v5*LBZ120
#variance
LGLUK0 ~~ LGLUK0
LGLUK30 ~~ w2*LGLUK30
LGLUK120 ~~ w5*LGLUK120
#variance
LFF0 ~~ LFF0
LFF30 ~~ x2*LFF30
LFF60 ~~ x3*LFF60
LFF90 ~~ x4*LFF90
LFF120 ~~ x5*LFF120
# CORRELATIONS
LINS0 ~~ a0*LBZ0
LINS30 ~~ a2*LBZ30
LINS60 ~~ a3*LBZ60
LINS90 ~~ a4*LBZ90
LINS120 ~~ a5*LBZ120
LINS0 ~~ b0*LGLUK0
LINS30 ~~ b2*LGLUK30
LINS120 ~~ b5*LGLUK120
LINS0 ~~ c0*LFF0
LINS30 ~~ c2*LFF30
LINS60 ~~ c3*LFF60
LINS90 ~~ c4*LFF90
LINS120 ~~ c5*LFF120
'
fit9 <- lavaan(model9, data=n.glucagon,missing='ml',
int.ov.free = F,
int.lv.free = F,
auto.fix.first = F,
auto.fix.single = F,
auto.cov.lv.x = F,
auto.cov.y = F,
auto.var = F)
lavMatrix <-lavInspect(fit9, "cov.lv")
myImagePlot(lavMatrix)
summary(fit9,standardized = T)
Estimates<-parameterEstimates(fit9,standardized=T)
print(Estimates[nchar(Estimates[,"label"])==2,"std.all"])
print(Estimates[nchar(Estimates[,"label"])==2,"pvalue"])
coef(fit9)
| /glucagon.R | no_license | eksnoF/SEM_glucagon | R | false | false | 19,630 | r | library(lavaan)
library(xlsx)
library(semPlot)
library(GGally)
library(tidyverse)
setwd("C:/glucagon_robert/")
glucagon<- read.xlsx('glucagon_cross_legged.xlsx','Sheet 1')
#logtransform and normalize data
dat.glucagon<-log(glucagon[,9:27])
dat.glucagon<-scale(dat.glucagon)
hist(dat.glucagon)
n.glucagon <-cbind(glucagon[,1:8],dat.glucagon)
#visualize correlation matrix
# ----- Define a function for plotting a matrix ----- #
# Draw a heat map of a numeric matrix with a colour-scale legend.
#
# Args:
#   x   : numeric matrix to display (rows -> y axis, columns -> x axis)
#   ... : optional overrides — zlim (length-2 colour range), xLabels,
#         yLabels, title; anything else is ignored.
#
# Side effects only: draws two panels (map + colour key) via layout() on the
# active graphics device and resets the layout before returning.
myImagePlot <- function(x, ...){
# Colour range defaults to the data range; may be overridden via zlim below.
# NOTE(review): `min`/`max` shadow the base functions inside this body.
min <- min(x)
max <- max(x)
yLabels <- rownames(x)
xLabels <- colnames(x)
title <-c()
# check for additional function arguments
if( length(list(...)) ){
Lst <- list(...)
if( !is.null(Lst$zlim) ){
min <- Lst$zlim[1]
max <- Lst$zlim[2]
}
if( !is.null(Lst$yLabels) ){
yLabels <- c(Lst$yLabels)
}
if( !is.null(Lst$xLabels) ){
xLabels <- c(Lst$xLabels)
}
if( !is.null(Lst$title) ){
title <- Lst$title
}
}
# check for null values
# Fall back to numeric indices when the matrix has no dimnames.
if( is.null(xLabels) ){
xLabels <- c(1:ncol(x))
}
if( is.null(yLabels) ){
yLabels <- c(1:nrow(x))
}
# Two panels side by side: the data map (width 4) and the colour key (width 1).
layout(matrix(data=c(1,2), nrow=1, ncol=2), widths=c(4,1), heights=c(1,1))
# Red and green range from 0 to 1 while Blue ranges from 1 to 0
ColorRamp <- rgb( seq(0,1,length=256), # Red
seq(0,1,length=256), # Green
seq(1,0,length=256)) # Blue
ColorLevels <- seq(min, max, length=length(ColorRamp))
# Reverse Y axis
# image() draws row 1 at the bottom; flip so row 1 appears at the top.
reverse <- nrow(x) : 1
yLabels <- yLabels[reverse]
x <- x[reverse,]
# Data Map
par(mar = c(3,5,2.5,2))
image(1:length(xLabels), 1:length(yLabels), t(x), col=ColorRamp, xlab="",
ylab="", axes=FALSE, zlim=c(min,max))
if( !is.null(title) ){
title(main=title)
}
# BELOW<-1 / LEFT<-2 / HORIZONTAL<-1 are self-documenting in-place
# assignments supplying the axis side and las codes.
axis(BELOW<-1, at=1:length(xLabels), labels=xLabels, cex.axis=0.7)
axis(LEFT <-2, at=1:length(yLabels), labels=yLabels, las= HORIZONTAL<-1,
cex.axis=0.7)
# Color Scale
# Colour key: a one-column image of the colour levels with a labelled y axis.
par(mar = c(3,2.5,2.5,2))
image(1, ColorLevels,
matrix(data=ColorLevels, ncol=length(ColorLevels),nrow=1),
col=ColorRamp,
xlab="",ylab="",
xaxt="n")
layout(1)
}
# ----- END plot function ----- #
corrMat <-cor(dat.glucagon,use = "complete.obs")
myImagePlot(corrMat)
#try growth model
model <- '
i =~ 1*INS0 + 1*INS30 + 1*INS60 + 1*INS90 + 1*INS120
s =~ 0*INS0 + 1*INS30 + 2*INS60 + 3*INS90 + 4*INS120
# regressions
i ~ fchglucagon + AGE
s ~ fchglucagon + AGE
GLUK0 ~~ GLUK0
# time-varying covariates
#INS0 ~ c1
#INS30 ~ c2
#INS60 ~ c3
#INS90 ~ c4
#INS120 ~ C5
'
fit <- growth(model, data=n.glucagon)
summary(fit)
semPaths(fit,what='std',layout='tree')
coef(fit)
#try simple autoregression model
model2 <-'
INS =~ INS0 + INS30 + INS60 + INS90 + INS120
BZ =~ BZN0 + BZ30 + BZ60 + BZ90 + BZ120
GLUK =~ GLUK0 + GLUK30 + GLUK120
INS~~BZ
INS~~GLUK
GLUK~~BZ
#autoregression
INS ~ BZ + GLUK
BZ ~ INS + GLUK
GLUK ~ INS + BZ
'
fit2 <- sem(model2, data=n.glucagon)
summary(fit2)
semPaths(fit2,what='std',layout='tree2')
coef(fit2)
#CLPM INS ~ BZ autoregression
model3 <-'
corr0 =~ BZN0
corr30 =~ BZ30
corr60=~ BZ60
corr90=~ BZ90
corr120 =~ BZ120
#LATENT VARIABLE DEFINITION
LINS0 =~ INS0
LINS30 =~ INS30
LINS60 =~ INS60
LINS90 =~ INS90
LINS120 =~ INS120
#LATENT FACTORS COVARIANCES @0
corr30 ~~ 0*corr0
corr60 ~~ 0*corr0
corr90 ~~ 0*corr0
corr120 ~~ 0*corr0
LINS30 ~~ 0*LINS0
LINS60 ~~ 0*LINS0
LINS90 ~~ 0*LINS0
LINS120 ~~ 0*LINS0
#LAGGED EFFECTS
LINS30 ~ v1*LINS0
LINS60 ~ v1*LINS30
LINS90 ~ v1*LINS60
LINS120 ~ v1*LINS90
corr30 ~ v2*corr0
corr60 ~ v2*corr30
corr90 ~ v2*corr60
corr120 ~ v2*corr90
LINS30 ~ v3*corr0
LINS60 ~ v3*corr30
LINS90 ~ v3*corr60
LINS120 ~ v3*corr90
corr30 ~ v4*LINS0
corr60 ~ v4*LINS30
corr90 ~ v4*LINS60
corr120 ~ v4*LINS90
# CORRELATIONS
LINS0 ~~ v5*corr0
LINS30 ~~ v6*corr30
LINS60 ~~ v6*corr60
LINS90 ~~ v6*corr90
LINS120 ~~ v6*corr120
'
fit3 <- sem(model3, data=n.glucagon,missing='ml')
summary(fit3)
semPaths(fit3,what='std',layout='tree2')
coef(fit3)
#RICLPM model INS ~ BZ unconstrained
model4 <-'
#LATENT VARIABLE DEFINITION
LBZ0 =~ 1*BZN0
LBZ30 =~ 1*BZ30
LBZ60 =~ 1*BZ60
LBZ90 =~ 1*BZ90
LBZ120 =~ 1*BZ120
LINS0 =~ 1*INS0
LINS30 =~ 1*INS30
LINS60 =~ 1*INS60
LINS90 =~ 1*INS90
LINS120 =~ 1*INS120
#Latent mean Structure with intercepts
kappa =~ 1*BZN0 + 1*BZ30 + 1*BZ60 + 1*BZ90 + 1*BZ120
omega =~ 1*INS0 + 1*INS30 + 1*INS60 + 1*INS90 + 1*INS120
#intercepts
#mu: group mean per wave covariants
BZN0 ~ mu1*1
BZ30 ~ mu2*1
BZ60 ~ mu3*1
BZ90 ~ mu4*1
BZ120 ~ mu5*1
#pi: group mean per wave insulin
INS0 ~ pi1*1
INS30 ~ pi2*1
INS60 ~ pi3*1
INS90 ~ pi4*1
INS120 ~ pi5*1
#kappa: random intercepts for covariants
#omega: random intercepts for Inuslin
kappa ~~ kappa #variance
omega ~~ omega #variance
kappa ~~ omega #covariance
#LATENT FACTORS COVARIANCES @0
LBZ30 ~~ 0*LBZ0
LBZ60 ~~ 0*LBZ0
LBZ90 ~~ 0*LBZ0
LBZ120 ~~ 0*LBZ0
LINS30 ~~ 0*LINS0
LINS60 ~~ 0*LINS0
LINS90 ~~ 0*LINS0
LINS120 ~~ 0*LINS0
#LAGGED EFFECTS
#effects to be the same across both lags.
LINS120 ~ alpha5*LINS90 + beta5*LBZ90
LINS90 ~ alpha4*LINS60 + beta4*LBZ60
LINS60 ~ alpha3*LINS30 + beta3*LBZ30
LINS30 ~ alpha2*LINS0 + beta2*LBZ0
BZ120 ~ gamma5*LINS90 + delta5*LBZ90
BZ90 ~ gamma4*LINS60 + delta5*LBZ60
BZ60 ~ gamma3*LINS30 + delta3*LBZ30
BZ30 ~ gamma2*LINS0 + delta2*LBZ0
#variance
LINS0 ~~ LINS0
LINS30 ~~ u2*LINS30
LINS60 ~~ u3*LINS60
LINS90 ~~ u4*LINS90
LINS120 ~~ u5*LINS120
#variance
LBZ0 ~~ LBZ0
LBZ30 ~~ v2*LBZ30
LBZ60 ~~ v3*LBZ60
LBZ90 ~~ v4*LBZ90
LBZ120 ~~ v5*LBZ120
# LBZELATIONS
LINS0 ~~ b0*LBZ0
LINS30 ~~ b2*LBZ30
LINS60 ~~ b3*LBZ60
LINS90 ~~ b4*LBZ90
LINS120 ~~ b5*LBZ120
'
fit4 <- lavaan(model4, data=n.glucagon,missing='ml',
int.ov.free = F,
int.lv.free = F,
auto.fix.first = F,
auto.fix.single = F,
auto.cov.lv.x = F,
auto.cov.y = F,
auto.var = F)
summary(fit4,standardized = T)
Estimates<-parameterEstimates(fit4,standardized=T)
print(Estimates[nchar(Estimates[,"label"])==2,"std.all"])
print(Estimates[nchar(Estimates[,"label"])==2,"pvalue"])
semPaths(fit4,what='std',layout='tree2')
coef(fit4)
#CLPM model
model5 <-'
#LATENT VARIABLE DEFINITION
LBZ0 =~ 1*BZN0
LBZ30 =~ 1*BZ30
LBZ60 =~ 1*BZ60
LBZ90 =~ 1*BZ90
LBZ120 =~ 1*BZ120
LINS0 =~ 1*INS0
LINS30 =~ 1*INS30
LINS60 =~ 1*INS60
LINS90 =~ 1*INS90
LINS120 =~ 1*INS120
#Latent mean Structure with intercepts
kappa =~ 1*BZN0 + 1*BZ30 + 1*BZ60 + 1*BZ90 + 1*BZ120
omega =~ 1*INS0 + 1*INS30 + 1*INS60 + 1*INS90 + 1*INS120
#intercepts
#mu: group mean per wave covariants
BZN0 ~ mu1*1
BZ30 ~ mu2*1
BZ60 ~ mu3*1
BZ90 ~ mu4*1
BZ120 ~ mu5*1
#pi: group mean per wave insulin
INS0 ~ pi1*1
INS30 ~ pi2*1
INS60 ~ pi3*1
INS90 ~ pi4*1
INS120 ~ pi5*1
#kappa: random intercepts for covariants
#omega: random intercepts for Inuslin
kappa ~~ 0*kappa #variance
omega ~~ 0*omega #variance
kappa ~~ 0*omega #covariance
#LATENT FACTORS COVARIANCES @0
LBZ30 ~~ 0*LBZ0
LBZ60 ~~ 0*LBZ0
LBZ90 ~~ 0*LBZ0
LBZ120 ~~ 0*LBZ0
LINS30 ~~ 0*LINS0
LINS60 ~~ 0*LINS0
LINS90 ~~ 0*LINS0
LINS120 ~~ 0*LINS0
#LAGGED EFFECTS
#effects to be the same across both lags.
LINS120 ~ alpha5*LINS90 + beta5*LBZ90
LINS90 ~ alpha4*LINS60 + beta4*LBZ60
LINS60 ~ alpha3*LINS30 + beta3*LBZ30
LINS30 ~ alpha2*LINS0 + beta2*LBZ0
BZ120 ~ gamma5*LINS90 + delta5*LBZ90
BZ90 ~ gamma4*LINS60 + delta5*LBZ60
BZ60 ~ gamma3*LINS30 + delta3*LBZ30
BZ30 ~ gamma2*LINS0 + delta2*LBZ0
#variance
LINS0 ~~ LINS0
LINS30 ~~ u2*LINS30
LINS60 ~~ u3*LINS60
LINS90 ~~ u4*LINS90
LINS120 ~~ u5*LINS120
#variance
LBZ0 ~~ LBZ0
LBZ30 ~~ v2*LBZ30
LBZ60 ~~ v3*LBZ60
LBZ90 ~~ v4*LBZ90
LBZ120 ~~ v5*LBZ120
# LBZELATIONS
LINS0 ~~ LBZ0
LINS30 ~~ LBZ30
LINS60 ~~ LBZ60
LINS90 ~~ LBZ90
LINS120 ~~ LBZ120
'
fit5 <- lavaan(model5, data=n.glucagon,missing='ml',
int.ov.free = F,
int.lv.free = F,
auto.fix.first = F,
auto.fix.single = F,
auto.cov.lv.x = F,
auto.cov.y = F,
auto.var = F)
summary(fit5,standardized = T)
semPaths(fit5,what='std',layout='tree')
coef(fit5)
#RICLPM model AR structure
model6 <-'
#LATENT VARIABLE DEFINITION
LBZ0 =~ 1*BZN0
LBZ30 =~ 1*BZ30
LBZ60 =~ 1*BZ60
LBZ90 =~ 1*BZ90
LBZ120 =~ 1*BZ120
LINS0 =~ 1*INS0
LINS30 =~ 1*INS30
LINS60 =~ 1*INS60
LINS90 =~ 1*INS90
LINS120 =~ 1*INS120
#Latent mean Structure with intercepts
kappa =~ 1*BZN0 + 1*BZ30 + 1*BZ60 + 1*BZ90 + 1*BZ120
omega =~ 1*INS0 + 1*INS30 + 1*INS60 + 1*INS90 + 1*INS120
#intercepts
#mu: group mean per wave covariants
BZN0 ~ mu1*1
BZ30 ~ mu2*1
BZ60 ~ mu3*1
BZ90 ~ mu4*1
BZ120 ~ mu5*1
#pi: group mean per wave insulin
INS0 ~ pi1*1
INS30 ~ pi2*1
INS60 ~ pi3*1
INS90 ~ pi4*1
INS120 ~ pi5*1
#kappa: random intercepts for covariants
#omega: random intercepts for Inuslin
kappa ~~ kappa #variance
omega ~~ omega #variance
kappa ~~ omega #covariance
#LATENT FACTORS COVARIANCES @0
LBZ30 ~~ 0*LBZ0
LBZ60 ~~ 0*LBZ0
LBZ90 ~~ 0*LBZ0
LBZ120 ~~ 0*LBZ0
LINS30 ~~ 0*LINS0
LINS60 ~~ 0*LINS0
LINS90 ~~ 0*LINS0
LINS120 ~~ 0*LINS0
#LAGGED EFFECTS
#effects to be the same across both lags.
LINS120 ~ alpha*LINS90 + beta*LBZ90
LINS90 ~ alpha*LINS60 + beta*LBZ60
LINS60 ~ alpha*LINS30 + beta*LBZ30
LINS30 ~ alpha*LINS0 + beta*LBZ0
BZ120 ~ gamma*LINS90 + delta*LBZ90
BZ90 ~ gamma*LINS60 + delta*LBZ60
BZ60 ~ gamma*LINS30 + delta*LBZ30
BZ30 ~ gamma*LINS0 + delta*LBZ0
#variance
LINS0 ~~ LINS0
LINS30 ~~ u*LINS30
LINS60 ~~ u*LINS60
LINS90 ~~ u*LINS90
LINS120 ~~ u*LINS120
#variance
LBZ0 ~~ LBZ0
LBZ30 ~~ v*LBZ30
LBZ60 ~~ v*LBZ60
LBZ90 ~~ v*LBZ90
LBZ120 ~~ v*LBZ120
# CORRELATIONS
LINS0 ~~ LBZ0
LINS30 ~~ b*LBZ30
LINS60 ~~ b*LBZ60
LINS90 ~~ b*LBZ90
LINS120 ~~ b*LBZ120
'
fit6 <- lavaan(model6, data=n.glucagon,missing='ml',
int.ov.free = F,
int.lv.free = F,
auto.fix.first = F,
auto.fix.single = F,
auto.cov.lv.x = F,
auto.cov.y = F,
auto.var = F)
summary(fit6,standardized = T)
semPaths(fit6,what='std',layout='tree')
coef(fit6)
#compare un, non-RI & AR models
anova(fit4,fit5,fit6)#unconstrained model has the best fit
#plot correlations between latent variables
#plot prediction
predict(fit4) %>%
as.data.frame %>%
select(-kappa, -omega) %>%
ggpairs(lower = list(continuous = wrap(ggally_smooth, alpha = .5))) +
theme_classic()
#plot raw
dat.glucagon %>%
as.data.frame %>%
ggpairs(lower = list(continuous = wrap(ggally_smooth, alpha = .5))) +
theme_classic()
#RICLPM glucagon
model7 <-'
#LATENT VARIABLE DEFINITION
LGLUK0 =~ 1*GLUK0
LGLUK30 =~ 1*GLUK30
LGLUK120 =~ 1*GLUK120
LINS0 =~ 1*INS0
LINS30 =~ 1*INS30
LINS120 =~ 1*INS120
#Latent mean Structure with intercepts
kappa =~ 1*GLUK0 + 1*GLUK30 + 1*GLUK120
omega =~ 1*INS0 + 1*INS30 + 1*INS120
#intercepts
#mu: group mean per wave covariants
GLUK0 ~ mu1*1
GLUK30 ~ mu2*1
GLUK120 ~ mu5*1
#pi: group mean per wave insulin
INS0 ~ pi1*1
INS30 ~ pi2*1
INS120 ~ pi5*1
#kappa: random intercepts for covariants
#omega: random intercepts for Inuslin
kappa ~~ kappa #variance
omega ~~ omega #variance
kappa ~~ omega #covariance
#LATENT FACTORS COVARIANCES @0
LGLUK30 ~~ 0*LGLUK0
LGLUK120 ~~ 0*LGLUK0
LINS30 ~~ 0*LINS0
LINS120 ~~ 0*LINS0
#LAGGED EFFECTS
#effects to be the same across both lags.
LINS120 ~ alpha3*LINS30 + beta3*LGLUK30
LINS30 ~ alpha2*LINS0 + beta2*LGLUK0
GLUK120 ~ gamma3*LINS30 + delta3*LGLUK30
GLUK30 ~ gamma2*LINS0 + delta2*LGLUK0
#variance
LINS0 ~~ LINS0
LINS30 ~~ u2*LINS30
LINS120 ~~ u5*LINS120
#variance
LGLUK0 ~~ LGLUK0
LGLUK30 ~~ v2*LGLUK30
LGLUK120 ~~ v5*LGLUK120
# CORRELATIONS
LINS0 ~~ b0*LGLUK0
LINS30 ~~ b2*LGLUK30
LINS120 ~~ b5*LGLUK120
'
fit7 <- lavaan(model7, data=n.glucagon,missing='ml',
int.ov.free = F,
int.lv.free = F,
auto.fix.first = F,
auto.fix.single = F,
auto.cov.lv.x = F,
auto.cov.y = F,
auto.var = F)
summary(fit7,standardized = T)
Estimates<-parameterEstimates(fit7,standardized=T)
print(Estimates[nchar(Estimates[,"label"])==2,"std.all"])
print(Estimates[nchar(Estimates[,"label"])==2,"pvalue"])
coef(fit7)
#RICLPM model INS ~ INS + BZ + GLUK + FFAMI
model8 <-'
#LATENT VARIABLE DEFINITION
LBZ0 =~ 1*BZN0
LBZ30 =~ 1*BZ30
LBZ60 =~ 1*BZ60
LBZ90 =~ 1*BZ90
LBZ120 =~ 1*BZ120
LGLUK0 =~ 1*GLUK0
LGLUK30 =~ 1*GLUK30
LGLUK120 =~ 1*GLUK120
LFF0 =~ 1*FFAMI0
LFF30 =~ 1*FFAMI30
LFF60 =~ 1*FFAMI60
LFF90 =~ 1*FFAMI90
LFF120 =~ 1*FFAMI120
LINS0 =~ 1*INS0
LINS30 =~ 1*INS30
LINS60 =~ 1*INS60
LINS90 =~ 1*INS90
LINS120 =~ 1*INS120
#Latent mean Structure with intercepts
kappa =~ 1*BZN0 + 1*BZ30 + 1*BZ60 + 1*BZ90 + 1*BZ120
omega =~ 1*INS0 + 1*INS30 + 1*INS60 + 1*INS90 + 1*INS120
lambda =~ 1*GLUK0 + 1*GLUK30 + 1*GLUK120
iota =~ 1*FFAMI0 + 1*FFAMI30 + 1*FFAMI60 + 1*FFAMI90 + 1*FFAMI120
#intercepts
#mu: group mean per wave covariants
BZN0 ~ mu1*1
BZ30 ~ mu2*1
BZ60 ~ mu3*1
BZ90 ~ mu4*1
BZ120 ~ mu5*1
#pi: group mean per wave insulin
INS0 ~ pi1*1
INS30 ~ pi2*1
INS60 ~ pi3*1
INS90 ~ pi4*1
INS120 ~ pi5*1
#zeta: group mean per wave glucagon
GLUK0 ~ zeta1*1
GLUK30 ~ zeta2*1
GLUK120 ~ zeta5*1
#eta: group mean per wave FFAMI
FFAMI0 ~ eta1*1
FFAMI30 ~ eta2*1
FFAMI60 ~ eta3*1
FFAMI90 ~ eta4*1
FFAMI120 ~ eta5*1
#kappa: random intercepts for covariants
#omega: random intercepts for Inuslin
#kappa ~~ kappa #variance
#omega ~~ omega #variance
#lambda ~~ lambda
#iota ~~ iota
kappa ~~ omega #covariance
kappa ~~ lambda
kappa ~~ iota
omega ~~ lambda
omega ~~ iota
lambda ~~ iota
#LATENT FACTORS COVARIANCES @0
LBZ30 ~~ 0*LBZ0
LBZ60 ~~ 0*LBZ0
LBZ90 ~~ 0*LBZ0
LBZ120 ~~ 0*LBZ0
LINS30 ~~ 0*LINS0
LINS60 ~~ 0*LINS0
LINS90 ~~ 0*LINS0
LINS120 ~~ 0*LINS0
LGLUK30 ~~ 0*LGLUK0
LGLUK120 ~~ 0*LGLUK0
LFF30 ~~ 0*LFF0
LFF60 ~~ 0*LFF0
LFF90 ~~ 0*LFF0
LFF120 ~~ 0*LFF0
#LAGGED EFFECTS
#effects to be the same across both lags.
LINS120 ~ alpha5*LINS90 + beta5*LBZ90 + delta5*LFF90
LINS90 ~ alpha4*LINS60 + beta4*LBZ60 + delta4*LFF60
LINS60 ~ alpha3*LINS30 + beta3*LBZ30 + gamma3*LGLUK30 + delta3*LFF30
LINS30 ~ alpha2*LINS0 + beta2*LBZ0 + gamma2*LGLUK0 + delta2*LFF0
#variance
LINS0 ~~ LINS0
LINS30 ~~ u2*LINS30
LINS60 ~~ u3*LINS60
LINS90 ~~ u4*LINS90
LINS120 ~~ u5*LINS120
#variance
LBZ0 ~~ LBZ0
LBZ30 ~~ v2*LBZ30
LBZ60 ~~ v3*LBZ60
LBZ90 ~~ v4*LBZ90
LBZ120 ~~ v5*LBZ120
#variance
LGLUK0 ~~ LGLUK0
LGLUK30 ~~ w2*LGLUK30
LGLUK120 ~~ w5*LGLUK120
#variance
LFF0 ~~ LFF0
LFF30 ~~ x2*LFF30
LFF60 ~~ x3*LFF60
LFF90 ~~ x4*LFF90
LFF120 ~~ x5*LFF120
# CORRELATIONS
LINS0 ~~ a0*LBZ0
LINS30 ~~ a2*LBZ30
LINS60 ~~ a3*LBZ60
LINS90 ~~ a4*LBZ90
LINS120 ~~ a5*LBZ120
LINS0 ~~ b0*LGLUK0
LINS30 ~~ b2*LGLUK30
LINS120 ~~ b5*LGLUK120
LINS0 ~~ c0*LFF0
LINS30 ~~ c2*LFF30
LINS60 ~~ c3*LFF60
LINS90 ~~ c4*LFF90
LINS120 ~~ c5*LFF120
'
fit8 <- lavaan(model8, data=n.glucagon,missing='ml',
int.ov.free = F,
int.lv.free = F,
auto.fix.first = F,
auto.fix.single = F,
auto.cov.lv.x = F,
auto.cov.y = F,
auto.var = F)
lavInspect(fit8, "cov.lv")
summary(fit8,standardized = T)
Estimates<-parameterEstimates(fit8,standardized=T)
print(Estimates[nchar(Estimates[,"label"])==2,"std.all"])
print(Estimates[nchar(Estimates[,"label"])==2,"pvalue"])
coef(fit8)
#plot prediction
predict(fit8) %>%
as.data.frame %>%
select(-kappa, -omega) %>%
ggpairs(lower = list(continuous = wrap(ggally_smooth, alpha = .5))) +
theme_classic()
#RICLPM model unconstrained
#INS ~ INS + BZ + GLUK + FFAMI
#GLUK ~ INS + BZ + GLUK + FFAMI
model9 <-'
#LATENT VARIABLE DEFINITION
LBZ0 =~ 1*BZN0
LBZ30 =~ 1*BZ30
LBZ60 =~ 1*BZ60
LBZ90 =~ 1*BZ90
LBZ120 =~ 1*BZ120
LGLUK0 =~ 1*GLUK0
LGLUK30 =~ 1*GLUK30
LGLUK120 =~ 1*GLUK120
LFF0 =~ 1*FFAMI0
LFF30 =~ 1*FFAMI30
LFF60 =~ 1*FFAMI60
LFF90 =~ 1*FFAMI90
LFF120 =~ 1*FFAMI120
LINS0 =~ 1*INS0
LINS30 =~ 1*INS30
LINS60 =~ 1*INS60
LINS90 =~ 1*INS90
LINS120 =~ 1*INS120
#Latent mean Structure with intercepts
kappa =~ 1*BZN0 + 1*BZ30 + 1*BZ60 + 1*BZ90 + 1*BZ120
omega =~ 1*INS0 + 1*INS30 + 1*INS60 + 1*INS90 + 1*INS120
lambda =~ 1*GLUK0 + 1*GLUK30 + 1*GLUK120
iota =~ 1*FFAMI0 + 1*FFAMI30 + 1*FFAMI60 + 1*FFAMI90 + 1*FFAMI120
#intercepts
#mu: group mean per wave covariants
BZN0 ~ mu1*1
BZ30 ~ mu2*1
BZ60 ~ mu3*1
BZ90 ~ mu4*1
BZ120 ~ mu5*1
#pi: group mean per wave insulin
INS0 ~ pi1*1
INS30 ~ pi2*1
INS60 ~ pi3*1
INS90 ~ pi4*1
INS120 ~ pi5*1
#zeta: group mean per wave glucagon
GLUK0 ~ zeta1*1
GLUK30 ~ zeta2*1
GLUK120 ~ zeta5*1
#eta: group mean per wave FFAMI
FFAMI0 ~ eta1*1
FFAMI30 ~ eta2*1
FFAMI60 ~ eta3*1
FFAMI90 ~ eta4*1
FFAMI120 ~ eta5*1
#kappa: random intercepts for covariants
#omega: random intercepts for Inuslin
#kappa ~~ kappa #variance
#omega ~~ omega #variance
#lambda ~~ lambda
#iota ~~ iota
#kappa ~~ omega #covariance
#kappa ~~ lambda
#kappa ~~ iota
#omega ~~ lambda
#omega ~~ iota
#lambda ~~ iota
#LATENT FACTORS COVARIANCES @0
LBZ30 ~~ 0*LBZ0
LBZ60 ~~ 0*LBZ0
LBZ90 ~~ 0*LBZ0
LBZ120 ~~ 0*LBZ0
LINS30 ~~ 0*LINS0
LINS60 ~~ 0*LINS0
LINS90 ~~ 0*LINS0
LINS120 ~~ 0*LINS0
LGLUK30 ~~ 0*LGLUK0
LGLUK120 ~~ 0*LGLUK0
LFF30 ~~ 0*LFF0
LFF60 ~~ 0*LFF0
LFF90 ~~ 0*LFF0
LFF120 ~~ 0*LFF0
#LAGGED EFFECTS
#effects to be the same across both lags.
LINS120 ~ alpha5*LINS90 + beta5*LBZ90 + delta5*LFF90
LINS90 ~ alpha4*LINS60 + beta4*LBZ60 + delta4*LFF60
LINS60 ~ alpha3*LINS30 + beta3*LBZ30 + gamma3*LGLUK30 + delta3*LFF30
LINS30 ~ alpha2*LINS0 + beta2*LBZ0 + gamma2*LGLUK0 + delta2*LFF0
LGLUK120 ~ agluk5*LINS90 + bgluk5*LBZ90 + dgluk5*LFF90
LGLUK30 ~ agluk2*LINS0 + bgluk2*LBZ0 + ggluk2*LGLUK0 + dgluk2*LFF0
#variance
LINS0 ~~ LINS0
LINS30 ~~ u2*LINS30
LINS60 ~~ u3*LINS60
LINS90 ~~ u4*LINS90
LINS120 ~~ u5*LINS120
#variance
LBZ0 ~~ LBZ0
LBZ30 ~~ v2*LBZ30
LBZ60 ~~ v3*LBZ60
LBZ90 ~~ v4*LBZ90
LBZ120 ~~ v5*LBZ120
#variance
LGLUK0 ~~ LGLUK0
LGLUK30 ~~ w2*LGLUK30
LGLUK120 ~~ w5*LGLUK120
#variance
LFF0 ~~ LFF0
LFF30 ~~ x2*LFF30
LFF60 ~~ x3*LFF60
LFF90 ~~ x4*LFF90
LFF120 ~~ x5*LFF120
# CORRELATIONS
LINS0 ~~ a0*LBZ0
LINS30 ~~ a2*LBZ30
LINS60 ~~ a3*LBZ60
LINS90 ~~ a4*LBZ90
LINS120 ~~ a5*LBZ120
LINS0 ~~ b0*LGLUK0
LINS30 ~~ b2*LGLUK30
LINS120 ~~ b5*LGLUK120
LINS0 ~~ c0*LFF0
LINS30 ~~ c2*LFF30
LINS60 ~~ c3*LFF60
LINS90 ~~ c4*LFF90
LINS120 ~~ c5*LFF120
'
fit9 <- lavaan(model9, data=n.glucagon,missing='ml',
int.ov.free = F,
int.lv.free = F,
auto.fix.first = F,
auto.fix.single = F,
auto.cov.lv.x = F,
auto.cov.y = F,
auto.var = F)
lavMatrix <-lavInspect(fit9, "cov.lv")
myImagePlot(lavMatrix)
summary(fit9,standardized = T)
Estimates<-parameterEstimates(fit9,standardized=T)
print(Estimates[nchar(Estimates[,"label"])==2,"std.all"])
print(Estimates[nchar(Estimates[,"label"])==2,"pvalue"])
coef(fit9)
|
# Plot a single-subject-design behaviour series with mean and +/-1 SD
# reference bands computed from one phase.
#
# Args:
#   behavior : numeric vector of observations (may contain NA)
#   phaseX   : phase labels parallel to `behavior`; NA entries mark phase
#              breaks to be shown as gaps in the plotted line
#   v1       : label of the phase whose observations define the mean/SD band
#              (typically the baseline phase)
#   ABxlab, ABylab, ABmain : axis labels and main title for the plot
#
# Side effects: draws the plot, prints SD / +1SD / mean / -1SD to the
# console, and stores the recorded plot in the global `ab` via <<-.
SD1 <-
function(behavior,phaseX,v1,ABxlab,ABylab, ABmain){
# y-axis limits default to the observed data range.
# NOTE(review): `max`/`min` shadow the base functions inside this body.
maxy=which.max(behavior)
max<-behavior[maxy]
miny=which.min(behavior)
min<-behavior[miny]
# Locate the run of observations belonging to phase v1: count of v1
# entries starting at its first occurrence (assumes the phase is one
# contiguous block — TODO confirm for repeated phases).
t1<-table(phaseX)
tmaxA<-t1[names(t1)==v1]
startA<-match(v1,phaseX)
endA<-tmaxA+startA-1
A<-behavior[startA:endA]
meanA=mean(A,na.rm=T)
sdA=sd(A,na.rm=T)
SDabove<-meanA+sdA
SDbelow<-meanA-sdA
#min=SDbelow-2
#max=SDabove+2
# Widen the y range if a band line would fall outside the data range.
f1=SDabove >max
f2=SDbelow <min
if (f1==TRUE)
{max=SDabove+1}
if (f2==TRUE)
{min=SDbelow-1}
# Rebuild x/y with NA re-inserted at each phase break so the plotted line
# is interrupted there. `insert` is not base R — presumably R.utils::insert
# or a package helper; verify it is on the search path.
y<-na.omit(behavior)
total=length(y)
x=(1:total)
end<-which(is.na(phaseX))
np<-length(end)
j=1
while (j <= np){
e<-end[j]
y<-insert(y,NA,e)
x<-insert(x,NA,e)
j=j+1
}
graphics.off()
layout(rbind(1,2), heights=c(6,1))
plot(x,y,type="o",ylim=c(min,max),col="red",xlab=ABxlab,ylab=ABylab,main=ABmain,bty="l")
#
# Mean (green) and +/-1 SD (black) reference lines from phase v1.
abline(h=meanA,col="green",lwd=3)
abline(h=SDabove,col="black",lwd=3)
abline(h=SDbelow,col="black",lwd=3)
#par(mar=c(1, 1, 1, 1))
#plot.new()
#legend("center", c("behavior","+1sd","mean","-1sd"), col = c("red","black", "green","black"), lwd = 1,ncol=4,bty ="n")
# Print the band statistics to the console.
sdp<-c("SD",round(sdA,2))
psdu<-c("+1SD",round(SDabove,2))
pmean<-c("mean",round(meanA,2))
psdb<-c("-1SD",round(SDbelow,2))
tprint=c(sdp,psdu,pmean,psdb)
print(tprint)
# Save the finished plot in the global environment for later replay.
ab<-NULL
ab<<-recordPlot()
}
| /A_github/sources/authors/2866/SSDforR/SD1.R | no_license | Irbis3/crantasticScrapper | R | false | false | 1,387 | r | SD1 <-
function(behavior,phaseX,v1,ABxlab,ABylab, ABmain){
maxy=which.max(behavior)
max<-behavior[maxy]
miny=which.min(behavior)
min<-behavior[miny]
t1<-table(phaseX)
tmaxA<-t1[names(t1)==v1]
startA<-match(v1,phaseX)
endA<-tmaxA+startA-1
A<-behavior[startA:endA]
meanA=mean(A,na.rm=T)
sdA=sd(A,na.rm=T)
SDabove<-meanA+sdA
SDbelow<-meanA-sdA
#min=SDbelow-2
#max=SDabove+2
f1=SDabove >max
f2=SDbelow <min
if (f1==TRUE)
{max=SDabove+1}
if (f2==TRUE)
{min=SDbelow-1}
y<-na.omit(behavior)
total=length(y)
x=(1:total)
end<-which(is.na(phaseX))
np<-length(end)
j=1
while (j <= np){
e<-end[j]
y<-insert(y,NA,e)
x<-insert(x,NA,e)
j=j+1
}
graphics.off()
layout(rbind(1,2), heights=c(6,1))
plot(x,y,type="o",ylim=c(min,max),col="red",xlab=ABxlab,ylab=ABylab,main=ABmain,bty="l")
#
abline(h=meanA,col="green",lwd=3)
abline(h=SDabove,col="black",lwd=3)
abline(h=SDbelow,col="black",lwd=3)
#par(mar=c(1, 1, 1, 1))
#plot.new()
#legend("center", c("behavior","+1sd","mean","-1sd"), col = c("red","black", "green","black"), lwd = 1,ncol=4,bty ="n")
sdp<-c("SD",round(sdA,2))
psdu<-c("+1SD",round(SDabove,2))
pmean<-c("mean",round(meanA,2))
psdb<-c("-1SD",round(SDbelow,2))
tprint=c(sdp,psdu,pmean,psdb)
print(tprint)
ab<-NULL
ab<<-recordPlot()
}
|
library(lubridate)
library(dplyr)

# Download and subset data ----
# Household power consumption (UCI dataset via the Coursera Exploratory
# Data Analysis course). Downloaded once and cached under data/.
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zipName <- "data/data.zip"
dataFile <- "data/household_power_consumption.txt"
if (!file.exists("data")) {dir.create("data/")}
if (!file.exists(dataFile)) {
  download.file(fileURL, zipName)
  unzip(zipName, exdir = "data")
  file.remove(zipName)
}
df <- read.table(dataFile,
                 header = TRUE,
                 sep = ";",
                 na.strings = "?"  # "?" marks missing measurements
)
df$Date <- dmy(df$Date)
df$Time <- hms(df$Time)
# Keep only 2007-02-01 (a Thursday) and 2007-02-02 (a Friday)
df <- filter(df, Date >= "2007-02-01", Date <= "2007-02-02")

# Plot: Global Active Power over the two days, one reading per minute.
n <- nrow(df)  # 2 days x 1440 minutes = 2880 observations
png("plot2.png", width = 480, height = 480)
# FIX: the previous plot() call ended with a trailing comma (a runtime error
# in R: "argument is empty") and drew invisible points that were then
# overdrawn by a separate lines() call; a single type = "l" call produces
# the same line in one step.
plot(seq_len(n), df$Global_active_power,
     type = "l",
     col = "black",
     xlab = "",
     ylab = "Global Active Power (Kilowatts)",
     xaxt = "n")
# Day-boundary tick marks. FIX: the right-hand boundary is midnight going
# into Saturday (the data end Friday night), so the label is "Sat", not "Sun".
axis(1, c(1, n / 2, n), c("Thu", "Fri", "Sat"))
dev.off()
# done
| /plot2.R | no_license | frankij11/ExData_Plotting1 | R | false | false | 1,107 | r | library(lubridate)
library(dplyr)
#Download and subset data
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zipName <- "data/data.zip"
dataFile <- "data/household_power_consumption.txt"
if(!file.exists("data")){dir.create("data/")}
if(!file.exists(dataFile)){
download.file(fileURL, zipName )
unzip(zipName, exdir = "data")
file.remove(zipName)
}
df <- read.table(dataFile,
header = TRUE,
sep = ";",
na.strings = "?"
)
df$Date <- dmy(df$Date)
df$Time <- hms(df$Time)
# Read only dates 2007-02-01 and 2007-02-02
df <- filter(df, Date >= "2007-02-01", Date <= "2007-02-02" )
png("plot2.png", width=480, height=480)
with(df,
plot(x=1:2880, y=Global_active_power,
xlab="",
ylab="Global Active Power (Kilowatts)",
xaxt ="n",
col = 'black',
lty=1,
pch = "",
)
)
lines(1:2880, df$Global_active_power, ylab="Global Active Power (Kilowatts)", xlab="")
axis(1, c(1,1440, 2880),c("Thu", "Fri", "Sun"))
dev.off()
#done
|
# getting data from web pages readlines()
# Tutorial script: several ways to fetch and parse a web page in R.
# All of it hits the live network, so results depend on the remote sites.
con = url("http://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en")
htmlCode = readLines(con)
close(con)
htmlCode
#Parsing with XML
library(XML)
url <- "http://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en"
# useInternalNodes=T returns a C-level document so XPath queries work.
# NOTE(review): T is used for TRUE here — spell out TRUE in real code.
html <- htmlTreeParse(url, useInternalNodes=T)
xpathSApply(html, "//title", xmlValue)
xpathSApply(html, "//td[@id='col-citedby']", xmlValue)
#GET from the httr package
# NOTE(review): install.packages() inside a script re-installs on every run;
# install interactively once and keep only the library() calls.
install.packages("XML")
library(XML)
install.packages("httr")
library(httr); html2 = GET(url)
content2 = content(html2,as="text")
parsedHtml = htmlParse(content2,asText=TRUE)
xpathSApply(parsedHtml, "//title", xmlValue)
# Accessing websites with passwords
# Without credentials this endpoint returns HTTP 401.
pg1 = GET("http://httpbin.org/basic-auth/user/passwd")
pg1
#Accessing websites with passwords
# Same endpoint with basic-auth credentials supplied -> HTTP 200.
pg2 = GET("http://httpbin.org/basic-auth/user/passwd",
authenticate("user","passwd"))
pg2
names(pg2)
# Using handles
# A handle reuses one connection/cookie jar across requests to the same host.
google = handle("http://google.com")
pg1 = GET(handle=google,path="/")
pg2 = GET(handle=google,path="search") | /WebScrapping.R | no_license | NidaBat/Data-Cleaning | R | false | false | 1,021 | r | # getting data from web pages readlines()
con = url("http://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en")
htmlCode = readLines(con)
close(con)
htmlCode
#Parsing with XML
library(XML)
url <- "http://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en"
html <- htmlTreeParse(url, useInternalNodes=T)
xpathSApply(html, "//title", xmlValue)
xpathSApply(html, "//td[@id='col-citedby']", xmlValue)
#GET from the httr package
install.packages("XML")
library(XML)
install.packages("httr")
library(httr); html2 = GET(url)
content2 = content(html2,as="text")
parsedHtml = htmlParse(content2,asText=TRUE)
xpathSApply(parsedHtml, "//title", xmlValue)
# Accessing websites with passwords
pg1 = GET("http://httpbin.org/basic-auth/user/passwd")
pg1
#Accessing websites with passwords
pg2 = GET("http://httpbin.org/basic-auth/user/passwd",
authenticate("user","passwd"))
pg2
names(pg2)
# Using handles
google = handle("http://google.com")
pg1 = GET(handle=google,path="/")
pg2 = GET(handle=google,path="search") |
# Build the full request URL for the Mapzen isochrone endpoint.
#
# Splits the costing model into its costing name and its options, serializes
# the whole request as JSON via build_isochrone_json(), and hands the payload
# to matrix_url() for the "isochrone" endpoint.
build_isochrone_url <- function(
  locations,
  costing_model,
  contours,
  date_time,
  polygons,
  denoise,
  generalize,
  id,
  api_key = NULL
) {
  payload <- build_isochrone_json(
    locations = locations,
    costing = costing_model$costing,
    costing_options = costing_model$costing_options,
    date_time = date_time,
    contours = contours,
    polygons = polygons,
    denoise = denoise,
    generalize = generalize
  )
  matrix_url(
    endpoint = "isochrone",
    json = payload,
    id = id,
    api_key = api_key
  )
}
# Assemble the isochrone request body and serialize it to JSON.
#
# Scalars are wrapped with jsonlite::unbox() so they serialize as JSON
# scalars rather than length-1 arrays. Optional fields (costing_options,
# date_time, polygons, denoise, generalize) are appended only when supplied,
# so the resulting JSON contains exactly the keys the caller set.
build_isochrone_json <- function(
locations,
costing,
costing_options,
date_time,
contours,
polygons,
denoise,
generalize) {
# locations should have lon/lat. for now, only one location
# is supported
locations <- as.mz_location(locations)
# Validate the optional scalar arguments up front.
assert_that(is.null(polygons) || is.flag(polygons))
assert_that(is.null(denoise) || is.number(denoise))
assert_that(is.null(generalize) || is.number(generalize))
# A data frame serializes as a JSON array of {lon, lat} objects.
locations <- data.frame(
lon = locations[["lon"]],
lat = locations[["lat"]]
)
res <- list(locations = locations)
costing_model <- jsonlite::unbox(costing)
res <- c(res, list(costing = costing_model))
if (length(costing_options) > 0L)
res <- c(res, costing_options = list(costing_options))
if (!is.null(date_time)) {
assert_that(inherits(date_time, "mz_date_time"))
res <- c(res, date_time = list(jsonlite::unbox(date_time)))
}
res <- c(res, contours = list(contours))
if (!is.null(polygons))
res <- c(res, polygons = list(jsonlite::unbox(polygons)))
if (!is.null(denoise)) {
# API constraint: denoise must lie in [0, 1].
assert_that(denoise >= 0, denoise <= 1)
res <- c(res,
denoise = list(jsonlite::unbox(denoise)))
}
if (!is.null(generalize))
res <- c(res, generalize = list(jsonlite::unbox(generalize)))
jsonlite::toJSON(res)
}
# Validate and parse an isochrone HTTP response.
#
# On HTTP 400 the API returns a JSON body with `error` and `error_code`
# fields; those details are re-raised together with httr's own status
# message. Other error statuses propagate via stop_for_status() unchanged.
#
# Side effect: mz_update_usage() records the response headers (rate-limit
# bookkeeping) under the "matrix" service. Returns the parsed body as a
# classed list c("mapzen_isochrone_list", "geo_list") with the headers
# attached as the "header" attribute.
iso_process <- function(response) {
tryCatch(
httr::stop_for_status(response),
http_400 = function(e) {
# Surface the API's own error message and code in the condition text.
txt <- httr::content(response, as = "text")
lst <- jsonlite::fromJSON(txt, simplifyVector = FALSE)
stop(e$message, "\n", lst$error, " (", lst$error_code, ")",
call. = FALSE)
}
)
header <- httr::headers(response)
mz_update_usage(header, "matrix")
# simplifyVector = FALSE keeps the raw nested-list structure that the
# downstream geo_list converters expect.
txt <- httr::content(response, as = "text")
lst <- jsonlite::fromJSON(txt, simplifyVector = FALSE)
structure(lst,
header = header,
class = c("mapzen_isochrone_list", "geo_list"))
}
#' @import assertthat
isochrone_get <- function(url) {
    # Render the url object to a string, issue the GET, and parse the
    # response into a mapzen_isochrone_list.
    iso_process(matrix_GET(httr::build_url(url)))
}
#' Retrieve isochrones
#'
#' From \url{https://mapzen.com/documentation/mobility/isochrone/api-reference/}:
#' "An isochrone is a line that connects points of equal travel time about a
#' given location, from the Greek roots of 'iso' for equal and 'chrone' for time.
#' The Mapzen Isochrone service computes areas that are reachable within
#' specified time intervals from a location, and returns the reachable regions
#' as contours of polygons or lines that you can display on a map."
#'
#' @param locations An \code{mz_location}, or something that can be coerced to an
#' \code{\link{mz_location}}, as the departure point for the isochrone. This can be the
#' result of \code{\link{mz_geocode}}. Despite the argument name, the isochrone
#' service currently can only accept a single location
#' @param costing_model The costing model, see \code{\link{mz_costing}}
#' @param contours Up to 4 contours, see \code{\link{mz_contours}}
#' @param date_time The local date and time at the location, and whether it is
#' the departure or arrival time. See \code{\link{mz_date_time}}
#' @param polygons Whether to return polygons (TRUE) or linestrings (FALSE, default)
#' @param denoise A value between 0 and 1 (default 1) to remove smaller contours.
#' A value of 1 will only return the largest contour for a given time value. A
#' value of 0.5 drops any contours that are less than half the area of the
#' largest contour.
#' @param generalize Tolerance in meters for the Douglas-Peucker generalization.
#' @param id A descriptive identifier, the response will contain the id as an element.
#' @param api_key Your Mapzen API key, defaults to the MAPZEN_KEY environment variable.
#'
#' @return A \code{mapzen_isochrone_list}, which can be converted to \code{sf}
#' or \code{sp} using \code{\link{as_sf}} or \code{\link{as_sp}}.
#'
#' @seealso \code{\link{mz_costing}}
#'
#' @examples
#' \dontrun{
#' mz_isochrone(
#' mz_location(lat = 37.87416, lon = -122.2544),
#' costing_model = mz_costing$auto(),
#' contours = mz_contours(c(10, 20, 30))
#' )
#'
#' # departure point can be specified as a geocode result
#' mz_isochrone(
#' mz_geocode("UC Berkeley"),
#' costing_model = mz_costing$pedestrian(),
#' contours = mz_contours(c(10, 20, 30))
#' )
#' }
#'
#' @name mz_isochrone
#' @export
mz_isochrone <- function(
    locations,
    costing_model,
    contours,
    date_time = NULL,
    polygons = NULL,
    denoise = NULL,
    generalize = NULL,
    id = "my-iso",
    api_key = NULL
) {
    # Build the request URL and dispatch the GET. The costing options are
    # extracted from the costing model inside build_isochrone_url, so the
    # original (unused) `costing_options <- costing_model$costing_options`
    # dead store has been removed.
    url <- build_isochrone_url(
        locations = locations,
        costing_model = costing_model,
        contours = contours,
        date_time = date_time,
        polygons = polygons,
        denoise = denoise,
        generalize = generalize,
        id = id,
        api_key = api_key)
    isochrone_get(url)
}
#' @export
print.mapzen_isochrone_list <- function(x, ...) {
    # Summarise the GeoJSON response: one feature per isochrone contour.
    cat("GeoJSON response from Mapzen\n")
    cat("Isochrones: ", length(x$features), sep = "")
    # print methods should return their argument invisibly so the object
    # keeps flowing through pipes and auto-printing is not retriggered
    invisible(x)
}
| /R/isochrone.R | no_license | cuulee/rmapzen | R | false | false | 5,907 | r | build_isochrone_url <- function(
# Arguments mirror mz_isochrone(); see its roxygen documentation.
locations,
costing_model,
contours,
date_time,
polygons,
denoise,
generalize,
id,
api_key = NULL
) {
# Pull the costing string and its options out of the costing model; the
# JSON request body is assembled by build_isochrone_json().
costing <- costing_model$costing
costing_options <- costing_model$costing_options
json <- build_isochrone_json(
locations = locations,
costing = costing,
costing_options = costing_options,
date_time = date_time,
contours = contours,
polygons = polygons,
denoise = denoise,
generalize = generalize
)
# The isochrone service shares its request plumbing (and usage quota)
# with the matrix service.
matrix_url(
endpoint = "isochrone",
json = json,
id = id,
api_key = api_key)
}
build_isochrone_json <- function(
    locations,
    costing,
    costing_options,
    date_time,
    contours,
    polygons,
    denoise,
    generalize) {
    # Only one departure location is supported by the service; coerce the
    # input (e.g. a geocode result) to a lon/lat mz_location.
    locations <- as.mz_location(locations)
    assert_that(is.null(polygons) || is.flag(polygons))
    assert_that(is.null(denoise) || is.number(denoise))
    assert_that(is.null(generalize) || is.number(generalize))
    # Build the request body in the order the service expects; optional
    # fields are added only when non-NULL, and scalar fields are
    # unbox()ed so they serialise as JSON scalars.
    body.l <- list(
        locations = data.frame(
            lon = locations[["lon"]],
            lat = locations[["lat"]]
        ),
        costing = jsonlite::unbox(costing)
    )
    if (length(costing_options) > 0L) {
        body.l$costing_options <- costing_options
    }
    if (!is.null(date_time)) {
        assert_that(inherits(date_time, "mz_date_time"))
        body.l$date_time <- jsonlite::unbox(date_time)
    }
    body.l$contours <- contours
    if (!is.null(polygons)) {
        body.l$polygons <- jsonlite::unbox(polygons)
    }
    if (!is.null(denoise)) {
        # the service restricts denoise to [0, 1]
        assert_that(denoise >= 0, denoise <= 1)
        body.l$denoise <- jsonlite::unbox(denoise)
    }
    if (!is.null(generalize)) {
        body.l$generalize <- jsonlite::unbox(generalize)
    }
    jsonlite::toJSON(body.l)
}
iso_process <- function(response) {
    # Raise on HTTP errors; a 400 response carries a structured error
    # payload from the service, which we append to the httr message.
    tryCatch(
        httr::stop_for_status(response),
        http_400 = function(e) {
            err <- jsonlite::fromJSON(
                httr::content(response, as = "text"),
                simplifyVector = FALSE
            )
            stop(e$message, "\n", err$error, " (", err$error_code, ")",
                 call. = FALSE)
        }
    )
    hdr <- httr::headers(response)
    # isochrone calls are billed against the matrix-service quota
    mz_update_usage(hdr, "matrix")
    parsed <- jsonlite::fromJSON(
        httr::content(response, as = "text"),
        simplifyVector = FALSE
    )
    structure(
        parsed,
        header = hdr,
        class = c("mapzen_isochrone_list", "geo_list")
    )
}
#' @import assertthat
isochrone_get <- function(url) {
    # Expand the url object, perform the GET and parse the GeoJSON reply.
    iso_process(matrix_GET(httr::build_url(url)))
}
#' Retrieve isochrones
#'
#' From \url{https://mapzen.com/documentation/mobility/isochrone/api-reference/}:
#' "An isochrone is a line that connects points of equal travel time about a
#' given location, from the Greek roots of 'iso' for equal and 'chrone' for time.
#' The Mapzen Isochrone service computes areas that are reachable within
#' specified time intervals from a location, and returns the reachable regions
#' as contours of polygons or lines that you can display on a map."
#'
#' @param locations An \code{mz_location}, or something that can be coerced to an
#' \code{\link{mz_location}}, as the departure point for the isochrone. This can be the
#' result of \code{\link{mz_geocode}}. Despite the argument name, the isochrone
#' service currently can only accept a single location
#' @param costing_model The costing model, see \code{\link{mz_costing}}
#' @param contours Up to 4 contours, see \code{\link{mz_contours}}
#' @param date_time The local date and time at the location, and whether it is
#' the departure or arrival time. See \code{\link{mz_date_time}}
#' @param polygons Whether to return polygons (TRUE) or linestrings (FALSE, default)
#' @param denoise A value between 0 and 1 (default 1) to remove smaller contours.
#' A value of 1 will only return the largest contour for a given time value. A
#' value of 0.5 drops any contours that are less than half the area of the
#' largest contour.
#' @param generalize Tolerance in meters for the Douglas-Peucker generalization.
#' @param id A descriptive identifier, the response will contain the id as an element.
#' @param api_key Your Mapzen API key, defaults to the MAPZEN_KEY environment variable.
#'
#' @return A \code{mapzen_isochrone_list}, which can be converted to \code{sf}
#' or \code{sp} using \code{\link{as_sf}} or \code{\link{as_sp}}.
#'
#' @seealso \code{\link{mz_costing}}
#'
#' @examples
#' \dontrun{
#' mz_isochrone(
#' mz_location(lat = 37.87416, lon = -122.2544),
#' costing_model = mz_costing$auto(),
#' contours = mz_contours(c(10, 20, 30))
#' )
#'
#' # departure point can be specified as a geocode result
#' mz_isochrone(
#' mz_geocode("UC Berkeley"),
#' costing_model = mz_costing$pedestrian(),
#' contours = mz_contours(c(10, 20, 30))
#' )
#' }
#'
#' @name mz_isochrone
#' @export
mz_isochrone <- function(
    locations,
    costing_model,
    contours,
    date_time = NULL,
    polygons = NULL,
    denoise = NULL,
    generalize = NULL,
    id = "my-iso",
    api_key = NULL
) {
    # Assemble the request URL and dispatch the GET. Costing options are
    # extracted from the costing model inside build_isochrone_url, so the
    # original dead store `costing_options <- costing_model$costing_options`
    # (its value was never used here) has been removed.
    url <- build_isochrone_url(
        locations = locations,
        costing_model = costing_model,
        contours = contours,
        date_time = date_time,
        polygons = polygons,
        denoise = denoise,
        generalize = generalize,
        id = id,
        api_key = api_key)
    isochrone_get(url)
}
#' @export
print.mapzen_isochrone_list <- function(x, ...) {
    # One feature per isochrone contour in the GeoJSON response.
    cat("GeoJSON response from Mapzen\n")
    cat("Isochrones: ", length(x$features), sep = "")
    # by convention print methods return their argument invisibly
    invisible(x)
}
|
# Problem size: number of rows of the isotonic design matrix.
n <- 10
## generate P, X
# Import Rmosek
# require() both loads and attaches the package; the unqualified mosek()
# calls below rely on the attach, so abort early when it is missing.
if (!require("Rmosek")) {
    stop ("Rmosek not installed.")
}
# BUGFIX: invPerm(), Matrix() and sparseMatrix() used later in this
# script come from the Matrix package, which was never attached.
library(Matrix)
######################################
#Generates the n by n-1 Isotonic matrix.
#
# Entry (i, j) equals j/n - 1 when i <= j and j/n otherwise, i.e. each
# column j is a step function that jumps at row j + 1 (and sums to 0).
# Vectorised with outer() instead of the original pair of nested scalar
# loops, which is both idiomatic R and O(1) interpreter overhead per
# matrix rather than per cell.
isomat <- function(n) {
    rows <- seq_len(n)
    cols <- seq_len(n - 1)
    # ramp: constant j/n down each column; step: 1 on and above the jump
    ramp <- matrix(rep(cols / n, each = n), nrow = n)
    step <- outer(rows, cols, FUN = "<=")
    ramp - step
}
################################
X = isomat(n)
################################
#Generate permutation P#########
# Uniform random permutation of 1..n (all weights equal, i.e. a shuffle).
new.ord = sample(1:n, size=n, replace=FALSE, prob=rep(1,n))
# Alternative permutation choices kept for experimentation:
#new.ord = sample(1:n, size=n, replace=FALSE, prob=exp( 5*(1:n)/n))
#new.ord = n+1 - 1:n
#new.ord = c(10,5,2,8,9,3,4,6,7,1)
new.ord = as.integer(new.ord)
# NOTE(review): invPerm() comes from the Matrix package, which this
# script never attaches explicitly - confirm library(Matrix) has run.
sigma = invPerm(new.ord)
# Row-permuted copies of the design matrix: P %*% X and t(P) %*% X.
PX = X[new.ord, ]
PtX = X[sigma, ]
## new
# A second, independent random permutation for the third component.
new.ord2 = sample(1:n, size=n)
P2X = X[new.ord2, ]
#####Create the equivalence matrix#####
X = isomat(n)
# E = -(X'X)^{-1} X' P'X : coefficients of the permuted basis regressed
# onto the original isotonic basis.
Winv = solve(t(X) %*% X)
Z = t(X) %*% PtX
# Earlier sparse implementation of the same quantities, kept for reference:
#W = t(isomat(n)) %*% isomat(n);
#rowind = seq(1:(n-2))
#colind = rowind + 1
#Winv = sparseMatrix(i = rowind, j = colind,x = -1,dims = c(n-1,n-1),symmetric = T)
#diag(Winv) = rep(2,n-1)
#Z = t(isomat(n)) %*% isomat(n)[sigma,]
E = - Winv %*% Z
# Plot the rounded E matrix to PDF; Matrix()/image() here rely on the
# Matrix package being loaded - TODO confirm.
pdf("e_matrix_example.pdf")
image(Matrix(round(E)))
dev.off()
################################
# Three ground-truth coefficient vectors, each normalised to sum to 1:
# beta1star - dense, uniformly random weights.
beta1star = runif(n-1)
beta1star = beta1star/sum(beta1star)
# beta2star - sparse: only draws <= 0.05 survive, plus a spike at n/2.
beta2star = runif(n-1)
beta2star[beta2star > 0.05] = 0
beta2star[floor(n/2)] = 0.1
beta2star = beta2star/sum(beta2star)
# beta3star - near-uniform background with two spikes at n/2 and n/3.
beta3star = rep(0.05, n-1)
beta3star[floor(n/2)] = 1
beta3star[floor(n/3)] = 1
beta3star = beta3star/sum(beta3star)
# number of mixture components; NOTE(review): p appears unused below - verify
p = 3
################################
## rmosek call 1 L1 minimization
## # Set up the program
# Split each coefficient vector into positive and negative parts so the
# L1 norm becomes a linear objective over non-negative variables.
beta1pos.index <- seq(1, (n-1))
beta1neg.index <- seq(1, (n-1)) + (n-1)
beta2pos.index <- seq(1, (n-1)) + 2*(n-1)
beta2neg.index <- seq(1, (n-1)) + 3*(n-1)
num.vars <- 4*(n-1)
##############################################
noiseless.pattern <- list(sense = "min")
# minimise the sum of all variables == L1 norm of (beta1, beta2)
noiseless.pattern$c <- rep(1, num.vars)
##############################################
# Equality constraint X*(b1+ - b1-) + PX*(b2+ - b2-) = y.
# (The original pre-allocated a zero Matrix for A1 and immediately
# overwrote it; that dead store - and its misleading "auxiliary
# variables r = y - X beta" comment - have been removed.)
A1 <- Matrix(cbind(X, -X, PX, -PX))
###############################################
noiseless.pattern$A <- A1
noiseless.pattern$bc <- rbind(
    blc = t(y),
    buc = t(y)
)
# all program variables are non-negative
noiseless.pattern$bx <- rbind(
    blx = c(rep(0, num.vars)),
    bux = c(rep(Inf, num.vars))
)
##################################
r <- mosek(noiseless.pattern)
sol1 <- r$sol$itr$xx
obj1 <- sum(sol1)
## rmosek call 2 non-neg
# Same program, but the beta coefficients are constrained non-negative
# directly, so only 2*(n-1) variables are required.
beta1.index <- seq(1, (n-1))
beta2.index <- seq(1, (n-1)) + (n-1)
num.vars <- 2*(n-1)
##############################################
noiseless.pattern <- list(sense = "min")
# minimise the sum of the (non-negative) coefficients
noiseless.pattern$c <- rep(1, num.vars)
##############################################
# Equality constraint X*beta1 + PX*beta2 = y.
# (The dead pre-allocation of a zero Matrix for A1 was removed; the
# value was overwritten on the very next line.)
A1 <- Matrix(cbind(X, PX))
###############################################
noiseless.pattern$A <- A1
noiseless.pattern$bc <- rbind(
    blc = t(y),
    buc = t(y)
)
# program variables are non-negative
noiseless.pattern$bx <- rbind(
    blx = c(rep(0, num.vars)),
    bux = c(rep(Inf, num.vars))
)
##################################
r <- mosek(noiseless.pattern)
sol2 <- r$sol$itr$xx
obj2 <- sum(sol2)
#####################################
# Compare the optimal objective values of the two formulations.
print(c(obj1, obj2))
# Diagnostics kept for reference: inspect the recovered coefficients.
#print(sol1[beta1neg.index] > 1e-5)
#print(sol1[beta2neg.index] > 1e-5)
#beta1a = sol1[beta1pos.index] - sol1[beta1neg.index]
#beta1b = sol2[beta1.index]
#beta2a = sol1[beta2pos.index] - sol1[beta2neg.index]
#beta2b = sol2[beta2.index]
#print(round(cbind(beta1a, beta2a, beta1b, beta2b), 3))
| /code/noiselesspattern.R | no_license | nineisprime/isopattern | R | false | false | 3,789 | r | n = 10
## generate P, X
# Import Rmosek
# require() both loads and attaches Rmosek; the unqualified mosek()
# calls below depend on the attach. Abort early when it is missing.
if (!require("Rmosek")) {
stop ("Rmosek not installed.")
}
######################################
#Generates the n by n-1 Isotonic matrix.
# Column j holds j/n in every row, minus 1 in rows 1..j, so each column
# is a zero-sum step function with its jump at row j + 1.
isomat <- function(n) {
    build_col <- function(j) {
        column <- rep(j / n, n)
        column[seq_len(j)] <- column[seq_len(j)] - 1
        column
    }
    vapply(seq_len(n - 1), build_col, numeric(n))
}
################################
X = isomat(n)
################################
#Generate permutation P#########
# Uniform random permutation of 1..n (all weights equal, i.e. a shuffle).
new.ord = sample(1:n, size=n, replace=FALSE, prob=rep(1,n))
# Alternative permutation choices kept for experimentation:
#new.ord = sample(1:n, size=n, replace=FALSE, prob=exp( 5*(1:n)/n))
#new.ord = n+1 - 1:n
#new.ord = c(10,5,2,8,9,3,4,6,7,1)
new.ord = as.integer(new.ord)
# NOTE(review): invPerm() comes from the Matrix package, which this
# script never attaches explicitly - confirm library(Matrix) has run.
sigma = invPerm(new.ord)
# Row-permuted copies of the design matrix: P %*% X and t(P) %*% X.
PX = X[new.ord, ]
PtX = X[sigma, ]
## new
# A second, independent random permutation for the third component.
new.ord2 = sample(1:n, size=n)
P2X = X[new.ord2, ]
#####Create the equivalence matrix#####
X = isomat(n)
# E = -(X'X)^{-1} X' P'X : coefficients of the permuted basis regressed
# onto the original isotonic basis.
Winv = solve(t(X) %*% X)
Z = t(X) %*% PtX
# Earlier sparse implementation of the same quantities, kept for reference:
#W = t(isomat(n)) %*% isomat(n);
#rowind = seq(1:(n-2))
#colind = rowind + 1
#Winv = sparseMatrix(i = rowind, j = colind,x = -1,dims = c(n-1,n-1),symmetric = T)
#diag(Winv) = rep(2,n-1)
#Z = t(isomat(n)) %*% isomat(n)[sigma,]
E = - Winv %*% Z
# Plot the rounded E matrix to PDF; Matrix()/image() here rely on the
# Matrix package being loaded - TODO confirm.
pdf("e_matrix_example.pdf")
image(Matrix(round(E)))
dev.off()
################################
# Three ground-truth coefficient vectors, each normalised to sum to 1:
# beta1star - dense, uniformly random weights.
beta1star = runif(n-1)
beta1star = beta1star/sum(beta1star)
# beta2star - sparse: only draws <= 0.05 survive, plus a spike at n/2.
beta2star = runif(n-1)
beta2star[beta2star > 0.05] = 0
beta2star[floor(n/2)] = 0.1
beta2star = beta2star/sum(beta2star)
# beta3star - near-uniform background with two spikes at n/2 and n/3.
beta3star = rep(0.05, n-1)
beta3star[floor(n/2)] = 1
beta3star[floor(n/3)] = 1
beta3star = beta3star/sum(beta3star)
# number of mixture components; NOTE(review): p appears unused below - verify
p = 3
################################
## rmosek call 1 L1 minimization
## # Set up the program
# Split each coefficient vector into positive and negative parts so the
# L1 norm becomes a linear objective over non-negative variables.
beta1pos.index <- seq(1, (n-1))
beta1neg.index <- seq(1, (n-1)) + (n-1)
beta2pos.index <- seq(1, (n-1)) + 2*(n-1)
beta2neg.index <- seq(1, (n-1)) + 3*(n-1)
num.vars <- 4*(n-1)
##############################################
noiseless.pattern <- list(sense = "min")
# minimise the sum of all variables == L1 norm of (beta1, beta2)
noiseless.pattern$c <- rep(1, num.vars)
##############################################
# Equality constraint X*(b1+ - b1-) + PX*(b2+ - b2-) = y.
# (The original pre-allocated a zero Matrix for A1 and immediately
# overwrote it; that dead store has been removed.)
A1 <- Matrix(cbind(X, -X, PX, -PX))
###############################################
noiseless.pattern$A <- A1
noiseless.pattern$bc <- rbind(
    blc = t(y),
    buc = t(y)
)
# all program variables are non-negative
noiseless.pattern$bx <- rbind(
    blx = c(rep(0, num.vars)),
    bux = c(rep(Inf, num.vars))
)
##################################
r <- mosek(noiseless.pattern)
sol1 <- r$sol$itr$xx
obj1 <- sum(sol1)
## rmosek call 2 non-neg
# Same program, but the beta coefficients are constrained non-negative
# directly, so only 2*(n-1) variables are required.
beta1.index <- seq(1, (n-1))
beta2.index <- seq(1, (n-1)) + (n-1)
num.vars <- 2*(n-1)
##############################################
noiseless.pattern <- list(sense = "min")
# minimise the sum of the (non-negative) coefficients
noiseless.pattern$c <- rep(1, num.vars)
##############################################
# Equality constraint X*beta1 + PX*beta2 = y.
# (The dead pre-allocation of a zero Matrix for A1 was removed; the
# value was overwritten on the very next line.)
A1 <- Matrix(cbind(X, PX))
###############################################
noiseless.pattern$A <- A1
noiseless.pattern$bc <- rbind(
    blc = t(y),
    buc = t(y)
)
# program variables are non-negative
noiseless.pattern$bx <- rbind(
    blx = c(rep(0, num.vars)),
    bux = c(rep(Inf, num.vars))
)
##################################
r <- mosek(noiseless.pattern)
sol2 <- r$sol$itr$xx
obj2 <- sum(sol2)
#####################################
# Compare the optimal objective values of the two formulations.
print(c(obj1, obj2))
# Diagnostics kept for reference: inspect the recovered coefficients.
#print(sol1[beta1neg.index] > 1e-5)
#print(sol1[beta2neg.index] > 1e-5)
#beta1a = sol1[beta1pos.index] - sol1[beta1neg.index]
#beta1b = sol2[beta1.index]
#beta2a = sol1[beta2pos.index] - sol1[beta2neg.index]
#beta2b = sol2[beta2.index]
#print(round(cbind(beta1a, beta2a, beta1b, beta2b), 3))
|
testlist <- list(x = structure(c(7.29112202061864e-304, 1.13839277919377e-79, 0, 0, 0, 0), .Dim = c(1L, 6L)))
result <- do.call(multivariance:::doubleCenterBiasCorrected,testlist)
str(result) | /multivariance/inst/testfiles/doubleCenterBiasCorrected/libFuzzer_doubleCenterBiasCorrected/doubleCenterBiasCorrected_valgrind_files/1612884237-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 192 | r | testlist <- list(x = structure(c(7.29112202061864e-304, 1.13839277919377e-79, 0, 0, 0, 0), .Dim = c(1L, 6L)))
result <- do.call(multivariance:::doubleCenterBiasCorrected,testlist)
str(result) |
#' @title
#' Computes the signaling entropy of given gene expression profiles
#' over a given connected network
#'
#' @aliases CompSRana
#'
#' @description
#' This is the main user function for computing signaling entropy of
#' single cells. It takes as input the gene expression profile of
#' single cells and the adjacency matrix of a connected network. These
#' inputs will be typically the output of the \code{DoIntegPPI} function.
#'
#' @param Integration.l
#' A list object from \code{DoIntegPPI} function.
#'
#' @param local
#' A logical (default is FALSE). If TRUE, function computes the normalized
#' local signaling entropies of each gene in the network.
#'
#' @param mc.cores
#' The number of cores to use, i.e. at most how many child processes will
#' be run simultaneously. The option is initialized from environment variable
#' MC_CORES if set. Must be at least one, and parallelization requires at
#' least two cores.
#'
#' @return A list incorporates the input list and four new elements:
#'
#' @return SR
#' The global signaling entropy rate. It is normalized by the
#' maximum rate, hence a value between 0 and 1
#'
#' @return inv
#' The stationary distribution of every sample
#'
#' @return s
#' The unnormalised local entropies of each gene in every cell
#'
#' @return ns
#' The normalised local entropies of each gene, so that each value is
#' between 0 and 1
#'
#' @references
#' Teschendorff AE, Tariq Enver.
#' \emph{Single-cell entropy for accurate estimation of differentiation
#' potency from a cell’s transcriptome.}
#' Nature communications 8 (2017): 15599.
#' doi:\href{https://doi.org/10.1038/ncomms15599}{
#' 10.1038/ncomms15599}.
#'
#' Teschendorff AE, Banerji CR, Severini S, Kuehn R, Sollich P.
#' \emph{Increased signaling entropy in cancer requires the scale-free
#' property of protein interaction networks.}
#' Scientific reports 5 (2015): 9646.
#' doi:\href{https://doi.org/10.1038/srep09646}{
#' 10.1038/srep09646}.
#'
#' Banerji, Christopher RS, et al.
#' \emph{Intra-tumour signalling entropy determines clinical outcome
#' in breast and lung cancer.}
#' PLoS computational biology 11.3 (2015): e1004115.
#' doi:\href{https://doi.org/10.1371/journal.pcbi.1004115}{
#' 10.1371/journal.pcbi.1004115}.
#'
#' Teschendorff, Andrew E., Peter Sollich, and Reimer Kuehn.
#' \emph{Signalling entropy: A novel network-theoretical framework
#' for systems analysis and interpretation of functional omic data.}
#' Methods 67.3 (2014): 282-293.
#' doi:\href{https://doi.org/10.1016/j.ymeth.2014.03.013}{
#' 10.1016/j.ymeth.2014.03.013}.
#'
#' Banerji, Christopher RS, et al.
#' \emph{Cellular network entropy as the energy potential in
#' Waddington's differentiation landscape.}
#' Scientific reports 3 (2013): 3039.
#' doi:\href{https://doi.org/10.1038/srep03039}{
#' 10.1038/srep03039}.
#'
#' @examples
#' ### define a small network
#' ppiA.m <- matrix(0,nrow=10,ncol=10);
#' ppiA.m[1,] <- c(0,1,1,1,1);
#' for(r in 2:nrow(ppiA.m)){
#' ppiA.m[r,1] <- 1;
#' }
#' rownames(ppiA.m) <- paste("G",1:10,sep="");
#' colnames(ppiA.m) <- paste("G",1:10,sep="");
#'
#' ### define a positively valued expression matrix (20 genes x 10 samples)
#' exp.m <- matrix(rpois(20*10,8),nrow=20,ncol=10);
#' colnames(exp.m) <- paste("S",1:10,sep="");
#' rownames(exp.m) <- paste("G",1:20,sep="");
#'
#' ### integrate data and network
#' Integration.l <- DoIntegPPI(exp.m, ppiA.m);
#'
#' ### compute SR values
#' Integration.l <- CompSRana(Integration.l);
#'
#' ### output global signaling entropy
#' print(Integration.l$SR);
#'
#' @import parallel
#' @import Biobase
#' @import SingleCellExperiment
#' @importFrom igraph arpack
#' @importFrom SummarizedExperiment colData<-
#' @importFrom SummarizedExperiment colData
#' @export
#'
CompSRana <- function(Integration.l,
                      local = FALSE,
                      mc.cores = 1)
{
    # Normalisation constant: the maximum entropy rate of the network.
    Integration.l <- CompMaxSR(Integration.l)
    maxSR <- Integration.l$maxSR
    # One task per cell/sample, i.e. per column of the expression matrix.
    sample.idx <- as.list(seq_len(ncol(Integration.l$expMC)))
    out.l <- mclapply(sample.idx, CompSRanaPRL,
                      exp.m = Integration.l$expMC,
                      adj.m = Integration.l$adjMC,
                      local = local,
                      maxSR = maxSR,
                      mc.cores = mc.cores)
    # Collect the per-sample results: SR is a scalar per cell, the rest
    # are per-gene vectors that sapply stacks column-wise.
    Integration.l$SR  <- sapply(out.l, function(v) v[[1]])
    Integration.l$inv <- sapply(out.l, function(v) v[[2]])
    Integration.l$s   <- sapply(out.l, function(v) v[[3]])
    Integration.l$ns  <- sapply(out.l, function(v) v[[4]])
    # Mirror the SR values into the phenotype data of any attached
    # SingleCellExperiment / CellDataSet container.
    if (!is.null(Integration.l$data.sce)) {
        colData(Integration.l$data.sce)$SR <- Integration.l$SR
    } else if (!is.null(Integration.l$data.cds)) {
        pData(Integration.l$data.cds)$SR <- Integration.l$SR
    }
    return(Integration.l)
}
CompMaxSR <- function(Integration.l){
    # Maximum attainable entropy rate of the network: the log of the
    # dominant eigenvalue of the (symmetric) adjacency matrix, found with
    # ARPACK via a matrix-free matrix-vector product.
    adj.m <- Integration.l$adjMC
    fa <- function(x, extra = NULL) {
        as.vector(adj.m %*% x)
    }
    ap.o <- igraph::arpack(fa,
                           options = list(n = nrow(adj.m), nev = 1, which = "LM"),
                           sym = TRUE)
    # (the original stored ap.o$vectors in a local that was never used;
    # that dead store has been removed - only the eigenvalue is needed)
    Integration.l$maxSR <- log(ap.o$values)
    return(Integration.l)
}
CompSRanaPRL <- function(idx,
                         exp.m,
                         adj.m,
                         local=TRUE,
                         maxSR=NULL)
{
    # Expression profile of sample idx and, per node, the total
    # expression over its network neighbourhood (outgoing flux).
    exp.v <- exp.m[, idx]
    sumexp.v <- as.vector(adj.m %*% matrix(exp.v, ncol = 1))
    # Invariant (stationary) measure, normalised to sum to one.
    invP.v <- exp.v * sumexp.v
    invP.v <- invP.v / sum(invP.v)
    # Row-stochastic transition matrix: p[i, j] proportional to
    # adj[i, j] * exp[j], normalised by the neighbourhood flux of i.
    p.m <- t(t(adj.m) * exp.v) / sumexp.v
    # Local entropies and their stationary-weighted sum (the rate).
    S.v <- apply(p.m, 1, CompS)
    SR <- sum(invP.v * S.v)
    # When maxSR is supplied, report SR relative to the maximum so the
    # result lies in [0, 1].
    if (!is.null(maxSR)) {
        SR <- SR / maxSR
    }
    NS.v <- if (local) apply(p.m, 1, CompNS) else NULL
    return(list(sr = SR, inv = invP.v, s = S.v, ns = NS.v))
}
CompNS <- function(p.v){
    # Normalised local entropy of one row of the stochastic matrix:
    # Shannon entropy over the positive entries divided by log(degree),
    # so the value lies in [0, 1].
    pos <- p.v[p.v > 0]
    if (length(pos) > 1) {
        -sum(pos * log(pos)) / log(length(pos))
    } else {
        # degree-one nodes have zero entropy; avoids a 0/0 singularity
        0
    }
}
CompS <- function(p.v){
    # Unnormalised Shannon entropy over the positive entries of p.v.
    pos <- p.v[p.v > 0]
    -sum(pos * log(pos))
}
| /R/CompSRana.R | no_license | ChenWeiyan/LandSCENT | R | false | false | 6,531 | r | #' @title
#' Computes the signaling entropy of given gene expression profiles
#' over a given connected network
#'
#' @aliases CompSRana
#'
#' @description
#' This is the main user function for computing signaling entropy of
#' single cells. It takes as input the gene expression profile of
#' single cells and the adjacency matrix of a connected network. These
#' inputs will be typically the output of the \code{DoIntegPPI} function.
#'
#' @param Integration.l
#' A list object from \code{DoIntegPPI} function.
#'
#' @param local
#' A logical (default is FALSE). If TRUE, function computes the normalized
#' local signaling entropies of each gene in the network.
#'
#' @param mc.cores
#' The number of cores to use, i.e. at most how many child processes will
#' be run simultaneously. The option is initialized from environment variable
#' MC_CORES if set. Must be at least one, and parallelization requires at
#' least two cores.
#'
#' @return A list incorporates the input list and four new elements:
#'
#' @return SR
#' The global signaling entropy rate. It is normalized by the
#' maximum rate, hence a value between 0 and 1
#'
#' @return inv
#' The stationary distribution of every sample
#'
#' @return s
#' The unnormalised local entropies of each gene in every cell
#'
#' @return ns
#' The normalised local entropies of each gene, so that each value is
#' between 0 and 1
#'
#' @references
#' Teschendorff AE, Tariq Enver.
#' \emph{Single-cell entropy for accurate estimation of differentiation
#' potency from a cell’s transcriptome.}
#' Nature communications 8 (2017): 15599.
#' doi:\href{https://doi.org/10.1038/ncomms15599}{
#' 10.1038/ncomms15599}.
#'
#' Teschendorff AE, Banerji CR, Severini S, Kuehn R, Sollich P.
#' \emph{Increased signaling entropy in cancer requires the scale-free
#' property of protein interaction networks.}
#' Scientific reports 5 (2015): 9646.
#' doi:\href{https://doi.org/10.1038/srep09646}{
#' 10.1038/srep09646}.
#'
#' Banerji, Christopher RS, et al.
#' \emph{Intra-tumour signalling entropy determines clinical outcome
#' in breast and lung cancer.}
#' PLoS computational biology 11.3 (2015): e1004115.
#' doi:\href{https://doi.org/10.1371/journal.pcbi.1004115}{
#' 10.1371/journal.pcbi.1004115}.
#'
#' Teschendorff, Andrew E., Peter Sollich, and Reimer Kuehn.
#' \emph{Signalling entropy: A novel network-theoretical framework
#' for systems analysis and interpretation of functional omic data.}
#' Methods 67.3 (2014): 282-293.
#' doi:\href{https://doi.org/10.1016/j.ymeth.2014.03.013}{
#' 10.1016/j.ymeth.2014.03.013}.
#'
#' Banerji, Christopher RS, et al.
#' \emph{Cellular network entropy as the energy potential in
#' Waddington's differentiation landscape.}
#' Scientific reports 3 (2013): 3039.
#' doi:\href{https://doi.org/10.1038/srep03039}{
#' 10.1038/srep03039}.
#'
#' @examples
#' ### define a small network
#' ppiA.m <- matrix(0,nrow=10,ncol=10);
#' ppiA.m[1,] <- c(0,1,1,1,1);
#' for(r in 2:nrow(ppiA.m)){
#' ppiA.m[r,1] <- 1;
#' }
#' rownames(ppiA.m) <- paste("G",1:10,sep="");
#' colnames(ppiA.m) <- paste("G",1:10,sep="");
#'
#' ### define a positively valued expression matrix (20 genes x 10 samples)
#' exp.m <- matrix(rpois(20*10,8),nrow=20,ncol=10);
#' colnames(exp.m) <- paste("S",1:10,sep="");
#' rownames(exp.m) <- paste("G",1:20,sep="");
#'
#' ### integrate data and network
#' Integration.l <- DoIntegPPI(exp.m, ppiA.m);
#'
#' ### compute SR values
#' Integration.l <- CompSRana(Integration.l);
#'
#' ### output global signaling entropy
#' print(Integration.l$SR);
#'
#' @import parallel
#' @import Biobase
#' @import SingleCellExperiment
#' @importFrom igraph arpack
#' @importFrom SummarizedExperiment colData<-
#' @importFrom SummarizedExperiment colData
#' @export
#'
CompSRana <- function(Integration.l,
                      local = FALSE,
                      mc.cores = 1)
{
    # First obtain the network's maximum entropy rate (normaliser).
    Integration.l <- CompMaxSR(Integration.l)
    maxSR <- Integration.l$maxSR
    # Process each cell (column of the expression matrix) in parallel.
    sample.idx <- as.list(seq_len(ncol(Integration.l$expMC)))
    out.l <- mclapply(sample.idx, CompSRanaPRL,
                      exp.m = Integration.l$expMC,
                      adj.m = Integration.l$adjMC,
                      local = local,
                      maxSR = maxSR,
                      mc.cores = mc.cores)
    # Gather results: SR is scalar per cell; inv/s/ns are per-gene
    # vectors that sapply assembles column-wise.
    Integration.l$SR  <- sapply(out.l, function(v) v[[1]])
    Integration.l$inv <- sapply(out.l, function(v) v[[2]])
    Integration.l$s   <- sapply(out.l, function(v) v[[3]])
    Integration.l$ns  <- sapply(out.l, function(v) v[[4]])
    # Propagate SR into any attached single-cell container's metadata.
    if (!is.null(Integration.l$data.sce)) {
        colData(Integration.l$data.sce)$SR <- Integration.l$SR
    } else if (!is.null(Integration.l$data.cds)) {
        pData(Integration.l$data.cds)$SR <- Integration.l$SR
    }
    return(Integration.l)
}
CompMaxSR <- function(Integration.l){
    # Maximum entropy rate of the network: log of the largest eigenvalue
    # of the symmetric adjacency matrix, computed matrix-free via ARPACK.
    adj.m <- Integration.l$adjMC
    fa <- function(x, extra = NULL) {
        as.vector(adj.m %*% x)
    }
    ap.o <- igraph::arpack(fa,
                           options = list(n = nrow(adj.m), nev = 1, which = "LM"),
                           sym = TRUE)
    # (removed the dead store of ap.o$vectors - the eigenvector was
    # assigned to a local and never used)
    Integration.l$maxSR <- log(ap.o$values)
    return(Integration.l)
}
CompSRanaPRL <- function(idx,
                         exp.m,
                         adj.m,
                         local=TRUE,
                         maxSR=NULL)
{
    # Per-node outgoing flux for sample idx: expression of the node
    # times total expression of its neighbourhood.
    exp.v <- exp.m[, idx]
    sumexp.v <- as.vector(adj.m %*% matrix(exp.v, ncol = 1))
    # Stationary distribution, normalised to sum to one.
    invP.v <- exp.v * sumexp.v
    invP.v <- invP.v / sum(invP.v)
    # Row-stochastic transition probabilities.
    p.m <- t(t(adj.m) * exp.v) / sumexp.v
    # Entropy rate = stationary-weighted sum of local entropies.
    S.v <- apply(p.m, 1, CompS)
    SR <- sum(invP.v * S.v)
    # Normalise by the maximum rate when provided, giving SR in [0, 1].
    if (!is.null(maxSR)) {
        SR <- SR / maxSR
    }
    NS.v <- if (local) apply(p.m, 1, CompNS) else NULL
    return(list(sr = SR, inv = invP.v, s = S.v, ns = NS.v))
}
CompNS <- function(p.v){
    # Normalised local entropy: Shannon entropy over positive entries,
    # scaled by log(number of positive entries) into [0, 1].
    pos <- p.v[p.v > 0]
    if (length(pos) > 1) {
        -sum(pos * log(pos)) / log(length(pos))
    } else {
        # a single-neighbour node carries no entropy (avoid 0/0)
        0
    }
}
CompS <- function(p.v){
    # Unnormalised Shannon entropy of the positive entries.
    pos <- p.v[p.v > 0]
    -sum(pos * log(pos))
}
|
# Learning-curve labour-hours function.
# Returns the hours needed for the x-th cumulative unit, where b is the
# learning exponent log(rate)/log(2).
y1.f <- function(x, b, first.unit = 100000) { # x: cumulative unit count
    # first.unit: hours for unit 1; the default keeps the original
    # hard-coded constant, so existing calls are unchanged
    first.unit * (x^b)
}
# Declare the constants for this example
title <- "人工工時與學習曲線" # chart title ("labour hours and the learning curve")
xy <- data.frame(x = seq(0,100,by=10)) # x/y axis range; NOTE(review): appears unused below - verify
x.label <- '累計件數' # x-axis label ("cumulative units")
y.label <- '每件人工小時' # y-axis label ("labour hours per unit")
lgnd.title <- '學習率' # legend title ("learning rate")
lr <- c(0.9,0.8,0.7) # the set of learning rates to plot
colors= c('#FF2345','#34FF45','#AD34AE') # line colour for each learning rate
# Declare the point-chart data for this example
p.df <- data.frame(x=c(1,seq(2,30,by=2)))
# Column i+1 of p.df holds the hours-per-unit curve for rate lr[i].
for (i in 1:length(lr)){
p.df[,i+1] <- y1.f( # call the helper to compute the y values
p.df$x, # pass each value of the x column in turn
log(lr[i])/log(2) # learning-curve exponent for this rate
) # at this point print(p.df) in the console shows the result
}
# Draw the chart with ggplot
library(ggplot2)
p<-ggplot(
data=p.df,
mapping=aes(x=p.df[,1],y=NULL))+ # set the x data (y is supplied later by each layer)
ggtitle(title)+ # chart title
xlab(x.label)+ylab(y.label)+ # x and y axis labels
theme( # font, colour and size of the axis titles
axis.title.x = element_text(color = "#56ABCD", size = 12, face = "bold"),
axis.title.y = element_text(color = "#993333", size = 12, face = "bold")
)+
theme(axis.text.x = element_text(size = 10))+ # x-axis tick-label size
scale_x_discrete(limits=p.df$x)+ # label every x value on the axis
scale_colour_manual(lgnd.title,values =colors) # legend (top right) maps each colour to a rate
# Declare the function that overlays one point layer and one line layer
s.f <- function(s,i){ # s: ggplot object; i: index into the learning-rate set
# Overlay geom_point and geom_path layers on the plot object. The y
# values were precomputed into column i+1 of p.df by y1.f, and the
# colour aesthetic doubles as the legend label for this curve.
# The modified plot object is returned (last expression).
s<- s+
geom_point( # overlay the point layer on the plot object
data=data.frame(p.df[,i+1]), # data: column i+1 of p.df
aes(y=p.df[,i+1], # same column for the y axis
colour = as.character(lr[i]) # legend text mapped to this colour
)
)+ # draw the individual points
geom_path( # overlay the line layer on the plot object
data=data.frame(p.df[,i+1]), # as above
aes(y=p.df[,i+1], # as above
colour = as.character(lr[i]) # legend text mapped to this colour
)
) # draw the connecting line over the points
}
# Loop over the learning rates, layering each curve onto the plot via s.f
for (i in 1:length(lr)){
p <-s.f(p,i)
}
print(p) # render the finished plot object
| /R/9_2.R | no_license | Hsusir/HS-I- | R | false | false | 2,467 | r | # 宣告學習曲線計算函式
# Hours required for the x-th cumulative unit on a learning curve with
# exponent b (= log(learning rate)/log(2)); the first unit takes 100000 h.
y1.f <- function(x, b) {
    100000 * x^b
}
# Declare the constants for this example
title <- "人工工時與學習曲線" # chart title ("labour hours and the learning curve")
xy <- data.frame(x = seq(0,100,by=10)) # x/y axis range; NOTE(review): appears unused below - verify
x.label <- '累計件數' # x-axis label ("cumulative units")
y.label <- '每件人工小時' # y-axis label ("labour hours per unit")
lgnd.title <- '學習率' # legend title ("learning rate")
lr <- c(0.9,0.8,0.7) # the set of learning rates to plot
colors= c('#FF2345','#34FF45','#AD34AE') # line colour for each learning rate
# Declare the point-chart data for this example
p.df <- data.frame(x=c(1,seq(2,30,by=2)))
# Column i+1 of p.df holds the hours-per-unit curve for rate lr[i].
for (i in 1:length(lr)){
p.df[,i+1] <- y1.f( # call the helper to compute the y values
p.df$x, # pass each value of the x column in turn
log(lr[i])/log(2) # learning-curve exponent for this rate
) # at this point print(p.df) in the console shows the result
}
# Draw the chart with ggplot
library(ggplot2)
p<-ggplot(
data=p.df,
mapping=aes(x=p.df[,1],y=NULL))+ # set the x data (y is supplied later by each layer)
ggtitle(title)+ # chart title
xlab(x.label)+ylab(y.label)+ # x and y axis labels
theme( # font, colour and size of the axis titles
axis.title.x = element_text(color = "#56ABCD", size = 12, face = "bold"),
axis.title.y = element_text(color = "#993333", size = 12, face = "bold")
)+
theme(axis.text.x = element_text(size = 10))+ # x-axis tick-label size
scale_x_discrete(limits=p.df$x)+ # label every x value on the axis
scale_colour_manual(lgnd.title,values =colors) # legend (top right) maps each colour to a rate
# Declare the function that overlays one point layer and one line layer
s.f <- function(s,i){ # s: ggplot object; i: index into the learning-rate set
# Overlay geom_point and geom_path layers on the plot object. The y
# values were precomputed into column i+1 of p.df by y1.f, and the
# colour aesthetic doubles as the legend label for this curve.
# The modified plot object is returned (last expression).
s<- s+
geom_point( # overlay the point layer on the plot object
data=data.frame(p.df[,i+1]), # data: column i+1 of p.df
aes(y=p.df[,i+1], # same column for the y axis
colour = as.character(lr[i]) # legend text mapped to this colour
)
)+ # draw the individual points
geom_path( # overlay the line layer on the plot object
data=data.frame(p.df[,i+1]), # as above
aes(y=p.df[,i+1], # as above
colour = as.character(lr[i]) # legend text mapped to this colour
)
) # draw the connecting line over the points
}
# Loop over the learning rates, layering each curve onto the plot via s.f
for (i in 1:length(lr)){
p <-s.f(p,i)
}
print(p) # render the finished plot object
|
#################################################################
# DeepL hack EXAMPLE
#################################################################
# Content
#################################################################
# Dependencies
# Load data and Selenium driver
# Translate documents
#################################################################
rm(list = ls())
#################################################################
# Dependencies
#################################################################
# global
library(httr)
library(dplyr)
library(pbapply)
library(pbmcapply)
library(stringr)
library(RSelenium)
library(textcat)
library(wdman)
# local helper functions (DeepL form automation, Selenium wrappers, text batching)
source('~/hub/helper/r/selenium-hacks/deepl-hacks-fx.R')
source('~/hub/helper/r/selenium-hacks/selenium-hacks-fx.R')
source('~/hub/helper/r/text-analysis/text-batching-fx.R')
#################################################################
# Load data and Selenium driver
#################################################################
# loads the documents to translate -- NOTE(review): presumably a data frame `df`
# with columns `ht+body` and `id` (both used below); confirm the .RData contents
load('~/../../some-data.RData')
cDrv <- chrome(port=4567L) # start a chromedriver service on port 4567
eCaps <- list(chromeOptions = list( # extra capabilities: run Chrome headless
args = c('--headless', '--disable-gpu', '--window-size=1280,800')
))
#################################################################
# Translate documents
#################################################################
setwd('~/../../res/') # results directory; batch .RData files are saved here
# TEST opening the webpage before starting the real run
SelRun(startpage = 'https://www.deepl.com/translator', timeout = 20000, test = T, browser = 'chrome', portN = 4567L, extraCapabilities = eCaps)
# Translate documents in macro-batches of 10 rows each.
# A macro-batch is processed together and saved as a single .RData object so that
# already-translated documents survive a loop abortion (and per-file saving is avoided).
# NOTE(review): the original iterator `seq(1, nrow(df), 10) + 1` started at row 2,
# silently skipping document 1, and the final batch could index past nrow(df);
# both are fixed below.
for (i in seq(1, nrow(df), 10)) {
batch <- i:min(i + 9, nrow(df)) # row indexes of this macro-batch, clamped to the data
print(batch)
system.time(
# apply the translation pipeline over the macro-batch
res <- lapply(batch, function(index) {
txt <- df$`ht+body`[index]
Encoding(txt) <- 'UTF-8'
# DeepL's form rejects inputs over 5000 characters, so split long documents
if (nchar(txt) >= 5000) {
txt_batch <- batch_text(txt, 5000) %>%
lapply(., function(x) gsub('&', '+', x)) # '&' breaks the request; mask it
} else {
txt_batch <- list(txt)
}
fbatch <- txt_batch %>% unlist %>% paste0(., collapse = '')
print('Batching Done')
# sanity check: re-joined sub-batches must reproduce the original character count
if (!identical(nchar(fbatch), nchar(txt))) {
return('Batching Problem')
} else {
# open a real browser session for this document
browser <- SelRun(startpage = 'https://www.deepl.com/translator', timeout = 20000, portN = 4567L, extraCapabilities = eCaps)
Sys.sleep(5) # give the page time to finish loading
# set the source language
set_lang(user.text = txt_batch, driver = browser, language = 'german')
# translate each sub-batch, degrading to a marker string on failure
res <- tryCatch({lapply(txt_batch, function(x) get_transl(user.text = unlist(x), driver = browser))},
error = function(e) return('Translation Problem'))
# collapse the sub-batches back into a single document
res <- paste0(unlist(res), collapse = ' ') %>% gsub('\\s{2,}', ' ', .)
# name the result with the document id for later joining
names(res) <- df$id[index]
# close the browser session before returning
browser$close()
browser$quit()
return(res)
}
})
)
# persist this macro-batch
save(file = paste0('some-prefix', min(batch), '_', max(batch), '_HACKY-REMOTE.RData'), res)
}
cDrv$stop()
| /r/selenium-hacks/EX-selenium-hacks.R | no_license | lucienbaumgartner/helper | R | false | false | 3,457 | r | #################################################################
# DeepL hack EXAMPLE
#################################################################
# Content
#################################################################
# Dependencies
# Load data and Selenium driver
# Translate documents
#################################################################
rm(list = ls())
#################################################################
# Dependencies
#################################################################
# global
library(httr)
library(dplyr)
library(pbapply)
library(pbmcapply)
library(stringr)
library(RSelenium)
library(textcat)
library(wdman)
# local
source('~/hub/helper/r/selenium-hacks/deepl-hacks-fx.R')
source('~/hub/helper/r/selenium-hacks/selenium-hacks-fx.R')
source('~/hub/helper/r/text-analysis/text-batching-fx.R')
#################################################################
# Load data and Selenium driver
#################################################################
load('~/../../some-data.RData')
cDrv <- chrome(port=4567L)
eCaps <- list(chromeOptions = list(
args = c('--headless', '--disable-gpu', '--window-size=1280,800')
))
#################################################################
# Translate documents
#################################################################
setwd('~/../../res/')
# TEST opening the webpage
SelRun(startpage = 'https://www.deepl.com/translator', timeout = 20000, test = T, browser = 'chrome', portN = 4567L, extraCapabilities = eCaps)
# Translate documents in macro-batches of 10 rows each.
# A macro-batch is processed together and saved as a single .RData object so that
# already-translated documents survive a loop abortion (and per-file saving is avoided).
# NOTE(review): the original iterator `seq(1, nrow(df), 10) + 1` started at row 2,
# silently skipping document 1, and the final batch could index past nrow(df);
# both are fixed below.
for (i in seq(1, nrow(df), 10)) {
batch <- i:min(i + 9, nrow(df)) # row indexes of this macro-batch, clamped to the data
print(batch)
system.time(
# apply the translation pipeline over the macro-batch
res <- lapply(batch, function(index) {
txt <- df$`ht+body`[index]
Encoding(txt) <- 'UTF-8'
# DeepL's form rejects inputs over 5000 characters, so split long documents
if (nchar(txt) >= 5000) {
txt_batch <- batch_text(txt, 5000) %>%
lapply(., function(x) gsub('&', '+', x)) # '&' breaks the request; mask it
} else {
txt_batch <- list(txt)
}
fbatch <- txt_batch %>% unlist %>% paste0(., collapse = '')
print('Batching Done')
# sanity check: re-joined sub-batches must reproduce the original character count
if (!identical(nchar(fbatch), nchar(txt))) {
return('Batching Problem')
} else {
# open a real browser session for this document
browser <- SelRun(startpage = 'https://www.deepl.com/translator', timeout = 20000, portN = 4567L, extraCapabilities = eCaps)
Sys.sleep(5) # give the page time to finish loading
# set the source language
set_lang(user.text = txt_batch, driver = browser, language = 'german')
# translate each sub-batch, degrading to a marker string on failure
res <- tryCatch({lapply(txt_batch, function(x) get_transl(user.text = unlist(x), driver = browser))},
error = function(e) return('Translation Problem'))
# collapse the sub-batches back into a single document
res <- paste0(unlist(res), collapse = ' ') %>% gsub('\\s{2,}', ' ', .)
# name the result with the document id for later joining
names(res) <- df$id[index]
# close the browser session before returning
browser$close()
browser$quit()
return(res)
}
})
)
# persist this macro-batch
save(file = paste0('some-prefix', min(batch), '_', max(batch), '_HACKY-REMOTE.RData'), res)
}
cDrv$stop()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_survey.R
\name{gg_survey}
\alias{gg_survey}
\title{Plot voter shares observed in one survey}
\usage{
gg_survey(data, colors = NULL, labels = NULL, annotate_bars = TRUE,
hurdle = 5)
}
\arguments{
\item{data}{Scraped dataset containing one row per party in the column
\code{party} and the observed voter share in the column \code{percent}}
\item{colors}{Named vector containing party colours. If \code{NULL}
(default) tries to guess color based on party names, grey otherwise.}
\item{labels}{Named vector containing party labels. If \code{NULL} (default)
tries to guess party names from \code{data}.}
\item{annotate_bars}{If \code{TRUE} (default) bars are annotated by the
respective vote share (percentage).}
\item{hurdle}{Hurdle for single parties to get into the parliament, e.g. '5'
for '5\%'. If set to \code{NULL}, no horizontal line is plotted.}
}
\description{
Bar chart of the raw voter shares observed in one survey.
Additionally to plotting positive voter shares,
the function can be used to plot party-specific differences (e.g. between
a survey and the election result), including negative numbers.
}
\examples{
library(tidyr)
library(dplyr)
library(coalitions)
survey <- scrape_wahlrecht() \%>\% collapse_parties() \%>\%
slice(1) \%>\% select(survey) \%>\% unnest()
gg_survey(survey)
}
| /man/gg_survey.Rd | permissive | romainfrancois/coalitions | R | false | true | 1,441 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_survey.R
\name{gg_survey}
\alias{gg_survey}
\title{Plot voter shares observed in one survey}
\usage{
gg_survey(data, colors = NULL, labels = NULL, annotate_bars = TRUE,
hurdle = 5)
}
\arguments{
\item{data}{Scraped dataset containing one row per party in the column
\code{party} and the observed voter share in the column \code{percent}}
\item{colors}{Named vector containing party colours. If \code{NULL}
(default) tries to guess color based on party names, grey otherwise.}
\item{labels}{Named vector containing party labels. If \code{NULL} (default)
tries to guess party names from \code{data}.}
\item{annotate_bars}{If \code{TRUE} (default) bars are annotated by the
respective vote share (percentage).}
\item{hurdle}{Hurdle for single parties to get into the parliament, e.g. '5'
for '5\%'. If set to \code{NULL}, no horizontal line is plotted.}
}
\description{
Bar chart of the raw voter shares observed in one survey.
Additionally to plotting positive voter shares,
the function can be used to plot party-specific differences (e.g. between
a survey and the election result), including negative numbers.
}
\examples{
library(tidyr)
library(dplyr)
library(coalitions)
survey <- scrape_wahlrecht() \%>\% collapse_parties() \%>\%
slice(1) \%>\% select(survey) \%>\% unnest()
gg_survey(survey)
}
|
#
# UPDATE MARKOV TEXT PREDICTION TABLE WITH WORD PROBABILITIES USING
# MODIFIED INTERPOLATED KNESER-NEY SMOOTHING VALUES (RESULT IS "PKN")
#
# NOTE: THIS IS A LONG RUNNING PROCEDURE SO YOU WILL RUN THIS MULTIPLE TIMES
# REQUIREMENTS:
# PROCEDURE '2_buildLookupTables.R' MUST HAVE RUN TO BUILD LOOKUP TABLES
# 1. UPDATE THE 'inFile' NAME AND 'outFile' NAME
# 2. UPDATE THE 'skip' AND 'nrows' IN THE 'read.csv' STATEMENT
#
library(dplyr)
library(readtext)
library(reader)
library(stringr)
library(lubridate)
library(data.table)
library(tidyr)
inFile <- paste0(getwd(),"/TextData/markovTable_40PCT.csv")
outFile <- paste0(getwd(),"/TextData/markovTable_1750001.csv")
# Read one 250k-row slice of the Markov table; `skip` selects the slice.
# This long-running job is run repeatedly, advancing `skip` on each run.
headers <- names(read.csv(inFile,nrows=1))
rt_txt <- read.csv(inFile, header=F, col.names=headers, skip=1750001, nrows=250000)
markovTable <- as.data.frame(rt_txt)
#
# LOAD LOOKUP TABLES
#
inFile <- paste0(getwd(),"/TextData/mc.csv")
mc <- as.data.frame(read.csv(inFile)) # searchWords -> frequency
inFile <- paste0(getwd(),"/TextData/mcType.csv")
mcType <- as.data.frame(read.csv(inFile)) # searchWords -> count; NOTE(review): presumably distinct continuations (KN lambda) -- confirm
inFile <- paste0(getwd(),"/TextData/nw.csv")
nw <- as.data.frame(read.csv(inFile)) # (n-gram order + next word) key -> frequency
inFile <- paste0(getwd(),"/TextData/nGramCount.csv")
nGramCount <- as.data.frame(read.csv(inFile)) # n-gram order -> total count
#
# Compute next-word probabilities for every row of a Markov lookup table using
# modified interpolated Kneser-Ney smoothing; the result is stored in the PKN column.
#
# Arguments:
#   df         - Markov table with columns searchWords, frequency, nGramCount, nextWord
#   sampleSize - number of rows being processed (informational only; defaults to nrow(df))
# Returns: df with added columns searchCount, mcTypeCount, wordMLE, nwCount,
#          firstTerm, lambda, pCont and PKN.
# Reads the lookup tables mc, mcType, nw and nGramCount from the calling environment.
#
# NOTE(review): the original version looped row by row, calling match() once per row
# (accidentally quadratic), interleaved cat("\r...") output that corrupted the
# progress bar, and never close()d the progress bar. The lookups below are fully
# vectorized instead, and Sys.time() replaces lubridate::now() for the log lines.
nextWordProbability <- function(df, sampleSize=nrow(df)) {
print(paste0(Sys.time()," - Starting maxLikelihood calculation with sample size = ", sampleSize, " tokens & n-grams"))
dValue <- .75 # STANDARD DISCOUNTING
dfTable <- df
# vectorized lookups against the pre-built tables
dfTable$searchCount <- mc$freq[match(dfTable$searchWords, mc$searchWords)]
dfTable$mcTypeCount <- mcType$freq[match(dfTable$searchWords, mcType$searchWords)]
dfTable$wordMLE <- dfTable$frequency/dfTable$searchCount # maximum-likelihood estimate
lookupKey <- paste0(dfTable$nGramCount, dfTable$nextWord)
dfTable$nwCount <- nw$freq[match(lookupKey, nw$searchKey)]
#
# KNESER-NEY SMOOTHING FACTORS
#
dfTable$firstTerm <- pmax((dfTable$frequency - dValue)/dfTable$searchCount, 0) # discounted ML term
dfTable$lambda <- (dValue/dfTable$searchCount) * dfTable$mcTypeCount # interpolation weight
nGramc <- nGramCount$freq[match(dfTable$nGramCount, nGramCount$nGram)]
dfTable$pCont <- dfTable$nwCount / nGramc # continuation probability
dfTable$PKN <- dfTable$firstTerm + (dfTable$lambda * dfTable$pCont)
print(paste0(Sys.time(), " - Calculation Complete"))
dfTable
}
# Compute probabilities for this slice, peek at the result, and write it out
newTable <- nextWordProbability(markovTable)
head(newTable)
write.csv(newTable,outFile, row.names=T)
| /3_updateProbabilities.R | no_license | jlranaliticas/NextWordPredictor | R | false | false | 3,547 | r | #
# UPDATE MARKOV TEXT PREDICTION TABLE WITH WORD PROBABILITIES USING
# MODIFIED INTERPOLATED KNESER-NEY SMOOTHING VALUES (RESULT IS "PKN")
#
# NOTE: THIS IS A LONG RUNNING PROCEDURE SO YOU WILL RUN THIS MULTIPLE TIMES
# REQUIREMENTS:
# PROCEDURE '2_buildLookupTables.R' MUST HAVE RUN TO BUILD LOOKUP TABLES
# 1. UPDATE THE 'inFile' NAME AND 'outFile' NAME
# 2. UPDATE THE 'skip' AND 'nrows' IN THE 'read.csv' STATEMENT
#
library(dplyr)
library(readtext)
library(reader)
library(stringr)
library(lubridate)
library(data.table)
library(tidyr)
inFile <- paste0(getwd(),"/TextData/markovTable_40PCT.csv")
outFile <- paste0(getwd(),"/TextData/markovTable_1750001.csv")
headers <- names(read.csv(inFile,nrows=1))
rt_txt <- read.csv(inFile, header=F, col.names=headers, skip=1750001, nrows=250000)
markovTable <- as.data.frame(rt_txt)
#
# LOAD LOOKUP TABLES
#
inFile <- paste0(getwd(),"/TextData/mc.csv")
mc <- as.data.frame(read.csv(inFile))
inFile <- paste0(getwd(),"/TextData/mcType.csv")
mcType <- as.data.frame(read.csv(inFile))
inFile <- paste0(getwd(),"/TextData/nw.csv")
nw <- as.data.frame(read.csv(inFile))
inFile <- paste0(getwd(),"/TextData/nGramCount.csv")
nGramCount <- as.data.frame(read.csv(inFile))
#
# Compute next-word probabilities for every row of a Markov lookup table using
# modified interpolated Kneser-Ney smoothing; the result is stored in the PKN column.
#
# Arguments:
#   df         - Markov table with columns searchWords, frequency, nGramCount, nextWord
#   sampleSize - number of rows being processed (informational only; defaults to nrow(df))
# Returns: df with added columns searchCount, mcTypeCount, wordMLE, nwCount,
#          firstTerm, lambda, pCont and PKN.
# Reads the lookup tables mc, mcType, nw and nGramCount from the calling environment.
#
# NOTE(review): the original version looped row by row, calling match() once per row
# (accidentally quadratic), interleaved cat("\r...") output that corrupted the
# progress bar, and never close()d the progress bar. The lookups below are fully
# vectorized instead, and Sys.time() replaces lubridate::now() for the log lines.
nextWordProbability <- function(df, sampleSize=nrow(df)) {
print(paste0(Sys.time()," - Starting maxLikelihood calculation with sample size = ", sampleSize, " tokens & n-grams"))
dValue <- .75 # STANDARD DISCOUNTING
dfTable <- df
# vectorized lookups against the pre-built tables
dfTable$searchCount <- mc$freq[match(dfTable$searchWords, mc$searchWords)]
dfTable$mcTypeCount <- mcType$freq[match(dfTable$searchWords, mcType$searchWords)]
dfTable$wordMLE <- dfTable$frequency/dfTable$searchCount # maximum-likelihood estimate
lookupKey <- paste0(dfTable$nGramCount, dfTable$nextWord)
dfTable$nwCount <- nw$freq[match(lookupKey, nw$searchKey)]
#
# KNESER-NEY SMOOTHING FACTORS
#
dfTable$firstTerm <- pmax((dfTable$frequency - dValue)/dfTable$searchCount, 0) # discounted ML term
dfTable$lambda <- (dValue/dfTable$searchCount) * dfTable$mcTypeCount # interpolation weight
nGramc <- nGramCount$freq[match(dfTable$nGramCount, nGramCount$nGram)]
dfTable$pCont <- dfTable$nwCount / nGramc # continuation probability
dfTable$PKN <- dfTable$firstTerm + (dfTable$lambda * dfTable$pCont)
print(paste0(Sys.time(), " - Calculation Complete"))
dfTable
}
# Compute probabilities for this slice, peek at the result, and write it out
newTable <- nextWordProbability(markovTable)
head(newTable)
write.csv(newTable,outFile, row.names=T)
|
\name{seqedplot}
\alias{seqedplot}
\title{
Graphical representation of a set of events sequences.
}
\description{
This function provides two ways to represent a set of events.
The first one (\code{type="survival"}) plots the survival curves of the first occurrence of each event.
The second one (\code{type="hazard"}) plots the mean counts of each event in the successive periods.
}
\usage{
seqedplot(seqe,
group=NULL, breaks=20, ages=NULL,
main="auto", type="survival", ignore=NULL,
with.legend="auto",cex.legend=1,
use.layout=(!is.null(group) | with.legend!=FALSE), legend.prop=NA,
rows=NA, cols=NA,
xaxis="all", xlab="time",
yaxis="all",
ylab=ifelse(type=="survival", "survival probability", "mean number of events"),
cpal=NULL,
title, withlegend, axes, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{seqe}{an event sequence object as defined by the \code{\link{seqecreate}} function.}
\item{group}{Plots one plot for each level of the factor given as argument.}
\item{breaks}{Number of breaks defining a period.}
\item{ages}{Two numeric values representing minimum and maximum ages to be represented.}
\item{main}{String. Title of the graphic. Default is \code{"auto"}, i.e., group levels. Set as \code{NULL} to suppress titles.}
\item{type}{String. Type of plot. One of \code{"survival"} or \code{"hazard"}. If \code{type="survival"}, survival curves of the first occurrence of each event are plotted. If \code{type="hazard"}, mean numbers of each event in the successive periods are plotted.}
\item{ignore}{Character vector. An optional list of events that should not be plotted.}
\item{with.legend}{Logical or string. Defines if and where the legend of the state colors is plotted.
The default value \code{"auto"} sets the position of the legend automatically.
Other possible values are \code{"right"} or \code{FALSE}. Obsolete value \code{TRUE} is equivalent to "auto".}
\item{cex.legend}{expansion factor for setting the size of the font for the labels in the legend. The default value is 1. Values lesser than 1 will reduce the size of the font, values greater than 1 will increase the size.}
\item{use.layout}{if \code{TRUE}, layout is used to arrange plots when using the group option or plotting a legend.
When layout is activated, the standard \code{par(mfrow=....)} for arranging plots does not work. With \code{with.legend=FALSE} and \code{group=NULL},
layout is automatically deactivated and \code{par(mfrow=....)} can be used.}
\item{legend.prop}{proportion of the graphic area used for plotting the legend when \code{use.layout=TRUE} and \code{with.legend=TRUE}.
Default value is set according to the place (bottom or right of the graphic area) where the legend is plotted. Values from 0 to 1.}
\item{rows}{optional arguments to arrange plots when use.layout=TRUE.}
\item{cols}{optional arguments to arrange plots when use.layout=TRUE.}
\item{xaxis}{Logical or one of \code{"all"} and \code{"bottom"}. If set as \code{TRUE} or "all" (default value) x-axes are drawn on each plot in the graphic. If set as "bottom" and group is used, x-axes are drawn under the plots of the bottom panel only. If FALSE, no x-axis is drawn.}
\item{yaxis}{Logical or one of \code{"all"} or \code{"left"}. If set as \code{TRUE} or \code{"all"} (default value) y-axes are drawn on each plot in the graphic. If \code{"left"} and \code{group} is used, the y-axis is displayed on plots of the left panel only. If \code{FALSE} no y-axis is drawn.}
\item{xlab}{an optional label for the x-axis. If set to \code{NA}, no label is drawn.}
\item{ylab}{an optional label for the y-axis. If set to \code{NA}, no label is drawn. Can be a vector of labels by group level.}
\item{cpal}{Color palette used for the events. If \code{NULL}, a new color palette is generated.}
\item{title}{Deprecated. Use \code{main} instead.}
\item{withlegend}{Deprecated. Use \code{with.legend} instead.}
\item{axes}{Deprecated. Use \code{xaxis} instead.}
\item{\dots}{Additional arguments passed to \code{\link[survival]{plot.survfit}}, \code{\link[survival]{lines.survfit}}, and/or \code{\link[graphics]{legend}}.}
}
\author{Matthias Studer}
\references{
Studer, M., Müller, N.S., Ritschard, G. & Gabadinho, A. (2010), "Classer, discriminer et visualiser des séquences d'événements",
In Extraction et gestion des connaissances (EGC 2010), \emph{Revue des nouvelles technologies de l'information RNTI}. Vol. E-19, pp. 37-48.
}
\examples{
data(actcal.tse)
actcal.tse <- actcal.tse[1:200,]
iseq <- unique(actcal.tse$id)
nseq <- length(iseq)
data(actcal)
actcal <- actcal[rownames(actcal) \%in\% iseq,]
actcal.seqe <- seqecreate(actcal.tse)
seqelength(actcal.seqe) <- rep(12, nseq)
seqedplot(actcal.seqe, type="hazard", breaks=6, group=actcal$sex, lwd=3)
seqedplot(actcal.seqe, type="survival", group=actcal$sex, lwd=3)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{event sequences}
| /man/seqedplot.Rd | no_license | cran/TraMineRextras | R | false | false | 5,109 | rd | \name{seqedplot}
\alias{seqedplot}
\title{
Graphical representation of a set of events sequences.
}
\description{
This function provides two ways to represent a set of events.
The first one (\code{type="survival"}) plots the survival curves of the first occurrence of each event.
The second one (\code{type="hazard"}) plots the mean counts of each event in the successive periods.
}
\usage{
seqedplot(seqe,
group=NULL, breaks=20, ages=NULL,
main="auto", type="survival", ignore=NULL,
with.legend="auto",cex.legend=1,
use.layout=(!is.null(group) | with.legend!=FALSE), legend.prop=NA,
rows=NA, cols=NA,
xaxis="all", xlab="time",
yaxis="all",
ylab=ifelse(type=="survival", "survival probability", "mean number of events"),
cpal=NULL,
title, withlegend, axes, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{seqe}{an event sequence object as defined by the \code{\link{seqecreate}} function.}
\item{group}{Plots one plot for each level of the factor given as argument.}
\item{breaks}{Number of breaks defining a period.}
\item{ages}{Two numeric values representing minimum and maximum ages to be represented.}
\item{main}{String. Title of the graphic. Default is \code{"auto"}, i.e., group levels. Set as \code{NULL} to suppress titles.}
\item{type}{String. Type of plot. One of \code{"survival"} or \code{"hazard"}. If \code{type="survival"}, survival curves of the first occurrence of each event are plotted. If \code{type="hazard"}, mean numbers of each event in the successive periods are plotted.}
\item{ignore}{Character vector. An optional list of events that should not be plotted.}
\item{with.legend}{Logical or string. Defines if and where the legend of the state colors is plotted.
The default value \code{"auto"} sets the position of the legend automatically.
Other possible values are \code{"right"} or \code{FALSE}. Obsolete value \code{TRUE} is equivalent to "auto".}
\item{cex.legend}{expansion factor for setting the size of the font for the labels in the legend. The default value is 1. Values lesser than 1 will reduce the size of the font, values greater than 1 will increase the size.}
\item{use.layout}{if \code{TRUE}, layout is used to arrange plots when using the group option or plotting a legend.
When layout is activated, the standard \code{par(mfrow=....)} for arranging plots does not work. With \code{with.legend=FALSE} and \code{group=NULL},
layout is automatically deactivated and \code{par(mfrow=....)} can be used.}
\item{legend.prop}{proportion of the graphic area used for plotting the legend when \code{use.layout=TRUE} and \code{with.legend=TRUE}.
Default value is set according to the place (bottom or right of the graphic area) where the legend is plotted. Values from 0 to 1.}
\item{rows}{optional arguments to arrange plots when use.layout=TRUE.}
\item{cols}{optional arguments to arrange plots when use.layout=TRUE.}
\item{xaxis}{Logical or one of \code{"all"} and \code{"bottom"}. If set as \code{TRUE} or "all" (default value) x-axes are drawn on each plot in the graphic. If set as "bottom" and group is used, x-axes are drawn under the plots of the bottom panel only. If FALSE, no x-axis is drawn.}
\item{yaxis}{Logical or one of \code{"all"} or \code{"left"}. If set as \code{TRUE} or \code{"all"} (default value) y-axes are drawn on each plot in the graphic. If \code{"left"} and \code{group} is used, the y-axis is displayed on plots of the left panel only. If \code{FALSE} no y-axis is drawn.}
\item{xlab}{an optional label for the x-axis. If set to \code{NA}, no label is drawn.}
\item{ylab}{an optional label for the y-axis. If set to \code{NA}, no label is drawn. Can be a vector of labels by group level.}
\item{cpal}{Color palette used for the events. If \code{NULL}, a new color palette is generated.}
\item{title}{Deprecated. Use \code{main} instead.}
\item{withlegend}{Deprecated. Use \code{with.legend} instead.}
\item{axes}{Deprecated. Use \code{xaxis} instead.}
\item{\dots}{Additional arguments passed to \code{\link[survival]{plot.survfit}}, \code{\link[survival]{lines.survfit}}, and/or \code{\link[graphics]{legend}}.}
}
\author{Matthias Studer}
\references{
Studer, M., Müller, N.S., Ritschard, G. & Gabadinho, A. (2010), "Classer, discriminer et visualiser des séquences d'événements",
In Extraction et gestion des connaissances (EGC 2010), \emph{Revue des nouvelles technologies de l'information RNTI}. Vol. E-19, pp. 37-48.
}
\examples{
data(actcal.tse)
actcal.tse <- actcal.tse[1:200,]
iseq <- unique(actcal.tse$id)
nseq <- length(iseq)
data(actcal)
actcal <- actcal[rownames(actcal) \%in\% iseq,]
actcal.seqe <- seqecreate(actcal.tse)
seqelength(actcal.seqe) <- rep(12, nseq)
seqedplot(actcal.seqe, type="hazard", breaks=6, group=actcal$sex, lwd=3)
seqedplot(actcal.seqe, type="survival", group=actcal$sex, lwd=3)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{event sequences}
|
library(smoothtail)
### Name: falk
### Title: Compute original and smoothed version of Falk's estimator
### Aliases: falk
### Keywords: distribution htest nonparametric
### ** Examples
# generate a reproducible random sample from the generalized Pareto distribution (GPD)
set.seed(1977)
n <- 20 # sample size
gam <- -0.75 # GPD shape parameter
x <- rgpd(n, gam)
## fit the log-concave density estimate (dlc object), without smoothing
est <- logConDens(x, smoothed = FALSE, print = FALSE, gam = NULL, xs = NULL)
# compute Falk's tail index estimator from the fit
falk(est)
| /data/genthat_extracted_code/smoothtail/examples/falk.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 431 | r | library(smoothtail)
### Name: falk
### Title: Compute original and smoothed version of Falk's estimator
### Aliases: falk
### Keywords: distribution htest nonparametric
### ** Examples
# generate a reproducible random sample from the generalized Pareto distribution (GPD)
set.seed(1977)
n <- 20 # sample size
gam <- -0.75 # GPD shape parameter
x <- rgpd(n, gam)
## fit the log-concave density estimate (dlc object), without smoothing
est <- logConDens(x, smoothed = FALSE, print = FALSE, gam = NULL, xs = NULL)
# compute Falk's tail index estimator from the fit
falk(est)
|
#' Summarize lengths in a subset of data
#'
#' Compute summary statistics (median, mean, SD, skewness, kurtosis) of the
#' molecule length distribution in a subset of an \code{\link{electrophoresis}}
#' object, weighting each length observation by its molarity. This helper
#' function is called by other functions that split the object into subsets.
#'
#' @param sample.frame A subset of an \code{\link{electrophoresis}} object containing only contiguous data from one sample.
#'
#' @seealso \code{\link{summarize.peak.region}}, \code{\link{summarize.custom}}
#'
#' @export
summarize.subset <- function(sample.frame) {
	# fewer than two points: distribution statistics are undefined
	if (nrow(sample.frame) < 2) return(c(
		Median = NA,
		Mean = NA,
		SD = NA,
		Skewness = NA,
		Kurtosis = NA
	))
	# molarity acts as the weight of each length observation
	w <- sample.frame$molarity
	len <- sample.frame$length
	w.total <- sum(w)
	# weighted median: first length where cumulative molarity reaches half the total
	med <- round(len[min(which(cumsum(w) >= w.total / 2))])
	# weighted mean and central moments 2-4
	mu <- sum(w * len) / w.total
	dev <- len - mu
	sigma <- sqrt(sum(w * dev^2) / w.total)
	c(
		Median = med,
		Mean = mu,
		SD = sigma,
		Skewness = sum(w * dev^3) / w.total / sigma^3,
		Kurtosis = sum(w * dev^4) / w.total / sigma^4
	)
}
#' Summarize lengths in peaks or regions
#'
#' Compute summary statistics of the molecule length distribution in the reported peaks or regions in an \code{\link{electrophoresis}} object.
#'
#' @param electrophoresis An \code{\link{electrophoresis}} object.
#' @param index The index, or a vector of indexes, of the peaks or regions to summarize (row numbers in \code{electrophoresis$peaks} or \code{electrophoresis$regions}).
#'
#' @return A data frame with one row per requested peak or region and the columns Median, Mean, SD, Skewness, Kurtosis (as produced by \code{summarize.subset}).
#'
#' @seealso \code{\link{summarize.custom}}
#'
#' @name summarize.peak.region
NULL
#' @rdname summarize.peak.region
#' @export
summarize.peak <- function(
electrophoresis,
index = seq(nrow(electrophoresis$peaks))
) as.data.frame(t(sapply(index, function(i) summarize.subset(electrophoresis$data[in.peak(electrophoresis, i),]))))
#' @rdname summarize.peak.region
#' @export
summarize.region <- function(
electrophoresis,
index = seq(nrow(electrophoresis$regions))
) as.data.frame(t(sapply(index, function(i) summarize.subset(electrophoresis$data[in.region(electrophoresis, i),]))))
#' Summarize lengths in a custom region
#'
#' Compute summary statistics of the molecule length distribution between specified boundaries. The summary is computed individually for each sample.
#'
#' @param electrophoresis An \code{\link{electrophoresis}} object.
#' @param lower.bound Lower boundary of the region to summarize.
#' @param upper.bound Upper boundary of the region to summarize.
#'
#' @return A data frame with one row per sample; column names are suffixed with a description of the boundaries applied (e.g. "Mean in 100-500").
#'
#' @seealso \code{\link{summarize.peak}}, \code{\link{summarize.region}}
#'
#' @export
summarize.custom <- function(
electrophoresis,
lower.bound = -Inf,
upper.bound = Inf
) {
stopifnot("upper bound must be greater than lower bound" = upper.bound > lower.bound)
in.this.region <- in.custom.region(electrophoresis$data, lower.bound, upper.bound, "length")
# summarize each sample separately, then stack the per-sample results into one data frame
result <- as.data.frame(t(simplify2array(lapply(unique(electrophoresis$data$sample.index), function(index) summarize.subset(subset(electrophoresis$data, in.this.region & sample.index == index))))))
# annotate the column names with the boundaries that were applied
if (lower.bound == -Inf) {
if (upper.bound != Inf) { # bounded only on right
colnames(result) <- paste(colnames(result), "below", upper.bound)
}
} else if (upper.bound == Inf) { # bounded only on left
colnames(result) <- paste(colnames(result), "above", lower.bound)
} else { # bounded on both sides
colnames(result) <- paste0(colnames(result), " in ", lower.bound, "-", upper.bound)
}
result
}
| /R/summary.R | permissive | jwfoley/bioanalyzeR | R | false | false | 3,771 | r | #' Summarize lengths in a subset of data
#'
#' Compute summary statistics of the molecule length distribution in a subset of an \code{\link{electrophoresis}} object. This helper function is called by other functions that split the object into subsets.
#'
#' @param sample.frame A subset of an \code{\link{electrophoresis}} object containing only contiguous data from one sample.
#'
#' @seealso \code{\link{summarize.peak.region}}, \code{\link{summarize.custom}}
#'
#' @export
summarize.subset <- function(sample.frame) {
	# fewer than two points: distribution statistics are undefined
	if (nrow(sample.frame) < 2) return(c(
		Median = NA,
		Mean = NA,
		SD = NA,
		Skewness = NA,
		Kurtosis = NA
	))
	# molarity acts as the weight of each length observation
	w <- sample.frame$molarity
	len <- sample.frame$length
	w.total <- sum(w)
	# weighted median: first length where cumulative molarity reaches half the total
	med <- round(len[min(which(cumsum(w) >= w.total / 2))])
	# weighted mean and central moments 2-4
	mu <- sum(w * len) / w.total
	dev <- len - mu
	sigma <- sqrt(sum(w * dev^2) / w.total)
	c(
		Median = med,
		Mean = mu,
		SD = sigma,
		Skewness = sum(w * dev^3) / w.total / sigma^3,
		Kurtosis = sum(w * dev^4) / w.total / sigma^4
	)
}
#' Summarize lengths in peaks or regions
#'
#' Compute summary statistics of the molecule length distribution in the reported peaks or regions in an \code{\link{electrophoresis}} object.
#'
#' @param electrophoresis An \code{\link{electrophoresis}} object.
#' @param index The index, or a vector of indexes, of the peaks or regions to summarize (row numbers in \code{electrophoresis$peaks} or \code{electrophoresis$regions}).
#'
#' @seealso \code{\link{summarize.custom}}
#'
#' @name summarize.peak.region
NULL
#' @rdname summarize.peak.region
#' @export
summarize.peak <- function(
electrophoresis,
index = seq(nrow(electrophoresis$peaks))
) as.data.frame(t(sapply(index, function(i) summarize.subset(electrophoresis$data[in.peak(electrophoresis, i),]))))
#' @rdname summarize.peak.region
#' @export
summarize.region <- function(
electrophoresis,
index = seq(nrow(electrophoresis$regions))
) as.data.frame(t(sapply(index, function(i) summarize.subset(electrophoresis$data[in.region(electrophoresis, i),]))))
#' Summarize lengths in a custom region
#'
#' Compute summary statistics of the molecule length distribution between specified boundaries. The summary is computed individually for each sample.
#'
#' @param electrophoresis An \code{\link{electrophoresis}} object.
#' @param lower.bound Lower boundary of the region to summarize.
#' @param upper.bound Upper boundary of the region to summarize.
#'
#' @return A data frame with one row of summary statistics per sample; column names are annotated with the applied boundaries unless the region is unbounded on both sides.
#'
#' @seealso \code{\link{summarize.peak}}, \code{\link{summarize.region}}
#'
#' @export
summarize.custom <- function(
	electrophoresis,
	lower.bound = -Inf,
	upper.bound = Inf
) {
	stopifnot("upper bound must be greater than lower bound" = upper.bound > lower.bound)
	# Flag the data points whose length falls inside the requested region.
	selected <- in.custom.region(electrophoresis$data, lower.bound, upper.bound, "length")
	# Summarize the selected points separately for each sample (one row each).
	per.sample <- lapply(unique(electrophoresis$data$sample.index), function(this.sample) {
		summarize.subset(subset(electrophoresis$data, selected & sample.index == this.sample))
	})
	result <- as.data.frame(t(simplify2array(per.sample)))
	# Annotate the column names with whichever boundaries were actually applied;
	# a region unbounded on both sides keeps the plain column names.
	has.lower <- lower.bound != -Inf
	has.upper <- upper.bound != Inf
	if (has.lower && has.upper) {
		colnames(result) <- paste0(colnames(result), " in ", lower.bound, "-", upper.bound)
	} else if (has.upper) {
		colnames(result) <- paste(colnames(result), "below", upper.bound)
	} else if (has.lower) {
		colnames(result) <- paste(colnames(result), "above", lower.bound)
	}
	result
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LUE_BIOMASS.r
\name{LUE_BIOMASS}
\alias{LUE_BIOMASS}
\title{Light Use Efficiency Model to Estimate Biomass}
\format{A Biomass raster}
\usage{
LUE_BIOMASS(fpar_raster,par,tmin,tmin_min,tmin_max,LUE_optimal)
}
\arguments{
\item{fpar_raster}{fraction of photosynthetically active radiation (fpar) per day raster with .tif format}
\item{par}{clear sky surface photosynthetically active radiation (par) per day raster with .nc file format.}
\item{tmin}{Minimum temperature at 2 metres since previous post-processing per day raster with .nc file format.}
\item{tmin_min}{minimum value of tmin used for the threshold}
\item{tmin_max}{maximum value of tmin used for the threshold}
\item{LUE_optimal}{optimal light use efficiency (LUE) value for the crop type; for example, for a wheat crop LUE_optimal is 3.0 (Djumaniyazova et al., 2010)}
}
\value{
Biomass raster
}
\description{
Contains LUE_BIOMASS() to estimate aboveground biomass firstly by calculating the Absorbed Photosynthetically Active Radiation (APAR) and secondly the actual values of light use efficiency Shi et al.(2007) <doi:10.2134/agronj2006.0260>.
}
\examples{
\dontrun{
## load the data
data(fpar)
data(par1)
data(tmin)
LUE_BIOMASS(fpar,par1,tmin,-2,12,3)
}
library(raster)
fparr <- raster(nc=2, nr=2)
values(fparr)<-runif(ncell(fparr),min =0.2,max= 0.8)
par11<- brick(nc=2, nr=2, nl=2)
values(par11)<-runif(ncell(par11),min =169076.9,max= 924474.6)
tminn <- brick(nc=2, nr=2, nl=2)
values(tminn)<-runif(ncell(tminn),min = 278,max= 281)
LUE_BIOMASS(fparr,par11,tminn,-2,12,3)
}
\references{
Djumaniyazova Y, Sommer R, Ibragimov N, Ruzimov J, Lamers J & Vlek P (2010) Simulating water use and N response of winter wheat in the irrigated floodplains of Northwest Uzbekistan. Field Crops Research 116, 239-251.
Shi Z, Ruecker G R,Mueller M, Conrad C, Ibragimov N, Lamers J P A, Martius C, Strunz G, Dech S & Vlek P L G (2007) Modeling of Cotton Yields in the Amu Darya River Floodplains of Uzbekistan Integrating Multitemporal Remote Sensing and Minimum Field Data. Agronomy Journal 99, 1317-1326.
}
\keyword{datasets}
| /man/LUE_BIOMASS.Rd | no_license | cran/lue | R | false | true | 2,190 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LUE_BIOMASS.r
\name{LUE_BIOMASS}
\alias{LUE_BIOMASS}
\title{Light Use Efficiency Model to Estimate Biomass}
\format{A Biomass raster}
\usage{
LUE_BIOMASS(fpar_raster,par,tmin,tmin_min,tmin_max,LUE_optimal)
}
\arguments{
\item{fpar_raster}{fraction of photosynthetically active radiation (fpar) per day raster with .tif format}
\item{par}{clear sky surface photosynthetically active radiation (par) per day raster with .nc file format.}
\item{tmin}{Minimum temperature at 2 metres since previous post-processing per day raster with .nc file format.}
\item{tmin_min}{minimum value of tmin used for the threshold}
\item{tmin_max}{maximum value of tmin used for the threshold}
\item{LUE_optimal}{optimal light use efficiency (LUE) value for the crop type; for example, for a wheat crop LUE_optimal is 3.0 (Djumaniyazova et al., 2010)}
}
\value{
Biomass raster
}
\description{
Contains LUE_BIOMASS() to estimate aboveground biomass firstly by calculating the Absorbed Photosynthetically Active Radiation (APAR) and secondly the actual values of light use efficiency Shi et al.(2007) <doi:10.2134/agronj2006.0260>.
}
\examples{
\dontrun{
## load the data
data(fpar)
data(par1)
data(tmin)
LUE_BIOMASS(fpar,par1,tmin,-2,12,3)
}
library(raster)
fparr <- raster(nc=2, nr=2)
values(fparr)<-runif(ncell(fparr),min =0.2,max= 0.8)
par11<- brick(nc=2, nr=2, nl=2)
values(par11)<-runif(ncell(par11),min =169076.9,max= 924474.6)
tminn <- brick(nc=2, nr=2, nl=2)
values(tminn)<-runif(ncell(tminn),min = 278,max= 281)
LUE_BIOMASS(fparr,par11,tminn,-2,12,3)
}
\references{
Djumaniyazova Y, Sommer R, Ibragimov N, Ruzimov J, Lamers J & Vlek P (2010) Simulating water use and N response of winter wheat in the irrigated floodplains of Northwest Uzbekistan. Field Crops Research 116, 239-251.
Shi Z, Ruecker G R,Mueller M, Conrad C, Ibragimov N, Lamers J P A, Martius C, Strunz G, Dech S & Vlek P L G (2007) Modeling of Cotton Yields in the Amu Darya River Floodplains of Uzbekistan Integrating Multitemporal Remote Sensing and Minimum Field Data. Agronomy Journal 99, 1317-1326.
}
\keyword{datasets}
|
# Shiny UI for the image-georeferencing app: a sidebar with import/parameter
# controls and a main panel with map / input-image / output-image tabs.
fluidPage(theme = shinytheme("superhero"),
# Application title bar, with a GitHub link floated to the right.
titlePanel(withTags(
div("Image Georeferencing",
div(class = 'pull-right',
a(href = 'https://github.com/mrjoh3/georefr',
icon('github'))), hr() )
),
windowTitle = "Image Georeferencing"
),
sidebarLayout(
sidebarPanel(width = 3,
includeMarkdown('instructions.md'),
h3("Import Image"),
# Image upload; the placeholder lists the accepted raster formats.
wellPanel(style = 'background-color: #637281; padding: 10px; margin-top: 5px;',
fileInput('add_file', label = 'Select or drag n drop image', placeholder = 'JPEG, PNG, TIFF or BMP')
),
h3('Parameters'),
tabsetPanel(type = "tabs",
# Tab 1: how reference points are captured (numeric codes read by the server).
tabPanel('Georeference Method',
wellPanel(style = "background: #910505;",
selectizeInput('method', 'Choose',
choices = c('No Click'= 1,
'2 Click Image - Known Map Coordinates' = 2,
'2 Click Image - 2 Click Map Coordinates' = 3
),
selected = 3)
),
# Target coordinate reference system (EPSG code; 3857 = Web Mercator).
numericInput('crs', 'Define CRS', 3857),
# Country list drives the initial map extent shown to the user.
selectInput('cntry', 'Define Output Map Area',
choices = rnaturalearth::countries110$admin,
selected = 'France')),
# Tab 2: manually supplied coordinate bounds, in degrees.
tabPanel('Known Spatial Information',
wellPanel(style = "background: #910505",
sliderInput('xs', 'X Max and Min', min = -180, max = 180, value = c(-10, 150)),
sliderInput('ys', 'Y Max and Min', min = -90, max = 90, value = c(-40, 40))))
),
h3('Run Georeference'),
actionButton('btn', 'Run Georeference', class = "btn-primary"),
tags$hr(),
# Downloads for the georeferenced raster and the reference geometry.
downloadButton('download', 'Corrected Raster'),
downloadButton('geom', 'Reference Geometry')
),
mainPanel(
fluidRow(
column(12,
shiny::tabsetPanel(
# Interactive map editor (mapedit module).
tabPanel('Map',
editModUI("editor", height = 600)),
# Uploaded image with click capture, plus a table of recorded clicks.
tabPanel('Input Image',
fluidRow(column(10,
#shiny::imageOutput('image', click = 'imageClick')),
plotOutput('image', click = 'imageClick')),
column(2,
h4('Image Clicks'),
tableOutput('img_clicks')))
),
# Result of the georeferencing run.
tabPanel('Output Image',
plotOutput('corrected'))
)
) # end col
) # end row
)
)
)
| /app/ui.R | no_license | mrjoh3/georefr | R | false | false | 3,297 | r | fluidPage(theme = shinytheme("superhero"),
# Application title
titlePanel(withTags(
div("Image Georeferencing",
div(class = 'pull-right',
a(href = 'https://github.com/mrjoh3/georefr',
icon('github'))), hr() )
),
windowTitle = "Image Georeferencing"
),
sidebarLayout(
sidebarPanel(width = 3,
includeMarkdown('instructions.md'),
h3("Import Image"),
wellPanel(style = 'background-color: #637281; padding: 10px; margin-top: 5px;',
fileInput('add_file', label = 'Select or drag n drop image', placeholder = 'JPEG, PNG, TIFF or BMP')
),
h3('Parameters'),
tabsetPanel(type = "tabs",
tabPanel('Georeference Method',
wellPanel(style = "background: #910505;",
selectizeInput('method', 'Choose',
choices = c('No Click'= 1,
'2 Click Image - Known Map Coordinates' = 2,
'2 Click Image - 2 Click Map Coordinates' = 3
),
selected = 3)
),
numericInput('crs', 'Define CRS', 3857),
selectInput('cntry', 'Define Output Map Area',
choices = rnaturalearth::countries110$admin,
selected = 'France')),
tabPanel('Known Spatial Information',
wellPanel(style = "background: #910505",
sliderInput('xs', 'X Max and Min', min = -180, max = 180, value = c(-10, 150)),
sliderInput('ys', 'Y Max and Min', min = -90, max = 90, value = c(-40, 40))))
),
h3('Run Georeference'),
actionButton('btn', 'Run Georeference', class = "btn-primary"),
tags$hr(),
downloadButton('download', 'Corrected Raster'),
downloadButton('geom', 'Reference Geometry')
),
mainPanel(
fluidRow(
column(12,
shiny::tabsetPanel(
tabPanel('Map',
editModUI("editor", height = 600)),
tabPanel('Input Image',
fluidRow(column(10,
#shiny::imageOutput('image', click = 'imageClick')),
plotOutput('image', click = 'imageClick')),
column(2,
h4('Image Clicks'),
tableOutput('img_clicks')))
),
tabPanel('Output Image',
plotOutput('corrected'))
)
) # end col
) # end row
)
)
)
|
/bin/DEG_Analysis/v3.4/bin/Enrichment/GOClassificationMap2.r | no_license | baibaijingjing/NoRef | R | false | false | 7,261 | r | ||
# Known-good parameter set should yield p ~= 0.977 (regression check).
test_that('efftox_solve_p returns expected result', {
  p <- efftox_solve_p(eff0 = 0.5, tox1 = 0.65, eff_star = 0.7, tox_star = 0.25)
  expect_equal(round(p, 3), 0.977)
})
# Every probability argument must lie strictly inside (0, 1): setting any one
# of them to exactly 0 or exactly 1 should raise an error.  The loops below
# generate the same eight test cases, with identical names and in the same
# order, as writing each test_that() call out by hand.
valid_args <- list(eff0 = 0.5, tox1 = 0.65, eff_star = 0.7, tox_star = 0.25)
for (boundary in c(0, 1)) {
  for (arg_name in names(valid_args)) {
    label <- if (boundary == 0) 'zero' else 'unit'
    test_that(paste('efftox_solve_p throws error on', label, arg_name), {
      args <- valid_args
      args[[arg_name]] <- boundary
      expect_error(do.call(efftox_solve_p, args))
    })
  }
}
# TODO
# efftox_get_tox()
# efftox_superiority()
# efftox_analysis_to_df()
# efftox_utility()
| /tests/testthat/test_efftox.R | no_license | brockk/trialr | R | false | false | 1,696 | r |
test_that('efftox_solve_p returns expected result', {
p <- efftox_solve_p(eff0 = 0.5, tox1 = 0.65, eff_star = 0.7, tox_star = 0.25)
expect_equal(round(p, 3), 0.977)
})
test_that('efftox_solve_p throws error on zero eff0', {
expect_error(efftox_solve_p(eff0 = 0, tox1 = 0.65,
eff_star = 0.7, tox_star = 0.25))
})
test_that('efftox_solve_p throws error on zero tox1', {
expect_error(efftox_solve_p(eff0 = 0.5, tox1 = 0,
eff_star = 0.7, tox_star = 0.25))
})
test_that('efftox_solve_p throws error on zero eff_star', {
expect_error(efftox_solve_p(eff0 = 0.5, tox1 = 0.65,
eff_star = 0, tox_star = 0.25))
})
test_that('efftox_solve_p throws error on zero tox_star', {
expect_error(efftox_solve_p(eff0 = 0.5, tox1 = 0.65,
eff_star = 0.7, tox_star = 0))
})
test_that('efftox_solve_p throws error on unit eff0', {
expect_error(efftox_solve_p(eff0 = 1, tox1 = 0.65,
eff_star = 0.7, tox_star = 0.25))
})
test_that('efftox_solve_p throws error on unit tox1', {
expect_error(efftox_solve_p(eff0 = 0.5, tox1 = 1,
eff_star = 0.7, tox_star = 0.25))
})
test_that('efftox_solve_p throws error on unit eff_star', {
expect_error(efftox_solve_p(eff0 = 0.5, tox1 = 0.65,
eff_star = 1, tox_star = 0.25))
})
test_that('efftox_solve_p throws error on unit tox_star', {
expect_error(efftox_solve_p(eff0 = 0.5, tox1 = 0.65,
eff_star = 0.7, tox_star = 1))
})
# TODO
# efftox_get_tox()
# efftox_superiority()
# efftox_analysis_to_df()
# efftox_utility()
|
#' Jekyll-compatible Markdown output format
#'
#' An \pkg{rmarkdown} output format that renders GitHub-flavoured Markdown
#' while keeping dollar-delimited TeX math intact (rendered with MathJax via
#' pandoc's \code{--mathjax} flag), using this package's own pandoc template.
#' Most arguments are forwarded to \code{rmarkdown::md_document()}.
#'
#' @param toc,toc_depth Table-of-contents options.
#' @param fig_width,fig_height,dev Figure sizing and graphics device.
#' @param df_print Data frame printing method.
#' @param includes,md_extensions,pandoc_args Passed through to pandoc.
#' @param hard_line_breaks If \code{FALSE}, keep the \code{-hard_line_breaks} variant.
#' @param html_preview Also render a standalone HTML preview of the output?
#' @return An rmarkdown output format object suitable for use in YAML metadata.
jekyll_md_document <- function (toc = FALSE, toc_depth = 3, fig_width = 7, fig_height = 5,
    dev = "png", df_print = "default", includes = NULL, md_extensions = NULL,
    hard_line_breaks = TRUE, pandoc_args = NULL, html_preview = TRUE)
{
    # library() rather than require(): a missing dependency should fail loudly
    # here instead of surfacing later as a cryptic "could not find function".
    library(rmarkdown)
    library(stringr)  # attached for downstream use; not called directly below
    # Point pandoc at this package's Jekyll template and enable MathJax math.
    pandoc_args <- c(pandoc_args, "--mathjax", "--template", pandoc_path_arg(paste0(system.file(package="eehutils"), "/rmarkdown/templates/jekyll_md_document/resources/default.md")))
    pandoc2 <- rmarkdown:::pandoc2.0()
    # Pandoc >= 2 renamed the GitHub-flavoured Markdown writer "gfm".
    variant <- if (pandoc2)
        "gfm"
    else "markdown_github"
    if (!hard_line_breaks)
        variant <- paste0(variant, "-hard_line_breaks")
    # Disable ASCII identifiers and preserve $...$ math for MathJax.
    variant <- paste0(variant, "-ascii_identifiers+tex_math_dollars")
    format <- md_document(variant = variant, toc = toc, toc_depth = toc_depth,
        fig_width = fig_width, fig_height = fig_height, dev = dev,
        df_print = df_print, includes = includes, md_extensions = md_extensions,
        pandoc_args = pandoc_args)
    # Remove the ascii_identifiers extension from the pandoc "from" spec that
    # md_document() built (it is kept only in the writer variant above).
    format$pandoc$from <- gsub("+ascii_identifiers", "", format$pandoc$from,
        fixed = TRUE)
    if (html_preview) {
        # After the Markdown output is written, also render a self-contained
        # HTML preview styled with GitHub's stylesheet.
        format$post_processor <- function(metadata, input_file,
            output_file, clean, verbose) {
            css <- pandoc_path_arg(rmarkdown:::rmarkdown_system_file("rmarkdown/templates/github_document/resources/github.css"))
            args <- c("--standalone", "--self-contained", "--highlight-style",
                "pygments", "--template", pandoc_path_arg(rmarkdown:::rmarkdown_system_file("rmarkdown/templates/github_document/resources/preview.html")),
                "--variable", paste0("github-markdown-css:",
                css), "--email-obfuscation", "none", if (pandoc2) c("--metadata",
                "pagetitle=PREVIEW"))
            preview_file <- rmarkdown:::file_with_ext(output_file, "html")
            pandoc_convert(input = output_file, to = "html",
                from = variant, output = preview_file, options = args,
                verbose = verbose)
            # The IDE can request the preview in a specific directory via this
            # environment variable; relocate the file there when set.
            preview_dir <- Sys.getenv("RMARKDOWN_PREVIEW_DIR",
                unset = NA)
            if (!is.na(preview_dir)) {
                relocated_preview_file <- tempfile("preview-",
                    preview_dir, ".html")
                file.copy(preview_file, relocated_preview_file)
                file.remove(preview_file)
                preview_file <- relocated_preview_file
            }
            if (verbose)
                message("\nPreview created: ", preview_file)
            # Post-processors must return the (unchanged) output file path.
            output_file
        }
    }
    format
} | /R/jekyll_md_document.r | no_license | eeholmes/eehutils | R | false | false | 2,738 | r | jekyll_md_document = function (toc = FALSE, toc_depth = 3, fig_width = 7, fig_height = 5,
dev = "png", df_print = "default", includes = NULL, md_extensions = NULL,
hard_line_breaks = TRUE, pandoc_args = NULL, html_preview = TRUE)
{
require(rmarkdown)
require(stringr)
pandoc_args <- c(pandoc_args, "--mathjax", "--template", pandoc_path_arg(paste(system.file(package="eehutils"),"/rmarkdown/templates/jekyll_md_document/resources/default.md",sep="")))
pandoc2 <- rmarkdown:::pandoc2.0()
variant <- if (pandoc2)
"gfm"
else "markdown_github"
if (!hard_line_breaks)
variant <- paste0(variant, "-hard_line_breaks")
variant <- paste0(variant, "-ascii_identifiers+tex_math_dollars")
format <- md_document(variant = variant, toc = toc, toc_depth = toc_depth,
fig_width = fig_width, fig_height = fig_height, dev = dev,
df_print = df_print, includes = includes, md_extensions = md_extensions,
pandoc_args = pandoc_args)
format$pandoc$from <- gsub("+ascii_identifiers", "", format$pandoc$from,
fixed = TRUE)
if (html_preview) {
format$post_processor <- function(metadata, input_file,
output_file, clean, verbose) {
css <- pandoc_path_arg(rmarkdown:::rmarkdown_system_file("rmarkdown/templates/github_document/resources/github.css"))
args <- c("--standalone", "--self-contained", "--highlight-style",
"pygments", "--template", pandoc_path_arg(rmarkdown:::rmarkdown_system_file("rmarkdown/templates/github_document/resources/preview.html")),
"--variable", paste0("github-markdown-css:",
css), "--email-obfuscation", "none", if (pandoc2) c("--metadata",
"pagetitle=PREVIEW"))
preview_file <- rmarkdown:::file_with_ext(output_file, "html")
pandoc_convert(input = output_file, to = "html",
from = variant, output = preview_file, options = args,
verbose = verbose)
preview_dir <- Sys.getenv("RMARKDOWN_PREVIEW_DIR",
unset = NA)
if (!is.na(preview_dir)) {
relocated_preview_file <- tempfile("preview-",
preview_dir, ".html")
file.copy(preview_file, relocated_preview_file)
file.remove(preview_file)
preview_file <- relocated_preview_file
}
if (verbose)
message("\nPreview created: ", preview_file)
output_file
}
}
format
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarizing_functions.R
\name{get_number_farms}
\alias{get_number_farms}
\title{Number of Affected Farms}
\usage{
get_number_farms(results)
}
\arguments{
\item{results}{4-dimensional array of results (compartment, patch, time, simulation)}
}
\description{
Returns a dataframe of the simulation number and the number of farms affected by the outbreak
}
| /man/get_number_farms.Rd | permissive | ecohealthalliance/metaflu | R | false | true | 430 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarizing_functions.R
\name{get_number_farms}
\alias{get_number_farms}
\title{Number of Affected Farms}
\usage{
get_number_farms(results)
}
\arguments{
\item{results}{4-dimensional array of results (compartment, patch, time, simulation)}
}
\description{
Returns a dataframe of the simulation number and the number of farms affected by the outbreak
}
|
library(pracma)
library(readr)
library(tibble)
library(signal)
library(readxl)
library(dplyr)
library(tidyr)
library(ggplot2)
library(ggcyto)
# Reference recording: only its time column is reused below to supply a
# correctly formatted time axis.
TF <- read_excel("C:/Users/alahmada/Desktop/CHI_study/vis/R/QT_417_Baseline.xlsx")
colnames(TF) <- c("time", "II")
# Load the ECG recording of interest and standardise the column names
# (time stamp + lead II voltage in mV).
ECG <- read_excel("C:/Users/alahmada/Desktop/CHI_study/vis/R/537/QT_537.xlsx")
colnames(ECG) <- c("time", "II")
# Replace the time column with the reference file's time axis
# (assumes both recordings share the same sampling grid -- TODO confirm).
ECG$time <- TF$time
# Quick visual check of the raw trace.  NB: base plot() returns NULL, so the
# trailing `+ grid()` is not ggplot-style addition; both calls return NULL
# and the `+` is a harmless no-op that still draws the grid as a side effect.
plot(ECG$II, type="l", col="navy") + grid()
# --- R-peak detection ---
# Candidate peaks: above 0.6 mV and at least 100 samples apart.
peaks1 <- findpeaks(ECG$II, minpeakheight=0.6, minpeakdistance=100)
# Also accept flat-topped peaks (a run of repeated maximum samples), which
# the default peak pattern would miss.
peaks2 <- findpeaks(ECG$II, minpeakheight=0.6, minpeakdistance=100, peakpat = "[+]{1,}[0]{1,}[-]{1,}")
# Combine both candidate sets into a single matrix of R peaks.
peaks <- rbind(peaks1,peaks2)
# Overlay the detected peaks on the trace (col 2 = sample index, col 1 = mV).
points(peaks[, 2], peaks[, 1], pch=20, col="maroon")
# Name the findpeaks() columns (value, position, start, end); only the first
# two are needed, so drop the rest.
colnames(peaks) <- c("R_mv","time","no_need1","no_need2")
Rpeaks<-peaks[,-3:-4]
# Sort by time: the R-R interval computation below assumes ascending order.
new_Rpeaks_sorted <- Rpeaks[order(Rpeaks[, 2]), ]
new_Rpeaks_sorted
# Keep only the sorted peak times; dropping the mV column leaves a plain
# numeric vector of peak positions.
df_Rpeaks_RR <- as.data.frame(new_Rpeaks_sorted)
df_Rpeaks_RR <- df_Rpeaks_RR[,-1]
df_Rpeaks_RR
# Peak times are used later to compute the average R-R interval and HR.
df_Rpeaks_RR
# Convenience vectors reused throughout the rest of the script.
R_peaks_time <- Rpeaks[,2]
Time_RR <- ECG$time
mv_RR <- ECG$II
# --- Average R-R interval and heart rate ---
# Successive differences of the sorted peak times give the R-R intervals.
# diff() replaces the original index loop, whose header `1:length(x)-1`
# actually parsed as `(1:length(x)) - 1` (starting at 0) due to operator
# precedence; the computed intervals are identical for two or more peaks.
RR_interval <- diff(df_Rpeaks_RR)
# Mean R-R interval.  Treated as milliseconds below, which assumes a
# 1 kHz sampling rate (one sample per ms) -- TODO confirm.
RR <- mean(RR_interval)
RR
# Heart rate in beats per minute (60,000 ms per minute).
HR <- 60000/RR
HR
# --- Build the per-sample colour index ---
# For every sample following an R peak, colourVector holds the elapsed time
# (in samples) since that peak, up to maximumColourVector samples.  Windows
# start 28 samples before the annotated peak for late peaks (empirical
# offset -- TODO confirm why 28 / why only for peaks at index >= 100).
maximumColourVector <- 530 # window length; roughly half an R-R interval here
colourVector <- NA
length(colourVector) <- 10000 # must equal the number of samples in the ECG signal
for (R in 1:length(R_peaks_time))
{ if (R_peaks_time[R] >= 100)
index <- R_peaks_time[R] - 28
else index <- R_peaks_time[R]
for (i in 1:maximumColourVector)
{
colourVector[index +i]=i
}
}
# Samples not covered by any window remain NA; recode them to the sentinel
# value 800, which the fill scale later renders as grey.
colourVector[is.na(colourVector)] <- 800
# Clamp the colour index to the displayed range [250, 570], leaving the grey
# sentinel (800) untouched.  Vectorised logical subsetting replaces the
# original element-by-element for loop; the result is identical (no NA values
# remain at this point, so the comparisons are well defined).
LowerColourlimit <- 250
UpperColourLimit <- 570
colourVector[colourVector < LowerColourlimit] <- LowerColourlimit
colourVector[colourVector > UpperColourLimit & colourVector < 800] <- UpperColourLimit
# Bundle the signal, colour index and scale limits, and round-trip through a
# CSV file (read_csv returns a tibble used as the plotting data).
ECG_with_colourScale <- cbind(Time_RR,mv_RR,colourVector,UpperColourLimit,LowerColourlimit)
write.csv(ECG_with_colourScale,"C:/Users/alahmada/Desktop/CHI_study/vis/R/file2.csv")
file2 <- read_csv("C:/Users/alahmada/Desktop/CHI_study/vis/R/file2.csv")
# --- Visualise the ECG with the colour scale ---
# Breakpoint positions for the colour-bar labels, spaced 40 units apart
# below the upper limit.
colorcodesValues <- NA
length(colorcodesValues) <- 9
colorcodesValues[1] <- 255 # purple
colorcodesValues[2] <- UpperColourLimit - (40*7) #blue
colorcodesValues[3] <- UpperColourLimit - (40*6) #lime
colorcodesValues[4] <- UpperColourLimit - (40*5) #green
colorcodesValues[5] <- UpperColourLimit - (40*4) #yellow
colorcodesValues[6] <- UpperColourLimit - (40*3) #orange
colorcodesValues[7] <- UpperColourLimit - (40*2) #dark orange
colorcodesValues[8] <- UpperColourLimit - (40*1) #red
colorcodesValues[9] <- 565 #dark red
# Pseudo-colour gradient built from the reversed Spectral palette.
myColor <- rev(RColorBrewer::brewer.pal(11, "Spectral"))
myColor_scale_fill <- scale_fill_gradientn(colours = myColor,breaks=c(colorcodesValues[1],colorcodesValues[2],colorcodesValues[3],colorcodesValues[4],colorcodesValues[5],colorcodesValues[6],colorcodesValues[7],colorcodesValues[8],colorcodesValues[9]),labels=c("> 250",290,330,370,410,450,490,530,"< 570"),limits=c(250,570))
# Half of one representative R-R interval (between peaks 3 and 4), converted
# to seconds; used to place the dashed half-interval guide lines.
half_RR_value <- (df_Rpeaks_RR[4] - df_Rpeaks_RR[3] ) / 2
half_RR_value <- half_RR_value/1000
half_RR_value
# Plot the ECG trace coloured by time-since-R-peak.
p <- ggplot(data=file2, aes(x=Time_RR, y=mv_RR, fill=colourVector))
p +
# Solid vertical lines mark the R peaks; dashed lines mark half R-R intervals.
geom_vline(xintercept = 0, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[2]/1000, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[3]/1000, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[4]/1000, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[5]/1000, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[6]/1000, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[7]/1000, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[8]/1000, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[9]/1000, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[10]/1000, size=0.7) +
geom_vline(xintercept = 0 + half_RR_value, linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[2]/1000) + half_RR_value, linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[3]/1000) + half_RR_value,linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[4]/1000) + half_RR_value, linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[5]/1000) + half_RR_value, linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[6]/1000) + half_RR_value, linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[7]/1000) + half_RR_value, linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[8]/1000) + half_RR_value, linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[9]/1000) + half_RR_value, linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[10]/1000) + half_RR_value, linetype="dashed", size=0.7) +
# Axis ranges emulate standard ECG graph paper (minor grid every 0.1 mV / 40 ms).
scale_y_continuous(minor_breaks = seq(-0.5, +1, 0.1),breaks = seq(-0.5, +1, 0.5), lim = c(-0.5, +1)) +
scale_x_continuous(minor_breaks = seq(0 , 9.3, 0.04),breaks = seq(0, 9.2, 0.2), lim = c(0,9.3)) +
# Alternating shaded bands every 0.5 mV as a reading aid.
geom_ribbon(aes(ymin=-1, ymax=-0.5),fill="grey70",alpha =0,size=1) +
geom_ribbon(aes(ymin=-0.5, ymax=0),fill="grey70",alpha =0.3,size=1) +
geom_ribbon(aes(ymin=0, ymax=0.5),fill="grey70",alpha =0,size=1) +
geom_ribbon(aes(ymin=0.5, ymax=1),fill="grey70",alpha =0.3,size=1) +
theme(panel.grid.minor = element_line(colour="white"), panel.grid.major = element_line(colour = "white", size=1),legend.key.height=grid::unit(2.5,"cm"),legend.key.width = unit(1,"cm")) +
# Coloured bars carry the fill scale; the black line is the ECG trace itself.
geom_bar(stat="identity", position ="dodge") + geom_line(size=0.73) + myColor_scale_fill +
geom_line(data=file2,aes(x=Time_RR, y=0), colour="#444444", lwd=0.5) +
theme(axis.text.x= element_text(size=20, color = "black",face="bold")) +
theme(axis.text.y= element_text(size=20, color = "black",face="bold")) +
geom_line(data=file2,aes(x=Time_RR, y=0), colour="#444444", lwd=0.5) +
labs(x ="Time (seconds)", y="mV", fill = "1/2 RR-Interval (ms)") + theme(legend.text=element_text(size=15),axis.title=element_text(size=25,face="bold"),legend.title=element_text(size=20))
# Save the most recently displayed plot at poster width.
ggsave("C:/Users/alahmada/Desktop/CHI_study/vis/R/QT_537_CC.png", width=32.31, height=6.14)
| /R-scripts/QT_537/ECG_with_ColourScale_Cartesian_537.R | no_license | mbchxaa6/ECG_QT_Visualisation | R | false | false | 7,527 | r | library(pracma)
library(readr)
library(tibble)
library(signal)
library(readxl)
library(dplyr)
library(tidyr)
library(ggplot2)
library(ggcyto)
#keep one column for the time
TF <- read_excel("C:/Users/alahmada/Desktop/CHI_study/vis/R/QT_417_Baseline.xlsx")
colnames(TF) <- c("time", "II")
#read the file and change the column names
ECG <- read_excel("C:/Users/alahmada/Desktop/CHI_study/vis/R/537/QT_537.xlsx")
colnames(ECG) <- c("time", "II")
#format the time
ECG$time <- TF$time
#plot the signal
plot(ECG$II, type="l", col="navy") + grid()
#find R peaks
# find peaks more than 0.6 mv
peaks1 <- findpeaks(ECG$II, minpeakheight=0.6, minpeakdistance=100)
#include peaks that have two sustained repeated values
peaks2 <- findpeaks(ECG$II, minpeakheight=0.6, minpeakdistance=100, peakpat = "[+]{1,}[0]{1,}[-]{1,}")
#comibe all R peaks in one dataframe
peaks <- rbind(peaks1,peaks2)
#plot R peaks using red circels
points(peaks[, 2], peaks[, 1], pch=20, col="maroon")
#name the columns in R peaks matrix
colnames(peaks) <- c("R_mv","time","no_need1","no_need2")
#remove unneeded columns
Rpeaks<-peaks[,-3:-4]
#sort the peaks by time, we need them sorted when calucating RR-intervals
new_Rpeaks_sorted <- Rpeaks[order(Rpeaks[, 2]), ]
new_Rpeaks_sorted
#convert R_peaks matrix to dataframe
df_Rpeaks_RR <- as.data.frame(new_Rpeaks_sorted)
#delete the vector of mv, and keep only time vector
df_Rpeaks_RR <- df_Rpeaks_RR[,-1]
df_Rpeaks_RR
# values of RR-intervals will be used to caluclate average RR-interval and HR
df_Rpeaks_RR
# assign the index time of R-peaks in a vector
R_peaks_time <- Rpeaks[,2]
Time_RR <- ECG$time
mv_RR <- ECG$II
#Calculate the heart rate from the RR-intervals
# first calulcate the RR-intervals
RR_interval <- NA
for (i in 1:length(df_Rpeaks_RR)-1)
{
RR_interval[i] = df_Rpeaks_RR[i+1] - df_Rpeaks_RR[i]
}
#take the average RR-interval
RR <- mean(RR_interval)
RR
#caluclate the heart rate
HR <- 60000/RR
HR
#next code for creating the colour scale vector
maximumColourVector <- 530 #half RR-interval
colourVector <- NA
length(colourVector) <- 10000 #this equals to the length of ECG signal
for (R in 1:length(R_peaks_time))
{ if (R_peaks_time[R] >= 100)
index <- R_peaks_time[R] - 28
else index <- R_peaks_time[R]
for (i in 1:maximumColourVector)
{
colourVector[index +i]=i
}
}
#any time out of the 570 range (i.e. NA) make it grey
colourVector[is.na(colourVector)] <- 800
LowerColourlimit <- 250
UpperColourLimit <- 570
for (n in 1:length(colourVector))
{
if(colourVector[n] < LowerColourlimit)
{ colourVector[n] <- LowerColourlimit}
if(colourVector[n] > UpperColourLimit && colourVector[n]< 800)
{colourVector[n] <- UpperColourLimit}
}
#create a new ECG file
ECG_with_colourScale <- cbind(Time_RR,mv_RR,colourVector,UpperColourLimit,LowerColourlimit)
write.csv(ECG_with_colourScale,"C:/Users/alahmada/Desktop/CHI_study/vis/R/file2.csv")
file2 <- read_csv("C:/Users/alahmada/Desktop/CHI_study/vis/R/file2.csv")
#next script is for visulising the ECG with the colour scale
colorcodesValues <- NA
length(colorcodesValues) <- 9
colorcodesValues[1] <- 255 # purple
colorcodesValues[2] <- UpperColourLimit - (40*7) #blue
colorcodesValues[3] <- UpperColourLimit - (40*6) #lime
colorcodesValues[4] <- UpperColourLimit - (40*5) #green
colorcodesValues[5] <- UpperColourLimit - (40*4) #yellow
colorcodesValues[6] <- UpperColourLimit - (40*3) #orange
colorcodesValues[7] <- UpperColourLimit - (40*2) #dark orange
colorcodesValues[8] <- UpperColourLimit - (40*1) #red
colorcodesValues[9] <- 565 #dark red
#create the pesudo colour vector using spectral codes
myColor <- rev(RColorBrewer::brewer.pal(11, "Spectral"))
myColor_scale_fill <- scale_fill_gradientn(colours = myColor,breaks=c(colorcodesValues[1],colorcodesValues[2],colorcodesValues[3],colorcodesValues[4],colorcodesValues[5],colorcodesValues[6],colorcodesValues[7],colorcodesValues[8],colorcodesValues[9]),labels=c("> 250",290,330,370,410,450,490,530,"< 570"),limits=c(250,570))
#caluclate the incresed value fo half RR0interval to draw the dashed lines
half_RR_value <- (df_Rpeaks_RR[4] - df_Rpeaks_RR[3] ) / 2
#covert to millisconds
half_RR_value <- half_RR_value/1000
half_RR_value
# plot the ECG with the colour scale
p <- ggplot(data=file2, aes(x=Time_RR, y=mv_RR, fill=colourVector))
p +
#draw the vertical lines of R-peaks and dahsed lines for half RR-intervals
geom_vline(xintercept = 0, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[2]/1000, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[3]/1000, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[4]/1000, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[5]/1000, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[6]/1000, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[7]/1000, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[8]/1000, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[9]/1000, size=0.7) +
geom_vline(xintercept = df_Rpeaks_RR[10]/1000, size=0.7) +
geom_vline(xintercept = 0 + half_RR_value, linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[2]/1000) + half_RR_value, linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[3]/1000) + half_RR_value,linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[4]/1000) + half_RR_value, linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[5]/1000) + half_RR_value, linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[6]/1000) + half_RR_value, linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[7]/1000) + half_RR_value, linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[8]/1000) + half_RR_value, linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[9]/1000) + half_RR_value, linetype="dashed", size=0.7) +
geom_vline(xintercept = (df_Rpeaks_RR[10]/1000) + half_RR_value, linetype="dashed", size=0.7) +
scale_y_continuous(minor_breaks = seq(-0.5, +1, 0.1),breaks = seq(-0.5, +1, 0.5), lim = c(-0.5, +1)) +
scale_x_continuous(minor_breaks = seq(0 , 9.3, 0.04),breaks = seq(0, 9.2, 0.2), lim = c(0,9.3)) +
geom_ribbon(aes(ymin=-1, ymax=-0.5),fill="grey70",alpha =0,size=1) +
geom_ribbon(aes(ymin=-0.5, ymax=0),fill="grey70",alpha =0.3,size=1) +
geom_ribbon(aes(ymin=0, ymax=0.5),fill="grey70",alpha =0,size=1) +
geom_ribbon(aes(ymin=0.5, ymax=1),fill="grey70",alpha =0.3,size=1) +
theme(panel.grid.minor = element_line(colour="white"), panel.grid.major = element_line(colour = "white", size=1),legend.key.height=grid::unit(2.5,"cm"),legend.key.width = unit(1,"cm")) +
geom_bar(stat="identity", position ="dodge") + geom_line(size=0.73) + myColor_scale_fill +
geom_line(data=file2,aes(x=Time_RR, y=0), colour="#444444", lwd=0.5) +
theme(axis.text.x= element_text(size=20, color = "black",face="bold")) +
theme(axis.text.y= element_text(size=20, color = "black",face="bold")) +
geom_line(data=file2,aes(x=Time_RR, y=0), colour="#444444", lwd=0.5) +
labs(x ="Time (seconds)", y="mV", fill = "1/2 RR-Interval (ms)") + theme(legend.text=element_text(size=15),axis.title=element_text(size=25,face="bold"),legend.title=element_text(size=20))
ggsave("C:/Users/alahmada/Desktop/CHI_study/vis/R/QT_537_CC.png", width=32.31, height=6.14)
|
# Weekly COVID-19 cases per age group, expressed per 100,000 inhabitants.
# Expects two objects to exist in the calling environment (see the
# commented-out local imports below): RIVM_casus_landelijk (daily case
# records with at least `date` and `Agegroup` columns) and CBS_age_10yrs_GH
# (population per 10-year age group, with a `population` column).
# NOTE(review): floor_date() comes from lubridate, which library(tidyverse)
# only attaches in tidyverse >= 2.0 -- confirm lubridate is on the search path.
library(tidyverse)
#### local import ####
## CBS_age_10yrs_GH <-read.csv("C:\\Rdir\\data-contstant\\CBS_age_10yr_groups.csv",sep=";")
## read.aantal.landelijk.path <- paste("C:\\Rdir\\data\\",Sys.Date(),"\\", Sys.Date(), "_COVID-19_casus_landelijk.csv",sep="")
## RIVM_casus_landelijk <- read.csv(read.aantal.landelijk.path,sep=";")
today = Sys.Date()
#casus.working <-cases_per_day
casus.working <-RIVM_casus_landelijk
casus.working$week<-strftime(casus.working$date,format = "%V") #adding week_number to the case
# Monday-based start of the ISO week each case falls in
casus.working$weekbegin <- floor_date(casus.working$date, " week", week_start = 1)
# Count cases per week per age group, then pick up the national age distribution
casus.working<-count(casus.working,weekbegin,Agegroup)
# Merge with population counts and compute cases per 100,000 ("phd")
casus.working <- merge(casus.working,CBS_age_10yrs_GH)
casus.working$phd <- round(casus.working$n*100000/casus.working$population,0)
#weeknumber <- isoweek(Sys.Date())
weeknumber<-strftime(Sys.Date(),format = "%V")
# Subset the desired weeks (week starts after 2020-07-01, up to today)
casus.working <- casus.working[casus.working$weekbegin>"2020-07-01"&casus.working$weekbegin<=today,]
#Heatmap
# Heatmap (Dutch labels): weeks on x, age groups on y, cell colour and text
# give cases per 100,000 (phd). Saved as data/02_leeftijd_heatmap.png.
# NOTE(review): date_format() comes from the scales package -- confirm it is
# attached, tidyverse does not attach scales by default.
ggplot(casus.working,aes(weekbegin,Agegroup,fill=phd))+
geom_tile(size=1.5,color="#F5F5F5")+
geom_text(label=casus.working$phd,size=5)+
# sqrt transform compresses large outbreaks; diverging blue-yellow-red scale
scale_fill_gradient2(trans="sqrt",low = "#5B9BD5",mid="#FFEB84",midpoint = 15,
high = "#c00000")+
# NOTE(review): the first positional argument of scale_x_date() is `name`
# (the axis title), so as.Date("2020-07-06") becomes the title here --
# possibly `limits` was intended; confirm before changing.
scale_x_date(as.Date("2020-07-06"),breaks = "1 week", labels = date_format("%V"))+
coord_cartesian(expand = FALSE)+
#ggtitle("Aantal geconstateerde besmettingen per 100.000 per week")+
theme_minimal()+
xlab("")+
ylab("")+
theme(legend.position = "none")+
labs(title = "Geconstateerde besmettingen COVID-19",
subtitle = "Aantal positief geteste mensen per 100.000 binnen de leeftijdsgroep. Week 3 & 4 zullen nog sterk stijgen",fill=NULL,
caption = paste("Bron data: RIVM / CBS | Plot: @YorickB | ",Sys.Date()))+
theme(plot.background = element_rect(fill = "#F5F5F5"),
panel.background = element_rect(fill = "#F5F5F5", colour = "#F5F5F5"),
plot.title = element_text(hjust = 0.5,size = 25,face = "bold"),
plot.subtitle = element_text(hjust=0.5,color = "black", face = "italic"),
axis.text = element_text(size=14,color = "black",face = "bold"),
axis.ticks = element_line(colour = "#F5F5F5", size = 1, linetype = "solid"),
axis.ticks.length = unit(0.3, "cm"),
axis.title.x=element_blank())# +
ggsave("data/02_leeftijd_heatmap.png",width=16, height = 9)
# English-language twin of the heatmap above (identical data and geometry,
# translated title/subtitle/caption, slightly larger title).
# Saved as data/02_EN_leeftijd_heatmap.png.
ggplot(casus.working,aes(weekbegin,Agegroup,fill=phd))+
geom_tile(size=1.5,color="#F5F5F5")+
geom_text(label=casus.working$phd,size=5)+
scale_fill_gradient2(trans="sqrt",low = "#5B9BD5",mid="#FFEB84",midpoint = 15,
high = "#c00000")+
scale_x_date(as.Date("2020-07-06"),breaks = "1 week", labels = date_format("%V"))+
coord_cartesian(expand = FALSE)+
#ggtitle("cases per 100.000 per week")+
theme_minimal()+
xlab("")+
ylab("")+
theme(legend.position = "none")+
labs(title = "Cases COVID-19",
subtitle = "Number of cases per 100.000, within each agegroup. Week 3 and 4 will still rise.",fill=NULL,
caption = paste("Source: RIVM / CBS | Plot: @YorickB | ",Sys.Date()))+
theme(plot.background = element_rect(fill = "#F5F5F5"),
panel.background = element_rect(fill = "#F5F5F5", colour = "#F5F5F5"),
plot.title = element_text(hjust = 0.5,size = 30,face = "bold"),
plot.subtitle = element_text(hjust=0.5,color = "black", face = "italic"),
axis.text = element_text(size=14,color = "black",face = "bold"),
axis.ticks = element_line(colour = "#F5F5F5", size = 1, linetype = "solid"),
axis.ticks.length = unit(0.3, "cm"),
axis.title.x=element_blank())# +
ggsave("data/02_EN_leeftijd_heatmap.png",width=16, height = 9)
# Subset the desired weeks (week starts after 2020-12-07, up to today)
casus.working <- casus.working[casus.working$weekbegin>"2020-12-07"&casus.working$weekbegin<=today,]
# Treat the week start as a discrete factor so each week gets its own dodged bar
casus.working$weekbegin <- as.factor(casus.working$weekbegin)
# Dodged barchart (Dutch labels): cases per 100,000 (phd) per age group,
# one bar per week. Saved as data/01_leeftijd_barchart.png.
ggplot(casus.working,aes(Agegroup,phd,fill=weekbegin))+
geom_bar(stat="identity", position=position_dodge(0.85),width = 0.7)+
theme_classic()+
theme(legend.position= c(0.5,0.9), legend.direction = "horizontal")+
xlab("")+
ylab("")+
labs(title = "Geconstateerde besmettingen COVID-19",
subtitle = "Aantal positief geteste mensen per 100.000 binnen de leeftijdsgroep. Week 3 & 4 zullen nog sterk stijgen.",
fill="Week",
caption = paste("Bron data: RIVM / CBS | Plot: @YorickB | ",Sys.Date()))+
theme(plot.background = element_rect(fill = "#F5F5F5"), #background color/size (border color and size)
panel.background = element_rect(fill = "#F5F5F5", colour = "#F5F5F5"),
plot.title = element_text(hjust = 0.5,size = 25, face = "bold"),
plot.subtitle = element_text(hjust=0.5,color = "black", face = "italic"),
axis.text = element_text(size=14,color = "black", face = "bold"),
axis.ticks = element_line(colour = "#F5F5F5", size = 1, linetype = "solid"),
axis.ticks.length = unit(0.5, "cm"),
axis.line = element_line(colour = "#F5F5F5"),
panel.grid.major.y = element_line(colour= "lightgray", linetype = "dashed"))+
# 8 manual colours for the 8 plotted weeks; labels replace the factor levels
# (week-start dates) with ISO week numbers 50..53 and 1..4.
scale_fill_manual(values=c('#c6cee6','#adb9dd', '#8fa2d4', '#6383c9', '#416ebd', '#3b64ad', '#f1a069', '#f8cbad' ), labels=c(
"50",
"51",
"52",
"53",
"1",
"2",
"3",
"4"))# +
ggsave("data/01_leeftijd_barchart.png",width=16, height = 9)
# English-language twin of the per-100k barchart above.
# Fix: the labels vector previously listed "50" twice, supplying 9 labels for
# the 8 fill levels/colours; the duplicate is removed so every week lines up
# with its label, matching the Dutch chart. Saved as
# data/01_EN_leeftijd_barchart.png.
ggplot(casus.working,aes(Agegroup,phd,fill=weekbegin))+
geom_bar(stat="identity", position=position_dodge(0.85),width = 0.7)+
theme_classic()+
theme(legend.position= c(0.5,0.9), legend.direction = "horizontal")+
xlab("")+
ylab("")+
labs(title = "Cases COVID-19",
subtitle = "Number of cases per 100.000, within each agegroup. Week 3 and 4 will still rise.",
fill="Week",
caption = paste("Source: RIVM / CBS | Plot: @YorickB | ",Sys.Date()))+
theme(plot.background = element_rect(fill = "#F5F5F5"), #background color/size (border color and size)
panel.background = element_rect(fill = "#F5F5F5", colour = "#F5F5F5"),
plot.title = element_text(hjust = 0.5,size = 25, face = "bold"),
plot.subtitle = element_text(hjust=0.5,color = "black", face = "italic"),
axis.text = element_text(size=14,color = "black", face = "bold"),
axis.ticks = element_line(colour = "#F5F5F5", size = 1, linetype = "solid"),
axis.ticks.length = unit(0.5, "cm"),
axis.line = element_line(colour = "#F5F5F5"),
panel.grid.major.y = element_line(colour= "lightgray", linetype = "dashed"))+
# 8 colours for the 8 plotted weeks; labels are ISO week numbers 50..53, 1..4
scale_fill_manual(values=c('#c6cee6','#adb9dd', '#8fa2d4', '#6383c9', '#416ebd', '#3b64ad', '#f1a069', '#f8cbad' ), labels=c(
"50",
"51",
"52",
"53",
"1",
"2",
"3",
"4"))
ggsave("data/01_EN_leeftijd_barchart.png",width=16, height = 9)
#barchart - abs
# Same dodged barchart, but y is the ABSOLUTE weekly case count `n` rather
# than per-100k. NOTE(review): the title/subtitle still claim "per 100.000",
# and the week labels here run 48..2 while the charts above use 50..4 --
# verify both against the actual factor levels of weekbegin.
ggplot(casus.working,aes(Agegroup,n,fill=weekbegin))+
geom_bar(stat="identity", position=position_dodge(0.85),width = 0.7)+
theme_classic()+
theme(legend.position= c(0.5,0.9), legend.direction = "horizontal")+
xlab("")+
ylab("")+
labs(title = "Geconstateerde besmettingen COVID-19",
subtitle = "Aantal positief geteste mensen per 100.000 binnen de leeftijdsgroep. Week 3 & 4 zullen nog sterk stijgen.",
fill="Week",
caption = paste("Bron data: RIVM / CBS | Plot: @YorickB | ",Sys.Date()))+
theme(plot.background = element_rect(fill = "#F5F5F5"), #background color/size (border color and size)
panel.background = element_rect(fill = "#F5F5F5", colour = "#F5F5F5"),
plot.title = element_text(hjust = 0.5,size = 25, face = "bold"),
plot.subtitle = element_text(hjust=0.5,color = "black", face = "italic"),
axis.text = element_text(size=14,color = "black", face = "bold"),
axis.ticks = element_line(colour = "#F5F5F5", size = 1, linetype = "solid"),
axis.ticks.length = unit(0.5, "cm"),
axis.line = element_line(colour = "#F5F5F5"),
panel.grid.major.y = element_line(colour= "lightgray", linetype = "dashed"))+
scale_fill_manual(values=c('#c6cee6','#adb9dd', '#8fa2d4', '#6383c9', '#416ebd', '#3b64ad', '#f1a069', '#f8cbad' ), labels=c( "48",
"49",
"50",
"51",
"52",
"53",
"1",
"2"))
ggsave("data/01_leeftijd_barchart_abs.png",width=16, height = 9)
#### Data prep for the age-group share ("onderlinge verhouding") plot #####
# Drop aggregate/unknown categories, collapse 10-year bands into broader
# groups, count cases per day per group, then smooth with a trailing 7-day
# rolling mean.
casus.working = filter(RIVM_casus_landelijk, Agegroup != "<50" & Agegroup !="Unknown")
# NOTE(review): str_detect() patterns are regexes -- "90+" parses as "9"
# followed by one-or-more "0", which still matches the literal "90+" level,
# but confirm no other levels match accidentally.
casus.working <- casus.working %>% mutate(age_grouping = case_when(str_detect(Agegroup, "0-9") ~ '0-9',
str_detect(Agegroup, "10-19") ~ '10-19',
str_detect(Agegroup, "20-29") ~ '20-39',
str_detect(Agegroup, "30-39") ~ '20-39',
str_detect(Agegroup, "40-49") ~ '40-59',
str_detect(Agegroup, "50-59") ~ '40-59',
str_detect(Agegroup, "60-69") ~ '60-79',
str_detect(Agegroup, "70-79") ~ '60-79',
str_detect(Agegroup, "80-89") ~ '80+',
str_detect(Agegroup, "90+") ~ '80+',))
casus.working <-count(casus.working,date,age_grouping)
#Take rolling 7-day averages
# NOTE(review): roll_mean() is not tidyverse -- presumably RcppRoll; confirm
# that package is attached elsewhere.
casus.working <- casus.working %>%
group_by(age_grouping) %>%
arrange(date) %>%
mutate(cases_avg=roll_mean(n, 7, align="right", fill=0))
# Keep dates from March 2020 up to (but excluding) yesterday
dag<-strftime(Sys.Date()-1)
casus.working <- casus.working[casus.working$date>"2020-02-29"&casus.working$date<dag,]
casus.working$date <- as.Date(casus.working$date)
# Custom legend-key renderer: draws each legend key as a rectangle covering
# only 60% of the key area, so keys appear with visible spacing.
# The (data, params, size) signature matches ggplot2's draw_key_* contract.
draw_key_polygon3 <- function(data, params, size) {
  # Cap the key border width relative to the key size (same rule as the
  # stock draw_key_polygon()).
  key_lwd <- min(data$size, min(size) / 4)
  key_gp <- grid::gpar(
    col = data$colour,
    fill = alpha(data$fill, data$alpha),  # alpha() provided by ggplot2/scales
    lty = data$linetype,
    lwd = key_lwd * .pt,                  # .pt: ggplot2's mm-to-points factor
    linejoin = "mitre"
  )
  grid::rectGrob(width = grid::unit(0.6, "npc"),
                 height = grid::unit(0.6, "npc"),
                 gp = key_gp)
}
# Install the custom key renderer for all subsequent geom_bar() legends.
GeomBar$draw_key <- draw_key_polygon3
#### PLOT: share of each age group (Dutch labels) ####
# Filled (100%-stacked), y-reversed barchart of the 7-day average per group.
# Fix: the original chained ggsave() onto the plot with a trailing `+`
# (`...)) + ggsave(...)`); ggsave() returns a file path, which cannot be
# added to a ggplot and errors. Every other chart in this file calls
# ggsave() as a separate statement (note the `# +` pattern), so the stray
# `+` is removed here.
ggplot(casus.working, aes(date,cases_avg,fill=age_grouping))+
geom_bar(stat="identity", position=position_fill(), width=1) + scale_y_reverse() +
theme_classic()+
theme(legend.position = "right", legend.direction = "vertical", legend.background =element_rect(fill = "#F5F5F5") , legend.spacing.y = unit(0, "cm"), legend.key.size = unit(1, "cm"))+ #legend.spacing =0.5
xlab("")+
ylab("")+
scale_x_date(date_breaks = "1 month",
date_labels= format("%b"),
limits = as.Date(c("2020-02-27", Sys.Date())))+
# scale_y_continuous( label = percent_format(), sec.axis = sec_axis(~ . * 1, label = percent_format()))+
scale_fill_manual(values=c("darkgray", '#f8cbad','#c55a11', '#2f5597', '#8faadc', '#5b9bd5'))+ # Use custom colors
guides(fill = guide_legend(reverse = TRUE))+
labs(title = "Geconstateerde besmettingen COVID-19",
subtitle = "verhouding tussen de groepen, gebaseerd op 7 daags lopend gemiddelde",
fill="",
caption = paste("Bron data: RIVM | Plot: @YorickB | ",Sys.Date()-1))+
theme(plot.background = element_rect(fill = "#F5F5F5"), #background color/size (border color and size)
panel.background = element_rect(fill = "#F5F5F5", colour = "#F5F5F5"),
plot.title = element_text(hjust = 0.5,size = 25,face = "bold"),
plot.subtitle = element_text(hjust=0.5,color = "black", face = "italic"),
axis.text = element_text(size=14,color = "black",face = "bold"),
axis.ticks = element_line(colour = "#F5F5F5", size = 1, linetype = "solid"),
axis.text.y = element_blank(),
axis.ticks.length = unit(0.1, "cm"),
axis.line = element_line(colour = "#F5F5F5"))
ggsave("data/03_leeftijd_relatief.png",width=16, height = 9)
# English-language twin of the age-group share plot. Here ggsave() is
# correctly a separate statement (no trailing `+` after the chain).
ggplot(casus.working, aes(date,cases_avg,fill=age_grouping))+
geom_bar(stat="identity", position=position_fill(), width=1) + scale_y_reverse() +
theme_classic()+
theme(legend.position = "right", legend.direction = "vertical", legend.background =element_rect(fill = "#F5F5F5") , legend.spacing.y = unit(0, "cm"), legend.key.size = unit(1, "cm"))+ #legend.spacing =0.5
xlab("")+
ylab("")+
guides(fill = guide_legend(reverse = TRUE))+
scale_x_date(date_breaks = "1 month",
date_labels= format("%b"),
limits = as.Date(c("2020-02-27", Sys.Date())))+
labs(title = "Cases COVID-19",
subtitle = "relationship between the age groups, based on the 7 day moving average",fill=NULL,
caption = paste("Source: RIVM / CBS | Plot: @YorickB | ",Sys.Date()))+
theme(plot.background = element_rect(fill = "#F5F5F5"), #background color/size (border color and size)
panel.background = element_rect(fill = "#F5F5F5", colour = "#F5F5F5"),
plot.title = element_text(hjust = 0.5,size = 25,face = "bold"),
plot.subtitle = element_text(hjust=0.5,color = "black", face = "italic"),
axis.text = element_text(size=14,color = "black",face = "bold"),
axis.ticks = element_line(colour = "#F5F5F5", size = 1, linetype = "solid"),
axis.text.y = element_blank(),
axis.ticks.length = unit(0.1, "cm"),
axis.line = element_line(colour = "#F5F5F5"))+
scale_fill_manual(values=c("darkgrey", '#f8cbad','#c55a11', '#2f5597', '#8faadc', '#5b9bd5')) # Use custom colors
ggsave("data/03_EN_leeftijd_relatief.png",width=16, height = 9)
| /Scripts/07_Age-heatmap_barchart.R | no_license | frankgrivel/COVID_data_RIVM_Netherlands | R | false | false | 17,084 | r |
# (Duplicate copy of the script.) Weekly COVID-19 cases per age group,
# per 100,000 inhabitants. Requires RIVM_casus_landelijk (daily cases with
# `date` and `Agegroup`) and CBS_age_10yrs_GH (population per age group)
# to exist in the environment; see the commented-out local imports.
# NOTE(review): floor_date() needs lubridate attached -- confirm.
library(tidyverse)
#### local import ####
## CBS_age_10yrs_GH <-read.csv("C:\\Rdir\\data-contstant\\CBS_age_10yr_groups.csv",sep=";")
## read.aantal.landelijk.path <- paste("C:\\Rdir\\data\\",Sys.Date(),"\\", Sys.Date(), "_COVID-19_casus_landelijk.csv",sep="")
## RIVM_casus_landelijk <- read.csv(read.aantal.landelijk.path,sep=";")
today = Sys.Date()
#casus.working <-cases_per_day
casus.working <-RIVM_casus_landelijk
casus.working$week<-strftime(casus.working$date,format = "%V") #adding week_number to the case
# Monday-based start of the ISO week each case falls in
casus.working$weekbegin <- floor_date(casus.working$date, " week", week_start = 1)
# Count cases per week per age group
casus.working<-count(casus.working,weekbegin,Agegroup)
# Merge with population counts; phd = cases per 100,000
casus.working <- merge(casus.working,CBS_age_10yrs_GH)
casus.working$phd <- round(casus.working$n*100000/casus.working$population,0)
#weeknumber <- isoweek(Sys.Date())
weeknumber<-strftime(Sys.Date(),format = "%V")
# Subset the desired weeks (after 2020-07-01, up to today)
casus.working <- casus.working[casus.working$weekbegin>"2020-07-01"&casus.working$weekbegin<=today,]
#Heatmap
# Heatmap (Dutch): weeks x age groups, coloured/labelled by cases per 100,000.
# NOTE(review): scale_x_date()'s first positional argument is `name`, so the
# Date passed here becomes the axis title -- possibly `limits` was intended.
ggplot(casus.working,aes(weekbegin,Agegroup,fill=phd))+
geom_tile(size=1.5,color="#F5F5F5")+
geom_text(label=casus.working$phd,size=5)+
scale_fill_gradient2(trans="sqrt",low = "#5B9BD5",mid="#FFEB84",midpoint = 15,
high = "#c00000")+
scale_x_date(as.Date("2020-07-06"),breaks = "1 week", labels = date_format("%V"))+
coord_cartesian(expand = FALSE)+
#ggtitle("Aantal geconstateerde besmettingen per 100.000 per week")+
theme_minimal()+
xlab("")+
ylab("")+
theme(legend.position = "none")+
labs(title = "Geconstateerde besmettingen COVID-19",
subtitle = "Aantal positief geteste mensen per 100.000 binnen de leeftijdsgroep. Week 3 & 4 zullen nog sterk stijgen",fill=NULL,
caption = paste("Bron data: RIVM / CBS | Plot: @YorickB | ",Sys.Date()))+
theme(plot.background = element_rect(fill = "#F5F5F5"),
panel.background = element_rect(fill = "#F5F5F5", colour = "#F5F5F5"),
plot.title = element_text(hjust = 0.5,size = 25,face = "bold"),
plot.subtitle = element_text(hjust=0.5,color = "black", face = "italic"),
axis.text = element_text(size=14,color = "black",face = "bold"),
axis.ticks = element_line(colour = "#F5F5F5", size = 1, linetype = "solid"),
axis.ticks.length = unit(0.3, "cm"),
axis.title.x=element_blank())# +
ggsave("data/02_leeftijd_heatmap.png",width=16, height = 9)
# English twin of the heatmap above (translated labels, larger title).
ggplot(casus.working,aes(weekbegin,Agegroup,fill=phd))+
geom_tile(size=1.5,color="#F5F5F5")+
geom_text(label=casus.working$phd,size=5)+
scale_fill_gradient2(trans="sqrt",low = "#5B9BD5",mid="#FFEB84",midpoint = 15,
high = "#c00000")+
scale_x_date(as.Date("2020-07-06"),breaks = "1 week", labels = date_format("%V"))+
coord_cartesian(expand = FALSE)+
#ggtitle("cases per 100.000 per week")+
theme_minimal()+
xlab("")+
ylab("")+
theme(legend.position = "none")+
labs(title = "Cases COVID-19",
subtitle = "Number of cases per 100.000, within each agegroup. Week 3 and 4 will still rise.",fill=NULL,
caption = paste("Source: RIVM / CBS | Plot: @YorickB | ",Sys.Date()))+
theme(plot.background = element_rect(fill = "#F5F5F5"),
panel.background = element_rect(fill = "#F5F5F5", colour = "#F5F5F5"),
plot.title = element_text(hjust = 0.5,size = 30,face = "bold"),
plot.subtitle = element_text(hjust=0.5,color = "black", face = "italic"),
axis.text = element_text(size=14,color = "black",face = "bold"),
axis.ticks = element_line(colour = "#F5F5F5", size = 1, linetype = "solid"),
axis.ticks.length = unit(0.3, "cm"),
axis.title.x=element_blank())# +
ggsave("data/02_EN_leeftijd_heatmap.png",width=16, height = 9)
# Subset the desired weeks (week starts after 2020-12-07, up to today)
casus.working <- casus.working[casus.working$weekbegin>"2020-12-07"&casus.working$weekbegin<=today,]
# Discrete factor so each week gets its own dodged bar
casus.working$weekbegin <- as.factor(casus.working$weekbegin)
# Dodged barchart (Dutch): cases per 100,000 per age group, one bar per week
ggplot(casus.working,aes(Agegroup,phd,fill=weekbegin))+
geom_bar(stat="identity", position=position_dodge(0.85),width = 0.7)+
theme_classic()+
theme(legend.position= c(0.5,0.9), legend.direction = "horizontal")+
xlab("")+
ylab("")+
labs(title = "Geconstateerde besmettingen COVID-19",
subtitle = "Aantal positief geteste mensen per 100.000 binnen de leeftijdsgroep. Week 3 & 4 zullen nog sterk stijgen.",
fill="Week",
caption = paste("Bron data: RIVM / CBS | Plot: @YorickB | ",Sys.Date()))+
theme(plot.background = element_rect(fill = "#F5F5F5"), #background color/size (border color and size)
panel.background = element_rect(fill = "#F5F5F5", colour = "#F5F5F5"),
plot.title = element_text(hjust = 0.5,size = 25, face = "bold"),
plot.subtitle = element_text(hjust=0.5,color = "black", face = "italic"),
axis.text = element_text(size=14,color = "black", face = "bold"),
axis.ticks = element_line(colour = "#F5F5F5", size = 1, linetype = "solid"),
axis.ticks.length = unit(0.5, "cm"),
axis.line = element_line(colour = "#F5F5F5"),
panel.grid.major.y = element_line(colour= "lightgray", linetype = "dashed"))+
# 8 colours for 8 weeks; labels are ISO week numbers 50..53, 1..4
scale_fill_manual(values=c('#c6cee6','#adb9dd', '#8fa2d4', '#6383c9', '#416ebd', '#3b64ad', '#f1a069', '#f8cbad' ), labels=c(
"50",
"51",
"52",
"53",
"1",
"2",
"3",
"4"))# +
ggsave("data/01_leeftijd_barchart.png",width=16, height = 9)
# English twin of the per-100k barchart.
# Fix: labels listed "50" twice (9 labels for 8 fill levels/colours); the
# duplicate is removed so labels align with the 8 weeks, matching the Dutch
# chart above.
ggplot(casus.working,aes(Agegroup,phd,fill=weekbegin))+
geom_bar(stat="identity", position=position_dodge(0.85),width = 0.7)+
theme_classic()+
theme(legend.position= c(0.5,0.9), legend.direction = "horizontal")+
xlab("")+
ylab("")+
labs(title = "Cases COVID-19",
subtitle = "Number of cases per 100.000, within each agegroup. Week 3 and 4 will still rise.",
fill="Week",
caption = paste("Source: RIVM / CBS | Plot: @YorickB | ",Sys.Date()))+
theme(plot.background = element_rect(fill = "#F5F5F5"), #background color/size (border color and size)
panel.background = element_rect(fill = "#F5F5F5", colour = "#F5F5F5"),
plot.title = element_text(hjust = 0.5,size = 25, face = "bold"),
plot.subtitle = element_text(hjust=0.5,color = "black", face = "italic"),
axis.text = element_text(size=14,color = "black", face = "bold"),
axis.ticks = element_line(colour = "#F5F5F5", size = 1, linetype = "solid"),
axis.ticks.length = unit(0.5, "cm"),
axis.line = element_line(colour = "#F5F5F5"),
panel.grid.major.y = element_line(colour= "lightgray", linetype = "dashed"))+
scale_fill_manual(values=c('#c6cee6','#adb9dd', '#8fa2d4', '#6383c9', '#416ebd', '#3b64ad', '#f1a069', '#f8cbad' ), labels=c(
"50",
"51",
"52",
"53",
"1",
"2",
"3",
"4"))
ggsave("data/01_EN_leeftijd_barchart.png",width=16, height = 9)
#barchart - abs
# Absolute weekly case counts `n` per age group. NOTE(review): subtitle
# still says "per 100.000" and labels run 48..2 vs 50..4 above -- verify
# against the actual weekbegin factor levels.
ggplot(casus.working,aes(Agegroup,n,fill=weekbegin))+
geom_bar(stat="identity", position=position_dodge(0.85),width = 0.7)+
theme_classic()+
theme(legend.position= c(0.5,0.9), legend.direction = "horizontal")+
xlab("")+
ylab("")+
labs(title = "Geconstateerde besmettingen COVID-19",
subtitle = "Aantal positief geteste mensen per 100.000 binnen de leeftijdsgroep. Week 3 & 4 zullen nog sterk stijgen.",
fill="Week",
caption = paste("Bron data: RIVM / CBS | Plot: @YorickB | ",Sys.Date()))+
theme(plot.background = element_rect(fill = "#F5F5F5"), #background color/size (border color and size)
panel.background = element_rect(fill = "#F5F5F5", colour = "#F5F5F5"),
plot.title = element_text(hjust = 0.5,size = 25, face = "bold"),
plot.subtitle = element_text(hjust=0.5,color = "black", face = "italic"),
axis.text = element_text(size=14,color = "black", face = "bold"),
axis.ticks = element_line(colour = "#F5F5F5", size = 1, linetype = "solid"),
axis.ticks.length = unit(0.5, "cm"),
axis.line = element_line(colour = "#F5F5F5"),
panel.grid.major.y = element_line(colour= "lightgray", linetype = "dashed"))+
scale_fill_manual(values=c('#c6cee6','#adb9dd', '#8fa2d4', '#6383c9', '#416ebd', '#3b64ad', '#f1a069', '#f8cbad' ), labels=c( "48",
"49",
"50",
"51",
"52",
"53",
"1",
"2"))
ggsave("data/01_leeftijd_barchart_abs.png",width=16, height = 9)
#### Data prep for the age-group share plot #####
# Drop aggregate/unknown categories, collapse 10-year bands into broader
# groups, count per day per group, smooth with a trailing 7-day mean.
casus.working = filter(RIVM_casus_landelijk, Agegroup != "<50" & Agegroup !="Unknown")
# NOTE(review): str_detect() patterns are regexes; "90+" means "9" then
# one-or-more "0" but still matches the literal "90+" level.
casus.working <- casus.working %>% mutate(age_grouping = case_when(str_detect(Agegroup, "0-9") ~ '0-9',
str_detect(Agegroup, "10-19") ~ '10-19',
str_detect(Agegroup, "20-29") ~ '20-39',
str_detect(Agegroup, "30-39") ~ '20-39',
str_detect(Agegroup, "40-49") ~ '40-59',
str_detect(Agegroup, "50-59") ~ '40-59',
str_detect(Agegroup, "60-69") ~ '60-79',
str_detect(Agegroup, "70-79") ~ '60-79',
str_detect(Agegroup, "80-89") ~ '80+',
str_detect(Agegroup, "90+") ~ '80+',))
casus.working <-count(casus.working,date,age_grouping)
#Take rolling 7-day averages
# NOTE(review): roll_mean() presumably comes from RcppRoll -- confirm loaded.
casus.working <- casus.working %>%
group_by(age_grouping) %>%
arrange(date) %>%
mutate(cases_avg=roll_mean(n, 7, align="right", fill=0))
# Keep dates from March 2020 up to (but excluding) yesterday
dag<-strftime(Sys.Date()-1)
casus.working <- casus.working[casus.working$date>"2020-02-29"&casus.working$date<dag,]
casus.working$date <- as.Date(casus.working$date)
# Custom legend-key renderer: draws each key as a rectangle covering 60% of
# the key area so keys show visible spacing. Signature (data, params, size)
# follows ggplot2's draw_key_* contract; alpha() and .pt come from ggplot2.
draw_key_polygon3 <- function(data, params, size) {
# Cap the key border width relative to the key size
lwd <- min(data$size, min(size) / 4)
grid::rectGrob(
width = grid::unit(0.6, "npc"),
height = grid::unit(0.6, "npc"),
gp = grid::gpar(
col = data$colour,
fill = alpha(data$fill, data$alpha),
lty = data$linetype,
lwd = lwd * .pt,
linejoin = "mitre"
))
}
# Install the custom key renderer for all subsequent geom_bar() legends
GeomBar$draw_key = draw_key_polygon3
#### PLOT: share of each age group (Dutch labels) ####
# Filled, y-reversed barchart of the 7-day average per group.
# Fix: removed the stray trailing `+` that chained ggsave() onto the ggplot
# (ggsave() returns a file path, which cannot be added to a plot); ggsave()
# is now a separate statement, matching the rest of the file.
ggplot(casus.working, aes(date,cases_avg,fill=age_grouping))+
geom_bar(stat="identity", position=position_fill(), width=1) + scale_y_reverse() +
theme_classic()+
theme(legend.position = "right", legend.direction = "vertical", legend.background =element_rect(fill = "#F5F5F5") , legend.spacing.y = unit(0, "cm"), legend.key.size = unit(1, "cm"))+ #legend.spacing =0.5
xlab("")+
ylab("")+
scale_x_date(date_breaks = "1 month",
date_labels= format("%b"),
limits = as.Date(c("2020-02-27", Sys.Date())))+
# scale_y_continuous( label = percent_format(), sec.axis = sec_axis(~ . * 1, label = percent_format()))+
scale_fill_manual(values=c("darkgray", '#f8cbad','#c55a11', '#2f5597', '#8faadc', '#5b9bd5'))+ # Use custom colors
guides(fill = guide_legend(reverse = TRUE))+
labs(title = "Geconstateerde besmettingen COVID-19",
subtitle = "verhouding tussen de groepen, gebaseerd op 7 daags lopend gemiddelde",
fill="",
caption = paste("Bron data: RIVM | Plot: @YorickB | ",Sys.Date()-1))+
theme(plot.background = element_rect(fill = "#F5F5F5"), #background color/size (border color and size)
panel.background = element_rect(fill = "#F5F5F5", colour = "#F5F5F5"),
plot.title = element_text(hjust = 0.5,size = 25,face = "bold"),
plot.subtitle = element_text(hjust=0.5,color = "black", face = "italic"),
axis.text = element_text(size=14,color = "black",face = "bold"),
axis.ticks = element_line(colour = "#F5F5F5", size = 1, linetype = "solid"),
axis.text.y = element_blank(),
axis.ticks.length = unit(0.1, "cm"),
axis.line = element_line(colour = "#F5F5F5"))
ggsave("data/03_leeftijd_relatief.png",width=16, height = 9)
# English twin of the age-group share plot; ggsave() is correctly separate.
ggplot(casus.working, aes(date,cases_avg,fill=age_grouping))+
geom_bar(stat="identity", position=position_fill(), width=1) + scale_y_reverse() +
theme_classic()+
theme(legend.position = "right", legend.direction = "vertical", legend.background =element_rect(fill = "#F5F5F5") , legend.spacing.y = unit(0, "cm"), legend.key.size = unit(1, "cm"))+ #legend.spacing =0.5
xlab("")+
ylab("")+
guides(fill = guide_legend(reverse = TRUE))+
scale_x_date(date_breaks = "1 month",
date_labels= format("%b"),
limits = as.Date(c("2020-02-27", Sys.Date())))+
labs(title = "Cases COVID-19",
subtitle = "relationship between the age groups, based on the 7 day moving average",fill=NULL,
caption = paste("Source: RIVM / CBS | Plot: @YorickB | ",Sys.Date()))+
theme(plot.background = element_rect(fill = "#F5F5F5"), #background color/size (border color and size)
panel.background = element_rect(fill = "#F5F5F5", colour = "#F5F5F5"),
plot.title = element_text(hjust = 0.5,size = 25,face = "bold"),
plot.subtitle = element_text(hjust=0.5,color = "black", face = "italic"),
axis.text = element_text(size=14,color = "black",face = "bold"),
axis.ticks = element_line(colour = "#F5F5F5", size = 1, linetype = "solid"),
axis.text.y = element_blank(),
axis.ticks.length = unit(0.1, "cm"),
axis.line = element_line(colour = "#F5F5F5"))+
scale_fill_manual(values=c("darkgrey", '#f8cbad','#c55a11', '#2f5597', '#8faadc', '#5b9bd5')) # Use custom colors
ggsave("data/03_EN_leeftijd_relatief.png",width=16, height = 9)
|
# Smoke tests for the largeVis clustering entry points (optics/dbscan):
# each combination of inputs should run to completion without printing,
# warning, or erroring.
context("cluster")
library(largeVis)
set.seed(1974)
data(iris)
# Fixture: scaled iris measurements with duplicate rows removed, transposed
# so observations are columns (the orientation largeVis expects).
dat <- as.matrix(iris[, 1:4])
dat <- scale(dat)
dupes <- which(duplicated(dat))
dat <- dat[-dupes, ]
dat <- t(dat)
# Approximate 20-nearest-neighbour graph and its edge matrix, shared by all tests
neighbors <- randomProjectionTreeSearch(dat, K = 20, verbose = FALSE)
edges <- buildEdgeMatrix(data = dat,
neighbors = neighbors,
verbose = FALSE)
test_that("optics doesn't crash on iris with neighbors and data", {
expect_silent(optics(neighbors = neighbors, data = dat, eps = 10, minPts = 10, verbose = FALSE))
})
test_that("optics doesn't crash on iris with edges", {
expect_silent(optics(edges = edges, eps = 10, minPts = 10, verbose = FALSE))
})
test_that("optics doesn't crash on iris with edges and data", {
expect_silent(optics(edges = edges, data = dat, eps = 10, minPts = 10, verbose = FALSE))
})
test_that("dbscan doesn't crash on iris with edges", {
expect_silent(dbscan(edges = edges, eps = 10, minPts = 10, verbose = FALSE, partition = FALSE))
})
test_that("dbscan doesn't crash on iris with partitions", {
expect_silent(clusters <- dbscan(edges = edges, eps = 10, minPts = 10,
verbose = FALSE, partition = TRUE))
})
| /tests/testthat/testclusters2.R | no_license | Cookies-gh/largeVis | R | false | false | 1,209 | r | context("cluster")
library(largeVis)
set.seed(1974)
data(iris)
dat <- as.matrix(iris[, 1:4])
dat <- scale(dat)
dupes <- which(duplicated(dat))
dat <- dat[-dupes, ]
dat <- t(dat)
neighbors <- randomProjectionTreeSearch(dat, K = 20, verbose = FALSE)
edges <- buildEdgeMatrix(data = dat,
neighbors = neighbors,
verbose = FALSE)
test_that("optics doesn't crash on iris with neighbors and data", {
expect_silent(optics(neighbors = neighbors, data = dat, eps = 10, minPts = 10, verbose = FALSE))
})
test_that("optics doesn't crash on iris with edges", {
expect_silent(optics(edges = edges, eps = 10, minPts = 10, verbose = FALSE))
})
test_that("optics doesn't crash on iris with edges and data", {
expect_silent(optics(edges = edges, data = dat, eps = 10, minPts = 10, verbose = FALSE))
})
test_that("dbscan doesn't crash on iris with edges", {
expect_silent(dbscan(edges = edges, eps = 10, minPts = 10, verbose = FALSE, partition = FALSE))
})
test_that("dbscan doesn't crash on iris with partitions", {
expect_silent(clusters <- dbscan(edges = edges, eps = 10, minPts = 10,
verbose = FALSE, partition = TRUE))
})
|
# Copyright 2017 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#' @title Plot daily mean streamflow
#'
#' @description Plot the daily mean flow values from a streamflow dataset. Plots the statistics from all daily discharge values from all
#' years, unless specified. Can choose specific dates to start and end plotting. Can choose to plot out each year separately.
#'
#' @param flowdata Data frame. A data frame of daily mean flow data that includes two columns: a 'Date' column with dates formatted
#' YYYY-MM-DD, and a numeric 'Value' column with the corresponding daily mean flow values in units of cubic metres per second.
#' Not required if \code{HYDAT} argument is used.
#' @param HYDAT Character. A seven digit Water Survey of Canada station number (e.g. \code{"08NM116"}) of which to extract daily streamflow
#' data from a HYDAT database. \href{https://github.com/ropensci/tidyhydat}{Installation} of the \code{tidyhydat} package and a HYDAT
#' database are required. Not required if \code{flowdata} argument is used.
#' @param rolling_days Numeric. The number of days to apply a rolling mean. Default \code{1}.
#' @param rolling_align Character. Specifies whether the dates of the rolling mean should be specified by the first ('left'), last ('right'),
#'    or middle ('center') of the rolling n-day group of observations. Default \code{'right'}.
#' @param water_year Logical. Use water years to group flow data instead of calendar years. Water years are designated
#' by the year in which they end. Default \code{FALSE}.
#' @param water_year_start Integer. Month indicating the start of the water year. Used if \code{water_year=TRUE}. Default \code{10}.
#' @param start_year Integer. First year to consider for plotting. Leave blank if all years are required.
#' @param end_year Integer. Last year to consider for plotting. Leave blank if all years are required.
#' @param exclude_years Integer. Single year or vector of years to exclude from plotting. Leave blank if all years are required.
#' @param start_date Date. First date to consider for plotting. Leave blank if all years are required.
#' @param end_date Date. Last date to consider for plotting. Leave blank if all years are required.
#' @param log_discharge Logical. Place the discharge axis (Y) on log scale. Default \code{FALSE} (linear).
#' @param plot_by_year Logical. Plot each year of data individually. Default \code{FALSE}.
#' @param station_name Character. Name of hydrometric station or stream that will be used to create file names. Leave blank if not writing
#' files or if \code{HYDAT} is used or a column in \code{flowdata} called 'STATION_NUMBER' contains a WSC station number, as the name
#' will be the \code{HYDAT} value provided in the argument or column. Setting the station name will replace the HYDAT station number.
#' @param write_plot Logical. Write the plot to specified directory. Default \code{FALSE}.
#' @param write_imgtype Character. One of "pdf","png","jpeg","tiff", or "bmp" image types to write the plot as. Default \code{"pdf"}.
#' @param write_imgsize Numeric. Height and width, respectively, of saved plot. Default \code{c(5,11)}.
#' @param write_dir Character. Directory folder name of where to write tables and plots. If directory does not exist, it will be created.
#' Default is the working directory.
#'
#' @return A ggplot2 object of daily flows from flowdata or HYDAT flow data provided
#'
#' @examples
#' \dontrun{
#'
#'plot_flow_data(flowdata = flowdata, station_name = "MissionCreek", write_plot = TRUE)
#'
#'plot_flow_data(HYDAT = "08NM116", water_year = TRUE, water_year_start = 8)
#'
#' }
#' @export
#--------------------------------------------------------------
plot_flow_data <- function(flowdata = NULL,
                           HYDAT = NULL,
                           rolling_days = 1,
                           rolling_align = "right",
                           water_year = FALSE,
                           water_year_start = 10,
                           start_year = NULL,
                           end_year = NULL,
                           exclude_years = NULL,
                           start_date = NULL,
                           end_date = NULL,
                           log_discharge = FALSE,
                           plot_by_year = FALSE,
                           station_name = NA,
                           write_plot = FALSE,
                           write_imgtype = "pdf",
                           write_imgsize = c(ifelse(plot_by_year, 11, 6.35), 18),
                           write_dir = ".") {
  # Plot daily mean flows from a user-supplied data frame or a HYDAT station.
  # Returns a ggplot2 object; optionally writes the plot to disk.

  #--------------------------------------------------------------
  # Error checking on the input parameters
  if (!is.null(HYDAT) && !is.null(flowdata)) {stop("must select either flowdata or HYDAT arguments, not both")}
  if (is.null(HYDAT)) {
    if (is.null(flowdata)) {stop("one of flowdata or HYDAT arguments must be set")}
    if (!is.data.frame(flowdata)) {stop("flowdata arguments is not a data frame")}
    if (!all(c("Date", "Value") %in% names(flowdata))) {stop("flowdata data frame doesn't contain the variables 'Date' and 'Value'")}
    if (!inherits(flowdata$Date[1], "Date")) {stop("'Date' column in flowdata data frame is not a date")}
    if (!is.numeric(flowdata$Value)) {stop("'Value' column in flowdata data frame is not numeric")}
    if (any(flowdata$Value < 0, na.rm = TRUE)) {warning('flowdata cannot have negative values - check your data')}
  }
  if (!is.logical(water_year)) {stop("water_year argument must be logical (TRUE/FALSE)")}
  if (!is.numeric(water_year_start)) {stop("water_year_start argument must be a number between 1 and 12 (Jan-Dec)")}
  if (length(water_year_start) > 1) {stop("water_year_start argument must be a number between 1 and 12 (Jan-Dec)")}
  if (!water_year_start %in% c(1:12)) {stop("water_year_start argument must be an integer between 1 and 12 (Jan-Dec)")}
  if (length(start_year) > 1) {stop("only one start_year value can be selected")}
  if (!is.null(start_year)) {if (!start_year %in% c(0:5000)) {stop("start_year must be an integer")}}
  if (length(end_year) > 1) {stop("only one end_year value can be selected")}
  if (!is.null(end_year)) {if (!end_year %in% c(0:5000)) {stop("end_year must be an integer")}}
  if (!is.null(exclude_years) && !is.numeric(exclude_years)) {stop("list of exclude_years must be numeric - ex. 1999 or c(1999,2000)")}
  if (!is.na(station_name) && !is.character(station_name)) {stop("station_name argument must be a character string.")}
  if (!is.logical(log_discharge)) {stop("log_discharge argument must be logical (TRUE/FALSE)")}
  if (!is.logical(write_plot)) {stop("write_plot argument must be logical (TRUE/FALSE)")}
  if (length(write_imgtype) > 1) {stop("write_imgtype argument cannot have length > 1")}
  if (!is.na(write_imgtype) && !write_imgtype %in% c("pdf", "png", "jpeg", "tiff", "bmp")) {
    stop("write_imgtype argument must be one of 'pdf','png','jpeg','tiff', or 'bmp'")}
  if (!is.numeric(write_imgsize)) {stop("write_imgsize must be two numbers for height and width, respectively")}
  if (length(write_imgsize) != 2) {stop("write_imgsize must be two numbers for height and width, respectively")}
  if (!dir.exists(as.character(write_dir))) {
    message("directory for saved files does not exist, new directory will be created")
    # BUG FIX: the condition previously tested the undefined object
    # 'write_table' (copied from a table-writing function), which errored;
    # 'write_plot' is the relevant flag in this function.
    if (write_plot && write_dir != ".") {dir.create(write_dir)}
  }
  # NOTE: a block validating a non-existent 'na.rm' argument was removed here;
  # 'na.rm' is not a parameter of this function and referencing it threw an
  # "object 'na.rm' not found" error whenever the function was called.
  if (!is.numeric(rolling_days)) {stop("rolling_days argument must be numeric")}
  if (!all(rolling_days %in% c(1:180))) {stop("rolling_days argument must be integers > 0 and <= 180)")}
  if (!rolling_align %in% c("right", "left", "center")) {stop("rolling_align argument must be 'right', 'left', or 'center'")}
  if (!is.null(start_date)) {if (inherits(try(as.Date(start_date), silent = TRUE), "try-error")) {stop("start_date must be a date formatted YYYY-MM-DD")}}
  if (!is.null(end_date)) {if (inherits(try(as.Date(end_date), silent = TRUE), "try-error")) {stop("end_date must be a date formatted YYYY-MM-DD")}}
  if (!is.null(end_date) && !is.null(start_date)) {if (start_date >= end_date) {stop("start_date must be less than end_date")}}

  # If a HYDAT station is listed, check that it exists and use it as flowdata
  if (!is.null(HYDAT)) {
    if (length(HYDAT) > 1) {stop("only one HYDAT station can be selected")}
    if (!HYDAT %in% dplyr::pull(tidyhydat::allstations[1])) {stop("Station in 'HYDAT' parameter does not exist")}
    if (is.na(station_name)) {station_name <- HYDAT}
    flowdata <- suppressMessages(tidyhydat::hy_daily_flows(station_number = HYDAT))
  }

  #--------------------------------------------------------------
  # Prepare the data for plotting: fill date gaps, derive year variables,
  # and compute the requested rolling mean (last added column).
  flowdata <- dplyr::select(flowdata, Date, Value)
  flowdata <- fasstr::fill_missing_dates(flowdata, water_year_start = water_year_start)
  flowdata <- fasstr::add_date_variables(flowdata, water_year = TRUE, water_year_start = water_year_start)
  flowdata <- fasstr::add_rolling_means(flowdata, days = rolling_days, align = rolling_align)
  colnames(flowdata)[ncol(flowdata)] <- "RollingValue"

  # Determine the min/max calendar/water years present in the data
  min_year <- ifelse(water_year, min(flowdata$WaterYear), min(flowdata$Year))
  max_year <- ifelse(water_year, max(flowdata$WaterYear), max(flowdata$Year))
  # If start/end years are not selected, default to the full record
  if (is.null(start_year)) {start_year <- min_year}
  if (is.null(end_year)) {end_year <- max_year}

  # Set selected year-type for plotting
  if (water_year) {
    flowdata$AnalysisYear <- flowdata$WaterYear
  } else {
    flowdata$AnalysisYear <- flowdata$Year
  }

  # Filter for specific years and dates, if selected
  flowdata <- dplyr::filter(flowdata, AnalysisYear >= start_year & AnalysisYear <= end_year)
  flowdata <- dplyr::filter(flowdata, !(AnalysisYear %in% exclude_years))
  if (!is.null(start_date)) {flowdata <- dplyr::filter(flowdata, Date >= start_date)}
  if (!is.null(end_date)) {flowdata <- dplyr::filter(flowdata, Date <= end_date)}

  #--------------------------------------------------------------
  # Build the time-series plot; optional facetting by year and optional
  # log-scaled discharge axis
  timeseries_plot <- ggplot2::ggplot(data = flowdata, ggplot2::aes(x = Date, y = RollingValue)) +
    ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5)) +
    ggplot2::geom_line(colour = "dodgerblue4") +
    ggplot2::ylab("Discharge (cms)") +
    {if (plot_by_year) ggplot2::facet_wrap(~AnalysisYear, scales = "free_x")} +
    {if (!log_discharge) ggplot2::scale_y_continuous(breaks = scales::pretty_breaks(n = 8), expand = c(0, 0))} +
    {if (log_discharge) ggplot2::scale_y_log10(expand = c(0, 0))} +
    {if (plot_by_year) ggplot2::scale_x_date(date_labels = "%b")} +
    {if (!plot_by_year) ggplot2::scale_x_date(breaks = scales::pretty_breaks(n = 12))} +
    {if (!log_discharge) ggplot2::expand_limits(y = c(0, max(flowdata$RollingValue) * 1.05))} +
    {if (log_discharge) ggplot2::expand_limits(y = c(min(flowdata$RollingValue) * .95, max(flowdata$RollingValue) * 1.05))} +
    ggplot2::theme(panel.border = ggplot2::element_rect(colour = "grey80", fill = NA, size = .5),
                   panel.grid.minor.y = ggplot2::element_blank())

  # Optionally write the plot as <station>-[annual-]daily-flows.<ext>
  if (write_plot) {
    file_timeseries_plot <- paste(write_dir, "/",
                                  paste0(ifelse(!is.na(station_name), station_name, paste0("fasstr"))),
                                  ifelse(plot_by_year, paste0("-annual-daily-flows."), paste0("-daily-flows.")),
                                  write_imgtype, sep = "")
    ggplot2::ggsave(filename = file_timeseries_plot,
                    timeseries_plot,
                    height = write_imgsize[1],
                    width = write_imgsize[2])
  }
  return(timeseries_plot)
} # end of function
| /R/plot_flow_data.R | permissive | pslota/fasstr | R | false | false | 13,013 | r | # Copyright 2017 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#' @title Plot daily mean streamflow
#'
#' @description Plot the daily mean flow values from a streamflow dataset. Plots the statistics from all daily discharge values from all
#' years, unless specified. Can choose specific dates to start and end plotting. Can choose to plot out each year separately.
#'
#' @param flowdata Data frame. A data frame of daily mean flow data that includes two columns: a 'Date' column with dates formatted
#' YYYY-MM-DD, and a numeric 'Value' column with the corresponding daily mean flow values in units of cubic metres per second.
#' Not required if \code{HYDAT} argument is used.
#' @param HYDAT Character. A seven digit Water Survey of Canada station number (e.g. \code{"08NM116"}) of which to extract daily streamflow
#' data from a HYDAT database. \href{https://github.com/ropensci/tidyhydat}{Installation} of the \code{tidyhydat} package and a HYDAT
#' database are required. Not required if \code{flowdata} argument is used.
#' @param rolling_days Numeric. The number of days to apply a rolling mean. Default \code{1}.
#' @param rolling_align Character. Specifies whether the dates of the rolling mean should be specified by the first ('left'), last ('right'),
#' or middle ('center') of the rolling n-day group of observations. Default \code{'right'}.
#' @param water_year Logical. Use water years to group flow data instead of calendar years. Water years are designated
#' by the year in which they end. Default \code{FALSE}.
#' @param water_year_start Integer. Month indicating the start of the water year. Used if \code{water_year=TRUE}. Default \code{10}.
#' @param start_year Integer. First year to consider for plotting. Leave blank if all years are required.
#' @param end_year Integer. Last year to consider for plotting. Leave blank if all years are required.
#' @param exclude_years Integer. Single year or vector of years to exclude from plotting. Leave blank if all years are required.
#' @param start_date Date. First date to consider for plotting. Leave blank if all years are required.
#' @param end_date Date. Last date to consider for plotting. Leave blank if all years are required.
#' @param log_discharge Logical. Place the discharge axis (Y) on log scale. Default \code{FALSE} (linear).
#' @param plot_by_year Logical. Plot each year of data individually. Default \code{FALSE}.
#' @param station_name Character. Name of hydrometric station or stream that will be used to create file names. Leave blank if not writing
#' files or if \code{HYDAT} is used or a column in \code{flowdata} called 'STATION_NUMBER' contains a WSC station number, as the name
#' will be the \code{HYDAT} value provided in the argument or column. Setting the station name will replace the HYDAT station number.
#' @param write_plot Logical. Write the plot to specified directory. Default \code{FALSE}.
#' @param write_imgtype Character. One of "pdf","png","jpeg","tiff", or "bmp" image types to write the plot as. Default \code{"pdf"}.
#' @param write_imgsize Numeric. Height and width, respectively, of saved plot. Default \code{c(ifelse(plot_by_year, 11, 6.35), 18)}.
#' @param write_dir Character. Directory folder name of where to write tables and plots. If directory does not exist, it will be created.
#' Default is the working directory.
#'
#' @return A ggplot2 object of daily flows from flowdata or HYDAT flow data provided
#'
#' @examples
#' \dontrun{
#'
#'plot_flow_data(flowdata = flowdata, station_name = "MissionCreek", write_plot = TRUE)
#'
#'plot_flow_data(HYDAT = "08NM116", water_year = TRUE, water_year_start = 8)
#'
#' }
#' @export
#--------------------------------------------------------------
plot_flow_data <- function(flowdata = NULL,
                           HYDAT = NULL,
                           rolling_days = 1,
                           rolling_align = "right",
                           water_year = FALSE,
                           water_year_start = 10,
                           start_year = NULL,
                           end_year = NULL,
                           exclude_years = NULL,
                           start_date = NULL,
                           end_date = NULL,
                           log_discharge = FALSE,
                           plot_by_year = FALSE,
                           station_name = NA,
                           write_plot = FALSE,
                           write_imgtype = "pdf",
                           write_imgsize = c(ifelse(plot_by_year, 11, 6.35), 18),
                           write_dir = ".") {
  # Plot daily mean flows from a user-supplied data frame or a HYDAT station.
  # Returns a ggplot2 object; optionally writes the plot to disk.

  #--------------------------------------------------------------
  # Error checking on the input parameters
  if (!is.null(HYDAT) && !is.null(flowdata)) {stop("must select either flowdata or HYDAT arguments, not both")}
  if (is.null(HYDAT)) {
    if (is.null(flowdata)) {stop("one of flowdata or HYDAT arguments must be set")}
    if (!is.data.frame(flowdata)) {stop("flowdata arguments is not a data frame")}
    if (!all(c("Date", "Value") %in% names(flowdata))) {stop("flowdata data frame doesn't contain the variables 'Date' and 'Value'")}
    if (!inherits(flowdata$Date[1], "Date")) {stop("'Date' column in flowdata data frame is not a date")}
    if (!is.numeric(flowdata$Value)) {stop("'Value' column in flowdata data frame is not numeric")}
    if (any(flowdata$Value < 0, na.rm = TRUE)) {warning('flowdata cannot have negative values - check your data')}
  }
  if (!is.logical(water_year)) {stop("water_year argument must be logical (TRUE/FALSE)")}
  if (!is.numeric(water_year_start)) {stop("water_year_start argument must be a number between 1 and 12 (Jan-Dec)")}
  if (length(water_year_start) > 1) {stop("water_year_start argument must be a number between 1 and 12 (Jan-Dec)")}
  if (!water_year_start %in% c(1:12)) {stop("water_year_start argument must be an integer between 1 and 12 (Jan-Dec)")}
  if (length(start_year) > 1) {stop("only one start_year value can be selected")}
  if (!is.null(start_year)) {if (!start_year %in% c(0:5000)) {stop("start_year must be an integer")}}
  if (length(end_year) > 1) {stop("only one end_year value can be selected")}
  if (!is.null(end_year)) {if (!end_year %in% c(0:5000)) {stop("end_year must be an integer")}}
  if (!is.null(exclude_years) && !is.numeric(exclude_years)) {stop("list of exclude_years must be numeric - ex. 1999 or c(1999,2000)")}
  if (!is.na(station_name) && !is.character(station_name)) {stop("station_name argument must be a character string.")}
  if (!is.logical(log_discharge)) {stop("log_discharge argument must be logical (TRUE/FALSE)")}
  if (!is.logical(write_plot)) {stop("write_plot argument must be logical (TRUE/FALSE)")}
  if (length(write_imgtype) > 1) {stop("write_imgtype argument cannot have length > 1")}
  if (!is.na(write_imgtype) && !write_imgtype %in% c("pdf", "png", "jpeg", "tiff", "bmp")) {
    stop("write_imgtype argument must be one of 'pdf','png','jpeg','tiff', or 'bmp'")}
  if (!is.numeric(write_imgsize)) {stop("write_imgsize must be two numbers for height and width, respectively")}
  if (length(write_imgsize) != 2) {stop("write_imgsize must be two numbers for height and width, respectively")}
  if (!dir.exists(as.character(write_dir))) {
    message("directory for saved files does not exist, new directory will be created")
    # BUG FIX: the condition previously tested the undefined object
    # 'write_table' (copied from a table-writing function), which errored;
    # 'write_plot' is the relevant flag in this function.
    if (write_plot && write_dir != ".") {dir.create(write_dir)}
  }
  # NOTE: a block validating a non-existent 'na.rm' argument was removed here;
  # 'na.rm' is not a parameter of this function and referencing it threw an
  # "object 'na.rm' not found" error whenever the function was called.
  if (!is.numeric(rolling_days)) {stop("rolling_days argument must be numeric")}
  if (!all(rolling_days %in% c(1:180))) {stop("rolling_days argument must be integers > 0 and <= 180)")}
  if (!rolling_align %in% c("right", "left", "center")) {stop("rolling_align argument must be 'right', 'left', or 'center'")}
  if (!is.null(start_date)) {if (inherits(try(as.Date(start_date), silent = TRUE), "try-error")) {stop("start_date must be a date formatted YYYY-MM-DD")}}
  if (!is.null(end_date)) {if (inherits(try(as.Date(end_date), silent = TRUE), "try-error")) {stop("end_date must be a date formatted YYYY-MM-DD")}}
  if (!is.null(end_date) && !is.null(start_date)) {if (start_date >= end_date) {stop("start_date must be less than end_date")}}

  # If a HYDAT station is listed, check that it exists and use it as flowdata
  if (!is.null(HYDAT)) {
    if (length(HYDAT) > 1) {stop("only one HYDAT station can be selected")}
    if (!HYDAT %in% dplyr::pull(tidyhydat::allstations[1])) {stop("Station in 'HYDAT' parameter does not exist")}
    if (is.na(station_name)) {station_name <- HYDAT}
    flowdata <- suppressMessages(tidyhydat::hy_daily_flows(station_number = HYDAT))
  }

  #--------------------------------------------------------------
  # Prepare the data for plotting: fill date gaps, derive year variables,
  # and compute the requested rolling mean (last added column).
  flowdata <- dplyr::select(flowdata, Date, Value)
  flowdata <- fasstr::fill_missing_dates(flowdata, water_year_start = water_year_start)
  flowdata <- fasstr::add_date_variables(flowdata, water_year = TRUE, water_year_start = water_year_start)
  flowdata <- fasstr::add_rolling_means(flowdata, days = rolling_days, align = rolling_align)
  colnames(flowdata)[ncol(flowdata)] <- "RollingValue"

  # Determine the min/max calendar/water years present in the data
  min_year <- ifelse(water_year, min(flowdata$WaterYear), min(flowdata$Year))
  max_year <- ifelse(water_year, max(flowdata$WaterYear), max(flowdata$Year))
  # If start/end years are not selected, default to the full record
  if (is.null(start_year)) {start_year <- min_year}
  if (is.null(end_year)) {end_year <- max_year}

  # Set selected year-type for plotting
  if (water_year) {
    flowdata$AnalysisYear <- flowdata$WaterYear
  } else {
    flowdata$AnalysisYear <- flowdata$Year
  }

  # Filter for specific years and dates, if selected
  flowdata <- dplyr::filter(flowdata, AnalysisYear >= start_year & AnalysisYear <= end_year)
  flowdata <- dplyr::filter(flowdata, !(AnalysisYear %in% exclude_years))
  if (!is.null(start_date)) {flowdata <- dplyr::filter(flowdata, Date >= start_date)}
  if (!is.null(end_date)) {flowdata <- dplyr::filter(flowdata, Date <= end_date)}

  #--------------------------------------------------------------
  # Build the time-series plot; optional facetting by year and optional
  # log-scaled discharge axis
  timeseries_plot <- ggplot2::ggplot(data = flowdata, ggplot2::aes(x = Date, y = RollingValue)) +
    ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5)) +
    ggplot2::geom_line(colour = "dodgerblue4") +
    ggplot2::ylab("Discharge (cms)") +
    {if (plot_by_year) ggplot2::facet_wrap(~AnalysisYear, scales = "free_x")} +
    {if (!log_discharge) ggplot2::scale_y_continuous(breaks = scales::pretty_breaks(n = 8), expand = c(0, 0))} +
    {if (log_discharge) ggplot2::scale_y_log10(expand = c(0, 0))} +
    {if (plot_by_year) ggplot2::scale_x_date(date_labels = "%b")} +
    {if (!plot_by_year) ggplot2::scale_x_date(breaks = scales::pretty_breaks(n = 12))} +
    {if (!log_discharge) ggplot2::expand_limits(y = c(0, max(flowdata$RollingValue) * 1.05))} +
    {if (log_discharge) ggplot2::expand_limits(y = c(min(flowdata$RollingValue) * .95, max(flowdata$RollingValue) * 1.05))} +
    ggplot2::theme(panel.border = ggplot2::element_rect(colour = "grey80", fill = NA, size = .5),
                   panel.grid.minor.y = ggplot2::element_blank())

  # Optionally write the plot as <station>-[annual-]daily-flows.<ext>
  if (write_plot) {
    file_timeseries_plot <- paste(write_dir, "/",
                                  paste0(ifelse(!is.na(station_name), station_name, paste0("fasstr"))),
                                  ifelse(plot_by_year, paste0("-annual-daily-flows."), paste0("-daily-flows.")),
                                  write_imgtype, sep = "")
    ggplot2::ggsave(filename = file_timeseries_plot,
                    timeseries_plot,
                    height = write_imgsize[1],
                    width = write_imgsize[2])
  }
  return(timeseries_plot)
} # end of function
|
\name{sqlsurvey}
\alias{sqlsurvey}
\alias{open.sqlsurvey}
\alias{close.sqlsurvey}
\alias{dim.sqlsurvey}
\alias{open.sqlmodelmatrix}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Survey design based on SQL database}
\description{
Specifies a survey data set based on a connection to a SQL (currently SQLite)
database. The data may be in an existing database or in a data frame
that will be written out to a database. If data are in an existing
database they should be in a single table in denormalized form.
}
\usage{
sqlsurvey(id, strata = NULL, weights = NULL, fpc = "0", data,
table.name = basename(tempfile("_tbl_")), key = "row_names")
\method{close}{sqlsurvey}(con, tidy=TRUE, ...)
\method{open}{sqlsurvey}(con, db=NULL,...)
\method{open}{sqlmodelmatrix}(con, design,...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{id}{vector of strings with names of sampling unit identifiers}
\item{strata}{vector of strings with names of stratum identifiers }
\item{weights}{string with name of weighting variable }
\item{fpc}{string with name of population size variable (variable may
be zero for sampling with replacement or infinite population size)}
\item{data}{Either a data frame or the name of a SQLite database file}
\item{table.name}{Name for the data table containing the survey data
and design variables}
\item{key}{name of a variable that can be used as a unique key }
\item{con}{survey object or survey model matrix object to be
disconnected from or reconnected to the database}
\item{tidy}{Clear any pending results?}
\item{db}{An existing connection (optional, allows multiple objects to
be reconnected to the same database connection)}
\item{design}{A \code{sqlsurvey} object with an active database
connection}
\item{...}{For future expansion}
}
\value{
\code{sqlsurvey} returns an object of class \code{sqlsurvey}
}
\seealso{\code{\link[survey]{svydesign}}}
\examples{
library(survey)
data(api)
sqclus2<-sqlsurvey(id=c("dnum","snum"), fpc=c("fpc1","fpc2"),
weights="pw", data=apiclus2)
sqclus2
svymean(~api99+api00, design=sqclus2)
sqclus1<-sqlsurvey(id="dnum", fpc="fpc", weights="pw", strata="fpc",
data=system.file("apiclus1.db",package="surveyNG"),
table.name="clus1", key="snum")
sqclus1
svymean(~api99+api00, design=sqclus1)
close(sqclus1)
close(sqclus2)
}
\keyword{survey }
| /man/sqlsurvey.Rd | no_license | cran/surveyNG | R | false | false | 2,381 | rd | \name{sqlsurvey}
\alias{sqlsurvey}
\alias{open.sqlsurvey}
\alias{close.sqlsurvey}
\alias{dim.sqlsurvey}
\alias{open.sqlmodelmatrix}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Survey design based on SQL database}
\description{
Specifies a survey data set based on a connection to a SQL (currently SQLite)
database. The data may be in an existing database or in a data frame
that will be written out to a database. If data are in an existing
database they should be in a single table in denormalized form.
}
\usage{
sqlsurvey(id, strata = NULL, weights = NULL, fpc = "0", data,
table.name = basename(tempfile("_tbl_")), key = "row_names")
\method{close}{sqlsurvey}(con, tidy=TRUE, ...)
\method{open}{sqlsurvey}(con, db=NULL,...)
\method{open}{sqlmodelmatrix}(con, design,...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{id}{vector of strings with names of sampling unit identifiers}
\item{strata}{vector of strings with names of stratum identifiers }
\item{weights}{string with name of weighting variable }
\item{fpc}{string with name of population size variable (variable may
be zero for sampling with replacement or infinite population size)}
\item{data}{Either a data frame or the name of a SQLite database file}
\item{table.name}{Name for the data table containing the survey data
and design variables}
\item{key}{name of a variable that can be used as a unique key }
\item{con}{survey object or survey model matrix object to be
disconnected from or reconnected to the database}
\item{tidy}{Clear any pending results?}
\item{db}{An existing connection (optional, allows multiple objects to
be reconnected to the same database connection)}
\item{design}{A \code{sqlsurvey} object with an active database
connection}
\item{...}{For future expansion}
}
\value{
\code{sqlsurvey} returns an object of class \code{sqlsurvey}
}
\seealso{}
\examples{
library(survey)
data(api)
sqclus2<-sqlsurvey(id=c("dnum","snum"), fpc=c("fpc1","fpc2"),
weights="pw", data=apiclus2)
sqclus2
svymean(~api99+api00, design=sqclus2)
sqclus1<-sqlsurvey(id="dnum", fpc="fpc", weights="pw", strata="fpc",
data=system.file("apiclus1.db",package="surveyNG"),
table.name="clus1", key="snum")
sqclus1
svymean(~api99+api00, design=sqclus1)
close(sqclus1)
close(sqclus2)
}
\keyword{survey }
|
# ==============================================================================
# DEBARCODING
# ==============================================================================
# read input FCS
# read input FCS: either reuse the compensated data carried over from the
# compensation tab, or read the user-uploaded FCS file
ffDeba <- reactive({
if (vals$keepDataComp) {
# concatenate the compensated flowSet into a single flowFrame
# (BUG FIX: removed a stray debug 'print(fs)' that spammed the console
# on every reactive invalidation)
CATALYST::concatFCS(fsComped())
} else {
req(input$fcsDeba)
# check validity of input FCS files; abort silently on invalid input
valid <- check_FCS_fileInput(input$fcsDeba)
if (!valid) return(NULL)
read.FCS(
filename=input$fcsDeba$datapath,
transformation=FALSE,
truncate_max_range=FALSE)
}
})
# expand sidebar: show the first debarcoding sidebar once an input
# flowFrame is available
output$debarcodingSidebar1 <- renderUI({
req(ffDeba())
debarcodingSidebar1
})
# fileInput "upload barcoding scheme (CSV)"
# selectInput listing candidate barcode channels: keep only channels whose
# name contains a parseable mass number (letters/punctuation stripped)
output$selectBcChs <- renderUI({
req(input$boxSelectBcChs == 1)
chs <- flowCore::colnames(ffDeba())
ms <- as.numeric(gsub("[[:alpha:][:punct:]]", "", chs))
chs <- chs[!is.na(as.numeric(ms))]
selectInput(
inputId="input_bcChs", label=NULL, choices=chs,
multiple=TRUE, selectize=FALSE, size=12)
})
# check validity of debarcoding scheme CSV:
# the file name must end in .csv, and the key must be compatible with the
# input flowFrame (checked via check_key); result cached in vals$debaKeyIsValid
observe({
req(input$debaSchemeCsv)
match <- grep("(.csv)$", input$debaSchemeCsv$name, ignore.case=TRUE)
isCSV <- length(match) == 1
if (!isCSV) {
showNotification(
h4(strong("Input debarcoding scheme should be a CSV file.")),
duration=NULL, type="error")
return()
}
key <- read.csv(input$debaSchemeCsv$datapath,
check.names=FALSE, row.names=1)
vals$debaKeyIsValid <- check_key(key, ffDeba())
})
# get debarcoding scheme (first column = barcode IDs used as row names)
# NOTE(review): the CSV is re-read here even though the validation observer
# above already parsed it -- presumably cheap, but could be cached
debaKey <- reactive({
req(isTRUE(vals$debaKeyIsValid))
key <- read.csv(input$debaSchemeCsv$datapath,
check.names=FALSE, row.names=1)
})
# ------------------------------------------------------------------------------
# bsButton: "Debarcode"
# ------------------------------------------------------------------------------
# enable the "Debarcode" button only once a valid barcoding scheme is loaded
observe({
toggleState(id="debarcodeDeba", condition=req(isTRUE(vals$debaKeyIsValid)))
})
# run the deconvolution pipeline on click:
# 1. assignPrelim(): assign preliminary barcode IDs to all events
# 2. estCutoffs(): estimate per-sample separation cutoffs
# the button is disabled while running to prevent re-entry
observeEvent(input$debarcodeDeba, {
disable(id="debarcodeDeba")
# assignPrelim()
showNotification(h4(strong("Assigning preliminary IDs...")),
duration=NULL, closeButton=FALSE, id="msg", type="message")
vals$dbFrame1Deba <- CATALYST::assignPrelim(ffDeba(), debaKey())
removeNotification(id="msg")
# estCutoffs()
showNotification("Estimating separation cutoffs...",
id="estimating_sep_cutoffs", duration=NULL, closeButton=FALSE)
vals$cutoff_ests_deba <- sep_cutoffs(CATALYST::estCutoffs(x=vals$dbFrame1Deba))
removeNotification(id="estimating_sep_cutoffs")
# extend sidebar with the cutoff-adjustment controls
output$debarcodingSidebar2 <- renderUI(debarcodingSidebar2)
enable(id="debarcodeDeba")
})
# ------------------------------------------------------------------------------
# cutoff adjustment
# ------------------------------------------------------------------------------
# render UI for cutoff adjustment or input for global cutoff, depending on
# the radio selection (estimated / per-sample adjusted / global)
output$deba_cutoffs_UIDeba <- renderUI({
switch(input$deba_cutoffsDeba,
est_cutoffs=NULL,
adj_cutoffs=
adjustCutoffUI(
dbFrame=vals$dbFrame2Deba,
choices=adjustCutoffChoicesDeba(),
module="Deba"),
global_cutoff=
globalCutoffUI(module="Deba"))
})
# use cutoff estimates
# NOTE(review): observeEvent is keyed on the *value* of the comparison
# (TRUE/FALSE), so this fires whenever the selection toggles to or from
# "est_cutoffs" -- confirm this is the intended trigger
observeEvent(input$deba_cutoffsDeba == "est_cutoffs", ignoreInit=TRUE, {
sep_cutoffs(vals$dbFrame1Deba) <- vals$cutoff_ests_deba
vals$dbFrame2Deba <- vals$dbFrame1Deba
})
# get selectInput choices for cutoff adjustment (one per barcode ID)
adjustCutoffChoicesDeba <- reactive({
req(dbFrameDeba())
rownames(bc_key(dbFrameDeba()))
})
# synchronize selectInput & numericInput w/ yield plot:
# selecting a sample in the yield plot updates the cutoff controls, and
# vice versa (sample 0 = "All" is excluded)
observe({
x <- selectedYieldPlotDeba()
req(!is.null(x), x != 0)
updateSelectInput(session, "select_adjustCutoffDeba", selected=x)
updateNumericInput(session, "input_adjustCutoffDeba",
value=paste(sep_cutoffs(vals$dbFrame2Deba)[x]))
})
observe({
req(input$select_adjustCutoffDeba)
updateSelectInput(session, "select_yieldPlotDeba",
selected=input$select_adjustCutoffDeba)
})
# adjust the selected sample's separation cutoff upon bsButton click
observeEvent(input$button_adjustCutoffDeba, {
x <- match(input$select_adjustCutoffDeba, adjustCutoffChoicesDeba())
sep_cutoffs(vals$dbFrame2Deba)[x] <-
as.numeric(input$input_adjustCutoffDeba)
})
# set one global cutoff for all samples upon bsButton click
observeEvent(input$button_globalCutoffDeba, {
sep_cutoffs(vals$dbFrame2Deba) <- as.numeric(input$input_globalCutoffDeba)
})
# sliderInput: "Mahalanobis distance threshold"
observeEvent(input$button_mhlCutoffDeba, {
vals$mhlCutoffDeba <- input$mhlCutoffDeba
})
# apply cutoffs if deconvolution parameters change; this is the dbFrame
# all downstream plots and tables are driven by
dbFrameDeba <- reactive({
req(vals$dbFrame2Deba)
CATALYST::applyCutoffs(x=vals$dbFrame2Deba, mhl_cutoff=vals$mhlCutoffDeba)
})
# ------------------------------------------------------------------------------
# yieldPlot, eventPlot & mahalPlot
# ------------------------------------------------------------------------------
# inputSelect choices
# yield plot: "All" (coded as 0) plus one entry per barcode ID
yieldPlotChoices <- reactive({
req(vals$dbFrame1Deba)
ids <- rownames(bc_key(vals$dbFrame1Deba))
setNames(c(0, ids), c("All", ids))
})
# event plot: every ID that received at least one event (incl. 0 = unassigned)
eventPlotChoices <- reactive({
req(vals$dbFrame2Deba)
sort(unique(bc_ids(vals$dbFrame2Deba)))
})
# Mahalanobis plot: as above, but unassigned events (ID 0) excluded
mahalPlotChoices <- reactive({
req(vals$dbFrame2Deba)
choices <- eventPlotChoices()
choices[choices != 0]
})
# render UIs (panels wrap the plot plus its selection controls)
output$yieldPlotPanelDeba <- renderUI({
req(yieldPlotChoices())
yieldPlotModule(yieldPlotChoices(), "Deba")
})
output$eventPlotPanel <- renderUI({
x <- eventPlotChoices()
y <- selectedYieldPlotDeba()
if (!is.null(x))
eventPlotPanel(x, y)
})
output$mahalPlotPanel <- renderUI({
x <- mahalPlotChoices()
y <- input$select_mahalPlot
if (!is.null(x))
mahalPlotPanel(x, y)
})
# get n_events & cofactor for eventPlot & mahalPlot
eventPlotNEvents <- reactive(as.numeric(input$n_events))
mahalPlotCofactor <- reactive(input$mahalPlotCofactor)
# render plots (plotYields returns a list; the single requested plot is [[1]])
output$yieldPlotDeba <- renderPlotly({
req(dbFrameDeba())
plotYields(
x=dbFrameDeba(),
which=selectedYieldPlotDeba())[[1]]
})
output$eventPlot <- renderPlot({
req(dbFrameDeba())
plotEvents(
x=dbFrameDeba(),
which=input$select_eventPlot,
n_events=eventPlotNEvents())
})
output$mahalPlot <- renderPlot({
req(dbFrameDeba())
plotMahal(
x=dbFrameDeba(),
which=input$select_mahalPlot,
cofactor=mahalPlotCofactor())
})
# renderDataTable: IDs | Counts | Cutoffs | Yields
output$summaryTblDeba <- DT::renderDataTable({
req(dbFrameDeba())
summary_tbl(dbFrameDeba())
})
# keep track of currently selected sample in the yield plot dropdown
selectedYieldPlotDeba <- reactive({input$select_yieldPlotDeba})
# ------------------------------------------------------------------------------
# next / previous buttons
# ------------------------------------------------------------------------------
# disable "previous" on the first choice and "next" on the last choice,
# separately for the yield, event and Mahalanobis plots
observe({
choices <- yieldPlotChoices()
n <- length(choices)
selected <- match(selectedYieldPlotDeba(), choices)
toggleState(id="prev_yieldPlotDeba", condition=selected != 1)
toggleState(id="next_yieldPlotDeba", condition=selected != n)
})
observe({
choices <- eventPlotChoices()
n <- length(choices)
selected <- match(input$select_eventPlot, choices)
toggleState(id="prev_eventPlot", condition=selected != 1)
toggleState(id="next_eventPlot", condition=selected != n)
})
observe({
choices <- mahalPlotChoices()
n <- length(choices)
selected <- match(input$select_mahalPlot, choices)
toggleState(id="prev_mahalPlot", condition=selected != 1)
toggleState(id="next_mahalPlot", condition=selected != n)
})
# step backward/forward through the choice list on button click; each
# handler moves the corresponding selectInput by one position
observeEvent(input$prev_yieldPlotDeba, {
choices <- yieldPlotChoices()
selected <- match(selectedYieldPlotDeba(), choices)
updateSelectInput(session,
inputId="select_yieldPlotDeba",
selected=choices[selected-1])
})
observeEvent(input$next_yieldPlotDeba, {
choices <- yieldPlotChoices()
selected <- match(selectedYieldPlotDeba(), choices)
updateSelectInput(session,
inputId="select_yieldPlotDeba",
selected=choices[selected+1])
})
observeEvent(input$prev_eventPlot, {
choices <- eventPlotChoices()
selected <- match(input$select_eventPlot, choices)
updateSelectInput(session,
inputId="select_eventPlot",
selected=choices[selected-1])
})
observeEvent(input$next_eventPlot, {
choices <- eventPlotChoices()
selected <- match(input$select_eventPlot, choices)
updateSelectInput(session,
inputId="select_eventPlot",
selected=choices[selected+1])
})
observeEvent(input$prev_mahalPlot, {
choices <- mahalPlotChoices()
selected <- match(input$select_mahalPlot, choices)
updateSelectInput(session,
inputId="select_mahalPlot",
selected=choices[selected-1])
})
observeEvent(input$next_mahalPlot, {
choices <- mahalPlotChoices()
selected <- match(input$select_mahalPlot, choices)
updateSelectInput(session,
inputId="select_mahalPlot",
selected=choices[selected+1])
})
# ------------------------------------------------------------------------------
# synchronize eventPlot yieldPlot and mahalPlot
# ------------------------------------------------------------------------------
# whichever plot's selection changes, propagate it to the other two
# (sample 0 = "All" exists only for the yield plot and is not propagated)
observe({
x <- selectedYieldPlotDeba()
if (is.null(x) || x == 0) return()
updateSelectInput(session, "select_eventPlot", selected=x)
updateSelectInput(session, "select_mahalPlot", selected=x)
})
observe({
x <- input$select_eventPlot
if (is.null(x) || x == 0) return()
updateSelectInput(session, "select_yieldPlotDeba", selected=x)
updateSelectInput(session, "select_mahalPlot", selected=x)
})
observe({
x <- input$select_mahalPlot
if (is.null(x)) return()
updateSelectInput(session, "select_yieldPlotDeba", selected=x)
updateSelectInput(session, "select_eventPlot", selected=x)
})
# toggle checkboxes: "use IDs as names" and "upload naming sheet" are
# mutually exclusive -- checking one unchecks the other
observe({
req(input$box_IDsAsNms == 1)
updateCheckboxInput(session, "box_upldNms", value=FALSE)
})
observe({
req(input$box_upldNms == 1)
updateCheckboxInput(session, "box_IDsAsNms", value=FALSE)
})
# toggle fileInput: "Upload naming sheet (CSV)" is shown only when the
# corresponding checkbox is ticked
observe(toggle(id="input_upldNms", condition=input$box_upldNms))
# ------------------------------------------------------------------------------
# download handlers
# ------------------------------------------------------------------------------
# Output file names for the debarcoded populations.
# NOTE(review): return type differs by branch -- a character vector when IDs
# are used as names, but a data.frame (read.csv) when a naming sheet is
# uploaded; downstream code indexes it accordingly. Returns NULL (invisibly)
# when neither checkbox is ticked.
smplNmsDeba <- reactive({
req(dbFrameDeba())
if (input$box_IDsAsNms) {
c("Unassigned", rownames(bc_key(dbFrameDeba())))
} else if (input$box_upldNms) {
req(input$input_upldNms)
read.csv(input$input_upldNms$datapath, header=FALSE)
}
})
# toggle downloadButton
# Enable the FCS download only once a naming source is available.
# NOTE(review): id "dwnld_debaFCS" differs in case from the handler
# output$dwnld_debaFcs below -- verify against the UI definition.
observe({
req(dbFrameDeba())
test <- input$box_IDsAsNms || (
input$box_upldNms == 1 && !is.null(input$input_upldNms))
toggleState(id="dwnld_debaFCS", condition=test)
})
# downloadHandler: zip archive containing the debarcoding summary table
# (ID | Count | Cutoff | Yield) and one FCS file per barcode population.
output$dwnld_debaFcs <- downloadHandler(
    filename=function() {
        paste0(format(Sys.Date(), "%y%m%d"), "-debarcoding.zip")
    },
    content=function(file) {
        # zip() is called with relative paths, so work inside tempdir();
        # restore the caller's working directory when done (the original
        # setwd() was never undone).
        tmpdir <- tempdir()
        old_wd <- setwd(tmpdir)
        on.exit(setwd(old_wd), add=TRUE)
        # ----------------------------------------------------------------------
        # debarcoding summary table: IDs | Counts | Cutoffs | Yields
        # ----------------------------------------------------------------------
        ids <- rownames(bc_key(dbFrameDeba()))
        nBcs <- nrow(bc_key(dbFrameDeba()))
        cutoffs <- sep_cutoffs(dbFrameDeba())
        # for each barcode, the yield at its applied cutoff; yields()
        # columns correspond to cutoffs seq(0, 1, .01)
        yields <- yields(dbFrameDeba())[cbind(seq_len(nBcs),
            findInterval(cutoffs, seq(0, 1, .01)))]
        yields <- round(100*yields, 4)
        # event counts per barcode ID (vapply for a type-stable result)
        counts <- vapply(ids, function(id)
            sum(bc_ids(dbFrameDeba()) == id), numeric(1))
        tbl <- matrix(cbind(ids, counts, cutoffs, yields), ncol=4,
            dimnames=list(NULL, c("ID", "Count","Cutoff", "Yield [%]")))
        tblNm <- paste0(format(Sys.Date(), "%y%m%d"), "-debarcoding.csv")
        write.csv(tbl, file.path(tmpdir, tblNm), row.names=FALSE)
        # ----------------------------------------------------------------------
        # population-wise FCS files
        # ----------------------------------------------------------------------
        # get output file names
        if (input$box_IDsAsNms) {
            smplNms <- smplNmsDeba()
        } else if (input$box_upldNms) {
            # check that a name has been supplied for every sample
            # & that sample IDs match with the input barcoding scheme
            if (nrow(smplNmsDeba()) < nBcs) {
                showNotification(paste("Only", nrow(smplNmsDeba()),
                    "sample names provided but", nBcs, "needed."),
                    type="error", closeButton=FALSE)
                return()
            } else if (sum(smplNmsDeba()[, 1] %in% ids) != nBcs) {
                showNotification(
                    "Couldn't find a file name for all samples.\n
Please make sure all sample IDs occur\n
in the provided naming scheme.",
                    type="error", closeButton=FALSE)
                return()
            }
            # column 1 = sample ID, column 2 = user-supplied name
            smplNms <- c("Unassigned", paste0(smplNmsDeba()[, 2], "_", ids))
        }
        # map each event's barcode assignment onto an output file
        unique_ids <- unique(bc_ids(dbFrameDeba()))
        nFiles <- length(unique_ids)
        inds <- match(bc_ids(dbFrameDeba()), unique_ids)
        out_nms <- paste0(smplNms, ".fcs")
        # smplNms is ordered as c("Unassigned" (ID 0), ids);
        # realign to the order of unique_ids
        out_nms <- out_nms[match(unique_ids, c(0, ids))]
        # write one FCS file per population
        withProgress(message="Debarcoding samples...", value=0, {
            for (i in seq_along(unique_ids)) {
                suppressWarnings(flowCore::write.FCS(
                    x=ffDeba()[inds == i, ],
                    filename=file.path(tmpdir, out_nms[i])))
                incProgress(1/nFiles, detail=paste0(i, "/", nFiles))
            }
        })
        showNotification(h4(strong("Writing FCS files...")),
            id="msg", duration=NULL, closeButton=NULL, type="default")
        # BUGFIX: include the summary CSV in the archive -- the original
        # built fileNms <- c(tblNm, smplNms) but never used it and zipped
        # only the FCS files.
        zip(zipfile=file, files=c(tblNm, out_nms))
        removeNotification(id="msg")
    },
    contentType="application/zip")
# downloadHandler: zip of the yield- and event-plot PDFs produced by CATALYST.
# NOTE(review): setwd(tmpdir) is never restored, leaving the process cwd
# pointing at tempdir(); consider on.exit(setwd(old)).
output$dwnld_debaPlots <- downloadHandler(
filename=function() { "yield_event_plots.zip" },
content =function(file) {
tmpdir <- tempdir()
setwd(tmpdir)
# write all yield plots to tmpdir as a PDF
CATALYST::plotYields(
x=dbFrameDeba(),
which=yieldPlotChoices(),
out_path=tmpdir)
# write event plots (250 events per sample) to tmpdir
CATALYST::plotEvents(
x=dbFrameDeba(),
out_path=tmpdir,
n_events=250)
# zip the two PDFs (relative paths, hence the setwd above)
zip(zipfile=file,
files=paste0(c("yield_plot", "event_plot"), ".pdf")) },
contentType="application/zip")
# source: /inst/shinyGUI/server-debarcoding.R -- repo mancapaolo/CATALYST (no license specified)
# ==============================================================================
# DEBARCODING
# ==============================================================================
# Reactive: the flowFrame to debarcode -- either the compensated data
# carried over from the compensation tab, or a user-uploaded FCS file.
ffDeba <- reactive({
    if (vals$keepDataComp) {
        # concatenate the compensated flowSet into one flowFrame
        # (removed leftover debug print(fs) from the original)
        fs <- fsComped()
        CATALYST::concatFCS(fs)
    } else {
        req(input$fcsDeba)
        # check validity of input FCS files
        valid <- check_FCS_fileInput(input$fcsDeba)
        if (!valid) return()
        # read raw, untransformed intensities
        read.FCS(
            filename=input$fcsDeba$datapath,
            transformation=FALSE,
            truncate_max_range=FALSE)
    }
})
# expand sidebar
# Render the first debarcoding sidebar (UI object defined elsewhere)
# once an input flowFrame is available.
output$debarcodingSidebar1 <- renderUI({
req(ffDeba())
debarcodingSidebar1
})
# fileInput "upload barcoding scheme (CSV)"
# Offer a multi-select of candidate barcode channels: channel names from
# which stripping letters and punctuation leaves a parseable (mass) number.
output$selectBcChs <- renderUI({
    req(input$boxSelectBcChs == 1)
    chs <- flowCore::colnames(ffDeba())
    # extract the numeric part of each channel name
    ms <- as.numeric(gsub("[[:alpha:][:punct:]]", "", chs))
    # keep channels with a numeric part (ms is already numeric; the
    # original's second as.numeric() was a redundant re-coercion)
    chs <- chs[!is.na(ms)]
    selectInput(
        inputId="input_bcChs", label=NULL, choices=chs,
        multiple=TRUE, selectize=FALSE, size=12)
})
# check validity of debarcoding scheme CSV
# Must have a .csv extension (case-insensitive) and pass check_key()
# against the current flowFrame; the result is stored in vals$debaKeyIsValid.
observe({
    req(input$debaSchemeCsv)
    # BUGFIX: the original pattern "(.csv)$" left the dot unescaped, so any
    # name ending in "csv" (e.g. "abccsv") passed; also avoid shadowing
    # base::match and use grepl instead of the grep-length idiom.
    is_csv <- grepl("\\.csv$", input$debaSchemeCsv$name, ignore.case=TRUE)
    if (!is_csv) {
        showNotification(
            h4(strong("Input debarcoding scheme should be a CSV file.")),
            duration=NULL, type="error")
        return()
    }
    # first column holds the sample IDs
    key <- read.csv(input$debaSchemeCsv$datapath,
        check.names=FALSE, row.names=1)
    vals$debaKeyIsValid <- check_key(key, ffDeba())
})
# get debarcoding scheme
# Reactive: the parsed debarcoding key (only once validated above);
# row names are the sample IDs.
debaKey <- reactive({
    req(isTRUE(vals$debaKeyIsValid))
    # removed the original's dead `key <-` assignment; the read.csv()
    # result is the reactive's value
    read.csv(input$debaSchemeCsv$datapath,
        check.names=FALSE, row.names=1)
})
# ------------------------------------------------------------------------------
# bsButton: "Debarcode"
# ------------------------------------------------------------------------------
# Enable the button once a valid key has been uploaded.
# NOTE(review): req() cancels this observer whenever the key is invalid, so
# the button is never actively re-disabled here -- presumably it starts
# disabled in the UI; confirm.
observe({
toggleState(id="debarcodeDeba", condition=req(isTRUE(vals$debaKeyIsValid)))
})
# Run the debarcoding pipeline on button click: preliminary ID assignment,
# then per-sample cutoff estimation; results are cached in vals and the
# second sidebar is revealed. The button is disabled for the duration.
observeEvent(input$debarcodeDeba, {
disable(id="debarcodeDeba")
# assignPrelim()
showNotification(h4(strong("Assigning preliminary IDs...")),
duration=NULL, closeButton=FALSE, id="msg", type="message")
vals$dbFrame1Deba <- CATALYST::assignPrelim(ffDeba(), debaKey())
removeNotification(id="msg")
# estCutoffs()
showNotification("Estimating separation cutoffs...",
id="estimating_sep_cutoffs", duration=NULL, closeButton=FALSE)
vals$cutoff_ests_deba <- sep_cutoffs(CATALYST::estCutoffs(x=vals$dbFrame1Deba))
removeNotification(id="estimating_sep_cutoffs")
# extend sidebar
output$debarcodingSidebar2 <- renderUI(debarcodingSidebar2)
enable(id="debarcodeDeba")
})
# ------------------------------------------------------------------------------
# cutoff adjustment
# ------------------------------------------------------------------------------
# render UI for cutoff adjustment or input for global cutoff
# Dispatch on the chosen cutoff mode: no extra UI for estimated cutoffs,
# a per-sample adjustment widget, or a single global-cutoff input.
output$deba_cutoffs_UIDeba <- renderUI({
switch(input$deba_cutoffsDeba,
est_cutoffs=NULL,
adj_cutoffs=
adjustCutoffUI(
dbFrame=vals$dbFrame2Deba,
choices=adjustCutoffChoicesDeba(),
module="Deba"),
global_cutoff=
globalCutoffUI(module="Deba"))
})
# use cutoff estimates
# Reset the working dbFrame to the estimated cutoffs.
# NOTE(review): observeEvent is keyed on a comparison expression, so it also
# fires when the mode switches AWAY from "est_cutoffs" (FALSE transition),
# re-applying the estimates -- confirm whether this is intended.
observeEvent(input$deba_cutoffsDeba == "est_cutoffs", ignoreInit=TRUE, {
sep_cutoffs(vals$dbFrame1Deba) <- vals$cutoff_ests_deba
vals$dbFrame2Deba <- vals$dbFrame1Deba
})
# get selectInput choices for cutoff adjustment
# Reactive: the sample IDs (row names of the barcoding key).
adjustCutoffChoicesDeba <- reactive({
req(dbFrameDeba())
rownames(bc_key(dbFrameDeba()))
})
# synchronize selectInput & numericInput w/ yield plot
# Selecting a sample in the yield plot updates the cutoff-adjustment widgets
# (the numericInput shows that sample's current cutoff), and vice versa.
observe({
x <- selectedYieldPlotDeba()
req(!is.null(x), x != 0)
updateSelectInput(session, "select_adjustCutoffDeba", selected=x)
updateNumericInput(session, "input_adjustCutoffDeba",
value=paste(sep_cutoffs(vals$dbFrame2Deba)[x]))
})
observe({
req(input$select_adjustCutoffDeba)
updateSelectInput(session, "select_yieldPlotDeba",
selected=input$select_adjustCutoffDeba)
})
# adjust cutoff upon bsButton click
# Overwrite the selected sample's separation cutoff with the typed value.
observeEvent(input$button_adjustCutoffDeba, {
x <- match(input$select_adjustCutoffDeba, adjustCutoffChoicesDeba())
sep_cutoffs(vals$dbFrame2Deba)[x] <-
as.numeric(input$input_adjustCutoffDeba)
})
# set global cutoff upon bsButton click
# Apply one cutoff value to all samples.
observeEvent(input$button_globalCutoffDeba, {
sep_cutoffs(vals$dbFrame2Deba) <- as.numeric(input$input_globalCutoffDeba)
})
# sliderInput: "Mahalanobis distance threshold"
# Commit the slider value only on button click (avoids recomputation
# while the slider is being dragged).
observeEvent(input$button_mhlCutoffDeba, {
vals$mhlCutoffDeba <- input$mhlCutoffDeba
})
# apply cutoffs if deconvolution parameters change
# Reactive: the fully deconvoluted dbFrame; recomputed whenever the working
# dbFrame (cutoffs) or the committed Mahalanobis threshold changes.
dbFrameDeba <- reactive({
req(vals$dbFrame2Deba)
CATALYST::applyCutoffs(x=vals$dbFrame2Deba, mhl_cutoff=vals$mhlCutoffDeba)
})
# ------------------------------------------------------------------------------
# yieldPlot, eventPlot & mahalPlot
# ------------------------------------------------------------------------------
# inputSelect choices
# Yield plot: 0 (labelled "All") plus every sample ID.
yieldPlotChoices <- reactive({
req(vals$dbFrame1Deba)
ids <- rownames(bc_key(vals$dbFrame1Deba))
setNames(c(0, ids), c("All", ids))
})
# Event plot: the barcode IDs actually assigned (0 = unassigned).
eventPlotChoices <- reactive({
req(vals$dbFrame2Deba)
sort(unique(bc_ids(vals$dbFrame2Deba)))
})
# Mahalanobis plot: same as the event plot minus the unassigned ID 0.
mahalPlotChoices <- reactive({
req(vals$dbFrame2Deba)
choices <- eventPlotChoices()
choices[choices != 0]
})
# render UIs
# Build the three plot panels (module helpers defined elsewhere) once
# their choice vectors are available.
output$yieldPlotPanelDeba <- renderUI({
req(yieldPlotChoices())
yieldPlotModule(yieldPlotChoices(), "Deba")
})
output$eventPlotPanel <- renderUI({
x <- eventPlotChoices()
y <- selectedYieldPlotDeba()
if (!is.null(x))
eventPlotPanel(x, y)
})
output$mahalPlotPanel <- renderUI({
x <- mahalPlotChoices()
y <- input$select_mahalPlot
if (!is.null(x))
mahalPlotPanel(x, y)
})
# get n_events & cofactor for eventPlot & mahalPlot
# Small reactive accessors for the plot parameters.
eventPlotNEvents <- reactive(as.numeric(input$n_events))
mahalPlotCofactor <- reactive(input$mahalPlotCofactor)
# render plots
# Yield plot: plotYields() presumably returns a list of plots; the first
# element corresponds to the selected sample -- confirm against CATALYST.
output$yieldPlotDeba <- renderPlotly({
req(dbFrameDeba())
plotYields(
x=dbFrameDeba(),
which=selectedYieldPlotDeba())[[1]]
})
# Event plot for the selected barcode ID.
output$eventPlot <- renderPlot({
req(dbFrameDeba())
plotEvents(
x=dbFrameDeba(),
which=input$select_eventPlot,
n_events=eventPlotNEvents())
})
# Mahalanobis-distance biaxial plot for the selected sample.
output$mahalPlot <- renderPlot({
req(dbFrameDeba())
plotMahal(
x=dbFrameDeba(),
which=input$select_mahalPlot,
cofactor=mahalPlotCofactor())
})
# renderDataTable: IDs | Counts | Cutoffs | Yields
output$summaryTblDeba <- DT::renderDataTable({
req(dbFrameDeba())
summary_tbl(dbFrameDeba())
})
# Reactive accessor for the sample currently selected in the yield plot;
# used as the reference selection the other plots synchronize to.
selectedYieldPlotDeba <- reactive(
    input$select_yieldPlotDeba
)
# ------------------------------------------------------------------------------
# next / previous buttons
# ------------------------------------------------------------------------------
# Disable "previous" at the first choice and "next" at the last.
# NOTE(review): if the selection is not found, match() gives NA and the
# toggleState condition is NA -- verify shinyjs handles that gracefully.
observe({
choices <- yieldPlotChoices()
n <- length(choices)
selected <- match(selectedYieldPlotDeba(), choices)
toggleState(id="prev_yieldPlotDeba", condition=selected != 1)
toggleState(id="next_yieldPlotDeba", condition=selected != n)
})
observe({
choices <- eventPlotChoices()
n <- length(choices)
selected <- match(input$select_eventPlot, choices)
toggleState(id="prev_eventPlot", condition=selected != 1)
toggleState(id="next_eventPlot", condition=selected != n)
})
observe({
choices <- mahalPlotChoices()
n <- length(choices)
selected <- match(input$select_mahalPlot, choices)
toggleState(id="prev_mahalPlot", condition=selected != 1)
toggleState(id="next_mahalPlot", condition=selected != n)
})
# ---- next/previous navigation for yield, event and Mahalanobis plots --------
# Each observer steps the corresponding selectInput one entry backward or
# forward within its current choice vector; the toggleState observers above
# are assumed to disable the buttons at the ends of the list.
observeEvent(input$prev_yieldPlotDeba, {
choices <- yieldPlotChoices()
selected <- match(selectedYieldPlotDeba(), choices)
updateSelectInput(session,
inputId="select_yieldPlotDeba",
selected=choices[selected-1])
})
observeEvent(input$next_yieldPlotDeba, {
choices <- yieldPlotChoices()
selected <- match(selectedYieldPlotDeba(), choices)
updateSelectInput(session,
inputId="select_yieldPlotDeba",
selected=choices[selected+1])
})
# event-plot navigation
observeEvent(input$prev_eventPlot, {
choices <- eventPlotChoices()
selected <- match(input$select_eventPlot, choices)
updateSelectInput(session,
inputId="select_eventPlot",
selected=choices[selected-1])
})
observeEvent(input$next_eventPlot, {
choices <- eventPlotChoices()
selected <- match(input$select_eventPlot, choices)
updateSelectInput(session,
inputId="select_eventPlot",
selected=choices[selected+1])
})
# Mahalanobis-plot navigation
observeEvent(input$prev_mahalPlot, {
choices <- mahalPlotChoices()
selected <- match(input$select_mahalPlot, choices)
updateSelectInput(session,
inputId="select_mahalPlot",
selected=choices[selected-1])
})
observeEvent(input$next_mahalPlot, {
choices <- mahalPlotChoices()
selected <- match(input$select_mahalPlot, choices)
updateSelectInput(session,
inputId="select_mahalPlot",
selected=choices[selected+1])
})
# ------------------------------------------------------------------------------
# synchronize eventPlot yieldPlot and mahalPlot
# ------------------------------------------------------------------------------
# Changing the sample shown in any one plot propagates to the other two;
# ID 0 ("All"/unassigned) is not propagated from the first two observers
# since the Mahalanobis plot has no entry for it.
observe({
x <- selectedYieldPlotDeba()
if (is.null(x) || x == 0) return()
updateSelectInput(session, "select_eventPlot", selected=x)
updateSelectInput(session, "select_mahalPlot", selected=x)
})
observe({
x <- input$select_eventPlot
if (is.null(x) || x == 0) return()
updateSelectInput(session, "select_yieldPlotDeba", selected=x)
updateSelectInput(session, "select_mahalPlot", selected=x)
})
observe({
x <- input$select_mahalPlot
if (is.null(x)) return()
updateSelectInput(session, "select_yieldPlotDeba", selected=x)
updateSelectInput(session, "select_eventPlot", selected=x)
})
# toggle checkboxes
# The two naming options are mutually exclusive: ticking one unticks the other.
observe({
req(input$box_IDsAsNms == 1)
updateCheckboxInput(session, "box_upldNms", value=FALSE)
})
observe({
req(input$box_upldNms == 1)
updateCheckboxInput(session, "box_IDsAsNms", value=FALSE)
})
# toggle fileInput: "Upload naming sheet (CSV)"
# show the upload widget only while "upload names" is ticked
observe(toggle(id="input_upldNms", condition=input$box_upldNms))
# ------------------------------------------------------------------------------
# download handlers
# ------------------------------------------------------------------------------
# Output file names for the debarcoded populations.
# NOTE(review): return type differs by branch -- a character vector when IDs
# are used as names, but a data.frame (read.csv) when a naming sheet is
# uploaded; downstream code indexes it accordingly. Returns NULL (invisibly)
# when neither checkbox is ticked.
smplNmsDeba <- reactive({
req(dbFrameDeba())
if (input$box_IDsAsNms) {
c("Unassigned", rownames(bc_key(dbFrameDeba())))
} else if (input$box_upldNms) {
req(input$input_upldNms)
read.csv(input$input_upldNms$datapath, header=FALSE)
}
})
# toggle downloadButton
# Enable the FCS download only once a naming source is available.
# NOTE(review): id "dwnld_debaFCS" differs in case from the handler
# output$dwnld_debaFcs below -- verify against the UI definition.
observe({
req(dbFrameDeba())
test <- input$box_IDsAsNms || (
input$box_upldNms == 1 && !is.null(input$input_upldNms))
toggleState(id="dwnld_debaFCS", condition=test)
})
# downloadHandler: zip archive containing the debarcoding summary table
# (ID | Count | Cutoff | Yield) and one FCS file per barcode population.
output$dwnld_debaFcs <- downloadHandler(
    filename=function() {
        paste0(format(Sys.Date(), "%y%m%d"), "-debarcoding.zip")
    },
    content=function(file) {
        # zip() is called with relative paths, so work inside tempdir();
        # restore the caller's working directory when done (the original
        # setwd() was never undone).
        tmpdir <- tempdir()
        old_wd <- setwd(tmpdir)
        on.exit(setwd(old_wd), add=TRUE)
        # ----------------------------------------------------------------------
        # debarcoding summary table: IDs | Counts | Cutoffs | Yields
        # ----------------------------------------------------------------------
        ids <- rownames(bc_key(dbFrameDeba()))
        nBcs <- nrow(bc_key(dbFrameDeba()))
        cutoffs <- sep_cutoffs(dbFrameDeba())
        # for each barcode, the yield at its applied cutoff; yields()
        # columns correspond to cutoffs seq(0, 1, .01)
        yields <- yields(dbFrameDeba())[cbind(seq_len(nBcs),
            findInterval(cutoffs, seq(0, 1, .01)))]
        yields <- round(100*yields, 4)
        # event counts per barcode ID (vapply for a type-stable result)
        counts <- vapply(ids, function(id)
            sum(bc_ids(dbFrameDeba()) == id), numeric(1))
        tbl <- matrix(cbind(ids, counts, cutoffs, yields), ncol=4,
            dimnames=list(NULL, c("ID", "Count","Cutoff", "Yield [%]")))
        tblNm <- paste0(format(Sys.Date(), "%y%m%d"), "-debarcoding.csv")
        write.csv(tbl, file.path(tmpdir, tblNm), row.names=FALSE)
        # ----------------------------------------------------------------------
        # population-wise FCS files
        # ----------------------------------------------------------------------
        # get output file names
        if (input$box_IDsAsNms) {
            smplNms <- smplNmsDeba()
        } else if (input$box_upldNms) {
            # check that a name has been supplied for every sample
            # & that sample IDs match with the input barcoding scheme
            if (nrow(smplNmsDeba()) < nBcs) {
                showNotification(paste("Only", nrow(smplNmsDeba()),
                    "sample names provided but", nBcs, "needed."),
                    type="error", closeButton=FALSE)
                return()
            } else if (sum(smplNmsDeba()[, 1] %in% ids) != nBcs) {
                showNotification(
                    "Couldn't find a file name for all samples.\n
Please make sure all sample IDs occur\n
in the provided naming scheme.",
                    type="error", closeButton=FALSE)
                return()
            }
            # column 1 = sample ID, column 2 = user-supplied name
            smplNms <- c("Unassigned", paste0(smplNmsDeba()[, 2], "_", ids))
        }
        # map each event's barcode assignment onto an output file
        unique_ids <- unique(bc_ids(dbFrameDeba()))
        nFiles <- length(unique_ids)
        inds <- match(bc_ids(dbFrameDeba()), unique_ids)
        out_nms <- paste0(smplNms, ".fcs")
        # smplNms is ordered as c("Unassigned" (ID 0), ids);
        # realign to the order of unique_ids
        out_nms <- out_nms[match(unique_ids, c(0, ids))]
        # write one FCS file per population
        withProgress(message="Debarcoding samples...", value=0, {
            for (i in seq_along(unique_ids)) {
                suppressWarnings(flowCore::write.FCS(
                    x=ffDeba()[inds == i, ],
                    filename=file.path(tmpdir, out_nms[i])))
                incProgress(1/nFiles, detail=paste0(i, "/", nFiles))
            }
        })
        showNotification(h4(strong("Writing FCS files...")),
            id="msg", duration=NULL, closeButton=NULL, type="default")
        # BUGFIX: include the summary CSV in the archive -- the original
        # built fileNms <- c(tblNm, smplNms) but never used it and zipped
        # only the FCS files.
        zip(zipfile=file, files=c(tblNm, out_nms))
        removeNotification(id="msg")
    },
    contentType="application/zip")
# downloadHandler: zip of the yield- and event-plot PDFs produced by CATALYST.
output$dwnld_debaPlots <- downloadHandler(
    filename=function() { "yield_event_plots.zip" },
    content=function(file) {
        # plots are written into tempdir(); zip() needs relative paths, so
        # change into it and restore the working directory on exit (the
        # original setwd() was never undone).
        tmpdir <- tempdir()
        old_wd <- setwd(tmpdir)
        on.exit(setwd(old_wd), add=TRUE)
        # write all yield plots as a PDF
        CATALYST::plotYields(
            x=dbFrameDeba(),
            which=yieldPlotChoices(),
            out_path=tmpdir)
        # write event plots (250 events per sample)
        CATALYST::plotEvents(
            x=dbFrameDeba(),
            out_path=tmpdir,
            n_events=250)
        zip(zipfile=file,
            files=paste0(c("yield_plot", "event_plot"), ".pdf"))
    },
    contentType="application/zip")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.