blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
08e67c752b11d9ee549cdd844e31c57495ebfcac
|
5d5d7785f5ce2ff377ebec29d74382652502c1d8
|
/man/create_IV.Rd
|
56aee463da052653db9d0908cc608863455daffe
|
[
"MIT"
] |
permissive
|
standardgalactic/wpa
|
d7256e719732c7c3f067e88d253e600cd1d66a06
|
b64b562cee59ea737df58a9cd2b3afaec5d9db64
|
refs/heads/main
| 2023-08-10T19:11:03.211088
| 2021-09-08T13:40:35
| 2021-09-08T13:40:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,771
|
rd
|
create_IV.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_IV.R
\name{create_IV}
\alias{create_IV}
\title{Calculate Information Value for a selected outcome variable}
\usage{
create_IV(
data,
predictors = NULL,
outcome,
bins = 5,
siglevel = 0.05,
exc_sig = FALSE,
return = "plot"
)
}
\arguments{
\item{data}{A Person Query dataset in the form of a data frame.}
\item{predictors}{A character vector specifying the columns to be used as
predictors. Defaults to NULL, where all numeric vectors in the data will be
used as predictors.}
\item{outcome}{A string specifying a binary variable, i.e. can only contain
the values 1 or 0.}
\item{bins}{Number of bins to use, defaults to 5.}
\item{siglevel}{Significance level to use in comparing populations for the
outcomes, defaults to 0.05}
\item{exc_sig}{Logical value determining whether to exclude values where the
p-value lies below what is set at \code{siglevel}. Defaults to \code{FALSE}.}
\item{return}{String specifying what to return. This must be one of the
following strings:
\itemize{
\item \code{"plot"}
\item \code{"summary"}
\item \code{"list"}
\item \code{"plot-WOE"}
\item \code{"IV"}
}
See \code{Value} for more information.}
}
\value{
A different output is returned depending on the value passed to the \code{return}
argument:
\itemize{
\item \code{"plot"}: 'ggplot' object. A bar plot showing the IV value of the top
(maximum 12) variables.
\item \code{"summary"}: data frame. A summary table for the metric.
\item \code{"list"}: list. A list of outputs for all the input variables.
\item \code{"plot-WOE"}: A list of 'ggplot' objects that show the WOE for each
predictor used in the model.
\item \code{"IV"} returns a list object which mirrors the return
in \code{Information::create_infotables()}.
}
}
\description{
Specify an outcome variable and return IV outputs.
All numeric variables in the dataset are used as predictor variables.
}
\examples{
# Return a bar plot of IV
sq_data \%>\%
dplyr::mutate(X = ifelse(Workweek_span > 40, 1, 0)) \%>\%
create_IV(outcome = "X",
predictors = c("Email_hours",
"Meeting_hours",
"Instant_Message_hours"),
return = "plot")
# Return summary
sq_data \%>\%
dplyr::mutate(X = ifelse(Collaboration_hours > 2, 1, 0)) \%>\%
create_IV(outcome = "X",
predictors = c("Email_hours", "Meeting_hours"),
return = "summary")
}
\seealso{
Other Variable Association:
\code{\link{IV_by_period}()},
\code{\link{IV_report}()},
\code{\link{plot_WOE}()}
Other Information Value:
\code{\link{IV_by_period}()},
\code{\link{IV_report}()},
\code{\link{plot_WOE}()}
}
\concept{Information Value}
\concept{Variable Association}
|
72c455ed39ceaa948df3f1403798949be2bb0a61
|
d88feac4b653bbbae48765f667062fa28b6b93aa
|
/tests/testthat/setup-Strategy.R
|
b8d0931f3071b4ad56c938ab6bceeaa790abd044
|
[
"MIT"
] |
permissive
|
zumthor86/optPnL
|
7358f7a674de78534b7e85fbb706af11011ba7e6
|
5540f3f0dead7879590abe269ea31b4c95233d52
|
refs/heads/master
| 2023-01-09T19:49:24.228151
| 2020-11-15T15:29:52
| 2020-11-15T15:29:52
| 258,577,387
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 82
|
r
|
setup-Strategy.R
|
# testthat setup: load the package's bundled `strategies` dataset and build
# one strategy object per entry via the internal constructor.  `<<-` publishes
# `strats` to the test environment so every test file in this directory can
# use the pre-built fixtures.
setup({data(strategies);
strats <<- lapply(strategies, optPnL:::create_strat)})
|
066c0ae5e9520842d94615c3a2d54e0694b4c883
|
d092a1a552ad4265fe6c917bbf4c9e14aa8b2e78
|
/R/GUI_juicr.R
|
c0f31ab65217883992708e7855dd895c74b62054
|
[] |
no_license
|
cran/juicr
|
fed72fc55186d707524f41a8722600b035afbc2e
|
c4f95aa95ce98bd8ea1f22007b42b5f938d70b06
|
refs/heads/master
| 2023-04-28T08:34:58.107134
| 2021-04-30T06:10:02
| 2021-04-30T06:10:02
| 363,187,685
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 172,833
|
r
|
GUI_juicr.R
|
#' A GUI screener to quickly code candidate studies for inclusion/exclusion into
#' a systematic review or meta-analysis.
#'
#' A GUI screener to help scan and evaluate the title and abstract of studies to
#' be included in a systematic review or meta-analysis. A description of GUI
#' options and layout is found here: \url{http://lajeunesse.myweb.usf.edu/juicr/juicr_basic_vignette_v0.1.html}.
#'
#' @param theFigureFile An optional file name and location of a .jpg, .png, or
#' .tiff file containing the scientific image/plot/chart/figure to pre-load
#' in the GUI. Within the GUI there is also a button to select the image file.
#' Images in other formats should be converted to .png prior to using juicr.
#' @param theJuicrFile An optional file name and location of a *_juicr.html
#' report containing extractions and images from a previous juicr
#' session to pre-load into the GUI. Within the GUI there is also a button to
#' select an .html file.
#' @param standardizeTheImage When \code{"TRUE"}, all large images are
#' standardized to a common size with a width specified
#' by \code{"standardSize"}. When \code{"FALSE"}, the image is unaltered
#' in size.
#' @param standardSize The common width in pixels for standardizing large images;
#' default is a width of 1000 pixels.
#' @param figureWindowSize Specifies the window size containing the image. By
#' default, this image-viewer window will be 800 (width) by 600 (height)
#' pixels, larger images will be scrollable to fit this window.
#' @param pointSize Changes the default size of a clickable data-point on the
#' image. Size is the radius in pixels (default is 6).
#' @param animateDelay When \code{"TRUE"}, creates a very small pause when
#' plotting individual automated extractions -- giving an animated effect.
#' @param groupNames A vector of the default eight names specifying the
#' labels of each group. Default labels are fruit themed. Can be any size,
#' but GUI will only print first 9 characters.
#' @param groupColors A vector of the default eight color names specifying the
#' coloring of each group. Are in color-names format, but can also be HEX.
#'
#'
#' @return A console message of where saved .csv or *_juicr.html files are located.
#'
#' @examples \dontrun{
#'
#' GUI_juicr(system.file("images", "Kortum_and_Acymyan_2013_Fig4.jpg", package = "juicr"))
#'
#'}
#'
#' @note \strong{Installation and troubleshooting}\cr\cr For Mac OS users,
#' installation is sometimes not straightforward as this GUI requires the
#' Tcl/Tk GUI toolkit to be installed. You can get this toolkit by making sure
#' the latest X11 application (xQuartz) is installed, see here:
#' \url{https://www.xquartz.org/}. More information on
#' installation is found in \code{juicr}'s vignette.
#'
#' @import tcltk utils
#' @importFrom stats sd
#' @importFrom grDevices rgb col2rgb
#' @importFrom XML readHTMLTable htmlParse xpathSApply xmlAttrs
#' @importFrom RCurl base64Encode base64Decode
#'
#' @export GUI_juicr
GUI_juicr <- function(theFigureFile = "",
theJuicrFile = "",
standardizeTheImage = TRUE,
standardSize = 1000,
figureWindowSize = c(800, 600),
pointSize = 6,
animateDelay = TRUE,
groupNames = c("orangeGrp",
"berryGrp",
"cherryGrp",
"plumGrp",
"kiwiGrp",
"bananaGrp",
"grapeGrp",
"pruneGrp"),
groupColors = c("dark orange",
"turquoise3",
"tomato3",
"orchid",
"yellow green",
"goldenrod2",
"plum4",
"saddle brown")
) {
# if EBImage not installed, do it
.juicrDependencies("EBImage")
# Resolve the absolute path of an icon/image file bundled with the juicr package.
getIMG <- function(aFilename) return(system.file("images", aFilename, package = "juicr"))
# checks if tcltk is available and can be loaded
if(requireNamespace("tcltk", quietly = TRUE)) {
juicrLogo <- tcltk::tcl("image", "create", "photo",
file = getIMG("juicr_hex_small_juicing2.png"))
#############################################################################
# START: ABOUT WINDOW: citation and authorship info
aboutJuicrWindow <- function() {
  # Build and show the "about juicr" pop-up: a vignette-link button plus a
  # text panel with citation, author, and acknowledgement information.
  aboutWindow <- tcltk::tktoplevel()
  tcltk::tktitle(aboutWindow) <- "about juicr"
  aboutFrame <- tcltk::ttkframe(aboutWindow)
  # Fix: the "go to vignette for help" button previously opened the metagear
  # vignette (the author's other package -- a copy-paste slip); it now opens
  # the juicr vignette referenced in this file's roxygen header.
  juicrVignette <- tcltk::tkbutton(aboutFrame, text = "go to vignette for help",
                                   width = 180, compound = 'top',
                                   image = juicrLogo,
                                   command = function() utils::browseURL("http://lajeunesse.myweb.usf.edu/juicr/juicr_basic_vignette_v0.1.html"))
  aboutText <- tcltk::tktext(aboutFrame,
                             font = "Consolas 10",
                             height = 20, width = 75,
                             tabs = "0.9i left")
  theText <- paste0(c("citation for 0.1 (beta):\n\n",
                      " Lajeunesse M.J. (2021) juicr: extract data from images. v.0.1 R package\n",
                      "\n\nabout author:\n\n",
                      " Marc J. Lajeunesse, Associate Professor\n",
                      " Department of Integrative Biology\n",
                      " University of South Florida, Tampa USA\n",
                      " homepage: http://lajeunesse.myweb.usf.edu/\n",
                      " email: lajeunesse@usf.edu\n",
                      " twitter: @LajeunesseLab\n",
                      " youtube: https://www.youtube.com/c/LajeunesseLab\n",
                      "\n\nacknowledgements:\n\n",
                      " Citrus icons provided by: https://icons8.com"),
                    collapse = "")
  tcltk::tkinsert(aboutText, "1.0", theText)
  tcltk::tkgrid(juicrVignette, aboutText, padx = 5)
  tcltk::tkpack(aboutFrame)
}
# END: ABOUT WINDOW: citation and authorship info
#################################################
#############################################################################
# START: DEBUG: totally unnecessary but necessary print
# function for within-GUI debugging
# Emit a tcltk object's string handle as a console message -- a minimal
# print-style aid for debugging from inside the GUI callbacks.
debugGUI <- function(aTCLTKObject) {
  theHandle <- as.character(aTCLTKObject)
  message(paste0(theHandle, " "))
}
# END: DEBUG: totally unnecessary but necessary print
# function for within-GUI debugging
#################################################
#############################################################################
# START: GUI THEME & ICONS
# Apply a flat white/grey ttk theme to every widget class used by the GUI.
tcltk::.Tcl("ttk::style configure TNotebook -background white")
tcltk::.Tcl("ttk::style configure TNotebook.Tab -background white")
tcltk::.Tcl("ttk::style configure TNotebook.Tab -foreground grey")
tcltk::.Tcl("ttk::style configure TNotebook -focuscolor grey")
tcltk::.Tcl("ttk::style configure TFrame -background white")
tcltk::.Tcl("ttk::style configure TLabelframe -background white")
tcltk::.Tcl("ttk::style configure TLabelframe.Label -background white")
tcltk::.Tcl("ttk::style configure TLabelframe.Label -foreground grey")
tcltk::.Tcl("ttk::style configure TLabel -background white")
tcltk::.Tcl("ttk::style configure TLabel -foreground grey")
tcltk::.Tcl("ttk::style configure TCombobox -background white")
tcltk::.Tcl("ttk::style configure TCombobox -foreground grey")
tcltk::.Tcl("ttk::style configure TScrollbar -background white")
tcltk::.Tcl("ttk::style configure TButton -foreground black")
tcltk::.Tcl("ttk::style configure message.TButton -foreground orange")
tcltk::.Tcl("ttk::style configure TButton -background white")
tcltk::.Tcl("ttk::style map TButton -background [list active white]")
tcltk::.Tcl("ttk::style map TButton -foreground [list active {green}]")
# Load the toolbar/button icons bundled with the package as Tk photo images.
imageScatter <- tcltk::tcl("image", "create", "photo", file = getIMG("scatterPlot_orange.png"))
imageBarX <- tcltk::tcl("image", "create", "photo", file = getIMG("barPlotX_orange.png"))
imageBarY <- tcltk::tcl("image", "create", "photo", file = getIMG("barPlotY_orange.png"))
imageRegression <- tcltk::tcl("image", "create", "photo", file = getIMG("regressionPlot_orange.png"))
imageLine <- tcltk::tcl("image", "create", "photo", file = getIMG("linePlot_orange.png"))
orangeJuice <- tcltk::tcl("image", "create", "photo", file = getIMG("drinkjuice.png"))
orangeJuiceSave <- tcltk::tcl("image", "create", "photo", file = getIMG("drinkjuice_nostraw.png"))
juicrLogoJuicing <- tcltk::tcl("image", "create", "photo", file = getIMG("juicr_hex_small_juicing.png"))
juiceBottle <- tcltk::tcl("image", "create", "photo", file = getIMG("juiceBottle.png"))
circlePoint1 <- tcltk::tcl("image", "create", "photo", file = getIMG("pointCircle1.png"))
circlePoint5 <- tcltk::tcl("image", "create", "photo", file = getIMG("pointCircle5.png"))
circlePoint15 <- tcltk::tcl("image", "create", "photo", file = getIMG("pointCircle15.png"))
circlePoint15Closed <-tcltk::tcl("image", "create", "photo", file = getIMG("pointCircleOpen.png"))
diamondPoint15 <- tcltk::tcl("image", "create", "photo", file = getIMG("pointDiamond.png"))
squarePoint15 <- tcltk::tcl("image", "create", "photo", file = getIMG("pointSquare.png"))
lineQualityHigh <- tcltk::tcl("image", "create", "photo", file = getIMG("antialiasedLOW.png"))
lineQualityLow <- tcltk::tcl("image", "create", "photo", file = getIMG("antialiasedHIGH.png"))
barPoint1 <- tcltk::tcl("image", "create", "photo", file = getIMG("barShort5.png"))
barPoint5 <- tcltk::tcl("image", "create", "photo", file = getIMG("barShort11.png"))
barPoint15 <- tcltk::tcl("image", "create", "photo", file = getIMG("barShort19.png"))
theOrange <- tcltk::tcl("image", "create", "photo", file = getIMG("orange_ico.png"))
theOrangeGrey <- tcltk::tcl("image", "create", "photo", file = getIMG("orange_grey_ico_test.png"))
autoPointImage <- tcltk::tcl("image", "create", "photo", file = getIMG("autoClustertest.png"))
clusterPointImage <- tcltk::tcl("image", "create", "photo", file = getIMG("autoPointtest.png"))
theBarImage <- tcltk::tcl("image", "create", "photo", file = getIMG("barLine11test.png"))
leftArrowImage <- tcltk::tcl("image", "create", "photo", file = getIMG("left.png"))
rightArrowImage <- tcltk::tcl("image", "create", "photo", file = getIMG("right.png"))
hoverImage <- tcltk::tcl("image", "create", "photo", file = getIMG("hover2.png"))
# Horizontally mirrored copy of the juice glass (negative x subsample flips).
orangeJuiceFlip <- tcltk::tcl("image", "create", "photo")
tcltk::tcl(orangeJuiceFlip, "copy", orangeJuice, "-subsample", -1, 1)
juiceContainer <- tcltk::tcl("image", "create", "photo", file = getIMG("icons8-juice-bottle-96.png"))
# Half-size copy of the juice-bottle icon.  Fix: this copy was previously
# created twice back-to-back, leaking an orphan Tk photo image each launch.
juiceContainerSmall <- tcltk::tcl("image", "create", "photo")
tcltk::tcl(juiceContainerSmall, "copy", juiceContainer, "-subsample", 2, 2)
juicrLogoSmall <- tcltk::tcl("image", "create", "photo", file = getIMG("juicr_hex_small_juicing3.png"))
# END: GUI THEME & ICONS
########################
#############################################################################
# START: juicr figure frame
createJuicrFrame <- function(aJuicrWindow,
theFigureFile,
theStandardizedImageFile,
theFigure,
theFigureJuiced,
animateDelay,
openJuicrFile = "",
aPointColor = groupColors[1],
aTempPointColor = groupColors[1]) {
# create juicr environment to store globals shared across GUI callbacks
juicr.env <- new.env()
# Setter/getter pair over juicr.env -- the only sanctioned way callbacks
# exchange mutable session state (point registry, colors, saved-file name).
set_juicr <- function(aJuicrVar, aValue) assign(aJuicrVar, aValue, envir = juicr.env)
get_juicr <- function(aJuicrVar) get(aJuicrVar, envir = juicr.env)
set_juicr("pointColor", aPointColor)
set_juicr("tempPointColor", aTempPointColor)
#############################################################################
# START: automated extractor functions
# Smallest odd integer >= ceiling(aNum); EBImage brushes require odd sizes.
asOdd <- function(aNum) {
  roundedUp <- ceiling(aNum)
  roundedUp - roundedUp %% 2 + 1
}
# Automatically detect a figure's x-axis (or, when asY = TRUE, its y-axis)
# in an EBImage image: threshold to binary, blank the half of the image where
# the axis is unlikely to be, open with a long horizontal line brush, then
# label surviving segments via watershed.  Returns the labelled mask holding
# the single detected axis line, or FALSE when none was found.
autoX <- function(anEBImage,
binary_threshold = 0.6,
object_threshold = 0.2,
axis_length = 0.5,
asY = FALSE) {
# A y-axis is detected by rotating the image so the axis lies horizontally.
if(asY == TRUE) anEBImage <- EBImage::transpose(EBImage::flop(anEBImage))
# convert to binary, remove where axis unlikely, extract
aBinaryFigure <- 1 - (EBImage::channel(anEBImage, mode = "gray") > binary_threshold)
aBinaryFigure[, 1:round(dim(aBinaryFigure)[2] * axis_length)] <- 0
lineBrush <- EBImage::makeBrush(asOdd(dim(aBinaryFigure)[2] * axis_length), shape = "line", angle = 0)
aPaintedPlot <- EBImage::opening(EBImage::distmap(aBinaryFigure), lineBrush)
allDetectedX <- EBImage::watershed(EBImage::distmap(aPaintedPlot), tolerance = object_threshold, ext = 1)
# if none found, repeat with alternative parameterization
# (loosen the binary threshold and shorten the brush in 0.1 steps, 4 retries)
adjust <- 0.1
while((max(allDetectedX) == 0) && (adjust != 0.5)) {
aBinaryFigure <- 1 - (EBImage::channel(anEBImage, mode = "gray") > (binary_threshold + adjust))
aBinaryFigure[, 1:round(dim(aBinaryFigure)[2] * (axis_length - adjust))] <- 0
lineBrush <- EBImage::makeBrush(asOdd(dim(aBinaryFigure)[2] * (axis_length - adjust)), shape = "line", angle = 0)
aPaintedPlot <- EBImage::opening(EBImage::distmap(aBinaryFigure), lineBrush)
allDetectedX <- EBImage::watershed(EBImage::distmap(aPaintedPlot), tolerance = object_threshold, ext = 1)
adjust <- adjust + 0.1
}
# eliminate all but the longest & lowermost
if(max(allDetectedX) > 1) {
allLines <- EBImage::computeFeatures.shape(allDetectedX)
exclusionList <- which(allLines[, "s.area"] != max(allLines[, "s.area"]))
allDetectedX <- EBImage::rmObjects(allDetectedX, exclusionList)
theCoordinates <- EBImage::computeFeatures.moment(allDetectedX)
exclusionList <- which(theCoordinates[, "m.cy"] != max(theCoordinates[, "m.cy"]))
allDetectedX <- EBImage::rmObjects(allDetectedX, exclusionList)
}
if(max(allDetectedX) == 0) return(FALSE)
# Undo the rotation so the returned mask matches the input orientation.
if(asY == TRUE) return(EBImage::flop(EBImage::transpose(allDetectedX)))
return(allDetectedX)
}
# Defaults for the automated point extractor: markers assumed filled ("disc"
# brush shape) unless the user toggles otherwise in the GUI.
theAutoPointsAreEmpty <- FALSE
theAutoPointsShape <- "disc"
# Automatically detect data points inside the plotting region bounded by the
# previously detected axes (theX, theY).  Thresholds to binary, erases
# everything outside the axes, opens with a point-shaped brush, and labels
# candidates via watershed.  Returns the labelled mask, or FALSE if none found.
autoPoints <- function(anEBImage,
theX,
theY,
point_shape = "disc",
point_empty = FALSE,
point_size = 3,
point_tolerance = 2,
binary_threshold = 0.63) {
aBinaryFigure <- 1 - (EBImage::channel(anEBImage, mode = "gray") > binary_threshold)
# erase everything outside detected axis
Xcontr <- EBImage::ocontour(theX)
Xmax <- max(Xcontr[[1]][, 1]); Xmin <- min(Xcontr[[1]][, 1])
aBinaryFigure[c(1:(Xmin + 3), Xmax:dim(aBinaryFigure)[1]), ] <- 0
Ycontr <- EBImage::ocontour(theY)
Ymax <- max(Ycontr[[1]][, 2]); Ymin <- min(Ycontr[[1]][, 2])
aBinaryFigure[, c(1:(Ymin + 3), Ymax:dim(aBinaryFigure)[2]) ] <- 0
# Unfilled markers: fill their hulls first so they detect like solid points.
# NOTE(review): tolerance is hard-coded to 2 here rather than using the
# point_tolerance argument -- confirm whether that is intentional.
if(point_empty == TRUE) {
aBinaryFigure <- EBImage::fillHull(EBImage::watershed(EBImage::distmap(aBinaryFigure), tolerance = 2, ext = 1))
}
# paint candidate points with box, disc, or diamond brush with defined size
pointBrush <- EBImage::makeBrush(size = asOdd(point_size), shape = point_shape, step = TRUE)
aPaintedFigure <- EBImage::opening(EBImage::distmap(aBinaryFigure), pointBrush)
detectedPoints <- EBImage::watershed(EBImage::distmap(aPaintedFigure), tolerance = point_tolerance, ext = 1)
# if none found, repeat with alternative parameterization
# (grow the brush in steps of 2 px, up to 6 retries)
adjust <- 1
while((max(detectedPoints) == 0) && (adjust != 11)) {
pointBrush <- EBImage::makeBrush(size = asOdd(adjust), shape = point_shape, step = TRUE)
aPaintedFigure <- EBImage::opening(EBImage::distmap(aBinaryFigure), pointBrush)
detectedPoints <- EBImage::watershed(EBImage::distmap(aPaintedFigure), tolerance = point_tolerance, ext = 1)
adjust <- adjust + 2
}
if(max(detectedPoints) == 0) return(FALSE)
return(detectedPoints)
}
# Keep only "cluster" objects: detections whose area exceeds mean + 1 SD of
# all detected areas (i.e. blobs likely made of several overlapping points).
# Fix: EBImage::computeFeatures.shape() was previously evaluated three times
# on the same mask; its result is now computed once and reused.
getClusters <- function(theDectedPoints) {
  theAreas <- EBImage::computeFeatures.shape(theDectedPoints)[, "s.area"]
  # Threshold separating ordinary points from clusters of merged points.
  isCluster <- mean(theAreas) + stats::sd(theAreas)
  thenonClusters <- which(theAreas < isCluster)
  return(EBImage::rmObjects(theDectedPoints, thenonClusters))
}
# Keep only ordinary (non-cluster) objects: detections whose area is below
# mean + 1 SD of all detected areas.  Complement of getClusters().
# Fix: EBImage::computeFeatures.shape() was previously evaluated three times
# on the same mask; its result is now computed once and reused.
getNonClusters <- function(theDectedPoints) {
  theAreas <- EBImage::computeFeatures.shape(theDectedPoints)[, "s.area"]
  # Same mean + 1 SD threshold used by getClusters().
  isCluster <- mean(theAreas) + stats::sd(theAreas)
  theClusters <- which(theAreas >= isCluster)
  return(EBImage::rmObjects(theDectedPoints, theClusters))
}
# Bounding box (x1, y1, x2, y2) of the single detected axis object, taken
# from its outer contour.
getCoord_detectedAxis <- function(aDetectedImage) {
  theContour <- EBImage::ocontour(aDetectedImage)[[1]]
  c(min(theContour[, 1]), min(theContour[, 2]),
    max(theContour[, 1]), max(theContour[, 2]))
}
# Centre of mass (x, y) of every labelled object in a detected-points mask.
getCoord_detectedPoints <- function(aDetectedImage) {
return(EBImage::computeFeatures.moment(aDetectedImage)[, 1:2])
}
# Combine the bounding boxes of the detected x- and y-axes so the returned
# (x1, y1, x2, y2) span stops where the two axes cross, rather than where
# each axis line happens to end.
resolve_crossedAxes <- function(theX, theY, asY = FALSE) {
  boxX <- getCoord_detectedAxis(theX)
  boxY <- getCoord_detectedAxis(theY)
  if (asY == TRUE) {
    return(c(boxY[1], boxY[2], boxY[3], boxX[2]))
  }
  c(boxY[3], boxX[2], boxX[3], boxX[4])
}
# Automatically detect bar-chart features (the caps of column bars and error
# bars) inside the region bounded by the detected axes.  Returns the labelled
# mask of detected bars, or FALSE when none were found.
autoBars <- function(anEBImage,
theX,
theY,
binary_threshold = 0.6,
object_threshold = 0.1,
bar_length = 9,
axis_length = 0.5,
asY = FALSE) {
# Horizontal-bar charts are handled by rotating the image first.
if(asY == TRUE) anEBImage <- EBImage::transpose(EBImage::flop(anEBImage))
aBinaryFigure <- 1 - (EBImage::channel(anEBImage, mode = "gray") > binary_threshold)
# erase everything outside detected axis
Xcontr <- EBImage::ocontour(theX)
Xmax <- max(Xcontr[[1]][, 1]); Xmin <- min(Xcontr[[1]][, 1])
aBinaryFigure[c(1:(Xmin + 3), Xmax:dim(aBinaryFigure)[1]), ] <- 0
Ycontr <- EBImage::ocontour(theY)
Ymax <- max(Ycontr[[1]][, 2]); Ymin <- min(Ycontr[[1]][, 2])
aBinaryFigure[, c(1:(Ymin + 3), Ymax:dim(aBinaryFigure)[2]) ] <- 0
# detect all horizontal lines (the caps of column bars and error bars)
# NOTE(review): the variable is named verticalLinesOnlyFigure although the
# line brush has angle = 0 (horizontal) -- the name, not the logic, looks off.
lineBrush <- EBImage::makeBrush(bar_length, shape = "line", angle = 0)
verticalLinesOnlyFigure <- EBImage::opening(EBImage::distmap(aBinaryFigure), lineBrush)
extractedBars <- EBImage::watershed(EBImage::distmap(verticalLinesOnlyFigure), object_threshold)
# clean up detections: exclude large lines detected, based on % X axis length
theLines <- EBImage::computeFeatures.shape(extractedBars)
exclusionList <- which(theLines[, "s.area"] >= dim(extractedBars)[1] * axis_length)
extractedBars <- EBImage::rmObjects(extractedBars, exclusionList)
## clean up detections: outliers
#extractedBars <- figure_removeOutlyingPoints(extractedBars, extractedXFigure, extractedYFigure)
if(max(extractedBars) == 0) return(FALSE)
# Undo the rotation so the returned mask matches the input orientation.
if(asY == TRUE) return(EBImage::flop(EBImage::transpose(extractedBars)))
return(extractedBars)
}
# END: automated extractor functions
#######################################################
#############################################################################
# START: figure point vector and manipulation functions
# Registry of all extracted point ids for this figure (kept in juicr.env).
set_juicr("figurePoints", c())
# Strip the "pointID" prefix from a canvas tag to recover the numeric id.
point_indexToPoint <- function(aPointIndex) {
  as.numeric(gsub("pointID", "", aPointIndex))
}
# Build the canvas tag ("pointID<n>") for a numeric point id.
point_pointToIndex <- function(aPoint) {
  paste0("pointID", aPoint)
}
# Register a new point: its id is max(existing) + 1, or 1 when the registry
# is empty.  Returns the freshly assigned id.
point_add <- function() {
  existingPoints <- get_juicr("figurePoints")
  if (length(existingPoints) == 0) {
    newPoint <- 1
  } else {
    newPoint <- max(existingPoints) + 1
  }
  set_juicr("figurePoints", c(existingPoints, newPoint))
  return(newPoint)
}
# Remove the given id(s) from the point registry.
point_delete <- function(aPoint) {
  allPoints <- get_juicr("figurePoints")
  keepMask <- !(allPoints %in% aPoint)
  set_juicr("figurePoints", allPoints[keepMask])
}
# Tag helpers: every canvas item carries a vector of Tk tags.
point_getTags <- function(aPointIndex) return(as.character(tcl(mainFigureCanvas, "gettags", aPointIndex)))
point_getAll <- function() return(get_juicr("figurePoints"))
# Third tag = extraction type (compared against "point"/"error"/"regression"/
# "line" elsewhere in this file).
point_getType <- function(aPointIndex) return(as.character(point_getTags(aPointIndex)[3]))
# Second tag = creation origin (compared against "auto"/"autobar"/"cluster").
point_getAuto <- function(aPointIndex) return(as.character(point_getTags(aPointIndex)[2]))
# Canvas indices of every registered point whose type tag equals pointType.
point_getAllbyType <- function(pointType = "point") {
  allIndices <- point_pointToIndex(point_getAll())
  typeOfEach <- vapply(allIndices, point_getType, character(1))
  allIndices[typeOfEach == pointType]
}
# Canvas indices of every registered point whose origin tag equals pointType.
point_getAllbyAuto <- function(pointType = "auto") {
  allIndices <- point_pointToIndex(point_getAll())
  originOfEach <- vapply(allIndices, point_getAuto, character(1))
  allIndices[originOfEach == pointType]
}
# Canvas coordinates of an extraction.  For "point" types the raw canvas
# position is shifted so the returned value is the marker's centre; the
# offsets differ by how the point was created.  Error/regression lines return
# their two endpoints; free-hand "line" items return all their coordinates.
point_getCoordinates <- function(aPointIndex) {
theCoord <- as.numeric(as.character(tkcoords(mainFigureCanvas, aPointIndex)))
theType <- point_getType(aPointIndex)
if(theType == "point") {
# Offsets presumably equal half the marker image's pixel size (8 px for
# auto/cluster icons, 8 x 3 px for bar caps) -- TODO confirm vs the icons.
if(point_getAuto(aPointIndex) == "autobar") {
theCoordinates <- c(theCoord[1] + 8, theCoord[2] + 3)
} else if(point_getAuto(aPointIndex) == "auto") {
theCoordinates <- c(theCoord[1] + 8, theCoord[2] + 8)
} else if(point_getAuto(aPointIndex) == "cluster") {
theCoordinates <- c(theCoord[1] + 8, theCoord[2] + 8)
} else {
# Manual points are ovals drawn with the user-configurable pointSize radius.
theCoordinates <- c(theCoord[1] + pointSize, theCoord[2] + pointSize)
}
} else if(theType == "error") {
theCoordinates <- c(theCoord[1], theCoord[2], theCoord[3], theCoord[4])
} else if (theType == "regression") {
theCoordinates <- c(theCoord[1], theCoord[2], theCoord[3], theCoord[4])
} else if (theType == "line") {
theCoordinates <- theCoord
}
# NOTE(review): an unrecognized type leaves theCoordinates undefined and
# errors on the next line -- confirm all tag types are covered above.
return(theCoordinates)
}
# A point's value on one axis: calibrated to data units when the axis min/max
# text boxes are filled in, otherwise the raw canvas pixel coordinate.
point_getCalibratedValue <- function(aPointIndex, theAxis = "x", coordinates = FALSE) {
theCoord <- point_getCoordinates(aPointIndex)[ifelse(theAxis == "x", 1, 2)]
# coordinates = TRUE bypasses calibration and returns the pixel coordinate.
if(coordinates == TRUE) return(theCoord)
if(theAxis == "x") {
xMaxValue <- as.numeric(text_get(figureXmaxDisplay))
xMinValue <- as.numeric(text_get(figureXminDisplay))
# No calibration values entered yet: fall back to the pixel coordinate.
if(all(is.na(c(xMaxValue, xMinValue)))) return(theCoord)
}
if(theAxis == "y") {
yMaxValue <- as.numeric(text_get(figureYmaxDisplay))
yMinValue <- as.numeric(text_get(figureYminDisplay))
if(all(is.na(c(yMaxValue, yMinValue)))) return(theCoord)
}
return(coordinate_calibrate(theCoord, theAxis))
}
# TRUE when either calibration endpoint text box for the axis is still blank.
isEmpty_calibrate <- function(theAxis = "x") {
  if (theAxis == "x") {
    maxText <- text_get(figureXmaxDisplay)
    minText <- text_get(figureXminDisplay)
  } else {
    maxText <- text_get(figureYmaxDisplay)
    minText <- text_get(figureYminDisplay)
  }
  maxText == "" || minText == ""
}
# Linearly map a canvas pixel coordinate to data units, using the endpoints
# of the user-placed calibration line and the typed-in axis min/max values.
# Returns NA when both min and max are missing.
coordinate_calibrate <- function(theCoor, theAxis = "x") {
if(theAxis == "x") {
maxValue <- as.numeric(text_get(figureXmaxDisplay))
minValue <- as.numeric(text_get(figureXminDisplay))
if(all(is.na(c(maxValue, minValue)))) return(NA)
# Pixel x-positions of the calibration line's two endpoints.
posLine <- as.numeric(tkcoords(mainFigureCanvas, x_calibrationLine))[c(1, 3)]
calibrated <- (theCoor - min(posLine)) * ((maxValue - minValue)/(max(posLine) - min(posLine))) + minValue
} else {
maxValue <- as.numeric(text_get(figureYmaxDisplay))
minValue <- as.numeric(text_get(figureYminDisplay))
if(all(is.na(c(maxValue, minValue)))) return(NA)
posLine <- as.numeric(tkcoords(mainFigureCanvas, y_calibrationLine))[c(2, 4)]
# Canvas y grows downward, so the y-axis mapping is inverted.
calibrated <- (max(posLine) - theCoor) * ((maxValue - minValue)/(max(posLine) - min(posLine))) + minValue
}
return(calibrated)
}
# Data units represented by a single pixel along the given axis -- the
# resolution limit of any extraction -- from the calibration line's span.
point_pixelError <- function(theAxis = "x") {
if(theAxis == "x") {
maxValue <- as.numeric(text_get(figureXmaxDisplay))
minValue <- as.numeric(text_get(figureXminDisplay))
posLine <- as.numeric(tkcoords(mainFigureCanvas, x_calibrationLine))[c(1, 3)]
} else {
maxValue <- as.numeric(text_get(figureYmaxDisplay))
minValue <- as.numeric(text_get(figureYminDisplay))
posLine <- as.numeric(tkcoords(mainFigureCanvas, y_calibrationLine))[c(2, 4)]
}
return((maxValue - minValue)/(max(posLine) - min(posLine)))
}
# Read the full contents of a tk text widget as one space-joined string.
text_get <- function(aTextIndex) paste(as.character(tcl(aTextIndex, "get", "1.0", "end")), collapse = " ")
# END: figure point vector and manipulation functions
#######################################################
#############################################################################
# START: text functions for data tabulation
# Pop up a window displaying a tab-delimited extraction table, with buttons
# to copy it to the clipboard, strip formatting, save it as a .csv named
# after the source figure, or close the window.
displayData <- function(tabDelimitedText, caption) {
extractionWindow <- tcltk::tktoplevel()
tcltk::tktitle(extractionWindow) <- paste0(caption, " via juicr")
dataFrame <- tcltk::ttklabelframe(extractionWindow,
text = caption,
padding = 2)
# NOTE(review): dataScroll is parented to extractionWindow but gridded next
# to dataText (whose parent is dataFrame) -- confirm Tk tolerates this.
dataScroll <- tcltk::ttkscrollbar(extractionWindow, orient = "vertical",
command = function(...) tcltk::tkyview(dataText, ...))
dataText <- tcltk::tktext(dataFrame,
font = "Consolas 10",
height = 20, width = 160, tabs = "0.9i left",
yscrollcommand = function(...) tcltk::tkset(dataScroll, ...))
aText <- tcltk::tkinsert(dataText, "1.0", tabDelimitedText)
# Bold the header row (line 1) of the table.
tcltk::tktag.add(dataText, "aTag1", "1.0", "1.end")
tcltk::tktag.configure(dataText, "aTag1", font = "Consolas 10 bold")
tcltk::tkgrid(dataText, dataScroll, sticky = "nsew")
buttonFrame <- tcltk::ttkframe(dataFrame)
# NOTE(review): utils::writeClipboard is Windows-only -- this button will
# error on macOS/Linux; confirm intended platform support.
clipboardButton <- tcltk::ttkbutton(buttonFrame, width = 12,
text = " copy to\nclipboard",
command = function() utils::writeClipboard(tabDelimitedText))
removeFormatingButton <- tcltk::ttkbutton(buttonFrame, width = 12,
text = " remove\nformatting",
command = function() {
tcltk::tkconfigure(dataText, tabs = "")
tcltk::tktag.delete(dataText, "aTag1")
})
# Save button: derive the csv file name from the figure file plus a suffix
# keyed by the window caption, flash the file name on the close button for
# 2 s, then round-trip the text through read.table to write the csv.
csvButton <- tcltk::ttkbutton(buttonFrame, width = 12,
text = "save as\n .csv",
command = function() {
fileContents <- switch(caption,
"point/sample extractions" = "points",
"bar extractions" = "bars",
"axis line extractions" = "axes",
"error bar extractions" = "error_bars",
"regression line extractions" = "regressions",
"line extractions" = "lines"
)
theNewFile <- paste0(tools::file_path_sans_ext(basename(theFigureFile)),
"_juicr_extracted_",
fileContents,
".csv")
tcltk::tkconfigure(closeButton, text = paste0("SAVING AS:\n", theNewFile), style = "message.TButton")
tcltk::tcl("update"); Sys.sleep(2);
someTable <- read.table(text = tabDelimitedText,
sep = "\t", header = TRUE)
write.csv(someTable,
file = theNewFile,
row.names = FALSE)
tcltk::tkconfigure(closeButton, text = " close\nwindow", style = "TButton")
})
# closeButton is referenced inside csvButton's callback above; that is safe
# because the callback only runs after closeButton exists.
closeButton <- tcltk::ttkbutton(buttonFrame, width = 40,
text = " close\nwindow",
command = function() tcltk::tkdestroy(extractionWindow))
tcltk::tkgrid(removeFormatingButton, clipboardButton, csvButton, closeButton)
tcltk::tkgrid(buttonFrame)
tcltk::tkpack(dataFrame)
}
# Assemble every extraction table plus session metadata (source file, date,
# notes, axis calibration text) into one named list, e.g. for saving a
# *_juicr.html report.
get_ExtractionList <- function() {
# Concatenate all lines of the notes widget into one newline-joined string.
fullNotes <- ""
for(i in 1:(as.integer(tcltk::tclvalue(tcltk::tcl(theNotes, "index", "end"))) - 1)) {
lineNotes <- tcltk::tcl(theNotes, "get", paste0(i, ".0"), paste0(i, ".end"))
fullNotes <- paste0(fullNotes, paste0(lineNotes, collapse = " "), "\n")
}
# NOTE(review): all six extraction slots call getPointExtractions(); the
# "bars"/"axes"/"error_bars"/"regressions"/"lines" entries look like a
# copy-paste -- confirm whether dedicated extractor getters exist and were
# intended here.
allExtractions <- list("points" = getPointExtractions(sendToFile = TRUE),
"bars" = getPointExtractions(sendToFile = TRUE),
"axes" = getPointExtractions(sendToFile = TRUE),
"error_bars" = getPointExtractions(sendToFile = TRUE),
"regressions" = getPointExtractions(sendToFile = TRUE),
"lines" = getPointExtractions(sendToFile = TRUE),
"info" = data.frame("file" = theFigureFile,
"date" = Sys.Date(),
"notes" = fullNotes,
"figureXminDisplay" = as.character(text_get(figureXminDisplay)),
"figureXmaxDisplay" = as.character(text_get(figureXmaxDisplay)),
"figureXcaptionDisplay" = as.character(text_get(figureXcaptionDisplay)),
"figureXunitsDisplay" = as.character(text_get(figureXunitsDisplay)),
"figureYminDisplay" = as.character(text_get(figureYminDisplay)),
"figureYmaxDisplay" = as.character(text_get(figureYmaxDisplay)),
"figureYcaptionDisplay" = as.character(text_get(figureYcaptionDisplay)),
"figureYunitsDisplay" = as.character(text_get(figureYunitsDisplay))))
return(allExtractions)
}
# Track the most recent file this session saved to; echoed by point_summary().
set_juicr("theSavedFile", "not saved this session")
point_summary <- function() {
    # Build the human-readable extraction summary shown in the side panel:
    # a header (extraction count, per-axis pixel error, save-file status)
    # followed by a tab-separated table of x, y, type, and group for every
    # extracted point.  Returns the summary as a single string.
    #TO DO: OUT OF BOUNDS VALUES
    theNumberOfPoints <- length(point_getAll())
    theSummary <- "EXTRACTION SUMMARY\n---------------------------------\n"
    theSummary <- paste0(theSummary, "number of extractions = ", theNumberOfPoints, "\n")
    if(theNumberOfPoints == 0) return(theSummary)
    allThePoints <- point_pointToIndex(point_getAll())
    # axis min/max entered by the user; NA when the field is empty/non-numeric
    xMaxValue <- suppressWarnings(as.numeric(text_get(figureXmaxDisplay)))
    xMinValue <- suppressWarnings(as.numeric(text_get(figureXminDisplay)))
    yMaxValue <- suppressWarnings(as.numeric(text_get(figureYmaxDisplay)))
    yMinValue <- suppressWarnings(as.numeric(text_get(figureYminDisplay)))
    pointCoorX <- sapply(allThePoints, function(x) point_getCoordinates(x)[1])
    pointCoorY <- sapply(allThePoints, function(x) point_getCoordinates(x)[2])
    if(all(is.na(c(xMaxValue, yMaxValue, xMinValue, yMinValue)))) {
      # no calibration at all: report raw pixel coordinates
      xCalibrated <- signif(pointCoorX, 4)
      yCalibrated <- signif(pointCoorY, 4)
    } else {
      theSummary <- paste0(theSummary, "pixel error per extraction:\n")
      # NOTE(review): as.numeric() on a 1-line text widget yields length-1
      # results, so these length-0 checks look unreachable unless text_get()
      # can return character(0) -- TODO confirm against text_get().
      if(length(xMaxValue) == 0 && length(xMinValue) == 0) {
        xCalibrated <- NA
      } else {
        xCalibrated <- sapply(allThePoints, function(x) suppressWarnings(point_getCalibratedValue(x, theAxis = "x")))
        theSummary <- paste0(theSummary, "  x = ",
                             paste0(text_get(figureXcaptionDisplay), sep = " ", collapse = ""),
                             paste0("(", text_get(figureXunitsDisplay),")", sep = " "),
                             "+/- ", signif(point_pixelError("x"), 4), "\n")
      }
      if(length(yMaxValue) == 0 && length(yMinValue) == 0) {
        yCalibrated <- NA
      } else {
        yCalibrated <- sapply(allThePoints, function(x)suppressWarnings(point_getCalibratedValue(x, theAxis = "y")))
        theSummary <- paste0(theSummary, "  y = ",
                             paste0(text_get(figureYcaptionDisplay), sep = " ", collapse = ""),
                             paste0("(", text_get(figureYunitsDisplay),")", sep = " "),
                             "+/- ", signif(point_pixelError("y"), 4), "\n")
      }
    }
    theSummary <- paste0(theSummary, "saved in file =\n")
    theSummary <- paste0(theSummary, "  ", get_juicr("theSavedFile"), "\n")
    theSummary <- paste0(theSummary, "---------------------------------\n")
    theSummary <- paste0(theSummary, "x\ty\ttype\tgroup\n")
    # one tab-separated row per point: x, y, abbreviated type, group tag
    theSums <- paste0(signif(xCalibrated,4), "\t",
                      signif(yCalibrated,4), "\t",
                      sapply(allThePoints, function(x) paste0(abbreviate(point_getTags(x)[3], 3, dot = TRUE), "\t")),
                      sapply(allThePoints, function(x) paste0(point_getTags(x)[2], "\n")))
    return(paste0(c(theSummary, theSums), collapse = ""))
  }
getPointExtractions <- function(sendToWindow = FALSE, sendToFile = FALSE, coordinates = FALSE) {
    # Tabulate all extracted scatter points: calibrated x/y values, axis
    # labels/units, raw pixel coordinates, and group tag.
    # Returns a tab-delimited string by default; a data.frame when
    # sendToFile = TRUE; displays a window when sendToWindow = TRUE.
    # NOTE(review): the 'coordinates' parameter is unused in this body --
    # presumably kept for interface symmetry; confirm before removing.
    allThePoints <- point_getAllbyType("point")
    if(length(allThePoints) == 0) return(data.frame())
    # raw pixel positions (coordinates = TRUE) and calibrated data values
    xCoordinate <- sapply(allThePoints, function(x) suppressWarnings(point_getCalibratedValue(x, theAxis = "x", coordinates = TRUE)))
    yCoordinate <- sapply(allThePoints, function(x) suppressWarnings(point_getCalibratedValue(x, theAxis = "y", coordinates = TRUE)))
    xCalibrated <- sapply(allThePoints, function(x) suppressWarnings(point_getCalibratedValue(x, theAxis = "x")))
    yCalibrated <- sapply(allThePoints, function(x) suppressWarnings(point_getCalibratedValue(x, theAxis = "y")))
    theSummary <- paste0(c("x-calibrated\tx-label\tx-units\tx-coord\ty-calibrated\ty-label\ty-units\ty-coord\tgroup\n",
                           paste0(
                             signif(as.numeric(xCalibrated), 7), "\t",
                             text_get(figureXcaptionDisplay), "\t",
                             text_get(figureXunitsDisplay), "\t",
                             as.numeric(xCoordinate), "\t",
                             signif(as.numeric(yCalibrated), 7), "\t",
                             text_get(figureYcaptionDisplay), "\t",
                             text_get(figureYunitsDisplay), "\t",
                             as.numeric(yCoordinate), "\t",
                             sapply(allThePoints, function(x) paste0(point_getTags(x)[2], "\n"))
                           )), collapse = "")
    if(sendToFile == TRUE) return(read.table(text = theSummary, sep = "\t", header = TRUE))
    if(sendToWindow == TRUE) displayData(theSummary, "point/sample extractions")
    return(theSummary)
  }
getBarExtractions <- function(sendToWindow = FALSE, sendToFile = FALSE) {
    # Tabulate auto-detected bars.  Each bar is represented by either 3
    # auto-extracted points (bar top + lower/upper error ends) or 2 points
    # (bar top + one error end); the layout is guessed from the x-spread of
    # the first three points, then each bar's y-values are sorted so that
    # lower <= bar <= upper.
    # Returns a tab-delimited string; a data.frame when sendToFile = TRUE;
    # displays a window when sendToWindow = TRUE.
    allThePoints <- point_getAllbyAuto("autobar")
    if(length(allThePoints) == 0) return(data.frame())
    allXCoords <- sapply(allThePoints, function(x) point_getCalibratedValue(x, theAxis = "x"))
    allYCoords <- sapply(allThePoints, function(x) point_getCalibratedValue(x, theAxis = "y"))
    # FIX(review): with < 3 points allXCoords[1:3] contained NA, making the
    # layout test below error out (`if (NA)`); too few points to form a bar.
    if(length(allXCoords) < 3) return(data.frame())
    numberBars <- length(allXCoords) %/% 3
    # a wide x-spread among the first triplet implies 2 points per bar
    if(max(allXCoords[1:3]) - min(allXCoords[1:3]) >= 3) numberBars <- length(allXCoords) %/% 2
    theValues <- data.frame(matrix(allYCoords, nrow = numberBars, byrow = TRUE))
    # seq_len() guards against a zero-row table (1:nrow would iterate 1, 0)
    for(i in seq_len(nrow(theValues)))
      theValues[i, ] <- theValues[i, (sort(as.numeric(theValues[i,]), index.return = TRUE)$ix)]
    if(numberBars == length(allXCoords) %/% 3) {
      theSummary <- paste0(c("bar\tlower\tupper\tgroup\n",
                             paste0(
                               signif(as.numeric(theValues[, 2]), 7), "\t",
                               signif(as.numeric(theValues[, 1]), 7), "\t",
                               signif(as.numeric(theValues[, 3]), 7), "\t",
                               paste0("autoBar", 1:nrow(theValues)), "\n"
                             )), collapse = "")
    } else {
      theSummary <- paste0(c("bar\terror\tgroup\n",
                             paste0(
                               signif(as.numeric(theValues[, 1]), 7), "\t",
                               signif(as.numeric(theValues[, 2]), 7), "\t",
                               paste0("autoBar", 1:nrow(theValues)), "\n"
                             )), collapse = "")
    }
    if(sendToFile == TRUE) return(read.table(text = theSummary, sep = "\t", header = TRUE))
    if(sendToWindow == TRUE) displayData(theSummary, "bar extractions")
    return(theSummary)
  }
getErrorExtractions <- function(sendToWindow = FALSE, sendToFile = FALSE) {
    # Tabulate manually-drawn error bars: calibrated mean, error magnitude,
    # axis the bar runs along, group tag, and raw pixel coordinates.
    # Returns a tab-delimited string; a data.frame when sendToFile = TRUE;
    # displays a window when sendToWindow = TRUE.
    allThePoints <- point_getAllbyType("error")
    if(length(allThePoints) == 0) return(data.frame())
    errorCoords <- lapply(allThePoints, function(x) point_getCoordinates(x))
    theValues <- lapply(errorCoords,
                        function(x) {
                          # An error bar is two pixel coordinates (x1, y1, x2, y2).
                          # Equal x's mean a vertical bar (error on the y-axis);
                          # otherwise the bar is horizontal (error on the x-axis).
                          if(x[1] == x[3]) {
                            theMean <- suppressWarnings(coordinate_calibrate(x[2], "y"))
                            theError <- suppressWarnings(abs(theMean - coordinate_calibrate(x[4], "y")))
                            theType <- "y"
                          } else {
                            theMean <- suppressWarnings(coordinate_calibrate(x[1], "x"))
                            theError <- suppressWarnings(abs(theMean - coordinate_calibrate(x[3], "x")))
                            theType <- "x"
                          }
                          # raw pixel coordinates are identical for both orientations,
                          # so they are reported once here (previously duplicated in
                          # both branches above)
                          return(c(mean = theMean,
                                   error = theError,
                                   type = theType,
                                   mx = x[1],
                                   my = x[2],
                                   ex = x[3],
                                   ey = x[4]))
                        })
    theValues <- data.frame(matrix(unlist(theValues), nrow = length(theValues), byrow = TRUE))
    theSummary <- paste0(c("mean\terror\taxis\tgroup\tmean.x\tmean.y\terror.x\terror.y\n",
                           paste0(
                             signif(as.numeric(theValues[, 1]), 7), "\t",
                             signif(as.numeric(theValues[, 2]), 7), "\t",
                             theValues[, 3], "\t",
                             sapply(allThePoints, function(x) paste0(point_getTags(x)[2], "\t")),
                             theValues[, 4], "\t",
                             theValues[, 5], "\t",
                             theValues[, 6], "\t",
                             theValues[, 7], "\n"
                           )), collapse = "")
    if(sendToFile == TRUE) return(read.table(text = theSummary, sep = "\t", header = TRUE))
    if(sendToWindow == TRUE) displayData(theSummary, "error bar extractions")
    return(theSummary)
  }
getRegressionExtractions <- function(sendToWindow = FALSE, sendToFile = FALSE) {
    # Tabulate all user-drawn regression lines: calibrated endpoints,
    # slope and intercept, raw pixel coordinates, and group tag.
    # Returns a tab-delimited string; a data.frame when sendToFile = TRUE;
    # displays a window when sendToWindow = TRUE.
    regressionPoints <- point_getAllbyType("regression")
    if(length(regressionPoints) == 0) return(data.frame())
    pixelCoords <- lapply(regressionPoints, function(aLine) point_getCoordinates(aLine))
    lineStats <- lapply(pixelCoords,
                        function(aCoord) {
                          # calibrate both endpoints, then derive slope/intercept
                          startX <- suppressWarnings(coordinate_calibrate(aCoord[1], "x"))
                          startY <- suppressWarnings(coordinate_calibrate(aCoord[2], "y"))
                          endX <- suppressWarnings(coordinate_calibrate(aCoord[3], "x"))
                          endY <- suppressWarnings(coordinate_calibrate(aCoord[4], "y"))
                          theSlope <- (endY - startY)/(endX - startX)
                          theIntercept <- startY - theSlope * startX
                          c(startX, startY, endX, endY, theSlope, theIntercept,
                            aCoord[1], aCoord[2], aCoord[3], aCoord[4])
                        })
    lineStats <- data.frame(matrix(unlist(lineStats), nrow = length(lineStats), byrow = TRUE))
    theSummary <- paste0(c("x1\ty1\tx2\ty2\tslope\tintercept\tx1.coord\ty1.coord\tx2.coord\ty2.coord\tgroup\n",
                           paste0(
                             signif(as.numeric(lineStats[, 1]), 7), "\t",
                             signif(as.numeric(lineStats[, 2]), 7), "\t",
                             signif(as.numeric(lineStats[, 3]), 7), "\t",
                             signif(as.numeric(lineStats[, 4]), 7), "\t",
                             signif(as.numeric(lineStats[, 5]), 7), "\t",
                             signif(as.numeric(lineStats[, 6]), 7), "\t",
                             lineStats[, 7], "\t",
                             lineStats[, 8], "\t",
                             lineStats[, 9], "\t",
                             lineStats[, 10], "\t",
                             sapply(regressionPoints, function(aLine) paste0(point_getTags(aLine)[2], "\n"))
                           )), collapse = "")
    if(sendToFile == TRUE) return(read.table(text = theSummary, sep = "\t", header = TRUE))
    if(sendToWindow == TRUE) displayData(theSummary, "regression line extractions")
    return(theSummary)
  }
getAxisExtractions <- function(sendToWindow = FALSE, sendToFile = FALSE) {
    # Tabulate the pixel coordinates of the two calibration (axis) lines.
    # Returns a tab-delimited string; a data.frame when sendToFile = TRUE;
    # displays a window when sendToWindow = TRUE.
    # FIX(review): tkcoords is now called with the tcltk:: prefix, for
    # consistency with every other tcltk call in this file.
    # NOTE(review): row labels are y1/x1/y2/x2 although canvas coords are
    # conventionally (x1, y1, x2, y2) -- confirm intended labelling.
    theSummary <- paste0(c("coord\tX.axis\tY.axis\n",
                           paste0(
                             c("y1", "x1", "y2", "x2"), "\t",
                             as.numeric(tcltk::tkcoords(mainFigureCanvas, x_calibrationLine)), "\t",
                             as.numeric(tcltk::tkcoords(mainFigureCanvas, y_calibrationLine)), "\n"
                           )), collapse = "")
    if(sendToFile == TRUE) return(read.table(text = theSummary, sep = "\t", header = TRUE))
    if(sendToWindow == TRUE) displayData(theSummary, "axis line extractions")
    return(theSummary)
  }
getLineExtractions <- function(sendToWindow = FALSE, sendToFile = FALSE) {
    # Tabulate all user-drawn connected lines: each vertex's calibrated x/y,
    # raw pixel coordinates, its link index within the line, the line (set)
    # number, and the group tag.
    # Returns a tab-delimited string; a data.frame when sendToFile = TRUE;
    # displays a window when sendToWindow = TRUE.
    allThePoints <- point_getAllbyType("line")
    if(length(allThePoints) == 0) return(data.frame())
    lineCoords <- lapply(allThePoints, function(x) point_getCoordinates(x))
    allText <- data.frame()
    # seq_along() is the idiomatic, empty-safe loop form (1:length was used)
    for(i in seq_along(lineCoords)) {
      # coordinates come flattened as x1,y1,x2,y2,...; reshape to one row
      # per vertex, then calibrate each vertex
      coordMatrix <- matrix(lineCoords[[i]], ncol = 2, byrow = TRUE)
      allCoords <- split(coordMatrix, row(coordMatrix))
      theValues <- lapply(allCoords,
                          function(somePoint) {
                            x <- suppressWarnings(coordinate_calibrate(somePoint[1], "x"))
                            y <- suppressWarnings(coordinate_calibrate(somePoint[2], "y"))
                            return(c(x, y, somePoint[1], somePoint[2]))
                          })
      someText <- data.frame(matrix(unlist(theValues), nrow = length(theValues), byrow = TRUE),
                             c(1:length(theValues)),
                             i,
                             point_getTags(allThePoints[i])[2])
      # FIX(review): the former `if(is.null(dim(allText)))` branch was dead
      # code -- dim(data.frame()) is c(0, 0), never NULL -- and
      # rbind(data.frame(), df) simply returns df, so a plain rbind is
      # behaviorally identical.
      allText <- rbind(allText, someText)
    }
    theSummary <- paste0(c("x\ty\tx.coord\ty.coord\tlink\tset\tgroup\n",
                           paste0(
                             signif(as.numeric(allText[, 1]), 7), "\t",
                             signif(as.numeric(allText[, 2]), 7), "\t",
                             allText[, 3], "\t",
                             allText[, 4], "\t",
                             signif(as.numeric(allText[, 5]), 7), "\t",
                             as.character(allText[, 6]), "\t",
                             as.character(allText[, 7]), "\n"
                           )), collapse = "")
    if(sendToFile == TRUE) return(read.table(text = theSummary, sep = "\t", header = TRUE))
    if(sendToWindow == TRUE) displayData(theSummary, "line extractions")
    return(theSummary)
  }
# END: text functions for data tabulation
#########################################
########################################
## START: plot/figure/main image frame
########################################
figureWindow <- tcltk::ttkframe(aJuicrWindow)
mainFigureWidth <- as.integer(tcltk::tcl("image", "width", theFigure))
mainFigureHeight <- as.integer(tcltk::tcl("image", "height", theFigure))
mainFigureCanvas <- tcltk::tkcanvas(figureWindow, background = "grey95",
width = figureWindowSize[1], height = figureWindowSize[2],
"-scrollregion",
paste(0, 0, mainFigureWidth + 20, mainFigureHeight + 50))
mainFigure <- tcltk::tcl(mainFigureCanvas, "create", "image", 0,0, image = theFigure, anchor = "nw")
#mainFigureXscroll <- tkscrollbar(figureWindow, command = function(...) tcl(mainFigureCanvas, "xview", ...), orient = "horizontal")
#mainFigureYscroll <- tkscrollbar(figureWindow, command = function(...) tcl(mainFigureCanvas, "yview", ...), orient = "vertical")
mainFigureXscroll <- tcltk::tkscrollbar(figureWindow, command = function(...) tcltk::tkxview(mainFigureCanvas, ...), orient = "horizontal")
mainFigureYscroll <- tcltk::tkscrollbar(figureWindow, command = function(...) tcltk::tkyview(mainFigureCanvas, ...), orient = "vertical")
tcltk::tkconfigure(mainFigureCanvas, xscrollcommand = function(...) tcltk::tkset(mainFigureXscroll, ...))
tcltk::tkconfigure(mainFigureCanvas, yscrollcommand = function(...) tcltk::tkset(mainFigureYscroll, ...))
hoverText <- tcltk::tkcreate(mainFigureCanvas, "text", 0, 0, justify = "left", text = "", fill = "black", font = "Consolas 8")
hoverShadow <- tcltk::tcl(mainFigureCanvas, "create", "image", 0, 0, image = "", anchor = "nw")
epsButton <- tcltk::tkbutton(mainFigureCanvas, text = "save image as .eps", relief = "groove",
width = 16, command = function(){
tcltk::tkitemconfigure(mainFigureCanvas, epsWindow, state = "hidden")
tcltk::tkitemconfigure(mainFigureCanvas, clearWindow, state = "hidden")
aEspFile <- tcltk::tkgetSaveFile(filetypes = "{{eps postscript files} {.eps}} {{All files} *}",
defaultextension = ".eps",
title = "juicr: save exact copy of image/extractions as postscript file")
tcltk::tcl(mainFigureCanvas, "postscript", file = aEspFile)
tcltk::tkitemconfigure(mainFigureCanvas, epsWindow, state = "normal")
tcltk::tkitemconfigure(mainFigureCanvas, clearWindow, state = "normal")
})
epsWindow <- tcltk::tkcreate(mainFigureCanvas, "window", mainFigureWidth, mainFigureHeight + 10, anchor = "ne", window = epsButton)
clearButton <- tcltk::tkbutton(mainFigureCanvas, text = "hide extractions", relief = "groove",
width = 13, command = function(){
if(as.character(tcltk::tkcget(clearButton, "-relief")) == "sunken") {
tcltk::tkconfigure(clearButton, relief = "groove")
tcltk::tkitemconfigure(mainFigureCanvas, "extraction", state = "normal")
} else {
tcltk::tkconfigure(clearButton, relief = "sunken")
tcltk::tkitemconfigure(mainFigureCanvas, "extraction", state = "hidden")
}
})
clearWindow <- tcltk::tkcreate(mainFigureCanvas, "window", mainFigureWidth - 130, mainFigureHeight + 10, anchor = "ne", window = clearButton)
tcltk::tkgrid(mainFigureCanvas, mainFigureYscroll, sticky = "news")
tcltk::tkgrid(mainFigureXscroll, sticky = "ew")
########################################
## END: plot/figure/main image frame
########################################
#################################
##### START: options notebook
#################################
notebookFrame <- tcltk::ttknotebook(aJuicrWindow)
########################################
##### START: automated frame in notebook
automatedWindow <- tcltk::ttkframe(notebookFrame)
isBarPlot <- function(thePlot,
                      binary_threshold = 0.98,
                      object_threshold = 0.1,
                      bar_length = 0.05) {
  # Heuristic classifier: does this figure look like a bar plot?
  # Binarizes the image, blanks out part of it (presumably to isolate the
  # region where bars sit -- geometry depends on EBImage's post-transpose
  # orientation; confirm), then counts vertical line segments surviving a
  # morphological opening.  More than two segments => treated as bars.
  # NOTE(review): 'object_threshold' is accepted but unused in this body.
  rotatedImage <- EBImage::transpose(EBImage::flop(thePlot))
  binaryImage <- 1 - (EBImage::channel(rotatedImage, mode = "gray") > binary_threshold)
  binaryImage[, round(dim(binaryImage)[2] * 0.3):dim(binaryImage)[2] ] <- 0
  binaryImage[1:round(dim(binaryImage)[1] * 0.6), ] <- 0
  verticalBrush <- EBImage::makeBrush(bar_length * dim(binaryImage)[1], shape = "line", angle = 0)
  verticalOnly <- EBImage::opening(EBImage::distmap(binaryImage), verticalBrush)
  candidateBars <- EBImage::watershed(EBImage::distmap(verticalOnly), 0.1)
  return(max(candidateBars) > 2)
}
update_X_axis <- function(y1, x1, y2, x2) {
    # Reposition the x-axis calibration line on the main canvas.
    # NOTE(review): tkcoords takes canvas coordinates as (x1, y1, x2, y2);
    # the parameter names here look transposed, but all visible callers pass
    # positionally, so behavior is unaffected -- TODO rename and confirm.
    tcltk::tkcoords(mainFigureCanvas, x_calibrationLine, y1, x1, y2, x2)
  }
update_Y_axis <- function(y1, x1, y2, x2) {
    # Reposition the y-axis calibration line on the main canvas.
    # NOTE(review): parameter names look transposed relative to tkcoords'
    # (x1, y1, x2, y2) order; callers pass positionally -- TODO rename/confirm.
    tcltk::tkcoords(mainFigureCanvas, y_calibrationLine, y1, x1, y2, x2)
  }
juiceItReset <- function() {
    # Clear all auto-detected extractions (axes, auto points/bars/clusters)
    # before a fresh automated pass; manually-added extractions are kept.
    # Also resets the three orange progress indicators and the summary panel.
    tcltk::tkconfigure(juiceButton, image = juicrLogoJuicing); tcltk::tcl("update")
    # NOTE(review): tkget on a canvas widget is unusual -- presumably this
    # returns items tagged "autobar"/"auto"; confirm against the tcltk API.
    if(length(c(as.character(tcltk::tkget(mainFigureCanvas, "autobar")), as.character(tcltk::tkget(mainFigureCanvas, "auto")))) != 0) {
      update_X_axis(1, 1, 1, 1); update_Y_axis(1, 1, 1, 1);
      tcltk::tkconfigure(xorangeLabel, image = theOrangeGrey)
      tcltk::tkconfigure(yorangeLabel, image = theOrangeGrey)
      tcltk::tkconfigure(dataOrangeLabel, image = theOrangeGrey); tcltk::tcl("update")
      allthePoints <- point_getAllbyType("point")
      # FIX(review): seq_along() replaces 1:length(), which iterated (1, 0)
      # and errored when no points were present.
      for(i in seq_along(allthePoints)) {
        if((point_getTags(allthePoints[i])[2] == "autobar") || (point_getTags(allthePoints[i])[2] == "auto") || (point_getTags(allthePoints[i])[2] == "cluster")) {
          point_delete(point_indexToPoint(point_getTags(allthePoints[i])[1]))
          tcltk::tcl(mainFigureCanvas, "delete", point_getTags(allthePoints[i])[1])
        }
      }
      tcltk::tkitemconfigure(txtCanvas, theDataText, text = point_summary())
    }
  }
animateAutodetection <- function() {
    # Briefly pause and refresh the GUI between auto-detections so the user
    # sees markers appear one by one; skipped when animateDelay is FALSE.
    if(animateDelay != FALSE) {Sys.sleep(0.01); tcltk::tcl("update");}
  }
juiceIt <- function() {
    # Fully automated extraction pass: clear previous auto results, detect
    # both axes, then detect either bars or scatter points (plus clusters)
    # depending on isBarPlot()'s verdict, and refresh the summary panel.
    # reset all
    juiceItReset()
    # start axis detections
    detectedX <- autoX(theFigureJuiced, binary_threshold = as.numeric(text_get(qualityDisplay)))
    if(max(detectedX) == 1) {
      # x-axis found: draw it thick/orange while detection continues
      theCoordX <- getCoord_detectedAxis(detectedX)
      tcltk::tkitemconfigure(mainFigureCanvas, x_calibrationLine, width = 10, fill = "orange")
      update_X_axis(theCoordX[1], min(theCoordX[2], theCoordX[4]), theCoordX[3], min(theCoordX[2], theCoordX[4]))
      tcltk::tkconfigure(xorangeLabel, image = theOrange); tcltk::tcl("update")
    }
    detectedY <- autoX(theFigureJuiced, binary_threshold = as.numeric(text_get(qualityDisplay)), asY = TRUE)
    if(max(detectedY) == 1) {
      theCoordY <- getCoord_detectedAxis(detectedY)
      tcltk::tkitemconfigure(mainFigureCanvas, y_calibrationLine, width = 10, fill = "orange")
      update_Y_axis(max(theCoordY[1],theCoordY[3]), theCoordY[2], max(theCoordY[1],theCoordY[3]), theCoordY[4])
      tcltk::tkconfigure(yorangeLabel, image = theOrange); tcltk::tcl("update")
    }
    if((max(detectedX) == 1) && (max(detectedY) == 1)) {
      # both axes found: fix any crossing at the origin and restyle them
      newXCoord <- resolve_crossedAxes(detectedX, detectedY)
      newYCoord <- resolve_crossedAxes(detectedX, detectedY, asY = TRUE)
      update_X_axis(newXCoord[1], min(newXCoord[2], newXCoord[4]), newXCoord[3], min(newXCoord[2], newXCoord[4]))
      update_Y_axis(max(newYCoord[1],newYCoord[3]), newYCoord[2], max(newYCoord[1],newYCoord[3]), newYCoord[4])
      tcltk::tkitemconfigure(mainFigureCanvas, y_calibrationLine, width = 5, fill = "tomato3")
      tcltk::tkitemconfigure(mainFigureCanvas, x_calibrationLine, width = 5, fill = "tomato")
    }
    # extract bar or scatter data
    if(isBarPlot(theFigureJuiced) == TRUE) {
      detectedBars <- autoBars(theFigureJuiced, detectedX, detectedY, binary_threshold = as.numeric(text_get(qualityDisplay)), bar_length = as.numeric(text_get(barSizeDisplay)))
      theCoords <- getCoord_detectedPoints(detectedBars)
      theCoords <- theCoords[order(theCoords[, 1]), ]
      # theCoords is a matrix for many detections, a bare vector for one
      if(!is.null(theCoords) && (length(theCoords) > 0)) {
        if(!is.null(nrow(theCoords))) {
          for(i in 1:nrow(theCoords)) {autoBar(theCoords[i, 1], theCoords[i, 2]); animateAutodetection();}
        } else {
          autoBar(theCoords[1], theCoords[2]);
        }
      }
      # NOTE(review): only tkconfigure is conditional here -- tcl("update")
      # after the ';' always runs; appears intentional (refresh regardless).
      if(max(detectedBars) >= 1) tcltk::tkconfigure(dataOrangeLabel, image = theOrange); tcltk::tcl("update")
    } else {
      detectedPoints <- autoPoints(theFigureJuiced, detectedX, detectedY, point_empty = theAutoPointsAreEmpty, point_shape = theAutoPointsShape, point_size = as.numeric(text_get(circleSizeDisplay)))
      if(max(detectedPoints) >= 1) tcltk::tkconfigure(dataOrangeLabel, image = theOrange); tcltk::tcl("update")
      # isolated detections become points; overlapping blobs become clusters
      allAutoPoints <- getNonClusters(detectedPoints)
      theCoords <- getCoord_detectedPoints(allAutoPoints)
      if(!is.null(theCoords) && (length(theCoords) > 0)) {
        if(!is.null(nrow(theCoords))) {
          for(i in 1:nrow(theCoords)) {autoPoint(theCoords[i, 1], theCoords[i, 2]); animateAutodetection();}
        } else {
          autoPoint(theCoords[1], theCoords[2]);
        }
      }
      allAutoClusters <- getClusters(detectedPoints)
      theCoords <- getCoord_detectedPoints(allAutoClusters)
      if(!is.null(theCoords) && (length(theCoords) > 0)) {
        if(!is.null(nrow(theCoords))) {
          for(i in 1:nrow(theCoords)) {autoCluster(theCoords[i, 1], theCoords[i, 2]); animateAutodetection();}
        } else {
          autoCluster(theCoords[1], theCoords[2]);
        }
      }
    }
    # refresh the summary panel and restore the normal juice-button logo
    tcltk::tkitemconfigure(txtCanvas, theDataText, text = point_summary())
    tcltk::tkconfigure(juiceButton, image = juicrLogo)
  }
#### START: juicr automate button
juiceItCanvas <- tcltk::ttkframe(automatedWindow)
juiceButton <- tcltk::ttkbutton(juiceItCanvas, text = "juice image for data", width=33, compound = 'top', image = juicrLogo, command = function(){juiceIt();})
tcltk::tkgrid(juiceButton, padx = 2, pady = 8)
#### END: juicr automate button
#### END: juicr progress frame
progressCanvas <- tcltk::ttklabelframe(automatedWindow, text = "Extraction success", padding = 4)
progressFrame <- tcltk::ttkframe(progressCanvas)
xorangeLabel <- tcltk::ttklabel(progressFrame, text = "x-axis", compound = 'top', image = theOrangeGrey)
yorangeLabel<- tcltk::ttklabel(progressFrame, text = "y-axis", compound = 'top', image = theOrangeGrey)
dataOrangeLabel <- tcltk::ttklabel(progressFrame, text = "data", compound = 'top', image = theOrangeGrey)
tcltk::tkgrid(xorangeLabel, yorangeLabel, dataOrangeLabel, padx = 7)
detectionFrame <- tcltk::ttkframe(progressCanvas)
autoPointLabel <- tcltk::ttklabel(detectionFrame, text = "= detected", compound = "left", image = autoPointImage)
clusterPointLabel <- tcltk::ttklabel(detectionFrame, text = "= cluster", compound = "left", image = clusterPointImage)
tcltk::tkgrid(autoPointLabel, clusterPointLabel, padx = 12, pady = 3)
tcltk::tkgrid(detectionFrame)
tcltk::tkgrid(progressFrame)
#### END: juicr progress frame
#### START: point options frame
figureTypeCanvas <- tcltk::ttklabelframe(automatedWindow, text = "Point detection options", padding = 6)
sizeFrame <- tcltk::ttkframe(figureTypeCanvas)
circleSizeLabel <- tcltk::ttklabel(sizeFrame, text = "= size", width = 7)
circleSmallButton <- tcltk::tkbutton(sizeFrame, text = "smallest", relief = "groove", image = circlePoint1, command = function(...) {tcltk::tkdelete(circleSizeDisplay, "0.0", "end"); tcltk::tkinsert(circleSizeDisplay, "1.0", as.character(1)); tcltk::tkconfigure(circleSmallButton, relief = "sunken"); tcltk::tkconfigure(circleMediumButton, relief = "groove"); tcltk::tkconfigure(circleBigButton, relief = "groove");} )
circleMediumButton <- tcltk::tkbutton(sizeFrame, text = "medium", relief = "groove", image = circlePoint5, command = function(...) {tcltk::tkdelete(circleSizeDisplay, "0.0", "end"); tcltk::tkinsert(circleSizeDisplay, "1.0", as.character(5)); tcltk::tkconfigure(circleSmallButton, relief = "groove"); tcltk::tkconfigure(circleMediumButton, relief = "sunken"); tcltk::tkconfigure(circleBigButton, relief = "groove");} )
tcltk::tkconfigure(circleMediumButton, relief = "sunken")
circleBigButton <- tcltk::tkbutton(sizeFrame, text = "big", relief = "groove", image = circlePoint15, command = function(...) {tcltk::tkdelete(circleSizeDisplay, "0.0", "end"); tcltk::tkinsert(circleSizeDisplay, "1.0", as.character(15)); tcltk::tkconfigure(circleSmallButton, relief = "groove"); tcltk::tkconfigure(circleMediumButton, relief = "groove"); tcltk::tkconfigure(circleBigButton, relief = "sunken");} )
circleSizeDisplay <- tcltk::tktext(sizeFrame, foreground = "tomato", height = 1, width = 4)
tcltk::tkinsert(circleSizeDisplay, "1.0", as.character(5))
tcltk::tkgrid(circleSmallButton, circleMediumButton, circleBigButton, circleSizeLabel, circleSizeDisplay, padx=3)
shapeFrame <- tcltk::ttkframe(figureTypeCanvas)
circleShapeLabel <- tcltk::ttklabel(shapeFrame, text = "= shape", width = 7)
circleCircleButton <- tcltk::tkbutton(shapeFrame, text = "circle", relief = "groove", image = circlePoint15, command = function(...) {theAutoPointsShape <- "disc"; tcltk::tkconfigure(circleCircleButton, relief = "sunken"); tcltk::tkconfigure(circleDiamondButton, relief = "groove"); tcltk::tkconfigure(circleSquareButton, relief = "groove");})
tcltk::tkconfigure(circleCircleButton, relief = "sunken")
circleDiamondButton <- tcltk::tkbutton(shapeFrame, text = "diamond", relief = "groove", image = diamondPoint15, command = function(...) {theAutoPointsShape <- "diamond"; tcltk::tkconfigure(circleCircleButton, relief = "groove"); tcltk::tkconfigure(circleDiamondButton, relief = "sunken"); tcltk::tkconfigure(circleSquareButton, relief = "groove");})
circleSquareButton <- tcltk::tkbutton(shapeFrame, text = "square", relief = "groove", image = squarePoint15, command = function(...) {theAutoPointsShape <- "box"; tcltk::tkconfigure(circleCircleButton, relief = "groove"); tcltk::tkconfigure(circleDiamondButton, relief = "groove"); tcltk::tkconfigure(circleSquareButton, relief = "sunken");})
tcltk::tkgrid(circleCircleButton, circleDiamondButton, circleSquareButton, circleShapeLabel, padx=3, pady = 3)
styleFrame <- tcltk::ttkframe(figureTypeCanvas)
styleLabel <- tcltk::ttklabel(shapeFrame, text = "= style", width = 7)
circleClosedButton <- tcltk::tkbutton(shapeFrame, text = "closed", relief = "groove", image = circlePoint15, command = function(...) {theAutoPointsAreEmpty <- FALSE; tcltk::tkconfigure(circleClosedButton, relief = "sunken"); tcltk::tkconfigure(circleOpenButton, relief = "groove");})
connectLabel <- tcltk::ttklabel(shapeFrame, text = "or")
tcltk::tkconfigure(circleClosedButton, relief = "sunken")
circleOpenButton <- tcltk::tkbutton(shapeFrame, text = "open", relief = "groove", image = circlePoint15Closed, command = function(...) {theAutoPointsAreEmpty <- TRUE; tcltk::tkconfigure(circleClosedButton, relief = "groove"); tcltk::tkconfigure(circleOpenButton, relief = "sunken");})
tcltk::tkgrid(circleClosedButton, connectLabel, circleOpenButton, styleLabel, padx=3)
tcltk::tkgrid(shapeFrame, sticky = "w")
tcltk::tkgrid(styleFrame, sticky = "w")
tcltk::tkgrid(sizeFrame, sticky = "w")
#tkgrid(clusterPointLabel, sticky = "w" )
#### END: point options frame
#### START: line options frame
lineTypeCanvas <- tcltk::ttklabelframe(automatedWindow, text = "Axis detection options", padding = 6)
lineFrame <- tcltk::ttkframe(lineTypeCanvas)
lineQualityLabel <- tcltk::ttklabel(lineFrame, text = "= quality", width = 9)
highQualityButton <- tcltk::tkbutton(lineFrame, text = "smallest", relief = "groove", width = 21, height = 21, image = lineQualityHigh, command = function(...) {tcltk::tkdelete(qualityDisplay, "0.0", "end"); tcltk::tkinsert(qualityDisplay, "1.0", as.character(0.6)); tcltk::tkconfigure(highQualityButton, relief = "sunken"); tcltk::tkconfigure(lowQualityButton, relief = "groove");} )
tcltk::tkconfigure(highQualityButton, relief = "sunken")
lineConnectLabel <- tcltk::ttklabel(lineFrame, text = "or")
lowQualityButton <- tcltk::tkbutton(lineFrame, text = "medium", relief = "groove", width = 21, height = 21, image = lineQualityLow, command = function(...) {tcltk::tkdelete(qualityDisplay, "0.0", "end"); tcltk::tkinsert(qualityDisplay, "1.0", as.character(0.4)); tcltk::tkconfigure(highQualityButton, relief = "groove"); tcltk::tkconfigure(lowQualityButton, relief = "sunken");} )
qualityDisplay <- tcltk::tktext(lineFrame, foreground = "tomato", height = 1, width = 4)
tcltk::tkinsert(qualityDisplay, "1.0", as.character(0.6))
tcltk::tkgrid(highQualityButton, lineConnectLabel, lowQualityButton, lineQualityLabel, qualityDisplay, padx=3)
tcltk::tkgrid(lineFrame, sticky = "w")
#### END: line options frame
#### START: bar options frame
barTypeCanvas <- tcltk::ttklabelframe(automatedWindow, text = "Bar detection options", padding = 6)
barFrame <- tcltk::ttkframe(barTypeCanvas)
barSizeLabel <- tcltk::ttklabel(barFrame, text = "= size", width = 7)
barSmallButton <- tcltk::tkbutton(barFrame, text = "smallest", relief = "groove", image = barPoint1, command = function(...) {tcltk::tkdelete(barSizeDisplay, "0.0", "end"); tcltk::tkinsert(barSizeDisplay, "1.0", as.character(3)); tcltk::tkconfigure(barSmallButton, relief = "sunken"); tcltk::tkconfigure(barMediumButton, relief = "groove"); tcltk::tkconfigure(barBigButton, relief = "groove");})
barMediumButton <- tcltk::tkbutton(barFrame, text = "medium", relief = "groove", image = barPoint5, command = function(...) {tcltk::tkdelete(barSizeDisplay, "0.0", "end"); tcltk::tkinsert(barSizeDisplay, "1.0", as.character(9)); tcltk::tkconfigure(barSmallButton, relief = "groove"); tcltk::tkconfigure(barMediumButton, relief = "sunken"); tcltk::tkconfigure(barBigButton, relief = "groove");})
tcltk::tkconfigure(barMediumButton, relief = "sunken")
barBigButton <- tcltk::tkbutton(barFrame, text = "big", relief = "groove", image = barPoint15, command = function(...) {tcltk::tkdelete(barSizeDisplay, "0.0", "end"); tcltk::tkinsert(barSizeDisplay, "1.0", as.character(19)); tcltk::tkconfigure(barSmallButton, relief = "groove"); tcltk::tkconfigure(barMediumButton, relief = "groove"); tcltk::tkconfigure(barBigButton, relief = "sunken");})
barSizeDisplay <- tcltk::tktext(barFrame, foreground = "tomato", height = 1, width = 4)
tcltk::tkinsert(barSizeDisplay, "1.0", as.character(9))
tcltk::tkgrid(barSmallButton, barMediumButton, barBigButton, barSizeLabel, barSizeDisplay, padx=3)
tcltk::tkgrid(barFrame, sticky = "w")
#### END: bar options frame
tcltk::tkgrid(juiceItCanvas, padx = 24, pady = 3)
tcltk::tkgrid(progressCanvas)
tcltk::tkgrid(lineTypeCanvas)
tcltk::tkgrid(figureTypeCanvas)
tcltk::tkgrid(barTypeCanvas)
tcltk::tkgrid(automatedWindow)
##### END: automated frame in notebook
########################################
########################################
##### START: manual frame in notebook
manualWindow <- tcltk::ttkframe(aJuicrWindow)
#### START: zoom frame
zoomFrame <- tcltk::ttkframe(manualWindow)
zoomCanvas <- tcltk::tkcanvas(zoomFrame, width = 225, height = 225)
zoomFigure <- tcltk::tcl("image", "create", "photo")
tcltk::tcl(zoomFigure, "copy", theFigure, "-from", 0, 0, 77, 77, "-zoom", 3)
zoomWidth <- as.integer(tcltk::tcl("image", "width", zoomFigure))
zoomHeight <- as.integer(tcltk::tcl("image", "height", zoomFigure))
zoomImage <- tcltk::tcl(zoomCanvas, "create", "image", 0, 0, image = zoomFigure, anchor = "nw")
tcltk::tkcreate(zoomCanvas, "rec", (zoomWidth - 1)/2 - 1, (zoomHeight - 1)/2 - 1, (zoomWidth - 1)/2 + 1, (zoomHeight - 1)/2 + 1, outline = "DarkOrange1", fill = "DarkOrange1")
tcltk::tkcreate(zoomCanvas, "line", (zoomWidth - 1)/2 - 30, (zoomHeight - 1)/2, (zoomWidth - 1)/2 - 16, (zoomHeight - 1)/2, width = 3, fill = "turquoise3")
tcltk::tkcreate(zoomCanvas, "line", (zoomWidth - 1)/2 + 30, (zoomHeight - 1)/2, (zoomWidth - 1)/2 + 16, (zoomHeight - 1)/2, width = 3, fill = "turquoise3")
tcltk::tkcreate(zoomCanvas, "line", (zoomWidth - 1)/2, (zoomHeight - 1)/2 - 30, (zoomWidth - 1)/2, (zoomHeight - 1)/2 - 16, width = 3, fill = "turquoise3")
tcltk::tkcreate(zoomCanvas, "line", (zoomWidth - 1)/2, (zoomHeight - 1)/2 + 30, (zoomWidth - 1)/2, (zoomHeight - 1)/2 + 16, width = 3, fill = "turquoise3")
coordTypes <- c("pixels", "data"); theValue <- tcltk::tclVar("NA");
pixelComboBox <- tcltk::ttkcombobox(zoomFrame, value = coordTypes, textvariable = theValue, width = 6, font = "Consolas 8")
tcltk::tkcreate(zoomCanvas, "window", 5, 206, anchor = "nw", window = pixelComboBox)
tcltk::tkset(pixelComboBox, coordTypes[1])
theCOORD <- sprintf("(x,y)=(%5s,%5s)", "NA", "NA")
zoomText <- tcltk::tkcreate(zoomCanvas, "text", 159, 215, justify = "left", text = theCOORD, fill = "grey", font = "Consolas 9")
tcltk::tkgrid(zoomCanvas, padx = 7, pady = 5)
#### END: zoom frame
#### START: figure type frame
figureTypeCanvas <- tcltk::ttklabelframe(manualWindow, text = "plot-type (scatter, error bar, other)", padding = 8)
scatterPlotButton <- tcltk::tkbutton(figureTypeCanvas,
command = function(){
set_juicr("x_error", FALSE); set_juicr("y_error", FALSE); set_juicr("x_regression", FALSE); set_juicr("x_connected", FALSE)
tcltk::tkconfigure(scatterPlotButton, relief = "sunken"); tcltk::tkconfigure(barPlotButton, relief = "raised"); tcltk::tkconfigure(linePlotButton, relief = "raised");
tcltk::tkpack.forget(manualWindowItems[4]); tcltk::tkpack.forget(manualWindowItems[5]); tcltk::tkpack(manualWindowItems[3], after = manualWindowItems[2]);
tcltk::tkcoords(mainFigureCanvas, x_errorLine, 1, 1, 1, 1); tcltk::tkcoords(mainFigureCanvas, y_errorLine, 1, 1, 1, 1);
tcltk::tkcoords(mainFigureCanvas, x_regressionLine, 1, 1, 1, 1);
}, text = "scatter", image = imageScatter)
tcltk::tkconfigure(scatterPlotButton, relief = "sunken")
barPlotButton <- tcltk::tkbutton(figureTypeCanvas,
command = function(){
set_juicr("x_error", FALSE); set_juicr("y_error", TRUE); set_juicr("x_regression", FALSE); set_juicr("x_connected", FALSE)
tcltk::tkconfigure(scatterPlotButton, relief = "raised"); tcltk::tkconfigure(barPlotButton, relief = "sunken"); tcltk::tkconfigure(linePlotButton, relief = "raised");
tcltk::tkpack.forget(manualWindowItems[3]); tcltk::tkpack.forget(manualWindowItems[5]); tcltk::tkpack(manualWindowItems[4], after = manualWindowItems[2])
}, text = "error", image = imageBarX)
linePlotButton <- tcltk::tkbutton(figureTypeCanvas,
command = function(){
set_juicr("y_error", FALSE); set_juicr("x_error", FALSE); set_juicr("x_regression", FALSE); set_juicr("x_connected", FALSE)
tcltk::tkconfigure(scatterPlotButton, relief = "raised"); tcltk::tkconfigure(barPlotButton, relief = "raised"); tcltk::tkconfigure(linePlotButton, relief = "sunken");
tcltk::tkpack.forget(manualWindowItems[3]); tcltk::tkpack.forget(manualWindowItems[4]); tcltk::tkpack(manualWindowItems[5], after = manualWindowItems[2])
tcltk::tkcoords(mainFigureCanvas, x_errorLine, 1, 1, 1, 1); tcltk::tkcoords(mainFigureCanvas, y_errorLine, 1, 1, 1, 1);
tcltk::tkcoords(mainFigureCanvas, x_regressionLine, 1, 1, 1, 1);
}, text = "line", image = imageLine)
tcltk::tkgrid(scatterPlotButton, barPlotButton, linePlotButton, padx = 8)
#### END: figure type frame
#### START: figure calibration frame
figureCalibration <- tcltk::ttklabelframe(manualWindow, text = "plot-to-data calibration\n (min/max = plotted values on axis)", padding = 8)
calibrationXButton <- tcltk::tkbutton(figureCalibration, command = function(){set_juicr("x_calibrate", TRUE); tcltk::tkconfigure(calibrationXButton, relief = "sunken");}, text = "add\nx-axis", width = 5, height = 2, foreground = "tomato")
calibrationYButton <- tcltk::tkbutton(figureCalibration, command = function(){set_juicr("y_calibrate", TRUE); tcltk::tkconfigure(calibrationYButton, relief = "sunken");}, text = "add\ny-axis", width = 5, height = 2, foreground = "tomato3")
xcaptionCanvas <- tcltk::ttkframe(figureCalibration)
figureXminLabel <- tcltk::ttklabel(xcaptionCanvas, text = "min", font = "Arial 8")
figureXminDisplay <- tcltk::tktext(xcaptionCanvas, foreground = "tomato", height = 1, width = 4)
figureXmaxLabel <-tcltk:: ttklabel(xcaptionCanvas, text = "max", font = "Arial 8")
# X-axis calibration sub-frame: numeric min/max entry boxes plus free-text
# "label" and "units" fields, gridded in two rows. (figureXminLabel /
# figureXminDisplay / figureXmaxLabel are created just above this chunk.)
figureXmaxDisplay <- tcltk::tktext(xcaptionCanvas, foreground = "tomato", height = 1, width = 4)
figureXcaptionLabel <- tcltk::ttklabel(xcaptionCanvas, text = "label", font = "Arial 8")
figureXcaptionDisplay <- tcltk::tktext(xcaptionCanvas, foreground = "tomato", height = 1, width = 9)
# default axis caption is a bare "x"
tcltk::tkinsert(figureXcaptionDisplay, "1.0", "x")
figureXunitsLabel <- tcltk::ttklabel(xcaptionCanvas, text = "units", font = "Arial 8")
figureXunitsDisplay <- tcltk::tktext(xcaptionCanvas, foreground = "tomato", height = 1, width = 9)
tcltk::tkgrid(figureXcaptionLabel, figureXcaptionDisplay, figureXminLabel, figureXminDisplay)
tcltk::tkgrid(figureXunitsLabel, figureXunitsDisplay, figureXmaxLabel, figureXmaxDisplay)
# Y-axis calibration sub-frame: mirrors the x-axis widgets but rendered in
# "tomato3" so the two axes are visually distinguishable.
ycaptionCanvas <- tcltk::ttkframe(figureCalibration)
figureYminLabel <- tcltk::ttklabel(ycaptionCanvas, text = "min", font = "Arial 8")
figureYminDisplay <- tcltk::tktext(ycaptionCanvas, foreground = "tomato3", height = 1, width = 4)
figureYmaxLabel <- tcltk::ttklabel(ycaptionCanvas, text = "max", font = "Arial 8")
figureYmaxDisplay <- tcltk::tktext(ycaptionCanvas, foreground = "tomato3", height = 1, width = 4)
figureYcaptionLabel <- tcltk::ttklabel(ycaptionCanvas, text = "label", font = "Arial 8")
figureYcaptionDisplay <- tcltk::tktext(ycaptionCanvas, foreground = "tomato3", height = 1, width = 9)
# default axis caption is a bare "y"
tcltk::tkinsert(figureYcaptionDisplay, "1.0", "y")
figureYunitsLabel <- tcltk::ttklabel(ycaptionCanvas, text = "units", font = "Arial 8")
figureYunitsDisplay <- tcltk::tktext(ycaptionCanvas, foreground = "tomato3", height = 1, width = 9)
tcltk::tkgrid(figureYcaptionLabel, figureYcaptionDisplay, figureYminLabel, figureYminDisplay)
tcltk::tkgrid(figureYunitsLabel, figureYunitsDisplay, figureYmaxLabel, figureYmaxDisplay)
tcltk::tkgrid(calibrationXButton, xcaptionCanvas)
tcltk::tkgrid(calibrationYButton, ycaptionCanvas)
# Hidden (width 0) double-arrow calibration lines on the main canvas; they are
# resized/revealed once the user clicks the calibration start/end points.
# Tagged "extraction" so they are swept up with the other extraction overlays.
x_calibrationLine <- tcltk::tkcreate(mainFigureCanvas, "line", 1, 1, 1, 1, width = 0, fill = "tomato", arrow = "both")
tcltk::tkaddtag(mainFigureCanvas, "extraction", "withtag", x_calibrationLine)
# Reset the three-stage calibration state machine (armed / first click / second click).
set_juicr("x_calibrate", FALSE); set_juicr("x_startCalibrate", FALSE); set_juicr("x_endCalibrate", FALSE);
y_calibrationLine <- tcltk::tkcreate(mainFigureCanvas, "line", 1, 1, 1, 1, width = 0, fill = "tomato3", arrow = "both")
tcltk::tkaddtag(mainFigureCanvas, "extraction", "withtag", y_calibrationLine)
set_juicr("y_calibrate", FALSE); set_juicr("y_startCalibrate", FALSE); set_juicr("y_endCalibrate", FALSE);
#### END: figure calibration frame
#### START: figure error frame
# Error-bar extraction frame: two mutually exclusive toggle buttons select
# whether the next error bar is measured along x or along y, plus comboboxes
# describing what the mean/error visually represent.
figureError <- tcltk::ttklabelframe(manualWindow, text = "add points with error bars\n (e.g., bar, whisker, box plots)", padding = 8)
# NOTE(review): errorXbutton shows imageBarY and errorYbutton shows imageBarX —
# looks swapped, but may be intentional (a y-oriented glyph depicts error on x);
# confirm against the images before changing.
errorXbutton <- tcltk::tkbutton(figureError, width = 70,
                                command = function(){
                                  set_juicr("x_error", TRUE); set_juicr("y_error", FALSE);
                                  tcltk::tkconfigure(errorXbutton, relief = "sunken"); tcltk::tkconfigure(errorYbutton, relief = "raised");
                                }, text = "add error\n on x", image = imageBarY)
errorYbutton <- tcltk::tkbutton(figureError, width = 70,
                                command = function(){
                                  set_juicr("x_error", FALSE); set_juicr("y_error", TRUE);
                                  tcltk::tkconfigure(errorXbutton, relief = "raised"); tcltk::tkconfigure(errorYbutton, relief = "sunken");
                                }, text = "add error\n on y", image = imageBarX)  # BUGFIX: label read "on x" (copy-paste from errorXbutton)
# y-error mode is the default: depress its button to match the initial state.
tcltk::tkconfigure(errorYbutton, relief = "sunken")
tcltk::tkgrid(errorYbutton, errorXbutton, pady = 4, padx = 5)
# tclVars backing the mean/error-type comboboxes (theSample/theAxisType appear
# reserved for use elsewhere).
theMean <- tcltk::tclVar("NA"); theError <- tcltk::tclVar("NA"); theSample <- tcltk::tclVar("NA"); theAxisType <- tcltk::tclVar("NA");
meanTypes <- c("mean", "median", "%", "count", "prediction", "sample", "other", "none")
meanComboBox <- tcltk::ttkcombobox(figureError, value = meanTypes, textvariable = theMean, width = 6)
tcltk::tkset(meanComboBox, meanTypes[1])
errorTypes <- c("SD", "SE", "95%CI", "range", "min", "max", "IQR", "LQ", "UQ", "other", "none")
errorComboBox <- tcltk::ttkcombobox(figureError, value = errorTypes, textvariable = theError, width = 4)
tcltk::tkset(errorComboBox, errorTypes[1])
tcltk::tkgrid(meanComboBox, errorComboBox, sticky = "nwse")
# Hidden single-arrow overlay lines used while the user places an error bar,
# plus their click state machines (armed / start / end), initially reset.
x_errorLine <- tcltk::tkcreate(mainFigureCanvas, "line", 1, 1, 1, 1, width = 0, fill = "tomato", arrow = "first")
set_juicr("x_error", FALSE); set_juicr("x_startError", FALSE); set_juicr("x_endError", FALSE);
y_errorLine <- tcltk::tkcreate(mainFigureCanvas, "line", 1, 1, 1, 1, width = 0, fill = "tomato3", arrow = "first")
set_juicr("y_error", FALSE); set_juicr("y_startError", FALSE); set_juicr("y_endError", FALSE);
#### END: figure error frame
#### START: figure regression frame
# Line-extraction frame: "add slope" places a two-point regression segment;
# "add connected line" accumulates clicks into a multi-segment polyline.
figureLine <- tcltk::ttklabelframe(manualWindow, text = "add lines\n (e.g., regression, line plot)", padding = 0)
regressionButton <- tcltk::tkbutton(figureLine, width = 70,
command = function(){
set_juicr("x_regression", TRUE); set_juicr("x_connected", FALSE);
tcltk::tkconfigure(regressionButton, relief = "sunken"); tcltk::tkconfigure(connectedButton, relief = "raised");
}, text = "add\nslope", image = imageRegression)
# Hidden overlay line used while placing a regression; click state reset below.
x_regressionLine <- tcltk::tkcreate(mainFigureCanvas, "line", 1, 1, 1, 1, width = 0, fill = "tomato")
#x_regression <- FALSE; x_startRegression <- FALSE; x_endRegression <- FALSE;
set_juicr("x_regression", FALSE); set_juicr("x_startRegression", FALSE); set_juicr("x_endRegression", FALSE);
# The connected-line button is a toggle: pressing it while already "sunken"
# FINALIZES the polyline (commits coordinates via createMultiLine, clears the
# temporary canvas line, refreshes the data summary); pressing it while
# "raised" merely arms connected-line mode.
connectedButton <- tcltk::tkbutton(figureLine, width = 70,
command = function(){
set_juicr("x_regression", FALSE); set_juicr("x_connected", TRUE);
if(as.character(tcltk::tkcget(connectedButton, "-relief")) == "sunken") {
xyPos <- as.numeric(tcltk::tkcoords(mainFigureCanvas, x_connectedLine))
createMultiLine(xyPos)
# collapse the working polyline back to a degenerate (invisible) segment
tcltk::tkcoords(mainFigureCanvas, x_connectedLine, 1,1,1,1)
set_juicr("x_startConnected", FALSE); set_juicr("x_endConnected", FALSE); set_juicr("x_connected", FALSE);
updatedSummary <- point_summary()
tcltk::tkitemconfigure(txtCanvas, theDataText, text = updatedSummary)
tcltk::tkconfigure(regressionButton, relief = "raised"); tcltk::tkconfigure(connectedButton, relief = "raised");
} else {
tcltk::tkconfigure(regressionButton, relief = "raised"); tcltk::tkconfigure(connectedButton, relief = "sunken");
}
}, text = "add\n connected line", image = imageLine)
x_connectedLine <- tcltk::tkcreate(mainFigureCanvas, "line", 1, 1, 1, 1, width = 0, fill = "tomato")
#x_connected <- FALSE; x_startConnected <- FALSE; x_endConnected <- FALSE;
#x_connectedPos <- 1; y_connectedPos <- 1
# reset connected-line state and the last-clicked vertex position
set_juicr("x_connected", FALSE); set_juicr("x_startConnected", FALSE); set_juicr("x_endConnected", FALSE);
set_juicr("x_connectedPos", 1); set_juicr("y_connectedPos", 1);
tcltk::tkgrid(regressionButton, connectedButton, pady = 4, padx = 5)
#### END: figure regression frame
#### START: figure grouping frame
# Extract-by-group frame: eight radio buttons, each paired with an editable
# label. Selecting a group sets the active "pointColor" and recolors the
# label text from white (inactive) to its group color.
radioGroup <- tcltk::ttklabelframe(manualWindow, text = "extract-by-group (group=color+label)", padding = 8)
groupRadio1 <- tcltk::tkradiobutton(radioGroup, foreground = groupColors[1], background = "white")
groupRadio2 <- tcltk::tkradiobutton(radioGroup, foreground = groupColors[2], background = "white")
groupRadio3 <- tcltk::tkradiobutton(radioGroup, foreground = groupColors[3], background = "white")
groupRadio4 <- tcltk::tkradiobutton(radioGroup, foreground = groupColors[4], background = "white")
groupRadio5 <- tcltk::tkradiobutton(radioGroup, foreground = groupColors[5], background = "white")
groupRadio6 <- tcltk::tkradiobutton(radioGroup, foreground = groupColors[6], background = "white")
groupRadio7 <- tcltk::tkradiobutton(radioGroup, foreground = groupColors[7], background = "white")
groupRadio8 <- tcltk::tkradiobutton(radioGroup, foreground = groupColors[8], background = "white")
# Only group 1's label starts colored; groups 2-8 start white-on-white
# (effectively hidden) until selected.
groupRadio1Label <- tcltk::tktext(radioGroup, foreground = groupColors[1], height = 1, width = 12, font = "Arial 8")
groupRadio2Label <- tcltk::tktext(radioGroup, foreground = "white", height = 1, width = 12, font = "Arial 8")
groupRadio3Label <- tcltk::tktext(radioGroup, foreground = "white", height = 1, width = 12, font = "Arial 8")
groupRadio4Label <- tcltk::tktext(radioGroup, foreground = "white", height = 1, width = 12, font = "Arial 8")
groupRadio5Label <- tcltk::tktext(radioGroup, foreground = "white", height = 1, width = 12, font = "Arial 8")
groupRadio6Label <- tcltk::tktext(radioGroup, foreground = "white", height = 1, width = 12, font = "Arial 8")
groupRadio7Label <- tcltk::tktext(radioGroup, foreground = "white", height = 1, width = 12, font = "Arial 8")
groupRadio8Label <- tcltk::tktext(radioGroup, foreground = "white", height = 1, width = 12, font = "Arial 8")
tcltk::tkinsert(groupRadio1Label, "1.0", groupNames[1])
tcltk::tkinsert(groupRadio2Label, "1.0", groupNames[2])
tcltk::tkinsert(groupRadio3Label, "1.0", groupNames[3])
tcltk::tkinsert(groupRadio4Label, "1.0", groupNames[4])
tcltk::tkinsert(groupRadio5Label, "1.0", groupNames[5])
tcltk::tkinsert(groupRadio6Label, "1.0", groupNames[6])
tcltk::tkinsert(groupRadio7Label, "1.0", groupNames[7])
tcltk::tkinsert(groupRadio8Label, "1.0", groupNames[8])
pointGroup <- tcltk::tclVar("NA")
# Each radio's tcl "value" is the CURRENT text of its label widget (read once
# here, at configure time); its command activates the group's point color.
tcltk::tkconfigure(groupRadio1, variable = pointGroup, value = as.character(tcltk::tcl(groupRadio1Label, "get", "1.0", "end")), command = function() {set_juicr("pointColor", groupColors[1]); tcltk::tkconfigure(groupRadio1Label, foreground = groupColors[1]);})
tcltk::tkconfigure(groupRadio2, variable = pointGroup, value = as.character(tcltk::tcl(groupRadio2Label, "get", "1.0", "end")), command = function() {set_juicr("pointColor", groupColors[2]); tcltk::tkconfigure(groupRadio2Label, foreground = groupColors[2]);})
tcltk::tkconfigure(groupRadio3, variable = pointGroup, value = as.character(tcltk::tcl(groupRadio3Label, "get", "1.0", "end")), command = function() {set_juicr("pointColor", groupColors[3]); tcltk::tkconfigure(groupRadio3Label, foreground = groupColors[3]);})
tcltk::tkconfigure(groupRadio4, variable = pointGroup, value = as.character(tcltk::tcl(groupRadio4Label, "get", "1.0", "end")), command = function() {set_juicr("pointColor", groupColors[4]); tcltk::tkconfigure(groupRadio4Label, foreground = groupColors[4]);})
tcltk::tkconfigure(groupRadio5, variable = pointGroup, value = as.character(tcltk::tcl(groupRadio5Label, "get", "1.0", "end")), command = function() {set_juicr("pointColor", groupColors[5]); tcltk::tkconfigure(groupRadio5Label, foreground = groupColors[5]);})
tcltk::tkconfigure(groupRadio6, variable = pointGroup, value = as.character(tcltk::tcl(groupRadio6Label, "get", "1.0", "end")), command = function() {set_juicr("pointColor", groupColors[6]); tcltk::tkconfigure(groupRadio6Label, foreground = groupColors[6]);})
tcltk::tkconfigure(groupRadio7, variable = pointGroup, value = as.character(tcltk::tcl(groupRadio7Label, "get", "1.0", "end")), command = function() {set_juicr("pointColor", groupColors[7]); tcltk::tkconfigure(groupRadio7Label, foreground = groupColors[7]);})
tcltk::tkconfigure(groupRadio8, variable = pointGroup, value = as.character(tcltk::tcl(groupRadio8Label, "get", "1.0", "end")), command = function() {set_juicr("pointColor", groupColors[8]); tcltk::tkconfigure(groupRadio8Label, foreground = groupColors[8]);})
# group 1 is the default selection
tcltk::tcl(groupRadio1, "select");
# lay out the radios in a 2-group-per-row grid
tcltk::tkgrid(groupRadio1, groupRadio1Label, groupRadio2, groupRadio2Label, pady = 0)
tcltk::tkgrid(groupRadio3, groupRadio3Label, groupRadio4, groupRadio4Label, pady = 0)
tcltk::tkgrid(groupRadio5, groupRadio5Label, groupRadio6, groupRadio6Label, pady = 0)
tcltk::tkgrid(groupRadio7, groupRadio7Label, groupRadio8, groupRadio8Label, pady = 0)
tcltk::tkpack(zoomFrame, figureTypeCanvas, figureCalibration, figureError, figureLine, radioGroup)
tcltk::tkgrid(manualWindow)
# Hide packed children 4 and 5 (figureError, figureLine) by default;
# presumably re-shown elsewhere when the relevant mode is chosen — confirm.
manualWindowItems <- as.character(tcltk::tkpack.slaves(manualWindow))
tcltk::tkpack.forget(manualWindowItems[4])
tcltk::tkpack.forget(manualWindowItems[5])
##### END: manual frame in notebook
########################################
# Assemble the notebook: "automated" tab added first, then "manual"
# inserted at position 0 so it appears before it.
tcltk::tkadd(notebookFrame, automatedWindow, sticky = "nswe", text = " automated ", compound = "left")
tcltk::tkinsert(notebookFrame, 0, manualWindow, sticky = "nswe", text = " manual ")
#################################
##### END: options notebook
#################################
#######################################
##### START: data and save frame
#######################################
saveJuicr <- function() {
  # Assemble every extraction, all GUI settings, and three renderings of the
  # figure (original / standardized / standardized-with-extractions) into a
  # single self-contained HTML report (images embedded as base64).
  # Returns the report's file name (written to the working directory).
  # Relies on many widgets/variables from the enclosing GUI closure
  # (theNotes, the *Button widgets, theFigureFile, theFigureJuiced, ...).

  # Flatten the multi-line tcltk notes widget into one newline-joined string.
  # BUGFIX: tclvalue()/tcl() were called unqualified; the rest of the file
  # consistently uses tcltk::, and unqualified calls fail if tcltk is not
  # attached.
  fullNotes <- ""
  nNoteLines <- as.integer(tcltk::tclvalue(tcltk::tcl(theNotes, "index", "end"))) - 1
  for (i in seq_len(nNoteLines)) {
    lineNotes <- tcltk::tcl(theNotes, "get", paste0(i, ".0"), paste0(i, ".end"))
    fullNotes <- paste0(fullNotes, paste0(lineNotes, collapse = " "), "\n")
  }

  # Snapshot of every user-adjustable GUI setting (button reliefs encode
  # toggle state; text widgets are read via the project helper text_get()).
  settingsJuicr <- data.frame(
    "theNotes" = fullNotes,
    "circleSmallButton" = as.character(tcltk::tkcget(circleSmallButton, "-relief")),
    "circleMediumButton" = as.character(tcltk::tkcget(circleMediumButton, "-relief")),
    "circleBigButton" = as.character(tcltk::tkcget(circleBigButton, "-relief")),
    "circleSizeDisplay" = as.character(text_get(circleSizeDisplay)),
    "circleCircleButton" = as.character(tcltk::tkcget(circleCircleButton, "-relief")),
    "circleDiamondButton" = as.character(tcltk::tkcget(circleDiamondButton, "-relief")),
    "circleSquareButton" = as.character(tcltk::tkcget(circleSquareButton, "-relief")),
    "circleClosedButton" = as.character(tcltk::tkcget(circleClosedButton, "-relief")),
    "circleOpenButton" = as.character(tcltk::tkcget(circleOpenButton, "-relief")),
    "highQualityButton" = as.character(tcltk::tkcget(highQualityButton, "-relief")),
    "lowQualityButton" = as.character(tcltk::tkcget(lowQualityButton, "-relief")),
    "qualityDisplay" = as.character(text_get(qualityDisplay)),
    "barSmallButton" = as.character(tcltk::tkcget(barSmallButton, "-relief")),
    "barMediumButton" = as.character(tcltk::tkcget(barMediumButton, "-relief")),
    "barBigButton" = as.character(tcltk::tkcget(barBigButton, "-relief")),
    "barSizeDisplay" = as.character(text_get(barSizeDisplay)),
    "figureXminDisplay" = as.character(text_get(figureXminDisplay)),
    "figureXmaxDisplay" = as.character(text_get(figureXmaxDisplay)),
    "figureXcaptionDisplay" = as.character(text_get(figureXcaptionDisplay)),
    "figureXunitsDisplay" = as.character(text_get(figureXunitsDisplay)),
    "figureYminDisplay" = as.character(text_get(figureYminDisplay)),
    "figureYmaxDisplay" = as.character(text_get(figureYmaxDisplay)),
    "figureYcaptionDisplay" = as.character(text_get(figureYcaptionDisplay)),
    "figureYunitsDisplay" = as.character(text_get(figureYunitsDisplay)),
    "meanComboBox" = as.character(tcltk::tkget(meanComboBox)),
    "errorComboBox" = as.character(tcltk::tkget(errorComboBox)),
    "groupRadio1Label" = as.character(text_get(groupRadio1Label)),
    "groupRadio2Label" = as.character(text_get(groupRadio2Label)),
    "groupRadio3Label" = as.character(text_get(groupRadio3Label)),
    "groupRadio4Label" = as.character(text_get(groupRadio4Label)),
    "groupRadio5Label" = as.character(text_get(groupRadio5Label)),
    "groupRadio6Label" = as.character(text_get(groupRadio6Label)),
    "groupRadio7Label" = as.character(text_get(groupRadio7Label)),
    "groupRadio8Label" = as.character(text_get(groupRadio8Label)),
    # A label's foreground color records whether its group was ever selected
    # ("white" = never activated). BUGFIX: tclvalue() was unqualified.
    "groupRadio1LabelStatus" = tcltk::tclvalue(tcltk::tkcget(groupRadio1Label, "-foreground")),
    "groupRadio2LabelStatus" = tcltk::tclvalue(tcltk::tkcget(groupRadio2Label, "-foreground")),
    "groupRadio3LabelStatus" = tcltk::tclvalue(tcltk::tkcget(groupRadio3Label, "-foreground")),
    "groupRadio4LabelStatus" = tcltk::tclvalue(tcltk::tkcget(groupRadio4Label, "-foreground")),
    "groupRadio5LabelStatus" = tcltk::tclvalue(tcltk::tkcget(groupRadio5Label, "-foreground")),
    "groupRadio6LabelStatus" = tcltk::tclvalue(tcltk::tkcget(groupRadio6Label, "-foreground")),
    "groupRadio7LabelStatus" = tcltk::tclvalue(tcltk::tkcget(groupRadio7Label, "-foreground")),
    "groupRadio8LabelStatus" = tcltk::tclvalue(tcltk::tkcget(groupRadio8Label, "-foreground"))
  )

  # Collect every category of extraction as data frames (project helpers).
  resultsJuicr <- list("axes" = getAxisExtractions(sendToFile = TRUE),
                       "points" = getPointExtractions(sendToFile = TRUE),
                       "points_coordinates" = getPointExtractions(sendToFile = TRUE, coordinates = TRUE),
                       "autoBars" = getBarExtractions(sendToFile = TRUE),
                       "errorBars" = getErrorExtractions(sendToFile = TRUE),
                       "regressions" = getRegressionExtractions(sendToFile = TRUE),
                       "lines" = getLineExtractions(sendToFile = TRUE))

  # Draw one filled circle of the given color name onto an EBImage image.
  # (Centralizes the previously duplicated drawCircle/col2rgb boilerplate.)
  markPoint <- function(img, x, y, radius, colorName) {
    EBImage::drawCircle(img, x, y, radius = radius,
                        col = grDevices::rgb(t(grDevices::col2rgb(colorName)), maxColorValue = 255),
                        fill = TRUE)
  }

  # Render the extraction overlay image: start from the standardized figure
  # and mark axes (green), points (orange), error bars (blue), regression
  # endpoints (violet), and connected-line vertices (slate blue).
  theOriginal <- EBImage::readImage(theFigureFile)
  theFigureExtractions <- theFigureJuiced
  theFigureExtractions <- markPoint(theFigureExtractions, resultsJuicr$axes$X.axis[1],
                                    resultsJuicr$axes$X.axis[2], 7, "mediumseagreen")
  theFigureExtractions <- markPoint(theFigureExtractions, resultsJuicr$axes$X.axis[3],
                                    resultsJuicr$axes$X.axis[4], 7, "mediumseagreen")
  theFigureExtractions <- markPoint(theFigureExtractions, resultsJuicr$axes$Y.axis[1],
                                    resultsJuicr$axes$Y.axis[2], 7, "mediumseagreen")
  theFigureExtractions <- markPoint(theFigureExtractions, resultsJuicr$axes$Y.axis[3],
                                    resultsJuicr$axes$Y.axis[4], 7, "mediumseagreen")
  for (i in seq_len(nrow(resultsJuicr$points_coordinates))) {
    theFigureExtractions <- markPoint(theFigureExtractions,
                                      resultsJuicr$points_coordinates$x.coord[i],
                                      resultsJuicr$points_coordinates$y.coord[i], 3, "orange")
  }
  for (i in seq_len(nrow(resultsJuicr$errorBars))) {
    theFigureExtractions <- markPoint(theFigureExtractions,
                                      resultsJuicr$errorBars$mean.x[i],
                                      resultsJuicr$errorBars$mean.y[i], 3, "dodgerblue")
    theFigureExtractions <- markPoint(theFigureExtractions,
                                      resultsJuicr$errorBars$error.x[i],
                                      resultsJuicr$errorBars$error.y[i], 3, "dodgerblue")
  }
  for (i in seq_len(nrow(resultsJuicr$regressions))) {
    theFigureExtractions <- markPoint(theFigureExtractions,
                                      resultsJuicr$regressions$x1.coord[i],
                                      resultsJuicr$regressions$y1.coord[i], 5, "violet")
    theFigureExtractions <- markPoint(theFigureExtractions,
                                      resultsJuicr$regressions$x2.coord[i],
                                      resultsJuicr$regressions$y2.coord[i], 5, "violet")
  }
  for (i in seq_len(nrow(resultsJuicr$lines))) {
    theFigureExtractions <- markPoint(theFigureExtractions,
                                      resultsJuicr$lines$x.coord[i],
                                      resultsJuicr$lines$y.coord[i], 3, "slateblue")
  }

  # Write the two temporary PNGs that get base64-embedded into the report
  # (both are deleted again at the end of toHTML()).
  theExtractions <- paste0(tools::file_path_sans_ext(basename(theFigureFile)), "_juicr_extracted.png")
  EBImage::writeImage(theFigureExtractions, file = theExtractions, type = "png")
  EBImage::writeImage(theFigureJuiced, file = theStandardizedImageFile, type = "png")
  # File-provenance table for the report. NOTE(review): the column names
  # "formated"/"size_bites" are misspelled but kept verbatim in case
  # downstream consumers parse the HTML by header.
  filesJuicr <- data.frame("file_name" = c(basename(theFigureFile), basename(theStandardizedImageFile), theExtractions),
                           "formated" = c("original", "standardized", "standardized with extractions"),
                           "size_bites" = c(file.info(theFigureFile)$size, file.info(theStandardizedImageFile)$size, file.info(theExtractions)$size),
                           "date_created" = c(paste(file.info(theFigureFile)$ctime), paste(file.info(theStandardizedImageFile)$ctime), paste(file.info(theExtractions)$ctime)),
                           "width_pixels" = c(dim(theOriginal)[1], dim(theFigureJuiced)[1], dim(theFigureExtractions)[1]),
                           "height_pixels" = c(dim(theOriginal)[2], dim(theFigureJuiced)[2], dim(theFigureExtractions)[2]))

  # Write a data frame as an HTML table to an open connection; empty input
  # produces a placeholder "no extractions" table.
  toHTML_table <- function(aDataFrame, theID, aConnection) {
    if (length(aDataFrame) == 0) {
      cat(paste0("<table style=\"border-spacing: 20px 0px;\" id=\"", theID, "\">\n"), file = aConnection)
      cat("<tr>\n", paste0("<th>", "no extractions", "</th>\n"), "</tr>\n", file = aConnection)
      cat("<tr>\n", paste0("<th>", "NA", "</th>\n"), "</tr>\n", file = aConnection)
      cat("</table>\n", file = aConnection)
      return("")
    }
    cat(paste0("<table style=\"border-spacing: 20px 0px;\" id=\"", theID, "\">\n"), file = aConnection)
    cat("<tr>\n", paste0("<th>", labels(aDataFrame)[[2]], "</th>\n"), "</tr>\n", file = aConnection)
    for (i in seq_len(nrow(aDataFrame))) cat("<tr>\n", paste0("<td>", aDataFrame[i, ], "</td>\n"), "</tr>\n", file = aConnection)
    cat("</table>\n", file = aConnection)
  }

  # Stream an image file to the connection as an inline base64 <img>.
  # BUGFIX: base64Encode was called unqualified (undefined unless RCurl is
  # attached); now namespaced like its sibling toHTML_image2().
  toHTML_image <- function(theImage, aConnection, type = "jpg", theID = "logo") {
    cat(paste0("<img id=\"", theID, "\" src=\"data:image/", type, ";base64,"), file = aConnection)
    rawFile <- readBin(theImage, "raw", file.info(theImage)$size)
    cat(RCurl::base64Encode(rawFile, "character")[1], file = aConnection)
    cat("\">\n", file = aConnection)
  }

  # Same as toHTML_image() but RETURNS the <img> tag as a string instead of
  # writing it (used for embedding images inside a table cell).
  toHTML_image2 <- function(theImage, aConnection, type = "jpg", theID = "logo") {
    imgTXT <- paste0("<img id=\"", theID, "\" src=\"data:image/", type, ";base64,")
    rawFile <- readBin(theImage, "raw", file.info(theImage)$size)
    imgTXT <- paste0(imgTXT, RCurl::base64Encode(rawFile, "character")[1], "\">")
    return(imgTXT)
  }

  # Emit the full HTML report, then delete the temporary PNGs.
  # (theImageFile is currently unused inside; file names come from the
  # closure and from allResults$files.)
  toHTML <- function(theImageFile, allResults) {
    aConnection <- file(paste0(tools::file_path_sans_ext(basename(allResults$files[1, 1])), "_juicr.html"), "w")
    cat("<!DOCTYPE html>\n",
        "<!--\n\tLajeunesse, M.J. (2021) Squeezing data from images with the juicr package for R. v 0.1\n-->\n",
        "<html>\n",
        paste0("<head>\n<title>Juicr extraction: ", basename(theFigureFile), "</title>\n"),
        paste0("<meta name=\"descripton\" content=\"image extractions using juicr R package\">\n"),
        paste0("<meta name=\"author\" content=\"juicr v. 0.1\">\n</head>\n"),
        "<body>\n", file = aConnection)
    # package logo shipped with juicr (getIMG is a project helper)
    toHTML_image(getIMG("test_orange3.png"), aConnection, type = "png")
    cat(paste0("<h1>JUICR record of extractions from image:<br>", allResults$files[1, 1], "</h1>\n"), file = aConnection)
    cat(paste0("<br><hr><br><h2>File information</h2><br>\n"), file = aConnection)
    toHTML_table(allResults$files, "files", aConnection)
    cat(paste0("<br>\n"), file = aConnection)
    # side-by-side table of the three figure renderings, embedded inline
    collectImages <- data.frame(
      file_name = c(allResults$files$file_name),
      image = c(
        toHTML_image2(theFigureFile, theID = "original"),
        toHTML_image2(theStandardizedImageFile, theID = "standardized"),
        toHTML_image2(allResults$files[3, 1], theID = "extracted"))
    )
    toHTML_table(collectImages, "images", aConnection)
    cat(paste0("<br><hr><br><h2>Data extractions from: ", allResults$files[2, 1], "</h2><br>\n"), file = aConnection)
    cat(paste0("<h3 style=\"color:orange\">extracted data: points</h3>\n"), file = aConnection)
    toHTML_table(allResults$extractions$points, "points", aConnection)
    cat(paste0("<h3 style=\"color:mediumseagreen\">extracted data: coordinates for X and Y axes</h3>\n"), file = aConnection)
    toHTML_table(allResults$extractions$axes, "axes", aConnection)
    cat(paste0("<h3 style=\"color:orange\">extracted data: auto-bars</h3>\n"), file = aConnection)
    toHTML_table(allResults$extractions$autoBars, "autobars", aConnection)
    cat(paste0("<h3 style=\"color:dodgerblue\">extracted data: error Bars</h3>\n"), file = aConnection)
    toHTML_table(allResults$extractions$errorBars, "errorbars", aConnection)
    cat(paste0("<h3 style=\"color:violet\">extracted data: regressions</h3>\n"), file = aConnection)
    toHTML_table(allResults$extractions$regressions, "regressions", aConnection)
    cat(paste0("<h3 style=\"color:slateblue\">extracted data: lines</h3>\n"), file = aConnection)
    toHTML_table(allResults$extractions$lines, "lines", aConnection)
    cat(paste0("<br><hr><br><h2>juicr parameters</h2><br>\n"), file = aConnection)
    toHTML_table(allResults$settings, "parameters", aConnection)
    cat("</body>\n", "</html>\n", file = aConnection)
    close(aConnection)
    # the PNGs are now embedded in the HTML; clean up the temp files
    file.remove(theExtractions)
    file.remove(theStandardizedImageFile)
  }

  allResults <- list("extractions" = resultsJuicr, "settings" = settingsJuicr, "files" = filesJuicr)
  toHTML("", allResults)
  return(paste0(tools::file_path_sans_ext(basename(allResults$files[1, 1])), "_juicr.html"))
}
### START OF DATA FRAME
# Right-hand panel: scrollable live summary of extractions, a free-text
# notes box, and the buttons to export/save results.
dataWindow <- tcltk::ttkframe(aJuicrWindow)
#### start: text summary frame
# Canvas-based text summary (point_summary() renders the current extraction
# counts); the scrollregion is pre-sized for a long report (500*13 px tall).
txtCanvas <- tcltk::tkcanvas(dataWindow, background = "white", width = 200, height = 440, "-scrollregion", paste(0, 0, 200, 500 * 13))
theDataText <- tcltk::tkcreate(txtCanvas, "text", 100, 3, justify = "left", text = point_summary(), font = "Consolas 8", anchor = "n")
# two-way wiring between the canvas y-view and its scrollbar
theExtractedScroll <- tcltk::ttkscrollbar(dataWindow, command = function(...) tcltk::tcl(txtCanvas, "yview", ...), orient = "vertical")
tcltk::tkconfigure(txtCanvas, yscrollcommand = function(...) tcltk::tkset(theExtractedScroll, ...))
#### end: text summary frame
#### start: notes frame
# Free-text notes captured into the saved .html report by saveJuicr().
notesCanvas <- tcltk::ttklabelframe(dataWindow, text = "Notes (e.g., user name, fig. #, ref.)", padding = 5)
theNotes <- tcltk::tktext(notesCanvas, height = 4, width = 26, font = "arial 10")
tcltk::tkinsert(theNotes, "1.0", "")
#### end: notes frame
#### start: save frame
saveWindow <- tcltk::ttkframe(dataWindow)
getDataWindow <- tcltk::ttkframe(saveWindow)
# "save .csv extractions": pushes every extraction category out via the
# get*Extractions(sendToWindow = TRUE) project helpers.
viewAllDataButton <- tcltk::ttkbutton(getDataWindow, text = " save .csv\nextractions", command = function() {getPointExtractions(sendToWindow = TRUE); getBarExtractions(sendToWindow = TRUE); getErrorExtractions(sendToWindow = TRUE); getRegressionExtractions(sendToWindow = TRUE); getLineExtractions(sendToWindow = TRUE); getAxisExtractions(sendToWindow = TRUE);})
#exportRButton <- ttkbutton(getDataWindow,text = "export to R", command = function() get_ExtractionList())
aboutButton <- tcltk::ttkbutton(getDataWindow, text = "help/cite", command = function() aboutJuicrWindow())
# "save as .html": shows a transient "saving..." label (the 2 s sleep gives
# the user visible feedback), runs saveJuicr(), stores the report path, and
# refreshes the summary text before restoring the button label.
saveButton <- tcltk::ttkbutton(saveWindow, compound = "left", text = "save\nextractions\nas .html", image = orangeJuiceSave,
command = function() {
#tcltk::tk_choose.dir()
tcltk::tkconfigure(saveButton, text = paste0("saving..."))
tcltk::tcl("update"); Sys.sleep(2);
set_juicr("theSavedFile", saveJuicr());
updatedSummary <- point_summary();
tcltk::tkitemconfigure(txtCanvas, theDataText, text = updatedSummary);
tcltk::tkconfigure(saveButton, text = paste0("save\nextractions\nas .html"));})
#### end: save frame
# grid/pack the right-hand panel together
tcltk::tkgrid(txtCanvas, theExtractedScroll, sticky = "news")
tcltk::tkgrid(theNotes, pady = 3)
tcltk::tkgrid(notesCanvas, sticky = "news")
tcltk::tkgrid(viewAllDataButton, pady = 1, sticky = "news")
#tkgrid(exportRButton, pady = 1, sticky = "news")
tcltk::tkgrid(aboutButton, pady = 1, sticky = "news")
tcltk::tkgrid(getDataWindow, saveButton, padx = 5, pady = 6, sticky = "news")
tcltk::tkgrid(saveWindow)
#######################################
##### END: data and save frame
#######################################
tcltk::tkpack(figureWindow, side = "left", pady = 15, padx = 15)
tcltk::tkpack(dataWindow, side = "right", pady = 15, padx = 15)
tcltk::tkpack(notebookFrame, side = "top", pady = 15, padx = 15)
# # # # # # # # # # # # # # # # # #
##### END OF JUICR GUI WINDOW #####
# # # # # # # # # # # # # # # # # #
#############################################################################
#############################################################################
##---------------------------
## START: interactivity
##---------------------------
# Mouse-motion handler for the main figure canvas.  On every movement it:
#  (1) converts widget coordinates to canvas coordinates,
#  (2) updates the coordinate readout (raw pixels or axis-calibrated values),
#  (3) refreshes the 3x magnifier ("zoom") thumbnail around the cursor, and
#  (4) live-previews whichever tool is armed (axis calibration, error bar,
#      regression line, or connected multi-line) by dragging its rubber-band
#      line to follow the cursor.
# Mode flags live in the juicr environment (get_juicr/set_juicr); the armed
# tool is inferred from which toolbar buttons are currently "sunken".
mainFigureMouseOver <- function(x, y){
# translate window-relative event coords into canvas coords (handles scrolling)
xpos <- as.numeric(tcltk::tcl(mainFigureCanvas$ID, "canvasx", as.integer(x)))
ypos <- as.numeric(tcltk::tcl(mainFigureCanvas$ID, "canvasy", as.integer(y)))
# update the zoom coordinates
if(as.character(tcltk::tkget(pixelComboBox)) == "pixels") {
tcltk::tkitemconfigure(zoomCanvas, zoomText, text = sprintf("(x,y)=(%5s,%5s)", xpos, ypos))
} else {
# show axis-calibrated coordinates instead of raw pixels
tcltk::tkitemconfigure(zoomCanvas, zoomText, text = sprintf("(x,y)=(%5s,%5s)",
signif(coordinate_calibrate(xpos, "x"), 4),
signif(coordinate_calibrate(ypos, "y"), 4)))
}
# copy a 77x77-ish window around the cursor into the zoom image at 3x,
# clamped to the figure edges (the 38-pixel half-window)
xfigMax <- as.integer(tcltk::tcl("image", "width", theFigure))
yfigMax <- as.integer(tcltk::tcl("image", "height", theFigure))
zoomFigure <- tcltk::tcl("image", "create", "photo", paste(zoomFigure))
xmin <- ifelse(xpos <= 38, 0, xpos - 38)
ymin <- ifelse(ypos <= 38, 0, ypos - 38)
xmax <- ifelse(xpos >= xfigMax - 38, xfigMax, xpos + 38)
ymax <- ifelse(ypos >= yfigMax - 38, yfigMax, ypos + 38)
tcltk::tcl(zoomFigure, "copy", theFigure, "-from", xmin, ymin, xmax, ymax, "-zoom", 3)
tcltk::tkitemconfigure(zoomCanvas, zoomImage, image = zoomFigure)
# re-centre the zoom image when the cursor is near the top/left edge
tcltk::tkcoords(zoomCanvas, zoomImage, ifelse(xpos <= 38, (77*3)/2 - xpos*3, 0), ifelse(ypos <= 38, (77*3)/2 - ypos*3, 0))
### START: X-axis calibration
# armed but not yet anchored: the short calibration stub follows the cursor
if(get_juicr("x_calibrate") == TRUE && get_juicr("x_startCalibrate") == FALSE) {
tcltk::tkitemconfigure(mainFigureCanvas, x_calibrationLine, width = 5)
update_X_axis(xpos, ypos, xpos + 30, ypos)
}
if(get_juicr("y_calibrate") == TRUE && get_juicr("y_startCalibrate") == FALSE) {
tcltk::tkitemconfigure(mainFigureCanvas, y_calibrationLine, width = 5)
update_Y_axis(xpos, ypos, xpos, ypos + 30)
}
# anchored: stretch the calibration line from its fixed start to the cursor
if(get_juicr("x_startCalibrate") == TRUE) {
xyPos <- as.numeric(tcltk::tkcoords(mainFigureCanvas, x_calibrationLine))
update_X_axis(xyPos[1], xyPos[2], xpos, xyPos[2])
}
if(get_juicr("y_startCalibrate") == TRUE) {
xyPos <- as.numeric(tcltk::tkcoords(mainFigureCanvas, y_calibrationLine))
update_Y_axis(xyPos[1], xyPos[2], xyPos[1], ypos)
}
### END: X-axis calibration
# error-bar preview (bar-plot mode): horizontal (X) bars stretch in x only,
# vertical (Y) bars stretch in y only; the extra two pairs draw the end cap
if(as.character(tcltk::tkcget(errorXbutton, "-relief")) == "sunken" && as.character(tcltk::tkcget(barPlotButton, "-relief")) == "sunken") {
if(get_juicr("x_error") == TRUE && get_juicr("x_startError") == FALSE) {
tcltk::tkitemconfigure(mainFigureCanvas, x_errorLine, width = 3)
tcltk::tkcoords(mainFigureCanvas, x_errorLine, xpos, ypos, xpos, ypos,
xpos, ypos - 7, xpos, ypos + 8)
}
if(get_juicr("x_startError") == TRUE) {
xyPos <- as.numeric(tcltk::tkcoords(mainFigureCanvas, x_errorLine))
tcltk::tkcoords(mainFigureCanvas, x_errorLine, xyPos[1], xyPos[2], xpos, xyPos[2],
xpos, xyPos[2] - 7, xpos, xyPos[2] + 8)
}
}
if(as.character(tcltk::tkcget(errorYbutton, "-relief")) == "sunken" && as.character(tcltk::tkcget(barPlotButton, "-relief")) == "sunken") {
if(get_juicr("y_error") == TRUE && get_juicr("y_startError") == FALSE) {
tcltk::tkitemconfigure(mainFigureCanvas, y_errorLine, width = 3)
tcltk::tkcoords(mainFigureCanvas, y_errorLine, xpos, ypos, xpos, ypos,
xpos - 7, ypos, xpos + 8, ypos)
}
if(get_juicr("y_startError") == TRUE) {
xyPos <- as.numeric(tcltk::tkcoords(mainFigureCanvas, y_errorLine))
tcltk::tkcoords(mainFigureCanvas, y_errorLine, xyPos[1], xyPos[2], xyPos[1], ypos,
xyPos[1] - 7, ypos, xyPos[1] + 8, ypos)
}
}
##############
# regression-line preview (line-plot mode): rubber-band from anchor to cursor
if(as.character(tcltk::tkcget(regressionButton, "-relief")) == "sunken" && as.character(tcltk::tkcget(linePlotButton, "-relief")) == "sunken") {
if(get_juicr("x_regression") == TRUE && get_juicr("x_startRegression") == FALSE) {
tcltk::tkitemconfigure(mainFigureCanvas, x_regressionLine, width = 3)
tcltk::tkcoords(mainFigureCanvas, x_regressionLine, xpos, ypos, xpos + 2, ypos + 2)
}
if(get_juicr("x_startRegression") == TRUE) {
xyPos <- as.numeric(tcltk::tkcoords(mainFigureCanvas, x_regressionLine))
tcltk::tkcoords(mainFigureCanvas, x_regressionLine, xyPos[1], xyPos[2], xpos + 2, ypos + 2)
}
}
##############
# connected multi-line preview: the LAST coordinate pair tracks the cursor
# while earlier pairs stay fixed as already-committed vertices
if(as.character(tcltk::tkcget(connectedButton, "-relief")) == "sunken" && as.character(tcltk::tkcget(linePlotButton, "-relief")) == "sunken") {
if(get_juicr("x_connected") == TRUE && get_juicr("x_startConnected") == FALSE) {
tcltk::tkitemconfigure(mainFigureCanvas, x_connectedLine, width = 3, arrow = "last")
xyPos <- as.numeric(tcltk::tkcoords(mainFigureCanvas, x_connectedLine))
if(length(xyPos) == 4) {tcltk::tkcoords(mainFigureCanvas, x_connectedLine, xpos, ypos, xpos + 2, ypos + 2)}
else {
tcltk::tkcoords(mainFigureCanvas, x_connectedLine, as.character(c(head(xyPos,-2L), xpos, ypos)))}
}
if(get_juicr("x_startConnected") == TRUE) {
xyPos <- as.numeric(tcltk::tkcoords(mainFigureCanvas, x_connectedLine))
if(length(xyPos) == 4) {tcltk::tkcoords(mainFigureCanvas, x_connectedLine, xyPos[1], xyPos[2], xpos + 2, ypos + 2)}
else {tcltk::tkcoords(mainFigureCanvas, x_connectedLine, as.character(c(head(xyPos,-2L), xpos, ypos)))}
}
}
}
# Delete the extraction record tied to the canvas item currently under the
# cursor, remove the item from the canvas, refresh the summary panel, and
# clear the hover tooltip (text plus drop-shadow image).
deletePoint <- function() {
  currentIndex <- point_getTags("current")[1]
  point_delete(point_indexToPoint(currentIndex))
  tcltk::tcl(mainFigureCanvas, "delete", "current")
  # refresh the extraction summary shown in the data panel
  tcltk::tkitemconfigure(txtCanvas, theDataText, text = point_summary())
  # blank the hover overlay and park both overlay items at the origin
  tcltk::tkitemconfigure(mainFigureCanvas, hoverText, text = "")
  tcltk::tkitemconfigure(mainFigureCanvas, hoverShadow, image = "")
  tcltk::tkcoords(mainFigureCanvas, hoverText, 0, 0)
  tcltk::tkcoords(mainFigureCanvas, hoverShadow, 0, 0)
}
# Draw a manual extraction point (small oval) centred on (xPos, yPos) and tag
# it so it can be indexed, grouped, and bulk-selected later.  Registers the
# point in the extraction bookkeeping via point_add().
createPoint <- function(xPos, yPos) {
  left   <- xPos - pointSize
  top    <- yPos - pointSize
  right  <- xPos + pointSize
  bottom <- yPos + pointSize
  newPoint <- tcltk::tkcreate(mainFigureCanvas, "oval",
                              left, top, right, bottom,
                              width = 1,
                              outline = "white",
                              fill = get_juicr("pointColor"))
  # tags, in order: unique extraction index, active group, type, catch-all
  theTags <- c(point_pointToIndex(point_add()),
               as.character(tcltk::tclvalue(pointGroup)),
               "point",
               "extraction")
  for (aTag in theTags) {
    tcltk::tkaddtag(mainFigureCanvas, aTag, "withtag", newPoint)
  }
}
# Place an auto-detected point marker as an image item, anchored at its NW
# corner and offset by 8 px so the sprite sits centred on the detection
# (assumes a 16x16 sprite -- TODO confirm autoPointImage dimensions).
autoPoint <- function(xPos, yPos) {
  newPoint <- tcltk::tcl(mainFigureCanvas,
                         "create", "image",
                         xPos - 8, yPos - 8,
                         image = autoPointImage, anchor = "nw")
  # tags, in order: unique extraction index, "auto" group, type, catch-all
  theTags <- c(point_pointToIndex(point_add()), "auto", "point", "extraction")
  for (aTag in theTags) {
    tcltk::tkaddtag(mainFigureCanvas, aTag, "withtag", newPoint)
  }
}
# Place an auto-detected cluster marker as an image item, anchored at its NW
# corner and offset by 8 px so the sprite sits centred on the detection.
autoCluster <- function(xPos, yPos) {
  newPoint <- tcltk::tcl(mainFigureCanvas,
                         "create", "image",
                         xPos - 8, yPos - 8,
                         image = clusterPointImage, anchor = "nw")
  # tags, in order: unique extraction index, "cluster" group, type, catch-all
  theTags <- c(point_pointToIndex(point_add()), "cluster", "point", "extraction")
  for (aTag in theTags) {
    tcltk::tkaddtag(mainFigureCanvas, aTag, "withtag", newPoint)
  }
}
# Place an auto-detected bar marker as an image item.  xAdjust/yAdjust shift
# the NW anchor so the sprite lines up with the detected bar top; defaults
# (8, 4) match the interactive placement, while callers reloading saved data
# may pass different offsets.
autoBar <- function(xPos, yPos, xAdjust = 8, yAdjust = 4) {
  newPoint <- tcltk::tcl(mainFigureCanvas,
                         "create", "image",
                         xPos - xAdjust, yPos - yAdjust,
                         image = theBarImage, anchor = "nw")
  # tags, in order: unique extraction index, "autobar" group, type, catch-all
  theTags <- c(point_pointToIndex(point_add()), "autobar", "point", "extraction")
  for (aTag in theTags) {
    tcltk::tkaddtag(mainFigureCanvas, aTag, "withtag", newPoint)
  }
}
# Commit a horizontal (x-axis) error bar: a line from (x1, y1) to (x2, y2)
# plus a short vertical cap at the far end; arrow = "first" marks the mean end.
createErrorBarX <- function(x1, y1, x2, y2) {
  capTop    <- y1 - 7
  capBottom <- y2 + 8
  errorPoint <- tcltk::tkcreate(mainFigureCanvas, "line",
                                x1, y1, x2, y2,
                                x2, capTop, x2, capBottom, # cap
                                width = 3,
                                arrow = "first",
                                fill = get_juicr("pointColor"))
  # tags, in order: unique extraction index, active group, type, catch-all
  theTags <- c(point_pointToIndex(point_add()),
               as.character(tcltk::tclvalue(pointGroup)),
               "error",
               "extraction")
  for (aTag in theTags) {
    tcltk::tkaddtag(mainFigureCanvas, aTag, "withtag", errorPoint)
  }
}
# Commit a vertical (y-axis) error bar: a line from (x1, y1) to (x1, y2)
# plus a short horizontal cap at the far end; arrow = "first" marks the mean end.
createErrorBarY <- function(x1, y1, x2, y2) {
  capLeft  <- x1 - 7
  capRight <- x1 + 8
  errorPoint <- tcltk::tkcreate(mainFigureCanvas, "line",
                                x1, y1, x1, y2,
                                capLeft, y2, capRight, y2, # cap
                                width = 3,
                                arrow = "first",
                                fill = get_juicr("pointColor"))
  # tags, in order: unique extraction index, active group, type, catch-all
  theTags <- c(point_pointToIndex(point_add()),
               as.character(tcltk::tclvalue(pointGroup)),
               "error",
               "extraction")
  for (aTag in theTags) {
    tcltk::tkaddtag(mainFigureCanvas, aTag, "withtag", errorPoint)
  }
}
# Commit a regression line segment between two clicked endpoints and tag it
# for later indexing, grouping, and bulk selection.
createRegressionLine <- function (x1, y1, x2, y2) {
  regressionPoint <- tcltk::tkcreate(mainFigureCanvas, "line",
                                     x1, y1, x2, y2,
                                     width = 3,
                                     fill = get_juicr("pointColor"))
  # tags, in order: unique extraction index, active group, type, catch-all
  theTags <- c(point_pointToIndex(point_add()),
               as.character(tcltk::tclvalue(pointGroup)),
               "regression",
               "extraction")
  for (aTag in theTags) {
    tcltk::tkaddtag(mainFigureCanvas, aTag, "withtag", regressionPoint)
  }
}
# Commit a connected multi-segment line from a flat vector of x,y coordinate
# pairs.  The last pair is dropped before drawing -- it appears to be the live
# cursor-preview vertex rather than a committed one (TODO confirm against the
# connected-line click handler).
createMultiLine <- function (theXYs) {
  committedCoords <- as.character(head(theXYs, -2L))
  connectedPoints <- tcltk::tkcreate(mainFigureCanvas, "line",
                                     committedCoords,
                                     width = 3,
                                     fill = get_juicr("pointColor"),
                                     arrow = "last")
  # tags, in order: unique extraction index, active group, type, catch-all
  theTags <- c(point_pointToIndex(point_add()),
               as.character(tcltk::tclvalue(pointGroup)),
               "line",
               "extraction")
  for (aTag in theTags) {
    tcltk::tkaddtag(mainFigureCanvas, aTag, "withtag", connectedPoints)
  }
}
# Left-click handler for the main figure canvas.  Depending on which tool is
# armed it either: drops a manual point; anchors/finishes an axis calibration
# line; anchors/finishes an error bar (bar-plot mode); anchors/finishes a
# regression line (line-plot mode); or extends a connected multi-line.
# Mode flags live in the juicr environment; a "start" flag means the first
# anchor has been placed and the next click completes the shape.  Ends by
# refreshing the extraction summary panel.
mainFigureClick <- function(x, y) {
# translate window-relative event coords into canvas coords
xPos <- as.numeric(tcltk::tcl(mainFigureCanvas$ID, "canvasx", as.integer(x)))
yPos <- as.numeric(tcltk::tcl(mainFigureCanvas$ID, "canvasy", as.integer(y)))
# plain point: only when no calibration is in progress and neither the
# bar-plot nor line-plot tool is active
if(!any(get_juicr("y_calibrate"), get_juicr("x_calibrate"), get_juicr("y_startCalibrate"), get_juicr("x_startCalibrate"), get_juicr("y_endCalibrate"), get_juicr("x_endCalibrate"), as.character(tcltk::tkcget(barPlotButton, "-relief")) == "sunken", as.character(tcltk::tkcget(linePlotButton, "-relief")) == "sunken")) {
createPoint(xPos, yPos)
}
### START: axis calibration
# second click finishes the X calibration line and resets all X-mode flags
if(get_juicr("x_startCalibrate") == TRUE) {
xyPos <- as.numeric(tcltk::tkcoords(mainFigureCanvas, x_calibrationLine))
update_X_axis(xyPos[1], xyPos[2], xPos, xyPos[2])
set_juicr("x_startCalibrate", FALSE); set_juicr("x_endCalibrate", FALSE); set_juicr("x_calibrate", FALSE);
tcltk::tkconfigure(calibrationXButton, relief = "raised");
}
# first click anchors the X calibration line
if(get_juicr("x_calibrate") == TRUE) {
update_X_axis(xPos, yPos, xPos + 30, yPos)
set_juicr("x_startCalibrate", TRUE)
}
if(get_juicr("y_startCalibrate") == TRUE) {
xyPos <- as.numeric(tcltk::tkcoords(mainFigureCanvas, y_calibrationLine))
update_Y_axis(xyPos[1], xyPos[2], xyPos[1], yPos)
set_juicr("y_startCalibrate", FALSE); set_juicr("y_endCalibrate", FALSE); set_juicr("y_calibrate", FALSE);
tcltk::tkconfigure(calibrationYButton, relief = "raised");
}
if(get_juicr("y_calibrate") == TRUE) {
update_Y_axis(xPos, yPos, xPos, yPos + 30)
set_juicr("y_startCalibrate", TRUE)
}
### END: X-axis calibration
# error bars (bar-plot mode): second click commits via createErrorBarX/Y and
# parks the preview line at (1,1,1,1); first click anchors the preview
if(as.character(tcltk::tkcget(errorXbutton, "-relief")) == "sunken" && as.character(tcltk::tkcget(barPlotButton, "-relief")) == "sunken") {
if(get_juicr("x_startError") == TRUE) {
xyPos <- as.numeric(tcltk::tkcoords(mainFigureCanvas, x_errorLine))
createErrorBarX(xyPos[1], xyPos[2], xPos, xyPos[2])
tcltk::tkcoords(mainFigureCanvas, x_errorLine, 1, 1, 1, 1)
set_juicr("x_startError", FALSE); set_juicr("x_endError", FALSE); set_juicr("x_error", FALSE);
}
if(get_juicr("x_error") == TRUE) {
tcltk::tkitemconfigure(mainFigureCanvas, x_errorLine, width = 3)
tcltk::tkcoords(mainFigureCanvas, x_errorLine, xPos, yPos, xPos, yPos,
xPos, yPos - 7, xPos, yPos + 8)
set_juicr("x_startError", TRUE)
}
}
if(as.character(tcltk::tkcget(errorYbutton, "-relief")) == "sunken" && as.character(tcltk::tkcget(barPlotButton, "-relief")) == "sunken") {
if(get_juicr("y_startError") == TRUE) {
xyPos <- as.numeric(tcltk::tkcoords(mainFigureCanvas, y_errorLine))
createErrorBarY(xyPos[1], xyPos[2], xyPos[1], yPos)
tcltk::tkcoords(mainFigureCanvas, y_errorLine, 1, 1, 1, 1)
set_juicr("y_startError", FALSE); set_juicr("y_endError", FALSE); set_juicr("y_error", FALSE);
}
if(get_juicr("y_error") == TRUE) {
tcltk::tkitemconfigure(mainFigureCanvas, y_errorLine, width = 3)
tcltk::tkcoords(mainFigureCanvas, y_errorLine, xPos, yPos, xPos, yPos,
xPos - 7, yPos, xPos + 8, yPos)
set_juicr("y_startError", TRUE)
}
}
# re-arm the appropriate error mode for the NEXT click (X and Y are exclusive)
if(as.character(tcltk::tkcget(errorXbutton, "-relief")) == "sunken" && as.character(tcltk::tkcget(barPlotButton, "-relief")) == "sunken") {set_juicr("x_error", TRUE); set_juicr("y_error", FALSE);}
if(as.character(tcltk::tkcget(errorYbutton, "-relief")) == "sunken" && as.character(tcltk::tkcget(barPlotButton, "-relief")) == "sunken") {set_juicr("y_error", TRUE); set_juicr("x_error", FALSE);}
# regression line (line-plot mode): second click commits, first click anchors
if(as.character(tcltk::tkcget(regressionButton, "-relief")) == "sunken" && as.character(tcltk::tkcget(linePlotButton, "-relief")) == "sunken") {
if(get_juicr("x_startRegression") == TRUE) {
xyPos <- as.numeric(tcltk::tkcoords(mainFigureCanvas, x_regressionLine))
createRegressionLine(xyPos[1], xyPos[2], xPos, yPos)
tcltk::tkcoords(mainFigureCanvas, x_regressionLine, 1, 1, 1, 1)
set_juicr("x_startRegression", FALSE); set_juicr("x_endRegression", FALSE); set_juicr("x_regression", FALSE);
}
if(get_juicr("x_regression") == TRUE) {
tcltk::tkitemconfigure(mainFigureCanvas, x_regressionLine, width = 3)
tcltk::tkcoords(mainFigureCanvas, x_regressionLine, xPos, yPos, xPos, yPos)
set_juicr("x_startRegression", TRUE)
}
}
if(as.character(tcltk::tkcget(regressionButton, "-relief")) == "sunken" && as.character(tcltk::tkcget(linePlotButton, "-relief")) == "sunken") {set_juicr("x_regression", TRUE);}
# connected multi-line: each click appends a vertex to the preview line
# (the line is only committed elsewhere, via createMultiLine)
if(as.character(tcltk::tkcget(connectedButton, "-relief")) == "sunken" && as.character(tcltk::tkcget(linePlotButton, "-relief")) == "sunken") {
if(get_juicr("x_startConnected") == TRUE) {
xyPos <- as.numeric(tcltk::tkcoords(mainFigureCanvas, x_connectedLine))
set_juicr("x_connectedPos", xPos); set_juicr("y_connectedPos", yPos)
tcltk::tkcoords(mainFigureCanvas, x_connectedLine, as.character(c(xyPos, xPos, yPos)))
set_juicr("x_startConnected", FALSE); set_juicr("x_endConnected", FALSE); #set_juicr("x_connected", FALSE);
}
if(get_juicr("x_connected") == TRUE) {
tcltk::tkitemconfigure(mainFigureCanvas, x_connectedLine, width = 3)
xyPos <- as.numeric(tcltk::tkcoords(mainFigureCanvas, x_connectedLine))
if(length(xyPos) == 4) {tcltk::tkcoords(mainFigureCanvas, x_connectedLine, xPos, yPos, xPos, yPos)}
else {tcltk::tkcoords(mainFigureCanvas, x_connectedLine, as.character(c(head(xyPos,-2L), xPos, yPos)))}
set_juicr("x_startConnected", TRUE)
}
}
if(as.character(tcltk::tkcget(connectedButton, "-relief")) == "sunken" && as.character(tcltk::tkcget(linePlotButton, "-relief")) == "sunken") {set_juicr("x_connected", TRUE);}
# update summary canvas
updatedSummary <- point_summary()
tcltk::tkitemconfigure(txtCanvas, theDataText, text = updatedSummary)
}
##---------------------------
## END: interactivity
##---------------------------
# Re-open a previously saved juicr .html extraction file, replaying every
# saved widget state, calibration line, and extraction back onto the GUI.
# Depends on the table layout written by saveJuicr() (tables: parameters,
# axes, points, errorbars, regressions, lines) via XML::readHTMLTable.
# NOTE(review): the repeated eval(parse(text = ...)) calls address the
# numbered groupRadio widgets by constructed name -- fragile but kept
# byte-identical here.
if(openJuicrFile != "") {
openJuicr <- function(openJuicrFile){
# collect tables from juicr .html file
inputTables <- XML::readHTMLTable(openJuicrFile)
# update parameters
tcltk::tkdelete(theNotes, "0.0", "end"); tcltk::tkinsert(theNotes, "1.0", inputTables$parameters$theNotes)
tcltk::tkconfigure(circleSmallButton, relief = inputTables$parameters$circleSmallButton)
tcltk::tkconfigure(circleMediumButton, relief = inputTables$parameters$circleMediumButton)
tcltk::tkconfigure(circleBigButton, relief = inputTables$parameters$circleBigButton)
tcltk::tkdelete(circleSizeDisplay, "0.0", "end"); tcltk::tkinsert(circleSizeDisplay, "1.0", inputTables$parameters$circleSizeDisplay)
tcltk::tkconfigure(highQualityButton, relief = inputTables$parameters$highQualityButton)
tcltk::tkconfigure(lowQualityButton, relief = inputTables$parameters$lowQualityButton)
tcltk::tkdelete(qualityDisplay, "0.0", "end"); tcltk::tkinsert(qualityDisplay, "1.0", inputTables$parameters$qualityDisplay)
tcltk::tkconfigure(circleCircleButton, relief = inputTables$parameters$circleCircleButton)
tcltk::tkconfigure(circleDiamondButton, relief = inputTables$parameters$circleDiamondButton)
tcltk::tkconfigure(circleSquareButton, relief = inputTables$parameters$circleSquareButton)
tcltk::tkconfigure(circleClosedButton, relief = inputTables$parameters$circleClosedButton)
tcltk::tkconfigure(circleOpenButton, relief = inputTables$parameters$circleOpenButton)
tcltk::tkconfigure(barSmallButton, relief = inputTables$parameters$barSmallButton)
tcltk::tkconfigure(barMediumButton, relief = inputTables$parameters$barMediumButton)
tcltk::tkconfigure(barBigButton, relief = inputTables$parameters$barBigButton)
tcltk::tkdelete(barSizeDisplay, "0.0", "end"); tcltk::tkinsert(barSizeDisplay, "1.0", inputTables$parameters$barSizeDisplay)
tcltk::tkdelete(figureXminDisplay, "0.0", "end"); tcltk::tkinsert(figureXminDisplay, "1.0", inputTables$parameters$figureXminDisplay)
tcltk::tkdelete(figureXmaxDisplay, "0.0", "end"); tcltk::tkinsert(figureXmaxDisplay, "1.0", inputTables$parameters$figureXmaxDisplay)
tcltk::tkdelete(figureXcaptionDisplay, "0.0", "end"); tcltk::tkinsert(figureXcaptionDisplay, "1.0", inputTables$parameters$figureXcaptionDisplay)
tcltk::tkdelete(figureXunitsDisplay, "0.0", "end"); tcltk::tkinsert(figureXunitsDisplay, "1.0", inputTables$parameters$figureXunitsDisplay)
tcltk::tkdelete(figureYminDisplay, "0.0", "end"); tcltk::tkinsert(figureYminDisplay, "1.0", inputTables$parameters$figureYminDisplay)
tcltk::tkdelete(figureYmaxDisplay, "0.0", "end"); tcltk::tkinsert(figureYmaxDisplay, "1.0", inputTables$parameters$figureYmaxDisplay)
tcltk::tkdelete(figureYcaptionDisplay, "0.0", "end"); tcltk::tkinsert(figureYcaptionDisplay, "1.0", inputTables$parameters$figureYcaptionDisplay)
tcltk::tkdelete(figureYunitsDisplay, "0.0", "end"); tcltk::tkinsert(figureYunitsDisplay, "1.0", inputTables$parameters$figureYunitsDisplay)
tcltk::tkset(meanComboBox, inputTables$parameters$meanComboBox)
tcltk::tkset(errorComboBox, inputTables$parameters$errorComboBox)
# restore the eight group-radio labels and their colour status by name
for(i in 1:8) {
eval(parse(text = paste0(
" tcltk::tkconfigure(groupRadio", i ,"Label, foreground = inputTables$parameters$groupRadio", i ,"LabelStatus)
tcltk::tkdelete(groupRadio", i ,"Label, \"0.0\", \"end\")
tcltk::tkinsert(groupRadio", i ,"Label, \"1.0\", inputTables$parameters$groupRadio", i ,"Label)
"))
)
}
# collect color groups
theColorGroups <- c(inputTables$parameters$groupRadio1Label,
inputTables$parameters$groupRadio2Label,
inputTables$parameters$groupRadio3Label,
inputTables$parameters$groupRadio4Label,
inputTables$parameters$groupRadio5Label,
inputTables$parameters$groupRadio6Label,
inputTables$parameters$groupRadio7Label,
inputTables$parameters$groupRadio8Label)
theColorGroupsColor <- c(inputTables$parameters$groupRadio1LabelStatus,
inputTables$parameters$groupRadio2LabelStatus,
inputTables$parameters$groupRadio3LabelStatus,
inputTables$parameters$groupRadio4LabelStatus,
inputTables$parameters$groupRadio5LabelStatus,
inputTables$parameters$groupRadio6LabelStatus,
inputTables$parameters$groupRadio7LabelStatus,
inputTables$parameters$groupRadio8LabelStatus)
# update calibration lines
tcltk::tkitemconfigure(mainFigureCanvas, x_calibrationLine, width = 5)
loadedX <- as.numeric(inputTables$axes$X); update_X_axis(loadedX[1],
loadedX[2],
loadedX[3],
loadedX[4])
tcltk::tkitemconfigure(mainFigureCanvas, y_calibrationLine, width = 5)
loadedY <- as.numeric(inputTables$axes$Y); update_Y_axis(loadedY[1],
loadedY[2],
loadedY[3],
loadedY[4])
# add autobars
loadedAutoBars <- inputTables$points[inputTables$points$group == "autobar", ]
if(nrow(loadedAutoBars) != 0) {
for(i in 1:nrow(loadedAutoBars)) autoBar(as.numeric(loadedAutoBars$x.coord[i]),
as.numeric(loadedAutoBars$y.coord[i]),
yAdjust = 3)
}
# add autopoints
loadedAutoPoints <- inputTables$points[inputTables$points$group == "auto", ]
if(nrow(loadedAutoPoints) != 0) {
for(i in 1:nrow(loadedAutoPoints)) autoPoint(as.numeric(loadedAutoPoints$x.coord[i]),
as.numeric(loadedAutoPoints$y.coord[i]))
}
# add autoclusters
loadedAutoClusters <- inputTables$points[inputTables$points$group == "cluster", ]
if(nrow(loadedAutoClusters) != 0) {
for(i in 1:nrow(loadedAutoClusters)) autoCluster(as.numeric(loadedAutoClusters$x.coord[i]),
as.numeric(loadedAutoClusters$y.coord[i]))
}
# add manual points: for each non-white group, select its radio, restore the
# group colour, and recreate its saved points
for(i in 1:8) {
eval(parse(text = paste0(
"if(inputTables$parameters$groupRadio", i ,"LabelStatus != \"white\") {
loadedManualPoints <- inputTables$points[inputTables$points$group == inputTables$parameters$groupRadio", i ,"Label, ]
if(nrow(loadedManualPoints) != 0) {
set_juicr(\"pointColor\", inputTables$parameters$groupRadio", i ,"LabelStatus)
tcltk::tcl(groupRadio", i ,", \"select\")
for(i in 1:nrow(loadedManualPoints)) createPoint(as.numeric(loadedManualPoints$x.coord[i]),
as.numeric(loadedManualPoints$y.coord[i]))
}
}"))
)
}
# add error bars TODO: colors
loadedErrorBars <- inputTables$errorbars
if(colnames(loadedErrorBars)[1] != "no extractions") {
for(i in 1:nrow(loadedErrorBars)) {
if(loadedErrorBars$axis[i] == "y") {
eval(parse(text = paste0("tcltk::tcl(groupRadio", which(theColorGroups == loadedErrorBars$group[i]), " , \"select\")")))
set_juicr("pointColor", theColorGroupsColor[which(theColorGroups == loadedErrorBars$group[i])])
createErrorBarY(as.numeric(loadedErrorBars$mean.x[i]),
as.numeric(loadedErrorBars$mean.y[i]),
as.numeric(loadedErrorBars$error.x[i]),
as.numeric(loadedErrorBars$error.y[i]))
}
if(loadedErrorBars$axis[i] == "x") {
eval(parse(text = paste0("tcltk::tcl(groupRadio", which(theColorGroups == loadedErrorBars$group[i]), " , \"select\")")))
set_juicr("pointColor", theColorGroupsColor[which(theColorGroups == loadedErrorBars$group[i])])
createErrorBarX(as.numeric(loadedErrorBars$mean.x[i]),
as.numeric(loadedErrorBars$mean.y[i]),
as.numeric(loadedErrorBars$error.x[i]),
as.numeric(loadedErrorBars$error.y[i]))
}
}
}
# add regression lines, restoring each line's group colour first
loadedRegressions <- inputTables$regressions
if(colnames(loadedRegressions)[1] != "no extractions") {
for(i in 1:nrow(loadedRegressions)) {
eval(parse(text = paste0("tcltk::tcl(groupRadio", which(theColorGroups == loadedRegressions$group[i]), " , \"select\")")))
set_juicr("pointColor", theColorGroupsColor[which(theColorGroups == loadedRegressions$group[i])])
createRegressionLine(as.numeric(loadedRegressions$x1.coord[i]),
as.numeric(loadedRegressions$y1.coord[i]),
as.numeric(loadedRegressions$x2.coord[i]),
as.numeric(loadedRegressions$y2.coord[i]))
}
}
# add connected multi-lines, one tk line item per saved "set"
loadedMultiLines <- inputTables$lines
if(colnames(loadedMultiLines)[1] != "no extractions") {
for(j in unique(inputTables$lines$set)) {
theSet <- inputTables$lines[ which(j == inputTables$lines$set), ]
eval(parse(text = paste0("tcltk::tcl(groupRadio", which(theColorGroups == theSet$group[1]), " , \"select\")")))
setCoords <- unlist(strsplit(paste(theSet[, 3], theSet[, 4], sep = " ", collapse = " "), " "))
set_juicr("pointColor", theColorGroupsColor[which(theColorGroups == theSet$group[1])])
createMultiLine(setCoords)
}
}
# finally refresh the summary panel with everything just restored
tcltk::tkitemconfigure(txtCanvas, theDataText, text = point_summary())
}
openJuicr(openJuicrFile)
}
####################################
# START: mouse and keyboard bindings
# Every interactive canvas item routes <Button-1> to mainFigureClick and
# <Motion> to mainFigureMouseOver so clicks/previews work regardless of what
# the cursor is over; extraction items additionally get hover highlighting
# (<Any-Enter>/<Any-Leave>) and right-click deletion (<Button-3>).
tcltk::tkitembind(mainFigureCanvas, mainFigure, "<Motion>", mainFigureMouseOver)
tcltk::tkitembind(mainFigureCanvas, mainFigure, "<Button-1>", mainFigureClick)
tcltk::tkitembind(mainFigureCanvas, x_calibrationLine, "<Button-1>", mainFigureClick)
tcltk::tkitembind(mainFigureCanvas, x_calibrationLine, "<Motion>", mainFigureMouseOver)
tcltk::tkitembind(mainFigureCanvas, y_calibrationLine, "<Button-1>", mainFigureClick)
tcltk::tkitembind(mainFigureCanvas, y_calibrationLine, "<Motion>", mainFigureMouseOver)
tcltk::tkitembind(mainFigureCanvas, x_errorLine, "<Button-1>", mainFigureClick)
tcltk::tkitembind(mainFigureCanvas, x_errorLine, "<Motion>", mainFigureMouseOver)
tcltk::tkitembind(mainFigureCanvas, y_errorLine, "<Button-1>", mainFigureClick)
tcltk::tkitembind(mainFigureCanvas, y_errorLine, "<Motion>", mainFigureMouseOver)
tcltk::tkitembind(mainFigureCanvas, x_regressionLine, "<Button-1>", mainFigureClick)
tcltk::tkitembind(mainFigureCanvas, x_regressionLine, "<Motion>", mainFigureMouseOver)
tcltk::tkitembind(mainFigureCanvas, x_connectedLine, "<Button-1>", mainFigureClick)
tcltk::tkitembind(mainFigureCanvas, x_connectedLine, "<Motion>", mainFigureMouseOver)
tcltk::tkitembind(mainFigureCanvas, "point", "<Button-1>", mainFigureClick)
# hover-enter on a point: highlight manual points (auto markers are images and
# are left alone) and show the coordinate tooltip with its drop shadow
tcltk::tkitembind(mainFigureCanvas, "point", "<Any-Enter>", function() {
if((point_getTags("current")[2] != "autobar") && (point_getTags("current")[2] != "auto") && (point_getTags("current")[2] != "cluster")) {set_juicr("tempPointColor", tcltk::tkitemcget(mainFigureCanvas, "current", "-fill")); tcltk::tkitemconfigure(mainFigureCanvas, "current", width = 4, outline = "tomato3", fill = "tomato3");}
theCoords <- point_getCoordinates("current")
theCOORD <- sprintf(" %5s,%5s ", round(theCoords[1], 1), round(theCoords[2], 1))
tcltk::tkcoords(mainFigureCanvas, hoverText, round(theCoords[1], 2) + 50, round(theCoords[2], 2) - 2)
tcltk::tkitemconfigure(mainFigureCanvas, hoverText, text = theCOORD)
tcltk::tkitemconfigure(mainFigureCanvas, hoverShadow, image = hoverImage)
tcltk::tkcoords(mainFigureCanvas, hoverShadow, round(theCoords[1], 2) + 13, round(theCoords[2], 2) - 9)
tcltk::tkitemraise(mainFigureCanvas, hoverShadow)
tcltk::tkitemraise(mainFigureCanvas, hoverText)
})
# hover-leave: restore the point's saved colour and hide the tooltip
tcltk::tkitembind(mainFigureCanvas, "point", "<Any-Leave>", function() {
if((point_getTags("current")[2] != "autobar") && (point_getTags("current")[2] != "auto") && (point_getTags("current")[2] != "cluster")) {tcltk::tkitemconfigure(mainFigureCanvas, "current", width = 1, outline = "white", fill = get_juicr("tempPointColor"));}
tcltk::tkitemconfigure(mainFigureCanvas, hoverText, text = "")
tcltk::tkcoords(mainFigureCanvas, hoverText, 0, 0)
tcltk::tkitemconfigure(mainFigureCanvas, hoverShadow, image = "")
tcltk::tkcoords(mainFigureCanvas, hoverShadow, 0, 0)
})
tcltk::tkitembind(mainFigureCanvas, "point", "<Button-3>", deletePoint)
tcltk::tkitembind(mainFigureCanvas, "point", "<Motion>", mainFigureMouseOver)
tcltk::tkitembind(mainFigureCanvas, "error", "<Button-1>", mainFigureClick)
tcltk::tkitembind(mainFigureCanvas, "error", "<Any-Enter>", function() {tcltk::tkitemconfigure(mainFigureCanvas, "current", width = 4, fill = "tomato3")})
tcltk::tkitembind(mainFigureCanvas, "error", "<Any-Leave>", function() {tcltk::tkitemconfigure(mainFigureCanvas, "current", width = 3, fill = get_juicr("pointColor"))})
tcltk::tkitembind(mainFigureCanvas, "error", "<Button-3>", deletePoint)
tcltk::tkitembind(mainFigureCanvas, "error", "<Motion>", mainFigureMouseOver)
tcltk::tkitembind(mainFigureCanvas, "regression", "<Button-1>", mainFigureClick)
tcltk::tkitembind(mainFigureCanvas, "regression", "<Any-Enter>", function() {tcltk::tkitemconfigure(mainFigureCanvas, "current", width = 4, fill = "tomato3")})
tcltk::tkitembind(mainFigureCanvas, "regression", "<Any-Leave>", function() {tcltk::tkitemconfigure(mainFigureCanvas, "current", width = 3, fill = get_juicr("pointColor"))})
tcltk::tkitembind(mainFigureCanvas, "regression", "<Button-3>", deletePoint)
tcltk::tkitembind(mainFigureCanvas, "regression", "<Motion>", mainFigureMouseOver)
tcltk::tkitembind(mainFigureCanvas, "line", "<Button-1>", mainFigureClick)
tcltk::tkitembind(mainFigureCanvas, "line", "<Any-Enter>", function() {tcltk::tkitemconfigure(mainFigureCanvas, "current", width = 4, fill = "tomato3")})
tcltk::tkitembind(mainFigureCanvas, "line", "<Any-Leave>", function() {tcltk::tkitemconfigure(mainFigureCanvas, "current", width = 3, fill = get_juicr("pointColor"))})
tcltk::tkitembind(mainFigureCanvas, "line", "<Button-3>", deletePoint)
tcltk::tkitembind(mainFigureCanvas, "line", "<Motion>", mainFigureMouseOver)
# any keystroke in a labelled text-entry widget refreshes the summary panel
# (widget names are assembled by string, hence the eval(parse(...)))
theInputText <- c(paste0("groupRadio", 1:8, "Label"), "figureXcaptionDisplay", "figureYcaptionDisplay", "figureXunitsDisplay", "figureYunitsDisplay", "figureXminDisplay", "figureXmaxDisplay", "figureYminDisplay", "figureYmaxDisplay")
for(i in theInputText) eval(parse(text = paste0("tcltk::tkbind(", i, ", \"<Key>\", function() {tcltk::tkitemconfigure(txtCanvas, theDataText, text = point_summary())})")))
####################################
# END: mouse and keyboard bindings
}
#############################################################################
#############################################################################
#############################################################################
# START: LOAD & PROCESS FIGURE IMAGE
# FIGURE IMAGE PROCESSING
# Derive the name of the juicr-standardized copy of an image file: everything
# before the first "." in the name, suffixed with "_juicr.png".
# NOTE(review): truncating at the FIRST dot mangles dotted names such as
# "fig.1.png" (and relative paths like "./fig.png") -- confirm inputs are
# plain single-extension file names before relying on this.
get_standardizedFileNames <- function(aFileName) {
  baseName <- sub("\\..*$", "", aFileName)
  paste0(baseName, "_juicr.png")
}
standardizeImage <- function(aFileName) {
  # Read a figure image, optionally downscale it to the package-standard
  # width, and write the result alongside the original as "<name>_juicr.png".
  #
  # aFileName: path to a source image readable by EBImage::readImage().
  # Returns: the file name of the standardized copy.
  #
  # NOTE(review): relies on `standardizeTheImage` and `standardSize` from the
  # enclosing scope -- confirm they are defined before this is called.
  newImage <- EBImage::readImage(aFileName)
  if(standardizeTheImage == TRUE) {
    # only shrink: images already narrower than standardSize are left as-is
    if(dim(newImage)[1] > standardSize) newImage <- EBImage::resize(newImage, w = standardSize)
  }
  # Delegate output naming to get_standardizedFileNames() so the written file
  # and the returned name can never drift apart (previously the same
  # strsplit-based derivation was duplicated inline here).
  standardizedName <- get_standardizedFileNames(aFileName)
  EBImage::writeImage(x = newImage,
                      file = standardizedName,
                      type = "png")
  return(standardizedName)
}
# END: LOAD & PROCESS FIGURE IMAGE
##################################
# # # # # # # # # # # # # # # # # # #
##### START OF JUICR GUI WINDOW #####
# # # # # # # # # # # # # # # # # # #
# top-level Tk window hosting every juicr frame
mainExtractorWindow <- tcltk::tktoplevel(bg = "white", width = 2000, height = 1000)
tcltk::tktitle(mainExtractorWindow) <- "juicr: image data extractor"
tcltk::tcl("wm", "iconphoto", mainExtractorWindow, juicrLogo)
# create mainExtractorWindow environment to store globals
# (main.env is a private key/value store shared by the helpers below,
# avoiding assignments into the caller's or global environment)
main.env <- new.env()
set_main <- function(aMainVar, aValue) assign(aMainVar, aValue, envir = main.env)
get_main <- function(aMainVar) get(aMainVar, envir = main.env)
# image summary functions
# half-size thumbnail of the currently displayed figure (used as a preview)
theFigureSmall <- tcltk::tcl("image", "create", "photo")
getFigureSmall <- function() return(theFigureSmall)
update_FigureSmall <- function() {
# copy the current frame's figure into the thumbnail at 1:2 subsampling
tcltk::tcl(theFigureSmall, "copy", get_allJuicrImages()[getCurrentJuicrFrame()], "-subsample", 2, 2)
tcltk::tkconfigure(button_previewImage, image = getFigureSmall())
}
# START of multi-juicr frames
# running count of frames created in this window
set_main("numberJuicrFrames", 0)
addAJuicrFrame <- function() set_main("numberJuicrFrames", get_main("numberJuicrFrames") + 1)
# Build one juicr frame for a figure.  When sourceHTML == TRUE, aFileName is a
# saved juicr .html file: the original and standardized images are decoded
# from their base64 <img> payloads (via RCurl::base64Decode) and rewritten to
# disk, and the extraction tables travel along for replay.  Otherwise
# aFileName is a raw image that gets standardized first.  Returns the Tk ID
# of the created frame.
# NOTE(review): the frame variable is assembled by name via eval(parse(...));
# "tempOrginalFileName" is a long-standing typo of "Original" but is purely
# local, so it is left unchanged here.
createNewJuicrFrame <- function(aFileName, sourceHTML) {
if(sourceHTML == TRUE) {
# collect tables from juicr .html file
inputTables <- XML::readHTMLTable(aFileName)
# collect standardized figure from juicr .html file
juicrHTML = XML::htmlParse(aFileName)
inputImages <- XML::xpathSApply(juicrHTML, "//table/tr/td/img", XML::xmlAttrs)["src", ]
sourceHTML <- aFileName
# re-create original image but avoid erasing original if in folder
tempOrginalFileName <- paste0("temp_", inputTables$files$file_name[1])
file.create(tempOrginalFileName)
tempImageFile <- file(tempOrginalFileName, "wb")
writeBin(RCurl::base64Decode(sub(".*,", "", inputImages[1]), mode = "raw"),
tempImageFile, useBytes = TRUE)
close(tempImageFile)
aFileName <- tempOrginalFileName
# re-create standardized image
file.create(inputTables$files$file_name[2])
tempImageFile <- file(inputTables$files$file_name[2], "wb")
writeBin(RCurl::base64Decode(sub(".*,", "", inputImages[2]), mode = "raw"),
tempImageFile, useBytes = TRUE)
close(tempImageFile)
theStandardizedImageFile <- inputTables$files$file_name[2]
} else {
theStandardizedImageFile <- standardizeImage(aFileName)
sourceHTML <- ""
}
#theOriginalFigure <- EBImage::readImage(theStandardizedImageFile)
# the figure displayed in frame widget
theFigure <- tcltk::tcl("image", "create", "photo", file = theStandardizedImageFile)
# the figure not displayed but gets juiced for extractions
theFigureJuiced <- EBImage::readImage(theStandardizedImageFile)
addAJuicrFrame()
add_allJuicrImages(as.character(theFigure))
# build the numbered frame variable and populate it via createJuicrFrame()
eval(parse(text = paste0("juicrFrame", get_main("numberJuicrFrames"),
" <- tcltk::tkframe(mainExtractorWindow, background = \"white\"); createJuicrFrame(juicrFrame", get_main("numberJuicrFrames"),
", aFileName, theStandardizedImageFile, theFigure, theFigureJuiced, ", animateDelay,
", sourceHTML);")))
# only the first frame is packed immediately; it also seeds the preview thumbnail
if(get_main("numberJuicrFrames") == 1) {
eval(parse(text = "tcltk::tkpack(juicrFrame1)"))
tcltk::tcl(theFigureSmall, "copy", theFigure, "-subsample", 2, 2)
}
#file.remove(theStandardizedImageFile)
#update_ArrowButtons(); if(animateDelay != FALSE) {tcltk::tcl("update"); Sys.sleep(1);}
eval(parse(text = paste0("return(as.character(juicrFrame", get_main("numberJuicrFrames"), "))")))
}
# Creates a juicr frame for each file in aFileList (showing a progress bar when
# more than one file is given) and returns the vector of new frame widget paths.
# sourceHTML is forwarded to createNewJuicrFrame (TRUE = files are juicr .html).
# Side effect: resets the current-frame index to 1.
createManyJuicrFrames <- function(aFileList, sourceHTML = FALSE) {
theJuicrFrames <- c()
if(length(aFileList) != 1) {
tempPB <- tcltk::tkProgressBar(title = "juicr: Processing files", label = "",
min = 1, max = length(aFileList), initial = 1, width = 500)
for(i in 1:length(aFileList)) {
tcltk::setTkProgressBar(tempPB, i, title = paste("juicr: Processing files = ", basename(aFileList[i])), "")
theJuicrFrames <- c(theJuicrFrames, createNewJuicrFrame(aFileList[i], sourceHTML)[1])
#update_ArrowButtons(); if(animateDelay != FALSE) {tcltk::tcl("update"); Sys.sleep(1); };
tcltk::tcl("update");
}
close(tempPB)
} else {
theJuicrFrames <- c(theJuicrFrames, createNewJuicrFrame(aFileList, sourceHTML)[1])
#update_ArrowButtons(); if(animateDelay != FALSE) {tcltk::tcl("update"); Sys.sleep(1); };
}
set_main("currentJuicrFrame", 1)
return(theJuicrFrames)
}
# juicr frame management
# currentJuicrFrame: 1-based index of the frame currently packed (0 = none yet)
set_main("currentJuicrFrame", 0)
getCurrentJuicrFrame <- function() return(get_main("currentJuicrFrame"))
# unpack the current frame and pack the previous one (no-op at the first frame)
previousJuicrFrame <- function() {
if(getCurrentJuicrFrame() <= 1) return()
tcltk::tkpack.forget(get_main("allJuicrFrames")[getCurrentJuicrFrame()])
set_main("currentJuicrFrame", getCurrentJuicrFrame() - 1)
tcltk::tkpack(get_main("allJuicrFrames")[getCurrentJuicrFrame()])
return()
}
# unpack the current frame and pack the next one (no-op at the last frame)
nextJuicrFrame <- function() {
if(getCurrentJuicrFrame() == length(get_main("allJuicrFrames"))) return()
tcltk::tkpack.forget(get_main("allJuicrFrames")[getCurrentJuicrFrame()])
set_main("currentJuicrFrame", getCurrentJuicrFrame() + 1)
tcltk::tkpack(get_main("allJuicrFrames")[getCurrentJuicrFrame()])
return()
}
# allJuicrFrames: character vector of frame widget paths, in creation order
set_main("allJuicrFrames", c())
get_allJuicrFrames <- function() return(get_main("allJuicrFrames"))
set_allJuicrFrames <- function(aJuicrFrameList) set_main("allJuicrFrames", aJuicrFrameList)
# create frames for someJuicrFiles and append their widget paths to the list
add_allJuicrFrames <- function(someJuicrFiles, sourceHTML = FALSE) {
set_allJuicrFrames(c(get_allJuicrFrames(), createManyJuicrFrames(someJuicrFiles, sourceHTML)))
}
# counts of frames after / before the current one (used for the arrow-button labels)
next_numberJuicrFrames <- function() return(length(get_main("allJuicrFrames")) - getCurrentJuicrFrame())
previous_numberJuicrFrames <- function() return(length(get_main("allJuicrFrames")) - next_numberJuicrFrames() - 1)
# Prompts the user for one or more previously saved juicr .html files and
# returns the chosen paths as a character vector (empty selection => character(0)).
get_JuicrFilenames <- function() {
  chosen <- tcltk::tkgetOpenFile(
    filetypes = "{{juicr files} {_juicr.html}} {{All files} *}",
    multiple = TRUE,
    title = "juicr: open 1 or many juicr files")
  as.character(chosen)
}
# Prompts the user for one or more plot image files (jpg/png/tiff) and
# returns the chosen paths as a character vector.
get_ImageFilenames <- function() {
  chosen <- tcltk::tkgetOpenFile(
    filetypes = "{{image files} {.jpg .png .tiff}} {{All files} *}",
    multiple = TRUE,
    title = "juicr: open 1 or many image files with a plot to extract")
  as.character(chosen)
}
# Opens a file dialog pinned to the current frame's source image file.
# Fix: previously returned the raw tclObj; now coerced with as.character() for
# consistency with get_JuicrFilenames()/get_ImageFilenames() (the only caller,
# the preview button, discards the result, so this is backward-compatible).
get_SourceFilenames <- function() {
  aFile <- tcltk::tkgetOpenFile(filetypes = paste0("{{source file} {", theFigureFile[getCurrentJuicrFrame()], "}}"),
                                multiple = TRUE,
                                title = "juicr: source of the current image")
  return(as.character(aFile))
}
# update_theFigureFile [was global] function(newFiles) theFigureFile <- c(theFigureFile, newFiles)
# Appends newly opened file paths to theFigureFile in the enclosing GUI scope.
# Fix: the local `<-` made this function a no-op (the assignment never escaped
# the function body, so image summaries/source lookups broke for files added
# after startup); `<<-` performs the superassignment the original comment implies.
update_theFigureFile <- function(newFiles) theFigureFile <<- c(theFigureFile, newFiles)
# allJuicrImages: tk photo-image names, one per frame, in creation order.
set_main("allJuicrImages", c())
get_allJuicrImages <- function() get_main("allJuicrImages")
set_allJuicrImages <- function(aJuicrImagesList) set_main("allJuicrImages", aJuicrImagesList)
# Appends one image name to the stored list.
add_allJuicrImages <- function(someJuicrImage) set_allJuicrImages(c(get_allJuicrImages(), someJuicrImage))
# START: image manipulation bar
# fhead: the bottom toolbar holding the open/navigate/save/juice buttons
fhead <- tcltk::tkframe(mainExtractorWindow, relief = "flat", bd = "1", background = "lightgrey", width = 1000)
# get images or juicr html images
# "add new image(s)": prompts for raw image files, creates a frame per file,
# then enables the preview/save/juice/navigation controls
button_OpenNewImage <- tcltk::tkbutton(fhead, compound = "top",
text = "add new\n image(s)",
width = 80, height = 80, image = theOrange,
relief = "flat",
command = function(){
newFrames <- get_ImageFilenames();
# character(0) means the dialog was cancelled
if(!identical(newFrames, character(0))) {
tcltk::tkconfigure(button_OpenNewImage, text = paste0("adding ", length(newFrames), "\nimages..."));
tcltk::tcl("update");
add_allJuicrFrames(newFrames);
update_theFigureFile(newFrames);
update_ArrowButtons();
tcltk::tkconfigure(button_OpenNewImage, text = "add new\n image(s)");
tcltk::tkconfigure(button_previewImage, state = "active")
tcltk::tkconfigure(button_SaveAllImages, state = "active")
tcltk::tkconfigure(button_JuiceAllImages, state = "active")
tcltk::tkconfigure(button_leftArrow, state = "active");
tcltk::tkconfigure(button_rightArrow, state = "active");
}
})
# "add juiced image(s)": same flow but for previously saved juicr .html files
# (sourceHTML = TRUE re-creates the embedded images/tables)
button_OpenJuicedImage <- tcltk::tkbutton(fhead, compound = "top",
text = "add juiced\n image(s)",
width = 80, height = 80, image = orangeJuice,
relief = "flat",
command = function(){
newFrames <- get_JuicrFilenames();
if(!identical(newFrames, character(0))) {
tcltk::tkconfigure(button_OpenJuicedImage, text = paste0("adding ", length(newFrames), "\nimages..."));
tcltk::tcl("update");
add_allJuicrFrames(newFrames, TRUE);
update_theFigureFile(newFrames);
update_ArrowButtons();
tcltk::tkconfigure(button_OpenJuicedImage, text = "add juiced\n image(s)");
tcltk::tkconfigure(button_previewImage, state = "active")
tcltk::tkconfigure(button_SaveAllImages, state = "active")
tcltk::tkconfigure(button_JuiceAllImages, state = "active")
tcltk::tkconfigure(button_leftArrow, state = "active");
tcltk::tkconfigure(button_rightArrow, state = "active");
}
})
# start of multi-image toggle buttons
# Label for the "previous" arrow: placeholder text until any frames exist,
# otherwise the number of frames before the current one.
getText_leftArrow <- function() {
  if (length(get_main("allJuicrFrames")) == 0) {
    "no other\nimages"
  } else {
    paste0("previous\n", previous_numberJuicrFrames(), " images")
  }
}
# Label for the "next" arrow: placeholder text until any frames exist,
# otherwise the number of frames after the current one.
getText_rightArrow <- function() {
  if (length(get_main("allJuicrFrames")) == 0) {
    "no other\nimages"
  } else {
    paste0("next\n", next_numberJuicrFrames(), " images")
  }
}
# Refreshes both arrow buttons' labels and re-activates them.
update_ArrowButtons <- function() {
  tcltk::tkconfigure(button_leftArrow, text = getText_leftArrow(), state = "active")
  tcltk::tkconfigure(button_rightArrow, text = getText_rightArrow(), state = "active")
}
# left/right arrows step through the loaded frames; disabled until images exist
button_leftArrow <- tcltk::tkbutton(fhead, compound = "top", state = "disabled",
text = getText_leftArrow(),
width = 80, height = 80, image = leftArrowImage,
relief = "flat",
command = function(){ previousJuicrFrame(); update_ArrowButtons(); update_FigureSmall();})
# thumbnail of the current image; clicking opens a dialog pinned to its source file
button_previewImage <- tcltk::tkbutton(fhead, compound = "center", font = "Helvetica 8 bold", state = "disabled",
foreground = "tomato3",
text = "\n\n\n\n\n source",
width = 80, height = 80, image = getFigureSmall(),
relief = "flat",
command = function(){get_SourceFilenames();})
# text widget that shows file/size/save details on hover (see bindings below)
imageInformation <- tcltk::tktext(fhead, foreground = "lightgrey",
height = 6, width = 80, background = "lightgrey",
relief = "flat", font = "Helvetica 7")
button_rightArrow <- tcltk::tkbutton(fhead, compound = "top", state = "disabled",
text = getText_rightArrow(),
width = 80, height = 80, image = rightArrowImage,
relief = "flat",
command = function(){ nextJuicrFrame(); update_ArrowButtons(); update_FigureSmall();})
# save multi-image button
# invokes each frame's own "save" sub-widget (hard-coded tk path ".4.4.2")
button_SaveAllImages <- tcltk::tkbutton(fhead, compound = "top", state = "disabled",
text = "save all\n juiced image(s)",
width = 80, height = 80, image = juiceContainerSmall,
relief = "flat",
command = function() {
#theSaveDirectory <- tcltk::tkchooseDirectory()
someJuicrFrames <- get_allJuicrFrames()
for(i in 1:length(someJuicrFrames)) {
tcltk::tkconfigure(button_SaveAllImages, text = paste0("saving ", i, " of ", length(someJuicrFrames), "\n.html files"))
tcltk::tcl("update")
tcltk::tkinvoke(paste0(someJuicrFrames[i], ".4.4.2"));
}
tcltk::tkconfigure(button_SaveAllImages, text = "save all\n juiced image(s)")
})
# save multi-image button
# invokes each frame's "juice" sub-widget (hard-coded tk path ".2.1.1.1")
button_JuiceAllImages <- tcltk::tkbutton(fhead, compound = "center", state = "disabled",
text = "juice all\nimages",
width = 80, height = 80, image = juicrLogoSmall,
relief = "flat",
command = function() {
someJuicrFrames <- get_allJuicrFrames()
for(i in 1:length(someJuicrFrames)) {
tcltk::tkconfigure(button_JuiceAllImages, text = paste0("juicing ", i, " of ", length(someJuicrFrames), "\nimages"))
tcltk::tcl("update")
tcltk::tkinvoke(paste0(someJuicrFrames[i], ".2.1.1.1"));
}
tcltk::tkconfigure(button_JuiceAllImages, text = "juice all\nimages")
})
# lay the toolbar out on a single grid row, left to right:
# open-new | open-juiced | left-arrow | preview | right-arrow | info | juice-all | save-all
tcltk::tkgrid(button_OpenNewImage , row = 0, column = 0, sticky = "w", padx = 10, pady = 10)
tcltk::tkgrid(button_OpenJuicedImage, row = 0, column = 1, sticky = "w", padx = 10, pady = 10)
tcltk::tkgrid(button_leftArrow, row = 0, column = 2, sticky = "e", padx = 10, pady = 10)
tcltk::tkgrid(button_previewImage, row = 0, column = 3, padx = 10, pady = 10)
tcltk::tkgrid(button_rightArrow, row = 0, column = 4, sticky = "w", padx = 10, pady = 10)
tcltk::tkgrid(imageInformation, row = 0, column = 5, sticky = "w", padx = 10, pady = 10)
tcltk::tkgrid(button_SaveAllImages, row = 0, column = 7, sticky = "e", padx = 10, pady = 10)
tcltk::tkgrid(button_JuiceAllImages, row = 0, column = 6, sticky = "e", padx = 10, pady = 10)
# give the arrow/juice columns spare horizontal space
tcltk::tkgrid.columnconfigure(fhead, 2, weight = 3)
tcltk::tkgrid.columnconfigure(fhead, 6, weight = 2)
tcltk::tkpack(fhead, side = "bottom", fill = "x")
# hover highlights for the toolbar buttons
tcltk::tkbind(button_OpenNewImage, "<Any-Enter>", function() {tcltk::tkconfigure(button_OpenNewImage, background = "floral white");})
tcltk::tkbind(button_OpenNewImage, "<Any-Leave>", function() {tcltk::tkconfigure(button_OpenNewImage, background = "grey95");})
tcltk::tkbind(button_OpenJuicedImage, "<Any-Enter>", function() {tcltk::tkconfigure(button_OpenJuicedImage, background = "floral white");})
tcltk::tkbind(button_OpenJuicedImage, "<Any-Leave>", function() {tcltk::tkconfigure(button_OpenJuicedImage, background = "grey95");})
tcltk::tkbind(button_leftArrow, "<Any-Enter>", function() {tcltk::tkconfigure(button_leftArrow, background = "floral white");})
tcltk::tkbind(button_leftArrow, "<Any-Leave>", function() {tcltk::tkconfigure(button_leftArrow, background = "grey95");})
# hovering the preview button fills imageInformation with file name, size,
# dimensions, and when/where a juiced .html was last saved
# NOTE(review): theFigureFile may hold several paths; `if(theFigureFile != "")`
# on a length > 1 vector warns (and errors in R >= 4.2) — confirm against callers.
tcltk::tkbind(button_previewImage, "<Any-Enter>", function() {
if(theFigureFile != "") {
theSavedFilename <- paste0(tools::file_path_sans_ext(basename(theFigureFile[getCurrentJuicrFrame()])), "_juicr.html")
theLastSavedTime <- "never"
if(file.exists(theSavedFilename) == TRUE) {
theLastSavedTime <- paste(file.info(theSavedFilename)$ctime)
} else {
theSavedFilename <- "NA"
}
if(file.exists(theFigureFile[getCurrentJuicrFrame()]) == TRUE) {
theImageSummary <- paste("current image: ", theFigureFile[getCurrentJuicrFrame()],
"\nsize: ", file.size(theFigureFile[getCurrentJuicrFrame()]),
"\ndimentions: ", paste(paste(dim(EBImage::readImage(theFigureFile[getCurrentJuicrFrame()]))[1:2], collapse = " by "), "pixels"),
"\n\nlast saved: ", theLastSavedTime,
"\nsaved filename: ", theSavedFilename,
"\n\n");
} else {
# source image no longer on disk: show a reduced summary
theImageSummary <- paste("current image: ", theFigureFile[getCurrentJuicrFrame()],
"\n\nlast saved: ", theLastSavedTime,
"\nsaved filename: ", theSavedFilename,
"\n\n");
}
tcltk::tkinsert(imageInformation, "1.0", theImageSummary);
tcltk::tkconfigure(imageInformation, foreground = "black");
tcltk::tkconfigure(button_previewImage, text = "\n\n\n\n\n get source");
}
})
# leaving the preview fades the info text back to the background colour
tcltk::tkbind(button_previewImage, "<Any-Leave>", function() {tcltk::tkconfigure(imageInformation, foreground = "lightgrey");
tcltk::tkconfigure(button_previewImage, text = "\n\n\n\n\n source");
})
tcltk::tkbind(button_rightArrow, "<Any-Enter>", function() {tcltk::tkconfigure(button_rightArrow, background = "floral white");})
tcltk::tkbind(button_rightArrow, "<Any-Leave>", function() {tcltk::tkconfigure(button_rightArrow, background = "grey95");})
tcltk::tkbind(button_SaveAllImages, "<Any-Enter>", function() {tcltk::tkconfigure(button_SaveAllImages, background = "floral white");})
tcltk::tkbind(button_SaveAllImages, "<Any-Leave>", function() {tcltk::tkconfigure(button_SaveAllImages, background = "grey95");})
tcltk::tkbind(button_JuiceAllImages, "<Any-Enter>", function() {tcltk::tkconfigure(button_JuiceAllImages, foreground = "orange");})
tcltk::tkbind(button_JuiceAllImages, "<Any-Leave>", function() {tcltk::tkconfigure(button_JuiceAllImages, foreground = "black");})
# CATCHING FILES INPUTED VIA GUI_juicr FUNCTION CALL
# Files passed directly to the GUI call are loaded at startup; when more than
# one file is supplied, the navigation arrows are enabled as well.
if(theFigureFile != "") {
  add_allJuicrFrames(theFigureFile)
  if(length(theFigureFile) != 1) {
    update_ArrowButtons()
    tcltk::tkconfigure(button_leftArrow, state = "active")
    tcltk::tkconfigure(button_rightArrow, state = "active")
  }
  tcltk::tkconfigure(button_previewImage, state = "active")
  tcltk::tkconfigure(button_SaveAllImages, state = "active")
  tcltk::tkconfigure(button_JuiceAllImages, state = "active")
}
if(theJuicrFile != "") {
  add_allJuicrFrames(theJuicrFile, TRUE)
  if(length(theJuicrFile) != 1) {
    update_ArrowButtons()
    tcltk::tkconfigure(button_leftArrow, state = "active")
    tcltk::tkconfigure(button_rightArrow, state = "active")
  }
  tcltk::tkconfigure(button_previewImage, state = "active")
  tcltk::tkconfigure(button_SaveAllImages, state = "active")
  tcltk::tkconfigure(button_JuiceAllImages, state = "active")
}
# bring the main window to the foreground
tcltk::tkfocus(mainExtractorWindow)
# # # # # # # # # # # # # # # # #
##### END OF JUICR
# # # # # # # # # # # # # # # # #
# TCLTK GARBAGE COLLECTION
# deletes all images (need better solution for avoiding memory leaks)
# Fix: tkimage.names() was called without the tcltk:: prefix used everywhere
# else in this file; if tcltk is not attached (only referenced via ::), the
# unqualified call fails and the cleanup silently never runs.
imageCleanUp <- function() {
  oldImages <- as.character(tcltk::tkimage.names())
  # only remove auto-named photos ("image1", "image2", ...), not named resources
  oldImages <- oldImages[grep("image", oldImages)]
  for(someImage in oldImages) tcltk::tkimage.delete(someImage)
}
tcltk::tkbind(mainExtractorWindow, "<Destroy>", imageCleanUp)
# only have one juicr window open at a time
tcltk::tkwait.window(mainExtractorWindow)
tcltk::tkdestroy(mainExtractorWindow)
} else {
# tcltk could not be loaded: report platform-specific install guidance and abort
.juicrPROBLEM("error",
paste("\n tcltk package is missing and is needed to generate the GUI.",
" --> If using Windows/Linux, try 'install.packages('tcltk')'",
" --> If using a Mac, install latest XQuartz application (X11) from:",
" https://www.xquartz.org/",
sep = "\n"))
}
# Final status note telling the user where saved output landed.
# Fix: the trailing "/n" was a typo for the newline escape "\n".
message(paste0("juicr exit note: if files were saved, they are found here:\n ", getwd(), "\n"))
return("")
}
|
9a6946dbbed5af403d880d1b5dfdb21d0c0fa488
|
21c35042573da908206f7736bc54265846e0b35d
|
/Foo_Using ARIMA and Financial Ratios for Portfolio Optimization.R
|
a85133596f7c2fa369974f366048de456ba2a20a
|
[
"MIT"
] |
permissive
|
biaohuan/StatisticalFinance
|
837b2ead6794d518855d049a375f1b6c52a51306
|
da95b4475967b9e020cd165c7f85fbee3cd47bcd
|
refs/heads/master
| 2020-06-04T22:56:56.705081
| 2019-07-01T11:11:25
| 2019-07-01T11:11:25
| 192,223,386
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,170
|
r
|
Foo_Using ARIMA and Financial Ratios for Portfolio Optimization.R
|
#---
#title: "ARIMA, Sharpe and Beta for Asset Selection and Optimization"
#author: "Biao Huan Foo"
#---
#library(GMCM)
#library(ggplot2)
#library(xts)
#library(forecast)
#library(PortfolioAnalytics)
#library(urca)
#Set Working Directory to full_history (Contains csv of all stocks)
#Step 1.1: Cleaning of Assets - Less than 5% Missing Values
# NOTE(review): pattern = "*.csv" is interpreted as a regular expression, not a
# glob, so it matches any name containing "csv"; glob2rx("*.csv") would be stricter.
masterList = list.files(pattern="*.csv")
# read the first 2520 rows (~10 trading years) of columns 1 (date) and 7
# (adjclose) for each csv, binding each data frame under its file name
for (i in 1:length(masterList)){
print(i)
assign(masterList[i], read.csv(masterList[i])[1:2520,c(1,7)])
}
masterList2=lapply(ls(pattern="*.csv"),get)
# nadf: one row per asset with the fraction of missing adjclose values
nadf<-data.frame()
for(i in 1:length(masterList2)){
print(i)
nadf<-rbind(nadf,mean(is.na(masterList2[i][[1]][[2]])))
}
# criteria: indices of assets with < 5% missing adjclose values
criteria = data.frame()
for(i in 1:length(masterList)){
print(i)
if(nadf[1][[1]][[i]]<0.05){
criteria <- rbind(criteria,i)
}
}
# reload only the surviving assets, this time keeping volume (col 2) as well
rm(list=ls(pattern="*.csv"))
for(i in 1:length(criteria[1][[1]])){
print(i)
assign(masterList[criteria[i,1]], read.csv(masterList[criteria[i,1]])[1:2520,c(1,2,7)])
}
#Step 1.2: Cleaning of Assets - Must have Recorded Volume
masterList2=lapply(ls(pattern="*.csv"),get)
# criteria2: subset of criteria whose volume column is not entirely NA
criteria2<-data.frame()
for(i in 1:length(criteria[1][[1]])){
print(i)
if(!is.na(mean(masterList2[i][[1]][["volume"]]))){
criteria2 <- rbind(criteria2,criteria[i,1])
}
}
rm(list=ls(pattern="*.csv"))
for(i in 1:length(criteria2[1][[1]])){
print(i)
assign(masterList[criteria2[i,1]], read.csv(masterList[criteria2[i,1]])[1:2520,c(1,2,7)])
}
#Step 1.3: Cleaning of Assets - Must Have Sufficiently High (>1000) Volume
criteria3<-data.frame()
masterList2=lapply(ls(pattern="*.csv"),get)
for(i in 1:length(masterList2)){
print(i)
if(max(na.omit(masterList2[i][[1]][["volume"]]))>1000){
criteria3 <- rbind(criteria3,criteria2[i,1])
}
}
# NOTE(review): this reassignment of masterList2 right before rm() appears to
# have no effect — the objects are re-read from disk on the next loop anyway.
masterList2=lapply(ls(pattern="*.csv"),get)
rm(list=ls(pattern="*.csv"))
# final reload: surviving assets with date + adjclose only (volume dropped)
for(i in 1:length(criteria3[1][[1]])){
print(i)
assign(masterList[criteria3[i,1]], read.csv(masterList[criteria3[i,1]])[1:2520,c(1,7)])
}
#Step 2: Compute Daily Log Returns
masterList2=lapply(ls(pattern="*.csv"),get)
length(masterList2)
# rows are newest-first, so return_t = log(P_t / P_{t+1 row}); last row padded with 0
for(i in 1:length(masterList2)){
print(i)
lrest=log(masterList2[i][[1]][["adjclose"]][-2520]/masterList2[i][[1]][["adjclose"]][-1])
lrest[2520]<-0
masterList2[i][[1]][["logreturns"]]<- lrest
}
# replace each per-asset data frame with just its logreturns column
for(i in 1:length(masterList2)){
print(i)
assign(ls(pattern="*.csv")[i],masterList2[i][[1]][[3]])
}
masterList2=lapply(ls(pattern="*.csv"),get)
#Check That There Are No 0s
# NOTE(review): `i` here is stale from the loop above (its last value), so the
# first column duplicates that asset instead of starting at asset 1. Also, at
# this point masterList2 elements are bare log-return vectors, so the
# [["adjclose"]] extraction no longer refers to a price column — verify this
# section against the intended data layout before trusting criteria4.
pricesRaw<-unlist(masterList2[i][[1]][["adjclose"]])
for(i in 2:length(masterList2)){
print(i)
pricesRaw<-cbind(pricesRaw,unlist(masterList2[i][[1]][["adjclose"]]))
}
# treat exact zero prices as missing
pricesRaw[pricesRaw==0]<-NA
dim(pricesRaw)
nadf<-data.frame()
for(i in 1:ncol(pricesRaw)){
print(i)
nadf<-rbind(nadf,mean(is.na(pricesRaw[,i])))
}
# criteria4: assets with < 5% zero/missing prices
criteria4 = data.frame()
for(i in 1:ncol(pricesRaw)){
print(i)
if(nadf[i,]<0.05){
criteria4 <- rbind(criteria4,criteria3[i,1])
}
}
#Step 3: Replace Missing Values
# interpolate remaining NAs in each log-return series (forecast::na.interp)
masterList2=lapply(ls(pattern="*.csv"),get)
for(i in 1:length(masterList2)){
print(i)
masterList2[i][[1]]<-na.interp(masterList2[i][[1]])
}
for(i in 1:length(masterList2)){
print(i)
assign(ls(pattern="*.csv")[i],masterList2[i][[1]])
}
#Step 4.1-4.4.1: Processing Data for Asset Selection
# build a 2520 x N matrix of log returns, then flip to oldest-first row order
masterList2=lapply(ls(pattern="*.csv"),get)
logReturnsRaw<-unlist(masterList2[1])
for(i in 2:length(masterList2)){
print(i)
logReturnsRaw<-cbind(logReturnsRaw,unlist(masterList2[i]))
}
logReturnsOrdered<-logReturnsRaw
logReturnsOrdered<-logReturnsOrdered[nrow(logReturnsOrdered):1,]
# dates taken from the first csv, flipped to match logReturnsOrdered
dates<-unlist(read.csv(masterList[1])[1:2520,1])
dates<-matrix(dates,nrow=2520,ncol=1)
dates<-dates[nrow(logReturnsOrdered):1,]
logReturnsWithTime<-cbind(dates,logReturnsOrdered)
#Flip objects in Workspace
# NOTE(review): the asset count 3930 is hard-coded here and below — it must
# match length(masterList2) for this data set.
masterList2=lapply(ls(pattern="*.csv"),get)
for(i in 1:3930){
print(i)
tempflip<-data.frame(masterList2[i][[1]])
tempflip<-tempflip[nrow(tempflip):1,]
masterList2[i][[1]]<-tempflip
assign(ls(pattern="*.csv")[i],masterList2[i][[1]])
}
#Step 4.4.2: Developing an Asset Selection Based on Sharpe Ratio, Beta, Returns and ARIMA
#Rolling Window with Sample Size(T) = 2520, Size(m) = 252, Forecast Window (h) = 20
# every 20 trading days, re-select 30 stocks by a 4-stage funnel:
# Sharpe (top 240) -> beta (top 120) -> realized return (top 60) -> ARIMA forecast (top 30)
count=0
loopcount=1
index<-matrix(c(1:3930), nrow=1, ncol=3930)
for(i in 232:2520){
print(i)
if(count==20){
#Find Top 240 Stocks with Highest Sharpe Ratio in Last 252 Days
# Sharpe here is mean/sd of daily log returns (no risk-free rate subtracted)
criteriaSharpe<-(colMeans(logReturnsOrdered[(i-251):(i),]))/(GMCM:::colSds(logReturnsOrdered[(i-251):(i),]))
orderedCriteriaSharpe<-rbind(index,criteriaSharpe)
orderedCriteriaSharpe<-data.frame(orderedCriteriaSharpe)
orderedCriteriaSharpe<-orderedCriteriaSharpe[order(orderedCriteriaSharpe[2,],decreasing=TRUE)]
topSharpeStocks<-c(as.integer(orderedCriteriaSharpe[1,1:240]))
topSharpeStocksReturns<-logReturnsOrdered[(i-251):(i),topSharpeStocks]
# market proxy: equal-weight sum of all asset returns in the window
marketReturns<-(rowSums(logReturnsOrdered[(i-251):(i),1:3930]))
marketReturns<-unlist(marketReturns)
#Find Top 120 Stocks out of 240 with Highest Beta in Last 252 Days
topSharpeStocksCov<-cov(marketReturns,topSharpeStocksReturns)
# NOTE(review): beta is conventionally cov/var(market); dividing by sd(market)
# mis-scales the values, although the *ranking* (the only thing used) is unchanged.
topSharpeStocksBeta<-topSharpeStocksCov/sd(marketReturns)
orderedCriteriaBeta<-rbind(matrix(topSharpeStocks,nrow=1,ncol=240),matrix(topSharpeStocksBeta,nrow=1,ncol=240))
orderedCriteriaBeta<-data.frame(orderedCriteriaBeta)
orderedCriteriaBeta<-orderedCriteriaBeta[order(orderedCriteriaBeta[2,],decreasing=TRUE)]
topBetaStocks<-c(as.integer(orderedCriteriaBeta[1,1:120]))
#Find Top 60 Stocks of out 120 with Highest Returns in Last 252 Days
# NOTE(review): this sums rows 1:252 (the first year) rather than the rolling
# window (i-251):(i) used everywhere else — confirm whether this is intended.
topBetaStocksReturns<-colSums(logReturnsOrdered[1:252,topBetaStocks])
orderedCriteriaReturn<-rbind(matrix(topBetaStocks,nrow=1,ncol=120),matrix(topBetaStocksReturns,nrow=1,ncol=120))
orderedCriteriaReturn<-data.frame(orderedCriteriaReturn)
orderedCriteriaReturn<-orderedCriteriaReturn[order(orderedCriteriaReturn[2,],decreasing=TRUE)]
#Finding Top 30 Stocks out of 60 with Highest ARIMA Forecasts Over a 20 Day Forecast Window
topReturnStocks<-c(as.integer(orderedCriteriaReturn[1,1:60]))
topReturnStocksReturns<-data.frame(logReturnsOrdered[(i-251):(i),topReturnStocks])
topReturnStocksReturns<-cbind(data.frame(as.Date(logReturnsWithTime[(i-251):(i),1])),topReturnStocksReturns)
topReturnStocksXTS<-xts(topReturnStocksReturns[ ,2], order.by=topReturnStocksReturns[,1])
#Creating Stationary Data and Best-Fit ARIMA Model
topReturnStocksFit<-auto.arima(topReturnStocksXTS)
fitForecasted<-topReturnStocksFit %>% forecast(h=20)
fitForecasted<-data.frame(fitForecasted)
#Re-compiling Forecasted Log Returns for Each 20-Day Forecast Window
fitForecastedPrices<-fitForecasted
fitReturn<-data.frame(sum(fitForecastedPrices))
# NOTE(review): k in 2:30 fits ARIMA for only the first 30 of the 60 candidates,
# while topReturnStocks below still carries all 60 names — the rbind of a
# 60-vector with 30 forecast columns relies on recycling; verify this is intended.
for(k in 2:30){
print(k+100)
topReturnStocksXTS<-xts(topReturnStocksReturns[ ,(k+1)], order.by=topReturnStocksReturns[,1])
topReturnStocksFit<-auto.arima(topReturnStocksXTS)
fitForecasted<-topReturnStocksFit %>% forecast(h=20)
fitForecasted<-data.frame(fitForecasted)
fitForecastedPrices<-fitForecasted
tempFitReturn<-data.frame(sum(fitForecastedPrices))
fitReturn<-cbind(fitReturn,tempFitReturn)
}
orderedFitReturn<-rbind(topReturnStocks,fitReturn)
orderedFitReturn<-orderedFitReturn[order(orderedFitReturn[2,],decreasing=TRUE)]
# accumulate one row of 30 selected asset indices per rebalancing date
if(loopcount==1){
topArimaStocks<-matrix(c(as.integer(orderedFitReturn[1,1:30])),nrow=1,ncol=30)
}else{
tempTopArimaStocks<-matrix(c(as.integer(orderedFitReturn[1,1:30])),nrow=1,ncol=30)
topArimaStocks<-rbind(topArimaStocks,tempTopArimaStocks)
}
loopcount=loopcount+1
count=0
}
count=count+1
}
dim(topArimaStocks)
#Step 4.5: Portfolio Optimization
# for each rebalancing date, run a random-search max-Sharpe optimization over
# the 30 selected stocks (long-only, fully invested, 1%-10% per asset)
count=0
loopcount=1
for(i in 232:2520){
print(i)
if(count==20){
myStocks<-logReturnsOrdered[(i-251):(i),c(topArimaStocks[loopcount,])]
myStocksXTS<-cbind(data.frame(as.Date(logReturnsWithTime[(i-251):(i),1])),myStocks)
myStocksXTS<-xts(myStocksXTS[,2:31],order.by=myStocksXTS[,1])
portfolioConstructor <-portfolio.spec(c(topArimaStocks[loopcount,]))
portfolioConstructor<-add.constraint(portfolioConstructor, type="full_investment")
portfolioConstructor<-add.constraint(portfolioConstructor, type ="box", min=0.01, max=0.1)
portfolioConstructor<-add.constraint(portfolioConstructor, type ="long_only")
portfolioConstructor<-add.objective(portfolioConstructor, type="return", name="mean")
portfolioConstructor<-add.objective(portfolioConstructor, type="risk", name="StdDev")
optimizedPortfolio <- optimize.portfolio(myStocksXTS, portfolioConstructor, optimize_method="random",search_size=10000,maxSR=TRUE, message=TRUE)
if(loopcount==1){
weightsForPortfolio <- data.frame(extractWeights(optimizedPortfolio))
}else{
tempWeightsForPortfolio <- data.frame(extractWeights(optimizedPortfolio))
weightsForPortfolio<-cbind(weightsForPortfolio,tempWeightsForPortfolio)
}
count=0
loopcount=loopcount+1
}
count=count+1
}
# final shape: 30 assets x 114 rebalancing dates
weightsForPortfolio<-matrix(unlist(weightsForPortfolio),nrow=30,ncol=114)
#Step 5.1: Cumulative Returns for Optimized Portfolio
# apply each period's optimized weights to the NEXT 20 days of realized returns
count=0
loopcount=1
for(i in 232:2520){
print(i)
if(count==20){
myReturns<-matrix(logReturnsOrdered[(i+1):(i+20),c(topArimaStocks[loopcount,])],nrow=20,ncol=30)
tempMyPortfolio<-myReturns %*% as.matrix(unlist(weightsForPortfolio[,loopcount]))
if(loopcount==1){
myPortfolio<-tempMyPortfolio
}else{
myPortfolio<-rbind(myPortfolio,as.matrix(tempMyPortfolio))
}
count=0
loopcount=loopcount+1
}
count=count+1
# stop before indexing past row 2520 of logReturnsOrdered
if(i==2511){
break
}
}
dim(myPortfolio)
myPortfolioXTS<-cbind(data.frame(as.Date(logReturnsWithTime[(253):(2512),1])),myPortfolio)
myPortfolioXTS<-xts(myPortfolioXTS[,2],order.by=myPortfolioXTS[,1])
# "Blown" series are scaled by 1000 purely for plotting readability
myPortfolioXTSBlown=myPortfolioXTS*1000
myPortfolioXTSBlown[2260,]
cumPortfolioXTSBlown<-cumsum(myPortfolioXTSBlown)
cumPortfolioXTSBlown[2260,]
plot(cumPortfolioXTSBlown, main="Optimized Portfolio",xlab="Time",ylab="logreturns*1000" )
#Step 5.2:Cumulative Returns for Equal Weight Portfolio
# same out-of-sample evaluation but with 1/30 weights on the same 30 stocks
count=0
loopcount=1
for(i in 232:2520){
print(i)
if(count==20){
myReturnsEqual<-matrix(logReturnsOrdered[(i+1):(i+20),c(topArimaStocks[loopcount,])],nrow=20,ncol=30)
tempMyPortfolioEqual<-myReturnsEqual %*% matrix(1/30, nrow=30, ncol=1)
if(loopcount==1){
myPortfolioEqual<-tempMyPortfolioEqual
}else{
myPortfolioEqual<-rbind(myPortfolioEqual,as.matrix(tempMyPortfolioEqual))
}
count=0
loopcount=loopcount+1
}
count=count+1
if(i==2511){
break
}
}
myPortfolioEqualXTS<-cbind(data.frame(as.Date(logReturnsWithTime[(253):(2512),1])),myPortfolioEqual)
myPortfolioEqualXTS<-xts(myPortfolioEqualXTS[,2],order.by=myPortfolioEqualXTS[,1])
myPortfolioEqualXTSBlown=myPortfolioEqualXTS*1000
myPortfolioEqualXTSBlown[2260,]
cumEqualPortfolioXTSBlown<-cumsum(myPortfolioEqualXTSBlown)
cumEqualPortfolioXTSBlown[2260,]
plot(cumEqualPortfolioXTSBlown, main="Equally Weighted Portfolio",xlab="Time",ylab="logreturns*1000" )
#Step 5.3: Cumulative Returns for S&P 500
# benchmark: S&P 500 daily log returns over the same 2520-day span
# (rows are selected in descending order to match the newest-first layout)
SP500<-read.csv("SP500new.csv")
SP500<-SP500[37920:35401,c(5)]
for(i in 1:2520){
print(i)
if(i==2520) break
SP500[i]<-log(SP500[i]/SP500[i+1])
}
SP500[2520]<-0
# NOTE(review): `dates` was already flipped at Step 4.1, so flipping it again
# here yields the original (newest-first) order — the variable name admits it.
incorrectDates<-dates[nrow(logReturnsOrdered):1]
SP500<-cbind(incorrectDates,SP500)
SP500<-SP500[2520:1,]
SP500<-as.matrix(SP500)
SP500XTS<-cbind(data.frame(as.Date(logReturnsWithTime[(253):(2512),1])),as.numeric(SP500[253:2512,2]))
SP500XTS<-xts(SP500XTS[,2],order.by=SP500XTS[,1])
SP500XTSBlown=SP500XTS*1000
SP500XTSBlown[2260,]
cumSP500XTSBlown<-cumsum(SP500XTSBlown)
cumSP500XTSBlown[2260,]
cumEqualPortfolioXTSBlown[2260,]
cumPortfolioXTSBlown[2260,]
plot(cumSP500XTSBlown, main="S&P 500",xlab="Time",ylab="logreturns*1000" )
# overlay all three cumulative-return series on one chart
allThree<-merge.xts(cumSP500XTSBlown,cumPortfolioXTSBlown,cumEqualPortfolioXTSBlown)
plot(allThree, main="All Portfolios",xlab="Time",ylab="logreturns*1000")
addLegend(legend.loc = "bottomright", legend.names = c("S&P500 - Black", "Equal Weight - Orange", "Optimized - Red"))
#Step 5.4.1: Test ARIMA Forecasts
# re-fit ARIMA per selected stock each period and keep the full 20-day forecast
# path (totalReturns), then compare the cumulative forecast against realized series
count=0
loopcount=1
for(i in 232:2520){
print(i)
if(count==20){
tempReturns<-data.frame(logReturnsOrdered[(i-251):(i),c(topArimaStocks[loopcount,])])
tempReturns<-cbind(data.frame(as.Date(logReturnsWithTime[(i-251):(i),1])),tempReturns)
tempReturnsXTS<-xts(tempReturns[ ,2], order.by=tempReturns[,1])
tempFit<-auto.arima(tempReturnsXTS)
predictedDiff<-tempFit %>% forecast(h=20)
predictedDiff<-data.frame(predictedDiff)
predictedReturns<-predictedDiff
predictedReturns<-data.frame(predictedReturns)
for(k in 2:30){
print(k+100)
tempReturnsXTS<-xts(tempReturns[ ,(k+1)], order.by=tempReturns[,1])
tempFit<-auto.arima(tempReturnsXTS)
predictedDiff<-tempFit %>% forecast(h=20)
predictedDiff<-data.frame(predictedDiff)
tempPredictedReturns<-predictedDiff
tempPredictedReturns<-data.frame(tempPredictedReturns)
predictedReturns<-cbind(predictedReturns,tempPredictedReturns)
}
if(loopcount==1){
totalReturns<-matrix(predictedReturns,nrow=20,ncol=30)
}else{
tempTotalReturns<-matrix(predictedReturns,nrow=20,ncol=30)
totalReturns<-rbind(totalReturns,tempTotalReturns)
}
loopcount=loopcount+1
count=0
}
count=count+1
}
totalReturns[1,]
# expand topArimaStocks to one row per day (each selection row repeated 20x)
for(i in 1:114){
print(i)
tempEnlargedArimaStocks<-matrix(topArimaStocks[i,],nrow=1,ncol=30)
for(k in 1:19){
tempEnlargedArimaStocks<-rbind(tempEnlargedArimaStocks,matrix(topArimaStocks[i,],nrow=1,ncol=30))
count=count+1
}
if(i==1){
enlargedArimaStocks<-tempEnlargedArimaStocks
}else{
enlargedArimaStocks<-rbind(enlargedArimaStocks,tempEnlargedArimaStocks)
}
}
# NOTE(review): the scaling 100/30 below differs from the 1000/30 used for the
# comparison series in Step 5.4.2 — confirm which factor is intended.
sumTotalReturns<-matrix(unlist(totalReturns),nrow=2280,ncol=30)
sumTotalReturns<-rowSums(sumTotalReturns)
as.matrix(sumTotalReturns)
sumTotalReturnsXTS<-cbind(data.frame(as.Date(logReturnsWithTime[(253):(2512),1])),sumTotalReturns[1:2260])
sumTotalReturnsXTS<-xts(sumTotalReturnsXTS[,2],order.by=sumTotalReturnsXTS[,1])
sumTotalReturnsXTSBlown=sumTotalReturnsXTS*100/30
cumSumTotalReturnsXTSBlown<-cumsum(sumTotalReturnsXTSBlown)
cumSumTotalReturnsXTSBlown[2260,]
plot(cumSumTotalReturnsXTSBlown, main="Forecasted ARIMA Returns",xlab="Time",ylab="logreturns*1000" )
allFour<-merge.xts(allThree,cumSumTotalReturnsXTSBlown)
plot(allFour, main="Forecast v. Actual",xlab="Time",ylab="logreturns*1000" )
addLegend(legend.loc = "topleft", legend.names = c("S&P500 - Black", "Equal Weight - Orange", "Optimized - Red","Forecasted ARIMA (Equal Weight) - Blue"))
#Step 5.4.2: Compare with Optimized Forecasts
# NOTE(review): enlargedWeights is only defined in Step 6.6 (further down this
# script), so Step 6.6 must be executed before this line — a forward reference.
weightedReturns<-enlargedWeights*matrix(unlist(totalReturns), nrow=2280, ncol=30)
sumWeightedReturns<-rowSums(weightedReturns)
sumWeightedReturnsXTS<-cbind(data.frame(as.Date(logReturnsWithTime[(253):(2512),1])),sumWeightedReturns[1:2260])
sumWeightedReturnsXTS<-xts(sumWeightedReturnsXTS[,2],order.by=sumWeightedReturnsXTS[,1])
sumWeightedReturnsXTSBlown=sumWeightedReturnsXTS*1000/30
cumSumWeightedReturnsXTSBlown<-cumsum(sumWeightedReturnsXTSBlown)
cumSumTotalReturnsXTSBlown[2260,]
#Step 5.4.3: Compare with Equal Forecasts
# same forecast series under uniform 1/30 weights
equalReturns<-matrix(1/30, nrow=2280, ncol=30)*matrix(unlist(totalReturns), nrow=2280, ncol=30)
sumEqualReturns<-rowSums(equalReturns)
sumEqualReturnsXTS<-cbind(data.frame(as.Date(logReturnsWithTime[(253):(2512),1])),sumEqualReturns[1:2260])
sumEqualReturnsXTS<-xts(sumEqualReturnsXTS[,2],order.by=sumEqualReturnsXTS[,1])
sumEqualReturnsXTSBlown=sumEqualReturnsXTS*1000/30
cumSumEqualReturnsXTSBlown<-cumsum(sumEqualReturnsXTSBlown)
cumSumEqualReturnsXTSBlown[2260,]
#Step 5.4.4: Compare with S&P 500 Forecasts
# roll the same 252-day/20-day ARIMA procedure over the S&P 500 series
SP500XTS2<-cbind(data.frame(as.Date(logReturnsWithTime[(1):(2520),1])),as.numeric(SP500[1:2520,2]))
SP500XTS2<-xts(SP500XTS2[,2],order.by=SP500XTS2[,1])
count=0
loopcount=1
for(i in 232:2520){
print(i)
if(count==20){
tempSP500ReturnsXTS<-SP500XTS2[(i-251):(i)]
tempSP500Fit<-auto.arima(tempSP500ReturnsXTS)
predictedSP500Diff<-tempSP500Fit %>% forecast(h=20)
predictedSP500Diff<-data.frame(predictedSP500Diff)
predictedSP500Returns<-predictedSP500Diff
predictedSP500Returns<-data.frame(predictedSP500Returns)
if(loopcount==1){
totalSP500Returns<-data.frame(predictedSP500Returns)
}else{
tempTotalSP500Returns<-data.frame(predictedSP500Returns)
totalSP500Returns<-rbind(totalSP500Returns,tempTotalSP500Returns)
}
loopcount=loopcount+1
count=0
}
count=count+1
}
tempTotalSP500Returns[2000,]
sumSP500ReturnsXTS<-cbind(data.frame(as.Date(logReturnsWithTime[(253):(2512),1])),totalSP500Returns[1:2260,])
sumSP500ReturnsXTS<-xts(sumSP500ReturnsXTS[,2],order.by=sumSP500ReturnsXTS[,1])
sumSP500ReturnsXTSBlown=sumSP500ReturnsXTS*1000
cumSumSP500ReturnsXTSBlown<-cumsum(sumSP500ReturnsXTSBlown)
cumSumSP500ReturnsXTSBlown[2260]
cumSumWeightedReturnsXTSBlown[2260]
cumSumEqualReturnsXTSBlown[2260]
# overlay forecast benchmarks on one chart
portfolioForecastTwo<-merge.xts(cumSumSP500ReturnsXTSBlown,cumSumEqualReturnsXTSBlown,cumSumWeightedReturnsXTSBlown)
plot(portfolioForecastTwo, main="Forecast v. Actual",xlab="Time",ylab="logreturns*1000" )
addLegend(legend.loc = "topleft", legend.names = c("S&P500 - Black", "Equal Weight - Red", "Optimized - Orange"))
#Step 6: Comparative Statistics
# summary metrics for the three realized daily-return series
#Step 6.1: Mean Returns
meanOptimized<-mean(myPortfolioXTSBlown)
meanEqual<-mean(myPortfolioEqualXTSBlown)
meanSP500<-mean(SP500XTSBlown)
meanOptimized
meanEqual
meanSP500
#Step 6.2: Volatility
volOptimized<-StdDev(myPortfolioXTS)
volEqual<-StdDev(myPortfolioEqualXTS)
volSP500<-StdDev(SP500XTS)
volOptimized
volEqual
volSP500
#Step 6.3: Sharpe Ratio
sharpeOptimized<-SharpeRatio.annualized(myPortfolioXTS)
sharpeEqual<-SharpeRatio.annualized(myPortfolioEqualXTS)
sharpeSP500<-SharpeRatio.annualized(SP500XTS)
sharpeOptimized
sharpeEqual
sharpeSP500
#Step 6.4: Sortino Ratio
# MAR = 0: downside deviation measured against a zero minimum acceptable return
sortinoOptimized<-SortinoRatio(myPortfolioXTS, MAR=0)
sortinoEqual<-SortinoRatio(myPortfolioEqualXTS, MAR=0)
sortinoSP500<-SortinoRatio(SP500XTS, MAR=0)
sortinoOptimized
sortinoEqual
sortinoSP500
#Step 6.5: Maximum Drawdown
mDrawOptimized<-maxDrawdown(myPortfolioXTS)
mDrawEqual<-maxDrawdown(myPortfolioEqualXTS)
mDrawSP500<-maxDrawdown(SP500XTS)
mDrawOptimized
mDrawEqual
mDrawSP500
#Step 6.6: Turnover
#Weights for 2260 Days
# Expand the per-rebalance weight matrix (114 rebalances x 30 stocks after
# transposing) so each rebalance's weights are repeated for its 20-day
# holding period (1 original row + 19 copies = 20 rows per rebalance).
fWeightsForPortfolio=t(weightsForPortfolio)
for(i in 1:114){
print(i)
tempEnlargedWeights<-matrix(fWeightsForPortfolio[i,],nrow=1,ncol=30)
for(k in 1:19){
tempEnlargedWeights<-rbind(tempEnlargedWeights,matrix(fWeightsForPortfolio[i,],nrow=1,ncol=30))
# NOTE(review): this increment looks like a leftover — `count` is not used
# inside this loop and is reset to 0 below before its next use.
count=count+1
}
if(i==1){
enlargedWeights<-tempEnlargedWeights
}else{
enlargedWeights<-rbind(enlargedWeights,tempEnlargedWeights)
}
}
# Collect the realized log returns of the 30 stocks selected for each
# 20-day period (`topArimaStocks` holds the chosen column indices per
# rebalance; defined earlier in the script).
count=0
loopcount=1
for(i in 232:2520){
print(i)
if(count==20){
tempRawLogReturns<-matrix(logReturnsOrdered[(i+1):(i+20),c(topArimaStocks[loopcount,])],nrow=20,ncol=30)
if(loopcount==1){
rawLogReturns<-as.matrix(tempRawLogReturns)
}else{
rawLogReturns<-rbind(rawLogReturns,as.matrix(tempRawLogReturns))
}
count=0
loopcount=loopcount+1
}
count=count+1
# Stop before indexing past the end of logReturnsOrdered.
if(i==2511){
break
}
}
# Convert the expanded weights and realized returns to dated xts objects
# covering the same 2260-day window.
enlargedWeightsXTS<-cbind(data.frame(as.Date(logReturnsWithTime[(253):(2512),1])),enlargedWeights[1:2260,])
enlargedWeightsXTS<-xts(enlargedWeightsXTS[,2:31],order.by=enlargedWeightsXTS[,1])
rawLogReturnsXTS<-cbind(data.frame(as.Date(logReturnsWithTime[(253):(2512),1])),rawLogReturns[1:2260,])
rawLogReturnsXTS<-xts(rawLogReturnsXTS[,2:31],order.by=rawLogReturnsXTS[,1])
rawLogReturnsXTS[1,]
#Step 6.6.1: Daily Turnover for Optimized
# Daily turnover = sum of absolute weight changes between yesterday's
# end-of-period weights and today's beginning-of-period weights.
outOptimized <- Return.portfolio(R = rawLogReturnsXTS, weights = enlargedWeightsXTS, verbose = TRUE)
beginWeightsOptimized <- outOptimized$BOP.Weight
endWeightsOptimized <- outOptimized$EOP.Weight
txnsOptimized <- beginWeightsOptimized - lag(endWeightsOptimized)
dailyTOOptimized <- xts(rowSums(abs(txnsOptimized[,1:30])), order.by=index(txnsOptimized))
barplot(dailyTOOptimized, main="Daily Turnover for Optimized")
#Step 6.6.2: Daily Turnover for Equal
# Same computation for a constant 1/30 weight portfolio (its turnover comes
# only from price drift between rebalances).
equalWeightsXTS<-cbind(data.frame(as.Date(logReturnsWithTime[(253):(2512),1])),matrix(1/30,nrow=2260,ncol=30))
equalWeightsXTS<-xts(equalWeightsXTS[,2:31],order.by=equalWeightsXTS[,1])
outEqual <- Return.portfolio(R = rawLogReturnsXTS, weights = equalWeightsXTS , verbose = TRUE)
beginWeightsEqual <- outEqual$BOP.Weight
endWeightsEqual <- outEqual$EOP.Weight
txnsEqual <- beginWeightsEqual - lag(endWeightsEqual)
dailyTOEqual <- xts(rowSums(abs(txnsEqual[,1:30])), order.by=index(txnsEqual))
barplot(dailyTOEqual, main="Daily Turnover for Equal")
# Average daily turnover, excluding the first and last (edge) days.
mean(dailyTOEqual[2:2259])
mean(dailyTOOptimized[2:2259])
|
62a73725a2363761ee1a301cc3bd68da5d3bc21e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/multilevel/examples/summary.rgr.agree.Rd.R
|
862e4460b6408f834b4a6efc5278cedffcf98512
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 247
|
r
|
summary.rgr.agree.Rd.R
|
# Auto-extracted package example (genthat) for multilevel::summary.rgr.agree.
library(multilevel)
### Name: summary.rgr.agree
### Title: S3 method for class 'rgr.agree'
### Aliases: summary.rgr.agree
### Keywords: programming
### ** Examples
# Random-group-resampling agreement for work hours (HRS) grouped by GRP;
# the third argument (1000) controls the resampling — see ?rgr.agree.
data(bh1996)
RGROUT<-rgr.agree(bh1996$HRS,bh1996$GRP,1000)
# Summarise the resampling results via the S3 method under test.
summary(RGROUT)
|
a932e44c9122b0ccb3c14042e76c4fe161c35228
|
4d4e54893fe008a3947801f72045932e1ecebaaf
|
/scripts/Lagarias/LagariasANOVAtmp.R
|
831f28573f171f6d092f093cb479cd8a7ad39002
|
[] |
no_license
|
nicolise/SideProjects
|
835dcf1c35e8b070e3fa8e98637ac3de21f8a08f
|
30828173bf0aa3cf9a2e4580f3b250d2b253dc69
|
refs/heads/master
| 2021-01-21T04:53:51.267551
| 2019-03-30T01:23:53
| 2019-03-30T01:23:53
| 54,146,553
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,416
|
r
|
LagariasANOVAtmp.R
|
#Nicole E Soltis
#03/17/16
#ANOVA for Lagarias RNAseq
# Exploratory analysis script: checks normality and homoscedasticity of
# RPKM expression values, log-transforms them, then fits ANOVA models for
# pathway/gene, genotype and time effects.
#-------------------------------------------------------------------
# NOTE(review): hard-coded setwd() makes this script machine-specific;
# kept as-is because this is a personal analysis script.
#setwd("~/Projects/SideProjects/data/Lagarias")
setwd("~/Documents/GitRepos/SideProjects/data")
myData <- read.csv("for_pathwayANOVA.csv")
names(myData)
#is data normal?
# NOTE(review): attach() puts the columns on the search path; it is called
# again below, which stacks a second (possibly stale) copy.
attach(myData)
#graphically...
hist(RPKM)
#more graphs
require(car); require(MASS)
# Shift by +1 so zero counts survive the log/lnorm diagnostics below.
myData$RPKM.t <- myData$RPKM + 1
#is it more normal or log-normal?
#very long right tail, log-normal is also a bad estimate
qqp(myData$RPKM.t, "norm")
qqp(myData$RPKM.t, "lnorm")
#statistically...
#dataset is too large (37980 obs. instead of 5000 max)
#shapiro.test(myData$RPKM.t)
#try transformations
transf <- log10(RPKM)
hist(transf)
#randomly select a subset of values from transf to test
# Shapiro-Wilk caps at 5000 observations, so test a random sample of 1000
# (drawn with replacement).
set.seed(100)
sample.shapiro <- sample( 1:nrow(myData) , size=1e3 , replace=TRUE )
sample.RPKM <- myData$RPKM[ sample.shapiro ]
df.shapiro <- data.frame(matrix(unlist(sample.RPKM), nrow=1e3, byrow=T))
# Rename the auto-generated column produced by data.frame(matrix(...)).
df.shapiro$RPKM <- df.shapiro$matrix.unlist.sample.RPKM...nrow...1000..byrow...T.
df.shapiro$transf <- (log(df.shapiro$RPKM+1))
hist(df.shapiro$transf)
shapiro.test(df.shapiro$transf) #still significantly non-normal
qqp(df.shapiro$transf) #but it looks pretty good
#at any rate I'll log-transform
# Replace the +1 shift with the actual log(x + 1) transform used downstream.
myData$RPKM.t <- (log(myData$RPKM+1))
#next check assumption of homoscedasticity
#graphically...
attach(myData)
boxplot(RPKM.t~GenotypeID*Time,
ylab="YTITLE", main="PLOTTITLE", las=3) #looks good yay!
#statistically...
bartlett.test(RPKM.t ~ GenotypeID, data=myData) #meh not homoscedastic
bartlett.test(RPKM.t ~ Time, data=myData) #also iffy
leveneTest(RPKM.t~GenotypeID) #also not homoscedastic
#check: balance by genotype?
#perfect? 6330 obs per geno?
as.data.frame(table(unique(myData[])$GenotypeID))
#gpuR::gpuLm
#gpux
#gpud
#go ahead with ANOVA anyway
head(myData)
#small model
# NOTE(review): this fit is immediately overwritten by the "big model"
# below; only the nested-gene model is actually analyzed.
Model.lm <- lm(RPKM.t~ PathwayID + GenotypeID + Time)
#big model
# Genes nested within pathways (PathwayID/GeneIDV5.5), plus genotype and
# time main effects.
Model.lm <- lm(RPKM.t~ PathwayID/GeneIDV5.5 + GenotypeID + Time )
Model.aov <- aov(RPKM.t~ PathwayID/GeneIDV5.5 + GenotypeID + Time )
#aov(Y ~ A + B %in% A, data=d)
#aov(Y ~ A/B, data=d)
#aov(Y ~ A + A:B, data=d)
MY.ANOVA <- anova(Model.lm)
summary(MY.ANOVA)
MY.ANOVA #sig fx of genotype and time
interaction.plot(GenotypeID,Time,RPKM.t)
# NOTE(review): TukeyHSD() expects an aov fit, not an anova() table — this
# call likely errors; `Model.aov` above (or MY.aov below) should be used.
TukeyHSD(MY.ANOVA)
MY.aov <- aov(RPKM.t~GenotypeID*Time)
summary(MY.aov)
#adjusted p-values give 6 sig. pairs and 1 marg. sig.
TukeyHSD(MY.aov)
|
f698b96120bb850f2ca8ce762ecb79063e4c887e
|
2c13e79e11220d7c942e9c4d049839896d1af22e
|
/uncertaintyImportance.R
|
f783da9188933248328c77827020d15065662b7b
|
[] |
no_license
|
DoktorMike/PlayGround
|
a2fa2b76093e14241cee8beb5785294149900940
|
3efaa96d201c37825f281de6d521638304f7ae14
|
refs/heads/master
| 2023-08-08T21:01:24.293399
| 2023-08-06T11:09:01
| 2023-08-06T11:09:01
| 10,732,191
| 0
| 0
| null | 2023-07-25T23:26:03
| 2013-06-17T07:28:48
|
HTML
|
UTF-8
|
R
| false
| false
| 1,153
|
r
|
uncertaintyImportance.R
|
# Visualise why uncertainty matters when comparing media ROI distributions:
# a channel with a similar mean can carry a very different amount of upside.
library(ggplot2)
library(scales)
library(tibble)
# Bug fix: dplyr (%>%, group_by, summarise, n) and tidyr (gather) are used
# below but were never attached, so the script failed at the `mydf` line.
library(dplyr)
library(tidyr)
# Quick look at a heavy-tailed gamma sample: quantiles, mean and histogram.
x <- rgamma(5000, 0.5, 1); quantile(x); mean(x); qplot(x)
# Simulated ROI draws: TV ~ tight normal, Radio ~ heavy-tailed gamma,
# reshaped to long form with columns Media and ROI. (gather() is superseded
# by pivot_longer() but kept to preserve the original output exactly.)
mydf <- tibble(TV=rnorm(5000, 0.5, 0.2), Radio=rgamma(5000, 0.5, 1)) %>% gather(Media, ROI)
# Per-channel summary; "Mass" is the share of draws above 0.3 and "Sharpe"
# is mean/sd of the ROI draws.
group_by(mydf, Media) %>% summarise(Mean=mean(ROI), Min=min(ROI), Max=max(ROI),
Median=median(ROI), Mass=sum(ROI>0.3)/n(),
Sharpe=Mean/sd(ROI))
# Three views of the two distributions; the vertical line marks ROI = 0.5.
ggplot(mydf, aes(x=ROI, group=Media, fill=Media)) + geom_histogram() + theme_minimal() +
scale_fill_brewer(type = "qual", palette = 6) + facet_grid(.~Media) + geom_vline(xintercept = 0.5) +
ylab("Probability")
ggplot(mydf, aes(x=ROI, group=Media, fill=Media)) + geom_density(alpha=0.5) + theme_minimal(base_size = 15) +
scale_fill_brewer(type = "qual", palette = 6) + facet_grid(.~Media) + geom_vline(xintercept = 0.5) +
ylab("Probability")
# Final stacked-facet version, cropped to ROI in [0, 3.5], saved to disk.
ggplot(mydf, aes(x=ROI, group=Media, fill=Media)) + geom_density(alpha=0.5) + theme_minimal(base_size = 15) +
scale_fill_brewer(type = "qual", palette = 6) + facet_grid(Media~.) + geom_vline(xintercept = 0.5) +
ylab("Probability") + xlim(0, 3.5)
ggsave("uncertaintyImportance.png")
|
16d03c70f0d83e2a8afa727712418f7665261ac6
|
cfc4a7b37657114bb93c7130eff4fc2458381a4f
|
/doc-ja/sample-geometry01.rb.v.rd
|
eaadbc36d0f9b42978558a3a0091ad4a4ae4f9cb
|
[
"MIT"
] |
permissive
|
kunishi/algebra-ruby2
|
5bc3fae343505de879f7a8ae631f9397a5060f6b
|
ab8e3dce503bf59477b18bfc93d7cdf103507037
|
refs/heads/master
| 2021-11-11T16:54:52.502856
| 2021-11-04T02:18:45
| 2021-11-04T02:18:45
| 28,221,289
| 6
| 0
| null | 2016-05-05T16:11:38
| 2014-12-19T08:36:45
|
Ruby
|
UTF-8
|
R
| false
| false
| 673
|
rd
|
sample-geometry01.rb.v.rd
|
=begin
# sample-geometry01.rb
# Symbolic proof that the three medians of a triangle meet in one point:
# the Groebner basis of the three "X lies on a median" conditions (plus a
# non-degeneracy condition) forces X = (A + B + C)/3, the centroid, as the
# output comments at the bottom show.
require 'algebra'
# Multivariate polynomials over the rationals.
R = MPolynomial(Rational)
x,y,a1,a2,b1,b2,c1,c2 = R.vars('xya1a2b1b2c1c2')
V = Vector(R, 2)
# X is the unknown point; A, B, C are the triangle vertices.
X, A, B, C = V[x,y], V[a1,a2], V[b1,b2], V[c1,c2]
# D, E, F are the side midpoints opposite A, B, C respectively.
D = (B + C) /2
E = (C + A) /2
F = (A + B) /2
# Determinant collinearity test: zero iff p1, p2, p3 lie on one line.
def line(p1, p2, p3)
SquareMatrix.det([[1, *p1], [1, *p2], [1, *p3]])
end
# X on each median, and s = twice the (signed) triangle area.
l1 = line(X, A, D)
l2 = line(X, B, E)
l3 = line(X, C, F)
s = line(A, B, C)
g = Groebner.basis [l1, l2, l3, s-1] #s-1 means non degeneracy
g.each_with_index do |f, i|
p f
end
#x - 1/3a1 - 1/3b1 - 1/3c1
#y - 1/3a2 - 1/3b2 - 1/3c2
#a1b2 - a1c2 - a2b1 + a2c1 + b1c2 - b2c1 - 1
((<_|CONTENTS>))
=end
|
5593b8120a2cc3ab703744689715836e0de00a21
|
9d7e58dbbc2556c5052f5367ad4636f901383b60
|
/man/XRchart.Rd
|
27511b07047165c4db5184d2b287c5723cfc2710
|
[] |
no_license
|
cran/SSDforR
|
eea003ef400f2274772958dd1a220b2f916feb93
|
b0276a38634e9be787d4b5ddaabb3deec768b2fd
|
refs/heads/master
| 2023-02-25T10:54:27.473408
| 2023-02-17T19:10:02
| 2023-02-17T19:10:02
| 17,693,615
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,543
|
rd
|
XRchart.Rd
|
\name{XRchart}
\alias{XRchart}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{SPC XR-Chart
%% ~~function to do ... ~~
}
\description{This chart can be used when there are multiple observations per sample and uses the mean of each sample to create the chart.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
XRchart(behavior, groupX, bandX, ABxlab, ABylab, ABmain)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{behavior}{behavior variable
%% ~~Describe \code{behavior} here~~
}
\item{groupX}{grouping variable
%% ~~Describe \code{groupX} here~~
}
\item{bandX}{number of standard deviations desired (e.g., 2)
%% ~~Describe \code{bandX} here~~
}
\item{ABxlab}{x-axis label in quotation marks (e.g., "week")
%% ~~Describe \code{ABxlab} here~~
}
\item{ABylab}{y-axis label in quotation marks (e.g., "mean amount")
%% ~~Describe \code{ABylab} here~~
}
\item{ABmain}{main title for chart in quotation marks (e.g., "Admits to Hospital")
%% ~~Describe \code{ABmain} here~~
}
}
\references{
Auerbach, Charles, and Zeitlin Wendy. SSD for R: An R Package for Analyzing Single-Subject Data. Oxford University Press, 2014. p71, p105
{Orme, J. & Cox, M.E. (2001). Analyzing single-subject design data using statistical process control charts. Social Work Research, 25(2), 115-127.
}
{Go to www.ssdanalysis.com for more information.}
%% ~put references to the literature/web site here ~
}
\author{Charles Auerbach, PhD
Wurzweiler School of Social Work
Wendy Zeitlin, PhD
Montclair State University
%% ~~who you are~~
}
\examples{
admit<-c(85,90,80,84,82,79,75,76,80,84,75,80,79,83,88,78,80,85,83,
82,89,84,89,91,87,84,77,86,80,
89,81,86,88,83,86,90,86,85,85,87,80,89,NA,86,87,88,89,79,73,75,
74,70,75,81,85,75,73,75,
79,70,72,71,69,70,64,60,59,54,53,55,50,54,51,49,48,50,46,55,51,
55,49,50,48,51,33)
day<-c(1,1,1,1,1,1,1,2,2,2,2,2,2,2,3,3,3,3,3,3,3,4,4,4,4,4,4,4,5,
5,5,5,5,5,5,6,6,6,6,6,6,6,NA,7,7,7,7,7,7,7,8,8,8,8,8,8,8,9,
9,9,9,9,9,9,10,10,10,10,10,10,10,11,11,11,11,11,11,11,12,
12,12,12,12,12,12)
padmit<-c("A","A","A","A","A","A","A","A","A","A",
"A","A","A","A","A","A",
"A","A","A","A","A","A","A","A","A","A","A","A","A","A","A","A",
"A","A","A","A","A","A","A","A","A","A",NA,"B","B",
"B","B","B","B","B","B",
"B","B","B","B","B","B","B","B","B","B","B","B",
"B","B","B","B",
"B","B","B","B","B","B","B","B","B","B","B","B",
"B","B","B","B","B","B")
XRchart(admit, day, 2, "week", "amount", "Admits to Hospital")
}
|
8a3bd333c9c0410b39aa34aeed0466c897f9c4ca
|
13865ec82c8197b6e547eb12e0ef7aa00e23a137
|
/man/cxx11Normal.Rd
|
ef289b148772624974ae493ab87c758d03b78b10
|
[] |
no_license
|
junjiemao/OneDayOneRcpp
|
c48001a667782237240929fa9b616cb3644874b5
|
bb88c7f7c941e27bbfa95283df1327ff263efc43
|
refs/heads/master
| 2021-01-10T18:59:06.952674
| 2014-08-19T15:20:22
| 2014-08-19T15:20:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 259
|
rd
|
cxx11Normal.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{cxx11Normal}
\alias{cxx11Normal}
\alias{cxx11Normals}
\title{cxx11Normal}
\usage{
cxx11Normals(n, seed = 42L)
}
\arguments{
\item{n}{}
\item{seed}{default value is 42}
}
\description{
cxx11Normal
}
|
ae46ae1465e667649c1f24b31f1250f666b1334c
|
5b7920289afd94750ca42e0626a4b1dc394fb6da
|
/R/Lp.R
|
c4c93ec15ff84c847fe480ef0b13d03a2431953f
|
[] |
no_license
|
blasern/edd
|
72ac3b4559bb3668b30ea9a5b2e4a2b0fd339d09
|
3c175502fcb29e48fd0ed5f42e0290f361419263
|
refs/heads/master
| 2021-03-22T03:00:35.917517
| 2019-03-14T22:11:41
| 2019-03-14T22:11:41
| 94,869,826
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 353
|
r
|
Lp.R
|
#' Lp Distance
#'
#' Compute the Lp distance between two samples, defined as the p-th root of
#' the mean p-th power of all pairwise Minkowski distances.
#'
#' @param X,Y input data
#' @param p the power p
#' @examples
#' X <- rexp(80, rate = 0.2)
#' Y <- rexp(120, rate = 0.4)
#' Lp_dist(X, Y, p = 2)
#' @importFrom rdist cdist
#' @export
Lp_dist <- function (X, Y, p = 2)
{
  # All pairwise Minkowski-p distances between the two samples.
  pairwise <- rdist::cdist(X, Y, metric = "minkowski", p = p)
  # p-th root of the mean p-th power.
  mean(pairwise^p)^(1/p)
}
|
71f7583d04f265dcd910ecff10fb70acad5465dc
|
bf9f77e17111b590fe44905ebd9391009a2a1390
|
/man/lib_composante.Rd
|
90682095ff0c72605251e208d254d7e633f416d7
|
[
"MIT"
] |
permissive
|
ove-ut3/apogee
|
5cd9fed8e1cb4fc359b824fdb16ff269952d6320
|
c08ff84497bbaab4af90a0eeb779a338ff158b87
|
refs/heads/master
| 2021-06-02T09:03:41.344113
| 2020-05-19T13:22:59
| 2020-05-19T13:22:59
| 115,185,672
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 582
|
rd
|
lib_composante.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/libelle.R
\name{lib_composante}
\alias{lib_composante}
\title{Renvoie le libellé à partir du code composante}
\usage{
lib_composante(code_composante)
}
\arguments{
\item{code_composante}{Un vecteur de code composante.}
}
\value{
Un vecteur contenant les libellés composante.
Jeu de données source : \code{apogee::composante}.\cr
Il est créé à partir de la table "composante" de la base Access "Tables_ref.accdb" (projet Apogée).
}
\description{
Renvoie le libellé à partir du code composante.
}
|
ec6d632b883c0016f87973f2851b28f15d517fb8
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609961572-test.R
|
8ad51079e537e9b46e7dd55a72866adf57e7bafb
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 670
|
r
|
1609961572-test.R
|
# Auto-generated fuzzing regression input (libFuzzer/valgrind capture) for
# diffrprojects:::dist_mat_absolute: replays the recorded argument list
# exactly as captured.
testlist <- list(x = c(439374847L, -687920385L, -5723992L, -1465341784L, -1465341784L, -1465341784L, -1465341784L, -1465341784L, -1465341784L, -1465341784L, -1465341784L, -1465341784L, -1465341784L, -1465341784L, -1465341784L, -1465341784L, -1465341784L, -1465341784L, -1465341784L, -1465341784L, -1465341784L, -1465341784L), y = c(-1465341784L, -1465341784L, -1465341784L, -1465341738L, -13959210L, -13959424L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
# Print the structure so crashes/shape changes are visible in the log.
str(result)
|
87f6dc1e35a96bbd210d692ccfc1f20c04a7ddce
|
8b506669cb283da750d473888c11afe7747ab162
|
/server.R
|
1eabc181c974e0ac5c83c0f01333f28200ce96bb
|
[] |
no_license
|
Yannael/covid19-forecast-belgium
|
dd7ca9832e6da0e3e8fdb584cfe6b62ca93aa353
|
309956e93442712f8cdb3c6886c228dde3476b55
|
refs/heads/master
| 2022-09-16T10:07:05.321457
| 2020-05-29T08:04:06
| 2020-05-29T08:04:06
| 265,491,129
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,510
|
r
|
server.R
|
# Shiny server for the COVID-19 mortality forecast dashboard.
library(DT)
library(rpivotTable)
server <- function(input, output, session) {
#############################################################################
# Visualization: Filter data based on user input
# Chained reactives narrow `all_data` (defined outside this file) by
# forecast date, model, time resolution and location; each stage only
# recomputes when its own input (or an upstream stage) changes.
data_f <- reactive({ all_data %>% filter(forecast_date == input$forecast_date) })
data_ft <- reactive({ data_f() %>% filter(team_model == input$team_model) })
data_ftmt <- reactive({ data_ft() %>% filter(unit == input$resolution) })
data_ftmtl <- reactive({ data_ftmt() %>% filter(location == input$location) })
# Observed ("truth") series for the selected location and resolution.
truth_plot <- reactive({
truth %>%
filter(location == input$location,
unit == input$resolution)
})
# Main plot: observed mortality plus point forecasts with a 95% ribbon.
output$prediction_plot <- renderPlotly({
xaxis <- list(
title = 'Date',
titlefont = list(size=15)
)
yaxis <- list(
title = 'Mortality',
titlefont = list(size=15)
)
# Plot margins in pixels.
m <- list(l=100, r=20, b=100, t=100)
# NOTE(review): paste0() has no `sep` argument; the "" is just concatenated
# and has no effect here.
graph_title <- paste0('Mortality predictions for the next 4 weeks.',sep="")
d <- data_ftmtl()
# Only draw when the current filter combination has forecast rows.
if (NROW(d)>0) {
fig <- plot_ly(type = "scatter", mode='lines')
# Observed series, drawn as a line plus separate markers.
fig <- fig %>% add_trace(x = truth_plot()$date, y = truth_plot()$value, type = "scatter", mode='lines',
color = I("Green"), name = "Observed")
fig <- fig %>% add_trace(x = truth_plot()$date, y = truth_plot()$value, type = "scatter", mode='markers',
color = I("Green"), name = "Observed")
# Point forecasts, line plus markers.
fig <- fig %>% add_trace(x = d$target_end_date, y = d$point, type = "scatter", mode='lines',
color = I("blue"), name = "Prediction")
fig <- fig %>% add_trace(x = d$target_end_date, y = d$point, type = "scatter", mode='markers',
color = I("blue"), name = "Prediction")
# 2.5%-97.5% quantile ribbon; the unnamed third argument is ymax.
fig <- fig %>% add_ribbons(x = d$target_end_date, ymin = d$`0.025`, d$`0.975`,
color = I("gray80"), name = "95% confidence")
fig <- fig %>% layout(title = graph_title, xaxis = xaxis, yaxis = yaxis, margin = m)
# Presumably clears the widget id to avoid a plotly-in-Shiny warning —
# TODO confirm.
fig$elementId <- NULL
fig
}
})
# Pivot table of forecast error by forecast date (rows) and model (cols).
output$rpivot_model_accuracy <- renderRpivotTable({
rpivotTable(data = prediction_error , rows = c( "forecast_date"), cols="team_model",
vals = "error", aggregatorName = "Average", rendererName = "Table",
width="100%", height="500px")
})
# Raw data browser with per-column filters.
output$all_data <- DT::renderDT(all_data, filter = "top")
}
|
98f0dcf85f0825abbfbf1faed3c5f8181a021f94
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/circular/examples/aov.circular.Rd.R
|
adf4faeaa8e17896e76a9187baba9851f0016466
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 351
|
r
|
aov.circular.Rd.R
|
# Auto-extracted package example (genthat) for circular::aov.circular.
library(circular)
### Name: aov.circular
### Title: Analysis of Variance for circular data
### Aliases: aov.circular print.aov.circular
### Keywords: models
### ** Examples
# Two von Mises samples with different mean directions (0 vs pi/3) and
# concentrations (1 vs 10), plus a matching group indicator.
x <- c(rvonmises(50, circular(0), 1), rvonmises(100, circular(pi/3), 10))
group <- c(rep(0, 50), rep(1, 100))
# Default test, then the likelihood-ratio variant.
aov.circular(x, group)
aov.circular(x, group, method="LRT")
|
65dda07a6fb44388f10a608aa9cb61feea8200c7
|
9de05fb9c3aa4309a08d85428127ceb8ea7a6193
|
/tests/testthat/helper.R
|
d374939b98458a15a20a98f6bd830fdc39f18e21
|
[] |
no_license
|
bbolker/clusteredinterference
|
4c54fda11b8d590bdfc5d5f9c285f4f33996041a
|
2a56e8fd067d8835b8fa8f1226d826668c28b452
|
refs/heads/master
| 2023-03-20T04:42:49.847364
| 2019-07-17T23:39:10
| 2019-07-17T23:39:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 139
|
r
|
helper.R
|
# Shared helpers for the testthat suite.
# library(rprojroot)

# Resolve `name` to its full path inside the suite's historical_data folder,
# anchored at the testthat root of the package.
quickLookup <- function(name) {
  data_dir <- "historical_data"
  rprojroot::find_testthat_root_file(data_dir, name)
}

# Numeric tolerance used when comparing against stored historical results.
helper_tol <- 1e-7
|
bfe966148bb5e5aff011fe8adc37cb60ab09a3f0
|
c55c02f27dc68f5a912a0cb7edf232ddc7197f7b
|
/exercises/exc_04_10_1.R
|
bcead4eaf2e4fd9fcdfd9d2fbd39e32b941ce1e7
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
benmarwick/gams-in-r-course
|
3e631518be8ab89c9e08d83743aa15053a8bc9d1
|
ed45f12a183d1ba023ee43e8b2fa557773c9b5ef
|
refs/heads/master
| 2020-05-27T19:02:16.685461
| 2019-05-27T02:01:13
| 2019-05-27T02:01:13
| 188,754,422
| 0
| 0
| null | 2019-05-27T02:00:18
| 2019-05-27T02:00:18
| null |
UTF-8
|
R
| false
| false
| 229
|
r
|
exc_04_10_1.R
|
# Calculate predictions and errors
# (course exercise file — the `___` blanks below are intentionally left for
# the student to complete)
# Link-scale predictions with standard errors for the new data.
predictions <- predict(log_mod2, newdata = new_credit_data,
type = "link", se.fit = TRUE)
# Calculate high and low prediction intervals
high_pred <- ___
low_pred <- ___
|
e58b4727954c9e34e46f8ecfce4a045aceb412d0
|
ef8d66ebaeaf27fa1aed1cf01ebd70ce8224c5cd
|
/man/reorder_cormat.Rd
|
362ababaed7634e53e1743ecd3a6646f988351a8
|
[] |
no_license
|
Alice-MacQueen/CDBNgenomics
|
dd6c8026156d91be7f12a9857d0ebeb89c32c384
|
6b00f48eb1c6eec848f11416d7a5fd752cd778bd
|
refs/heads/master
| 2021-07-08T06:15:56.774003
| 2020-08-12T19:28:32
| 2020-08-12T19:28:32
| 178,261,021
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 389
|
rd
|
reorder_cormat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/handle_mash_results.R
\name{reorder_cormat}
\alias{reorder_cormat}
\title{Reorder correlation matrix}
\usage{
reorder_cormat(cormat)
}
\arguments{
\item{cormat}{A correlation matrix}
}
\description{
Reorder correlation coefficients from a matrix of things
(including NA's) and hierarchically cluster them
}
|
5c88056908eb1d51c58430c7eede12640f7ae181
|
94312532e4e32f4d29f8e73eb90bce03d24ab66e
|
/R/fit3models.alt.R
|
3a97cdff16fae0d29af73e0fc69aebf58df6b6c9
|
[] |
no_license
|
cran/paleoTSalt
|
327f66292808ca313e71c28e70f9ad45b3a53647
|
6782f0756b8a12564b5dabaccd333fb1c3ac27da
|
refs/heads/master
| 2021-01-19T08:29:23.656335
| 2007-10-22T00:00:00
| 2007-10-22T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 940
|
r
|
fit3models.alt.R
|
`fit3models.alt` <-
function (y, pool = TRUE, silent = FALSE, wts = "AICc")
{
    # Fit the three candidate models (general random walk, unbiased random
    # walk, stasis) to the sequence `y` and compare them by information
    # criteria and Akaike weights.
    #
    # Args:
    #   y:      paleoTS-style data passed through to the opt.alt.* fitters.
    #   pool:   forwarded to the fitters (see opt.alt.GRW etc.).
    #   silent: if FALSE, print a results summary table.
    #   wts:    "AICc" (default) to base Akaike weights on AICc, anything
    #           else to use plain AIC.
    #
    # Returns: a list with named vectors aic, aicc, logl, the concatenated
    # parameter estimates (hats) and the Akaike weights (ak.wts).
    mn<- c("GRW", "URW", "Stasis")
    m.grw <- opt.alt.GRW(y, pool = pool)
    m.urw <- opt.alt.URW(y, pool = pool)
    m.st <- opt.alt.Stasis(y, pool = pool)
    aic <- c(m.grw$AIC, m.urw$AIC, m.st$AIC)
    aicc <- c(m.grw$AICc, m.urw$AICc, m.st$AICc)
    logl <- c(m.grw$value, m.urw$value, m.st$value)
    hats <- c(m.grw$par, m.urw$par, m.st$par)
    # Akaike weights from AICc (default) or plain AIC.
    if (wts == "AICc")
        ak.wts <- akaike.wts(aicc)
    else ak.wts <- akaike.wts(aic)
    names(aic)<- names(aicc)<- names(logl)<- names(ak.wts)<- mn
    w <- list(aic = aic, aicc = aicc, logl = logl, hats = hats,
        ak.wts = ak.wts)
    if (!silent) {
        cat("Results Summary:\n\n")
        rt <- cbind(logl, aic, aicc, ak.wts)
        row.names(rt) <- mn
        print(rt)
        cat("\n\nParameter estimates: \n")
        print(hats)
        # Bug fix: previously the fitted results were returned only when
        # silent = TRUE; here they are always returned (invisibly in the
        # printing branch so the console summary is not duplicated).
        return(invisible(w))
    }
    w
}
|
7b2fda500f07feee1227a463ffcb3f6f3254ef6d
|
33efeec39033156d7b598f8989f82fcf810db812
|
/man/query_pa_dist_sub.Rd
|
8c224b84cbe9cd23316c58f775e990d5861d65bd
|
[] |
no_license
|
johnchower/oneD7
|
76b4712de0bb89fa70246880b69d7c9a1d90a7fa
|
0ffcf86db58ddbe80330ac5185a7fc14c355545e
|
refs/heads/master
| 2021-01-11T20:39:07.943924
| 2017-03-08T23:11:30
| 2017-03-08T23:11:30
| 79,161,189
| 0
| 0
| null | 2017-02-23T19:02:14
| 2017-01-16T21:28:54
|
R
|
UTF-8
|
R
| false
| true
| 525
|
rd
|
query_pa_dist_sub.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_doc.r
\docType{data}
\name{query_pa_dist_sub}
\alias{query_pa_dist_sub}
\title{A string containing the platform action distribution query with a placeholder for
subsetting on users and a relative time frame.}
\format{A length-one character vector.}
\usage{
query_pa_dist_sub
}
\description{
A string containing the platform action distribution query with a placeholder for
subsetting on users and a relative time frame.
}
\keyword{datasets}
|
6d24a810392b85164532c63c958bb49d1875648e
|
d05015051ae43528589a2a8dd61c72c45e333805
|
/inst/shiny-app/server.R
|
9025e63f3f2f55020d0c2b6f6ecb2af4a87db005
|
[
"MIT"
] |
permissive
|
wrightrc/r1001genomes
|
c2658de1f0be024c5fab2be8a7e97b14b776d284
|
2efddf207ff532ac390d49af7d70cd5266aeeb20
|
refs/heads/master
| 2021-06-03T09:20:03.086774
| 2019-10-31T03:28:37
| 2019-10-31T03:28:37
| 115,557,446
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,518
|
r
|
server.R
|
# Server =================================================================
library(shiny)
library(biomaRt)
library(leaflet)
library(RColorBrewer)
library(r1001genomes)
library(knitr)
library(stringr)
library(DECIPHER)
library(ggseqlogo)
library(shinyBS)
library(ggplot2)
library(ggpmisc)
library(dplyr)
library(cowplot)
library(viridis)
library(gginnards)
# Extract every Arabidopsis AGI locus identifier (e.g. "AT3G62980") found
# in the pasted text; returns character(0) when none are present.
parseInput <- function (textIn) {
  agi_pattern <- "AT[1-5]G[0-9]{5}"
  regmatches(textIn, gregexpr(agi_pattern, textIn))[[1]]
}
# Split a comma-separated filter string into individual tokens, dropping
# any stray spaces left around them.
parseFilterText <- function (textIn) {
  tokens <- strsplit(textIn, ", ")[[1]]
  gsub(" ", "", tokens)
}
enableBookmarking(store = "url")
server <- function(input, output, session){
##
## ------------------------------------------------------------------------
##
## INPUT AREA #############
#### tab1.buttons ####
tab1.buttons <- reactiveValues(last_button="none pressed", total_presses=0)
observeEvent(input$STATS_submit,{
if (input$STATS_submit > 0){
tab1.buttons$last_button <- "STATS_submit"
tab1.buttons$total_presses <- tab1.buttons$total_presses + 1
}
})
observeEvent(input$file_submit,{
if (input$file_submit > 0){
tab1.buttons$last_button <- "file_submit"
tab1.buttons$total_presses <- tab1.buttons$total_presses + 1
}
})
#### all.Genes ####
all.Genes <- eventReactive({tab1.buttons$total_presses},{
req(tab1.buttons$last_button!="none pressed")
if (tab1.buttons$last_button == "file_submit"){
genes <- geneInfoFromFile(input$genesFile$datapath, source="araport11")
req(genes != FALSE)
return(genes)
}
if (input$STATS_quick_demo){
names <- c("AT3G62980", "AT3G26810")
genes <- getGeneInfo(names, source="araport11")
req(genes != FALSE)
return(genes)
}
# list of genes for tab 1, updated on pressing submit button
names <- parseInput(input$gene_ids)
genes <- getGeneInfo(names, source="araport11")
req(genes != FALSE)
return(genes)
})
#### all.GeneChoices ####
all.GeneChoices <- reactive({
# displayNames <- paste(all.Genes()$transcript_ID, " (", all.Genes()$tair_symbol, ")", sep="" )
# displayNames <- gsub(" \\(\\)", displayNames, replacement="") # if no tair symbol, remove empty parens.
displayNames <- paste(all.Genes()$tair_symbol, " (", all.Genes()$transcript_ID, ")", sep="")
output <- all.Genes()$transcript_ID
names(output) <- displayNames
return(output)
})
#### anno_df ####
anno_df <- eventReactive(input$annoSubmit,{
anno_df <- readAnnotationFile(input$annoFile$datapath, gene_info = all.Genes())
return(anno_df)
})
#### annoTemplateDownload ####
output$annoTemplateDownload <- downloadHandler(
filename="annotations_template.csv",
content = function(file) {
file.copy("annotations_template.csv", file)
}
)
#### all.VCFList ####
all.VCFList <- reactive({
if(isolate(input$STATS_quick_demo) & (tab1.buttons$last_button == "STATS_submit")) {
all.Genes() # DO NOT DELETE this is here to make all.VCFList update after unchecking quickdemo
return(readRDS(file = system.file("shiny-app", "demo_VCFs.rds",
package = "r1001genomes")))
}
withProgress(message="downloading data from 1001genomes.org",
detail="this will take a while, progress bar will not move",
value=0.3, {
output <- VCFList(all.Genes())
setProgress(value=0.7, message="downloading complete, processing data...",
detail="Parsing EFF field")
output <- llply(output, parseEFF)
setProgress(value=0.9, message=NULL,
detail="Calculating nucleotide diversity")
output <- llply(output, Nucleotide_diversity)
output <- llply(output, addAccDetails)
setProgress(value=1)
})
return(output)
})
## _________
## / tab1 \
## --------------------------------------------------
## Tab 1 ####################
#### tab1.genes_table ####
output$tab1.genes_table <- DT::renderDataTable(DT::datatable(all.Genes()[, -c(5,6,7,10)], colnames = c("tair locus", "symbol", "transcript", "Chr", "transcript \nstart", "transcript \nend", "transcript \nlength"), rownames = FALSE, options=list(paging=FALSE, searching=FALSE)))
#### tab1.nonUniqueVariants ####
tab1.nonUniqueVariants <- eventReactive({all.VCFList()},{
req(isolate(tab1.buttons$last_button)!="none pressed")
ldply(all.VCFList(), variantCounts, unique=FALSE, .id="transcript_ID")
})
#### tab1.uniqueVariants ####
tab1.uniqueVariants <- eventReactive({all.VCFList()},{
req(isolate(tab1.buttons$last_button)!="none pressed")
ldply(all.VCFList(), variantCounts, unique=TRUE, .id="transcript_ID")
})
#### tab1.divStats ####
tab1.divStats <- eventReactive({all.VCFList()},{
req(isolate(tab1.buttons$last_button)!="none pressed")
ldply(all.VCFList(), diversityStats, geneInfo=isolate(all.Genes()), .id="transcript_ID")
})
#### SNPStats ####
SNPStats <- reactive({
req(isolate(tab1.buttons$last_button)!="none pressed")
# rename column names on unique variant counts.
uniqueVariantsRenamed <- tab1.uniqueVariants()
colnames(uniqueVariantsRenamed) <- paste(colnames(uniqueVariantsRenamed),
"unique", sep="_")
cbind(tab1.nonUniqueVariants(), uniqueVariantsRenamed[, -1], tab1.divStats()[, -1])
})
#### tab1.SNPcounts ####
output$tab1.SNPcounts <- DT::renderDataTable({
table <- tab1.nonUniqueVariants()
colnames(table) <- c("transcript", "symbol", "5' UTR", "intron", "3' UTR",
"coding \n synonymous", "coding \n missense",
"stop\ngained", "frameshift\nvariant",
"upstream", "coding \n total")
table <- table[,c(TRUE,TRUE, colSums(table[,3:11])!=0)] # remove columns with all zeros
table <- DT::datatable(table,rownames = FALSE, options=list(paging=FALSE, searching=FALSE))
return(table)
})
#### tab1.SNPcountsUnique ####
output$tab1.SNPcountsUnique <- DT::renderDataTable({
table <- tab1.uniqueVariants()
colnames(table) <- c("transcript", "symbol", "5' UTR", "intron", "3' UTR",
"coding \n synonymous", "coding \n missense",
"stop\ngained", "frameshift\nvariant",
"upstream", "coding \n total")
table <- table[,c(TRUE,TRUE, colSums(table[,3:11])!=0)] # remove columns with all zeros
table <- DT::datatable(table,rownames = FALSE, options=list(paging=FALSE, searching=FALSE))
return(table)
})
#### tab1.Diversity_table ####
output$tab1.Diversity_table <- DT::renderDataTable(
DT::formatRound(DT::datatable(tab1.divStats(),
#
colnames = c("transcript",
"symbol",
"π<sub>N</sub>",
"π<sub>S</sub>",
"π<sub>N</sub>/π<sub>S</sub>",
"π coding",
"π transcript"),
rownames = FALSE, escape = FALSE,
options = list(paging=FALSE, searching=FALSE)),
columns = 2:7, digits = 6))
output$tab1.downloadStats <- downloadHandler(
filename=function(){
paste("SNPStats-", Sys.time(), ".csv", sep="")
},
content = function(file) {
write.csv(SNPStats(), file, row.names=FALSE)
}
)
#### tab1.downloadGeneInfo ####
output$tab1.downloadGeneInfo <- downloadHandler(
filename=function(){
paste("GeneInfo-", Sys.time(), ".csv", sep="")
},
content = function(file) {
write.csv(all.Genes(), file, row.names=FALSE)
}
)
## _________
## / tab2 \
## --------------- -------------------------------------
## Tab 2 ###################
#### tab2.selectGene ####
# Single-transcript selector plus Submit button for the diversity-plot tab.
output$tab2.selectGene <- renderUI({
  tagList(
    selectInput("tab2.transcript_ID", label=NULL, choices=all.GeneChoices()),
    actionButton(inputId="tab2.Submit", label = "Submit")
  )
})
#### tab2.Genes ####
tab2.Genes <- eventReactive(input$tab2.Submit, {
  #gene Info for gene on tab 2, updates on 'submit' button press
  # names <- parseInput(input$plotGene)
  # genes <- getGeneInfo(names[1])
  # return(genes)
  return(all.Genes()[ all.Genes()$transcript_ID == input$tab2.transcript_ID,])
})
#### tab2.gene_table ####
# One-row summary table for the selected gene; columns 5,6,7,10 are dropped
# (internal fields not meant for display).
output$tab2.gene_table <- DT::renderDataTable(DT::datatable(tab2.Genes()[, -c(5,6,7,10)], colnames = c("tair locus", "symbol", "transcript", "Chr", "transcript \nstart", "transcript \nend", "transcript \nlength"), rownames = FALSE, options=list(paging=FALSE, searching=FALSE)))
#rendered table of Gene info
#### tab2.tableData ####
#tab2.tableData <- reactive({load_tab_2_Data(tab2.Genes())})
#SNP reactive data
# Coding variants (with diversity values) for the selected transcript; only
# recomputed on Submit.
tab2.tableData <- eventReactive(input$tab2.Submit, {
  tab2data <- all.VCFList()[[input$tab2.transcript_ID]]
  coding_variants <- getCodingDiv(tab2data)
  return(coding_variants)
})
#### Diversity_Table ####
output$Diversity_Table <- DT::renderDataTable(tab2.tableData())
#render table of diversity data
# Timestamped CSV download of the coding-variant table above.
output$tab2.downloadSNPData <- downloadHandler(
  filename=function(){
    paste("SNPData-", Sys.time(), ".csv", sep="")
  },
  content = function(file) {
    write.csv(tab2.tableData(), file, row.names=FALSE)
  }
)
#### diversityPlot ####
# Diversity-vs-codon scatter plot. If a user annotation file was uploaded,
# domain ranges (light rects) and single positions (darker 1-codon rects) are
# layered UNDER the points via gginnards::append_layers(position = "bottom").
output$diversityPlot <- renderPlot({
  p <- plotCodingDiv(uniqueCodingVars = tab2.tableData())
  if(!is.null(input$annoFile)){
    p <- append_layers(p,list(
      geom_rect(data = subset(anno_df()$domains,
                              transcript_ID == input$tab2.transcript_ID),
                mapping = aes(xmin = as.integer(start),
                              xmax = as.integer(end),
                              fill = annotation),
                ymin = -Inf, ymax = Inf, inherit.aes = FALSE, alpha = 0.2),
      geom_rect(data = subset(anno_df()$positions,
                              transcript_ID == input$tab2.transcript_ID),
                mapping = aes(xmin = as.integer(position)-0.5,
                              xmax = as.integer(position)+0.5,
                              fill = annotation),
                ymin = -Inf, ymax = Inf, inherit.aes = FALSE, alpha = 0.8)),
      position = "bottom")
  }
  return(p)
})
#### tab2.hover ####
# Tooltip for the diversity plot: finds the nearest point to the cursor and
# renders a floating wellPanel next to it.
output$tab2.hover <- renderUI({
  hover <- input$div_plot_hover
  point <- nearPoints(tab2.tableData(), hover, "Codon_Number", "Diversity", maxpoints=1)
  if (nrow(point) == 0) return(NULL)
  # calculate point position INSIDE the image as percent of total dimensions
  # from left (horizontal) and from top (vertical)
  left_pct <- (hover$x - hover$domain$left) / (hover$domain$right - hover$domain$left)
  # log10(hover$y): presumably the plot's y axis is log10-scaled, so the hover
  # y must be transformed to domain coordinates -- TODO confirm in plotCodingDiv
  top_pct <- (hover$domain$top - log10(hover$y)) / (hover$domain$top - hover$domain$bottom)
  # calculate distance from left and bottom side of the picture in pixels
  left_px <- hover$range$left + left_pct * (hover$range$right - hover$range$left)
  top_px <- hover$range$top + top_pct * (hover$range$bottom - hover$range$top)
  # create style property for tooltip
  # background color is set so tooltip is a bit transparent
  # z-index is set so we are sure are tooltip will be on top
  style <- paste0("position:absolute; z-index:100;
                  background-color: rgba(245, 245, 245, 0.85); ",
                  "left:", left_px + 2, "px; top:", top_px + 2, "px;")
  wellPanel(style=style,
            p(HTML(paste0("<b>Codon: </b>", point$Codon_Number, "<br/>",
                          "<b>A_A_Change: </b>", point$Amino_Acid_Change, "<br/>",
                          "<b>Effect: </b>", point$Effect, "<br/>",
                          "<b>Diversity: </b>", point$Diversity)))
  )
})
#### info ####
# Prints the rows currently captured by a click-drag brush on the plot.
output$info <- renderPrint({
  brushedPoints(tab2.tableData(), input$plot_brush, "Codon_Number", "Diversity")
})
#### annotations
## _________
## / tab3 \
## -------------------------- ----------------------------
## Tab 3 ##################################
#### tab3.selectGene ####
# Multi-transcript checkbox selector plus Submit button for the map tab.
output$tab3.selectGene <- renderUI({
  tagList(
    checkboxGroupInput("tab3.transcript_ID", label=NULL, choices=all.GeneChoices()),
    actionButton(inputId="tab3.Submit", label = "Submit")
  )
})
#### tab3.Genes ####
tab3.Genes <- eventReactive(input$tab3.Submit, {
  #gene Info for gene on tab 3, updates on 'submit' button press
  return(all.Genes()[ all.Genes()$transcript_ID %in% input$tab3.transcript_ID,])
})
#### tab3.tidyData ####
# Long-format variant data for all selected transcripts, reference genotypes
# (0|0) removed so only accessions carrying an alternate allele remain.
tab3.tidyData <- eventReactive(input$tab3.Submit, {
  data <- ldply(all.VCFList()[tab3.Genes()$transcript_ID])
  # remove 0|0 genotypes
  data <- data[data$gt_GT != "0|0",]
  return(data)
})
#### tab3.EffectValues ####
# Maps the SNP-type radio button to the set of Effect strings to keep.
tab3.EffectValues <- reactive({
  # effects <- c("5_prime_UTR_variant",
  #              "intron_variant",
  #              "3_prime_UTR_variant",
  #              "synonymous_variant",
  #              "missense_variant",
  #              "upstream_gene_variant",
  #              "downstream_gene_variant")
  effects <- unique(tab3.tidyData()$Effect)
  return( switch(input$tab3.SNPtype,
                 "All"=effects,
                 "Missense"="missense_variant",
                 "Coding"= c("missense_variant", "synonymous_variant"))
  )
})
#### tab3.debug ####
output$tab3.debug <- renderPrint({
  # temporary debug output
  print(input$tab3.filter_value)
})
#### tab3.filteredByDiv ####
# Keeps all rows at positions whose diversity falls inside the (log10) slider
# range, after first restricting to the selected effect types.
tab3.filteredByDiv <- reactive({
  # filter by diversity slider and SNP type radio button then add SNPs column
  data <- tab3.tidyData()
  # filter by effect type (all, coding, or missense)
  data2 <- data[data$Effect %in% tab3.EffectValues(), ]
  # filter on positions with diversity greater than or equal to the 10^slider value
  keyPOS <- unique(data2[which(data2$Diversity >= 10^input$tab3.filter_value[1] &
                                 data2$Diversity <= 10^input$tab3.filter_value[2]), "POS"])
  keydata <- data[data$POS %in% keyPOS, ]
  return(keydata)
})
#### tab3.mutationList ####
# Unique "[transcript|AA change]" labels available for the allele checkboxes.
tab3.mutationList <- reactive({
  mutList <- labelBySNPs(tab3.filteredByDiv(), collapse=FALSE)$SNPs
  mutList <- unique(mutList[!is.na(mutList)])
  return(mutList)
})
#### tab3.mutation_checkbox ####
# Allele-selection checkbox panel plus the Update Map button.
output$tab3.mutation_checkbox <- renderUI({
  tagList(
    tags$div(class="input-format",
             tags$h3("Allele selection"),
             tags$h5("Select the alleles you want to see on the map by clicking the checkboxes"),
             tags$div(class="checkbox-format",
                      checkboxGroupInput("tab3.allele_select", "select_alleles to display", choices=tab3.mutationList())
             ),
             actionButton(inputId="tab3.update_map", label = "Update Map")
    )
  )
})
#### tab3.labeled ####
tab3.labeled <- eventReactive(input$tab3.update_map, {
  # a dataframe with a single row per accession, containing accession info,
  # start with the data filtered by the diversity slider and type buttons
  data <- tab3.filteredByDiv()
  # label by SNPs creates column SNPs with text strings formatted [transcriptID|AA_Change]
  data <- labelBySNPs(data, collapse=FALSE)
  # filter on selected SNPs
  data <- data[data$SNPs %in% input$tab3.allele_select, ]
  # combine mutations to single row (this is slow)
  data <- ddply(data, "Indiv", summarise, SNPs=paste(SNPs, collapse=","))
  # add back ecotype details
  data <- addAccDetails(data, allAccs=TRUE)
  return(data)
})
#### tab3.map ####
# Leaflet map of accessions: grey markers = accessions without any selected
# allele (drawn first, so they sit under the coloured ones); one toggleable
# layer per allele combination, coloured by a Set1-based palette.
output$tab3.map <- renderLeaflet({
  mapdata <- tab3.labeled()
  # Reorganize to plot NA's underneath non NA's
  mapdata <- rbind(mapdata[is.na(mapdata$SNPs), ], mapdata[!is.na(mapdata$SNPs), ])
  # make a field with text to be displayed when clicking on a marker
  mapdata$popup <- paste("EcoID:", mapdata$Indiv,"Name:", mapdata$Name, " SNPs:", mapdata$SNPs)
  # create the color pallet for the map points
  pal <- brewer.pal(8, "Set1")
  pallet <- colorFactor(palette=pal, domain=mapdata$SNPs)
  # create a new leaflet map
  map <- leaflet()
  map <- addProviderTiles(map, providers$Stamen.TonerLite,
                          options = providerTileOptions(noWrap = TRUE))
  # groupnames to be used by draw groups of points as separate layers below
  groupnames <- unique(mapdata$SNPs)
  groupnames <- groupnames[!is.na(groupnames)]
  # add markers for NA points first so they are furthest back layer
  map <- addCircleMarkers(map, data=mapdata[is.na(mapdata$SNPs), ], color= "#9b9b9b", group="NA",
                          radius=6, popup= ~popup, stroke=FALSE, fillOpacity=0.6)
  # for each of the group names, add a set of markers
  for (SNP in groupnames){
    map <- addCircleMarkers(map, data=mapdata[mapdata$SNPs == SNP, ], color= ~pallet(SNPs), group= SNP,
                            radius=6, popup= ~popup, stroke=FALSE, fillOpacity=0.85)
  }
  # add the legend to the map
  map <- addLegend(map, position="bottomright", pal=pallet,
                   values=mapdata$SNPs, title="Marker Colors", opacity=1)
  # add layer control to map to turn on or off groups of points
  map <- addLayersControl(map, overlayGroups=c(groupnames, "NA"),
                          options = layersControlOptions(collapsed = TRUE),
                          position="bottomleft")
  return(map)
})
#### tab3.dataTable ####
output$tab3.dataTable <- DT::renderDataTable(tab3.labeled())
#### tab3.downloadMapData ####
# Timestamped CSV download of the per-accession map data.
output$tab3.downloadMapData <- downloadHandler(
  filename=function(){
    paste("MapData-", Sys.time(), ".csv", sep="")
  },
  content = function(file) {
    write.csv(tab3.labeled(), file, row.names=FALSE)
  }
)
## _________
## / tab4 \
## -------------------------------------- ----------------
## Tab 4 #####################
#### tab4.selectGene ####
# Multi-transcript checkbox selector plus Submit button for the filter tab.
output$tab4.selectGene <- renderUI({
  tagList(
    checkboxGroupInput("tab4.transcript_ID", label=NULL, choices=all.GeneChoices()),
    actionButton(inputId="tab4.Submit", label = "Submit")
  )
})
#### tab4.Genes ####
tab4.Genes <- eventReactive(input$tab4.Submit, {
  #gene Info for gene on tab 3, updates on 'submit' button press
  return(all.Genes()[ all.Genes()$transcript_ID %in% input$tab4.transcript_ID,])
})
#### tab4.tidyData ####
# Combined variant table for the selected transcripts with internal columns
# dropped and the remainder reordered per filterTab.allCols.
tab4.tidyData <- eventReactive(input$tab4.Submit, {
  data <- ldply(all.VCFList()[tab4.Genes()$transcript_ID])
  data <- subset(data, select=-c(EFF, Transcript_ID, ID, FILTER ))
  data <- data[,filterTab.allCols]
  return(data)
})
#### tab4.textFilters ####
# Two text-match filters: each row pairs a column choice with the parsed set
# of accepted values (values wrapped with I() so the list survives data.frame).
tab4.textFilters <- reactive({
  textFilters <- data.frame("filterID" = c("filter1", "filter2"),
                            "column" = c(input$tab4.filter1.column, input$tab4.filter2.column),
                            "values" = I(list(parseFilterText(input$tab4.filter1.textIn),
                                              parseFilterText(input$tab4.filter2.textIn))),
                            stringsAsFactors=FALSE)
})
#### tab4.numFilters ####
# Two numeric range filters: column choice, max, min, and whether rows with
# missing values in that column should be kept.
tab4.numFilters <- reactive({
  numFilters <- data.frame("filterID" = c("filter3", "filter4"),
                           "column" = c(input$tab4.filter3.column, input$tab4.filter4.column),
                           "max" = c(input$tab4.filter3.max, input$tab4.filter4.max),
                           "min" = c(input$tab4.filter3.min, input$tab4.filter4.min),
                           "missing" = c(input$tab4.filter3.missing, input$tab4.filter4.missing),
                           stringsAsFactors=FALSE)
})
#### tab4.filteredVariants ####
# Applies the reference-genotype toggle, both text filters, then both numeric
# filters in sequence. Only recomputed when the Update button is pressed.
tab4.filteredVariants <- eventReactive(input$tab4.updateFilter,{
  # add all filtering here.
  data <- tab4.tidyData()
  if (input$tab4.filterRef) {
    # remove 0|0 genotypes
    data <- data[data$gt_GT != "0|0",]
  }
  for (i in 1:nrow(tab4.textFilters())){
    if (length(tab4.textFilters()[i,"values"][[1]]) > 0) {
      data <- data[as.character(data[, tab4.textFilters()[i, "column"]]) %in%  tab4.textFilters()[i, "values"][[1]] , ]
    }
  }
  for (i in 1:nrow(tab4.numFilters())){
    naRows <- data[is.na(data[, tab4.numFilters()[i, "column"]]) , ]
    # remove NA rows to avoid issues with logical operators
    data <- data[!is.na(data[, tab4.numFilters()[i, "column"]]) , ]
    if (!is.na(tab4.numFilters()[i, "max"])){
      data <- data[ data[, tab4.numFilters()[i, "column"]] <= tab4.numFilters()[i, "max"], ]
    }
    if (!is.na(tab4.numFilters()[i, "min"])){
      data <- data[ data[, tab4.numFilters()[i, "column"]] >= tab4.numFilters()[i, "min"], ]
    }
    if (tab4.numFilters()[i,"missing"]){
      # add back NA rows if checkbox checked
      data <- rbind(data, naRows)
    }
  }
  return(data)
})
#### tab4.debug ####
output$tab4.debug <- renderPrint({
  print(tab4.numFilters())
})
#### tab4.variantTable ####
output$tab4.variantTable <- DT::renderDataTable(tab4.filteredVariants())
#### tab4.downloadVariantTable ####
# Timestamped CSV download of the filtered variant table.
output$tab4.downloadVariantTable <- downloadHandler(
  filename=function(){
    paste("VariantTable-", Sys.time(), ".csv", sep="")
  },
  content = function(file) {
    write.csv(tab4.filteredVariants(), file, row.names=FALSE)
  }
)
## _________
## / tab5 \
## -------------------------------------- ----------------
## Tab 5 #########################
#### tab5.selectGene ####
# Alignment-tab controls: transcript checkboxes, Submit, primary-transcript
# toggle, and DNA/AA alignment-type radio buttons.
output$tab5.selectGene <- renderUI({
  tagList(
    checkboxGroupInput("tab5.transcript_ID",
                       label=NULL, choices=all.GeneChoices()),
    actionButton(inputId="tab5.Submit", label = "Submit"),
    checkboxInput(inputId = "tab5.primary_transcript",
                  label = "Primary transcripts only?",
                  value = TRUE),
    radioButtons(inputId = "tab5.type",
                 label = "Alignment type:",
                 choices = c("DNA", "AA"),
                 selected = "AA", inline = TRUE)
  )
})
#### tab5.Genes ####
tab5.Genes <- eventReactive(input$tab5.Submit, {
  #gene Info for gene on tab 5, updates on 'submit' button press
  return(input$tab5.transcript_ID)
})
#### debug ####
output$tab5.debug <- renderPrint({
  aln_df()})
#### type ####
# Index into the two-element alignment list: [[1]] = DNA, [[2]] = AA.
type <- reactive({
  return(switch(input$tab5.type, "AA" = 2, "DNA" = 1))
})
#### alignment ####
# Aligns the coding sequences of the selected transcripts on Submit.
# Note: `all = {if(...) FALSE else TRUE}` is just !input$tab5.primary_transcript.
alignment <- eventReactive(input$tab5.Submit, {
  alignment <- alignCDS(IDs = tab5.Genes(), primary_only = input$tab5.primary_transcript, all = {if(input$tab5.primary_transcript) FALSE else TRUE})
  return(alignment)
})
#### tab5.alignment ####
# output$tab5.alignment <- renderMsaR({
#   msaR(alignment()[[type]], alignmentHeight = 100,
#        colorscheme = {if(type) "taylor" else "nucleotide"})
# })
#### tab5.BrowseSeqs ####
# output$tab5.BrowseSeqs <- reactive({
#   file <- BrowseSeqs(alignment()[[type + 1]],
#                      openURL = FALSE)
#   html <- paste(readLines(file), collapse="\n")
#   return(html)
# })
#### aln_df ####
# Long-format alignment data frame: one row per (sequence, position), with
# SNPs attached, gene symbols joined in, and positions split into 80-column
# chunks for faceted plotting. Sequence names switch to the TAIR symbol when
# one is available.
aln_df <- reactive({
  aln_df <- makeAlnDF(alignment()[[type()]])
  vcf <- ldply(.data = all.VCFList()[input$tab5.transcript_ID],
               .fun = subset, !is.na(Transcript_ID) & gt_GT != "0|0")
  vcf <- getCodingDiv(vcf)
  aln_df <- addSNPsToAlnDF(aln_df, vcf)
  aln_df <- left_join(aln_df, dplyr::select(all.Genes(), "tair_locus",
                                            "tair_symbol", "transcript_ID"),
                      by = c("transcript_ID" = "transcript_ID"))
  ## chunk up aln_df
  aln_df <- chunkAlnDF(aln_df, chunk_width = 80)
  aln_df$seq_name <- as.character(aln_df$seq_name)
  aln_df$seq_name[!is.na(aln_df$tair_symbol)] <- aln_df$tair_symbol[!is.na(aln_df$tair_symbol)]
  aln_df$seq_name <- as.factor(aln_df$seq_name)
  print(aln_df)
  return(aln_df)
})
#### tab5.aln_anno ####
# Converts uploaded annotations to alignment coordinates and chunks them to
# match aln_df. seq_name levels must mirror aln_df's y axis, so they are built
# from transcript IDs or TAIR symbols depending on the primary-transcript
# toggle.
# NOTE(review): the is.null() test checks whether the input exists, not its
# value -- possibly intended as !input$tab5.primary_transcript; confirm.
tab5.aln_anno <- reactive({
  ## read in annotation
  anno_df <- anno_df()
  anno_df <- addAlnPosToAnno(anno_df, aln_df())
  #print(anno_df)
  ## make chunks from aln_df
  chunks <- makeChunksDF(aln_df())
  ## chunk up annotations
  print(chunks)
  anno_df <- chunkAnnotation(anno_df, chunks)
  if(is.null(input$tab5.primary_transcript)) {
    anno_df$domains$seq_name <- factor(anno_df$domains$transcript_ID,
                                       levels = levels(as.factor(aln_df()$transcript_ID)))
    anno_df$positions$seq_name <-
      factor(anno_df$positions$transcript_ID,
             levels = levels(as.factor(aln_df()$transcript_ID)))}
  else {
    anno_df$domains$seq_name <- factor(anno_df$domains$tair_symbol,
                                       levels = levels(as.factor(aln_df()$tair_symbol)))
    anno_df$positions$seq_name <- factor(anno_df$positions$tair_symbol,
                                         levels = levels(as.factor(aln_df()$tair_symbol)))
  }
  print(anno_df)
  return(anno_df)
})
#### aln_plot_height ####
# Pixel height for the alignment plot, scaled by sequence and chunk counts.
# Coefficients appear empirically tuned -- adjust with care.
aln_plot_height <- reactive({
  N <- length(unique(aln_df()$seq_name))
  chunks <- length(unique(aln_df()$chunk))
  height <- 262 + 1.14*N + 19*chunks + 10*N*chunks
  return(ceiling(height))
}
)
#### tab5.aln_plot ####
# Text-alignment plot: one residue per tile, SNP effects as fill, optional
# annotation outlines (domains as rects, single sites as tiles), faceted by
# 80-column chunk.
tab5.aln_plot <- reactive({
  p <-ggplot(aln_df(), aes(x = aln_pos, y = seq_name,
                           group = seq_pos, text = variants))
  if(!is.null(input$annoFile)) p <- p +
      geom_rect(data = tab5.aln_anno()$domains,
                mapping = aes(xmin = start_aln_pos - 0.5,
                              xmax = end_aln_pos + 0.5,
                              color = annotation,
                              ymin = as.numeric(seq_name)-0.5,
                              ymax = as.numeric(seq_name)+0.5),
                inherit.aes = FALSE, fill = NA, size = 1.2, alpha = 0.5) +
      geom_tile(data = tab5.aln_anno()$positions,
                mapping = aes(x = aln_pos, y = seq_name, color = annotation),
                width = 1, height = 1,
                fill = NA, size = 1.2, alpha = 0.5, inherit.aes = FALSE)
  p <- p +
    geom_tile(data = na.omit(aln_df()), mapping = aes(fill = effects),
              width = 1, height = 1, alpha = 0.8) +
    geom_text(aes(label=letter), alpha= 1, family = "Courier") +
    scale_x_continuous(breaks=seq(1,max(aln_df()$aln_pos), by = 10)) +
    scale_y_discrete() +
    # expand increases distance from axis
    xlab("") +
    ylab("") +
    theme_logo(base_family = "Helvetica") +
    theme(panel.grid = element_blank(), panel.grid.minor = element_blank()) +
    facet_wrap(facets = ~chunk, ncol = 1, scales = "free") +
    theme(strip.background = element_blank(),
          strip.text.x = element_blank(),
          legend.box = "vertical") +
    scale_fill_viridis(option = "A", discrete = TRUE)
  p})
# Plot is rendered without its legend; the legend is rendered separately below.
output$tab5.aln_plot <- renderPlot(expr = tab5.aln_plot() +
                                     theme(legend.position = "none"),
                                   res = 100)
tab5.aln_plot_legend <- reactive({
  get_legend(tab5.aln_plot())
})
output$tab5.aln_plot_legend <- renderPlot(plot_grid(tab5.aln_plot_legend()),
                                          res = 100)
#### plot.ui ####
# Wraps the plot in a UI element so its height can be set reactively and a
# debounced hover input attached.
output$plot.ui <- renderUI({
  plotOutput('tab5.aln_plot', height = aln_plot_height(),
             hover = hoverOpts("plot_hover", delay = 100,
                               delayType = "debounce"))
})
#### aln_plot_hover ####
# Tooltip for the alignment plot: shows symbol/transcript/position/variants of
# the residue nearest the cursor. The /2.17 and /2.16 divisors look empirically
# tuned to the res = 100 rendering -- confirm before changing.
output$aln_plot_hover <- renderUI({
  hover <- input$plot_hover
  point <- nearPoints(aln_df(), coordinfo = hover, xvar = "aln_pos",
                      yvar = "seq_name", panelvar1 = "chunk", threshold = 8,
                      maxpoints = 1, addDist = TRUE)
  if (nrow(point) == 0) return(NULL)
  # # calculate point position INSIDE the image as percent of total dimensions
  # # from left (horizontal) and from top (vertical)
  # # https://gallery.shinyapps.io/093-plot-interaction-basic/
  # # Range and coords_img seem to match up
  # print(paste("hover$coords_img:", hover$coords_img))
  # print(paste("hover$range:", hover$range))
  #
  left_pct <- (hover$coords_img$x- hover$range$left) /
    (hover$range$right - hover$range$left)
  # print(left_pct)
  # top_pct <- (hover$coords_img$y - hover$range$top) /
  #   (hover$range$bottom - hover$range$top)
  # print(top_pct)
  #
  # # calculate distance from left and bottom side of the picture in pixels
  # left_px <- hover$range$left + left_pct *
  #   (hover$range$right - hover$range$left)
  # print(left_px)
  # right_px <- (1-left_pct) *
  #   (hover$range$right - hover$range$left)
  # print(right_px)
  # top_px <- hover$range$top + top_pct *
  #   (hover$range$bottom - hover$range$top)
  # print(top_px)
  # create style property for tooltip
  # background color is set so tooltip is a bit transparent
  # z-index is set so we are sure are tooltip will be on top
  # Anchor the tooltip left of the cursor when near the right edge so it does
  # not overflow the plot.
  if(left_pct < .70)
    style <- paste0("position:absolute; z-index:100;
                    background-color: rgba(245, 245, 245, 0.85); ",
                    "left:", hover$coords_img$x/2.17,
                    "px; top:", hover$coords_img$y/2.17, "px;") else
                      style <- paste0("position:absolute; z-index:100;
                                      background-color: rgba(245, 245, 245, 0.85); ",
                                      "right:", (hover$range$right - hover$coords_img$x)/2.16, "px; top:",
                                      (hover$coords_img$y)/2.17, "px;")
  # actual tooltip created as wellPanel
  wellPanel(
    style = style,
    p(HTML(paste0("<b>symbol: </b>", point$seq_name, "<br/>",
                  "<b>transcript: </b>", point$transcript_ID, "<br/>",
                  "<b>seq_pos: </b>", point$seq_pos, "<br/>",
                  "<b>variants: </b>", point$variants)))
  )
})
}
|
9408b290436c14e8484ddcf9f89d8cb296c953c7
|
f69138fa69d215b67d8f5ca61a260a19549bf6d4
|
/kMeans_Temp_Work.r
|
fe3d166fe11977b4d291d1743b48c46afaea9cb1
|
[
"MIT"
] |
permissive
|
JamesRekow/Canine_Cohort_GLV_Model
|
302a6de25897dfd6f038f88def159c8bc4480db3
|
b7a3b167650471d0ae5356d1d2a036bde771778c
|
refs/heads/master
| 2021-09-10T06:43:39.086540
| 2018-03-21T19:07:41
| 2018-03-21T19:07:41
| 105,334,062
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 877
|
r
|
kMeans_Temp_Work.r
|
# James Rekow
# I just returned the fractured abdList in the beginning of the program
# Build abundance samples; marginalDissDensity is defined elsewhere -- assumes
# it returns a list of equal-length numeric abundance vectors (TODO confirm).
abdList = marginalDissDensity(M = 80, thresholdMult = 10 ^ (-3))
# convert abd list to a matrix, each row is an abundance vector, so the columns are the abundances of a
# given species
mat = Reduce(rbind, abdList)
# NOTE(review): h() is not defined until further down this script, so sourcing
# the file top-to-bottom errors here -- move this call below h's definition.
h(101)
f = function(x = NULL){
kmr = kmeans(x = mat, centers = 2, iter.max = 1000, nstart = 10)$cluster
if(kmr[1] == 2){
a = kmr == 2
kmr[a] = 1
kmr[!a] = 2
}
return(kmr)
}
# Run the k-means clustering n times and return the indices of samples whose
# modal (most frequent) cluster label across the n runs is 1.
g <- function(n) {
  runs <- replicate(n, f())          # one column of labels per run
  modal <- apply(runs, 1, numMode)   # per-sample modal label
  which(modal == 1)
}
# Count how many of samples 1:40 (a) and samples 41:80 (b) land in the set of
# samples that g(n) assigns to cluster 1. Returns c(a, b).
#
# Fix: the original computed gr <- g(n) and then called g(n) twice MORE when
# computing a and b. Since k-means (via f) is stochastic, those three calls can
# return different sets, making a and b inconsistent with each other and with
# gr. Reuse the single result.
h <- function(n) {
  gr <- g(n)
  a <- sum((1:40) %in% gr)
  b <- sum((41:80) %in% gr)
  c(a, b)
}
|
d2a284fd8ea9a706ece795188e6276239bc6db5e
|
ca07d4e442efc63098e21fd16f6a6ade693bceb6
|
/[AP] HDB Resale Flat Price Estimator App/Project-SA1-Team8/server.R
|
4134abc06f9dd9cfa7544fe282552ead1ddbd043
|
[] |
no_license
|
davidyoucm/projects
|
ff50d3a03bdf9e651de01d737750884a38fc2304
|
c0da6aa812e9873154e37a1298b7c4c228ac56a3
|
refs/heads/master
| 2023-03-09T18:44:49.297455
| 2021-02-24T17:45:17
| 2021-02-24T17:45:17
| 294,929,476
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36,789
|
r
|
server.R
|
library(shinydashboard)
library(leaflet)
library(shiny)
library(curl) # make the jsonlite suggested dependency explicit
library(geosphere)
library(ggmap)
# SECURITY NOTE(review): hard-coded Google Maps API key placeholder -- never
# commit a real key here; load it from an environment variable instead.
register_google("insert token here")
library(scales)
library(tidyverse)
library(rvest)
library(RCurl)
library(curl)
library(jsonlite)
library(XML)
library(broom)
library(plotly)
library(dplyr)
#install.packages('shinycssloaders')
library(shinycssloaders)
options(scipen=100000000)  # suppress scientific notation for price displays
# Load and prepare the HDB resale transaction data.
hdb_housing_data <- read.csv('hdb_final_with_num.csv')
hdb_housing_data$town <- as.factor(hdb_housing_data$town)
hdb_housing_data$flat_model <- as.factor(hdb_housing_data$flat_model)
hdb_housing_data$bedrooms <- as.numeric(hdb_housing_data$bedrooms)
#table(hdb_housing_data$bedrooms)
# Flat age in years as of 2020 (hard-coded reference year).
hdb_housing_data <- hdb_housing_data %>% mutate(age = 2020-lease_commence_date)
head(hdb_housing_data)
hdb_housing_data<- hdb_housing_data[-16]  # drop column 16 (the age column just added)
x <- hdb_housing_data %>% group_by(town) %>% summarise(mean(bedrooms))
#hdb_housing_data
towns <- as.character(x$town)
#hdb_housing_data
#names(hdb_housing_data) <- c("town", "flat_type", "flat_model", "floor_area_sqm", "street_name", "resale_price", "Year", "Month", "remaining_lease", "lease_commence_date", "Min_Storey", "Max_Storey", "_id", "block", "Planning_area", "Future_stations", "No_stations", "No_Future_stations", "Total_stations", "Operational_stations", "bedrooms", "years_remaining", "Floor", "Age") #Rename columns for readibility
hdb_housing_data$No_stations <- as.factor(hdb_housing_data$No_stations)
# Fit the pricing model, then drop high-influence observations (Cook's
# distance > 1e-5) and refit on the cleaned data.
price_estimate <- lm(resale_price~town+floor_area_sqm+years_remaining+Floor+bedrooms,hdb_housing_data)
myCDs <- rev(sort(cooks.distance(price_estimate)))
#summary(myCDs)
#names(myCDs)
#plot(price_estimate, pch =18, col = "red", which=c(4))
influential <- as.numeric(names(myCDs)[(myCDs > 0.00001)])
res <- hdb_housing_data[-influential,]
#hdb_housing_data
#res
price_estimate <- lm(resale_price~town+floor_area_sqm+years_remaining+Floor+bedrooms,res)
price_estimate2 <- lm(resale_price~town+floor_area_sqm+years_remaining+Floor+bedrooms+num,res)
price_estimate.res <- resid(price_estimate)
# Tidy coefficient table used by the estimator functions below (row 1 is the
# intercept; rows 2-26 are town coefficients; rows 27-30 the numeric terms).
regression <- tidy(price_estimate)
# Mean resale price per town, formatted as a "$x,xxx.xx" string for popups.
Av_Price <- hdb_housing_data %>% group_by(town) %>% summarise(mean=mean(resale_price))
#Av_Price$mean <- as.character(Av_Price$mean)
Av_Price$mean <- paste0('$',formatC(Av_Price$mean,digits=2,big.mark=',',format='f'))
# Predict a resale price from the fitted regression coefficients.
#
# town:     HDB town name (case-insensitive; "ANG MO KIO" is the model's base
#           category and therefore has no town coefficient).
# areas:    floor area in sqm.
# years:    years remaining on the lease.
# level:    floor number.
# bedrooms: number of bedrooms.
# Returns the predicted price, clamped to the observed range [140000, 1205000].
hdb_price_estimate <- function(town, areas, years, level, bedrooms) {
  town <- toupper(town)
  # Keep predictions inside the range of prices seen in the data.
  clamp <- function(p) min(max(p, 140000), 1205000)
  # Intercept plus the four numeric terms (rows 27-30 of the tidy table).
  base <- regression$estimate[1] +
    areas * regression$estimate[27] +
    years * regression$estimate[28] +
    level * regression$estimate[29] +
    bedrooms * regression$estimate[30]
  if (town == "ANG MO KIO") {
    # Base category: no town coefficient to add.
    # (Bug fix: this branch previously returned the raw prediction without the
    # clamping applied to every other town.)
    return(clamp(base))
  }
  # Look up the town's coefficient by matching its name in the term column,
  # e.g. "townBUKIT BATOK".
  area <- regression[grep(town, regression$term), ]
  clamp(base + area$estimate[1])
}
# Fraction of all flats in the dataset priced strictly below `price`,
# formatted as a percentage string with two decimals, e.g. "42.17%".
Percentage <- function(price){
  n_at_or_above <- nrow(hdb_housing_data %>% filter(resale_price >= price))
  frac_below <- 1 - n_at_or_above / nrow(hdb_housing_data)
  paste0(sprintf("%.2f", frac_below * 100), "%")
}
# Sorted vector of town names used to label the coordinate table.
x <- hdb_housing_data %>% group_by(town) %>% summarise(mean(bedrooms))
towns <- as.character(x$town)
towns <- list(towns)
towns <- str_sort(towns[[1]])
#towns
# NOTE(review): the next two lines are dead code -- towns_lat_lon is rebuilt
# from the CSV immediately below, discarding this data frame.
towns_lat_lon <- data.frame(towns, stringsAsFactors = FALSE)
towns_lat_lon$towns <- paste(towns_lat_lon$towns, "Singapore")
#towns_lat_lon
# Pre-geocoded town centroids (columns 2-4: name, lat, lon); town names are
# replaced with the sorted vector above, which assumes identical ordering --
# TODO confirm the CSV row order matches str_sort(towns).
towns_lat_lon <- read.csv("towns_lat_lon.csv")[c(2,3,4)]
towns_lat_lon <- towns_lat_lon %>% rename(town = towns)
towns_lat_lon$town <- towns
# Template for per-town estimates: 26 towns, price filled in later.
town_df <- data.frame(towns_lat_lon, estimated_price = rep(0,26))
# Predicted price for EVERY town (same flat characteristics), with lat/lon,
# so the result can be plotted on a leaflet map. Prices are returned as
# formatted "$x,xxx.xx" strings.
price_estimate_by_town <- function(areas, years, level, bedrooms) {
  # Baseline: intercept plus the four numeric terms.
  base <- regression$estimate[1] +
    areas * regression$estimate[27] +
    years * regression$estimate[28] +
    level * regression$estimate[29] +
    bedrooms * regression$estimate[30]
  # Row 1 is the base category (no coefficient); rows 2..26 add their
  # town coefficient from the tidy regression table.
  town_df$estimated_price <- base + c(0, regression$estimate[2:26])
  town_df$estimated_price <- paste0('$', formatC(town_df$estimated_price, digits = 2, big.mark = ',', format = 'f'))
  town_df[c(1, 4, 2, 3)]   # town, estimated_price, lat, lon
}
# Leaflet map of per-town price estimates, coloured by quantile of the FULL
# market's resale prices (so each town is compared against all SG HDB flats).
produce_leaflet <- function(areas, years, level, bedrooms){
  # Factor -> numeric must go via character (direct as.numeric on a factor
  # would return level codes, not station counts).
  hdb_housing_data$No_stations <- as.numeric(as.character(hdb_housing_data$No_stations))
  leaflet_df <- price_estimate_by_town(areas, years, level, bedrooms)
  # Strip the "$" and "," formatting back off to get numeric prices.
  leaflet_df$estimated_price <- as.numeric(gsub('[$,]',"",leaflet_df$estimated_price))
  # Quantile palette over ALL observed resale prices (market-wide comparison).
  qpal <- colorQuantile(c("green", "yellow", "firebrick1"), hdb_housing_data$resale_price, n = 5)
  #qpal2 <- colorQuantile(c("green", "yellow", "firebrick1"), leaflet_df$resale_price, n = 3)
  # Clamp estimates to the observed price range [140000, 1205000].
  # (Replaces the original's two duplicated element-wise for-loops.)
  leaflet_df$estimated_price <- pmin(pmax(leaflet_df$estimated_price, 140000), 1205000)
  # Display string: prefix "<" / ">" when the estimate hit a clamp bound.
  leaflet_df$estimated_price2 <- ifelse(leaflet_df$estimated_price == 140000,
    paste0('<$',formatC(leaflet_df$estimated_price,digits=2,big.mark=',',format='f')),
    ifelse(leaflet_df$estimated_price == 1205000,
      paste0('>$',formatC(leaflet_df$estimated_price,digits=2,big.mark=',',format='f')),
      paste0('$',formatC(leaflet_df$estimated_price,digits=2,big.mark=',',format='f'))))
  # Attach MRT-station counts and average prices per town for the popups.
  mrts_by_town <- hdb_housing_data %>% group_by(town) %>% summarise(No_stations = mean(No_stations))
  leaflet_df <- merge(leaflet_df, mrts_by_town)
  library(stringi)  # stri_trans_totitle for popup town names
  leaflet_df$av_price <- Av_Price$mean
  leaflet() %>% addTiles() %>%
    addCircleMarkers(data = leaflet_df, lng = ~lon, lat = ~lat,
                     popup = ~sprintf('HDB Town = %s <br/> Price = %s <br/> Number of MRTs = %s <br/> <br/> Average_Price = %s <br/> ',
                                      stri_trans_totitle(town), estimated_price2, No_stations, av_price),
                     color = ~qpal(estimated_price), radius = 15, opacity = 0.7) %>%
    addLegend(data = leaflet_df, position = "bottomright", pal = qpal,
              values = hdb_housing_data$resale_price,
              title = "Price Classification (Comparison With All SG HDB flats)")
}
# Leaflet map of per-town price estimates, coloured by quantile of the
# ESTIMATES themselves (towns compared against each other, not the market).
produce_leaflet2 <- function(areas, years, level, bedrooms){
  # Factor -> numeric via character (direct as.numeric would give level codes).
  hdb_housing_data$No_stations <- as.numeric(as.character(hdb_housing_data$No_stations))
  leaflet_df <- price_estimate_by_town(areas, years, level, bedrooms)
  leaflet_df$estimated_price <- as.numeric(gsub('[$,]',"",leaflet_df$estimated_price))
  # Palette built on the UNCLAMPED estimates -- intentionally computed before
  # the clamping below, matching the original ordering.
  qpal <- colorQuantile(c("green", "yellow", "firebrick1"), leaflet_df$estimated_price, n = 3)
  #qpal2 <- colorQuantile(c("green", "yellow", "firebrick1"), leaflet_df$resale_price, n = 3)
  # Clamp to the observed price range [140000, 1205000].
  # (Replaces the original's two duplicated element-wise for-loops.)
  leaflet_df$estimated_price <- pmin(pmax(leaflet_df$estimated_price, 140000), 1205000)
  # Display string with "<" / ">" prefixes at the clamp bounds.
  leaflet_df$estimated_price2 <- ifelse(leaflet_df$estimated_price == 140000,
    paste0('<$',formatC(leaflet_df$estimated_price,digits=2,big.mark=',',format='f')),
    ifelse(leaflet_df$estimated_price == 1205000,
      paste0('>$',formatC(leaflet_df$estimated_price,digits=2,big.mark=',',format='f')),
      paste0('$',formatC(leaflet_df$estimated_price,digits=2,big.mark=',',format='f'))))
  mrts_by_town <- hdb_housing_data %>% group_by(town) %>% summarise(No_stations = mean(No_stations))
  leaflet_df <- merge(leaflet_df, mrts_by_town)
  library(stringi)  # stri_trans_totitle for popup town names
  leaflet_df$av_price <- Av_Price$mean
  leaflet() %>% addTiles() %>%
    addCircleMarkers(data = leaflet_df, lng = ~lon, lat = ~lat,
                     popup = ~sprintf('HDB Town = %s <br/> Price = %s <br/> Number of MRTs = %s <br/> <br/> Average Price = %s <br/> ',
                                      stri_trans_totitle(town), estimated_price2 , No_stations, av_price),
                     color = ~qpal(estimated_price), radius = 15, opacity = 0.7) %>%
    addLegend(data = leaflet_df, position = "bottomright", pal = qpal,
              values = ~estimated_price,
              title = "Colour Comparison of Price Estimates By Town)")
}
# I need to enter data for town, floor area, years remaining, level and bedrooms to get an estimate for the price.
#hdb_price_estimate("Bukit Timah", 150, 25, 10, 3)
#hdb_price_estimate function End
#from Calculate Distance
getwd()
# Pre-geocoded amenity tables; each has lon/lat columns plus a name column
# (School / Mall / MRT / HawkerCentre).
primaryschools <- read.csv("primarysch.csv")
secondaryschools <- read.csv("secsch.csv")
tertiaryschools <- read.csv("tertiarysch.csv")
malls <- read.csv("mallcoord.csv")
mrt <- read.csv("mrtcoord.csv")
hawker <- read.csv("hawkercoord.csv")
# Collapse lon/lat into a single "lon lat" string per row; distance() below
# splits this string back apart.
primaryschools$Coord <- paste(primaryschools$lon,primaryschools$lat)
secondaryschools$Coord <- paste(secondaryschools$lon,secondaryschools$lat)
tertiaryschools$Coord <- paste(tertiaryschools$lon,tertiaryschools$lat)
malls$Coord <- paste(malls$lon,malls$lat)
mrt$Coord <- paste(mrt$lon,mrt$lat)
hawker$Coord <- paste(hawker$lon,hawker$lat)
# Great-circle (Haversine) distance in metres between two lon/lat points.
findStraightLineDistance <- function(lon1,lat1,lon2,lat2){
  from <- c(lon1, lat1)
  to <- c(lon2, lat2)
  distm(from, to, fun = distHaversine) # in meters
}
# Distance in metres from (lon, lat) to a place given as a "lon lat" string
# (the Coord column built above).
distance <- function(place, lon, lat){
  coords <- strsplit(place, split = " ")[[1]]
  dest_lon <- as.numeric(coords[1])
  dest_lat <- as.numeric(coords[2])
  as.numeric(findStraightLineDistance(lon, lat, dest_lon, dest_lat))
}
# For a free-text address, geocode it and report the nearest amenity of each
# type (primary/secondary/tertiary school, MRT, mall, hawker centre), the
# number of each amenity type within `range` metres, and the straight-line
# distance to the workplace address `waddress`.
# Returns (and prints) a data.frame with Type, Name, Distance (m),
# withinRange (count; "-/-" for the workplace row) and Walking_Time (minutes,
# assuming ~83.3 m/min, i.e. 5 km/h).
findNearest <- function(inputAddress, range = 0, waddress){
  address <- as.data.frame(inputAddress, stringsAsFactors=FALSE)
  addressGeocode <- mutate_geocode(address, inputAddress)  # Google geocoding
  lon0 <- as.numeric(addressGeocode$lon[1])
  lat0 <- as.numeric(addressGeocode$lat[1])
  # Helper: one summary row for the nearest entry of `df` to the address.
  # (Replaces six copy-pasted stanzas from the original.)
  nearest_of <- function(df, type, name_col){
    df$dist <- sapply(df$Coord, distance, lon0, lat0)
    hit <- which(df$dist == min(df$dist))
    data.frame(Type = type,
               Name = df[[name_col]][hit],
               Distance = df$dist[hit],
               withinRange = sum(df$dist <= range))
  }
  primaryschoolsNearest <- nearest_of(primaryschools, "Primary School", "School")
  secondaryschoolsNearest <- nearest_of(secondaryschools, "Secondary School", "School")
  tertiaryschoolsNearest <- nearest_of(tertiaryschools, "Tertiary School", "School")
  mrtNearest <- nearest_of(mrt, "MRT", "MRT")
  mallNearest <- nearest_of(malls, "Shopping Mall", "Mall")
  hawkerNearest <- nearest_of(hawker, "Hawker Center", "HawkerCentre")
  # Workplace: geocode separately; withinRange is not applicable here.
  work <- geocode(paste(waddress, "Singapore"))
  work$Coord <- paste(work$lon,work$lat)
  work$dist <- sapply(work$Coord, distance, lon0, lat0)
  workNearest <- data.frame(Type="Work Place", Name=waddress,
                            Distance=work$dist[which(work$dist==min(work$dist))],
                            withinRange="-/-")
  nearest <- rbind(primaryschoolsNearest, secondaryschoolsNearest,
                   tertiaryschoolsNearest, mrtNearest, mallNearest,
                   hawkerNearest, workNearest)
  nearest$Walking_Time <- nearest[3]/83.3   # metres -> minutes at 5 km/h
  nearest$Walking_Time <- sprintf("%.1f", unlist(nearest$Walking_Time), "%")
  print(nearest)
}
#findNearest("28 College Avenue Queenstown, Singapore",5000)
library(ggrepel)
# Histogram comparing an estimated HDB price against market prices.
#
# Draws the distribution of resale prices — all of Singapore when
# `intown = FALSE`, or only flats in `towns2` with `bedrooms` bedrooms when
# `intown = TRUE` — splits the bars at the user's estimated price, and
# annotates the plot with the share of properties below/above that price.
#
# Args:
#   towns2   : HDB town name (matched case-insensitively against `town`).
#   areas, years, level, bedrooms : forwarded to hdb_price_estimate().
#   intown   : logical; TRUE = compare within the town, FALSE = Singapore-wide.
# Returns: a ggplot object.
price_estimate_summary_graph <- function(towns2, areas, years, level, bedrooms, intown) {
  user_price <- hdb_price_estimate(towns2, areas, years, level, bedrooms)
  user_price_c <- paste0('$', formatC(user_price, digits = 2, big.mark = ',', format = 'f'))
  if (!intown) {
    # Singapore-wide comparison over every resale transaction.
    all_prices <- data.frame(prices = hdb_housing_data$resale_price)
    max_price <- max(all_prices$prices)
    # Fraction of properties priced ABOVE the estimate; prop_perc is the
    # complementary "cheaper than yours" share shown on the left of the line.
    prop <- nrow(all_prices %>% filter(prices > user_price)) / nrow(all_prices)
    prop_perc <- paste(sprintf("%.2f", (1 - prop) * 100), "%")
    p <- ggplot(data = hdb_housing_data) +
      geom_histogram(aes(x = resale_price, y = ..count..,
                         fill = cut(resale_price, breaks = c(0, user_price, 2000000))),
                     binwidth = 25000) +
      theme_linedraw() +
      theme(legend.position = "none") +
      scale_fill_manual(values = c("palegreen", "tomato")) +
      geom_vline(xintercept = user_price, size = 2.5, color = "gray") +
      ggtitle("Singapore Property Value Comparison Tool") +
      geom_text(x = (100000 + user_price) / 2, y = 5000, label = prop_perc, size = 5) +
      geom_text(x = (user_price + 1250000) / 2, y = 2000,
                label = paste(sprintf("%.2f", prop * 100), "%"), size = 5) +
      geom_text(aes(x = user_price, label = paste0(" Price = ", user_price_c), y = 0, size = 5),
                colour = "black", vjust = 1, text = element_text(size = 8)) +
      ylab("Number Of Properties") +
      xlab("Sale Price") +
      coord_cartesian(xlim = c(0, max_price + 100000))
    return(p)
  } else {
    # Within-town comparison, restricted to the same bedroom count.
    a <- toupper(towns2)  # town names are stored upper-case in hdb_housing_data
    b <- bedrooms         # copy so the `bedrooms` column is not masked inside filter()
    make <- hdb_housing_data %>% filter(town == a, bedrooms == b)
    max_price <- max(make$resale_price)
    # FIX: the original filtered on `town_price > user_price`, where `town_price`
    # was the data frame itself, not a column; compare the resale_price column.
    prop2 <- nrow(make %>% filter(resale_price > user_price)) / nrow(make)
    prop_perc <- paste(sprintf("%.2f", (1 - prop2) * 100), "%")
    p <- ggplot(data = make) +
      geom_histogram(aes(x = resale_price, y = ..count..,
                         fill = cut(resale_price, breaks = c(0, user_price, 2000000))),
                     binwidth = 25000) +
      theme_linedraw() +
      theme(legend.position = "none") +
      scale_fill_manual(values = c("palegreen", "tomato")) +
      geom_vline(xintercept = user_price, size = 2.5, color = "gray") +
      ggtitle(paste(towns2, bedrooms, "Bedroom Property Value Comparison Tool")) +
      geom_text(x = user_price - 50000, y = nrow(make) / 20, label = prop_perc, hjust = 1, size = 5) +
      geom_text(x = user_price + 50000, y = nrow(make) / 20,
                label = paste(sprintf("%.2f", prop2 * 100), "%"), hjust = 0, size = 5) +
      geom_text(aes(x = user_price, size = 5, label = paste0(" Price = ", user_price_c), y = 0),
                colour = "black", vjust = 1, text = element_text(size = 8)) +
      ylab("Number Of Properties") +
      xlab("Sale Price") +
      coord_cartesian(xlim = c(0, max_price + 100000))
    return(p)
  }
}
# Histogram comparing an estimated HDB price against all Singapore flats with
# the SAME bedroom count.
#
# Only the Singapore-wide comparison (`intown = FALSE`) is implemented; for
# `intown = TRUE` the function returns NULL invisibly, matching the original
# behavior (callers only pass FALSE here).
#
# Args mirror price_estimate_summary_graph(); returns a ggplot object or NULL.
price_estimate_summary_graph2 <- function(towns2, areas, years, level, bedrooms, intown) {
  if (!intown) {
    user_price <- hdb_price_estimate(towns2, areas, years, level, bedrooms)
    b <- bedrooms  # copy so the `bedrooms` column is not masked inside filter()
    # Local subset instead of shadowing the global `hdb_housing_data`.
    flats <- hdb_housing_data %>% filter(bedrooms == b)
    max_price <- max(flats$resale_price)
    # Fraction of same-bedroom flats priced above the estimate.
    prop <- nrow(flats %>% filter(resale_price > user_price)) / nrow(flats)
    prop_perc <- paste(sprintf("%.2f", (1 - prop) * 100), "%")
    user_price_c <- paste0('$', formatC(user_price, digits = 2, big.mark = ',', format = 'f'))
    p <- ggplot(data = flats) +
      geom_histogram(aes(x = resale_price, y = ..count..,
                         fill = cut(resale_price, breaks = c(0, user_price, 2000000))),
                     binwidth = 25000) +
      theme_linedraw() +
      theme(legend.position = "none") +
      scale_fill_manual(values = c("palegreen", "tomato")) +
      geom_vline(xintercept = user_price, size = 2.5, color = "gray") +
      ggtitle(paste("Singapore", bedrooms, "Bedroom Property Value Comparison Tool")) +
      geom_text(x = (user_price - 120000), y = nrow(flats) / 20, label = prop_perc, size = 5) +
      geom_text(x = (user_price + 120000), y = nrow(flats) / 30,
                label = paste(sprintf("%.2f", prop * 100), "%"), size = 5) +
      geom_text(aes(x = user_price, label = paste0(" Price = ", user_price_c), y = 0, size = 5),
                colour = "black", vjust = 1, text = element_text(size = 8)) +
      ylab("Number Of Properties") +
      xlab("Sale Price") +
      coord_cartesian(xlim = c(0, max_price + 100000))
    return(p)
  }
}
# Histogram comparing an estimated HDB price against ALL flats in the same
# town (any bedroom count).
#
# `intown` is accepted only for signature compatibility with the sibling
# graph functions and is not used: this function always compares within town.
# Returns a ggplot object.
price_estimate_summary_graph3 <- function(towns2, areas, years, level, bedrooms, intown) {
  user_price <- hdb_price_estimate(towns2, areas, years, level, bedrooms)
  a <- toupper(towns2)  # town names are stored upper-case in hdb_housing_data
  # Single town subset reused for both the statistics and the plot (the
  # original filtered hdb_housing_data twice).
  make <- hdb_housing_data %>% filter(town == a)
  max_price <- max(make$resale_price)
  # FIX: the original filtered on `town_price > user_price`, where `town_price`
  # was the data frame itself, not a column; compare the resale_price column.
  prop2 <- nrow(make %>% filter(resale_price > user_price)) / nrow(make)
  prop_perc <- paste(sprintf("%.2f", (1 - prop2) * 100), "%")
  user_price_c <- paste0('$', formatC(user_price, digits = 2, big.mark = ',', format = 'f'))
  p <- ggplot(data = make) +
    geom_histogram(aes(x = resale_price, y = ..count..,
                       fill = cut(resale_price, breaks = c(0, user_price, 2000000))),
                   binwidth = 25000) +
    theme_linedraw() +
    theme(legend.position = "none") +
    scale_fill_manual(values = c("palegreen", "tomato")) +
    geom_vline(xintercept = user_price, size = 2.5, color = "gray") +
    ggtitle(paste(towns2, "Property Value Comparison Tool")) +
    geom_text(x = user_price - 50000, y = nrow(make) / 20, label = prop_perc, hjust = 1, size = 5) +
    geom_text(x = user_price + 50000, y = nrow(make) / 20,
              label = paste(sprintf("%.2f", prop2 * 100), "%"), hjust = 0, size = 5) +
    geom_text(aes(x = user_price, size = 5, label = paste0(" Price = ", user_price_c), y = 0),
              colour = "black", vjust = 1, text = element_text(size = 8)) +
    ylab("Number Of Properties") +
    xlab("Sale Price") +
    coord_cartesian(xlim = c(0, max_price + 100000))
  return(p)
}
# Shiny server function. The app shows up to three properties with
# near-identical UI; local helpers below remove the triplicated table and
# graph code that used to live in each renderer.
function(input, output, session) {
  ## ---- Local UI helpers ----------------------------------------------------

  # Summary table showing the estimated value of one property.
  #   est_price : numeric estimate from hdb_price_estimate()
  #   heading   : table header, e.g. "Your Property Value:"
  price_estimate_ui <- function(est_price, heading) {
    tags$table(
      class = "table",
      tags$thead(tags$tr(tags$th(heading))),
      tags$tbody(
        tags$tr(
          tags$td(h1(paste0('$', formatC(est_price, format = "f", digits = 2, big.mark = ","), ' ± $30,000')))
        ),
        tags$tr(
          tags$td(paste("Based on a sample of HDB sales between 2017-2020, Your Property is worth more than approximately", Percentage(est_price), "of all HDB resale flats in Singapore!"))
        )
      )
    )
  }

  # One <tr> of the nearest-amenities table; row i of `nearestDf` corresponds
  # to `label` (row order fixed by findNearest()).
  amenity_row <- function(nearestDf, i, label) {
    tags$tr(
      tags$td(label),
      tags$td(nearestDf[i, 'Name']),
      tags$td(round(nearestDf[i, 'Distance'], digits = 0)),
      tags$td(nearestDf[i, 'Walking_Time']),
      # FIX: the original passed `digits = 0` to tags$td(), which emitted a
      # spurious digits="0" HTML attribute instead of rounding; the count
      # column needs no rounding at all.
      tags$td(nearestDf[i, 'withinRange'])
    )
  }

  # Nearest-amenities table for one address.
  #   nearestDf : data frame from findNearest(); rows are ordered
  #               primary/secondary/tertiary school, MRT, mall, hawker, work.
  amenities_table_ui <- function(nearestDf, address, town, range) {
    row_labels <- c("Primary School", "Secondary School", "Tertiary School",
                    "MRT", "Shopping Mall", "Hawker Center", "Work/POI")
    rows <- lapply(seq_along(row_labels),
                   function(i) amenity_row(nearestDf, i, row_labels[i]))
    tags$table(
      class = "table",
      tags$h3(paste("Approximate Distance to Amenities From", address, town)),
      tags$thead(tags$tr(
        tags$th("Type"),
        tags$th("Name"),
        tags$th("Distance(m)"),
        tags$th("Walking Time(min)"),
        tags$th(paste("Number of Amenities Within", range, "m"))
      )),
      do.call(tags$tbody, rows)
    )
  }

  # Dispatch to the right comparison histogram for one property tab.
  price_graph_for <- function(comparison, town, area, lease, floor, bedroom) {
    if (comparison == 'All of Singapore') {
      price_estimate_summary_graph(town, area, lease, floor, bedroom, FALSE)
    } else if (comparison == 'All of Singapore (Same Bedrooms)') {
      price_estimate_summary_graph2(town, area, lease, floor, bedroom, FALSE)
    } else if (comparison == "Within HDB Town") {
      price_estimate_summary_graph3(town, area, lease, floor, bedroom, TRUE)
    } else {
      price_estimate_summary_graph(town, area, lease, floor, bedroom, TRUE)
    }
  }

  ## ---- Price estimate tables ----------------------------------------------

  output$priceEstimate <- renderUI({
    est_price <- hdb_price_estimate(input$town, input$floorarea, input$remainingLease, input$floor, input$bedroom)
    price_estimate_ui(est_price, "Your Property Value:")
  })

  output$priceEstimate2 <- renderUI({
    est_price <- hdb_price_estimate(input$town2, input$floorarea2, input$remainingLease2, input$floor2, input$bedroom2)
    price_estimate_ui(est_price, "Your First Property's Value:")
  })

  output$priceEstimate3 <- renderUI({
    est_price <- hdb_price_estimate(input$town3, input$floorarea3, input$remainingLease3, input$floor3, input$bedroom3)
    price_estimate_ui(est_price, "Your Second Property's Value:")
  })

  ## ---- Nearest amenities ----------------------------------------------------
  ## NOTE: the output id "nearestAmendities" (sic) is kept as-is because the
  ## UI definition references it by that name.

  output$nearestAmendities <- renderUI({
    nearestDf <- findNearest(paste(input$address, input$town, ', Singapore'), input$range, input$waddress)
    amenities_table_ui(nearestDf, input$address, input$town, input$range)
  })

  output$nearestAmenities2 <- renderUI({
    nearestDf <- findNearest(paste(input$address2, input$town2, ', Singapore'), input$range2, input$waddress2)
    amenities_table_ui(nearestDf, input$address2, input$town2, input$range2)
  })

  output$nearestAmenities3 <- renderUI({
    nearestDf <- findNearest(paste(input$address3, input$town3, ', Singapore'), input$range3, input$waddress3)
    amenities_table_ui(nearestDf, input$address3, input$town3, input$range3)
  })

  ## ---- Maps and comparison charts ------------------------------------------

  output$housemap <- renderLeaflet({
    # The radio label string includes a trailing space; the comparison must
    # stay byte-exact against the UI's choice text.
    if (input$radio == "Compare Price Estimate in Each Town With all Singapore HDB properties. ") {
      produce_leaflet(input$floorarea, input$remainingLease, input$floor, input$bedroom)
    } else {
      produce_leaflet2(input$floorarea, input$remainingLease, input$floor, input$bedroom)
    }
  })

  output$housemap2 <- renderPlotly({
    # Bar chart: price estimate vs town average for the two compared properties.
    avg2 <- mean(hdb_housing_data[hdb_housing_data$town == input$town2, ]$resale_price)
    avg3 <- mean(hdb_housing_data[hdb_housing_data$town == input$town3, ]$resale_price)
    est2 <- hdb_price_estimate(input$town2, input$floorarea2, input$remainingLease2, input$floor2, input$bedroom2)
    est3 <- hdb_price_estimate(input$town3, input$floorarea3, input$remainingLease3, input$floor3, input$bedroom3)
    Price <- c(est2, avg2, est3, avg3)
    x <- c(paste(input$town2, "Price Estimate"), paste(input$town2, "Average Price"),
           paste(input$town3, "Price Estimate"), paste(input$town3, "Average Price"))
    data <- data.frame(x, Price)
    plot <- data %>% ggplot() +
      geom_bar(aes(x = x, y = Price, fill = x), stat = "identity") +
      xlab("Property") + ylab("Price ($)") +
      theme(axis.text.x = element_text(angle = 45, size = 12), axis.text.y = element_text(size = 12)) +
      theme(legend.position = "none") +
      scale_y_continuous(name = "Asking Price ($)", labels = function(x) { dollar_format()(x) }) +
      theme(axis.title.x = element_text(size = 13)) +
      theme(axis.title.y = element_text(size = 11)) +
      scale_fill_manual(values = c("dodgerblue1", "dodgerblue1", "lightsalmon2", "lightsalmon2"))
    return(ggplotly(plot, tooltip = "Price"))
  })

  ## ---- Price comparison histograms -----------------------------------------

  output$priceGraph <- renderPlot({
    price_graph_for(input$comparison, input$town, input$floorarea, input$remainingLease, input$floor, input$bedroom)
  })

  output$priceGraph2 <- renderPlot({
    price_graph_for(input$comparison2, input$town2, input$floorarea2, input$remainingLease2, input$floor2, input$bedroom2)
  })

  output$priceGraph3 <- renderPlot({
    price_graph_for(input$comparison3, input$town3, input$floorarea3, input$remainingLease3, input$floor3, input$bedroom3)
  })

  ## ---- Yearly price trend ---------------------------------------------------

  output$trend <- renderPlotly({
    # Mean resale price per year for each compared (town, bedrooms) pair.
    data1 <- hdb_housing_data[hdb_housing_data$town == input$town2 & hdb_housing_data$bedrooms == input$bedroom2, ] %>%
      group_by(Year) %>% summarise(mean_price = mean(resale_price))
    data1$town <- input$town2
    data1$bedrooms <- input$bedroom2
    data2 <- hdb_housing_data[hdb_housing_data$town == input$town3 & hdb_housing_data$bedrooms == input$bedroom3, ] %>%
      group_by(Year) %>% summarise(mean_price = mean(resale_price))
    data2$town <- input$town3
    data2$bedrooms <- input$bedroom3
    data <- rbind(data1, data2)
    plot <- ggplot() +
      geom_smooth(data = data, aes(x = Year, y = mean_price, color = town,
                                   text = paste("bedrooms: ", bedrooms)), size = 1.5) +
      theme(axis.text.x = element_text(size = 12), axis.text.y = element_text(size = 15)) +
      coord_cartesian(ylim = c(200000, ceiling(max(data$mean_price)))) +
      scale_y_continuous(name = "Asking Price", labels = function(x) { dollar_format()(x) }) +
      theme(axis.title.x = element_text(size = 13)) +
      theme(axis.title.y = element_text(size = 13)) +
      scale_colour_manual(values = c("dodgerblue1", "lightsalmon2"))
    return(ggplotly(plot))
  })
}
|
c2ec12b41df7d0498756add81663bc21ee7d8286
|
f6aee4d3a145140a277c8668c391f9cca8db3a95
|
/Functions/Model/General/CalcAttnWeights.R
|
7687561fe10867f1ddc2439e45be5ce6732c6fc7
|
[] |
no_license
|
peter-hitchcock/rum_derails_rl
|
b981336e08882959e9888009e23de8cd25354556
|
1c5ea8737acd2f1f83f4f259cf9218f65d1c5c61
|
refs/heads/main
| 2023-03-09T17:00:32.649539
| 2021-03-02T04:33:08
| 2021-03-02T04:33:08
| 343,197,552
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,469
|
r
|
CalcAttnWeights.R
|
CalcAttnWeights <- function(pars,
                            weights_RL,
                            param_labels,
                            reward,
                            schosen_vector_indices,
                            color_RL_weights,
                            texture_RL_weights,
                            shape_RL_weights,
                            pstate,
                            this_trial,
                            bayes_posterior_target=NULL) {
  ### Calculates various kinds of attention weights based on feature weight
  # magnitudes or stimulus values by calling fxs in LearningandChoiceComps ###
  # CalcAttnWeights is called from RunOneTrial #
  # NOTE(review): if none of the attention parameters ('attn_beta', 'delta',
  # 'kappa') appears in param_labels, `attn_weights` is never assigned and the
  # final line errors with "object not found" — presumably such models never
  # call this function; confirm with callers.
  # (Fix vs original: scalar `if` conditions now use the short-circuit `&&`
  # instead of the vectorized `&`; behavior is unchanged for scalar operands.)
  ## Feature attention weights based on a softmax of feature weights (Jaskir et al 17) #
  if ('attn_beta' %in% param_labels) {
    attn_weights <- exp(PerfSoftmax(pars["attn_beta"],
                                    weights_RL,
                                    pstate,
                                    this_trial,
                                    identifier="attn"))
    if (pstate) cat('\n Attention weights \n:', attn_weights)
  }
  ######################################################################
  # Phi RL for dimension-based attn weighting based on the RL-based weights (Daniel et al 20) #
  if ('delta' %in% param_labels) {
    attn_weights <- CalcDimPhiRLAttnWeights(pars["delta"],
                                            color_RL_weights,
                                            shape_RL_weights,
                                            texture_RL_weights,
                                            pstate,
                                            this_trial)
  }
  ######################################################################
  ################## Phi RL for RL/Bayes attn weight mixtures #########
  # Mix the just-calculated DIMENSIONAL RL-based weights (via delta) #
  # via kappa with weights based on the Bayesian posterior of the target #
  # (Daniel et al 20; kappa here=alpha in that paper) .. #
  if ('kappa' %in% param_labels && 'delta' %in% param_labels) {
    # The posterior prob by target vector is arranged as CST, so for the
    # Bayesian dimension weights we just need the sum over each block of 3.
    bayes_dim_weights <-
      c(sum(bayes_posterior_target[1:3]),
        sum(bayes_posterior_target[4:6]),
        sum(bayes_posterior_target[7:9])
      )
    attn_weights <- CalcMixedPhiDimWeights(pars["kappa"], # NOTE(review): original flagged this fx as "doesn't yet exist" — verify it is defined
                                           bayes_dim_weights, # bayes probs (either of stims or features)
                                           RL_attn_weights=attn_weights, # RL attn weights (either for features or dimensions)
                                           pstate)
  }
  # .. or mix the just-calculated FEATURE RL-based weights (via attn beta) #
  # via kappa with weights based on the Bayesian posterior of the target #
  # (Daniel et al 20; kappa here=alpha in that paper) .. #
  if ('kappa' %in% param_labels && 'attn_beta' %in% param_labels) {
    attn_weights <- CalcMixedPhiFeatWeights(pars["kappa"],
                                            bayes_attn_weights=bayes_posterior_target, # bayes probs (either of stims or features)
                                            RL_attn_weights=attn_weights, # RL attn weights (either for features or dimensions)
                                            pstate)
  }
  if ('kappa' %in% param_labels && (!'attn_beta' %in% param_labels) && (!'delta' %in% param_labels)) {
    ## Trying out kappa just in the early trials. This is based on the insight that it can take on a unique
    # fx in the early trials by guiding attn away from losses, in contrast to the RL model that's initialized at
    # 0 and so never experiences negative prediction errors until it's actually had a reward
    # (conditional on 0 initializations, so this suggests initializing at a non-zero
    # value might allow this via the RL-based system). More generally the Bayesian early can stand in for early
    # MB learning whose marginal utility I assume will decrease as the RL-based components become more reliable
    if (this_trial < 5) {
      attn_weights <- CalcBayesAttnWeights(pars["kappa"],
                                           bayes_attn_weights=bayes_posterior_target,
                                           pstate)
    } else {
      attn_weights <- rep(1, 9)  # uniform attention over the 9 features after trial 4
    }
  }
  attn_weights
}
|
b233272c3705a847792611c5a1b35c33fd5328cb
|
77ff13c4c17a8f0c7469cd914eb856ebda6e52a2
|
/R/generics.R
|
f7fce08ed298912a24417166947bf45939eed8ce
|
[] |
no_license
|
carlonlv/DataCenterSim
|
f88623620c32816e97bd53b78ef6931f66ca8521
|
fa2cc2592969c40d3e8494c2be46a94641b235f1
|
refs/heads/master
| 2022-01-19T12:04:49.255542
| 2022-01-07T19:40:39
| 2022-01-07T19:40:39
| 228,258,775
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,655
|
r
|
generics.R
|
#' @include sim_class.R
NULL
#' Get The Slots that Are Considered Hyperparameters of Simulation
#'
#' @param object An S4 sim or pred object
#' @rdname get_param_slots
#' @export
setGeneric("get_param_slots", function(object) standardGeneric("get_param_slots"))
#' Get The Slots that Are Considered Characteristics of Simulation
#'
#' @param object An S4 sim or pred object
#' @rdname get_characteristic_slots
#' @export
setGeneric("get_characteristic_slots", function(object) standardGeneric("get_characteristic_slots"))
#' Get The Slots that Are Not Displayed
#'
#' @param object An S4 sim or pred object
#' @rdname get_hidden_slots
#' @export
setGeneric("get_hidden_slots", function(object) standardGeneric("get_hidden_slots"))
#' Train Model
#'
#' This is a generic function that trains a model according to the input object type, with additional arguments supplied by attributes of the object.
#'
#' @param object An S4 sim object.
#' @param train_x A numeric of length m representing the training set.
#' @param train_xreg A numeric or matrix of length or row number m representing the additional regressors for training.
#' @param trained_model A list representing the past trained model to update, can be an empty list.
#' @return A list containing the trained result.
#' @name train_model
#' @rdname train_model
setGeneric("train_model", function(object, train_x, train_xreg, trained_model) standardGeneric("train_model"))
#' Do Prediction
#'
#' This is a generic function that does prediction according to the input object type.
#'
#' @param object An S4 sim object.
#' @param trained_result A list or other class returned by \code{train_model}, containing trained model information.
#' @param predict_info A dataframe representing all the past predicted or scheduled information.
#' @param test_x A numeric vector representing the test dataset up to the current time.
#' @param test_xreg A dataframe representing the external predictors.
#' @return The updated \code{predict_info} on the last row.
#' @name do_prediction
#' @rdname do_prediction
setGeneric("do_prediction", function(object, trained_result, predict_info, test_x, test_xreg) standardGeneric("do_prediction"))
#' Get Representation
#'
#' This is a generic function that returns a character representation according to the input object type.
#'
#' @param object An S4 sim object.
#' @param type A character representing the different type of representation to be returned.
#' @return A character representation of \code{object}.
#' @name get_representation
#' @rdname get_representation
setGeneric("get_representation", function(object, type) standardGeneric("get_representation"))
|
3cf9e00a57ae739750cae2aa5d17af2289f0fd71
|
1fbce482fa0cd8117e0a80e38323c5eb5f67ca7a
|
/R/makeData.R
|
6479ac0a72c6333f57af481e7ebc120d74687617
|
[] |
no_license
|
bioinfo16/RIPAT
|
adf1ef88a37e033d3b4961272d8846370bb685c4
|
4e736b60e9bc2695a67ba13e9a50ed56c9c4d38a
|
refs/heads/master
| 2021-02-06T21:47:10.233559
| 2020-10-13T06:09:25
| 2020-10-13T06:09:25
| 243,133,840
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,548
|
r
|
makeData.R
|
#' @title Make data files for RIPAT.
#'
#' @description
#' Download datafiles for running RIPAT.
#'
#' @usage
#' makeData(organism = 'GRCh37', dataType = 'cpg')
#'
#' @param organism a single character. Two versions of organism such as GRCh37, GRCh38 (Human).\cr
#' Default is 'GRCh37'.
#' @param dataType a single character. Data type what user needs (cpg, repeat and variant).\cr
#' Default is 'cpg'.
#'
#' @examples
#' makeData(organism = 'GRCh37')
#'
#' @export
makeData = function(organism = 'GRCh37', dataType = 'cpg'){
message('----- Make data files for RIPAT. (Time : ', date(), ')')
message('- Validate options')
if(length(which(c('GRCh37', 'GRCh38') %in% organism)) == 0){
stop("[ERROR] Please use GRCh37/GRCh38 data.\n----- This process is halted. (Time : ", date(), ")\n")
}
message('- OK!')
outPath = system.file("extdata", package = 'RIPAT')
cat('+ The data file path : ', outPath, '\n')
if(organism == 'GRCh37'){otherkey = 'hg19'}else if(organism == 'GRCh38'){otherkey = 'hg38'}
if(length(which(dataType == c('cpg', 'repeat'))) != 0){
message('- Load UCSC data')
UCSCSession = rtracklayer::browserSession("UCSC")
rtracklayer::genome(UCSCSession) <- otherkey
chr_info = readRDS(file = system.file("extdata", paste0(organism, '_chrom.rds'), package = 'RIPAT'))
gr_chr = GenomicRanges::GRanges(seqnames = paste0('chr', chr_info$chrom),
ranges = IRanges::IRanges(start = 1, end = chr_info$length))
if(dataType == 'cpg'){
ctab_list = lapply(c(1:length(gr_chr)), function(a){
rtracklayer::getTable(rtracklayer::ucscTableQuery(UCSCSession, track = "cpgIslandExt", range = gr_chr[a], table = "cpgIslandExt"))
})
ctab = data.frame(do.call(rbind, ctab_list), stringsAsFactors = FALSE)[,-1]
names(ctab) = c("chrom", "start", "end", "name", "length", "cpgNum", "gcNum", "perCpg", "perGc", "obsExp")
message('- Save CpG island data')
saveRDS(ctab, file = paste0(outPath, '/', organism, '_cpg.rds'))
} else if(dataType == 'repeat'){
rtab_list = lapply(c(1:length(gr_chr)), function(a){
rtracklayer::getTable(rtracklayer::ucscTableQuery(UCSCSession, track = "rmsk", range = gr_chr[a], table = "rmsk"))
})
rtab = data.frame(do.call(rbind, rtab_list), stringsAsFactors = FALSE)[,c(6:8,10:13)]
names(rtab) = c("genoName", "genoStart", "genoEnd", "strand", "repName", "repClass", "repFamily")
rtab = subset(rtab, rtab$repClass != 'Simple_repeat')
rtab = subset(rtab, rtab$repClass != 'Unknown')
rtab = subset(rtab, !stringr::str_detect(rtab$repClass, "[?]"))
rtab = subset(rtab, !stringr::str_detect(rtab$repFamily, "[?]"))
rtab = subset(rtab, !stringr::str_detect(rtab$repName, '[?]'))
mtab_list = lapply(c(1:length(gr_chr)), function(a){
rtracklayer::getTable(rtracklayer::ucscTableQuery(UCSCSession, track = "microsat", range = gr_chr[a], table = "microsat"))
})
mtab = data.frame(do.call(rbind, mtab_list), stringsAsFactors = FALSE)[,-1]
names(mtab) = c("chrom", "chromStart", "chromEnd", "name")
message('- Save repeat and microsatellite data')
saveRDS(rtab, file = paste0(outPath, '/', organism, '_repeat.rds'))
saveRDS(mtab, file = paste0(outPath, '/', organism, '_microsat.rds'))
}
message('- OK!')
} else if(dataType == 'variant'){
message('- Load NCBI Clinvar data')
utils::download.file(url = "http://ftp.ncbi.nlm.nih.gov/pub/clinvar/tab_delimited/variant_summary.txt.gz",
destfile = paste0(outPath, '/variant_summary.txt.gz'))
vtab_raw = utils::read.delim(gzfile(paste0(outPath, '/variant_summary.txt.gz')), header = TRUE, stringsAsFactors = FALSE)
vtab = subset(vtab_raw, vtab_raw$Assembly == organism)
vtab = subset(vtab, vtab$Chromosome != 'MT')
vtab1 = subset(vtab, vtab$ReviewStatus %in% c('reviewed by expert panel', 'criteria provided, multiple submitters, no conflicts'))
vtab1 = subset(vtab, vtab$NumberSubmitters >= 2)
vtab1$Chromosome = paste0('chr', vtab1$Chromosome)
message('- Save clinical variant data')
saveRDS(vtab1, file = paste0(outPath, '/', organism, '_clinvar.rds'))
message('- OK!')
} else {
stop("[ERROR] Please enter cpg, repeat and variant.\n----- This process is halted. (Time : ", date(), ")\n")
}
message('----- Finish. (Time : ', date(), ')\n')
}
|
e62eea240bb1fd5ed4050dd5e01efbc2c27f163e
|
ed633d145dfa8b32511b3cb13ba95b822e2559c8
|
/doc/Calculate.rwg.R
|
22f08ffcfd084b2713269c368c89962da8740008
|
[] |
no_license
|
wendellopes/rvswf
|
51a09f034e330fbb7fd58816c3de2b7f7fdba9dc
|
ee243c3e57c711c3259a76051a88cc670dfe9c4b
|
refs/heads/master
| 2020-05-19T19:38:18.987560
| 2016-09-11T22:57:37
| 2016-09-11T22:57:37
| 19,242,694
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,580
|
r
|
Calculate.rwg.R
|
# ------------------------------------------------------------------------------
# WAVE GUIDE PARAMETERS
# Sets up a rectangular-waveguide geometry/mode, computes the direct waveguide
# field (RWG) and its vector-spherical-wave-function expansion (BSC/PWE) on a
# single z-plane, then sources the plotting script.
# ------------------------------------------------------------------------------
rm(list=ls())  # NOTE(review): clears the workspace; prefer running in a fresh session
# ------------------------------------------------------------------------------
# Basic parameters
# ------------------------------------------------------------------------------
lambda <- .5e-6      # Propagating wavelength
a <- 7*lambda        # Size x of the waveguide
b <- 5*lambda        # Size y of the waveguide
l <- 3*lambda        # Extent z of the computation region
M <- 6               # x wavefield mode
N <- 5               # y wavefield mode
# ------------------------------------------------------------------------------
# Wave field parameters
# ------------------------------------------------------------------------------
k <- 2*pi/lambda          # Propagating wavenumber
kx <- M*pi/a              # x component of the wavevector
ky <- N*pi/b              # y component of the wavevector
gama <- sqrt(kx^2+ky^2)   # transverse (gamma) component of the wavevector
kz <- sqrt(k^2-gama^2)    # z component of the wavevector
# ------------------------------------------------------------------------------
# Geometry of the calculations
# ------------------------------------------------------------------------------
NPX <- 200   # Number of grid points in x
NPY <- 200   # Number of grid points in y
NPZ <- 2     # Number of grid points in z
# ------------------------------------------------------------------------------
# Position vectors
# ------------------------------------------------------------------------------
dx <- a/(NPX-1)
dy <- b/(NPY-1)
dz <- l/(NPZ-1)
x <- seq(0, a, dx)   # x vector of positions
y <- seq(0, b, dy)   # y vector of positions
z <- seq(0, l, dz)   # z vector of positions
# ------------------------------------------------------------------------------
TM <- FALSE   # FALSE selects the TE calculation below
lmax <- 4     # maximum multipole order of the expansion
# ------------------------------------------------------------------------------
# POSITION AT WHICH THE EXPANSION WILL BE PERFORMED (REFERENCE SYSTEM)
# ------------------------------------------------------------------------------
# ARBITRARY choice (values are immediately overwritten by the FIXED choice
# below; the calls are kept so the RNG state matches the original script)
set.seed(512)
xo <- sample(x, 1)
yo <- sample(y, 1)
zo <- sample(z, 1)
# FIXED choice: centre of the guide cross-section at z = 0
xo <- x[NPX%/%2+1]
yo <- y[NPY%/%2+1]
zo <- 0
# ------------------------------------------------------------------------------
# CHANGE THE REFERENCE SYSTEM TO THE NEW POSITIONS
# ------------------------------------------------------------------------------
x <- x-(xo+dx/2)
y <- y-(yo+dy/2)
z <- z-(zo+dz/2)
z <- 0; NPZ <- 1   # collapse the grid to a single z-plane
# ------------------------------------------------------------------------------
# BSC CALCULATIONS
# ------------------------------------------------------------------------------
RWG <- vwfd.rwg(TE=!TM, kx, ky, kz, x+xo, y+yo, z+zo)
BSC <- vswf.rwg(TM, kx, ky, kz, xo, yo, zo, lmax)
PWE <- vswf.pwe(k, x, y, z, lmax, BSC$GTE, BSC$GTM)
if (TM) { # TM implies Hz=0: compare the Ez components
  tez.RWG <- array(RWG$Ez, c(NPZ, NPY, NPX))[1,,]
  tez.PWE <- array(PWE$Ez, c(NPZ, NPY, NPX))[1,,]
} else {  # TE implies Ez=0: compare the Hz components
  thz.RWG <- array(RWG$Hz, c(NPZ, NPY, NPX))[1,,]
  thz.PWE <- array(PWE$Hz, c(NPZ, NPY, NPX))[1,,]
}
# ------------------------------------------------------------------------------
# NAMES for the image/data output files
# ------------------------------------------------------------------------------
nm.vwf <- "rwg.vwfd.tm.00"
md <- ifelse(TM, "tm", "te")
nm.pwe <- ifelse(lmax < 10, paste("rwg.vswf.", md, ".0", lmax, sep=""),
                 paste("rwg.vswf.", md, ".", lmax, sep=""))
nm.vwf.i <- paste(nm.vwf, ".png", sep="")
nm.vwf.d <- paste(nm.vwf, ".Rdata", sep="")
nm.pwe.i <- paste(nm.pwe, ".png", sep="")
nm.pwe.d <- paste(nm.pwe, ".Rdata", sep="")
# ------------------------------------------------------------------------------
# IMAGE
# ------------------------------------------------------------------------------
source("plots.rwg.R")
#-------------------------------------------------------------------------------
#source("Image.R")
#if(TM){
# zl<-range(Re(tez.RWG))
# #1
# if(!file.exists(nm.vwf.i)){
# png(nm.vwf.i)
# Image((y+yo)/lambda,(x+xo)/lambda,z=Re(tez.RWG),nlevels=256,axes=TRUE,color.palette=cm.colors,#zlim=zl,
# plot.axes={axis(1);axis(2);abline(h=xo/lambda,v=yo/lambda,col='green')},
# xlab=expression(y/lambda),ylab=expression(x/lambda))
# dev.off()
# }
# #2
# png(nm.pwe.i)
# Image((y+yo)/lambda,(x+xo)/lambda,z=Re(tez.PWE),nlevels=256,axes=TRUE,color.palette=cm.colors,#zlim=zl,
# plot.axes={axis(1);axis(2);abline(h=xo/lambda,v=yo/lambda,col='green')},
# xlab=expression(y/lambda),ylab=expression(x/lambda))
# dev.off()
#}else{
# zl<-range(Re(thz.RWG))
# #1
# if(!file.exists(nm.vwf.i)){
# png(nm.vwf.i)
# Image((y+yo)/lambda,(x+xo)/lambda,z=Re(thz.RWG),nlevels=256,axes=TRUE,color.palette=cm.colors,#zlim=zl,
# plot.axes={axis(1);axis(2);abline(h=xo/lambda,v=yo/lambda,col='green')},
# xlab=expression(y/lambda),ylab=expression(x/lambda))
# dev.off()
# }
# #2
# png(nm.pwe.i)
# Image((y+yo)/lambda,(x+xo)/lambda,z=Re(thz.PWE),nlevels=256,axes=TRUE,color.palette=cm.colors,#zlim=zl,
# plot.axes={axis(1);axis(2);abline(h=xo/lambda,v=yo/lambda,col='green')},
# xlab=expression(y/lambda),ylab=expression(x/lambda))
# dev.off()
#}
##-------------------------------------------------------------------------------
## DATASETS
##-------------------------------------------------------------------------------
#if(TM){
# save(RWG, file=nm.vwf.d)
# save(PWE, file=nm.pwe.d)
#}else{
# save(RWG, file=nm.vwf.d)
# save(PWE, file=nm.pwe.d)
#}
|
8387f0f0fe6d3c1dcc132bf6c871ceeff275bb2d
|
627d6dc6554e35fc1835f380157899ecfb8ee377
|
/diffSupers.R
|
8068a806d69a75a100d25e217db187fe2f98a4cb
|
[] |
no_license
|
gauravj49/CLL_TFnetworks_2018
|
4ccd98230cc9cda4aba452403185d2215c94adc7
|
adcace3c398573c74762534bb730e73beb4dbe38
|
refs/heads/master
| 2021-07-01T12:17:55.046831
| 2020-11-25T09:26:36
| 2020-11-25T09:26:36
| 197,175,310
| 1
| 1
| null | 2019-07-16T10:45:45
| 2019-07-16T10:45:45
| null |
UTF-8
|
R
| false
| false
| 7,418
|
r
|
diffSupers.R
|
## ----setup, include=FALSE------------------------------------------------
# Purled R Markdown script: differential super-enhancer analysis (H3K27ac
# ChIP-seq, primary CLL vs CD19). This chunk configures knitr and loads all
# Bioconductor/tidyverse dependencies.
knitr::opts_chunk$set(echo = TRUE,
                      cache = F,
                      warning = F,
                      message = F,
                      tidy = F)
## ----message=FALSE, warning=FALSE----------------------------------------
library(magrittr)
library(tidyverse)
library(readr)
library(readxl)
library(rtracklayer)
library(GenomicRanges)
library(Rsubread)
library(EDASeq)
library(DESeq2)
library(apeglm)
library(BSgenome.Hsapiens.UCSC.hg19)
library(iheatmapr)
# Fixed seed for reproducible sampling/clustering downstream.
set.seed(12345)
genome <- BSgenome.Hsapiens.UCSC.hg19
# Per-sample metadata: IDs, batch columns, BAM paths, ROSE output paths.
final_metadata <- read_tsv("results/primary_sample_metadata.txt")
## ----results='hide'------------------------------------------------------
# Column names of a ROSE super-enhancer table (the files carry a 6-line
# "#"-prefixed header, hence skip = 6 below).
rose_se_table_colnames <- c("REGION_ID","CHROM","START", "STOP", "NUM_LOCI", "CONSTITUENT_SIZE",
                            "chip.signal", "input.signal", "enhancerRank", "isSuper")
# Keep only samples flagged for inclusion in the K27 analysis.
metadata_for_diff_se <- final_metadata %>%
  filter(INCL_K27)
# Read every sample's ROSE table, pool all regions, and convert to GRanges.
supers <- metadata_for_diff_se$K27.SUPERS %>%
  lapply(read_tsv, skip = 6, col_names = rose_se_table_colnames) %>%
  bind_rows() %>%
  select(CHROM, START, STOP) %>%
  mutate(START = START + 1) %>% # GRanges are 1-indexed
  GRanges()
## ----quantify-all-primary-se, cache=T, message=F, warning=F, results='hide'----
# Count reads from a set of BAM files over a set of genomic regions.
#
# gr         GRanges of regions to quantify; strand is discarded so counting
#            is strand-agnostic.
# bamlist    character vector of BAM file paths.
# nthreads   threads passed through to featureCounts.
# paired_end whether the BAMs contain paired-end reads.
#
# Returns the raw count matrix (regions x samples) from featureCounts.
quantifyReads <- function(gr, bamlist, nthreads = 8, paired_end = TRUE) {
  GenomicRanges::strand(gr) <- "*"
  # featureCounts expects a SAF-format annotation data.frame.
  saf <- data.frame(GeneID = as.character(gr),
                    Chr = GenomicRanges::seqnames(gr),
                    Start = GenomicRanges::start(gr),
                    End = GenomicRanges::end(gr),
                    Strand = GenomicRanges::strand(gr))
  # TRUE/FALSE spelled out -- T/F are ordinary (reassignable) bindings and
  # unsafe in library code.
  cts <- Rsubread::featureCounts(bamlist, annot.ext = saf, nthreads = nthreads,
                                 isPairedEnd = paired_end,
                                 allowMultiOverlap = FALSE,
                                 largestOverlap = TRUE,
                                 requireBothEndsMapped = FALSE)
  cts$counts
}
# Quantify H3K27ac signal over the pooled super-enhancer regions.
cts <- quantifyReads(supers, metadata_for_diff_se$K27.BAM,
                     paired_end = T, # These are paired k27 chips
                     nthreads = 22)
counts <- cts %>% set_colnames(metadata_for_diff_se$ID)
## ----se-deseq------------------------------------------------------------
saveRDS(counts, file = "results/k27_counts.rds")
# Wrap counts in a RangedSummarizedExperiment keyed by the region coordinates.
k27_rse <- SummarizedExperiment(assays = list(counts = counts),
                                rowRanges = GRanges(rownames(counts)))
k27_rse %<>% sort %>% chromVAR::filterPeaks(non_overlapping =T) # remove overlaps
# Per-region GC fraction, used below for GC-content bias correction.
gcview <- Biostrings::Views(genome, rowRanges(k27_rse))
gcFrequency <- Biostrings::letterFrequency(gcview,
                                           letters = "GC",
                                           as.prob = TRUE) %>%
  set_colnames("GC")
mcols(k27_rse) <- cbind(mcols(k27_rse), gcFrequency)
# Sample-level covariates (batches + condition) as factors, rownames = ID.
coldata <- metadata_for_diff_se[c("ID","K27.bench.batch","K27.seq.batch","CONDITION")] %>%
  as.data.frame() %>%
  tibble::column_to_rownames("ID")
coldata[c("K27.bench.batch","K27.seq.batch","CONDITION")] %<>%
  lapply(as.factor)
colData(k27_rse) <- DataFrame(coldata)
# EDASeq within/between-lane full-quantile normalization producing offsets
# that become DESeq2 normalization factors.
eda_data <- newSeqExpressionSet(counts = as.matrix(counts(k27_rse)),
                                featureData = as.data.frame(mcols(k27_rse,
                                                                  use.names = T)),
                                phenoData = coldata["K27.bench.batch"])
# for color coding corrected signal plots
dataOffset <- EDASeq::withinLaneNormalization(eda_data, "GC",
                                              which = "full", offset = T)
dataOffset <- EDASeq::betweenLaneNormalization(dataOffset,
                                               which = "full", offset = T)
# Diagnostic plots: GC bias before vs after correction.
EDASeq::biasPlot(eda_data, "GC", log = TRUE, ylim =c(0,10))
EDASeq::biasPlot(dataOffset, "GC", log = TRUE, ylim = c(0, 10))
# Convert EDASeq offsets (log scale) to DESeq2-style factors, geometric-mean
# centered per row as DESeq2 requires.
EDASeqNormFactors <- exp(-1 * EDASeq::offst(dataOffset))
EDASeqNormFactors <- EDASeqNormFactors/exp(rowMeans(log(EDASeqNormFactors)))
counts(k27_rse) <- as.matrix(counts(k27_rse)) # deseq2 wants this to a vanilla matrix
dds <- DESeqDataSet(k27_rse, design = ~ CONDITION)
# CD19 (normal B cells) is the reference level for the contrast.
dds$CONDITION <- relevel(dds$CONDITION, ref = "CD19")
normalizationFactors(dds) <- EDASeqNormFactors
dds <- DESeq(dds,quiet = T)
saveRDS(dds, file = "results/pCLL_se_dds.rds")
## ----se-deseq-res--------------------------------------------------------
# apeglm-shrunken log2 fold changes; significant regions at FDR < 0.1.
res <- lfcShrink(dds, coef = "CONDITION_pCLL_vs_CD19", type = "apeglm") %>%
  as.data.frame %>%
  rownames_to_column("Locus") %>%
  as_tibble()
sig <- res %>% filter(padj < 0.1) %>% arrange(desc(log2FoldChange))
## ----export-diff-se-table------------------------------------------------
# Write a GRanges object out as a ROSE-style (super)enhancer table so it can
# be fed to ROSE2_geneMapper downstream.
#
# gr   GRanges of enhancer regions.
# file output path for the tab-separated table.
#
# The output mimics ROSE's layout: five "#"-prefixed header lines, a column
# header row, then one row per region; everything is written as the body
# (col_names = FALSE) because the header lines are part of the format.
export_enhancer_table <- function(gr, file) {
  region_id <- as.character(gr)
  chrom <- seqnames(gr) %>% as.vector
  start <- start(gr) %>% as.vector
  stop <- end(gr) %>% as.vector
  num_loci <- 1
  constituent_size <- width(gr)
  bam <- "meta"
  enhancer_rank <- 1
  is_super <- 1
  # V1..V9 names let the header matrix and the data rows rbind cleanly.
  rangedata <- tibble(region_id,chrom,start,stop,num_loci,
                      constituent_size,bam,enhancer_rank,is_super) %>%
    set_names(paste0("V",1:9))
  exp <- matrix(nrow = 6, ncol = 9)
  exp[1,] <- c("# Differential Results",rep("",8))
  exp[2,] <- c("# DESeq2",rep("",8))
  exp[3,] <- c("# Multiple bams",rep("",8))
  exp[4,] <- c(paste0("# ", Sys.Date()),rep("",8))
  exp[5,] <- c("# ",rep("",8))
  cn <- c("REGION_ID", "CHROM","START", "STOP",
          "NUM_LOCI", "CONSTITUENT_SIZE", "[bam]",
          "enhancerRank","isSuper")
  exp[6,] <- cn
  # as_tibble(): as.tibble() has been deprecated since tibble 2.0.
  exp %<>% as_tibble()
  rbind(exp, rangedata) %>% write_tsv(file, col_names = FALSE)
}
# Export gained (LFC > 0), lost (LFC < 0), and full search-space tables in
# ROSE format for gene assignment with ROSE2_geneMapper (command below).
sig %>% subset(log2FoldChange > 0) %>%
  .$Locus %>% GRanges %>%
  export_enhancer_table("results/gained.SuperEnhancers.txt")
sig %>% subset(log2FoldChange < 0) %>%
  .$Locus %>% GRanges %>%
  export_enhancer_table("results/lost.SuperEnhancers.txt")
rownames(counts) %>%
  GRanges() %>%
  export_enhancer_table("results/searchspace.SuperEnhancers.txt")
## python2.7 ~/pipeline/ROSE2_geneMapper.py -i ./results/gained.SuperEnhancers.txt -g HG19 -o ./results/
## ----se-norm-ct-z, results='hide'----------------------------------------
# Read back ROSE gene assignments (locus -> closest gene) for both directions.
gained_gene_calls <- read_tsv("results/gained_ENHANCER_TO_GENE.txt", skip = 1, col_names = F) %>%
  select(X1, X12) %>%
  set_colnames(c("Locus", "Closest.Gene"))
lost_gene_calls <- read_tsv("results/lost_ENHANCER_TO_GENE.txt", skip = 1, col_names = F) %>%
  select(X1, X12) %>%
  set_colnames(c("Locus", "Closest.Gene"))
# Manually curated loci to label on the heatmap.
to_hilite <- read_excel("tables/DifSEs_to_highlight.xlsx",
                        col_names = c("Locus", "Gene"))
# Row-scaled (z-score) normalized counts for the significant regions.
sig_cts <- counts(dds, normalized = T) %>%
  subset(rownames(.) %in% sig$Locus)
row_z <- t(scale(t(sig_cts)))
# Row label: gene symbol for highlighted loci, empty string otherwise.
is_hilite <- rownames(row_z) %>%
  lapply(FUN = function(x ) {
    ifelse(x %in% to_hilite$Locus, subset(to_hilite, Locus == x)[["Gene"]], "")
  } ) %>% unlist
## ----se-diff-heat--------------------------------------------------------
# Clustered interactive heatmap of differential super-enhancer signal.
se_heat <- main_heatmap(row_z, name = "Row Z-score Norm Cts") %>%
  add_col_annotation(colData(dds)["CONDITION"]) %>%
  add_row_clustering() %>%
  add_col_clustering() %>%
  add_row_labels(tickvals = 1:length(is_hilite),
                 ticktext = is_hilite, side = "right",
                 font = list(size = 6))
se_heat
## ----save-se-heat, include=F, echo=F-------------------------------------
save_iheatmap(se_heat, "results/diff-supers.heat.pdf")
|
014df094fd31376ed48aa3d18394340d52965409
|
2ed676cabbe5cc9531a1d159a95a04c0bcc7cbb1
|
/plot2.R
|
5d8d7f7aff574fd3a47d9a047782d99306c7f061
|
[] |
no_license
|
imga2020/ExData_Plotting1
|
add1beaa9c50c6caf2aadf175ecf127b47b1272f
|
c572b9a7e43b7432ae5664a8d73f1bc1c63248f5
|
refs/heads/master
| 2022-12-01T06:41:07.678739
| 2020-07-25T23:33:43
| 2020-07-25T23:33:43
| 282,058,148
| 0
| 0
| null | 2020-07-23T21:23:36
| 2020-07-23T21:23:35
| null |
UTF-8
|
R
| false
| false
| 1,740
|
r
|
plot2.R
|
#Coursera Exploratory Data Analysis
#Course Project 1
#Plot2: Global Active Power over 2007-02-01 / 2007-02-02
#Download from zip file (skip if the archive already exists locally)
fileUrl1 <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
filename <- "DataHouseholdpower.zip"
if (!file.exists(filename)) {
  download.file(fileUrl1, filename, method = "curl")
  unzip(filename)
}
#Path to read files -- adjust for your machine
path1 <- "C:/Users/Economics05/Documents/Coursera"
# Read with na.strings = "?" so Global_active_power comes in numeric directly;
# the original omitted it, got a factor column, and had to work around it with
# as.numeric(as.character(...)).
datahouseholdpower <- read.table(file.path(path1, "household_power_consumption.txt"),
                                 header = TRUE, sep = ";", na.strings = "?",
                                 stringsAsFactors = FALSE)
#Subset the data from 2007-02-01 and 2007-02-02
subdatahouseholdpower <- subset(datahouseholdpower,
                                datahouseholdpower$Date == "1/2/2007" | datahouseholdpower$Date == "2/2/2007")
# Build the full timestamp from Date + Time in one parse. This replaces the
# original's fragile fix of re-formatting hard-coded row ranges (1:1440,
# 1441:2880) to patch the date part back in.
datetime <- strptime(paste(subdatahouseholdpower$Date, subdatahouseholdpower$Time),
                     format = "%d/%m/%Y %H:%M:%S")
#Plot2
png(filename = "plot2.png", width = 480, height = 480)
plot(datetime, subdatahouseholdpower$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
db640f7bdcf08f7e72122ec091db0ac2a8526b60
|
dba2f6213ec4130b7f8344f8a06855a33f2f5be9
|
/plot3.R
|
5ae1fa9f080b8a7a92d3d0a4f142031dbef932bb
|
[] |
no_license
|
yrahan/ExData_Plotting1
|
5720a809b96db0b2a891bd31af20a0b56454c6e5
|
6f0fec97960e8b5f2cc18659e29fb64b585267b3
|
refs/heads/master
| 2021-01-18T11:45:11.973609
| 2014-05-11T14:17:20
| 2014-05-11T14:17:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,297
|
r
|
plot3.R
|
# Plot 3: the three energy sub-metering series for 2007-02-01 / 2007-02-02.
# read data; "?" marks missing values. Fixed partial argument match:
# the parameter is na.strings (the original wrote na.string=, relying on
# R's partial matching).
consumption <- read.table("household_power_consumption.txt", header=TRUE, sep=";",
                          na.strings="?")
# convert column Date to Date format
consumption$Date <- as.Date(as.character(consumption$Date),format="%d/%m/%Y")
datesPerimeter <- c(as.Date("2007-02-01" , format="%Y-%m-%d"),
                    as.Date("2007-02-02" , format="%Y-%m-%d"))
# select data in the date perimeter
selection <- consumption[consumption$Date %in% datesPerimeter,]
# Build the datetime axis once; the original re-parsed the identical
# paste(Date, Time) with strptime() for each of the three series.
datetime <- strptime(paste(selection$Date, selection$Time),
                     format="%Y-%m-%d %H:%M:%S")
# open png device
png(file="plot3.png",width=480,height=480)
# make the figure
# Sys.setlocale(category = "LC_TIME", locale = "C")
plot(datetime,
     selection$Sub_metering_1,
     xlab="",
     ylab="Energy sub metering",
     type="l"
)
lines(datetime,
      selection$Sub_metering_2,
      col="red"
)
lines(datetime,
      selection$Sub_metering_3,
      col="blue"
)
legend("topright",
       lty=c(rep(1,3)),
       col = c("black","red", "blue"),
       legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3")
)
# close file
dev.off()
# Sys.setlocale(category = "LC_TIME", locale = "")
|
5c836fb6cd4a02463206030931545ad287964473
|
3c2715e0dfade25fbedb65aaa21b99a677c2e1d2
|
/LDA_logreg_functions.R
|
ea9f10b0f2f76bfe8e6110d90c5f6bd6c4d4bc2c
|
[] |
no_license
|
AakashAhuja30/Topic-Modelling-using-Latent-Dirichlet-Allocation-Algorithm
|
02be9586f013cfcf332e3249e1e5cbc532643e7c
|
f75cc4b7687287dc2cfbb7f536e091e1b549d26b
|
refs/heads/main
| 2023-01-14T05:54:25.086423
| 2020-11-10T15:24:08
| 2020-11-10T15:24:08
| 311,695,379
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,880
|
r
|
LDA_logreg_functions.R
|
#Main Function
# Latent Dirichlet Allocation via collapsed Gibbs sampling.
#
# files     list/vector of documents, each a single space-separated string.
# K         number of topics.
# top_words how many top words to report per topic.
#
# Side effect: writes the per-topic top words to 'FinalOutput.csv'.
# Returns list(theta, bag_of_words, top_5):
#   theta        document x topic count data.frame (raw counts; the
#                normalized version is left commented out below),
#   bag_of_words document x word frequency matrix,
#   top_5        K x top_words matrix of the highest-count words per topic.
Main_function<-function(files,K,top_words){
  #Getting the vocab from the docs
  temp2<-unlist(files)
  docs<-strsplit(temp2, split=' ', perl = T)
  # Per-document word frequency tables, later merged into one bag of words.
  t1 <- vector(mode = "list", length = length(docs))
  fre_tab <- vector(mode = "list", length = length(docs))
  fre_tab2 <- vector(mode = "list", length = length(docs))
  for (j in 1:length(docs)) {
    t1[[j]] <- data.frame(table(docs[[j]]))
    fre_tab[[j]]<-t(t1[[j]][2])
    colnames(fre_tab[[j]])<- as.character(t1[[j]][,1])
    fre_tab2[[j]] <-data.frame(fre_tab[[j]])
  }
  # Union of all words; documents missing a word get NA, replaced by 0.
  allNms <- unique(unlist(lapply(fre_tab2, names)))
  bag_of_words<-do.call(rbind,
                        c(lapply(fre_tab2,
                                 function(x) data.frame(c(x, sapply(setdiff(allNms, names(x)),
                                                                    function(y) NA)))),
                          make.row.names=FALSE))
  bag_of_words[is.na(bag_of_words)]<-0
  vocab <- unique(unlist(docs))
  ## Replace words in documents with wordIDs
  for (i in 1:length(docs)) {
    docs[[i]]<- match(docs[[i]],vocab)
  }
  #Setting parameters
  # alpha: symmetric Dirichlet prior on document-topic; Beta: word-topic prior.
  alpha <- 5/K
  Beta <- .01
  iterations <- 500
  # Generate null word-topic count matrix.
  word_topic_count<-matrix(0,K,length(vocab))
  # Create an empty topic list with same dimensions as document list
  topic_assignment<-sapply(docs, function(x) rep(0, length(x)))
  #Assign topics to each word in the docs list randomly and then update the count word_topic count matrix
  for (line in 1:length(docs)) {
    for (word in 1:length(docs[[line]])) {
      topic_assignment[[line]][word]<-sample(1:K,1)
      word_index <- docs[[line]][word]
      topic_index <- topic_assignment[[line]][word]
      word_topic_count[topic_index,word_index]<-word_topic_count[topic_index, word_index] + 1
    }
  }
  #Document topic count
  document_topic <- matrix(0, length(docs), K)
  for (eachdoc in 1:length(docs)) {
    for (eachtopic in 1:K) {
      document_topic[eachdoc,eachtopic]<- sum(topic_assignment[[eachdoc]]==eachtopic)
    }
  }
  p_temp<-c()
  # Collapsed Gibbs sweeps: for each word, remove its current assignment from
  # the counts, sample a new topic from the full conditional, and add it back.
  for(i in 1:iterations){
    for (eachdoc in 1:length(docs)) {
      for (eachword in 1:length(docs[[eachdoc]])) {
        t0 <- topic_assignment[[eachdoc]][eachword] #Pick up topic id of the word
        word_id <- docs[[eachdoc]][eachword] # Pick up word id of word
        document_topic[eachdoc,t0] <- document_topic[eachdoc,t0]-1
        word_topic_count[t0,word_id] <- word_topic_count[t0,word_id]-1
        #for (t in 1:K) {
        #  p_temp[t]<-((document_topic[eachdoc,t] + alpha) / ( (K * alpha) + sum(document_topic[eachdoc,1:K]))) * ((word_topic_count[t,1] + Beta) / ((length(vocab) * Beta) + (rowSums(word_topic_count)[t])))
        #}
        # Vectorized full conditional over all K topics.
        denom_a <- sum(document_topic[eachdoc,]) + K * alpha
        denom_b <- rowSums(word_topic_count) + (length(vocab) * Beta)
        p_temp <- ((word_topic_count[,word_id] + Beta) / denom_b) * ((document_topic[eachdoc,] + alpha) / denom_a)
        t1 <- sample(1:K,1,prob = p_temp/sum(p_temp))
        topic_assignment[[eachdoc]][eachword] <- t1
        document_topic[eachdoc,t1] <- document_topic[eachdoc,t1] + 1
        word_topic_count[t1,word_id] <- word_topic_count[t1,word_id] + 1
      }
    }
  }
  #word_distribution <- (word_topic_count + Beta) / ((rowSums(word_topic_count)+(word_topic_count*Beta))) # topic probabilities per word
  word_distribution<-word_topic_count
  colnames(word_distribution) <- vocab
  colnames(word_topic_count)<-vocab
  # Top words per topic by raw count (ties broken by order()).
  top_5<-t(apply(word_distribution,1,function(x) names(x)[order(x,na.last=NA, decreasing = T)][1:top_words]))
  theta<-document_topic
  #theta <- (document_topic+alpha) / ((rowSums(document_topic))+ (K*alpha))
  theta<-data.frame(theta)
  write.csv(top_5,'FinalOutput.csv')
  #return(list(document_topic, word_topic_count))
  return(list(theta,bag_of_words,top_5))
}
# Bayesian logistic regression via Newton-Raphson (MAP estimate with a
# Gaussian prior, variance 100), evaluated over 10 growing training-set sizes.
#
# X feature matrix/data.frame; Y column of 0/1 labels aligned with X.
#
# A random 1/3 of rows is held out as the test set; the remaining rows are
# used in 10%..100% slices. Returns a data.frame with one row per slice:
# Error_rate (test misclassification), iterations, run_time.
#
# NOTE(review): indexing with training_set_sizes[10*i] where i comes from
# seq(0.1,1,0.1) relies on float truncation (e.g. 10*0.3 = 3.0000000000000004
# truncates to 3). It happens to work, but integer loop indices would be safer.
W_Map_test <- function(X,Y) {
  #Taking random sample
  N<-nrow(X)
  test_size<-round((1/3)*N)
  train_size<- N- test_size
  random_sample<-sample(N,test_size, replace = F)
  #Test Data
  X_test<-X[random_sample,]
  Y_test<-data.frame(Y[random_sample,])
  #Train Data
  X_train<-X[-random_sample,]
  Y_train<-data.frame(Y[-random_sample,])
  #Converting the data frames into matrices for computation
  X_train<-as.matrix(X_train)
  Y_train<-as.matrix(Y_train)
  X_test<-as.matrix(X_test)
  #Y_test<-as.matrix(Y_test)
  #Adding the intercept term to the test set
  x0_test = rep(1,nrow(Y_test)) #bias
  X_test = cbind(x0_test,X_test)
  training_set_sizes<-c()
  W_map_result<-c()
  iterations<-c()
  run_time<-c()
  for (i in seq(0.1,1,0.1)) {
    #This gives the 10 training set sizes at each index of training_set_sizes
    training_set_sizes[10*i]<-round(i*train_size)
    #Adding the intercept term to training data
    x0 = rep(1,training_set_sizes[10*i]) #bias
    training_data = cbind(x0,X_train[1:training_set_sizes[10*i],])
    #Calculating Wmap for the given training set size
    Betas<- matrix(NA,ncol=1,nrow=ncol(training_data)) #Store Betas in this matrix
    Betas[,1] <- rep(0, ncol(training_data)) #starting values
    j<-2
    start_time <- Sys.time()
    # Newton-Raphson: iterate Beta <- Beta - H^-1 * gradient until the update
    # is below 1e-3 in every coordinate, or fail after 100 iterations.
    repeat {
      a <- plogis( as.matrix(training_data) %*% Betas[,j-1])
      R <- diag(c(a*(1-a)))
      # Prior precision 1/100 (Gaussian prior with variance 100 on each weight).
      S.inv <-solve((1/100))
      first_derivative <- crossprod(training_data,(Y_train[1:training_set_sizes[10*i],] - a)) - as.matrix(Betas[,j-1])%*% S.inv
      temp1<-crossprod(training_data,R)%*%training_data
      diag(temp1)<-diag(temp1)+S.inv
      Hessian<- - temp1
      Hessian_inverse<-solve(Hessian)
      Betas <- cbind(Betas,Betas[,j-1] - (Hessian_inverse%*%first_derivative))
      if (all(abs(Betas[,j]-Betas[,j-1]) < (1e-3) )) break
      if (j>100) stop("Failure to find root after 100 iterations.")
      j<-j+1
    }
    end_time <- Sys.time()
    run_time[10*i]<-end_time - start_time
    #Checking the error for the given training set size with its wmap
    # Predict with the final Beta column; threshold the sigmoid at 0.5.
    accuracy_table<-as.data.frame(plogis(X_test %*% t(Betas)[nrow(t(Betas)),]))
    colnames(accuracy_table)[1]<-"Sigmoid_Values"
    accuracy_table$prediction<-ifelse(accuracy_table$Sigmoid_Values >=0.5, 1,0)
    accuracy_table$actual_values<-Y_test
    accuracy_table$error<-ifelse(accuracy_table$prediction==accuracy_table$actual_values, 0,1)
    errors<-sum(accuracy_table$error)
    W_map_result[10*i]<-errors/nrow(Y_test)
    iterations[10*i]<-j-1
  }
  return(data.frame(cbind(Error_rate=W_map_result,iterations, run_time)))
}
# Plot mean accuracy (with +/- standard-error bars) against training-set size.
#
# sums  data.frame whose columns come in groups of three per experiment
#       repetition: error rate, iteration count, run time. Only every third
#       column (the error rates) is used here. 10 rows, one per slice size.
# X     the full design matrix; only nrow(X) is used to recover the
#       training-set sizes that W_Map_test() evaluated.
# title plot title.
#
# Side effect only: draws a base-graphics plot with error bars.
GraphPlot <- function(sums, X, title) {
  n_obs <- nrow(X)
  test_size <- round((1/3) * n_obs)
  train_size <- n_obs - test_size
  # Fixed: the original filled this vector in a loop indexed by 10*i with
  # i from seq(0.1, 1, 0.1), which only works because R truncates the
  # float index (10*0.3 is not exactly 3). The vectorized form below
  # produces identical values directly.
  trainsetsize <- round(seq(0.1, 1, 0.1) * train_size)
  # Every third column of sums holds an error-rate replicate.
  ErrorsSample <- data.frame(sums[seq(1, length(sums), 3)])
  Accuracy <- 1 - ErrorsSample
  error_mean <- apply(ErrorsSample, 1, mean)
  accuracy_mean <- 1 - error_mean
  # Standard error of the mean accuracy across replicates, per slice size.
  accuracy_stderror <- apply(Accuracy, 1, function(x) { sd(x) / sqrt(length(x)) })
  plot(trainsetsize, accuracy_mean, type = "o",
       ylim = range(c(accuracy_mean - accuracy_stderror,
                      accuracy_mean + accuracy_stderror)),
       pch = 19, xlab = "Train Set Size", ylab = "Accuracy +/- SD Error",
       main = title)
  # Vertical error bars drawn as double-headed arrows.
  arrows(trainsetsize, accuracy_mean - accuracy_stderror,
         trainsetsize, accuracy_mean + accuracy_stderror,
         length = 0.05, angle = 90, code = 3)
}
|
f02e4e4f76c8101ccfd39f16dd9e3cd7566a76d4
|
71821a5612e50fc8120afc8c5dc18019dadb9e84
|
/1BM17CS024_DSR Lab/lab 6 04-11-20/dotchart.R
|
62971527683174d5102aac98d5fd9d222b64f386
|
[] |
no_license
|
dikshajain228/Semester-7
|
825229cd63c4a047ac5dd5c3896a43b9835a791d
|
996def1ada173ac60d9fd0e4c9da4f954d2de4f0
|
refs/heads/master
| 2023-02-04T22:19:25.984283
| 2020-12-20T07:48:06
| 2020-12-20T07:48:06
| 297,544,965
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 133
|
r
|
dotchart.R
|
# Dot chart of mtcars fuel economy (base graphics; mtcars ships with R).
# NOTE(review): the original ran install.packages("ggplot2") on every
# execution -- a package the script never uses -- while loading gcookbook.
# Install dependencies once, interactively, not inside the script:
# install.packages("gcookbook")
library("gcookbook")
# (Interactive print of the full mtcars data set removed; it was console noise.)
dotchart(mtcars$mpg, labels = row.names(mtcars), cex = 0.6, xlab = "mpg")
|
c14c44d4ae78f9052cb378fe364304e03c4208d2
|
aa29ec4169d341764c714e2f161a09b6d58396ef
|
/Triangle BLDS/triangle BLDS.R
|
c4bee9e41f31ee46001dcbac20d9c3ea5570377b
|
[] |
no_license
|
dnzengou/R_Projects
|
3c6b63244041ab3536f48c2e43ef5438172d1a44
|
9e66033b694e53017af496b263ee74631d053f83
|
refs/heads/master
| 2020-04-02T00:12:49.428521
| 2016-03-15T04:29:03
| 2016-03-15T04:29:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,507
|
r
|
triangle BLDS.R
|
# Consolidate building-permit data from Triangle-area jurisdictions into a
# common BLDS-style schema. This section: City of Raleigh (old Socrata feed).
library("RSocrata")
library("httr")
library("devtools")
library("jsonlite")
library("curl")
library("gtools")
# NOTE(review): installing a package on every run is a side effect better done
# once, interactively; kept for parity with the original workflow.
install_github("Chicago/RSocrata")
#city of Raliegh old and deprecated
cdi4_url <- "https://permits.partner.socrata.com/resource/pjib-v4rg.csv?$limit=50000"  # <- (was =) for assignment
raleigh <- read.csv(curl(cdi4_url))
raleigh$city <- "City of Raleigh"
raleigh$applieddates <- strptime(raleigh$AppliedDate, format = "%m/%d/%Y %H:%M:%S")
raleigh$issueddates <- strptime(raleigh$IssuedDate, format = "%m/%d/%Y %H:%M:%S")
raleigh$Location_extra <-paste("(",raleigh$Latitude,",",raleigh$Longitude,")")
# NOTE(review): applied - issued is typically negative (applications precede
# issuance); confirm downstream expects this sign before reordering the args.
raleigh$difftime <- as.numeric(difftime(raleigh$applieddates,raleigh$issueddates, units = "days"))
# Drop feed-specific "_extra" columns not in the common schema.
raleigh$in_out_city_limits_extra <- NULL
raleigh$county_extra <- NULL
raleigh$county_account_number_extra <- NULL
raleigh$dwelling_units_total_extra <- NULL
raleigh$number_of_stories_extra <- NULL
raleigh$owner_name_extra <- NULL
raleigh$lot_number_extra <- NULL
raleigh$proposed_work_extra <- NULL
raleigh$county_account_number_extra <- NULL
raleigh$dwelling_units_total_extra <- NULL
# Fixed: was "raliraleigh$proposed_work_extra <- NULL" -- an undefined object
# that aborted the script with "object 'raliraleigh' not found".
raleigh$proposed_work_extra <- NULL
raleigh$development_plan_name_extra <- NULL
raleigh$geom <- "NA"
#Raleigh New Dataset all good now
# City of Raleigh's replacement open-data feed; map its columns onto the
# common schema used by the other jurisdictions.
cdi5_url <- 'https://data.raleighnc.gov/resource/xce4-kemu.csv?$limit=50000'
raliegh2 <- read.csv(curl(cdi5_url))
# Columns the common schema expects but this feed does not provide.
raliegh2$AppliedDate <- NA
raliegh2$CompletedDate <- NA
raliegh2$PermitTypeDescription <- NA
raliegh2$StatusDate <- NA
raliegh2$Link <- NA
# Rename feed-specific columns to the shared names, dropping the originals.
raliegh2$Latitude <- raliegh2$Latitude_perm
raliegh2$Latitude_perm <- NULL
raliegh2$Longitude <- raliegh2$Longitude_perm
raliegh2$Longitude_perm <- NULL
raliegh2$EstProjectCostText <- NA
raliegh2$Units <- raliegh2$HousingUnitsTotal
raliegh2$HousingUnitsTotal <- NULL
raliegh2$ContractorTradeMapped <- NA
raliegh2$ContractorStateLic <- NA
raliegh2$AddedSqFt <- NA
raliegh2$MasterPermitNum <- NA
raliegh2$HoldDate <- NA
raliegh2$ProjectID <- raliegh2$DevPlanID
raliegh2$DevPlanID <- NULL
raliegh2$TotalFinishedSqFt <- NA
raliegh2$TotalUnfinishedSqFt <- NA
raliegh2$TotalHeatedSqFt <- NA
raliegh2$TotalUnHeatedSqFt <- NA
raliegh2$TotalAccSqFt <- NA
raliegh2$TotalSprinkledSqFt <- NA
raliegh2$ContractorFullName <- NA
raliegh2$ContractorCompanyDesc <- NA
raliegh2$city <- "City of Raleigh"
raliegh2$applieddates <- NA
raliegh2$issueddates <- NA
# Fixed: all three references on this line said "raleigh2" (undefined at this
# point -- the frame is spelled "raliegh2"), which aborted the script.
raliegh2$Location_extra <-paste("(",raliegh2$Latitude,",",raliegh2$Longitude,")")
# Strip raliegh2 down to the shared schema: remove feed-specific columns and
# backfill the remaining expected fields.
raliegh2$CoRLotId <- NULL
raliegh2$CensusLandUse <- NULL
raliegh2$CensusLandUseCode <- NULL
raliegh2$CntyAcctNum <- NULL
raliegh2$COCIssuedDate <- NULL
raliegh2$COCNbr <- NULL
raliegh2$ConstCompletedOfficial <- NULL
raliegh2$ContractorDBA <- NULL
raliegh2$CountyLocation <- NULL
raliegh2$ExpiredNewPermNum <- NULL
raliegh2$Fee <- NULL
raliegh2$FiscalYear <- NULL
raliegh2$Geocoded_MatchScore <- NULL
# NOTE(review): this overwrites the "(lat,lon)" Location_extra value set just
# above with the geocoded address string -- confirm which form is intended.
raliegh2$Location_extra <- raliegh2$Geocoded_PermAddr
raliegh2$GovtOwnedProp<- NULL
raliegh2$Geocoded_PermAddr <- NULL
raliegh2$GroupTenantName <- NULL
raliegh2$GRP_COMMENT_1 <- NULL
raliegh2$GRP_B_BLDG_OCC_CLASS_NEW <- NULL
raliegh2$GRP_BLDG_BASEMENT_OCCUPIED <- NULL
raliegh2$GRP_COMMENT_2 <- NULL
raliegh2$GRP_CO_COMMENT <- NULL
raliegh2$GRP_REVIEW_PATH <-NULL
raliegh2$GRP_BLDG_FOOTPRINT <-NULL
raliegh2$GRP_TEMP_CO_COMMENT_1 <- NULL
raliegh2$GRP_TEMP_CO_COMMENT_2 <- NULL
raliegh2$HousingUnitsExist <- NULL
raliegh2$IssuedDate_Mth <-NULL
raliegh2$IssuedDate_Yr <- NULL
raliegh2$Jurisdiction_AtIssue <- NULL
raliegh2$Jurisdiction_InOut_Ral <- NULL
raliegh2$LandUseNewUse <- NULL
raliegh2$Location_Geocoded <- NULL
raliegh2$MapSheet <- NULL
raliegh2$NumberStories <- NULL
raliegh2$OriginalAddressFull <- NULL
raliegh2$PARC_OPEN_SPACE_ZONE_FEE <- NULL
raliegh2$ParcelOwnerName <- NULL
raliegh2$ParcelOwnerAddress1 <- NULL
raliegh2$ParcelOwnerAddress2 <- NULL
raliegh2$PERM_COMMENTS <- NULL
raliegh2$Publisher <- NULL
raliegh2$ProposedWorkDescription <- NULL
raliegh2$RecordUpdateDate <- NULL
raliegh2$StreetDirectionPrefix <- NULL
raliegh2$StreetDirectionSuffix <- NULL
raliegh2$ReviewerComments <- NULL
raliegh2$StreetType <- NULL
raliegh2$StreetNum <- NULL
raliegh2$difftime <- NA
raliegh2$geom <- NA
raliegh2$EstProjectCostDec <- NA
raliegh2$PermitTypeDesc <- raliegh2$PermitTypeDescription
raliegh2$PermitTypeDescription <- NULL
#Town of Cary all good
# Town of Cary permit applications (JSON feed); flatten the nested $fields
# columns onto the common schema.
# NOTE(review): this section reads like a console transcript -- several
# assignments are exact repeats. Repeats are kept (harmless no-ops); lines
# that referenced undefined objects and aborted the script were removed and
# are flagged below.
url <- 'https://data.townofcary.org/explore/dataset/permit-applications/download/?format=json&timezone=America/New_York'
document <- fromJSON(txt=url)
document$contractortrademapped <- document$fields$contractortrademapped
document$originalstate <- document$fields$originalstate
document$originaladdress1 <- document$fields$originaladdress1
document$permitclassmapped <- document$fields$permittypemapped
document$originalstate <- document$fields$originalstate
document$contractortrademapped <- document$fields$contractortrademapped
document$originaladdress1 <- document$fields$originaladdress1
document$permitclassmapped <- document$fields$permittypemapped
document$originalstate <- document$fields$originalstate
document$city <- "Town of Cary"
# Fixed: removed "documents$fields$contractorphone" -- a typo for "document"
# that errored with "object 'documents' not found"; the corrected line follows.
document$contractorphone <- document$fields$contractorphone
document$permittypemapped <- document$fields$permittypemapped
document$permitnumber <- document$fields$permitnum
document$applieddate <- document$fields$applieddate
document$issuedate <- strptime(document$fields$issuedate, format = "%m/%d/%Y")
document$applieddate <- strptime(document$fields$applieddate, format = "%m/%d/%Y")
document$contractoraddress <- document$fields$contractoraddress1
document$ownerzip <- document$fields$ownerzip
document$ownerzip <- document$fields$ownerzip
document$ownerzip <- document$fields$ownerzip
document$ownername <- document$fields$ownername
document$Location_extra <-paste("(",document$fields$latitude,",",document$fields$longitude,")")
document$statuscurrentmapped <- document$fields$statuscurrentmapped
document$Link <- document$fields$link
document$PermitNum <- document$permitnumber
document$Description <- document$fields$description
document$applieddates <- document$applieddate
document$AppliedDate <- document$applieddate
# Fixed: removed "docuemtn$IssuedDate <- ..." -- a typo for "document" that
# errored with "object 'docuemtn' not found"; the corrected line follows.
document$IssuedDate <- document$issuedate
document$issueddates <- document$issuedate
document$CompletedDate <- document$fields$completeddate
document$OriginalAddress1 <- document$originaladdress1
document$OriginalAddress2 <- "NA"
document$OriginalCity <- "Town of Cary"
document$OriginalState <- document$originalstate
document$originalstate <- NULL
document$OriginalZip <- "NA"
document$Jurisdiction <- "NA"
document$PermitClass <- document$fields$permitclass
document$PermitClassMapped <- document$permitclassmapped
document$StatusCurrent <- document$statuscurrentmapped
document$StatusCurrentMapped <- document$statuscurrentmapped
document$WorkClass <- document$fields$workclass
document$WorkClassMapped <- document$fields$workclassmapped
document$PermitType <- document$fields$permittype
document$PermitTypeMapped <- document$permittypemapped
document$permittypemapped <- NULL
document$PermitTypeDesc <- document$fields$permittypedesc
# Fixed: removed "document$StatusDate <- document$fei" -- a transcript typo
# ("fei" matches no field and yielded NULL); the corrected line follows.
document$StatusDate <- document$fields$statusdate
document$TotalSqFt <- document$fields$totalsqft
document$Latitude <- document$fields$latitude
document$Longitude <- document$fields$longitude
document$Longitude <- document$fields$longitude
document$EstProjectCostDesc <- document$fields$projectcost
document$Units <- "NA"
document$Pin <- "NA"
document$ContractorCompanyName <- document$fields$contractortrademapped
# NOTE(review): issueddates is re-derived from applieddate here (and
# applieddates from CompletedDate) -- this looks inverted; confirm intent
# before the difftime below is trusted.
document$issueddates <- strptime(document$fields$applieddate, format = "%Y-%m-%d")
document$applieddates <- strptime(document$CompletedDate, format = "%Y-%m-%d")
document$difftime <- as.numeric(difftime(document$applieddates,document$issueddates, units = "days"))
# Strip the Cary frame down to the shared schema. NOTE(review): the repeated
# "document[1] <- NULL" lines each delete whichever column is first at that
# moment -- order-sensitive transcript-style cleanup; left untouched.
document$datasetid <- NULL
document$recordid <- NULL
document$fields$contractortrademapped <- NULL
document$fields$originaladdress1 <- NULL
document$fields$permitclassmapped <- NULL
document$fields$jurisdiction <- NULL
document$fields$contractorphone <- NULL
document$fields$contractoraddress1 <- NULL
document$fields$statuscurrentmapped <- NULL
document$fields$owneraddress1 <- NULL
document$fields$statuscurrentmapped <- NULL
document$fields$description <- NULL
document$fields$permittypedesc <- NULL
document$fields$link <- NULL
document$fields$totalsqft <- NULL
document$fields$statusdate <- NULL
document$fields$permittypemapped <- NULL
document$fields$permittypemapped <- NULL
document$fields$permitclass <- NULL
document$fields.originalstate <- NULL
document[1] <- NULL
document[1] <- NULL
document[1] <- NULL
document$ContractorPhonee <- NULL
document$contractorphone <- NULL
document[1] <- NULL
document[1] <- NULL
document[1] <- NULL
document[1] <- NULL
document[1] <- NULL
document[1] <- NULL
document[1] <- NULL
document[2] <- NULL
document$city <- "Town of Cary"
document$geom <- NA
document$EstProjectCostText <- document$EstProjectCost
document$EstProjectCost <- NULL
document$EstProjectCost <- NA
document$MasterPermitNumber <- document$MasterPermitNum
document$MasterPermitNumber <- NULL
# Backfill schema columns this feed cannot supply.
document$OriginalAddress2 <- NA
document$OriginalZip <- NA
document$Jurisdiction <- NA
document$Units <- NA
document$Pin <- NA
document$ContractLicNum <- NA
document$ContractorLicNum <- NULL
document$PIN <- document$Pin
document$Pin <- NULL
document$PIN <- NA
document$MasterPermitNum <- NA
document$AddedSqFt <- NA
document$COIssuedDate <- NA
document$ContractorAddress1 <- document$contractoraddress
document$ContractorAddress2 <- NA
document$contractoraddress <- NULL
document$ContractorCity <- NA
document$ContractorCompanyDesc <- NA
document$ContractorEmail <- NA
document$ContractorFullName <- NA
document$ContractorLicNum <- NA
document$ContractorPhone <- NA
document$ContractorState <- NA
document$ContractorStateLic <- NA
document$ContractorTrade <- NA
document$ContractorTradeMapped <- NA
document$ContractorZip <- NA
document$difftime <- NA
document$Fee <- NA
document$ExpiresDate <- NA
document$HoldDate <- NA
document$issueddates <-NA
document$ProjectID <- NA
document$ProjectName <- NA
document$ProposedUse <- NA
document$ownername <- NULL
document$ownerzip <- NULL
document$statuscurrentmapped <- NULL
document$TotalAccSqFt <- NA
document$TotalFinishedSqFt <- NA
document$TotalHeatedSqFt <- NA
document$TotalSprinkledSqFt <- NA
document$TotalUnfinishedSqFt <- NA
document$TotalUnHeatedSqFt <- NA
document$VoidDate <- NA
document$ContractLicNum <- document$ContractLicNum
document$ContractLicNum <- NULL
document$LandUseDescription <- NA
document$StateLicNum <- NA
# NOTE(review): 'total' is not defined anywhere in this visible chunk --
# presumably an accumulator built earlier in the full file; verify.
total2 <-smartbind(document, total)
#Wake County
url2 <- "http://data.wake.opendata.arcgis.com/datasets/8295268844ba4b7db2c22a1f7ff0f460_0.csv"
# Fix: the endpoint serves CSV; the original called fromJSON() on it, which
# cannot parse CSV text.
wake <- read.csv(url2, stringsAsFactors = FALSE)

# Copy raw feed columns to their schema names.  ContractorFullName is taken
# from the same raw column as ContractorCompanyName; the original wiped
# MECHCONTRACTORNAME to NA before copying it, so ContractorFullName was
# always NA.
wake_rename <- c(
  PermitNum             = "PERMITNUM",
  Description           = "DESCRIPTION",
  IssuedDate            = "ISSUEDDATE",
  CompletedDate         = "COMPLETEDDATE",
  OriginalAddress1      = "ORIGINALADDRESS",
  OriginalCity          = "ORIGINALCITY",
  OriginalZip           = "ORIGINALZIP",
  Jurisdiction          = "JURISDICTION",
  PermitClass           = "PERMITCLASS",
  PermitClassMapped     = "PERMITCLASSMAPPED",
  StatusCurrent         = "STATUSCURRENT",
  StatusCurrentMapped   = "STATUSCURRENTMAPPED",
  PermitType            = "WPERMITTYPEBLDG",
  PermitTypeMapped      = "WPERMITTYPEBLDG",
  TotalSqFt             = "TOTALSQFT",
  TotalFinishedSqFt     = "TOTALSQFT",
  Latitude              = "LATITUDE",
  Longitude             = "LONGITUDE",
  ContractorCompanyName = "MECHCONTRACTORNAME",
  ContractorFullName    = "MECHCONTRACTORNAME",
  ContractorLicNum      = "MECHCONTRACTORLICNUM",
  COIssuedDate          = "ADDDATE",
  Fee                   = "FEE",
  ContractorPhone       = "BUILDINGCONTRACTORPHONE",
  city                  = "MAILINGADDRCITY")
for (i in seq_along(wake_rename)) {
  wake[[names(wake_rename)[i]]] <- wake[[wake_rename[i]]]
}

wake$OriginalState <- "NC"

# Schema columns Wake does not provide (includes TotalSprinkledSqFt, which
# the original mistyped as `wke$...` and therefore never set on `wake`).
wake_na_cols <- c("AppliedDate", "OriginalAddress2", "WorkClass",
  "WorkClassMapped", "PermitTypeDesc", "StatusDate", "Link",
  "EstProjectCostDec", "EstProjectCostText", "Units", "ContractorTrade",
  "ContractorTradeMapped", "ContractorStateLic", "ProposedUse",
  "EstProjectCost", "AddedSqFt", "MasterPermitNum", "ExpiresDate",
  "HoldDate", "VoidDate", "ProjectName", "ProjectID",
  "TotalUnfinishedSqFt", "TotalHeatedSqFt", "TotalUnHeatedSqFt",
  "TotalAccSqFt", "TotalSprinkledSqFt", "ContractorCompanyDesc",
  "ContractorAddress1", "ContractorAddress2", "ContractorCity",
  "ContractorState", "ContractorEmail", "ContractorZip", "geom",
  "LandUseDescription", "StateLicNum")
wake[wake_na_cols] <- NA

# Derived fields: parsed dates, days between application and issue (always
# NA here since AppliedDate is NA), and a "(lat , lon)" text location.
wake$applieddates <- strptime(wake$AppliedDate, format = "%m/%d/%Y")
wake$issueddates <- strptime(wake$IssuedDate, format = "%m/%d/%Y")
wake$difftime <- as.numeric(difftime(wake$issueddates, wake$applieddates, units = "days"))
wake$Location_extra <- paste("(", wake$Latitude, ",", wake$Longitude, ")")

# Raw columns to discard: everything already copied above plus the county
# bookkeeping columns the schema does not use.
wake_drop_cols <- c("PERMITNUM", "DESCRIPTION", "ISSUEDDATE", "COMPLETEDDATE",
  "ORIGINALADDRESS", "ORIGINALCITY", "ORIGINALZIP", "JURISDICTION",
  "PERMITCLASS", "PERMITCLASSMAPPED", "STATUSCURRENT", "STATUSCURRENTMAPPED",
  "WPERMITTYPEBLDG", "TOTALSQFT", "LATITUDE", "LONGITUDE",
  "MECHCONTRACTORNAME", "MECHCONTRACTORLICNUM", "ADDDATE", "FEE",
  "BUILDINGCONTRACTORPHONE", "MAILINGADDRCITY", "Publisher",
  "OBJECTID", "ID", "RECEIPTNUMBER", "RECEIPTDATE", "APPLICANTNAME",
  "OWNERNAME", "USECODE", "USECODEDESCRIPTION", "IMPROVEMENTVALUE",
  "WATERSYSTEMTYPE", "SEWERSYSTEMTYPE", "TOWNSHIPCODE",
  "TOWNSHIPDESCRIPTION", "ZONINGCODE", "SUBDIVISIONNAME", "SUBDIVISIONLOT",
  "SUBDIVISIONSECTION", "STREET", "STREETNUMBER", "STREETMISC",
  "STREETDIRECTIONPREFIX", "STREETNAME", "STREETTYPE",
  "STREETDIRECTIONSUFFIX", "PINMAPNUMBER", "PINMAPSCALE", "PINBLOCKNUMBER",
  "PINLOTNUMBER", "PINEXTNUMBER", "PINSPLIT", "ACRES", "MAILINGADDRLINE2",
  "MAILINGADDRSTATE", "MAILINGADDRZIPCODE",
  "BUILDINGCONTRACTORLICNUMPREFIX", "BUILDINGCONTRACTORLICNUM",
  "BUILDINGCONTRACTORAREACODE", "PLUMBCONTRACTORNAME",
  "PLUMBCONTRACTORLICNUMPREFIX", "PLUMBCONTRACTORLICNUM",
  "ELECCONTRACTORNAME", "ELECCONTRACTORLICNUMPREFIX", "ELECCONTRACTORLICNUM",
  "MECHCONTRACTORLICNUMPREFIX", "OTHERCONTRACTOR1NAME",
  "OTHERCONTRACTOR1LICNUM", "OTHERCONTRACTOR2NAME", "OTHERCONTRACTOR2LICNUM",
  "OTHERCONTRACTOR3NAME", "OTHERCONTRACTOR3LICNUM", "OTHERCONTRACTOR4NAME",
  "OTHERCONTRACTOR4LICNUM", "SUBDIVIMPROVENUM", "PURGEFLAG",
  "RECOVERYFUNDFLAG", "OLDPROPERTYLOCATION", "REALESTATEID",
  "CENSUSTRACTNUMBER1", "CENSUSTRACTNUMBER2", "WPERMITTTYPESOLIDWASTE",
  "WPERMITTYPEWATERQUALITY", "WPERMITTYPELAND", "WPERMITTYPEFIRE",
  "TEMPORARYPOLEFLAG", "PERMITHOLDSTATUSZON", "PERMITHOLDSTATUSENG",
  "PERMITHOLDSTATUSINS", "PERMITHOLDSTATUSFIR", "PERMITHOLDSTATUSSUB",
  "PERMITHOLDSTATUSEFS", "PERMITHOLDSTATUSWW",
  "CONTRACTORHOLDSTATUSBUILDING", "CONTRACTORHOLDSTATUSPLUMBING",
  "CONTRACTORHOLDSTATUSELECTRICAL", "CONTRACTORHOLDSTATUSMECHANICAL",
  "CONTACTNAME", "MAPPARCELBLOCK", "MAPPARCELLOT", "PLATREFYEAR",
  "PLATREFPAGE", "ROOMS", "BEDROOMS", "BASEMENT", "GARBAGEDISPOSAL", "PUMP",
  "NUMBEROFEMPLOYEES", "FOODHANDLING", "BOAREQUIRED", "BOADATE",
  "BOACASENUMBER", "BOAACTION", "MHMAKE", "MHSERIALNUMBER",
  "MH_UL_HUDNUMBER", "MHYEAR", "MHSIZE", "MHCOLOR", "HEALTHPERMITNUMBER",
  "HEALTHOPDATE", "HEALTHOPBY", "GEOCODESTATUS")
wake <- wake[, !(names(wake) %in% wake_drop_cols)]

# Bind Wake onto the combined table built so far.
total3 <- smartbind(wake, total2)
#town of cary cleanup
# NOTE(review): this runs after `document` was already bound into
# total2/total3 above, so it has no effect on the combined table.
document$PermitClassMapped <- NA

#City and County of Durham (Active Permits)
cod_url <- "https://opendurham.nc.gov/explore/dataset/active-building-permits/download/?format=json&timezone=America/New_York"
doc <- fromJSON(txt = cod_url)
doc$city <- "Durham"

# The Socrata JSON nests record attributes under $fields; pull out what the
# shared schema needs before discarding the nested frames.  (The original
# read top-level doc$PERMIT_ID etc., which do not exist in this payload --
# the rest of the block consistently uses doc$fields$... .)
doc$PermitNum <- doc$fields$permit_id
doc$PermitID <- doc$fields$permit_id
doc$Description <- doc$fields$p_descript
doc$StatusCurrent <- doc$fields$p_status
doc$OriginalAddress1 <- doc$fields$siteadd
doc$Jurisdiction <- doc$fields$build_dist
doc$PIN <- doc$fields$pin

# geo_point_2d holds one (lat, lon) pair per record.
for (i in seq_along(doc$fields$geo_point_2d)) {
  doc$Latitude[i] <- doc$fields$geo_point_2d[[i]][1]
  doc$Longitude[i] <- doc$fields$geo_point_2d[[i]][2]
}

# Provenance constants.
doc$OriginalCity <- "Durham"
doc$OriginalState <- "North Carolina"
doc$StatusCurrentMapped <- "Permit Issued"
doc$PermitType <- "Building"
doc$PermitTypeMapped <- "Building"
doc$Link <- "https://opendurham.nc.gov/explore/dataset/active-building-permits/table/"
doc$Publisher <- "County of Durham"

# Schema columns Durham does not provide.
durham_na_cols <- c("AppliedDate", "IssuedDate", "CompletedDate",
  "OriginalAddress2", "OriginalZip", "PermitClass", "PermitClassMapped",
  "WorkClass", "WorkClassMapped", "PermitTypeDesc", "StatusDate",
  "TotalSqFt", "EstProjectCost", "HousingUnits", "ContractorCompanyName",
  "ContractorTrade", "ContractorTradeMapped", "ContractorLicNum",
  "ContractorStateLic", "ProposedUse", "AddedSqFt", "RemovedSqFt",
  "MasterPermitNum", "ExpiresDate", "COIssuedDate", "HoldDate", "VoidDate",
  "ProjectName", "ProjectID", "TotalFinishedSqFt", "TotalUnfinishedSqFt",
  "TotalHeatedSqFt", "TotalUnHeatedSqFt", "TotalAccSqFt",
  "TotalSprinkledSqFt", "Fee", "ContractorFullName", "ContractorCompanyDesc",
  "ContractorPhone", "ContractorAddress1", "ContractorAddress2",
  "ContractorCity", "ContractorState", "ContractorZip", "ContractorEmail")
doc[durham_na_cols] <- NA

# Drop the payload containers and Socrata bookkeeping columns; everything
# needed from them was extracted above.
doc$ExtraFields <- NULL
doc$datasetid <- NULL
doc$recordid <- NULL
doc$fields <- NULL
doc$geometry <- NULL

# Combine every source and upload.  total3 already holds Cary and Wake
# bound onto `total`; the original re-bound into `total` alone, silently
# dropping those jurisdictions, and passed an undefined `socrataEmail`
# (case mismatch with the `SocrataEmail` it defined).
total <- smartbind(total3, doc, fill = NA)
socrataEmail <- Sys.getenv("SOCRATA_EMAIL", "xxx@socrata.com")
socrataPassword <- Sys.getenv("SOCRATA_PASSWORD", "xxxx")
datasetToAddToUrl <- "https://opendata.socrata.com/resource/9wjv-w4fx.json"
write.socrata(total, datasetToAddToUrl, "UPSERT", socrataEmail, socrataPassword)
|
0e609593020b1217f7674a88970c194c285dbfd0
|
2e95fc984d9893d7619d68f9dae638be0734cca5
|
/R/filter_rules.R
|
5adf94e5b4bd9521d39fe8f7ecc0ce9c1e426958
|
[] |
no_license
|
karsmo/VisuNet
|
cfc990ad85b20feecf036175e3f1eee5cd5646d8
|
938ddaddd2e9a39afba7bfb71b0ac4b5512bbb99
|
refs/heads/master
| 2020-05-24T00:43:45.535808
| 2019-12-03T11:51:39
| 2019-12-03T11:51:39
| 168,517,615
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 295
|
r
|
filter_rules.R
|
# Validate rule-filtering thresholds against the available rules.
#
# rules:   data frame of rules with SUPP_RHS (support) and ACC_RHS (accuracy)
#          columns.
# minAcc:  minimum accuracy threshold requested by the user.
# minSupp: minimum support threshold requested by the user.
#
# Returns a message string asking the user to lower the threshold when
# minSupp exceeds every rule's support (checked first) or minAcc exceeds
# every rule's accuracy; returns NULL when at least one rule can pass.
filter_rules <- function(rules, minAcc, minSupp) {
  if (max(rules$SUPP_RHS) < minSupp) {
    'Please change min Support value. Your value is too high!'
  } else if (max(rules$ACC_RHS) < minAcc) {
    # Fix: message previously misspelled "Accucary".
    'Please change min Accuracy value. Your value is too high!'
  } else {
    NULL
  }
}
|
c42ff0dba26bfc84eb8fde5786165e10fba0ea76
|
f801a9d6e316db85aad03630dfb660eba7416ad7
|
/AnalysisCode/ThinkPiece/old/aboveSaylorville_analyses.R
|
4cb765b24b31aafbacbe9811af0628e851bf176d
|
[] |
no_license
|
jonathan-walter/AquaTerrSynch
|
5458c9608faa737d66daf14a6e53d25c1d610d1a
|
4388f9b78d7890ed6c9973b2e4281c5ad4d4139c
|
refs/heads/master
| 2021-07-09T01:03:18.024976
| 2020-07-01T16:58:33
| 2020-07-01T16:58:33
| 148,833,977
| 2
| 0
| null | 2020-02-05T02:32:21
| 2018-09-14T19:38:23
|
R
|
UTF-8
|
R
| false
| false
| 7,866
|
r
|
aboveSaylorville_analyses.R
|
rm(list=ls())  # NOTE(review): wipes the caller's workspace; discouraged in scripts
library(lubridate)
library(wsyn)

## Data preparation: combine the two Des Moines River monitoring files.
dat1 <- read.csv("~/Box Sync/NSF EAGER Synchrony/Data/Iowa Lakes Data/DesMoinesRiver_Site1_AllData.csv",
                 stringsAsFactors = FALSE)
dat2 <- read.csv("~/Box Sync/NSF EAGER Synchrony/Data/Iowa Lakes Data/ACE_Site1_TempFlow.csv",
                 stringsAsFactors = FALSE)
dat <- rbind(dat1, dat2)

# Map agency parameter codes onto readable names (units in the name).
dat$parameter <- rep(NA, nrow(dat))
dat$parameter[dat$PARAM_NUM == 4] <- "turbidity_ntu"
dat$parameter[dat$PARAM_NUM == 7] <- "tss_mgL"
dat$parameter[dat$PARAM_NUM == 13] <- "toc_mgL"
dat$parameter[dat$PARAM_NUM == 16] <- "bod_mgL"
dat$parameter[dat$PARAM_NUM == 20] <- "nitrate_mgL"
dat$parameter[dat$PARAM_NUM == 32] <- "tp_mgL"
dat$parameter[dat$PARAM_NUM == 2] <- "flow_"  # Ask Grace about units for flow and temp
dat$parameter[dat$PARAM_NUM == 3] <- "temp_"

# Two-digit years from the late 60s parse as 2067/2068; push them back a
# century (vectorized; which() also skips unparseable NA dates safely).
dat$sampleDate <- as.POSIXct(dat$DATE_OF_SAMPLE, format = "%d-%b-%y")
year(dat$sampleDate[which(year(dat$sampleDate) == 2067)]) <- 1967
year(dat$sampleDate[which(year(dat$sampleDate) == 2068)]) <- 1968

# Average each selected parameter within each month x year cell; columns of
# param.monthly follow the expand.grid(month, year) ordering.
params <- c("nitrate_mgL", "tss_mgL", "flow_", "temp_")
years <- 1968:2016
months <- 1:12
monthyear <- expand.grid(months, years)
param.monthly <- matrix(NA, nrow = length(params), ncol = 12 * length(years))
for (pp in seq_along(params)) {
  for (yy in seq_along(years)) {
    for (mm in months) {
      param.monthly[pp, which(monthyear$Var1 == mm & monthyear$Var2 == years[yy])] <- mean(
        dat$SAMPLE_VALUE[dat$parameter == params[pp] & month(dat$sampleDate) == mm & year(dat$sampleDate) == years[yy]])
    }
  }
}
## Time-series plots of the four selected variables, one panel per parameter.
pdf("~/GitHub/AquaTerrSynch/AnalysisCode/ThinkPiece/DesMoinesAboveSaylorville_selectvars.pdf", width=8.5, height=11)
par(mfrow=c(length(params),1), mar=c(5.1,4.1,2.1,2.1))
for(pp in 1:length(params)){
plot(param.monthly[pp,], type="l", xlim=c(1,ncol(param.monthly)),
xlab="Time", ylab=params[pp], xaxt="n",
main=params[pp])
axis(1,at=seq(from=1,by=12,length.out=length(years)), labels=years, las=2)
}
dev.off()
## Restrict each series to 1976-2016, clean it with wsyn::cleandat (clev=5),
## and take its wavelet transform.
yy<-1976:2016
no3<-param.monthly[1,]
no3<-no3[monthyear$Var2 %in% yy]
no3.cln<-cleandat(no3, 1:length(no3), clev=5)$cdat
wt.no3<-wt(no3.cln,1:length(no3.cln))
tss<-param.monthly[2,]
tss<-tss[monthyear$Var2 %in% yy]
tss.cln<-cleandat(tss, 1:length(tss), clev=5)$cdat
wt.tss<-wt(tss.cln,1:length(tss.cln))
flow<-param.monthly[3,]
flow<-flow[monthyear$Var2 %in% yy]
flow.cln<-cleandat(flow, 1:length(flow), clev=5)$cdat
wt.flow<-wt(flow.cln,1:length(flow.cln))
temp<-param.monthly[4,]
temp<-temp[monthyear$Var2 %in% yy]
temp.cln<-cleandat(temp, 1:length(temp), clev=5)$cdat
wt.temp<-wt(temp.cln,1:length(temp.cln))
## Wavelet magnitude plots for all four series.
pdf("~/GitHub/AquaTerrSynch/AnalysisCode/ThinkPiece/DesMoinesAboveSaylorville_selectvars_wts.pdf", width=8.5, height=11)
par(mfrow=c(4,1), mar=c(5.1,4.1,2.1,2.1))
plotmag(wt.no3, title="no3")
plotmag(wt.tss, title="tss")
plotmag(wt.flow, title="flow")
plotmag(wt.temp, title="temp")
dev.off()
## Time-averaged wavelet power; the vertical lines bracket the candidate
## timescale bands reused as b1..b4 below.
plot(wt.no3$timescales, colMeans(Mod(wt.no3$values)^2, na.rm=T)/wt.no3$timescales,
type="l", xlab="Timescale (yr)", ylab="Wavelet power")
abline(v=5); abline(v=7)
abline(v=10); abline(v=14)
abline(v=42); abline(v=54)
abline(v=120); abline(v=162)
#############################################################################
## Do correlation analyses
# Auto-/cross-correlations of nitrate against the other raw series.
acf(no3,lag.max=200)
pacf(no3, lag.max=200)
plot(tss,no3)
cor.test(tss,no3)
ccf(no3,tss, lag.max=200)
plot(temp,no3)
cor.test(temp,no3)
ccf(no3,temp, lag.max=200)
plot(flow,no3)
cor.test(flow,no3)
ccf(no3,flow, lag.max=200)
#############################################################################
## Do coherence analyses
# Timescale bands over which coherence significance is tested (b5 defined
# but unused below).
b1<-c(5,7)
b2<-c(10,14)
b3<-c(42,54)
b4<-c(108,192)
b5<-c(16,36)
no3Xtss<-coh(no3.cln,tss.cln,1:length(no3.cln), norm="powall", sigmethod="fast", sigma=1.01)
no3Xtss<-bandtest(no3Xtss,b1)
no3Xtss<-bandtest(no3Xtss,b2)
no3Xtss<-bandtest(no3Xtss,b3)
no3Xtss<-bandtest(no3Xtss,b4)
print(no3Xtss$bandp)
no3Xflow<-coh(no3.cln,flow.cln,1:length(no3.cln), norm="powall", sigmethod="fast", sigma=1.01)
no3Xflow<-bandtest(no3Xflow,b1)
no3Xflow<-bandtest(no3Xflow,b2)
no3Xflow<-bandtest(no3Xflow,b3)
no3Xflow<-bandtest(no3Xflow,b4)
print(no3Xflow$bandp)
plot(no3Xflow$timescales, Mod(no3Xflow$coher))
no3Xtemp<-coh(no3.cln,temp.cln,1:length(no3.cln), norm="powall", sigmethod="fast", sigma=1.01)
no3Xtemp<-bandtest(no3Xtemp,b1)
no3Xtemp<-bandtest(no3Xtemp,b2)
no3Xtemp<-bandtest(no3Xtemp,b3)
no3Xtemp<-bandtest(no3Xtemp,b4)
print(no3Xtemp$bandp)
#####################################################################################
## plotting function -- modified from the 'wsyn' plotmag() by Reuman et al.
## Draws wavelet-transform magnitude as a time x timescale image.
##
##   object:    wsyn transform object; data pulled via get_values(),
##              get_times(), get_timescales()
##   zlims:     optional z range; must cover the plotted values
##   neat:      drop timescale columns that are entirely NA
##   colorfill: color-ramp function; defaults to a jet palette
##   colorbar:  TRUE -> fields::image.plot() with legend, FALSE -> plain image()
##   title:     main plot title
##   filename:  if not NA, the plot is written to "<filename>.pdf"
##   xlocs/ylocs, xlabs/ylabs: axis tick positions and labels
##              (ylocs given in timescale units; drawn on a log2 axis)
plotmag.JW <- function(object, zlims=NULL, neat=TRUE, colorfill=NULL,
                       colorbar=TRUE, title=NULL, filename=NA, xlocs=NULL,
                       ylocs=NULL, xlabs=NULL, ylabs=NULL, ...)
{
  wav <- Mod(get_values(object))
  times <- get_times(object)
  timescales <- get_timescales(object)

  # Default z limits to the data range; otherwise insist the supplied
  # limits cover it.
  if (is.null(zlims)) {
    zlims <- range(wav, na.rm = TRUE)
  } else {
    rg <- range(wav, na.rm = TRUE)
    if (rg[1] < zlims[1] || rg[2] > zlims[2]) {
      # Fix: message previously referred to "plotmag.tts".
      stop("Error in plotmag.JW: zlims must encompass the z axis range of what is being plotted")
    }
  }

  # Drop all-NA timescale columns before plotting.
  if (neat) {
    inds <- which(!is.na(colMeans(wav, na.rm = TRUE)))
    wav <- wav[, inds]
    timescales <- timescales[inds]
  }

  if (is.null(colorfill)) {
    jetcolors <- c("#00007F", "blue", "#007FFF", "cyan",
                   "#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000")
    colorfill <- grDevices::colorRampPalette(jetcolors)
  }

  # Default tick locations/labels.
  if (is.null(xlocs)) {
    xlocs <- pretty(times, n = 8)
  }
  if (is.null(ylocs)) {
    ylocs <- pretty(timescales, n = 8)
  }
  if (is.null(xlabs)) {
    xlabs <- xlocs
  }
  if (is.null(ylabs)) {
    ylabs <- ylocs
  }

  if (!is.na(filename)) {
    grDevices::pdf(paste0(filename, ".pdf"))
  }

  # Both branches draw the timescale axis on a log2 scale.
  if (!colorbar) {
    graphics::image(x = times, y = log2(timescales), z = wav, xlab = "", zlim = zlims,
                    ylab = "Timescale", axes = FALSE, col = colorfill(100), main = title, ...)
    graphics::axis(1, at = xlocs, labels = xlabs)
    graphics::axis(2, at = log2(ylocs), labels = ylabs)
  } else {
    fields::image.plot(x = times, y = log2(timescales), z = wav, xlab = "", zlim = zlims,
                       ylab = "Timescale", axes = FALSE, col = colorfill(100), main = title, ...)
    graphics::axis(1, at = xlocs, labels = xlabs)
    graphics::axis(2, at = log2(ylocs), labels = ylabs)
  }

  if (!is.na(filename)) {
    grDevices::dev.off()
  }
}
############################################################################
## Nice plotting
# will need to modify plotting code from 'wsyn' to fix axes and other figure niceties
#pdf("~/GitHub/AquaTerrSynch/AnalysisCode/ThinkPiece/FigX_AnalysisExample.pdf", width=6.5, height=8)
# Layout (matches plot-call order below): 1 = no3 series, 2 = flow series,
# 3 = no3 wavelet, 4 = flow wavelet, 5 = shared color key, 6 = spacer.
laymat<-matrix(1,nrow=2,ncol=11)
laymat[1,6:10]<-2
laymat[2,1:5]<-3
laymat[2,6:10]<-4
laymat[1,11]<-6
laymat[2,11]<-5
jetcolors <- c("#00007F", "blue", "#007FFF", "cyan",
"#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000")
colorfill<-grDevices::colorRampPalette(jetcolors)
tiff("~/Box Sync/NSF EAGER Synchrony/Manuscripts/ThinkPiece/dmr_example.tif",
units="in", width=6.5, height=5, res=300)
layout(laymat)
par(mar=c(2.1,3.5,1.1,1.1),oma=c(2.1,0,0,0),mgp=c(2.2,0.8,0))
plot(no3, type="l", xlab="", ylab=expression("NO"[3]*~(mu*"gL"^-1)), xaxt="n")
axis(1,at=seq(0,500,by=60),labels=seq(1976,2016,by=5))
plot(flow,type="l", xlab="", ylab="Flow (cfs)", xaxt="n")
axis(1,at=seq(0,500,by=60),labels=seq(1976,2016,by=5))
# Wavelet panels share z limits c(0,8) so the single color key applies to both.
plotmag.JW(wt.no3, xaxs="r", colorbar=F, zlim=c(0,8), ylocs=c(0,6,12,24,48,96,192), ylabs=c(0,0.5,1,2,4,8,16),
xlocs=seq(0,500,by=60), xlabs=seq(1976,2016,by=5))
plotmag.JW(wt.flow, xaxs="r", colorbar=F, zlim=c(0,8), ylocs=c(0,6,12,24,48,96,192), ylabs=c(0,0.5,1,2,4,8,16),
xlocs=seq(0,500,by=60), xlabs=seq(1976,2016,by=5))
# Hand-drawn color key for the two wavelet panels.
par(mar=c(2.1,2.1,1.1,1.1))
image(y=seq(0,8,length.out=50),matrix(seq(0,8,length.out=50),nrow=1,ncol=50),xaxt="n",yaxt="n",col=colorfill(50))
axis(2,at=pretty(Mod(c(wt.no3$values,wt.flow$values)),n=5))
mtext("Time",1, outer=T, line=0.65)
dev.off()
|
bf5a47b912236845774f540e820896f314fc1684
|
d7c9e107ee8b85a72687b860669bbe20e5e1ab9b
|
/man/fn.Rd
|
419a583f1a71761e319364d34eeca2f705690d2c
|
[] |
no_license
|
iqis/lispr
|
7b13b257125fa513d51bd02fd396463f4fa816ed
|
b2fda54a426826a74883561c1f286c8a74b1c765
|
refs/heads/master
| 2020-05-09T13:46:36.249581
| 2019-04-13T12:29:43
| 2019-04-13T12:29:43
| 181,167,152
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 248
|
rd
|
fn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/core.R
\name{fn}
\alias{fn}
\title{Construct a function}
\usage{
fn(arg, body)
}
\description{
Construct a function from a list for the arguments and a code block for the body.
}
|
748cf66b85932a785db02caf18090b0eb8ab525f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/newsmap/examples/accuracy.Rd.R
|
c9f23903b2c0eedcc6431f6e9ebc63cd2bf85900
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 355
|
r
|
accuracy.Rd.R
|
# Auto-extracted example for newsmap::accuracy(): precision/recall of a toy
# country-classification prediction against its true labels.
library(newsmap)
### Name: accuracy
### Title: Evaluate classification accuracy in precision and recall
### Aliases: accuracy
### ** Examples
class_pred <- c('US', 'GB', 'US', 'CN', 'JP', 'FR', 'CN') # prediction
class_true <- c('US', 'FR', 'US', 'CN', 'KP', 'EG', 'US') # true class
acc <- accuracy(class_pred, class_true)
print(acc)
summary(acc)
|
ec02e8fc4ec4d3a4eb2c65bb00a9f89e99e6ac27
|
03c11634037f8863b37a1981a6b7dce8a9a79f06
|
/_tests/test-or.r
|
31372f98d928643ab2be5b442601025f588fd1b1
|
[] |
no_license
|
klmr/parser-combinators
|
8f0fa50d4cac48a05bf8bff68937dc93a0ce9ce6
|
515e850a5fe294dd117f35a917151d2221fdc434
|
refs/heads/master
| 2021-01-10T08:26:13.704232
| 2016-03-20T17:18:58
| 2016-03-20T17:18:58
| 54,326,700
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,243
|
r
|
test-or.r
|
# testthat suite for the or() parser combinator: alternation matching,
# printed representation, and flattening of nested alternations.
context('alternative parsers')

# Fixture parsers built from the package's primitive combinators.
a = chr('a')
aa = lit('aa')
a2 = or(a, aa)
ab = or(a, chr('b'))
vowel = any_of('aeiou')

test_that('parsers can be alternated', {
    # Both alternatives can match; match positions accumulate.
    expect_that(a2('a'), has_match(2L))
    expect_that(a2('aa'), has_match(2L, 3L))
    expect_that(or(empty, a, aa)('aa'), has_match(1L, 2L, 3L))
    expect_that(a2('ab'), has_match(2L))
    # Inputs no alternative matches.
    expect_that(a2(''), has_no_match())
    expect_that(a2('b'), has_no_match())
    expect_that(a2('ba'), has_no_match())
    expect_that(ab(''), has_no_match())
    expect_that(ab('a'), has_match(2L))
    expect_that(ab('b'), has_match(2L))
    expect_that(ab('c'), has_no_match())
    expect_that(or(a, vowel)(''), has_no_match())
    expect_that(or(a, vowel)('a'), has_match(2L))
    expect_that(or(a, vowel)('b'), has_no_match())
    expect_that(or(a, vowel)('e'), has_match(2L))
})

test_that('alternative parsers can be printed', {
    expect_that(a2, prints_as('("a"|"aa")'))
    expect_that(ab, prints_as('("a"|"b")'))
})

test_that('nested alternations are flattened', {
    # Duplicates are preserved, only the nesting is flattened.
    expect_that(or(ab, a2), prints_as('("a"|"b"|"a"|"aa")'))
    expect_that(as.character(or(lit('a'), lit('b'), lit('c'))),
                equals(as.character(or(or(lit('a'), lit('b')), lit('c')))))
})
|
5ffb80ba6191f0cb59d66383d9031b4ea7c9b0fb
|
d7f314a6661e5e56ec695594f25f4b52de1a18de
|
/Exam_1/Exam_1_complete.R
|
303691e2bd77c1ff5accc676d8a866069fce320e
|
[] |
no_license
|
twedwards/Data_Course_Edwards
|
7aad18a84611b52d98c592b79f0f7eccbc26420d
|
a78fd68e1d49aec21034fc81ffca0c2489245d33
|
refs/heads/master
| 2021-07-17T19:20:27.838906
| 2020-08-22T00:08:36
| 2020-08-22T00:08:36
| 203,419,841
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,898
|
r
|
Exam_1_complete.R
|
library(tidyverse)
##Once you get the file loaded into an R object as a data frame, feel free to do some exploratory visualizations or summaries to get a feel for the data if you like.
##Your first task, though, is to create separate histograms of the DNA concentrations for Katy and Ben. Make sure to add nice labels to these (x-axis and main title).
# Exam data: DNA concentrations per extraction, one column per student.
df <- read.csv("../../Data_Course/Exam_1/DNA_Conc_by_Extraction_Date.csv")
ggplot(df, aes(x=DNA_Concentration_Katy)) +
geom_histogram()+
labs(title="Katy's Concentrations", x="Katy's DNA Concentrations")
ggplot(df, aes(x=DNA_Concentration_Ben)) +
geom_histogram()+
labs(title="Ben's Concentrations", x="Ben's DNA Concentrations")
##II.
##Your second task is to look at DNA concentrations from the different extraction years.
##One way to do this is a separate figure for each student is demonstrated in those two files: ZAHN_Plot1.jpeg and ZAHN_Plot2.jpeg
##Open those files in some image viewing program and take a look. I'd like you to re-create these exactly, including the labels.
##This is tricky, so I'll give a hint: the plot() function behaves differently depending on the classes of vectors that are given to it.
# III.
# Once you have your code for creating the figures correctly, you need to save those two images in YOUR Exam_1 directory. Name them similarly to how I named mine, but with your LASTNAME
# Make sure your code is saving the files. Don't do it manually with the mouse!
#
# Year is converted to character so ggplot draws one boxplot per year.
ggplot(df, aes(x=as.character(Year_Collected), y=DNA_Concentration_Katy))+
geom_boxplot()+
labs(title="Katy's Extractions", x="Year", y="DNA Concentration") +
theme(plot.title=element_text(hjust=.5))
ggsave("./EDWARDS_Plot1.jpg")
ggplot(df, aes(x=as.character(Year_Collected), y=DNA_Concentration_Ben))+
geom_boxplot()+
labs(title="Ben's Extractions", x="Year", y="DNA Concentration") +
theme(plot.title=element_text(hjust=.5))
ggsave("./EDWARDS_Plot2.jpg")
##IV.
##Take a look at Ben's concentrations vs Katy's concentrations. You can do this however you like... with a plot or with summary stats or both.
##It looks like Ben had consistently higher DNA yields than Katy did...but surely it wasn't uniformly better, right? With some samples, he only had a marginal improvement over Katy.
##With other samples, he had a relatively massive improvement over her.
##Your task here is to write some code that tells us: in which extraction YEAR, was Ben's performance the lowest RELATIVE TO Katy's performance?
# NOTE(review): this finds the LARGEST Ben-minus-Katy gap; the prompt asks
# for the year where Ben's relative performance was LOWEST -- verify the
# intended direction (min vs max).
difference <- df$DNA_Concentration_Ben - df$DNA_Concentration_Katy
max.difference <- which(difference == max(difference))
df[max.difference,"Year_Collected"]
##V.
##Do another subset of the data for me. Subset the data frame so it's just the "Downstairs" lab.
##Now, make a scatterplot of the downstairs lab data such that "Date_Collected" is on the x-axis and
#"DNA_Concentration_Ben" is on the y-axis. Save this scatterplot as "Ben_DNA_over_time.jpg" in your Exam_1
#directory. See the file "Downstairs.jpg" for an example of how yours should look. If it looks different, you
#might need to do some class conversions so the plot() function treats things correctly. HintHintHint: POSIXct
downstairs <- df[df$Lab == "Downstairs",]
ggplot(downstairs, aes(x=as.POSIXct(Date_Collected), y=DNA_Concentration_Ben)) +
geom_point() +
labs(title="Ben's DNA Extractions by Year", x="Year Collected", y="DNA Concentrations") +
theme(plot.title = element_text(hjust = .5))
ggsave("./Ben_DNA_Over_Time.jpg")
# VI. (BONUS) Average of Ben's DNA concentration for every extraction year.
# The original computed each year's mean by hand (ben2000..ben2012);
# tapply() performs the same grouping in one call, automatically covering
# exactly the years present in the data (2009 is absent), in the same
# sorted order as levels(as.factor(...)).
year_means <- tapply(df$DNA_Concentration_Ben, df$Year_Collected, mean)
ben_dat <- data.frame(Year = names(year_means),
                      Ben_Mean = as.numeric(year_means))
ben_dat
# Which extraction year has the highest average concentration?
max.row <- which(ben_dat$Ben_Mean == max(ben_dat$Ben_Mean))
ben_dat[max.row,]
write.csv(ben_dat, file = "./Ben_Average_Conc.csv")
|
806e7be769dacb46df68363271273741d9ce09b4
|
a6d778e2897498d87f12fa6a40f40c009e0aed7c
|
/marg_vdchildren.R
|
3ae9ccd97f205c0cfb1ab460d88636be20a874a3
|
[] |
no_license
|
margarc/meta-analysis-
|
73b3a6d5062cc95252da68c3bbb162c2dea7ae92
|
2f654c490b80a3813a31ad4f1d5ffaa70327b4c0
|
refs/heads/master
| 2021-01-13T16:35:18.272067
| 2018-09-28T09:52:50
| 2018-09-28T09:52:50
| 79,135,581
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,383
|
r
|
marg_vdchildren.R
|
## Meta-analysis of vitamin D deficiency (VDD) in children: prevalence,
## mortality, and sensitivity analyses (uses the 'meta' package).
getwd()
setwd("C:/Users/INSPIRON/Documents/Rch")  # NOTE(review): hard-coded machine path

## ---- Prevalence of VDD in children ----
preval
metaprop(vddch, totch, studlab = paste(study), data = preval)
mpreval <- metaprop(vddch, totch, studlab = paste(study), data = preval)
mpreval
forest(mpreval, comb.fixed = FALSE, xlab = "proportion")
forest(mpreval, comb.random = FALSE, xlab = "proportion")
funnel(mpreval)

## ---- Mortality: VDD vs non-VDD ----
# NOTE(review): files have a .rda extension but are read with read.csv();
# presumably misnamed CSVs -- verify.
mort <- read.csv("mortality.rda", as.is = TRUE)
# Fix: the original never assigned this result, so the `mmo` calls below
# referenced an undefined object.
mmo <- metabin(deaddef, allvdd, deadnotvdd, allnotvdd, sm = "OR", method = "I",
               data = mort, studlab = study)
mmo
forest(mmo, comb.fixed = FALSE, xlab = "proportion")
forest(mmo, comb.random = FALSE, xlab = "proportion")
funnel(mmo)

mortn <- read.csv("mortnew.rda", as.is = TRUE)
mor <- metabin(Eedeaddef, Nealldef, Ecdeadnodef, Ncallnondef, sm = "OR", method = "I",
               data = mortn, studlab = study)
forest(mor, comb.fixed = FALSE, xlab = "proportion")
forest(mor, comb.random = FALSE, xlab = "proportion")
funnel(mor)

#######################################################################################
#sensitivity analysis high quality studies only in mortality outcome
#################################################################################
hqm <- read.csv("highqmort.rda", as.is = TRUE)
hqmm <- metabin(deaddef, alldef, deadnodef, allnodef, sm = "OR", method = "I",
                data = hqm, studlab = study)
forest(hqmm, comb.fixed = FALSE, xlab = "proportion")
forest(hqmm, comb.random = FALSE, xlab = "proportion")
funnel(hqmm)

####################################################
#sens analysis high qualiy studies only prevalence
##################################################
hqprev <- read.csv("hqonlyprev.rda", as.is = TRUE)
metaprop(vddch, totch, studlab = paste(study), data = hqprev)

# Prevalence by serum-threshold subgroup.
sm150 <- read.csv("ssm150.rda", as.is = TRUE)
sm150
metaprop(vddch, totch, studlab = paste(study), data = sm150)
sless150 <- read.csv("ssless150new.rda", as.is = TRUE)
sless150
metaprop(vddch, totch, studlab = paste(study), data = sless150)

#prevalence in those 21 studies that reported vdd under our set threshold <20ng/ml
getwd()
setwd("C:/Users/INSPIRON/Documents/Rch")
onlyset <- read.csv("only21st.rda", as.is = TRUE)
onlyset
#
# Bias Indicators
# NOTE(review): `metap1` is not defined anywhere in this script -- it is
# probably meant to be one of the metaprop results above (e.g. mpreval);
# verify before running.
# Begg and Mazumdar test rank correlation (tau^2)
metabias(metap1, method = "rank")
# Egger's test linear regression
# Fix: the original repeated method="rank" here despite the comment.
metabias(metap1, method = "linear")
#trim-and-fill method
tf1 <- trimfill(metap1)
#######
#meta regression
#################################################
|
d6a6a9bcb62a8ab2883ee323dde830119c3e5934
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/embryogrowth/examples/weightmaxentropy.Rd.R
|
95396372c39cde2f476c15870dec8d0f2ee7a6cf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,051
|
r
|
weightmaxentropy.Rd.R
|
# Auto-extracted example for embryogrowth::weightmaxentropy(); the example
# body is wrapped in a \dontrun block (##D lines), so only the library()
# call executes here.
library(embryogrowth)
### Name: weightmaxentropy
### Title: Search for the weights of the nests which maximize the entropy
###   of nest temperatures distribution
### Aliases: weightmaxentropy
### ** Examples
## Not run:
##D library(embryogrowth)
##D data(nest)
##D formated <- FormatNests(nest)
##D w <- weightmaxentropy(formated, control_plot=list(xlim=c(20,36)))
##D x <- structure(c(120.940334922916, 467.467455887442,
##D 306.176613681557, 117.857995419495),
##D .Names = c("DHA", "DHH", "T12H", "Rho25"))
##D # pfixed <- c(K=82.33) or rK=82.33/39.33
##D pfixed <- c(rK=2.093313)
##D # K or rK are not used for dydt.linear or dydt.exponential
##D resultNest_4p_weight <- searchR(parameters=x,
##D fixed.parameters=pfixed, temperatures=formated,
##D derivate=dydt.Gompertz, M0=1.7, test=c(Mean=39.33, SD=1.92),
##D method = "BFGS", weight=w)
##D data(resultNest_4p_weight)
##D plotR(resultNest_4p_weight, ylim=c(0,0.50), xlim=c(15, 35))
##D # Standard error of parameters can use the GRTRN_MHmcmc() function
## End(Not run)
|
ccb4c997b977e4cb8192d5dbb80e9c032acc1597
|
f832c182bc84a0a5892b4d84a75acdacdd434e48
|
/quiz3.R
|
b36e8ef365a498d7fc42b64ae74e71030fe4abb0
|
[] |
no_license
|
briholt100/GetClean
|
1864c7cd3c5b37855d8f0b24bfb334a0e14d2269
|
261da03c3b7dfbb0316861b84f69574e176669e0
|
refs/heads/master
| 2021-01-18T18:42:52.633419
| 2014-12-30T02:11:26
| 2014-12-30T02:11:26
| 27,646,720
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,283
|
r
|
quiz3.R
|
# Coursera "Getting and Cleaning Data" quiz 3 worksheet.  The alternative
# setwd() calls below select the working directory per machine; only the
# last one executed takes effect.
getwd()
#for Dater
setwd("/home/brian/Projects/Coursera/GetAndCleanData")
#for latitude
setwd("/home/brian/Projects/Coursera/GetAndClean")
#for dater_bridge
setwd("C:\\Users\\Brian\\Documents\\Projects\\GetClean")
#for campus
setwd("I:\\My Data Sources\\mooc\\GetCleanData")
if (!file.exists("data")) { dir.create("data")}
#q1
# Households with >=10 acres (ACR==3) that sold >=$10k of agriculture (AGS==6).
download.file("http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv",destfile="./data/idahoHousing.csv")
#https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FPUMSDataDict06.pdf
idaho<-read.csv("./data/idahoHousing.csv")
idaho[which(idaho$ACR==3 & idaho$AGS == 6),c("ACR","AGS")]
idaho$agricultureLogical<-(c(idaho$ACR==3 & idaho$AGS == 6))
which(idaho$agricultureLogical)
idaho[order(idaho$NP),]
#q2
# NOTE(review): downloading a binary jpeg without mode="wb" corrupts the
# file on Windows -- confirm on the target machine.
library("jpeg")
download.file("http://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg",destfile="./data/leek.jpg")
#https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FPUMSDataDict06.pdf
leekPhoto<-readJPEG("./data/leek.jpg",native =T)
quantile(leekPhoto,probs=seq(0,1,.1))
#q3
# GDP ranking table: skip the header rows, keep the 190 ranked countries,
# then retain CountryCode/Ranking/Country/dollars.
download.file("http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv",destfile="./data/gdp.csv")
#https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FPUMSDataDict06.pdf
gdp<-read.csv("./data/gdp.csv",skip=4,nrows=190,col.names=c("CountryCode","Ranking","v3","Country","dollars","v6","v7","v8","v9","v10"))
gdp<-gdp[,c(1:2,4:5)]
str(gdp)
download.file("http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv",destfile="./data/edu.csv")
#https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FPUMSDataDict06.pdf
edu<-read.csv("./data/edu.csv",header=T)
str(edu)
names(edu)
table(edu$"Income.Group")
sort(gdp$CountryCode)
names(gdp)
# How many country codes match between the two tables?
sort(intersect(gdp[,3],edu[,31]))
table(edu[,31] %in% gdp[,"Country"])
table(edu[,1] %in% gdp[,1])
table( gdp[,1] %in% edu[,1])
head(gdp)
# Merge on CountryCode, then sort by GDP (column 32) descending.
df<-merge(edu,gdp,by.x="CountryCode", by.y="CountryCode")
names(df)
df<-df[order(df[,32],decreasing=T),]
head(df,13)
#q4
tapply(df$Ranking,df$Income.Group,mean)
#q5
# Bin the GDP ranking into quintiles.
df$GdpGroups<-cut(df$Ranking,breaks=quantile(df$Ranking,probs=seq(0,1,.2)))
head(df)
table(df$GdpGroups)
table(df$GdpGroups,df$Income.Group)
"how many are lower middle income but amount top 38 ranking?"
df[df$Ranking < 39 & df$Income.Group == "Lower middle income",31]
|
a4dcb2164ae930f630ed607a089a374365cca262
|
5003102e0f392e1e7618f37d1670af813eaf2ed9
|
/ScriptforAMCtry.R
|
2d0b9bab14282dc2fb6b5831931b14ab952e4e97
|
[] |
no_license
|
nilsmy/AMCTestmakeR
|
4b08c17ecea2586845394b9563d0f56aa9367acf
|
54c75a7b1733d0ea6d4e5d13dab9b5168bf6f357
|
refs/heads/master
| 2021-01-19T14:13:06.808718
| 2018-11-10T18:40:06
| 2018-11-10T18:40:06
| 84,669,794
| 1
| 0
| null | 2018-11-10T18:30:49
| 2017-03-11T18:04:07
|
R
|
UTF-8
|
R
| false
| false
| 207
|
r
|
ScriptforAMCtry.R
|
#Full try !!
AMCcreatetest("How much is $1+2$?",2,list("3", "11"), filepath = "~/Google Drive/AMC/essaiR2/groups.tex", title = "This is the title", paper = "a4", instructions = F, separateanswersheet = F)
|
f1f524f1a0444738cd14c4ff44f3c2c41b2e92de
|
6e663d8df86b2d291f710b3f469772a84965852c
|
/SPADE-analysis/dataprep_viz_sibilants_v1.R
|
7ea2eefee2d3188ddcca459abcfb7b8289e61e5e
|
[
"MIT"
] |
permissive
|
jeffmielke/SPADE
|
a0c90724ac8c81d2529c3870bab7fcede2f5e461
|
e110d5797bf793a7f6b8e3fb0496d9b092b3499e
|
refs/heads/master
| 2021-07-11T09:41:27.268020
| 2018-12-04T15:51:53
| 2018-12-04T15:51:53
| 136,250,150
| 0
| 0
|
MIT
| 2018-12-04T15:51:55
| 2018-06-06T00:32:30
|
Python
|
UTF-8
|
R
| false
| false
| 7,890
|
r
|
dataprep_viz_sibilants_v1.R
|
##
## first script doing rough visualization of s-retraction, for four datasets processed so far
##
## Morgan, late 10/2017
##
## you must have:
## '../SOTC/SOTC_sibilants.csv'
## '../buckeye/buckeye_sibilants.csv'
## same for raleigh and icecan
##
## (or change paths for your computer)
##
library(stringr)
library(ggplot2)
library(dplyr)
# 0. FUNCTIONS ------------------------------------------------------------
## function to make summary df with mean value of four vraibles (cog, frontslope, etc.)
## for each speaker/word pair
summaryDf <- function(x){
## dataframe summarizing measures for each word and speaker:
summDf <- x %>% group_by(word, onset, speaker) %>% summarise(n=n(), cog=mean(cog), slope=mean(slope), spread=mean(spread), peak=mean(peak))
## long format:
summDf <- gather(summDf, var, val, -word, -onset, -speaker, -n)
return(summDf)
}
# 1. RALEIGH --------------------------------------------------------------
ral.sib = read.csv('../Raleigh/Raleigh_sibilants.csv')
## there are just 100 voiced sibilants in whole dataset (0.1% of total), so exclude them:
ral.sib <- subset(ral.sib, phone_label %in% c('S', 'SH'))
## we are interested in onset effects. let's exclude onset levels with few observations (<100):
excludeLevels <- names(which(xtabs(~onset, ral.sib)<100))
cat("Excluding onsets:", paste(excludeLevels, sep=' '))
ral.sib <- droplevels(filter(ral.sib, !onset%in%excludeLevels))
## reorder onset so that /esh/ < /str/ < /sCr/ < others < /s/
ral.sib$onsetOrder <- 4
ral.sib[ral.sib$onset=='SH','onsetOrder'] <- 1
ral.sib[str_detect(ral.sib$onset,'R'),'onsetOrder'] <- 3
ral.sib[str_detect(ral.sib$onset,'S/T/R'),'onsetOrder'] <- 2
ral.sib[ral.sib$onset=='S','onsetOrder'] <- 5
ral.sib$onset <- with(ral.sib, reorder(onset, onsetOrder))
## subset of primary interest: /s/ versus /str/ versus /esh/ onsets
ral.sib.sub <- droplevels(filter(ral.sib, onset%in%c('S','SH','S/T/R')))
## reorder factors to expected order
ral.sib.sub$onset <- factor(ral.sib.sub$onset, levels=c('S','S/T/R', 'SH'))
ral.sib.sub.summ <- summaryDf(ral.sib.sub)
ral.sib.summ <- summaryDf(ral.sib)
## plot for just es/str/esh
ggplot(aes(x=onset, y=val), data=ral.sib.sub.summ) + geom_violin() + facet_wrap(~var, scales='free_y')
## looks basically OK, but why such low values for cog?
## comapre: Baker et al. Fig. 1
## examine by speaker, for cog:
## ggplot(aes(x=onset, y=val), data=filter(ral.sib.sub.summ, var=='cog')) + geom_violin() + facet_wrap(~speaker)
## plot for all onsets
ggplot(aes(x=onset, y=val), data=ral.sib.summ) + geom_violin() + facet_wrap(~var, scales='free')
## compare: Baker et al. Fig 2 for COG
# 2. BUCKEYE --------------------------------------------------------------
buck.sib = read.csv('../Buckeye/Buckeye_sibilants.csv')
## exclude z and zh onsets (though there are 750):
buck.sib <- subset(buck.sib, phone_label %in% c('s', 'sh'))
## we are interested in onset effects. let's exclude onset levels with few observations (<100):
excludeLevels <- names(which(xtabs(~onset, buck.sib)<100))
cat("Excluding onsets:", paste(excludeLevels, sep=' '))
buck.sib <- droplevels(filter(buck.sib, !onset%in%excludeLevels))
## reorder onset so that /esh/ < /str/ < /sCr/ < others < /s/
buck.sib$onsetOrder <- 4
buck.sib[buck.sib$onset=='sh','onsetOrder'] <- 1
buck.sib[str_detect(buck.sib$onset,'r'),'onsetOrder'] <- 3
buck.sib[str_detect(buck.sib$onset,'s/t/r'),'onsetOrder'] <- 2
buck.sib[buck.sib$onset=='s','onsetOrder'] <- 5
buck.sib$onset <- with(buck.sib, reorder(onset, onsetOrder))
## subset of primary interest: /s/ versus /str/ versus /esh/ onsets
buck.sib.sub <- droplevels(filter(buck.sib, onset%in%c('s','sh','s/t/r')))
## reorder factors to expected order
buck.sib.sub$onset <- factor(buck.sib.sub$onset, levels=c('s','s/t/r', 'sh'))
buck.sib.sub.summ <- summaryDf(buck.sib.sub)
buck.sib.summ <- summaryDf(buck.sib)
## plot for just es/str/esh
ggplot(aes(x=onset, y=val), data=buck.sib.sub.summ) + geom_violin() + facet_wrap(~var, scales='free_y')
# 3. SOTC -----------------------------------------------------------------
sotc.sib = read.csv('../SOTC/SOTC_sibilants.csv')
## exclude z onsets (no ZH apparently?)
sotc.sib <- subset(sotc.sib, phone_label %in% c('s', 'S'))
## we are interested in onset effects. let's exclude onset levels with few observations (<100):
excludeLevels <- names(which(xtabs(~onset, sotc.sib)<100))
cat("Excluding onsets:", paste(excludeLevels, sep=' '))
sotc.sib <- droplevels(filter(sotc.sib, !onset%in%excludeLevels))
## reorder onset so that /esh/ < /str/ < /sCr/ < others < /s/
sotc.sib$onsetOrder <- 4
sotc.sib[sotc.sib$onset=='S','onsetOrder'] <- 1
sotc.sib[str_detect(sotc.sib$onset,'r'),'onsetOrder'] <- 3
sotc.sib[str_detect(sotc.sib$onset,'s/t/r'),'onsetOrder'] <- 2
sotc.sib[sotc.sib$onset=='s','onsetOrder'] <- 5
sotc.sib$onset <- with(sotc.sib, reorder(onset, onsetOrder))
## subset of primary interest: /s/ versus /str/ versus /esh/ onsets
sotc.sib.sub <- droplevels(filter(sotc.sib, onset%in%c('s','S','s/t/r')))
## reorder factors to expected order
sotc.sib.sub$onset <- factor(sotc.sib.sub$onset, levels=c('s','s/t/r', 'S'))
sotc.sib.sub.summ <- summaryDf(sotc.sib.sub)
sotc.sib.summ <- summaryDf(sotc.sib)
icecan.sib = read.csv('../ICECAN/ICECAN_sibilants.csv')
## exclude z and zh onsets
icecan.sib <- subset(icecan.sib, phone_label %in% c('S', 'SH'))
## we are interested in onset effects. let's exclude onset levels with few observations (<50 in this corpus):
excludeLevels <- names(which(xtabs(~onset, icecan.sib)<50))
cat("Excluding onsets:", paste(excludeLevels, sep=' '))
icecan.sib <- droplevels(filter(icecan.sib, !onset%in%excludeLevels))
## reorder onset so that /esh/ < /str/ < /sCr/ < others < /s/
icecan.sib$onsetOrder <- 4
icecan.sib[icecan.sib$onset=='SH','onsetOrder'] <- 1
icecan.sib[str_detect(icecan.sib$onset,'R'),'onsetOrder'] <- 3
icecan.sib[str_detect(icecan.sib$onset,'S/T/R'),'onsetOrder'] <- 2
icecan.sib[icecan.sib$onset=='S','onsetOrder'] <- 5
icecan.sib$onset <- with(icecan.sib, reorder(onset, onsetOrder))
## subset of primary interest: /s/ versus /str/ versus /esh/ onsets
icecan.sib.sub <- droplevels(filter(icecan.sib, onset%in%c('S','SH','S/T/R')))
## reorder factors to expected order
icecan.sib.sub$onset <- factor(icecan.sib.sub$onset, levels=c('S','S/T/R', 'SH'))
icecan.sib.sub.summ <- summaryDf(icecan.sib.sub)
icecan.sib.summ <- summaryDf(icecan.sib)
#
# ## 'Rness': where is there an adjacent R?
# ## phone preceding sibialnt = R
# ## syllable nucleus = r-colored vowel
# ## syllable onset contains R
# ral.sib$Rness <- 'None'
# ral.sib[str_detect(ral.sib$previous_phone,'R'),]$Rness <- "Rprevious"
# ral.sib[str_detect(ral.sib$nucleus,'ER'),]$Rness <- "Rnucleus"
# ral.sib[str_detect(ral.sib$onset,'R'),]$Rness <- "Ronset"
#
# ggplot(ral.sib, aes(x=Rness, y = cog))+ geom_violin() + facet_wrap(~phone_label)
all.sib.sub.summ <- rbind(data.frame(buck.sib.sub.summ, dataset='buckeye'),
data.frame(ral.sib.sub.summ, dataset='raleigh'),
data.frame(sotc.sib.sub.summ, dataset='sotc'),
data.frame(icecan.sib.sub.summ, dataset='icecan')
)
## standardize onset names
## change S in SOTC to sh
temp <- as.character(all.sib.sub.summ$onset)
temp[which(with(all.sib.sub.summ, onset=='S' & dataset=='sotc'))] <- 'sh'
all.sib.sub.summ$onset <- factor(temp)
## lowercase
all.sib.sub.summ$onset <- factor(tolower(as.character(all.sib.sub.summ$onset)), levels=c('s','s/t/r','sh'))
## plot for just es/str/esh, across datasets and variables
dialectVarPlot <- ggplot(aes(x=dataset, y=val), data=all.sib.sub.summ) + geom_violin(aes(fill=onset)) + facet_wrap(~var, scales='free') + ylab("Value (Hz)")
## check it out
dialectVarPlot
ggsave(dialectVarPlot, file="dialectVarPlot.pdf", width=6,height=4)
|
e333052b95249e50ba396687b9bd6a644bb77ad7
|
ec87eef707dbf374965aef156dda6de8433892b1
|
/julia/code/prismaread/pr_rastwrite_lines.R
|
ffc93241a37e86295d779ae71918466a24d396c1
|
[] |
no_license
|
alveraboquet/stage-Machine-learning
|
622f6090feeb63f3ade8100ba5e368236cc0e674
|
2446d8c6ceece39a410dc6f9ad75be901b315a99
|
refs/heads/master
| 2023-07-02T21:44:43.148221
| 2021-08-09T19:58:43
| 2021-08-09T19:58:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,202
|
r
|
pr_rastwrite_lines.R
|
#' @title pr_rastwrite_lines
#' @description Write a raster object by blocks of lines
#' @param rast_in `Raster* object` to be written to disk
#' @param out_file `character` full path of output image
#' @param out_format `character` [\"TIF\" | \"ENVI\"], Default: 'tif'
#' @param proc_lev `character` [\"1\" | \"2D\"], Default: '1'
#' @param scale_min `numeric` coefficients use to compute values from DN on
#' products
#' @param scale_max `numeric` coefficients use to compute values from DN on L2
#' products
#' @param join `logical` flag used to indicate if we are saving the "joined"
#' VNIR+SWIR cube
#' @return the function is called for its side effects
#' @details DETAILS
#' @rdname pr_rastwrite_lines
#' @author Lorenzo Busetto, phD (2017) <lbusett@gmail.com>
#' @importFrom raster nlayers brick raster blockSize writeStart getValues
#' writeValues writeStop
pr_rastwrite_lines <- function(rast_in,
out_file,
out_format = "tif",
proc_lev = "1",
scale_min = NULL,
scale_max = NULL,
join = FALSE) {
if (raster::nlayers(rast_in) > 1) {
out <- raster::brick(rast_in, values = FALSE)
} else {
out <- raster::raster(rast_in)
}
bs <- raster::blockSize(out)
if (proc_lev == "ERR") {
datatype <- "INT1U"
}
if (substring(proc_lev, 1,1) == "1") {
datatype <- "FLT4S"
} else {
datatype <- "FLT4S"
}
out <- raster::writeStart(out,
filename = out_file,
overwrite = TRUE,
options = c("COMPRESS=LZW"),
datatype = datatype)
for (i in 1:bs$n) {
message("Writing Block: ", i, " of: ", bs$n)
v <- raster::getValues(rast_in, row = bs$row[i], nrows = bs$nrows[i] )
if (substring(proc_lev, 1, 1) == "2" & !join) {
v <- scale_min + (v * (scale_max - scale_min)) / 65535
}
out <- raster::writeValues(out, v, bs$row[i])
}
out <- raster::writeStop(out)
invisible(NULL)
}
|
17117d3be9cd080a66a86441fbdd573f77c4e661
|
f898801224c1f17ba62089b28f3f69c7c525e766
|
/binomial/tests/testthat.R
|
9d9594c2aafaaf7789cbb41a7b531da05e02e3e9
|
[] |
no_license
|
stat133-sp19/hw-stat133-nadia1212
|
44079944e7b5ab9dffdddbbb3fb82033d2de79a9
|
57ba3ab524660f9d3e8162f1b53a6d030eac6dd6
|
refs/heads/master
| 2020-04-28T12:33:00.104710
| 2019-05-03T19:14:44
| 2019-05-03T19:14:44
| 175,279,474
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 101
|
r
|
testthat.R
|
library(testthat)
library(binomial)
source("../R/functions.R")
test_file("tests/testthat/tests.R")
|
579752588cf81489d8008807088ad0185c8cfb56
|
0b589418cd520393c7489f022f30c113f59d7a8a
|
/3. Linear Regression And Modelling/Module 1/residuals.R
|
a6eb4b3f174338428a8f7cace59daad87d0b3f97
|
[] |
no_license
|
papas8105/Statistics-With-R
|
373998dac918f53fb01abda37370da5eba746ecd
|
0d897fe80c8f508ea7fa956c77122451b11bc279
|
refs/heads/main
| 2023-03-11T20:45:14.156590
| 2021-03-02T16:42:29
| 2021-03-02T16:42:29
| 318,364,986
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,674
|
r
|
residuals.R
|
# Derived from http://econometricsbysimulation.shinyapps.io/OLS-App/
# Load packages ----------------------------------------------------------------
library(shiny)
library(openintro)
library(plotrix)
# Define inputs ----------------------------------------------------------------
input <- list(rseed = 1)
seed <- as.numeric(Sys.time())
# Fundtion for generating the data ---------------------------------------------
draw.data <- function(type){
n <- 250
if (type == "linear.up") {
x <- c(runif(n - 2,0,4),2,2.1)
y <- 2*x + rnorm(n, sd = 2)
}
if (type == "linear.down") {
x <- c(runif(n - 2,0,4),2,2.1)
y <- -2 * x + rnorm(n, sd = 2)
}
if (type == "curved.up") {
x <- c(runif(n - 2, 0, 4),2,2.1)
y <- 2 * x^4 + rnorm(n,sd = 16)
}
if (type == "curved.down") {
x <- c(runif(n - 2, 0, 4),2,2.1)
y <- -2*x^3 + rnorm(n,sd = 9)
}
if (type == "fan.shaped") {
x = seq(0,3.99,4/n)
y = c(rnorm(n/8,3,1),rnorm(n/8,3.5,2),rnorm(n/8,4,2.5),rnorm(n/8,4.5,3),rnorm(n/4,5,4),rnorm((n/4) + 2,6,5))
}
data.frame(x = x,y = y)
}
# UI ---------------------------------------------------------------------------
ui <- pageWithSidebar(
# Title ----
headerPanel("Diagnostics for simple linear regression"),
# Sidebar ----
sidebarPanel(
radioButtons("type", "Select a trend:",
list("Linear up" = "linear.up",
"Linear down" = "linear.down",
"Curved up" = "curved.up",
"Curved down" = "curved.down",
"Fan-shaped" = "fan.shaped")),
br(),
checkboxInput("show.resid", "Show residuals", FALSE),
br(),
helpText("This app uses ordinary least squares (OLS) to fit a regression line to the data with the selected trend
. The app is designed to help you practice evaluating whether or not the linear model is an appropriate
fit to the data. The three diagnostic plots on the lower half of the page are provided to help you
identify undesirable patterns in the residuals that may arise from non-linear trends in the data."),
br(),
helpText(a(href = "https://github.com/ShinyEd/ShinyEd/tree/master/slr_diag",target = "_blank", "View code")),
helpText(a(href = "http://shinyed.github.io/intro-stats", target = "_blank", "Check out other apps")),
helpText(a(href = "https://openintro.org", target = "_blank", "Want to learn more for free?"))),
# Main panel ----
mainPanel(
plotOutput("scatter"),
br(),
br(),
plotOutput("residuals")
)
)
# Server -----------------------------------------------------------------------
server <- function(input, output) {
mydata <- reactive({
draw.data(input$type)
})
lmResults <- reactive({
regress.exp <- "y~x"
lm(regress.exp, data = mydata())
})
# Show plot of points, regression line, residuals
output$scatter <- renderPlot({
data1 <- mydata()
x <- data1$x
y <- data1$y
# For confidence interval
xcon <- seq(min(x) - 0.1, max(x) + 0.1, 0.025)
predictor <- data.frame(x = xcon)
yhat <- predict(lmResults())
yline <- predict(lmResults(), predictor)
par(cex.main = 1.5,cex.lab = 1.5,cex.axis = 1.5,mar = c(4,4,4,1))
r.squared = round(summary(lmResults())$r.squared, 4)
corr.coef = round(sqrt(r.squared), 4)
plot(c(min(x),max(x))
,c(min(y,yline),max(y,yline)),
type = "n",
xlab = "x",
ylab = "y",
main = paste0("Regression Model\n","(R = ", corr.coef,", ","R-squared = ", r.squared,")"))
newx <- seq(min(data1$x),max(data1$x),length.out = 400)
confs <- predict(lmResults(),newdata = data.frame(x = newx),
interval = 'confidence')
preds <- predict(lmResults(),newdata = data.frame(x = newx),
interval = 'predict')
polygon(c(rev(newx),newx),c(rev(preds[ ,3]),preds[ ,2]),col = grey(.95),border = NA)
polygon(c(rev(newx),newx),c(rev(confs[ ,3]),confs[ ,2]),col = grey(.75),border = NA)
points(x,y,pch = 19,col = COL[1,2])
lines(xcon,yline,lwd = 2,col = COL[1])
if (input$show.resid) for (j in 1:length(x))
lines(rep(x[j],2),c(yhat[j],y[j]),col = COL[4])
legend_pos = ifelse(lmResults()$coefficients[1] < 1,"topleft","topright")
if (input$type == "linear.down") legend_pos = "topright"
if (input$type == "fan.shaped") legend_pos = "topleft"
legend(legend_pos,inset = .05,
legend = c("Regression Line","Confidence Interval","Prediction Interval"),
fill = c(COL[1],grey(.75),grey(.95)))
box()
})
output$residuals <- renderPlot({
par(mfrow = c(1,3),cex.main = 2,cex.lab = 2,cex.axis = 2,mar = c(4,5,2,2))
residuals = summary(lmResults())$residuals
predicted = predict(lmResults(),newdata = data.frame(x = mydata()$x))
plot(residuals ~ predicted,
main = "Residuals vs. Fitted Values",xlab = "Fitted Values",ylab = "Residuals",
pch = 19,col = COL[1,2])
abline(h = 0,lty = 2)
d = density(residuals)$y
h = hist(residuals,plot = FALSE)
hist(residuals,main = "Histogram of Residuals",xlab = "Residuals",
col = COL[1,2],prob = TRUE, ylim = c(0,max(max(d),max(h$density))))
lines(density(residuals),col = COL[1],lwd = 2)
qqnorm(residuals,pch = 19,col = COL[1,2],main = "Normal Q-Q Plot of Residuals")
qqline(residuals,col = COL[1],lwd = 2)
},height = 280)
}
# Create the Shiny app object --------------------------------------------------
shinyApp(ui = ui, server = server)
|
383db93343cf9739294506cd8eb9ab53eab7e872
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/examples/sqs.R
|
cc7263139fe6ff49e80b75d1e4951aa47ae657ed
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| false
| 623
|
r
|
sqs.R
|
# Simple Queue Service examples
sqs <- paws::sqs()
# Create a queue.
sqs <- sqs$create_queue(
QueueName = "ExampleQueue"
)
# Add a message to the queue.
sqs$send_message(
QueueUrl = sqs$QueueUrl,
MessageBody = "foo"
)
# Get the queue's attributes.
sqs$get_queue_attributes(
QueueUrl = sqs$QueueUrl,
AttributeNames = "All"
)
# Get the next message from the queue.
msg <- sqs$receive_message(
QueueUrl = sqs$QueueUrl
)
# Delete the message.
sqs$delete_message(
QueueUrl = sqs$QueueUrl,
ReceiptHandle = msg$Messages[[1]]$ReceiptHandle
)
# Delete the queue.
sqs$delete_queue(
QueueUrl = sqs$QueueUrl
)
|
01216ea048e1b7b734ec8773a641db185bdda6cf
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/seewave/examples/synth2.Rd.R
|
6ce88e6706030711d4d9800c3c31e123fba6fca7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,847
|
r
|
synth2.Rd.R
|
library(seewave)
### Name: synth2
### Title: Synthesis of time wave (tonal model)
### Aliases: synth2
### Keywords: datagen ts
### ** Examples
## You can use plot=TRUE and spectro() options
## to directly 'see' the new-built sounds
## MODIFICATION OF A REFERENCE SIGNAL
data(tico)
env.tico <- env(tico, f=22050, plot=FALSE)
ifreq.tico <- ifreq(tico, f=22050, plot=FALSE)$f[,2]
# recover the original signal
s <- synth2(env=env.tico, ifreq=ifreq.tico*1000, f=22050)
# original signal with instantaneous frequency reversed
s <- synth2(env=env.tico, ifreq=rev(ifreq.tico)*1000, f=22050)
# original signal with a +1000 Hz linear frequency shift
s <- synth2(env=env.tico, ifreq=ifreq.tico*1000+1000, f=22050)
# original signal with instantaneous frequency multiplied by 2
s <- synth2(env=env.tico, ifreq=ifreq.tico*1000*2, f=22050)
# original signal with a linear instantaneous frequency at 2000 Hz
s <- synth2(env=env.tico, ifreq=rep(2000, times=length(tico@left)), f=22050)
## DE NOVO SYNTHESIS
# instantaneous frequency increasing by step of 500 Hz
s <- synth2(ifreq=rep(c(500,1000,1500,2000,2500,3000,3500,4000), each=2000), f=16000)
# square function of the instantaenous frequency
s <- synth2(ifreq=500+seq(-50,50, length.out=8000)^2, f=8000)
# linear increase of the amplitude envelope
s <- synth2(env=seq(0,1,length=8000), ifreq=rep(2000,8000), f=8000)
# square-root increase of the amplitude envelope
s <- synth2(env=sqrt(seq(0,1,length=8000)), ifreq=rep(2000,8000), f=8000)
# square-root increase and decrease of the amplitude envelope
s <- synth2(env=c(sqrt(seq(0,1,length=4000)), sqrt(seq(1,0,length=4000))),
ifreq=rep(2000,8000), f=8000)
# amplitude envelope and instantaneous frequency following a normal density shape
norm <- rep(dnorm(-4000:3999, sd=1000), 2)
s <- synth2(env=norm, ifreq=500+(norm/max(norm))*1000, f=8000)
|
1d48bb2524fd8d0b89f5f701079fd706f029a83b
|
35a4d25ef4da22e639ebd7411c4f1781a46b88d0
|
/man/mk_codons.Rd
|
23467b92ed07b43f10f34402501102f11578536b
|
[] |
no_license
|
rforbiodatascience21/2021_group_20_rpackage
|
882e1111331a73cd9eea1d4faa7cfdbe376c98e5
|
696132ba40cdef4052b6130bf708daf3e796c23f
|
refs/heads/main
| 2023-03-24T11:52:33.611925
| 2021-03-22T10:20:07
| 2021-03-22T10:20:07
| 350,259,539
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 380
|
rd
|
mk_codons.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mk_codons.R
\name{mk_codons}
\alias{mk_codons}
\title{DNA sequence to Codons}
\usage{
mk_codons(dna, s = 1)
}
\arguments{
\item{dna}{List of nucleotides (A,T,G,C)}
}
\value{
codons Triplets of nucleotides
}
\description{
Separates one sequence of aa into condons
}
\examples{
mk_codons("ATCGCTATG")
}
|
84d6d8dd6a4fb6212a1de852fc00cb3df698bd31
|
3bc412a57570785ad898af1a5e4283d84b109b68
|
/section-11/sec-11.R
|
289e2c1a068ab3b558d110946e1a1cad2fa2d239
|
[] |
no_license
|
pbaylis/ARE212
|
7889bafcc9fcdc1fee1d82f41404ec3506a55776
|
4ae1b7164d315dfea7fce622d0159983b8e9aa87
|
refs/heads/master
| 2020-05-31T10:48:39.896020
| 2019-04-23T20:12:50
| 2019-04-23T20:12:50
| 15,325,980
| 3
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,831
|
r
|
sec-11.R
|
rm(list = ls())
library(XML)
library(RCurl)
library(stringr)
options(show.error.messages = FALSE)
token <- "characters"
nameslist <- list()
i <- 1
time <- proc.time()
while (is.character(token) == TRUE & i < 100000) {
baseurl <- "http://oai.crossref.org/OAIHandler?verb=ListSets"
if (token == "characters") {
tok.follow <- NULL
} else {
tok.follow <- paste("&resumptionToken=", token, sep = "")
}
query <- paste(baseurl, tok.follow, sep = "")
xml.query <- xmlParse(getURL(query))
xml.query
set.res <- xmlToList(xml.query)
set.res
names <- as.character(sapply(set.res[["ListSets"]], function(x) x[["setName"]]))
names
nameslist[[token]] <- names
if (class(try(set.res[["request"]][[".attrs"]][["resumptionToken"]])) == "try-error") {
stop("no more data")
}
else {
token <- set.res[["request"]][[".attrs"]][["resumptionToken"]]
}
i <- i + 1
}
(proc.time() - time)
allnames <- do.call(c, nameslist)
length(allnames)
head(allnames)
econtitles <- allnames[str_detect(allnames, "^[Ee]conom|\\s[Ee]conom")]
econtitles2 <- allnames[str_detect(allnames, "[Ee]conomic|\\s[Ee]conomic")]
length(econtitles)
length(econtitles2)
sample(econtitles, 10)
countJournals <- function(regex) {
titles <- allnames[str_detect(allnames, regex)]
return(length(titles))
}
subj = c("economic", "business", "politic", "environment", "engineer", "history")
regx = c("^[Ee]conomic|\\s[Ee]conomic", "^[Bb]usiness|\\s[Bb]usiness",
"^[Pp]olitic|\\s[Pp]olitic", "^[Ee]nvironment|\\s[Ee]nvironment",
"^[Ee]ngineer|\\s[Ee]ngineer", "^[Hh]istory|\\s[Hh]istory")
subj.df <- data.frame(subject = subj, regex = regx)
subj.df[["count"]] <- sapply(as.character(subj.df[["regex"]]), countJournals)
library(ggplot2)
(g <- ggplot(data = subj.df, aes(x = subject, y = count)) + geom_bar(stat = "identity"))
|
78b66967f4a3cace052a2599b01a6c058acb6bd4
|
1cf1f5e094abdca9cf4222aeeaf393154d5b091d
|
/chap3/RINT302.R
|
bb1d0dc03bcd33c7e79b5e3600104bb0432e3385
|
[] |
no_license
|
Daigo1009/r_introduction
|
a0c14d5ccc1395118ca2cc1fe6141a743f3eabce
|
e9983214ce3b61a8197ac003f1ee62fd1b9c0943
|
refs/heads/master
| 2022-04-19T16:32:38.699232
| 2013-10-08T13:52:12
| 2013-10-08T13:52:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
RINT302.R
|
x = 0:7
y = dpois(x,3)
par(family="HiraMaruProN-W4")
plot(x,y,type='l',xlab='x',ylab='y',main=' ポワソン分布')
|
3244dbc2696dc7e7f1f2193893b64ff7470da038
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rprojroot/examples/root_criterion.Rd.R
|
b40a2fd2a9c0a15cac8b94e8ef45a29f0a8e2f7d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 524
|
r
|
root_criterion.Rd.R
|
library(rprojroot)
### Name: root_criterion
### Title: Is a directory the project root?
### Aliases: root_criterion is.root_criterion as.root_criterion
### as.root_criterion.character as.root_criterion.root_criterion
### |.root_criterion has_file has_dir has_file_pattern has_dirname
### ** Examples
root_criterion(function(path) file.exists(file.path(path, "somefile")), "has somefile")
has_file("DESCRIPTION")
is_r_package
is_r_package$find_file
## Not run:
##D is_r_package$make_fix_file(".")
## End(Not run)
|
2a062f0a03513c5c811e9d88ff60cb1eec0edc4e
|
23375da49f22e497f1d805f1a15199c3577b2e02
|
/README.rd
|
1ca595849c7d0254957ac04836784c3df55dce8b
|
[] |
no_license
|
acelan86/Jager
|
e8f44d4fc985d20a829b595f5ab5289f6eb74956
|
c47d4b2f378fc6e5262346bf2b1885005116153f
|
refs/heads/master
| 2021-01-19T21:28:10.340732
| 2014-06-27T03:25:37
| 2014-06-27T03:25:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30
|
rd
|
README.rd
|
# Fate Zero
# Update by acelan
|
c1f7fc1831df3680398062eaac4a0fb9f3a9d003
|
eb1c89cf89947b2c0222dfede4cd0f3885465e34
|
/Dose finding - KGC-v2.0.r
|
60af28923955b614e4469d8deff1ae953bf37ddc
|
[] |
no_license
|
AmirAli-N/DynamicProgramming-DoseFinding
|
b799b8164280a12e3283a4c365b937e39672883f
|
92aa8c618d56f10b801985f4173b719b1d798c34
|
refs/heads/master
| 2020-04-01T15:46:23.626118
| 2019-07-12T20:11:28
| 2019-07-12T20:11:28
| 153,350,921
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,018
|
r
|
Dose finding - KGC-v2.0.r
|
library(mvtnorm)
##########################################################################
y=c() #response vector
dose=c() #dose vector
J=11 #number of doses
patient=1000#number of patients
true_sigma=sqrt(10) #true deviation of responses
mu_0=c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) #initial hyperparameter
sigma_0=diag(100, nrow=J, ncol=J) #initial hyperparameter
mu_n=lapply(1:patient, function(x) c())
sigma_n=lapply(1:patient, function(x) matrix(0, nrow=J, ncol=J))
theta_estimate=matrix(NA, nrow=patient, ncol=J)
var_estimate=matrix(NA, nrow=patient, ncol=J)
var_target=c()
M=1000
T=1000
n_simulation=30
start_time=0
end_time=0
alpha=1
############################################################################
true_theta=c(0.0, 0.07, 0.18, 0.47, 1.19, 2.69, 5, 7.31, 8.81, 9.53, 9.82)
curve_st="sigmoid-significant"
target_dose=10
############################################################################
evolution_eq <-function(mu, sigma, new_y, z_j){
e_z = rep(0, J)
e_z[z_j] <- 1
sigma_tilde <- (sigma %*% e_z)*(1/sqrt(true_sigma^2+sigma[z_j, z_j]))
new_sigma <- sigma - sigma_tilde %*% t(sigma_tilde)
Var_yF <- true_sigma^2 + sigma[z_j, z_j]
new_X <- (new_y - mu[z_j])/sqrt(Var_yF)
new_mu= mu + sigma_tilde * new_X
return (list("mu"=new_mu, "sigma"=new_sigma))
}
dose_allocation <-function(mu, sigma){
var_j=c()
#create a sample of M simulated thetas
theta_sample<-rmvnorm(M, mu, sigma)
theta_est<-apply(theta_sample, 2, mean)
var_j=sapply(1:ncol(theta_sample), function(i) {
var_jm=unlist(lapply(theta_sample[, i], function(y) {
y_jm=rnorm(1, y, true_sigma)
temp_res=evolution_eq(mu, sigma, y_jm, i)
temp_mu=temp_res$mu
temp_sigma=temp_res$sigma
temp_theta_sample<-rmvnorm(T, temp_mu, temp_sigma)
ED95=c()
ED95=apply(temp_theta_sample, 1,function(z) {
if (all(z<=0)){
return(NA)
}else{
return (min(which(z>=0.95*max(z))))
}
})
return(var(ED95, na.rm=TRUE))
}))
return(mean(var_jm))
})
return(list("variance"=var_j, "theta"=theta_est))
}
start_time=Sys.time()
# Run n_simulation independent trials; each trial sequentially allocates
# `patient` doses (globals patient, mu_0, J, etc. defined earlier in file).
for (reps in 1:n_simulation){
set.seed(reps)
print(paste("reps=", reps))
for (K in 1:patient){
if(K==1){
# First patient: initialise the prior covariance with a squared-exponential
# kernel and the prior mean mu_0.
for (i in 1:J){
for(j in 1:J){
sigma_n[[K]][i,j]<-100*exp(-alpha*(i-j)^2)
}
}
mu_n[[K]]=mu_0
}
#after 30 patients
else{
# Pick the dose minimising the expected posterior ED95 variance.
res<-dose_allocation(mu_n[[K-1]], sigma_n[[K-1]])
temp_var<-res$variance #call dose allocation to variance vector for every dose
#theta_estimate<-res$theta
var_estimate[K-1,]<-temp_var
theta_estimate[K-1,]<-res$theta
z_j=min(which(temp_var==min(temp_var)))
y=c(y, rnorm(1, true_theta[z_j], true_sigma)) #observing and add the true response of the optimal dose
dose[K-1]<-z_j #add optimal dose to dose vector
#call a function of update equations for calculating posterior moments, i.e., mu_n, sigma_n
res=evolution_eq(mu_n[[K-1]], sigma_n[[K-1]], y[length(y)], dose[length(dose)])
mu_n[[K]]<-res$mu
sigma_n[[K]]<-res$sigma
# Track the posterior variance of the target dose over time.
var_target=c(var_target, sigma_n[[K]][target_dose,target_dose])
}
}
# Persist this replication's results (Windows-specific output paths).
write(var_target, file=paste("C:/Results/",curve_st,"/KGC-target_var-",toString(reps),".txt", sep=""), append=FALSE, sep="\n")
write(dose, file=paste("C:/Results/",curve_st,"/KGC-doses-",toString(reps),".txt", sep=""), append=FALSE, sep="\n")
write.table(var_estimate, file=paste("C:/Results/",curve_st,"/KGC-var-",toString(reps),".txt", sep=""), sep="\t", eol="\n", row.names=FALSE, col.names=FALSE)
write.table(theta_estimate, file=paste("C:/Results/",curve_st,"/KGC-thetas-",toString(reps),".txt", sep=""), sep="\t", eol="\n", row.names=FALSE, col.names=FALSE)
# Reset all per-replication state before the next trial.
y=c() #response vector
dose=c() #dose vector
mu_n=lapply(1:patient, function(x) c())
sigma_n=lapply(1:patient, function(x) matrix(0, nrow=J, ncol=J))
theta_estimate=matrix(NA, nrow=patient, ncol=J)
var_estimate=matrix(NA, nrow=patient, ncol=J)
var_target=c()
}
end_time <- Sys.time()
# Elapsed wall-clock time for the whole experiment.  The original computed
# start_time - end_time, which prints a negative duration.
end_time - start_time
|
dae99881c368cf984d9fb2310c70cbf300d95a2e
|
fd0ab0f09d3c07f03e0af82bf93875524c44a0e9
|
/tmp-tests/test-pcadapt3.R
|
f22eca37f547c849ae43a70f253b91b270f8f278
|
[] |
no_license
|
privefl/bigsnpr
|
b05f9e36bcab6d8cc86fb186c37fe94a6425960a
|
83db98f974b68132a9a3f3ee7ca388159a4c12b5
|
refs/heads/master
| 2023-08-02T13:31:18.508294
| 2023-06-30T12:15:55
| 2023-06-30T12:15:55
| 62,644,144
| 162
| 47
| null | 2022-10-12T16:46:15
| 2016-07-05T14:36:34
|
R
|
UTF-8
|
R
| false
| false
| 757
|
r
|
test-pcadapt3.R
|
snp_pcadapt <- function(G, U.row, ind.row = rows_along(G)) {
  # pcadapt-style outlier scan: regress each SNP of G on the K principal
  # component scores in U.row, then summarise each SNP's z-score vector as
  # one robust Mahalanobis distance.  Returns an "mhtest" data frame with a
  # `predict` attribute mapping transformed scores to chi-square p-values.
  K <- ncol(U.row)
  # U.row must have orthonormal columns (e.g. the U of a truncated SVD).
  stopifnot(all.equal(crossprod(U.row), diag(K)))
  zscores <- linRegPcadapt(attach.BM(G), U = U.row, rowInd = ind.row)
  # Robust Mahalanobis distance of each SNP's z-scores (pairwise GK estimator).
  d <- covRob(zscores, estim = "pairwiseGK")$dist
  # A plain closure over K replaces the original eval(parse(sprintf(...)))
  # construction: same function, no string evaluation.
  fun.pred <- function(xtr) stats::pchisq(xtr, df = K, lower.tail = FALSE)
  structure(data.frame(score = d),
            class = c("mhtest", "data.frame"),
            transfo = identity,
            predict = fun.pred)
}
# Ad-hoc checks of snp_pcadapt().
# NOTE(review): G, G.svd and popres are created elsewhere (not in this file
# chunk); G.svd is presumably an SVD of the genotype matrix — confirm.
tmp <- snp_pcadapt(G, G.svd$u)
snp_qq(tmp)
snp_qq(snp_gc(tmp))
snp_manhattan(snp_gc(tmp), popres$map)
# Scree plot of singular values, to choose how many PCs to keep.
plot(G.svd$d, type = "b")
# Same scan restricted to the first 5 PCs.
tmp <- snp_pcadapt(G, G.svd$u[, 1:5])
snp_qq(tmp)
snp_qq(snp_gc(tmp))
snp_manhattan(snp_gc(tmp), popres$map)
|
9e67db3a84c1601beb5e70d9389a4d3754266c81
|
5233a4040c4f2d3fc79d98f306e0f7cc1db72171
|
/RxODE/tests/test-parsing.R
|
524e68c66dd29df071409d0c2a648f10c8735e75
|
[] |
no_license
|
hallowkm/RxODE
|
fccac362359b4874d88292011558d4684aa8610f
|
5e0526d1c85a8ae943610ead167bca8b135909d5
|
refs/heads/master
| 2020-04-03T20:55:35.695093
| 2017-03-28T11:37:45
| 2017-03-28T11:37:45
| 38,068,516
| 18
| 15
| null | 2016-11-01T20:48:13
| 2015-06-25T19:08:25
|
C
|
UTF-8
|
R
| false
| false
| 2,254
|
r
|
test-parsing.R
|
# test ODE parsing for syntax errors
library("RxODE")
library("tools")
# Scratch directory for the model files RxODE() generates.
tmp <- tempdir()
# list of model specs with errors (test description and code)
# Model specifications that must each make the RxODE parser fail.
# Every entry pairs a human-readable test description with the
# deliberately broken model code.
errs <- list(
  c(desc = 'incorrect d/dt operator',
    code = 'd/dt(y = 1);'),
  c(desc = 'comments must be outside statements',
    code = 'd/dt(y) = 1 # bad comment;'),
  c(desc = 'missing end of statement ";"',
    code = paste(sep = "\n",
                 'd/dt(depot) = -ka * depot',
                 'd/dt(centr) = ka * depot - kout * centr;')),
  c(desc = 'arithmetic syntax error',
    code = paste(sep = "\n",
                 '# comment, just to show error in line 3',
                 'd/dt(y) = -ka;',
                 'C1 = /y;')),
  c(desc = 'unexistent operator **',
    code = paste(sep = "\n",
                 'd/dt(y) = -ka;',
                 'C1 = ka * y**2;')),
  c(desc = 'unexistent operator %',
    code = paste(sep = "\n",
                 'remainder = 4 % 3;',
                 'd/dt(y) = -ka;',
                 'C1 = ka * y;')),
  c(desc = 'incorrect "if" statement',
    code = paste(sep = "\n",
                 'if(comed==0){',
                 ' F = 1.0;',
                 'else {',          # closing "}" deliberately missing
                 ' F = 0.75;',
                 '};',
                 'd/dt(y) = F * y;')),
  c(desc = 'illegal variable name (starting w. a digit)',
    code = paste(sep = "\n",
                 'F = 0.75;',
                 '12foo_bar = 1.0/2.0;',
                 'd/dt(y) = F * y;')),
  c(desc = 'illegal variable name (illegal ".")',
    code = paste(sep = "\n",
                 'F = 0.75;',
                 'foo.bar = 1.0/2.0;',
                 'd/dt(y) = F * y;')),
  c(desc = 'illegal variable name in d/dt()',
    code = paste(sep = "\n",
                 'd/dt(y_1) = F * y;',   # okay
                 'd/dt(y.1) = F * y;'))  # not okay
)
# Run every broken model through RxODE() and assert that parsing fails.
# seq_len() (rather than 1:N) is safe even if the spec list is ever empty.
N <- length(errs)
for (i in seq_len(N)) {
  desc <- errs[[i]]["desc"]
  code <- errs[[i]]["code"]
  cat(sprintf('Syntax test %d of %d (%s)\n', i, N, desc))
  cat("==========================================================\n")
  cat("Input:\n", code, "\n", sep="")
  cat("\nRxODE message is:\n")
  # assertError() (tools) fails the run if RxODE() does NOT raise an error.
  assertError(RxODE(model = code, wd = tmp, modName = paste0("err", i)))
  cat("\n")
}
# Remove the scratch directory used for the generated model files.
unlink(tmp, recursive = TRUE)
|
6a16fc68c66b5c754e32eedea43f46d4f4e96111
|
b1ebb9059b912bd0bee6af370e04533e8ffe1e5d
|
/week5/67_lab_pcr_pls_regression.R
|
d9447ae2604ed2c46c6c9a5b30edf792737e3a0f
|
[] |
no_license
|
sebastianbautista/stat508
|
57c25364361782addba41a69cccb61c274d4dd4b
|
9b24d49639747b88e129c9c253d443a2e28cd86f
|
refs/heads/master
| 2020-04-16T11:57:43.166831
| 2019-04-24T21:48:50
| 2019-04-24T21:48:50
| 165,559,062
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,106
|
r
|
67_lab_pcr_pls_regression.R
|
# 6.7 Lab 3: PCR and PLS Regression
# (idiom fixes: `<-` for assignment, TRUE instead of the reassignable T)

### 6.7.1: Principal Components Regression
library(ISLR)
library(pls)

# Remove observations with missing values (Hitters$Salary has NAs)
df <- na.omit(Hitters)
x <- model.matrix(Salary ~ ., df)[, -1]
y <- df$Salary

# train test split
set.seed(1)
train <- sample(1:nrow(df), nrow(df) / 2)
test <- (-train)
Xtrain <- x[train, ]
Xtest <- x[test, ]
ytrain <- y[train]
ytest <- y[test]

# Fit PCR, making sure to scale and use 10-fold CV
set.seed(2)
pcr.fit <- pcr(Salary ~ ., data = df, scale = TRUE, validation = "CV")
# CV score reported is RMSE
# % variance explained shows the amount of information captured using M principal components
summary(pcr.fit)

# plot CV scores (MSE)
# M=16 components are chosen, but M=1 is also pretty good!
validationplot(pcr.fit, val.type = "MSEP")

# PCR on training data and evaluate test set
# the lowest CV error occurs when M=7 components are used
set.seed(1)
pcr.fit <- pcr(Salary ~ ., data = df, subset = train, scale = TRUE, validation = "CV")
validationplot(pcr.fit, val.type = "MSEP")

# compute test MSE with M=7
pcr.pred <- predict(pcr.fit, Xtest, ncomp = 7)
mean((pcr.pred - ytest)^2)
# 96556, comparable to ridge/lasso, but is harder to interpret because it
# doesn't perform variable selection or directly produce coefficient estimates

# refit PCR on the full data set using M=7
pcr.fit.full <- pcr(y ~ x, scale = TRUE, ncomp = 7)
summary(pcr.fit.full)

### 6.7.2: Partial Least Squares
set.seed(1)
pls.fit <- plsr(Salary ~ ., data = df, subset = train, scale = TRUE, validation = "CV")
# lowest CV error is at M=2 pls directions
summary(pls.fit)
validationplot(pls.fit, val.type = "MSEP")

# evaluate corresponding test MSE
pls.pred <- predict(pls.fit, Xtest, ncomp = 2)
mean((pls.pred - ytest)^2)

# finally refit using the full data set
pls.fit.full <- plsr(Salary ~ ., data = df, scale = TRUE, ncomp = 2)
summary(pls.fit.full)
# the percentage of variance in Salary that this explains, 46.40%, is almost as much as
# the M=7 PCR fit, 46.69%.
# this is because PCR only attempts to maximize the amount of variance
# explained in the predictors, while
# PLS searches for directions that explain variance in both the predictors and response
|
065f3fa79c40938426232508ed2d8ff5b25a122f
|
bde57b95fe493922baded8d3090a7633c56b2b54
|
/doc/recipe-install-package.R
|
798d11fc6ff2b4c378bfb906b8eec92974661f76
|
[] |
no_license
|
datakolektiv/vincent
|
49e0110a1c336654e17760b2e20f6abd455eafd0
|
932d2ff1aefd95affdaf242e6cfdf7aa8d4113ea
|
refs/heads/master
| 2022-11-10T11:29:15.842678
| 2020-06-20T06:53:29
| 2020-06-20T06:53:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 593
|
r
|
recipe-install-package.R
|
# This script shows you how to install a new package to the project.
# The project uses renv, so the package library, DESCRIPTION and the
# renv lockfile have to be kept in sync — hence the three steps below.
# install a package -------------------------------------------------------
renv::install("shinydashboard") # 1. INSTALL the package into the project library
usethis::use_package("shinydashboard") # 2. fill in DESCRIPTION
renv::snapshot() # 3. update the RENV lockfile
# add it in ROXYGEN comments # 4. add to ROXYGEN/NAMESPACE
# remove a package (mirror of the steps above) ----------------------------
renv::remove("shinydashboard")
renv::snapshot()
# remove it from DESCRIPTION
# remove it from ROXYGEN comments
|
f8a40f9306cfa1477533ffb6caa7a158b4aaddf7
|
9ec017b29b36c2c10468b230b943a592c26ff6d2
|
/man/paste.Rd
|
b042e9f6881c582447dbd12999aeb0041eb4a0fb
|
[] |
no_license
|
efinite/utile.tools
|
fa2736d329a31fd20ca33f3b5d1d72750523ce06
|
d059e684da40da3adb8c080fc945ffd661d756ba
|
refs/heads/master
| 2023-01-30T16:40:59.681104
| 2023-01-24T00:45:37
| 2023-01-24T00:45:37
| 212,999,245
| 5
| 1
| null | 2022-11-20T12:35:11
| 2019-10-05T12:54:39
|
R
|
UTF-8
|
R
| false
| true
| 1,057
|
rd
|
paste.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paste.R
\name{paste}
\alias{paste}
\alias{paste0}
\title{Concatenate strings}
\usage{
paste(..., sep = " ", collapse = NULL, na.rm = FALSE)
paste0(..., collapse = NULL, na.rm = FALSE)
}
\arguments{
\item{...}{R objects to be converted to character vectors.}
\item{sep}{A character. A string to separate the terms.}
\item{collapse}{A character. A string to separate the results.}
\item{na.rm}{A logical. Whether to remove NA values from the inputs.}
}
\value{
Character vector of concatenated values.
}
\description{
An augmented version of \code{\link[base:paste]{base::paste()}} with options
to manage `NA` values.
}
\examples{
# Base paste() NA handling behavior
paste(
'The', c('red', NA_character_, 'orange'), 'fox jumped', NA_character_, 'over the fence.',
collapse = ' '
)
# Removal of NA values
paste(
'The', c('red', NA_character_, 'orange'), 'fox jumped', NA_character_, 'over the fence.',
collapse = ' ',
na.rm = TRUE
)
}
\seealso{
\code{\link[base]{paste}}
}
|
ed1aa505cf099e0409e70faf7a320dfbab36cd44
|
f53cd52cab7c69718e63da3a5c42ac504e3f4249
|
/R/format_magma_raw.R
|
9dcf257ae892b250fbee9c370cfef9976c7c363d
|
[
"MIT"
] |
permissive
|
mbyvcm/correlation_matrix_for_twas
|
f27ea4a0bb4c30f9712fc6b5f4af90d24a8fa5cc
|
0e4dc0252983eaa056db0b6e8fa42b9c9b80cf2c
|
refs/heads/master
| 2020-03-06T18:26:31.641344
| 2018-03-27T15:31:15
| 2018-03-27T15:31:15
| 127,006,928
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,754
|
r
|
format_magma_raw.R
|
# Christopher Medway
# Requires that correlations between genes have already been calculated
# using twas_matrix_for_magma.R
# Build MAGMA-style "raw" and "covar" files from per-gene correlation CSVs.
#   matrix_dir      - directory of per-gene .csv correlation files
#   pos_file        - FUSION .pos file with gene coordinates (ID, CHR, P0, P1)
#   twas_file       - TWAS results table (ID, TWAS.Z, TWAS.P)
#   sampleSize      - GWAS sample size written into the raw file
#   nparam          - parameter count written into the raw file
#   symbol2EntrezId - path to a symbol->EntrezId mapping (V1 = id, V2 = symbol)
# Writes ./output/twagma_raw/twagma.{raw,covar,missing} and returns the QC
# table (invisible side effects: files removed/recreated on every run).
format_twas_raw_file <- function(matrix_dir, pos_file, twas_file, sampleSize = 100000, nparam = 100, symbol2EntrezId) {
matrix_files <- list.files(matrix_dir, pattern = ".csv", full.names = T)
matrix_names <- basename(gsub(matrix_files, pattern = ".csv", replacement = ""))
pos_df <- read.table(pos_file, header=T, stringsAsFactors = F)
twas_results <- read.table(twas_file, header = T, stringsAsFactors = F)
symbol2EntrezId <- read.table(symbol2EntrezId, header = F)
# filter pos file to include only genes with i) entrezid and ii) valid twas statistic
pos_df <- filter_pos_file(pos_df, twas_results, symbol2EntrezId)
# order pos file (genomic order is required for the upstream-gene bookkeeping)
pos_df <- pos_df[order(pos_df$CHR, pos_df$P0, pos_df$P1),]
#there should be a matrix file for every row in pos file.
if (file.exists("./output/twagma_raw/twagma.missing")) {file.remove("./output/twagma_raw/twagma.missing")}
if (!(all(pos_df$ID %in% matrix_names))) {
warning(paste0("FILE NOT AVAILABLE FOR ALL TWAS GENES"))
write.table(pos_df[!(pos_df$ID %in% matrix_names),], file = "./output/twagma_raw/twagma.missing", row.names = F, col.names = F, quote = F)
}
# loop over pos_file: one RAW/QC/COV triple per gene
returned <- loop_over_files(pos=pos_df, symbol2EntrezId, twas_results, nparam, sampleSize, matrix_dir)
qc <- lapply(seq(length(returned)), function(x) returned[[x]][["QC"]])
qc <- do.call(rbind, qc)
# Raise an error if the QC table is internally inconsistent.
qc_check(qcObject = qc, pos_df = pos_df)
raw <- lapply(seq(length(returned)), function(x) { raw <- returned[[x]][["RAW"]]})
# Genes without data return "" (length-1), valid genes a data frame.
i <- unlist(lapply(raw, function(x) {length(x) > 1}))
if (file.exists("./output/twagma_raw/twagma.raw")) {file.remove("./output/twagma_raw/twagma.raw")}
lapply(raw[i], function(x) {
write.table(x, file = "./output/twagma_raw/twagma.raw", append = T, quote = F, row.names = F, col.names = F, sep = " ")
})
covar <- lapply(seq(length(returned)), function(x) { covar <- returned[[x]][["COV"]]})
i <- unlist(lapply(raw, function(x) {length(x) > 1}))
if (file.exists("./output/twagma_raw/twagma.covar")) {file.remove("./output/twagma_raw/twagma.covar")}
lapply(covar[i], function(x) {
write.table(x, file = "./output/twagma_raw/twagma.covar", append = T, quote = F, row.names = F, col.names = F, sep = " ")
})
return(qc)
}
filter_pos_file <- function(pos_df, twas_results, symbol2EntrezId) {
  # Annotate each gene in pos_df with its Entrez id (via the V2 = symbol,
  # column-1 = id mapping) and its TWAS z-score / p-value, then keep only
  # genes that have both a valid z-score and an Entrez id.
  twas_idx <- match(pos_df$ID, twas_results$ID)
  pos_df$EntrezId <- symbol2EntrezId[match(pos_df$ID, symbol2EntrezId$V2), 1]
  pos_df$twas_z <- twas_results[twas_idx, "TWAS.Z"]
  pos_df$twas_p <- twas_results[twas_idx, "TWAS.P"]
  pos_df <- pos_df[!is.na(pos_df$twas_z), ]
  pos_df <- pos_df[complete.cases(pos_df[, c("twas_z", "EntrezId")]), ]
  # Drop repeated Entrez ids, keeping the first occurrence.
  pos_df <- pos_df[!duplicated(pos_df$EntrezId), ]
  pos_df
}
# For every gene in `pos`, read its per-gene correlation CSV (if present)
# and assemble three pieces: RAW (one MAGMA raw-file row), QC (one status
# row: V1 chr, V2 gene, V3 status, V4 number of upstream genes, V5/V6 the
# upstream gene names and correlations), and COV (one covariate-file row).
# Genes with no CSV or no SNP weights get RAW = "" and COV = "" with a
# non-VALID status.  Returns a list of list(RAW, QC, COV), one per gene.
loop_over_files <- function(pos, symbol2EntrezId, twas_results, nparam, sampleSize, matrix_dir) {
# loop over rows of pos_file
out <- lapply( seq(dim(pos)[1]), function(x) {
# initialise empty df tp store qc info (columns filled as V1..V6 below)
qc <- as.data.frame(matrix(nrow = 1, ncol = 6))
row <- pos[x,]
gene <- row[["ID"]]
chr <- row[["CHR"]]
start <- row[["P0"]]
stop <- row[["P1"]]
qc["V1"] <- row["CHR"]
qc["V2"] <- gene
entrezid <- pos[match(gene, pos$ID),"EntrezId"]
twas_z <- pos[match(gene, pos$ID),"twas_z"]
twas_p <- pos[match(gene, pos$ID),"twas_p"]
# calculate probit transformed p-value - this is how magma does it
twas_probit <- qnorm(twas_p, lower.tail = F)
# p-values = 1 generate "-Inf" - conver to mean
twas_probit[twas_probit == "-Inf"] <- -3.09 # because -Inf will break MAGMA
# read ld file if exists
file <- paste0(matrix_dir,"/",gene,".csv")
if (file.exists(file)) {
df <- read.table(file, header = T, stringsAsFactors = F, sep = ",")
nsnps <- df$GENE1_NSNPS[1]
model <- df$GENE1_MODEL[1]
# check gene has snp weights
if (df$GENE1_NSNPS[1] > 0) {
# if upstream gene(s) exist, remove any upstream genes that are not in pos file
if ("GENE2" %in% names(df) && sum(!(is.na(df$GENE2))) > 0) {
df <- df[df$GENE2 %in% pos$ID,]
}
# after removing invalid upstream genes, check gene has valid upstream genes remaining. This does not warrent elimination of the index gene.
# Just because it has no upstream genes, it maybe upstream of another gene
if ((("COR" %in% names(df))) && sum(!(is.na(df$COR))) > 0) {
validUs <- df[!(is.na(df$COR)),]
qc$V3 <- "VALID"
qc$V4 <- dim(validUs)[1]
qc$V5 <- paste(validUs$GENE2, collapse = ",")
qc$V6 <- paste(validUs$COR, collapse = ",")
# RAW row with trailing upstream-gene correlations (reversed order).
out <- data.frame(
entrezid,
#gene,
chr,
start,
stop,
nsnps,
nparam,
as.integer(sampleSize),
abs(twas_z),
#twas_probit,
paste(rev(abs(validUs$COR)), collapse = " ") # using the absolute correlation
)
} else {
# Valid gene with no usable upstream genes: RAW row without correlations.
qc$V3 <- "VALID"
qc$V4 <- 0
out <- data.frame(
entrezid,
#gene,
chr,
start,
stop,
nsnps,
nparam,
as.integer(sampleSize),
#twas_probit
abs(twas_z)
)
}
# covariate file: one-hot encoding of the prediction model plus statistics
lasso <- as.integer(model == "lasso")
enet <- as.integer(model == "enet")
blup <- as.integer(model == "blup")
bslmm <- as.integer(model == "bslmm")
cov <- data.frame(
"ID" = entrezid,
"NSNPS" = nsnps,
"isLasso" = lasso,
"isEnet" = enet,
"isBlup" = blup,
"isBslmm" = bslmm,
"TWAS.Z" = twas_z,
"ABS.TWAS.Z" = abs(twas_z),
"PROBIT.TWAS.Z" = twas_probit,
"TWAS.P" = twas_p,
"GENE" = gene
)
} else {qc$V3 <- "NO_SNP_WEIGHTS"; out <- ""; cov = ""}
} else {qc$V3 <- "NO_CORR_FILE"; out <- ""; cov = ""}
return(list("RAW" = out, "QC" = qc, "COV" = cov))
})
}
qc_check <- function(qcObject, pos_df) {
  # Sanity checks on the QC table produced by loop_over_files(): for every
  # VALID gene, the upstream genes recorded in its QC row must be exactly
  # the rows immediately preceding it, themselves VALID and present in
  # pos_df.  Any violation aborts with the original error messages.
  by_status <- split(qcObject, qcObject$V3)
  valid <- by_status[["VALID"]]
  lapply(seq(dim(valid)[1]), function(row) {
    rec <- valid[row, ]
    gene <- rec$V2
    n_upstream <- rec[[4]]
    upstream_genes <- unlist(stringr::str_split(rec[5], ","))
    if (n_upstream != 0) {
      upstream_rows <- valid[(row - n_upstream):(row - 1), ]
      if (!(dim(upstream_rows)[1] == n_upstream)) {
        stop("DIFFERENT NUMBERS")
      }
      if (!(all(upstream_rows$V2 == upstream_genes))) {
        stop(paste0(gene, ": CALCULATED UPSTREAM GENES DOES NOT MATCH AVAILABLE UPSTREAM GENES!!"))
      }
      if (!(all(upstream_genes %in% pos_df$ID))) {
        stop(paste0(upstream_genes, " NOT ALL HAVE UPSTREAM GENES ARE VALID"))
      }
      if (!(all(upstream_rows$V3 == "VALID"))) {
        stop(paste0(gene, " NOT ALL UPSTREAM GENES ARE VALID"))
      }
    }
    if (!(gene %in% pos_df$ID)) {
      stop(paste0(gene, "INDEX GENE NOT VALID"))
    }
  })
}
# Command-line interface ---------------------------------------------------
# library() (rather than require()) so a missing optparse aborts loudly
# instead of failing later with "make_option not found".
library("optparse")

option_list <- list(
  make_option(c("-c", "--correlation_files_dir"), type = "character", default = NULL, help = "files containing gene-gene correlations"),
  make_option(c("-p", "--fusion_pos_file"), type = "character", default = NULL, help = "containing gene coordinates (hg19)"),
  make_option(c("-t", "--twas_results_file"), type = "character", default = NULL, help = "twas results file"),
  make_option(c("-n", "--samplesize"), type = "integer", default = 10000, help = "integer giving sample number"),
  make_option(c("-u", "--number_parameters"), type = "integer", default = 100, help = "integer giving parameter number"),
  make_option(c("-m", "--symbol2entrez"), type = "character", default = NULL, help = "filename")
)
opt_parser <- OptionParser(option_list = option_list)
opt <- parse_args(opt_parser)

# Ensure the output directory exists before any file is written.
if (!(dir.exists("./output/twagma_raw"))) {dir.create("./output/twagma_raw")}

x <- format_twas_raw_file(
  matrix_dir = opt$correlation_files_dir,
  pos_file = opt$fusion_pos_file,
  twas_file = opt$twas_results_file,
  symbol2EntrezId = opt$symbol2entrez,
  sampleSize = opt$samplesize,
  nparam = opt$number_parameters
)
|
c1c00b7fd857e230fef9178048eca98e4f0d0383
|
24de0621f2a4ddfdb4696c071923610167a3c742
|
/eda_all.R
|
3319c55adb730e4acef76e2652436a4087d782e8
|
[] |
no_license
|
truongvv/fineco_as2
|
1d85acbc7a3ba208925d23b383c47ad8d3a95946
|
b3f4d015f0ad997e8d88451f7934eba4a49e943d
|
refs/heads/master
| 2020-07-02T16:42:35.472281
| 2019-10-05T01:00:28
| 2019-10-05T01:00:28
| 201,592,492
| 0
| 6
| null | 2019-10-05T01:00:32
| 2019-08-10T06:53:28
|
HTML
|
UTF-8
|
R
| false
| false
| 3,844
|
r
|
eda_all.R
|
# Install hrbrthemes only when it is missing: an unconditional
# install.packages() call re-downloads the package (and requires network
# access) on every run of the script.
if (!requireNamespace("hrbrthemes", quietly = TRUE)) {
  install.packages("hrbrthemes")
}
library(hrbrthemes)
hrbrthemes::import_roboto_condensed()
#John EDA
# NOTE(review): Combi is created elsewhere; index()/coredata() suggest it is
# an xts/zoo time-series object — confirm against the upstream script.
Combi_df <- data.frame(date=index(Combi), coredata(Combi))
Combi_df$date <- as.Date(Combi_df$date)
#Plotting ASX 200 with annotated peak/trough and a linear trend line
ggplot(Combi_df, aes(date, asx)) + geom_line() +
xlab("Date") + ylab("ASX Index") + ggtitle("Value of ASX 200 Index Over time") +
annotate(geom="text", x=as.Date("2009-01-01"), y=7000,
label="Market Peak before 2008 GFC (Sep 2007)") +
annotate(geom="text", x=as.Date("2010-01-01"), y=3200,
label="Market Trough (Jan 2009)") +
coord_cartesian(clip = 'off') +
annotate(geom="point", x=as.Date("2007-09-30"), y=6754, size=8, shape=21, fill="transparent") +
annotate(geom="point", x=as.Date("2009-01-31"), y=3400, size=8, shape=21, fill="transparent") +
geom_smooth(method='lm') +
theme_ipsum()
#Plotting Oil Prices with annotated collapse/spike events
ggplot(Combi_df, aes(date, oil)) + geom_line() +
xlab("Date") + ylab("Oil Price Per Barrel") + ggtitle("Price of Oil Over time") +
annotate(geom="text", x=as.Date("2008-01-01"), y=35,
label="Oil Price Collapse due to GFC") +
annotate(geom="text", x=as.Date("2017-01-01"), y=40,
label="Price Collapse due to Increased Supply") +
annotate(geom="text", x=as.Date("2014-01-01"), y=140,
label="Price Spike due to Geopolitical Factors + Peak Oil Worries") +
coord_cartesian(clip = 'off') +
annotate(geom="point", x=as.Date("2009-01-31"), y=40, size=8, shape=21, fill="transparent") +
annotate(geom="point", x=as.Date("2015-01-31"), y=50, size=8, shape=21, fill="transparent") +
annotate(geom="point", x=as.Date("2008-07-01"), y=133, size=8, shape=21, fill="transparent") +
geom_smooth(method='lm') +
theme_ipsum()
#Gold price over time with annotated events (orange circles)
ggplot(Combi_df, aes(date, gold_price_london_fixing)) + geom_line() +
xlab("Date") + ylab("Gold Price $") + ggtitle("Price of Gold Over time") +
annotate(geom="text", x=as.Date("2008-01-01"), y=1000,
label="ASX 200 Trough GFC") +
annotate(geom="text", x=as.Date("2007-01-01"), y=550,
label="Peak of Market prior to GFC") +
annotate(geom="text", x=as.Date("2011-09-01"), y=1850,
label="Spike Due to Low Interest Rates + Rise of Developing Economies") +
coord_cartesian(clip = 'off') +
annotate(geom="point", x=as.Date("2009-01-31"), y=930, size=8, shape=21, color ="orange", fill="transparent") +
annotate(geom="point", x=as.Date("2007-07-31"), y=650, size=8, shape=21, color ="orange", fill="transparent") +
annotate(geom="point", x=as.Date("2011-09-01"), y=1800, size=8, shape=21, color ="orange", fill="transparent") +
geom_smooth(method='lm') +
theme_ipsum()
#ASX/DJIA: both indices on one panel, colour-coded by series
ggplot(Combi_df, aes(date)) +
geom_line(aes(y=djia, color = "djia")) +
geom_line(aes(y=asx, color = "asx")) +
xlab("Date") + ylab("ASX and DJIA Index's") + ggtitle("ASX 200 Vs DJIA") +
annotate(geom="text", x=as.Date("2009-01-01"), y=7000,
label="Market Peak before 2008 GFC (Sep 2007)") +
annotate(geom="text", x=as.Date("2010-01-01"), y=3200,
label="Market Trough (Jan 2009)") +
annotate(geom="text", x=as.Date("2015-01-01"), y=10000,
label="Market drops due to Chinese Market fluctuations") +
annotate(geom="text", x=as.Date("2019-01-01"), y=28000,
label="Tech Boom - FAANG") +
coord_cartesian(clip = 'off') +
annotate(geom="point", x=as.Date("2007-09-30"), y=6754, size=8, shape=21, fill="transparent") +
annotate(geom="point", x=as.Date("2009-01-31"), y=3400, size=8, shape=21, fill="transparent") +
annotate(geom="point", x=as.Date("2015-09-30"), y=16466, size=8, shape=21, fill="transparent") +
annotate(geom="point", x=as.Date("2015-09-30"), y=5200, size=8, shape=21, fill="transparent") +
annotate(geom="point", x=as.Date("2017-12-31"), y=26000, size=10, shape=21, fill="transparent") +
theme_ipsum()
|
740dcddab32a7344b9ddc0d3569cea40b6893e1c
|
78497d81769d572ef666bf91e24be399e6c57de0
|
/R/C_MatrixW_treeHarvey.R
|
cbd878e2093a55596c1638740a8b7b5b236018e6
|
[] |
no_license
|
kostask84/MS_Tyrannidae_AgeAssemblage
|
112f201a1b9081d7d54afdd3200a2313cd7e2196
|
8dab8dffe3c7269ac13053a8007bbc49e9584bb3
|
refs/heads/master
| 2023-04-12T20:23:57.850187
| 2021-05-05T15:44:18
| 2021-05-05T15:44:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,516
|
r
|
C_MatrixW_treeHarvey.R
|
# loading tree from Harvey ------------------------------------------------
trfn = np(paste("T400F_AOS_HowardMoore.tre", sep=""))
moref(trfn)
tr = ape::read.tree(here::here("data", trfn))
# data with species codes: map tip codes to Howard & Moore species names
spp_codes <- read.csv(here::here("data", "Species_name_map_uids.csv")) # species codes for phylogenetic tree
names_tipHoward <- spp_codes[match(tr$tip.label, spp_codes$tipnamecodes),
"aos.howardmoore.species"]
tr$tip.label <- names_tipHoward
# Convert "Genus species ..." names to "Genus_species" to match W's columns.
names_tipHoward_matchW <- unlist(lapply(strsplit(tr$tip.label, " "), function(x) paste(x[1], x[2], sep = "_")))
tr$tip.label <- names_tipHoward_matchW
# NOTE(review): the next two lines re-read the tree from disk, overwriting
# the tip relabelling performed just above — confirm this is intentional.
moref(trfn)
tr = ape::read.tree(trfn)
# Editing matrix W for Harvey's tree ---------------------------------------
# (W is the site-by-species presence matrix created elsewhere in the project)
W_edit <- W[, - match(c("Zimmerius_improbus", "Phylloscartes_flaviventris", "Phelpsia_inornatus"), colnames(W)
)
]
# Rename three species to the synonyms used in the tree.
W_edit_sub <- W_edit[, match(c("Suiriri_islerorum", "Anairetes_agraphia", "Anairetes_agilis"), colnames(W_edit))]
colnames(W_edit_sub) <- c("Suiriri_affinis", "Uromyias_agraphia", "Uromyias_agilis")
W_edit <- W_edit[, - match(c("Suiriri_islerorum", "Anairetes_agraphia", "Anairetes_agilis"), colnames(W_edit))]
W_edit <- cbind(W_edit, W_edit_sub)
# Collapse the four Onychorhynchus taxa into a single presence column.
Onychorhynchus_coronatus <- ifelse(rowSums(W_edit[, match(c("Onychorhynchus_coronatus", "Onychorhynchus_swainsoni", "Onychorhynchus_mexicanus", "Onychorhynchus_occidentalis"), colnames(W_edit))]) >=1, 1, 0)
W_edit <- W_edit[, - match(c("Onychorhynchus_coronatus", "Onychorhynchus_swainsoni", "Onychorhynchus_mexicanus", "Onychorhynchus_occidentalis"), colnames(W_edit))]
W_edit <- cbind(W_edit, Onychorhynchus_coronatus)
# Collapse the two Xolmis taxa the same way.
Xolmis_rubetra <- ifelse(rowSums(W_edit[, match(c("Xolmis_salinarum", "Xolmis_rubetra"), colnames(W_edit))]) >= 1, 1, 0)
W_edit <- W_edit[, - match(c("Xolmis_salinarum", "Xolmis_rubetra"), colnames(W_edit))]
W_edit <- cbind(W_edit, Xolmis_rubetra)
colnames(W_edit)[which(is.na(match(colnames(W_edit), tr$tip.label)) == TRUE)] # checking: any W columns missing from the tree?
W_edit # corrected matrix for the analysis with Harvey's tree
# saving results ----------------------------------------------------------
# NOTE(review): geofile is defined elsewhere; treedata() presumably prunes
# the tree to the taxa present in geofile — confirm (geiger::treedata).
ct <- treedata(tr, geofile)
ct.tr<-ct$phy
write.tree(ct.tr,file="Tree_TF400Howard_Pruned.tre")
trfn = np(paste("Tree_TF400Howard_Pruned.tre", sep=""))
moref(trfn)
tr = ape::read.tree(here::here("data", trfn))
write.tree(tr,file="Tree_TF400Howard_tip_corrected.tre")
trfn = np(paste("Tree_TF400Howard_tip_corrected.tre", sep=""))
write.table(W_edit, here::here("data", "processed", "W_harvey.txt"))
|
819e81fdcb6ac9c9c0b57cecadd684463db3e601
|
fd1453bda46735d1c348e05c482f639f2222e490
|
/R/01proc-issp.R
|
9ac7807603b3ee1ca5446cc178ae1fc784416011
|
[] |
no_license
|
valentinaandrade/health-inequality
|
a39a0c129ae1408047a531cdaaeef0ef385885f2
|
3955832caf898720f97d77fb3d126aed3e00392b
|
refs/heads/main
| 2023-08-17T02:54:05.531630
| 2021-09-22T10:14:38
| 2021-09-22T10:14:38
| 353,554,695
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,288
|
r
|
01proc-issp.R
|
# Code 0: Preparation -----------------------------------------------------
# Valentina Andrade
# 1. Load libraries --------------------------------------------------------
pacman::p_load(tidyverse, haven,sjPlot,sjlabelled,sjmisc)
# 2. Load data sets ---------------------------------------------------------
## ISSP Module Social Inequlities
# issp99 <- read_dta("input/data/original/ISSP1999.dta") #replaced by cep39
issp99 <- read_spss("input/data/original/cep39mar-abr2000.sav")
# issp09 <- read_dta("input/data/original/ISSP2009.dta") #replaced by cep59
issp09 <- read_spss("input/data/original/cep59may-jun2009.sav")
issp19 <- read_dta("input/data/original/ISSP2019.dta")
# 3. Explore: tag each wave with its year and survey weight -----------------
names(issp99)
issp99 <- issp99 %>% mutate(year = 1999,factor = pond)
issp09 <- issp09 %>% mutate(year = 2009, factor = Fcorr)
issp19 <- issp19 %>% mutate(year = 2019, factor = FACTOR)
# 4. Substancial variables ------------------------------------------------------
# 4.1 Rich people pay more taxes (tax) ------------------------------------------
# Each wave: recode missing codes to NA, reverse the scale, and label an
# ordered factor from "much smaller share" to "much larger share".
## 1999: Variable te8 (missing codes 8/9)
table(issp99$te8)
issp99$tax<- as.numeric(issp99$te8)
issp99 <- issp99 %>% mutate(tax=ifelse(tax %in% c(8,9),NA,tax),
tax=rec(tax,rec = "rev"),
tax=factor(tax,labels = c("Una proporción mucho menor","Una menor proporción","La misma proporción","Una proporción mayor","Una proporción mucho mayor"),ordered = T))
table(issp99$tax)
## 2009: Variable TE2P17_A (missing codes 8/9)
table(issp09$TE2P17_A)
issp09$tax<- as.numeric(issp09$TE2P17_A)
issp09 <- issp09 %>% mutate(tax=ifelse(tax %in% c(8,9),NA,tax),
tax=rec(tax,rec = "rev"),
tax=factor(tax,labels = c("Una proporción mucho menor","Una menor proporción","La misma proporción","Una proporción mayor","Una proporción mucho mayor"),ordered = T))
table(issp09$tax)
## 2019: Variable M2_P8A (missing codes 88/99 in this wave)
sjmisc::find_var(issp19, "8A")
table(issp19$M2_P8A)
issp19$tax<- as.numeric(issp19$M2_P8A)
issp19 <- issp19 %>% mutate(tax=ifelse(tax %in% c(88,99),NA,tax),
tax=rec(tax,rec = "rev"),
tax=factor(tax,labels = c("Una proporción mucho menor","Una menor proporción","La misma proporción","Una proporción mayor","Una proporción mucho mayor"),ordered = T))
table(issp19$tax)
# 4.2 Tax perception ------------------------------------------------------
#1999
# [not available in the 1999 wave]
## 2009
# Variable V37: Q7b Tax: Generally, how would you describe taxes in [Rs country] for those with high incomes?
table(issp09$TE2P17_B)
issp09$taxperc <- as.numeric(issp09$TE2P17_B)
issp09$taxperc=ifelse(issp09$taxperc %in% c(8,9),NA,issp09$taxperc)
issp09 <- issp09 %>% mutate(taxperc=rec(taxperc,rec = "rev"),
taxperc=factor(taxperc,labels = c("Muy bajos","Bajos","Casi lo que corresponde","Altos","Muy altos"),ordered = T))
table(issp09$taxperc)
## 2019: Variable M2_P8B — same tax-perception item, 2019 wave.
# Survey item (Spanish): "En general, ¿cómo describiría Ud. los impuestos en
# Chile hoy en día para las personas con altos ingresos? Los impuestos son…"
# (How would you describe taxes in Chile today for high-income people?)
table(issp19$M2_P8B)
issp19$taxperc <- as.numeric(issp19$M2_P8B)
# 88/99 are the missing-value codes in the 2019 wave (8/9 in earlier waves).
issp19$taxperc <- ifelse(issp19$taxperc %in% c(88, 99), NA, issp19$taxperc)
issp19 <- issp19 %>% mutate(taxperc = rec(taxperc, rec = "rev"),
                            taxperc = factor(taxperc, labels = c("Muy bajos","Bajos","Casi lo que corresponde","Altos","Muy altos"), ordered = TRUE))
# Fix: tabulate the 2019 variable just created (the original tabulated
# issp09$taxperc here, a copy-paste slip from the 2009 section).
table(issp19$taxperc)
# 4.3 Redistribution (red) ----------------------------------------------------------
# Agreement with income redistribution: recode 8/9 to NA, reverse the scale,
# and label an ordered factor from "strongly disagree" to "strongly agree".
## 1999: Variable te7b
issp99$red<- as.numeric(issp99$te7b)
issp99 <- issp99 %>% mutate(red=ifelse(red %in% c(8,9),NA,red),
red=rec(red,rec = "rev"),
red=factor(red,labels = c('Muy en desacuerdo','En desacuerdo','Ni de acuerdo ni desacuerdo','De acuerdo','Muy de acuerdo'),ordered = T))
table(issp99$red)
## 2009: Variable TE2P16_B
table(issp09$TE2P16_B)
issp09$red<- as.numeric(issp09$TE2P16_B)
issp09$red=ifelse(issp09$red %in% c(8,9),NA,issp09$red)
issp09 <- issp09 %>% mutate(red=rec(red,rec = "rev"),
red=factor(red,labels = c('Muy en desacuerdo','En desacuerdo','Ni de acuerdo ni desacuerdo','De acuerdo','Muy de acuerdo'),ordered = T))
table(issp09$red)
## 2019: Variable M2_P4_1
sjmisc::find_var(issp19, "diferencias")
table(issp19$M2_P4_2)
issp19$red<- as.numeric(issp19$M2_P4_2)
issp19$red=ifelse(issp19$red %in% c(8,9),NA,issp19$red)
issp19 <- issp19 %>% mutate(red=rec(red,rec = "rev"),
red=factor(red,labels = c('Muy en desacuerdo','En desacuerdo','Ni de acuerdo ni desacuerdo','De acuerdo','Muy de acuerdo'),ordered = T))
table(issp19$red)
# 4.5 Meritocracy -------------------------------------------------------------
# Shared label set for all "getting ahead" items, taken (reversed) from the
# first 2019 meritocracy item.
labs_perc_merit <- rev(sjlabelled::get_labels(x =issp19$M2_P1_1)[1:5])
# A. wealthy family--------------------------------------------------------------
#1999 - v4
#2009 - V6
#2019 - M2_P1_2
# B. well-educated parents ------------------------------------------------
#1999 - v5
#2009 - V7
#2019 - M2_P1_3
# C. education yourself --------------------------------------------------------------
#1999 -
#2009 - TE2P11_C
issp09$educself <- car:: recode(issp09$TE2P11_C,"c(8,9)=NA")
issp09$educself <-rec(issp09$educself,rec = "rev")
issp09$educself <- factor(x = issp09$educself,labels = labs_perc_merit)
table(issp09$educself)
#2019 - M2_P1_4
issp19$educself <- car:: recode(issp19$M2_P1_4,"c(8,9)=NA")
issp19$educself <-rec(issp19$educself,rec = "rev")
issp19$educself <- factor(x = issp19$educself,labels = labs_perc_merit)
table(issp19$educself)
# D. ambition ------------------------------------------------
#1999 -
#2009 - V9
issp09$ambition <- car:: recode(issp09$TE2P11_D,"c(8,9)=NA")
issp09$ambition <-rec(issp09$ambition,rec = "rev")
issp09$ambition <- factor(x = issp09$ambition,labels = labs_perc_merit)
table(issp09$ambition)
#2019 - M2_P1_1
issp19$ambition <- car:: recode(issp19$M2_P1_1,"c(8,9)=NA")
issp19$ambition <-rec(issp19$ambition,rec = "rev")
issp19$ambition <- factor(x = issp19$ambition,labels = labs_perc_merit)
table(issp19$ambition)
# E. hard work --------------------------------------------------------------
#1999 -
#2009 - V10
issp09$hwork <- car:: recode(issp09$TE2P11_E,"c(8,9)=NA")
issp09$hwork <-rec(issp09$hwork,rec = "rev")
issp09$hwork <- factor(x = issp09$hwork,labels = labs_perc_merit)
table(issp09$hwork)
# 2019 - M2_P1_5 (hard work item, 2019 wave)
issp19$hwork <- car::recode(issp19$M2_P1_5, "c(8,9)=NA")
issp19$hwork <- rec(issp19$hwork, rec = "rev")
issp19$hwork <- factor(x = issp19$hwork, labels = labs_perc_merit)
# Fix: tabulate the 2019 variable just created (the original tabulated
# issp09$hwork here, a copy-paste slip — 2009 was already tabulated above).
table(issp19$hwork)
# F. know right people ------------------------------------------------
#1999 -
#2009 - V11
#2019 - M2_P1_6
# G. political connections --------------------------------------------------------------
#1999 -
#2009 - V12
#2019 - M2_P1_7
# H. giving bribes ------------------------------------------------
#1999 -
#2009 - V13
#2019 - M2_P1_8
# I. person's race ------------------------------------------------
#1999 -
#2009 - V14
#2019 - M2_P1_9
# J. religion ------------------------------------------------
#1999 -
#2009 - V15
#2019 - M2_P1_10
# K. sex ------------------------------------------------
#1999 -
#2009 - V16
#2019 - M2_P1_11
# L. corrupt ------------------------------------------------
#1999 -
#2009 - V17
#2019 -
# M. best school ------------------------------------------------
#1999 -
#2009 - V18
#2019 -
# N. rich university ------------------------------------------------
#1999 -
#2009 - V19
#2019 -
# O. same chances uni ------------------------------------------------
#1999 -
#2009 - V20
#2019 -
# Justicia educacion y salud ------------------------------------------------------------------
# Perceived fairness of income-based access to health care (justsalud) and
# education (justeduca). Per wave: copy the source item, set the wave's
# missing codes to NA, attach the 5-point value labels taken from the 2019
# questionnaire, then reverse the factor order (forcats::fct_rev) so the
# category ordering is consistent across waves.
# 1999 ----------------------------------------------------------------------------------------#
# Value labels are borrowed from the first five categories of the 2019 item.
just_vlabels<- get_labels(issp19$M2_P9A)[1:5]
sjmisc::frq(issp99$te10a)
issp99$justsalud<- issp99$te10a # health
issp99$justsalud[issp99$justsalud %in% c(8,9)] <- NA
issp99$justsalud <- factor(issp99$justsalud,labels=just_vlabels)
issp99$justsalud <- fct_rev(issp99$justsalud)
sjmisc::frq(issp99$justsalud)
sjmisc::frq(issp99$te10b)
issp99$justeduca<- issp99$te10b # education
issp99$justeduca[issp99$justeduca %in% c(8,9)] <- NA
issp99$justeduca <- factor(issp99$justeduca,labels=just_vlabels)
issp99$justeduca <- fct_rev(issp99$justeduca)
sjmisc::frq(issp99$justeduca)
# 2009 ----------------------------------------------------------------------------------------#
sjmisc::frq(issp09$TE2P18_A)
issp09$justsalud<- issp09$TE2P18_A # health
issp09$justsalud[issp09$justsalud %in% c(8,9)] <- NA
issp09$justsalud <- factor(issp09$justsalud,labels=just_vlabels)
issp09$justsalud <- fct_rev(issp09$justsalud)
sjmisc::frq(issp09$justsalud)
sjmisc::frq(issp09$TE2P18_B)
issp09$justeduca<- issp09$TE2P18_B # education
issp09$justeduca[issp09$justeduca %in% c(8,9)] <- NA
issp09$justeduca <- factor(issp09$justeduca,labels=just_vlabels)
issp09$justeduca <- fct_rev(issp09$justeduca)
sjmisc::frq(issp09$justeduca)
# 2019 ----------------------------------------------------------------------------------------#
# The 2019 wave codes missing responses as 88/99 (not 8/9 as earlier waves).
sjmisc::frq(issp19$M2_P9A)
issp19$justsalud<- issp19$M2_P9A # health
issp19$justsalud[issp19$justsalud %in% c(88,99)] <- NA
issp19$justsalud <- factor(issp19$justsalud,labels=just_vlabels)
issp19$justsalud <- fct_rev(issp19$justsalud)
sjmisc::frq(issp19$justsalud)
sjmisc::frq(issp19$M2_P9B)
issp19$justeduca<- issp19$M2_P9B
issp19$justeduca[issp19$justeduca %in% c(88,99)] <- NA
issp19$justeduca <- factor(issp19$justeduca,labels=just_vlabels)
issp19$justeduca <- fct_rev(issp19$justeduca)
sjmisc::frq(issp19$justeduca)
# Escala Izquierda Derecha --------------------------------------------------------------------
# Left-right political self-placement collapsed to five categories:
# 1-2 -> 1 (Right), 3 -> 2 (Centre), 4-5 -> 3 (Left), 6 -> 4 (Independent),
# 7 -> 5 (None); 8/9 -> NA.
pospol_label<- c("Derecha","Centro","Izquierda","Independiente","Ninguna")
#1999
sjmisc::frq(issp99$p7)
issp99$pospol <- issp99$p7
issp99$pospol <- car::recode(issp99$pospol,"c(1,2)=1;3=2;c(4,5)=3;6=4;7=5;c(8,9)=NA")
issp99$pospol <- factor(issp99$pospol,labels = pospol_label)
table(issp99$pospol)
#2009
sjmisc::frq(issp09$MBP16)
issp09$pospol <- issp09$MBP16
issp09$pospol <- car::recode(issp09$pospol,"c(1,2)=1;3=2;c(4,5)=3;6=4;7=5;c(8,9)=NA")
issp09$pospol <- factor(issp09$pospol,labels = pospol_label)
table(issp09$pospol)
# sjmisc::frq(issp09$POS_POL)
#2019
# NOTE(review): this recode maps 8/9 to NA, but other 2019 items in this
# script use 88/99 as missing codes -- confirm against the 2019 codebook
# that MB_P14 really uses 8/9.
sjmisc::frq(issp19$MB_P14)
issp19$pospol <- issp19$MB_P14
issp19$pospol <- car::recode(issp19$pospol,"c(1,2)=1;3=2;c(4,5)=3;6=4;7=5;c(8,9)=NA")
issp19$pospol <- factor(issp19$pospol,labels = pospol_label)
table(issp19$pospol)
# 5.1 Income (pchhinc y pchhinc_a) --------------------------------------------------------------
# Per-capita household income: the midpoint of the reported household-income
# bracket divided by household size (pchhinc). pchhinc_a re-expresses the
# 1999 and 2009 values with the accumulated CPI for December 2018 (186.62);
# 2019 is left unadjusted.
# browseURL(url = "https://www.ine.gub.uy/indicadores?indicadorCategoryId=11421")
## 1999
### Variable hompop: How many persons in household
### rincome (respondent income) and incomer (Family income by decile 1-10)
issp99$hompop <- as.numeric(issp99$dat_26a)
issp99$hompop[issp99$hompop == 99] <- NA  # 99 = missing
issp99$hompop[issp99$hompop == 0] <- 1    # treat "0 persons" as a one-person household
sjmisc::find_var(issp99,"income")
# The original pre-assignment `issp99$income <- as.numeric(issp99$dat_23)`
# was dead code: the mutate() below rebuilds `income` from dat_23 directly.
# NOTE(review): bracket midpoints for codes 11-14 (8,000,000 / 10,750,000 /
# 16,000,000 / 1,500,000) are not monotonically increasing -- verify these
# against the 1999 codebook.
issp99 <- issp99 %>% mutate(income = case_when(
  dat_23 == 1 ~ 45000,
  dat_23 == 2 ~ 105500,
  dat_23 == 3 ~ 135500,
  dat_23 == 4 ~ 165500,
  dat_23 == 5 ~ 195500,
  dat_23 == 6 ~ 225500,
  dat_23 == 7 ~ 265500,
  dat_23 == 8 ~ 340500,
  dat_23 == 9 ~ 495500,
  dat_23 == 10 ~ 800500,
  dat_23 == 11 ~ 8000000,
  dat_23 == 12 ~ 10750000,
  dat_23 == 13 ~ 16000000,
  dat_23 == 14 ~ 1500000,
  dat_23 == 97 ~ NA_real_,
  dat_23 == 98 ~ NA_real_,
  dat_23 == 99 ~ NA_real_),
  pchhinc = income/hompop,
  pchhinc_a = pchhinc*186.62/100) # source: UC public-policy article (line 149 of its dofile)
# [the accumulated 2009 CPI was replaced with the December 2018 value, 186.62] - (JI - 13 Nov 2020)
summary(issp99$pchhinc)
## 2009
### Variable HOMPOP: How many persons in household
### CL_RINC:income specific in Chile
issp09$HOMPOP <- as.numeric(issp09$DDP35)
issp09$HOMPOP[issp09$HOMPOP == 99] <- NA
issp09$HOMPOP[issp09$HOMPOP == 0] <- 1
sjmisc::find_var(issp09,"income")
issp09$income <- as.numeric(issp09$DDP34)
issp09 <- issp09 %>% mutate(income = case_when(
  income == 1 ~ 17500,
  income == 2 ~ 45500,
  income == 3 ~ 67000,
  income == 4 ~ 89500,
  income == 5 ~ 117500,
  income == 6 ~ 158500,
  income == 7 ~ 201500,
  income == 8 ~ 257000,
  income == 9 ~ 324500,
  income == 10 ~ 403000,
  income == 11 ~ 724500,
  income == 12 ~ 1500000,
  income == 13 ~ 2500000,
  income == 14 ~ 3500000,
  income == 99 ~ NA_real_,
  income == 9999998 ~ NA_real_,
  income == 9999999 ~ NA_real_),
  pchhinc = income/HOMPOP,
  pchhinc_a = pchhinc*186.62/100) # [accumulated 2009 CPI replaced with the December 2018 value, 186.62]
summary(issp09$pchhinc)
## 2019
### DS_P34: number of persons in the household
### NOTE(review): the original comment referenced DS_P38 (monthly income
### brackets) but the code reads DS_P39 -- confirm the intended variable.
issp19$HOMPOP <- as.numeric(issp19$DS_P34)
issp19$HOMPOP[issp19$HOMPOP == 99] <- NA
issp19$HOMPOP[issp19$HOMPOP == 0] <- 1
issp19$income <- as.numeric(issp19$DS_P39)
issp19 <- issp19 %>% mutate(income = case_when(
  income == 1 ~ 17500,
  income == 2 ~ 45500,
  income == 3 ~ 67000,
  income == 4 ~ 89500,
  income == 5 ~ 117500,
  income == 6 ~ 158500,
  income == 7 ~ 201500,
  income == 8 ~ 257000,
  income == 9 ~ 324500,
  income == 10 ~ 403000,
  income == 11 ~ 724500,
  income == 12 ~ 1500000,
  income == 13 ~ 2500000,
  income == 14 ~ 3500000,
  income == 98 ~ NA_real_,
  income == 99 ~ NA_real_),
  pchhinc = income/HOMPOP,
  # 2019 values are presumably already near the December-2018 base, so no
  # CPI adjustment is applied -- confirm the intended base year.
  pchhinc_a = pchhinc)
summary(issp19$pchhinc)
# 5. Educ. Level (educ) ---------------------------------------------------------
# Harmonised educational attainment in five ordered categories. Source items
# and codes differ per wave, hence the wave-specific recodes.
# (Idiom fix throughout this section: `as.factor = T` -> `as.factor = TRUE`.)
## 1999: variable dat_6
sjmisc::frq(issp99$dat_6)
issp99$educ <- as.numeric(issp99$dat_6)
issp99$educ <- car::recode(issp99$educ,
                           recodes = c("c(1,2)='No estudió';c(3,4)='Básica completa';c(5,6, 8)='Media completa';9='Superior no universitaria';7='Universitaria completa';99=NA"),
                           as.factor = TRUE,
                           levels = c('No estudió','Básica completa','Media completa','Superior no universitaria','Universitaria completa'))
table(issp99$educ)
## 2009: variable DDP06_ni
table(issp09$DDP06_ni)
# +1 shifts the 2009 codes into the same range as the recode string below;
# the missing code 99 becomes 100 and is mapped to NA.
issp09$educ <- as.numeric(issp09$DDP06_ni+1)
issp09$educ <- car::recode(issp09$educ,
                           recodes = c("c(1,2)='No estudió';c(3,4)='Básica completa';c(5,6,8)='Media completa';9='Superior no universitaria';7='Universitaria completa';100=NA"),
                           as.factor = TRUE,
                           levels = c('No estudió','Básica completa','Media completa','Superior no universitaria','Universitaria completa'))
table(issp09$educ)
## 2019: Variable DS_P4
sjmisc::find_var(issp19, "DS_P4")
table(issp19$DS_P4)
issp19$educ <- as.numeric(issp19$DS_P4)
issp19$educ <- car::recode(issp19$educ,
                           recodes = c("c(0,1)='No estudió';c(2,3)='Básica completa';c(4,5,7)='Media completa';6='Superior no universitaria';c(8,9)='Universitaria completa';99=NA"),
                           as.factor = TRUE,
                           levels = c('No estudió','Básica completa','Media completa','Superior no universitaria','Universitaria completa'))
table(issp19$educ)
# 5. Subjective status (ess) ------------------------------------------------------------------
# v46 Yourself on a scale from top to bottom.
# The 1999 item is reversed with sjmisc::rec; 2009/2019 are used as coded.
## 1999: variable te12
table(issp99$te12)
issp99$te12 <- as.numeric(car::recode(issp99$te12, "c(97,98,99)=NA"))
issp99$ess <- sjmisc::rec(issp99$te12, rec = "rev")
table(issp99$ess)
## 2009: variable TE2P20_A
table(issp09$TE2P20_A)
issp09$ess <- as.numeric(car::recode(issp09$TE2P20_A, "c(88,99)=NA"))
table(issp09$ess)
## 2019: variable M2_P13A
issp19$ess <- as.numeric(car::recode(issp19$M2_P13A, "c(88,98,99)=NA"))
# 5. Age (age) ------------------------------------------------------------------
# Age in five bands; the upper bound differs per wave (oldest respondent).
## 1999: variable dat_2
table(issp99$dat_2)
issp99$age <- as.numeric(issp99$dat_2)
issp99$age <- car::recode(issp99$age,
                          recodes = c("18:29='18-29';30:44='30-44';45:54='45-54';55:64='55-64';65:94='65 o más'"),
                          as.factor = TRUE,
                          levels = c('18-29','30-44','45-54','55-64','65 o más'))
table(issp99$age)
## 2009: variable DDP02
table(issp09$DDP02)
issp09$age <- as.numeric(issp09$DDP02)
issp09$age <- car::recode(issp09$age,
                          recodes = c("18:29='18-29';30:44='30-44';45:54='45-54';55:64='55-64';65:94='65 o más'"),
                          as.factor = TRUE,
                          levels = c('18-29','30-44','45-54','55-64','65 o más'))
table(issp09$age)
## 2019: variable DS_P2_EXACTA
sjmisc::find_var(issp19, "edad")
table(issp19$DS_P2_EXACTA)
issp19$age <- as.numeric(issp19$DS_P2_EXACTA)
issp19$age <- car::recode(issp19$age,
                          recodes = c("18:29='18-29';30:44='30-44';45:54='45-54';55:64='55-64';65:96='65 o más'"),
                          as.factor = TRUE,
                          levels = c('18-29','30-44','45-54','55-64','65 o más'))
table(issp19$age)
# 5. Sex (sex) ------------------------------------------------------------------
## 1999: variable dat_1
table(issp99$dat_1)
issp99$sex <- as.numeric(issp99$dat_1)
issp99$sex <- car::recode(issp99$sex, recodes = c("1='Hombre';2='Mujer'"),
                          as.factor = TRUE,
                          levels = c('Hombre','Mujer'))
table(issp99$sex)
## 2009: variable DDP01
table(issp09$DDP01)
issp09$sex <- as.numeric(issp09$DDP01)
issp09$sex <- car::recode(issp09$sex, recodes = c("1='Hombre';2='Mujer'"),
                          as.factor = TRUE,
                          levels = c('Hombre','Mujer'))
table(issp09$sex)
## 2019: Variable DS_P1
sjmisc::find_var(issp19, "DS_P1")
table(issp19$DS_P1)
issp19$sex <- as.numeric(issp19$DS_P1)
issp19$sex <- car::recode(issp19$sex, recodes = c("1='Hombre';2='Mujer'"),
                          as.factor = TRUE,
                          levels = c('Hombre','Mujer'))
table(issp19$sex)
# Region ------------------------------------------------------------------
# Collapse region of residence into Metropolitan Region (code 13, "RM")
# versus everything else ("No RM"). 2019 adds the newer region codes 14-16.
find_var(issp19, "REGION")
### 1999
issp99 <- issp99 %>%
  mutate(region_rm = case_when(region == 13 ~ "RM",
                               region %in% 1:12 ~ "No RM",
                               TRUE ~ NA_character_))
table(issp99$region_rm)
### 2009
table(issp09$Fregion)
issp09 <- issp09 %>%
  mutate(region_rm = case_when(Fregion == 13 ~ "RM",
                               Fregion %in% 1:12 ~ "No RM",
                               TRUE ~ NA_character_))
table(issp09$region_rm)
### 2019
table(issp19$REGION)
issp19 <- issp19 %>%
  mutate(region_rm = case_when(REGION == 13 ~ "RM",
                               REGION %in% c(1:12, 14:16) ~ "No RM",
                               TRUE ~ NA_character_))
table(issp19$region_rm)
# 6. Merge ISSP 99-09-19 --------------------------------------------------
# Stack the three waves and keep only the harmonised analysis variables.
issp <- bind_rows(issp99, issp09, issp19)
issp <- issp %>%
  select(year, sex, age, educ, region_rm, pospol, ess, pchhinc, pchhinc_a,
         tax, taxperc, red, educself, ambition, hwork, justsalud, justeduca,
         factor)
save(issp, file = "input/data/proc/issp.Rdata")
|
9ecce8905a4147d92acdccdc38d263edaee83acb
|
a59b0019cd455e5c8c59263d5248b388eb235257
|
/tests/testthat/test-residuals.R
|
8f145be9cb83a4c6eccabf9feec9f8275166f29a
|
[
"MIT"
] |
permissive
|
dill/gratia
|
4df529f5e636a0139f5c355b52a2924bebf7aca4
|
26c3ece0e6a6298ab002b02019b0ea482d21dace
|
refs/heads/master
| 2023-04-08T18:35:18.730888
| 2023-03-20T12:52:33
| 2023-03-20T12:52:33
| 160,169,115
| 0
| 0
|
NOASSERTION
| 2018-12-03T09:54:30
| 2018-12-03T09:54:30
| null |
UTF-8
|
R
| false
| false
| 2,604
|
r
|
test-residuals.R
|
## Test partial_residuals() and related residuals functions
## load packages
library("testthat")
library("gratia")
library("mgcv")
library("gamm4")
# Shared fixture: simulated data ("eg1" example) with a fixed seed so the
# tests are reproducible; N is integer so nrow() comparisons are identical.
N <- 400L
df <- data_sim("eg1", n = N, seed = 42)
## fit the model
# The same four-smooth additive model fitted with each supported engine
# (gam, bam, gamm, gamm4) so every method of partial_residuals() is covered.
m <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = df, method = 'REML')
m_bam <- bam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = df, method = 'fREML')
m_gamm <- gamm(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = df)
m_gamm4 <- gamm4(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = df)
# The same contract is checked for every fitting engine: partial_residuals()
# returns (silently) a plain tibble with one named column per smooth and one
# row per observation. Descriptions were previously identical for all four
# tests; they now name the engine so a failure identifies which model broke.
test_that("partial_residuals returns a tibble for a gam", {
  expect_silent(p_res <- partial_residuals(m))
  expect_s3_class(p_res, class = c("tbl_df", "tbl", "data.frame"), exact = TRUE)
  expect_named(p_res, c("s(x0)", "s(x1)", "s(x2)", "s(x3)"))
  expect_identical(nrow(p_res), N)
})
test_that("partial_residuals returns a tibble for a bam", {
  expect_silent(p_res <- partial_residuals(m_bam))
  expect_s3_class(p_res, class = c("tbl_df", "tbl", "data.frame"), exact = TRUE)
  expect_named(p_res, c("s(x0)", "s(x1)", "s(x2)", "s(x3)"))
  expect_identical(nrow(p_res), N)
})
test_that("partial_residuals returns a tibble for a gamm", {
  expect_silent(p_res <- partial_residuals(m_gamm))
  expect_s3_class(p_res, class = c("tbl_df", "tbl", "data.frame"), exact = TRUE)
  expect_named(p_res, c("s(x0)", "s(x1)", "s(x2)", "s(x3)"))
  expect_identical(nrow(p_res), N)
})
test_that("partial_residuals returns a tibble for a gamm4", {
  expect_silent(p_res <- partial_residuals(m_gamm4))
  expect_s3_class(p_res, class = c("tbl_df", "tbl", "data.frame"), exact = TRUE)
  expect_named(p_res, c("s(x0)", "s(x1)", "s(x2)", "s(x3)"))
  expect_identical(nrow(p_res), N)
})
test_that("select works with partial_residuals", {
  # An exact smooth label restricts the output to that single column.
  expect_silent(p_res <- partial_residuals(m, select = "s(x1)"))
  expect_s3_class(p_res, class = c("tbl_df", "tbl", "data.frame"), exact = TRUE)
  expect_named(p_res, "s(x1)")
  expect_identical(nrow(p_res), N)
})
test_that("partial_match selecting works with partial_residuals", {
  # With partial_match = TRUE, the substring "x1" resolves to "s(x1)".
  expect_silent(p_res <- partial_residuals(m, select = "x1", partial_match = TRUE))
  expect_s3_class(p_res, class = c("tbl_df", "tbl", "data.frame"), exact = TRUE)
  expect_named(p_res, "s(x1)")
  expect_identical(nrow(p_res), N)
})
test_that("selecting throws an error if no match", {
  # The expected message spans two lines; expect_error() treats it as a
  # regular expression matched against the signalled condition message.
  err_msg <- "Failed to match any smooths in model `m`.
Try with 'partial_match = TRUE'?"
  expect_error(partial_residuals(m, select = "foo", partial_match = TRUE),
               err_msg)
  expect_error(partial_residuals(m, select = "foo", partial_match = FALSE),
               err_msg)
})
|
145566c350fa2890943cb99f7ee4cc3da5af53d1
|
c5c28143020868ee0ca15f8a99fafffb4c6ea056
|
/tests/testthat/test-xml_children.R
|
d80ee8338c92b1d8b56f0574c6f05acfe3e63e1e
|
[] |
no_license
|
chan0415/xml2
|
37b9c825cf87460722b48f96d43469db80e1c098
|
8bb23483a85389a053897111045a65381a8bc86f
|
refs/heads/master
| 2021-05-06T03:35:27.368023
| 2017-11-22T21:54:19
| 2017-11-22T21:56:28
| 114,906,229
| 0
| 1
| null | 2017-12-20T16:05:41
| 2017-12-20T16:05:40
| null |
UTF-8
|
R
| false
| false
| 1,535
|
r
|
test-xml_children.R
|
context("xml_children")
# Shared fixture: <foo> with two children -- <bar> (which contains <boo/>)
# and <baz/>.
x <- read_xml("<foo> <bar><boo /></bar> <baz/> </foo>")
test_that("xml_child() returns the proper child", {
  # Default is the first child; a numeric search selects by position.
  expect_equal(xml_child(x), xml_children(x)[[1L]])
  expect_equal(xml_child(x, 2), xml_children(x)[[2L]])
})
test_that("xml_child() returns child by name", {
  # A character search is equivalent to the XPath "./<name>" lookup.
  expect_equal(xml_child(x, "baz"), xml_find_first(x, "./baz"))
})
test_that("xml_child() errors if more than one search is given", {
  expect_error(xml_child(x, 1:2), "`search` must be of length 1")
})
test_that("xml_child() errors if search is not numeric or character", {
  expect_error(xml_child(x, TRUE), "`search` must be `numeric` or `character`")
  expect_error(xml_child(x, as.factor("test")), "`search` must be `numeric` or `character`")
  expect_error(xml_child(x, raw(1)), "`search` must be `numeric` or `character`")
  expect_error(xml_child(x, list(1)), "`search` must be `numeric` or `character`")
})
test_that("xml_length", {
  # Counts direct children only: <foo> has two.
  expect_equal(xml_length(x), 2)
  # Vectorised over a nodeset: child counts for foo, bar, boo, baz.
  all <- xml_find_all(x, "//*")
  expect_equal(xml_length(all), c(2, 1, 0, 0))
})
test_that("xml_parent", {
  # unclass() compares the underlying node pointers directly.
  expect_equal(unclass(xml_parent(xml_child(x))), unclass(x))
})
test_that("xml_parents", {
  # Ancestors are returned nearest-first.
  expect_equal(
    xml_name(xml_parents(xml_find_first(x, "//boo"))),
    c("bar", "foo"))
})
test_that("xml_root", {
  # An empty document has no root node yet.
  doc <- xml_new_document()
  # Modernised expectations: expect_is() and expect_that(..., equals(...))
  # are deprecated in testthat; use expect_s3_class()/expect_equal().
  expect_s3_class(xml_root(doc), "xml_missing")
  a <- xml_add_child(doc, "a")
  b <- xml_add_child(doc, "b")
  # Both the nested node and the document itself resolve to the root "a".
  expect_equal(xml_name(xml_root(b)), "a")
  expect_equal(xml_name(xml_root(doc)), "a")
})
|
76573022a6079e9f7f73df441043d6f4053f94d0
|
a5b8eb7b0f3f3c7a9668ee7e07abdb2ef3452cce
|
/sex.ethnicity.grs.may.2018/scripts/describe.grs.mr.publication.plots.180802.R
|
a19ccb81e223dba8cd68a3229037920df9a8bc94
|
[] |
no_license
|
lindgrengroup/causal.relationships.between.obesity.and.leading.causes.of.death.in.men.and.women
|
5073125353b38eed35dc28bc483098e1cfbc89a4
|
c991d84967d913c23c665389b17315cdb04a3a3c
|
refs/heads/master
| 2020-07-08T20:30:17.381304
| 2019-10-14T07:30:02
| 2019-10-14T07:30:02
| 203,767,839
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 58,062
|
r
|
describe.grs.mr.publication.plots.180802.R
|
#!/bin/env Rscript
#$-cwd
# Publication figures and tables for the sex-specific GRS / MR analyses.
library(ggplot2)
library(gridExtra)
library(lattice)
library(grid)
library(grDevices)
library(ggpubr)
# Opens an on-screen graphics device. NOTE(review): the figures below are
# written with ggsave(), which uses its own device -- confirm this X11 call
# is still needed (e.g. for interactive inspection) before removing it.
X11(height = 30, width = 40)
####################################################################
########################## Start with GRSs
####################################################################
for (model in c("grs")) {
  # Forest plot + results table of GRS logistic-regression odds ratios, one
  # output per ancestry group, GWAS dataset (Pulit / GIANT+UKBB) and GRS unit.
  # (Idiom fixes vs. original: T/F -> TRUE/FALSE, seq_along()/seq_len()
  # instead of 1:length()/1:nrow(), and bare column names inside aes()
  # instead of df$col, which is fragile with facets.)
  df_raw <- read.table("../results.logistic.regressions.180514/log.results.table.180627.txt",
                       header = TRUE, stringsAsFactors = FALSE)
  smoking_columns <- unique(df_raw$case_column[grep("_smoking", df_raw$case_column)])
  datasets <- c("pulit", "giukbb")
  units <- "raw_scoresum"
  for (eth_group in c("all.white", "brit.irish")) {
    for (dataset in datasets) {
      for (unit in units) {
        # Drop outcomes not shown in the publication figure.
        df <- df_raw[!(df_raw$case_column %in% c("t1d_cases_prob", "t2d_cases_prob", "smoker_cases", smoking_columns)), ]
        if (dataset == "pulit") {
          comb_groups <- c("bmi.eur.comb.pulit.sig", "whr.eur.comb.pulit.sig", "whradjbmi.eur.comb.pulit.sig")
          male_groups <- c("bmi.eur.men.pulit.sig", "whr.eur.men.pulit.sig", "whradjbmi.eur.men.pulit.sig")
          female_groups <- c("bmi.eur.women.pulit.sig", "whr.eur.women.pulit.sig", "whradjbmi.eur.women.pulit.sig")
        } else if (dataset == "giukbb") {
          comb_groups <- c("bmi.eur.comb.giukbb.sig", "whr.eur.comb.giukbb.sig", "whradjbmi.eur.comb.giukbb.sig")
          male_groups <- c("bmi.eur.men.giukbb.sig", "whr.eur.men.giukbb.sig", "whradjbmi.eur.men.giukbb.sig")
          female_groups <- c("bmi.eur.women.giukbb.sig", "whr.eur.women.giukbb.sig", "whradjbmi.eur.women.giukbb.sig")
        }
        # Subset to only keep the actual analyses: each sex stratum uses its
        # matching (sex-specific or combined) SNP group.
        df <- df[(df$grs_unit == unit & df$eth_group == eth_group) &
                   ((df$sex_group == "comb" & df$snp_group %in% comb_groups) |
                      (df$sex_group == "men" & df$snp_group %in% male_groups) |
                      (df$sex_group == "women" & df$snp_group %in% female_groups)), ]
        # Human-readable outcome names.
        dict_traits <- list(breast_cancer_cases = "Breast Cancer",
                            cad_cases = "CAD",
                            colorectal_cancer_cases = "Colorectal Cancer",
                            copd_cases = "COPD",
                            dementia_cases = "Dementia",
                            lungcancer_cases = "Lung Cancer",
                            renal_failure_cases = "Renal Failure",
                            aki_cases = "Renal Failure - Acute",
                            ckd_cases = "Renal Failure - Chronic",
                            stroke_cases = "Stroke",
                            haem_stroke_cases = "Stroke - Hemorrhagic",
                            isch_stroke_cases = "Stroke - Ischemic",
                            t2d_cases_probposs = "Type 2 Diabetes",
                            t1d_cases_probposs = "Type 1 Diabetes",
                            any_infertility_cases = "Infertility",
                            nafld_cases = "NAFLD",
                            cld_cases = "CLD")
        df$trait <- df$case_column
        df$grs_trait_name <- gsub("\\.(.)+", "", df$snp_group)
        df$sex_group <- ifelse(df$sex_group == "comb", "Combined",
                               ifelse(df$sex_group == "men", "Men",
                                      ifelse(df$sex_group == "women", "Women", "missing")))
        for (i in seq_along(dict_traits)) {
          df$trait <- as.character(replace(df$trait,
                                           df$trait == names(dict_traits[i]), dict_traits[i]))
        }
        # OR (95% CI) display string.
        df$ci <- paste(formatC(df$grs_or, digits = 2, format = "f"), " (",
                       formatC(df$grs_lci_or, digits = 2, format = "f"), ",",
                       formatC(df$grs_uci_or, digits = 2, format = "f"), ")", sep = "")
        # Row order on the plot (top to bottom).
        df$order <- ifelse(df$trait == "Type 2 Diabetes", "17",
                           ifelse(df$trait == "CAD", "16",
                                  ifelse(df$trait == "Breast Cancer", "15",
                                         ifelse(df$trait == "CLD", "14",
                                                ifelse(df$trait == "Colorectal Cancer", "13",
                                                       ifelse(df$trait == "COPD", "12",
                                                              ifelse(df$trait == "Dementia", "11",
                                                                     ifelse(df$trait == "Infertility", "10",
                                                                            ifelse(df$trait == "Lung Cancer", "09",
                                                                                   ifelse(df$trait == "NAFLD", "08",
                                                                                          ifelse(df$trait == "Renal Failure", "07",
                                                                                                 ifelse(df$trait == "Renal Failure - Acute", "06",
                                                                                                        ifelse(df$trait == "Renal Failure - Chronic", "05",
                                                                                                               ifelse(df$trait == "Stroke", "04",
                                                                                                                      ifelse(df$trait == "Stroke - Hemorrhagic", "03",
                                                                                                                             ifelse(df$trait == "Stroke - Ischemic", "02",
                                                                                                                                    ifelse(df$trait == "Type 1 Diabetes", "01",
                                                                                                                                           "missing")))))))))))))))))
        df$grs_trait_name <- ifelse(df$grs_trait_name == "bmi", "BMI",
                                    ifelse(df$grs_trait_name == "whr", "WHR",
                                           ifelse(df$grs_trait_name == "whradjbmi", "WHRadjBMI", NA)))
        # Sortable key: outcome order, then sex stratum (Combined/Men/Women).
        df$unique_combinations <- paste(df$order, "_", df$case_column, "_",
                                        ifelse(df$sex_group == "Combined", "03",
                                               ifelse(df$sex_group == "Men", "02",
                                                      ifelse(df$sex_group == "Women", "01", "missing"))),
                                        df$sex_group, sep = "")
        # Arrowheads mark CIs truncated at the plotted axis range [0.5, 5].
        df$lci_arrow <- ifelse(df$grs_lci_or < 0.5, 0.5, NA)
        df$uci_arrow <- ifelse(df$grs_uci_or > 5, 4, NA)
        title_name <- "Odds ratio (95% CI) per 1 unit higher weighted GRS"
        breaks_number <- c(0.5, 1, 5)
        ylim_number <- c(0.5, 5)
        df$yend_arrow <- df$uci_arrow+1
        # Bonferroni threshold: number of outcomes x number of GRS traits.
        critical_p <- 0.05/(length(unique(df[, "case_column"]))*length(unique(gsub("\\..*", "", df$snp_group))))
        df$sig_or <- ifelse(df$grs_p < critical_p, df$grs_or, NA)
        df$not_sig_or <- ifelse(df$grs_p >= critical_p, df$grs_or, NA)
        # Heterogeneity threshold: sex-specific pairs only (hence /2).
        critical_het_p <- 0.05/(nrow(df[!is.na(df$cochrans_p), ])/2)
        df$sig_heterogeneity <- ifelse(df$cochrans_p < critical_het_p, df$grs_uci_or, NA)
        # --- Build the side table (one wide row per outcome/sex stratum) ---
        print_df <- df[, c("trait", "grs_trait_name", "sex_group", "ci",
                           "grs_p", "cochrans_p", "cochrans_i2", "unique_combinations")]
        # P-value formatting: scientific below 0.001, else fixed decimals.
        print_df$grs_p <- ifelse(print_df$grs_p < 0.001,
                                 as.character(formatC(print_df$grs_p, 1, format = "e")),
                                 ifelse(print_df$grs_p < 0.01,
                                        as.character(formatC(print_df$grs_p, format = "f", 3)),
                                        as.character(formatC(print_df$grs_p, format = "f", 2))))
        print_df$cochrans_p <- ifelse(print_df$cochrans_p < 0.001,
                                      as.character(formatC(print_df$cochrans_p, 1, format = "e")),
                                      ifelse(print_df$cochrans_p < 0.01,
                                             as.character(formatC(print_df$cochrans_p, format = "f", 3)),
                                             as.character(formatC(print_df$cochrans_p, format = "f", 2))))
        print_df_bmi <- subset(print_df, print_df$grs_trait_name == "BMI")
        colnames(print_df_bmi) <- paste("bmi_", colnames(print_df_bmi), sep = "")
        print_df_whr <- subset(print_df, print_df$grs_trait_name == "WHR")
        colnames(print_df_whr) <- paste("whr_", colnames(print_df_whr), sep ="")
        print_df_whradjbmi <- subset(print_df, print_df$grs_trait_name == "WHRadjBMI")
        colnames(print_df_whradjbmi) <- paste("whradjbmi_", colnames(print_df_whradjbmi), sep ="")
        print_df <- merge(print_df_bmi, print_df_whr, by.x = c("bmi_trait", "bmi_sex_group"),
                          by.y = c("whr_trait", "whr_sex_group"))
        print_df <- merge(print_df, print_df_whradjbmi, by.x = c("bmi_trait", "bmi_sex_group"),
                          by.y = c("whradjbmi_trait", "whradjbmi_sex_group"))
        print_df <- print_df[order(print_df$bmi_unique_combinations, decreasing = TRUE),
                             c("bmi_trait", "bmi_sex_group", "bmi_ci", "bmi_grs_p", "bmi_cochrans_p",
                               "whr_ci", "whr_grs_p", "whr_cochrans_p",
                               "whradjbmi_ci", "whradjbmi_grs_p", "whradjbmi_cochrans_p")]
        colnames(print_df) <- c("bold(Outcome)", "bold(Sex-strata)",
                                "BMI OR", "BMI P", "BMI Pheterogeneity",
                                "WHR OR", "WHR P", "WHR Pheterogeneity",
                                "WHRadjBMI OR", "WHRadjBMI P", "WHRadjBMI Pheterogeneity")
        # Keep plain copies of the label columns for the text table output.
        print_df[, c("Outcome", "Sex-strata")] <- print_df[, c("bold(Outcome)", "bold(Sex-strata)")]
        print_df[nrow(print_df)+1, ] <- ""
        # Only print each outcome name once (first of its three sex rows).
        print_df[duplicated(print_df[["bold(Outcome)"]]), "bold(Outcome)"] <- ""
        # Left-hand label grob; the fill vector alternates shading per outcome.
        grob_df_first <- tableGrob(print_df[, 1:2], rows = NULL,
                                   theme = ttheme_minimal(base_size = 9, parse = TRUE, base_family = "Times",
                                                          padding = unit(c(3.5,1.71), "mm"), colhead=list(fg_params=list(hjust=0, x=0.1)),
                                                          core=list(fg_params = list(hjust=0, x=0.1),
                                                                    bg_params = list(fill = c(rep("white", 3),
                                                                                              rep("gray87", 3), "white", rep("gray87", 3), rep("white", 3),
                                                                                              rep("gray87", 3), rep("white", 3), rep("gray87", 3), rep("white", 3),
                                                                                              rep("gray87", 3), rep("white", 3), rep("gray87", 3), rep("white", 3),
                                                                                              rep("gray87", 3), rep("white", 3), rep("gray87", 3), rep("white", 3))))))
        # Forest plot: filled diamonds = Bonferroni-significant, open diamonds
        # = not significant, asterisks = significant sex heterogeneity.
        plot <- ggplot(df, aes(x=unique_combinations, y=grs_or, ymin=grs_lci_or, ymax=grs_uci_or)) +
          # Invisible layer establishes point positions before the stripes.
          geom_point(aes(color = sex_group), color = "white", shape = 18, size = 3, show.legend = FALSE) +
          geom_vline(xintercept = c(45, 41, 35, 29, 23, 17, 11, 5),
                     colour = "grey", alpha = 0.5, size = 16) +
          geom_hline(yintercept = 1, linetype = "dashed") +
          geom_errorbar(size = 0.3, width=.3) +
          geom_point(aes(y = not_sig_or, color = sex_group), shape = 23, fill = "white",
                     size = 1.65, show.legend = FALSE) +
          geom_point(aes(y = sig_or, color = sex_group), shape = 18, size = 2.3, show.legend = FALSE) +
          geom_point(aes(y = sig_heterogeneity), shape = 8, size = 0.7,
                     position = position_nudge(x = 0, y = 0.12)) +
          geom_segment(aes(yend = lci_arrow, y = lci_arrow, xend = unique_combinations), size = 0.1,
                       arrow = arrow(length = unit(0.15, "cm"), type = "closed")) +
          geom_segment(aes(yend = yend_arrow, y = uci_arrow, xend = unique_combinations),
                       size = 0, arrow = arrow(length = unit(0.15, "cm"), type = "closed")) +
          theme_classic() +
          scale_color_brewer(palette = "Dark2") +
          scale_y_continuous(trans = "log",
                             name = title_name,
                             breaks = breaks_number) +
          theme(axis.title.y = element_blank(), axis.title.x = element_text(size = 8, family = "Times"),
                axis.text.y=element_blank(), axis.text = element_text(color = "black", family = "Times"),
                strip.background = element_blank(), strip.text = element_text(face = "bold", family = "Times",
                                                                              vjust = -0.82)) +
          coord_flip(ylim = ylim_number, xlim = c(0, 49.6), expand = FALSE) +
          theme(plot.margin=unit(c(0.61, 0.5, 0, 0), "cm")) +
          facet_grid(. ~grs_trait_name, drop = TRUE) +
          theme(panel.spacing = unit(1, "lines"))
        g <- arrangeGrob(grob_df_first, plot, nrow = 1, widths= c(2.7,3.8))
        output_file <- paste("/Net/fs1/home/linc4222/", model, ".results.", dataset,
                             ".", eth_group, ".", unit, ".180819.jpeg", sep = "")
        ggsave(output_file, g, unit = "cm", height = 21.5,
               width = 14, device = "jpeg")
        print_df[, 1:2] <- print_df[, c("Outcome", "Sex-strata")]
        output_file <- paste("/Net/fs1/home/linc4222/", model, ".binary.outcomes.table.", dataset, ".",
                             eth_group, ".", unit, ".180909.txt", sep = "")
        # Same number of cases for BMI, WHR, and WHRadjBMI since GRSs
        n_cases <- df[df$grs_trait_name == "BMI", c("sex_group", "trait", "n_cases")]
        colnames(n_cases) <- c("sex_group", "trait", "N cases")
        merging_df <- print_df[, c(12, 13, 3:11)]
        merging_df$order <- seq_len(nrow(merging_df))
        n_cases <- merge(merging_df, n_cases, by.x = c("Outcome", "Sex-strata"),
                         by.y = c("trait", "sex_group"), all.x = TRUE)
        n_cases <- n_cases[order(n_cases$order), ]
        n_cases <- n_cases[, c("Outcome", "Sex-strata", "N cases", "BMI OR", "BMI P",
                               "BMI Pheterogeneity", "WHR OR", "WHR P", "WHR Pheterogeneity", "WHRadjBMI OR",
                               "WHRadjBMI P", "WHRadjBMI Pheterogeneity")]
        write.table(n_cases, output_file,
                    quote = FALSE, row.names = FALSE, sep = "\t", na = "-")
      }
    }
  }
}
###################################################################################
####################### PLOT COMPARING THE DIFFERENT APPROACHES ####################
####################################################################################
# Linear-regression GRS results used to compare SNP selection / weighting
# strategies (combined weights, sex-specific, P-heterogeneity filters).
df_raw <- read.table("../results.linear.regressions.180514/anthro.results.table.180521.txt",
                     stringsAsFactors = F, header = T)
# Keep only Pulit-derived and FDR-filtered SNP groups.
pulit_snp_groups <- unique(df_raw$snp_group[grep("pulit|fdr", df_raw$snp_group)])
eth_groups <- c("all.white", "brit.irish")
df_dataset <- df_raw[df_raw$snp_group %in% pulit_snp_groups, ]
for (eth_group in eth_groups) {
  # One two-panel figure per ancestry group: per-SD effect estimates (top)
  # and % trait variance explained (bottom), women vs men side by side.
  df <- df_dataset
  # Keep, per trait and sex stratum, only the SNP groups relevant to that
  # stratum (combined-weight, sex-specific, and P-heterogeneity/FDR sets).
  # (A duplicated "whr.eur.women.0.1.fdr" entry in the original %in% set was
  # removed; duplicates in %in% are inert.)
  df <- df[(df$grs_unit == "raw_scoresum" & df$eth_group == eth_group & df$extra_adjustment == "-" &
              df$trait_unit == "sd") &
             ((df$snp_group %in% c("bmi.eur.comb.pulit.sig") &
                 df$sex_group %in% c("men", "women") &
                 df$trait == "bmi") |
                (df$snp_group %in% c("bmi.eur.men.pulit.phet", "bmi.eur.men.pulit.sig",
                                     "bmi.eur.men.0.01.fdr", "bmi.eur.men.0.05.fdr", "bmi.eur.men.0.1.fdr") &
                   df$sex_group == "men" &
                   df$trait == "bmi") |
                (df$snp_group %in% c("bmi.eur.women.pulit.phet", "bmi.eur.women.pulit.sig",
                                     "bmi.eur.women.0.01.fdr", "bmi.eur.women.0.05.fdr", "bmi.eur.women.0.1.fdr") &
                   df$sex_group == "women" &
                   df$trait == "bmi") |
                (df$snp_group %in% c("whr.eur.comb.pulit.sig") &
                   df$sex_group %in% c("men", "women") &
                   df$trait == "whr") |
                (df$snp_group %in% c("whr.eur.men.pulit.phet", "whr.eur.men.pulit.sig",
                                     "whr.eur.men.0.01.fdr", "whr.eur.men.0.05.fdr", "whr.eur.men.0.1.fdr") &
                   df$sex_group == "men" &
                   df$trait == "whr") |
                (df$snp_group %in% c("whr.eur.women.pulit.phet", "whr.eur.women.pulit.sig",
                                     "whr.eur.women.0.01.fdr", "whr.eur.women.0.05.fdr", "whr.eur.women.0.1.fdr") &
                   df$sex_group == "women" &
                   df$trait == "whr") |
                (df$snp_group %in% c("whradjbmi.eur.comb.pulit.sig") &
                   df$sex_group %in% c("men", "women") &
                   df$trait == "res_whr_inv") |
                (df$snp_group %in% c("whradjbmi.eur.men.pulit.phet", "whradjbmi.eur.men.pulit.sig",
                                     "whradjbmi.eur.men.0.01.fdr", "whradjbmi.eur.men.0.05.fdr",
                                     "whradjbmi.eur.men.0.1.fdr") &
                   df$sex_group == "men" &
                   df$trait == "res_whr_inv") |
                (df$snp_group %in% c("whradjbmi.eur.women.pulit.phet", "whradjbmi.eur.women.pulit.sig",
                                     "whradjbmi.eur.women.0.01.fdr", "whradjbmi.eur.women.0.05.fdr",
                                     "whradjbmi.eur.women.0.1.fdr") &
                   df$sex_group == "women" &
                   df$trait == "res_whr_inv")), ]
  df$sex_group <- ifelse(df$sex_group == "comb", "Combined",
                         ifelse(df$sex_group == "men", "Men",
                                ifelse(df$sex_group == "women", "Women", "missing")))
  # Strip trait/sex prefixes, then translate the remaining suffix into a
  # human-readable strategy label.
  df$instrument <- gsub("bmi\\.eur\\.|whr\\.eur\\.|whradjbmi\\.eur\\.", "", df$snp_group)
  df$instrument <- gsub("men\\.|women\\.", "", df$instrument)
  df$instrument <- ifelse(df$instrument == "comb.pulit.sig", "Combined weights",
                          ifelse(df$instrument == "pulit", "Sex-specific index SNPs only",
                                 ifelse(df$instrument == "pulit.phet", "P-heterogeneity, Bonferroni",
                                        ifelse(df$instrument == "0.01.fdr", "P-heterogeneity, FDR 1%",
                                               ifelse(df$instrument == "0.05.fdr", "P-heterogeneity, FDR 5%",
                                                      ifelse(df$instrument == "0.1.fdr", "P-heterogeneity, FDR 10%",
                                                             ifelse(df$instrument == "pulit.sig", "Sex-specific estimates", "MISSING")))))))
  subset_df <- df[, c("instrument", "sex_group", "trait", "grs_r2", "grs_beta", "grs_lci", "grs_uci")]
  new_df <- subset_df
  # Fix the display order of the strategies on the x axis.
  new_df$instrument <- factor(new_df$instrument, levels = c("Combined weights",
                                                            "P-heterogeneity, Bonferroni",
                                                            "P-heterogeneity, FDR 1%", "P-heterogeneity, FDR 5%", "P-heterogeneity, FDR 10%",
                                                            "Sex-specific estimates", "Sex-specific index SNPs only"))
  new_df <- new_df[order(new_df$instrument), ]
  new_df$unique_combinations <- paste(gsub(" |,|\\.|-|%", "", new_df$instrument), sep = "_")
  new_df$trait <- ifelse(new_df$trait == "bmi", "BMI",
                         ifelse(new_df$trait == "whr", "WHR",
                                ifelse(new_df$trait == "res_whr_inv", "WHRadjBMI", "MISSING")))
  # Wide layout: after the merge, women's columns carry the .x suffix and
  # men's the .y suffix.
  new_df_women <- new_df[new_df$sex_group == "Women", ]
  new_df_men <- new_df[new_df$sex_group == "Men", ]
  new_df <- merge(new_df_women, new_df_men, by = c("instrument", "trait"))
  # Variance explained as a percentage.
  new_df$grs_r2.x <- new_df$grs_r2.x * 100
  new_df$grs_r2.y <- new_df$grs_r2.y * 100
  # Top panel: per-SD estimates with 95% CIs, women nudged left, men right.
  plot1 <- ggplot(new_df, aes(x=instrument, y=grs_beta.x, ymin=grs_lci.x, ymax=grs_uci.x, group = 1)) +
    geom_errorbar(size = 0.3, width=.1, color = "grey28", position = position_nudge(x=-0.1)) +
    geom_errorbar(aes(ymin = grs_lci.y, ymax = grs_uci.y), size = 0.3, width = .1, color = "grey28",
                  position = position_nudge(x = 0.1)) +
    geom_point(shape = 18, color = "#d95f02",
               size = 1.65, show.legend = FALSE, position = position_nudge(x=-0.1)) +
    geom_point(aes(y = grs_beta.y), color = "#7570b3", shape = 18,
               size = 1.65, show.legend = FALSE, position = position_nudge(x = 0.1)) +
    scale_color_brewer(palette = "Dark2") +
    theme(plot.margin=unit(c(1, 2, 0, 1), "cm")) +
    theme(axis.text.x=element_blank()) +
    scale_x_discrete(name = "SNP selection and weighting approach") +
    scale_y_continuous(name = "Estimate (95% CI) in SD-units\nfor respective obesity trait",
                       breaks = c(0, 0.5, 1), labels = c("0", "0.5", "1.0"), limits=c(0, 1.35)) +
    theme(axis.title.x = element_blank(), axis.title.y = element_text(color = "black", family = "Times"),
          axis.text = element_text(color = "black", size = 7, family = "Times"), axis.ticks.x = element_blank(),
          strip.background = element_blank(), strip.text = element_text(face = "bold", family = "Times",
                                                                        vjust = -0.5), axis.text.y = element_text(color = "black", family = "Times")) +
    scale_fill_manual(name="Sex",
                      values=c(Women="#d95f02", Men="#7570b3")) +
    facet_grid(. ~trait, drop = TRUE)
  # Bottom panel: % variance explained as paired bars, shared x axis labels.
  plot2 <- ggplot(new_df, aes(x=instrument, y = grs_beta.x, group = 1)) +
    geom_col(aes(y = grs_r2.x, fill = "Women"), width = 0.2, position = position_nudge(x=-0.1)) +
    geom_col(aes(y = grs_r2.y, fill = "Men"), width = 0.2, position = position_nudge(x=0.1)) +
    theme_gray() +
    scale_color_brewer(palette = "Dark2") +
    # NOTE(review): plot.margin is set again in the larger theme() call
    # below; ggplot keeps the later value, so this line is effectively inert.
    theme(plot.margin=unit(c(1, 2, 1, 0.5), "cm")) +
    theme(axis.text.x=element_text(angle=(-30), hjust = 0, vjust=1, family = "Times")) +
    scale_x_discrete(name = "SNP selection and weighting approach") +
    scale_y_continuous(name = "% Trait variance\nexplained", breaks = c(0, 3, 6), labels = c("0", "3.0", "6.0")) +
    theme(axis.title.x = element_text(size = 10, family = "Times"), plot.margin=unit(c(0.1, 2, 1, 1), "cm"),
          axis.title.y = element_text(family = "Times"),
          axis.text = element_text(color = "black", size = 7, family = "Times", hjust = 0.5),
          panel.grid.minor.y = element_blank(), axis.text.y = element_text(hjust=1, vjust = -0.5, family = "Times"),
          strip.background = element_blank(), strip.text = element_blank(), legend.position = "bottom",
          legend.text = element_text(family = "Times"), legend.title = element_text(family = "Times")) +
    scale_fill_manual(name="Sex",
                      values=c(Women="#d95f02", Men="#7570b3")) +
    facet_grid(. ~trait, drop = TRUE)
  g <- arrangeGrob(plot1, plot2, ncol = 1, heights = c(1,1))
  output_file <- paste("/Net/fs1/home/linc4222/comparison.weight.strategies.separate.facets.", eth_group, ".181019.jpeg", sep = "")
  ggsave(output_file, g, unit = "cm", height = 18,
         width = 15, device = "jpeg")
  output_file <- paste("/Net/fs1/home/linc4222/comparison.weight.strategies.separate.facets.", eth_group, ".181019.pdf", sep = "")
  ggsave(output_file, g, unit = "cm", height = 18,
         width = 15, device = "pdf")
}
######################################################################
###### Make a sort of heatmap and million deaths ##################
######################################################################
# Wide table of MR odds ratios / -log10 p per GRS trait and sex, annotated
# with WHO global death counts per outcome.
df_raw <- read.table("../results.mr.180730/ipd.mr.binary.results.180815.txt",
                     stringsAsFactors = FALSE, header = TRUE)
snp_groups <- unique(df_raw$snp_group[grep("pulit\\.sig", df_raw$snp_group)])
traits <- unique(df_raw$trait[!(grepl("_smoking|nafld_cases|cld_cases|smoker|infertility|prob$", df_raw$trait))])
df_raw <- df_raw[df_raw$eth_group == "all.white" & df_raw$snp_group %in% snp_groups &
                   df_raw$exposure_unit == "sd" & df_raw$outcome_unit == "clin" &
                   df_raw$trait %in% traits & df_raw$sex_group %in% c("men", "women"), ]
# -log10(p) shown only below the Bonferroni threshold (0.05 / 51 tests).
df_raw$log_p <- ifelse(df_raw$grs_p >= (0.05/51), NA, -log10(df_raw$grs_p))
df_raw <- df_raw[, c("grs_trait", "sex_group", "trait", "grs_or", "log_p")]
# Outcome skeleton; extra rows cover outcomes shown only with death counts.
df <- data.frame(trait = c(unique(df_raw$trait), "diabetes", "breast_cancer_cases",
                           "colorectal_cancer_cases", "dementia_cases", "haem_stroke_cases"),
                 stringsAsFactors = FALSE)
# Spread OR / log_p into one pair of columns per GRS trait x sex stratum.
for (trait in unique(df_raw$grs_trait)) {
  for (sex_group in unique(df_raw$sex_group)) {
    df_subset <- df_raw[df_raw$grs_trait == trait & df_raw$sex_group == sex_group, ]
    df_subset[, c("grs_trait", "sex_group")] <- NULL
    colnames(df_subset)[colnames(df_subset) %in% c("grs_or", "log_p")] <-
      paste(trait, sex_group,
            colnames(df_subset)[colnames(df_subset) %in% c("grs_or", "log_p")], sep = "_")
    df <- merge(df, df_subset, by = "trait", all = TRUE)
  }
}
#Number of 1000 deaths per sex, men first, then women.
#Data taken from "2016 Global" sheet of "Global summary estimates"
#downloadable here: http://www.who.int/healthinfo/global_burden_disease/estimates/en/
#on 2018-11-27. In the dict, first the "nice-looking" disease name, then men deaths,
#then women deaths (per 1000), then order in the table
dict_traits <- list(cad_cases = list("Coronary artery disease", "4,955", "4,478", 10.8),
                    copd_cases = list("Chronic obstructive\npulmonary disease", "1,668", "1,373", 7.2),
                    lungcancer_cases = list("Lung cancer", "1,177", 531, 5.4),
                    renal_failure_cases = list("Renal failure - chronic\nand acute", 623, 557, 1.8),
                    aki_cases = list("Acute:", 6, 6, 0.6),
                    ckd_cases = list("Chronic:", 617, 551, 1.2),
                    stroke_cases = list("Stroke - hemorrhagic\nand ischemic", "2,893", "2,887", 9),
                    isch_stroke_cases = list("Ischaemic:", "1,338", "1,473", 8.4),
                    diabetes = list("Diabetes - type 2 and\ntype 1 diabetes", "737", "862", 3.6),
                    t2d_cases_probposs = list("Type 2:", NA, NA, 3.6),
                    t1d_cases_probposs = list("Type 1:", NA, NA, 3))
# ROBUSTNESS FIX: initialise the target columns before the loop. The
# original grew them from NULL via replace(), which yields short vectors
# and can error on the data.frame column assignment.
df$deaths_men <- NA_character_
df$deaths_women <- NA_character_
df$order <- NA_real_
for (i in seq_along(dict_traits)) {
  df$deaths_men <- as.character(replace(df$deaths_men,
                                        df$trait == names(dict_traits[i]), dict_traits[[i]][[2]]))
  df$deaths_women <- as.character(replace(df$deaths_women,
                                          df$trait == names(dict_traits[i]), dict_traits[[i]][[3]]))
  df$order <- replace(df$order,
                      df$trait == names(dict_traits[i]), dict_traits[[i]][[4]])
  df$trait <- as.character(replace(df$trait,
                                   df$trait == names(dict_traits[i]), dict_traits[[i]][[1]]))
}
# Attach the combined diabetes death counts to the two subtype rows.
# BUG FIX: the original matched on "Type 1 diabetes"/"Type 2 diabetes",
# labels that never occur after the dict translation ("Type 1:"/"Type 2:"),
# so the assignment matched zero rows -- confirm intended labels.
df$dm_deaths_men[df$trait %in% c("Type 1:", "Type 2:")] <- 737
df$dm_deaths_women[df$trait %in% c("Type 1:", "Type 2:")] <- 862
subtypes <- c("Acute:", "Chronic:", "Ischaemic:", "Type 2:", "Type 1:")
diseases_investigated <- c("Coronary artery disease:", "Stroke - hemorrhagic\nand ischemic:",
"Ischemic stroke:", "Chronic obstructive\npulmonary disease:", "Lung cancer:", "Type 2 diabetes:",
"Type 1 diabetes:", "Renal failure - chronic\nand acute: ", "Chronic renal failure:",
"Acute renal failure:")
df[df$trait %in% subtypes, c("deaths_men", "deaths_women")] <- NA
df <- df[order(df$order), ]
plot <- ggplot(df, aes(x=order)) +
geom_vline(xintercept = c(1.8, 3, 3.6, 5.4, 7.2, 9, 10.8), colour = "gray87", size = 10) +
geom_point(aes(y=11.5, size=bmi_men_grs_or, fill = bmi_men_log_p), shape = 21) +
geom_point(aes(y=12.5, size=bmi_women_grs_or, fill = bmi_women_log_p), shape = 21) +
geom_point(aes(y=14, size=whr_men_grs_or, fill = whr_men_log_p), shape = 21) +
geom_point(aes(y=15, size=whr_women_grs_or, fill = whr_women_log_p), shape = 21) +
geom_point(aes(y=16.5, size=res_whr_inv_men_grs_or, fill = res_whr_inv_men_log_p), shape = 21) +
geom_point(aes(y=17.5, size=res_whr_inv_women_grs_or, fill = res_whr_inv_women_log_p), shape = 21) +
geom_segment(aes(y =0, yend = (as.integer(gsub(",", "", df$deaths_men))/1000), x = order+0.1, xend = order+0.1),
colour = '#d95f02', size = 2.5) +
geom_segment(aes(y =0, yend = (as.integer(gsub(",", "", df$deaths_women))/1000), x = order-0.1, xend = order-0.1),
colour = '#7570b3', size = 2.5) +
geom_segment(aes(y=0, yend = 5.2, x = 0.02, xend = 0.02)) +
geom_segment(aes(y=0, yend = 0, x = 0, xend = 11.5)) +
geom_text(aes(y=6.3, x=order, label=deaths_men), size = 3, family = "Times", hjust = 1) +
geom_text(aes(y=7.3, x=order, label=deaths_women), size = 3, family = "Times", hjust = 1) +
geom_segment(aes(y =3.5, yend = 4, x = 0.6, xend = 0.6),
colour = '#d95f02', size = 2.5) +
geom_segment(aes(y =3.5, yend = 4, x = 0.4, xend = 0.4),
colour = '#7570b3', size = 2.5) +
geom_text(aes(y=3.5, x= 0.87, label="bold('Sex')"), parse = T, size = 3, family = "Times", hjust = 0) +
geom_text(aes(y=4.1, x= 0.6, label="Men"), size = 2.8, family = "Times", hjust = 0) + # was 2.5
geom_text(aes(y=4.1, x= 0.4, label="Women"), size = 2.8, family = "Times", hjust = 0) + #was 2.5
annotate("text", y=c(0, 7.9), x = rep(12.2, 2), label = c('bold("A. Number of deaths per disease globally/year,")',
'bold("B. Effect of obesity traits on leading mortality causes")'), hjust = 0, size = 3, family = "Times", parse = T) +
annotate("text", y=c(3.72, 12, 14.5, 17), x = rep(11.9, 4), label = c('bold("in 1,000 deaths as estimated by the WHO for 2016")',
'bold("BMI")', 'bold("WHR")', 'bold("WHRadjBMI")'), size = 3, family = "Times", parse = T) +
annotate("text", y=c(5.95, 6.85, 9.2, 11.5, 12.5, 14, 15, 16.5, 17.5), x = rep(11.5, 9),
label = c("Men", "Women", "Investigated disease", rep(c("Men", "Women"), 3)), size = 2.8, family = "Times") + #was 2.5
annotate("text", y=7.95, x = c(10.8, 9, 8.4, 7.2, 5.4, 3.6, 3, 1.8, 1.2, 0.6), label = diseases_investigated, size = 2.8, family = "Times", #was.25
hjust = 0) +
scale_fill_gradient(na.value = "white", low = "yellow1", high="red2", name = "-log10 P") +
scale_x_continuous(name = "WHO\nDisease", breaks = df$order, labels=ifelse(df$trait %in% subtypes, "", df$trait)) +
scale_size_continuous(range = c(min(df[, grep("_grs_or", colnames(df))], na.rm = T)*2.5, max(df[, grep("_grs_or", colnames(df))], na.rm = T)*2.5),
breaks = c(0.5, 1, 2, 3, 4), trans = "log", name = "Odds ratio") +
theme_classic() +
scale_y_continuous(name = "1,000 deaths per year", breaks = c(0, 2.5, 5), labels=c("0", "2,500", "5,000")) +
coord_flip(ylim = c(0,18), xlim = c(0, 15), expand = F) +
labs(title = "WHO\ndisease") +
theme(axis.line.y = element_blank(), axis.line.x = element_line(colour = "white"), axis.text = element_text(color = "black", family = "Times"),
axis.ticks.y = element_blank(), axis.ticks.x = element_line(colour = "black"),
axis.text.y = element_text(face = "bold", family = "Times"), axis.title.y = element_blank(),
legend.title=element_text(size=8, family = "Times", face = "bold"), legend.text=element_text(size=8, family = "Times"),
axis.title.x = element_text(colour = "black", family = "Times", size = 9, hjust = 0.1)) +
theme(plot.title = element_text(face="bold", size = 9, family = "Times", hjust = -0.075, margin=margin(b=-105.5, t = 18))) +
guides(size = guide_legend(order=1))
output_file <- paste("/Net/fs1/home/linc4222/heatmap.of.mr.results.and.million.deaths.181127.jpeg", sep = "")
ggsave(output_file, plot, unit = "cm", height = 16, width = 22.3, device = "jpeg")
output_file <- paste("/Net/fs1/home/linc4222/heatmap.of.mr.results.and.million.deaths.181127.pdf", sep = "")
ggsave(output_file, plot, unit = "cm", height = 16, width = 22.3, device = "pdf")
##############################################################################
############## MAKE PLOTS FOR THESIS AND ARTICLE ###################
################# MR PLOTS ######################################
##############################################################################
# Forest plots of binary-outcome MR results: one plot per SNP-weighting
# dataset x ethnic group x output flavour ("article" vs "thesis" layout).
df_raw <- read.table("../results.mr.180730/ipd.mr.binary.results.180815.txt",
                     stringsAsFactors = F, header =T, sep = "\t")
for (dataset in c("pulit", "giukbb", "fdr.0.01", "fdr.0.05", "fdr.0.1", "phet", "index", "unweighted")) {
# Regex-derived SNP-group families; each `dataset` keeps its own family,
# most combined with the sex-combined pulit groups for comparison.
pulit_snp_groups <- unique(df_raw$snp_group[grep("pulit\\.sig", df_raw$snp_group)])
comb_snp_groups <- unique(df_raw$snp_group[grep("comb\\.pulit\\.sig", df_raw$snp_group)])
giukbb_snp_groups <- unique(df_raw$snp_group[grep("giukbb", df_raw$snp_group)])
fdr.0.01_snp_groups <- unique(df_raw$snp_group[grep("\\.0\\.01\\.fdr", df_raw$snp_group)])
fdr.0.05_snp_groups <- unique(df_raw$snp_group[grep("\\.0\\.05\\.fdr", df_raw$snp_group)])
fdr.0.1_snp_groups <- unique(df_raw$snp_group[grep("\\.0\\.1\\.fdr", df_raw$snp_group)])
phet_snp_groups <- unique(df_raw$snp_group[grep("pulit.phet", df_raw$snp_group)])
index_snp_groups <- unique(df_raw$snp_group[grep("pulit$", df_raw$snp_group)])
unweighted_snp_groups <- unique(df_raw$snp_group[grep("unweighted", df_raw$snp_group)])
if (dataset == "pulit") {
df_dataset <- df_raw[df_raw$snp_group %in% pulit_snp_groups, ]
} else if (dataset == "giukbb") {
df_dataset <- df_raw[df_raw$snp_group %in% giukbb_snp_groups, ]
} else if (dataset == "fdr.0.01") {
df_dataset <- df_raw[df_raw$snp_group %in% c(fdr.0.01_snp_groups, comb_snp_groups), ]
} else if (dataset == "fdr.0.05") {
df_dataset <- df_raw[df_raw$snp_group %in% c(fdr.0.05_snp_groups, comb_snp_groups), ]
} else if (dataset == "fdr.0.1") {
df_dataset <- df_raw[df_raw$snp_group %in% c(fdr.0.1_snp_groups, comb_snp_groups), ]
} else if (dataset == "phet") {
df_dataset <- df_raw[df_raw$snp_group %in% c(phet_snp_groups, comb_snp_groups), ]
} else if (dataset == "index") {
df_dataset <- df_raw[df_raw$snp_group %in% c(index_snp_groups, comb_snp_groups), ]
} else if (dataset == "unweighted") {
df_dataset <- df_raw[df_raw$snp_group %in% c(unweighted_snp_groups), ]
}
for (eth_group in c("all.white", "brit.irish")) {
for (unit in c("sd")) {
for (type in c("article", "thesis")) {
#Subset to relevant analyses
df <- df_dataset
df <- df[df$exposure_unit == unit &
           df$eth_group == eth_group & !(df$trait %in% c("t1d_cases_prob", "t2d_cases_prob", "smoker_cases")) &
           df$function_name == "wald" & df$extra_adjustment == "-", ]
#The first number is for the article order, the second for the thesis order
dict_traits <- list(breast_cancer_cases = c("Breast cancer", "15", "17"),
                    cad_cases = c("CAD", "16", "16"),
                    colorectal_cancer_cases = c("Colorectal cancer", "13", "14"),
                    copd_cases = c("COPD", "12", "13"),
                    dementia_cases = c("Dementia", "11", "12"),
                    lungcancer_cases = c("Lung cancer", "09", "10"),
                    renal_failure_cases = c("Renal failure", "07", "08"),
                    aki_cases = c("Renal failure - acute", "06", "07"),
                    ckd_cases = c("Renal failure - chronic", "05", "06"),
                    stroke_cases = c("Stroke", "04", "05"),
                    haem_stroke_cases = c("Stroke - hemorrhagic", "03", "04", "Stroke - haemorrhagic"),
                    isch_stroke_cases = c("Stroke - ischemic", "02", "03", "Stroke - ischaemic"),
                    t2d_cases_probposs = c("Type 2 diabetes", "17", "01"),
                    t1d_cases_probposs = c("Type 1 diabetes", "01", "02"),
                    any_infertility_cases = c("Infertility", "10", "11"),
                    nafld_cases = c("NAFLD", "08", "09"),
                    cld_cases = c("CLD", "14", "15"))
# NOTE(review): order_thesis is assigned element [2] (the *article* order),
# not element [3] as the dict comment implies — looks like a copy-paste
# slip; confirm whether thesis ordering was intended to differ.
for (i in 1:length(dict_traits)) {
df[df$trait == names(dict_traits[i]), "order_article"] <- dict_traits[names(dict_traits[i])][[1]][2]
df[df$trait == names(dict_traits[i]), "order_thesis"] <- dict_traits[names(dict_traits[i])][[1]][2]
df[df$trait == names(dict_traits[i]), "trait_name"] <- dict_traits[names(dict_traits[i])][[1]][1]
}
df$grs_trait_name <- gsub("\\.(.)+", "", df$snp_group)
df$sex_group <- ifelse(df$sex_group == "comb", "Combined",
                       ifelse(df$sex_group == "men", "Men",
                              ifelse(df$sex_group == "women", "Women", "missing")))
# Pre-formatted "OR (LCI,UCI)" text column for the plot's table area.
df$ci <- paste(formatC(df$grs_or, digits = 2, format = "f")," (",
               formatC(df$grs_lci_or, digits = 2, format = "f"), ",",
               formatC(df$grs_uci_or, digits = 2, format = "f"), ")", sep = "")
df$grs_trait_name <- ifelse(df$grs_trait_name == "bmi", "BMI",
                            ifelse(df$grs_trait_name == "whr", "WHR",
                                   ifelse(df$grs_trait_name == "whradjbmi", "WHRadjBMI", NA)))
# Sortable row key: outcome order, then GRS trait, then sex, so the flipped
# axis lists rows in the intended display order.
df$unique_combinations <- paste(df[, paste("order_", type, sep = "")], "_", df$trait_name, "_",
    ifelse(df$grs_trait_name == "BMI", "03",
           ifelse(df$grs_trait_name == "WHR", "02",
                  ifelse(df$grs_trait_name == "WHRadjBMI", "01", "missing"))), ifelse(df$sex_group == "Combined", "03",
    ifelse(df$sex_group == "Men", "02", ifelse(df$sex_group == "Women", "01", "missing"))),
    df$sex_group, sep = "")
# CIs beyond the plotted range are drawn as arrows instead of bars.
df$lci_arrow <- ifelse(df$grs_lci_or < 0.5, 0.5, NA)
df$uci_arrow <- ifelse(df$grs_uci_or > 6.8, 5.8, NA)
# Bonferroni thresholds: 51 main tests, 48 heterogeneity tests.
critical_p <- 0.05/51
critical_het_p <- 0.05/48
df$original_cochrans_p <- df$cochrans_p
df$original_grs_p <- df$grs_p
# Significant estimates are drawn as filled diamonds, others as open ones.
df$sig_or <- ifelse(df$grs_p < critical_p, df$grs_or, NA)
df$not_sig_or <- ifelse(df$grs_p >= critical_p, df$grs_or, NA)
save <- df
# Format P values for plotmath: scientific "x 10^-k" below 0.01, otherwise
# two decimals; values below 1e-200 printed as "<1.0 x 10^-200".
df[, c("grs_p", "cochrans_p")] <- lapply(df[, c("grs_p", "cochrans_p")],
                                         function(x) ifelse(x < 0.01, paste(formatC(x,
                                                                                    1, format = "e")),
                                                            formatC(x, 2, format = "f")))
df$print_grs_p <- ifelse(df$original_grs_p < (1*10^-200), "\"<1.0 x\"~10^-200",
                         ifelse(df$original_grs_p < 0.01, gsub("^ ", "", paste("\"", gsub("e-0|e-",
                             " x\"~10^-", formatC(df$grs_p, 1, format = "e")), sep = "")),
                             paste("\"", formatC(df$grs_p, 2, format = "f"), "\"", sep = "")))
df$print_cochrans_p <- ifelse(is.na(df$original_cochrans_p), "",
                              ifelse(df$original_cochrans_p < (1*10^-200), "\"<1.0 x\"~10^-200",
                                     ifelse(df$original_cochrans_p < 0.01, gsub("^ ", "",
                                         paste("\"", gsub("e-0|e-", " x\"~10^-", formatC(df$cochrans_p, 1, format = "e")), sep = "")),
                                         paste("\"", formatC(df$original_cochrans_p, 2, format = "f"), "\"", sep = ""))))
df <- df[order(df$unique_combinations, decreasing = T), ]
rownames(df) <- NULL
# Layout constants for the hand-built table/forest composite.
xlim_number <- c(0.3, nrow(df) +1.5)
gray_vline <- nrow(df) - as.integer(rownames(df[df$trait_name %in% c("CAD", "COPD", "NAFLD", "Renal failure - acute",
                                                                    "Stroke", "Type 1 diabetes"), ])) + 1
height <- 22.23
breaks_number <- c(0.5, 1, 5)
title_name <- "Odds Ratio (95% CI) per 1-SD higher obesity trait"
ylim_number <- c(0.015, 100)
col_placement <- c(0.016, 0.08, 0.22, 16, 40, 85, 95)
header_placement <- c(0.016, 0.08, 0.22, 1.1, 5.5, 19, 45)
vjust_number <- ifelse(type == "article", -236,
                       ifelse(type == "thesis", 300, -50))
# Article and thesis currently share the same aesthetics; kept separate for
# easy divergence later.
if (type == "article") {
palette_colors <- "Dark2"
shape_empty <- 23
shape_fill <- 18
font <- "Times"
size_fill <- 2.3
} else if (type == "thesis") {
palette_colors <- "Dark2"
shape_empty <- 23
shape_fill <- 18
font <- "Times"
size_fill <- 2.3
}
# Blank repeated labels so each group name prints only once per block;
# het-P shown once per sex pair, starred when Bonferroni significant.
df[duplicated(df[, c("trait", "grs_trait_name")]), "grs_trait_name"] <- ""
df[duplicated(df$trait_name), "trait_name"] <- ""
df[df$sex_group == "Men", "print_cochrans_p"] <- ""
df$cochran_star <- ifelse(!is.na(df$print_cochrans_p) & df$print_cochrans_p != "" &
                            df$original_cochrans_p < critical_het_p, "*", "")
plot <- ggplot(df, aes(x=unique_combinations, y=grs_or, ymin=grs_lci_or, ymax=grs_uci_or)) +
  geom_point(aes(color = sex_group), color = "white", shape = shape_fill, size = 3, show.legend =F) +
  geom_vline(xintercept = gray_vline,
             colour = "gray87", size = 4) +
  geom_segment(aes(yend = 1, y = 1, xend = 0, x = nrow(df) + 0.5), linetype = "dashed", size = 0.3) +
  geom_segment(aes(yend = breaks_number[1], y = breaks_number[1], xend = 0, x = nrow(df) + 0.5), size = 0.3) +
  geom_segment(aes(yend = breaks_number[1], y = breaks_number[3], xend = 0.4, x = 0.4), size = 0.3) +
  geom_errorbar(size = 0.3, width=.3) +
  geom_point(aes(y = df$not_sig_or, color = sex_group), shape = shape_empty, fill = "white", size = 1.65,
             show.legend =F) +
  geom_point(aes(y = df$sig_or, color = sex_group), shape = shape_fill, size = size_fill, show.legend =F) +
  geom_segment(aes(yend = lci_arrow, y = lci_arrow, xend = unique_combinations), size = 0.1,
               arrow = arrow(length = unit(0.15, "cm"), type = "closed")) +
  geom_segment(aes(yend = (uci_arrow+1), y = uci_arrow, xend = unique_combinations), size = 0.1,
               arrow = arrow(length = unit(0.15, "cm"), type = "closed")) +
  geom_text(aes(y = col_placement[1], x = unique_combinations), label = df$trait_name, family = font, size = 2.85, hjust = 0) +
  geom_text(aes(y = col_placement[2], x = unique_combinations), label = df$grs_trait_name, family = font, size = 2.85, hjust = 0) +
  geom_text(aes(y = col_placement[3], x = unique_combinations), label = df$sex_group, family = font, size = 2.85, hjust = 0) +
  geom_text(aes(y = col_placement[4], x = unique_combinations), label = df$ci, family = font, size = 2.85, hjust = 1) +
  geom_text(aes(y = col_placement[5], x = unique_combinations), label = df$print_grs_p, parse = T, family = font, size = 2.85, hjust = 1) +
  geom_text(aes(y = col_placement[6], x = unique_combinations), label = df$print_cochrans_p, parse = T, family = font, size = 2.85, hjust = 1, vjust = -0.5) +
  geom_text(aes(y = col_placement[7], x = unique_combinations), label = df$cochran_star, family = font, size = 3, hjust = 1, vjust = -0.5) +
  annotate("text", y = header_placement, x = rep(nrow(df) +1, length(header_placement)),
           label = c("bold(Outcome)", "bold(\"Risk factor\")", "bold(Sex)", "bold(Estimate)", "bold(\"OR (95% CI)\")",
                     "bold(P)", "bold(P[het])"), parse = T, size = 2.85, hjust = 0, family = font, fontface = "bold") +
  theme_void() +
  scale_color_brewer(palette = palette_colors) +
  scale_y_continuous(trans = "log", name = title_name, breaks = breaks_number) +
  theme(axis.text.y=element_blank(), axis.text = element_text(color = "black", size = 8, family = font)) +
  coord_flip(ylim =ylim_number, xlim = xlim_number, expand = F) +
  theme(plot.margin=unit(c(-0.35, 0, 0.4, 0), "cm")) +
  ggtitle(title_name) +
  theme(plot.title = element_text(family = font, size = 8, vjust = vjust_number, hjust = 0.55, face = "bold"))
# Article output is PDF, thesis output is JPEG.
output_file <- paste("/Net/fs1/home/linc4222/new.mr.results.", type, ".", dataset, ".", eth_group, ".", unit,
                     ifelse(type == "article", ".180816.pdf", ".180816.jpeg"), sep = "")
ggsave(output_file, plot, unit = "cm", height = height, width = 15.3, dpi = 800,
       device = ifelse(type == "article", "pdf", "jpeg"))
}
}
}
}
##############################################################################################
############## MAKE IMAGE WITH SIG. SEX-SPECIFIC ANALYSES ONLY + WINNERS SNPS ##############
#############################################################################################
# Compact forest plots of the sensitivity ("winner" SNP-set) MR analyses,
# restricted to outcomes with evidence of sex heterogeneity. Three weight
# strategies: combined-weights, sex-specific weights, unweighted.
for (type in c("winner_comb", "winner_sex", "winner_unweighted")) {
df_raw <- read.table("../results.mr.180730/sens.ipd.mr.binary.results.180815.txt",
                     stringsAsFactors = F, header =T, sep = "\t")
if (type == "winner_comb") {
df <- df_raw[df_raw$eth_group == "all.white" & df_raw$exposure_unit == "sd" &
               df_raw$snp_group %in% unique(df_raw$snp_group[grep("\\.comb\\.pulit\\.winner$", df_raw$snp_group)]) &
               !is.na(df_raw$cochrans_p) &
               !(df_raw$trait %in% c("smoker_cases", "t2d_cases_prob")), ]
} else if (type == "winner_sex") {
# NOTE(review): the pattern "men\\.pulit\\.winner$" also matches groups
# ending in "women.pulit.winner" — presumably intentional (both sexes).
df <- df_raw[df_raw$eth_group == "all.white" & df_raw$exposure_unit == "sd" &
               df_raw$snp_group %in% unique(df_raw$snp_group[grep("men\\.pulit\\.winner$", df_raw$snp_group)]) &
               !is.na(df_raw$cochrans_p) &
               !(df_raw$trait %in% c("smoker_cases", "t2d_cases_prob")), ]
} else if (type == "winner_unweighted") {
df <- df_raw[df_raw$eth_group == "all.white" & df_raw$exposure_unit == "sd" &
               df_raw$snp_group %in% unique(df_raw$snp_group[grep("\\.comb\\.pulit\\.winner_unweighted", df_raw$snp_group)]) &
               !is.na(df_raw$cochrans_p) &
               !(df_raw$trait %in% c("smoker_cases", "t2d_cases_prob")), ]
}
# Display name and row order for the four heterogeneous outcomes.
dict_traits <- list(copd_cases = c("COPD", "3"),
                    renal_failure_cases = c("Renal failure", "2"),
                    ckd_cases = c("Renal failure - chronic", "1"),
                    t2d_cases_probposs = c("Type 2 diabetes", "4"))
for (i in 1:length(dict_traits)) {
df[df$trait == names(dict_traits[i]), "order_article"] <- dict_traits[names(dict_traits[i])][[1]][2]
df[df$trait == names(dict_traits[i]), "trait_name"] <- dict_traits[names(dict_traits[i])][[1]][1]
}
df$grs_trait_name <- gsub("\\.(.)+", "", df$snp_group)
df$sex_group <- ifelse(df$sex_group == "comb", "Combined",
                       ifelse(df$sex_group == "men", "Men",
                              ifelse(df$sex_group == "women", "Women", "missing")))
df$ci <- paste(formatC(df$grs_or, digits = 2, format = "f")," (",
               formatC(df$grs_lci_or, digits = 2, format = "f"), ",",
               formatC(df$grs_uci_or, digits = 2, format = "f"), ")", sep = "")
df$grs_trait_name <- ifelse(df$grs_trait_name == "bmi", "BMI",
                            ifelse(df$grs_trait_name == "whr", "WHR",
                                   ifelse(df$grs_trait_name == "whradjbmi", "WHRadjBMI", NA)))
# Sortable row key (outcome order, GRS trait, sex) for the flipped axis.
df$unique_combinations <- paste(df$order_article, "_", df$trait_name, "_", ifelse(df$grs_trait_name == "BMI", "03",
    ifelse(df$grs_trait_name == "WHR", "02",
           ifelse(df$grs_trait_name == "WHRadjBMI", "01", "missing"))), ifelse(df$sex_group == "Combined", "03",
    ifelse(df$sex_group == "Men", "02", ifelse(df$sex_group == "Women", "01", "missing"))),
    df$sex_group, sep = "")
# Same Bonferroni thresholds as the main analysis (51 tests / 48 het tests).
critical_p <- 0.05/51
critical_het_p <- 0.05/48
df$original_cochrans_p <- df$cochrans_p
df$original_grs_p <- df$grs_p
df$sig_or <- ifelse(df$grs_p < critical_p, df$grs_or, NA)
df$not_sig_or <- ifelse(df$grs_p >= critical_p, df$grs_or, NA)
# Plotmath-formatted P values (scientific below 0.01, floor at 1e-200).
df[, c("grs_p", "cochrans_p")] <- lapply(df[, c("grs_p", "cochrans_p")],
                                         function(x) ifelse(x < 0.01, paste(formatC(x,
                                                                                    1, format = "e")),
                                                            formatC(x, 2, format = "f")))
df$print_grs_p <- ifelse(df$original_grs_p < (1*10^-200), "\"<1.0 x\"~10^-200",
                         ifelse(df$original_grs_p < 0.01, gsub("^ ", "",
                             paste("\"", gsub("e-0|e-", " x\"~10^-", formatC(df$grs_p, 1, format = "e")), sep = "")),
                             paste("\"", formatC(df$grs_p, 2, format = "f"), "\"", sep = "")))
df$print_cochrans_p <- ifelse(is.na(df$original_cochrans_p), "",
                              ifelse(df$original_cochrans_p < (1*10^-200), "\"<1.0 x\"~10^-200",
                                     ifelse(df$original_cochrans_p < 0.01, gsub("^ ", "",
                                         paste("\"", gsub("e-0|e-", " x\"~10^-", formatC(df$cochrans_p, 1, format = "e")), sep = "")),
                                         paste("\"", formatC(df$original_cochrans_p, 2, format = "f"), "\"", sep = ""))))
df <- df[order(df$unique_combinations, decreasing = T), ]
rownames(df) <- NULL
# Layout constants for the table/forest composite.
xlim_number <- c(0.3, nrow(df) +1.5)
breaks_number <- c(0.5, 1, 5)
title_name <- "Odds Ratio (95% CI) per 1-SD higher obesity trait"
ylim_number <- c(0.015, 100)
col_placement <- c(0.016, 0.08, 0.22, 16, 40, 85, 95)
header_placement <- c(0.016, 0.08, 0.22, 1.1, 6.25, 19.8, 45.1)
shape_empty <- 23
shape_fill <- 18
font <- "Times"
size_fill <- 2.3
save <- df
df[duplicated(df[, c("trait", "grs_trait_name")]), "grs_trait_name"] <- ""
df[duplicated(df$trait_name), "trait_name"] <- ""
df[df$sex_group == "Men", "print_cochrans_p"] <- ""
df$cochran_star <- ifelse(!is.na(df$print_cochrans_p) & df$print_cochrans_p != "" &
                            df$original_cochrans_p < critical_het_p, "*", "")
plot <- ggplot(df, aes(x=unique_combinations, y=grs_or, ymin=grs_lci_or, ymax=grs_uci_or)) +
  geom_point(aes(color = sex_group), color = "white", shape = shape_fill, size = 3, show.legend =F) +
  geom_segment(aes(yend = 1, y = 1, xend = 0, x = nrow(df) + 0.5), linetype = "dashed", size = 0.3) +
  geom_segment(aes(yend = breaks_number[1], y = breaks_number[1], xend = 0, x = nrow(df) + 0.5), size = 0.3) +
  geom_segment(aes(yend = breaks_number[1], y = breaks_number[3], xend = 0.4, x = 0.4), size = 0.3) +
  geom_errorbar(size = 0.3, width=.3) +
  geom_point(aes(y = df$not_sig_or, color = sex_group), shape = shape_empty, fill = "white", size = 1.65, show.legend =F) +
  geom_point(aes(y = df$sig_or, color = sex_group), shape = shape_fill, size = size_fill, show.legend =F) +
  geom_text(aes(y = col_placement[1], x = unique_combinations), label = df$trait_name, family = font, size = 2.85, hjust = 0) +
  geom_text(aes(y = col_placement[2], x = unique_combinations), label = df$grs_trait_name, family = font, size = 2.85, hjust = 0) +
  geom_text(aes(y = col_placement[3], x = unique_combinations), label = df$sex_group, family = font, size = 2.85, hjust = 0) +
  geom_text(aes(y = col_placement[4], x = unique_combinations), label = df$ci, family = font, size = 2.85, hjust = 1) +
  geom_text(aes(y = col_placement[5], x = unique_combinations), label = df$print_grs_p, parse = T, family = font, size = 2.85, hjust = 1) +
  geom_text(aes(y = col_placement[6], x = unique_combinations), label = df$print_cochrans_p, parse = T, family = font, size = 2.85,
            hjust = 1, vjust = -0.5) +
  geom_text(aes(y = col_placement[6]+8, x = unique_combinations), label = df$cochran_star, family = font, size = 3, hjust = 1,
            vjust = -0.5) +
  annotate("text", y = header_placement, x = rep(nrow(df) +1, length(header_placement)),
           label = c("bold(Outcome)", "bold(\"Risk factor\")", "bold(Sex)", "bold(Estimate)", "bold(\"OR (95% CI)\")",
                     "bold(P)", "bold(P[het])"), parse = T, size = 2.85, hjust = 0, family = font, fontface = "bold") +
  theme_void() +
  scale_colour_manual(values=c("#d95f02", "#7570b3")) +
  # NOTE(review): trailing comma before ")" below looks accidental —
  # confirm it does not error on the ggplot2 version in use.
  scale_y_continuous(trans = "log", name = title_name, breaks = breaks_number, ) +
  theme(axis.text.y=element_blank(), axis.text = element_text(color = "black", size = 8, family = font)) +
  coord_flip(ylim =ylim_number, xlim = xlim_number, expand = F) +
  theme(plot.margin=unit(c(-0.35, 0, 0.4, 0), "cm")) +
  ggtitle(title_name) +
  theme(plot.title = element_text(family = font, size = 8, vjust = -50.5, hjust = 0.55, face = "bold"))
output_file <- paste0("/Net/fs1/home/linc4222/grs.pic.sex.het.mr.results.pulit.eur.", type, ".sd.180816.pdf")
ggsave(output_file, plot, unit = "cm", height = 5, width = 17.3, dpi = 800, device = "pdf")
}
##################################################################
########### PLOT OF THE MRs WITH THE RISK FACTORS ################
##################################################################
# Forest plots of MR estimates on intermediate risk factors: continuous
# outcomes (fasting glucose/insulin from summary MR, SBP/DBP from IPD MR)
# and the binary smoking outcome, merged into a common column layout.
#The FG, FI MRs - subset to the pulit.sig, IVW method
summary <- read.table("../results.mr.180730/summary.mr.results.180730.txt",
                      stringsAsFactors = F, header = T, sep = "\t")
summary$grs_trait <- gsub("\\.(.)+", "", summary$snp_group)
summary <- summary[summary$snp_group %in% unique(summary$snp_group[grep("pulit.sig", summary$snp_group)]) &
                     summary$method == "IVW", c("grs_trait", "sex_group", "trait", "beta", "se", "beta_lci", "beta_uci", "p", "cochrans_p")]
#The SBP, DBP MRs
bp <- read.table("../results.mr.180730/ipd.mr.continuous.results.180815.txt", stringsAsFactors = F, header =T, sep = "\t")
bp <- bp[bp$snp_group %in% unique(bp$snp_group[grep("pulit.sig", bp$snp_group)]) &
           bp$trait %in% c("dbp", "sbp") & bp$exposure_unit == "sd" & bp$outcome_unit == "sd" & bp$eth_group == "all.white",
         c("grs_trait", "sex_group", "trait", "grs_beta", "grs_se", "grs_lci_beta", "grs_uci_beta", "grs_p", "cochrans_p")]
#Merge summary FG and FI with BP
colnames(bp) <- colnames(summary)
summary_bp <- rbind(summary, bp)
#The smoking MRs - NOTE THAT IT'S NOT BETA, BUT OR!!! Just to make plotting easier with same headings
smok <- read.table("../results.mr.180730/ipd.mr.binary.results.180815.txt", stringsAsFactors = F, header =T, sep = "\t")
smok <- smok[smok$snp_group %in% unique(smok$snp_group[grep("pulit.sig", smok$snp_group)]) &
               smok$trait == "smoker_cases" & smok$exposure_unit == "sd" & smok$eth_group == "all.white",
             c("grs_trait", "sex_group", "trait", "grs_or", "grs_se", "grs_lci_or", "grs_uci_or", "grs_p", "cochrans_p")]
colnames(smok) <- colnames(summary_bp)
# Two output figures: continuous risk factors ("cont") and smoking ("smok").
for (risk_factor in c("cont", "smok")) {
if (risk_factor == "cont") {
df <- summary_bp
} else if (risk_factor == "smok") {
df <- smok
}
# Display names and row order for the risk-factor outcomes.
dict_traits <- list(FG = c("FG", 4),
                    FI = c("FI", 3),
                    dbp = c("DBP", 1),
                    sbp = c("SBP", 2),
                    smoker_cases = c("Smoker", 5))
for (i in 1:length(dict_traits)) {
df[df$trait == names(dict_traits[i]), "order"] <- dict_traits[names(dict_traits[i])][[1]][2]
df[df$trait == names(dict_traits[i]), "trait_name"] <- dict_traits[names(dict_traits[i])][[1]][1]
}
# Note the extra "res_whr_inv" spelling used by the IPD results files.
df$grs_trait_name <- ifelse(df$grs_trait == "bmi", "BMI",
                            ifelse(df$grs_trait == "whr", "WHR",
                                   ifelse(df$grs_trait == "res_whr_inv", "WHRadjBMI",
                                          ifelse(df$grs_trait == "whradjbmi", "WHRadjBMI", NA))))
df$sex_group_name <- ifelse(df$sex_group == "comb", "Combined",
                            ifelse(df$sex_group == "men", "Men",
                                   ifelse(df$sex_group == "women", "Women", "missing")))
#Note that for smoking is OR
df$ci <- paste(formatC(df$beta, digits = 2, format = "f")," (",
               formatC(df$beta_lci, digits = 2, format = "f"), ",",
               formatC(df$beta_uci, digits = 2, format = "f"), ")", sep = "")
df$unique_combinations <- paste(df$order, "_", df$trait, "_", ifelse(df$grs_trait_name == "BMI", "03",
    ifelse(df$grs_trait_name == "WHR", "02",
           ifelse(df$grs_trait_name == "WHRadjBMI", "01", "missing"))), ifelse(df$sex_group_name == "Combined", "03",
    ifelse(df$sex_group_name == "Men", "02", ifelse(df$sex_group_name == "Women", "01", "missing"))),
    df$sex_group, sep = "")
# Bonferroni for the 15 risk-factor tests (both main and heterogeneity).
critical_p <- 0.05/15
critical_het_p <- 0.05/15
df$original_cochrans_p <- df$cochrans_p
df$original_grs_p <- df$p
df$sig_or <- ifelse(df$p < critical_p, df$beta, NA)
df$not_sig_or <- ifelse(df$p >= critical_p, df$beta, NA)
# Plotmath-formatted P values (scientific below 0.01, floor at 1e-200).
df[, c("p", "cochrans_p")] <- lapply(df[, c("p", "cochrans_p")],
                                     function(x) ifelse(x < 0.01, paste(formatC(x,
                                                                                1, format = "e")),
                                                        formatC(x, 2, format = "f")))
df$print_grs_p <- ifelse(df$original_grs_p < (1*10^-200), "\"<1.0 x\"~10^-200",
                         ifelse(df$original_grs_p < 0.01,
                             gsub("^ |^  ", "", paste("\"", gsub("e-0|e-", " x\"~10^-", formatC(df$p, 1, format = "e")), sep = "")),
                             gsub(" ", "", paste("\"", formatC(df$p, 2, format = "f"), "\"", sep = ""))))
df$print_cochrans_p <- ifelse(is.na(df$original_cochrans_p), "",
                              ifelse(df$original_cochrans_p < (1*10^-200), "\"<1.0 x\"~10^-200",
                                     ifelse(df$original_cochrans_p < 0.01, gsub("^ ", "",
                                         paste("\"", gsub("e-0|e-", " x\"~10^-", formatC(df$cochrans_p, 1, format = "e")), sep = "")),
                                         paste("\"", formatC(df$original_cochrans_p, 2, format = "f"), "\"", sep = ""))))
df$cochrans_p_star <- ifelse(df$original_cochrans_p < critical_het_p, "*", "")
df <- df[order(df$unique_combinations, decreasing = T), ]
rownames(df) <- NULL
save <- df
df[duplicated(df[, c("trait", "grs_trait_name")]), "grs_trait_name"] <- ""
df[duplicated(df$trait_name), "trait_name"] <- ""
df[df$sex_group == "men", c("print_cochrans_p", "cochrans_p_star")] <- ""
# Layout constants differ between the two figures: linear beta axis with
# reference line at 0 for "cont", log OR axis with reference at 1 for "smok".
if (risk_factor == "cont") {
dashed_line_place <- 0
axis_ends <- c(-0.02, 0.35)
axis_xend <- 0
xlim_number <- c(-0.5, nrow(df) + 1.5)
ylim_number <- c(-0.5, 0.9)
breaks_number <- c(0, 0.1, 0.2, 0.3)
estimate_name <- "bold(\"Beta (95% CI)\")"
col_placement <- c(-0.5, -0.35, -0.15, 0.55, 0.7, 0.82)
header_placement <- c(-0.5, -0.35, -0.15, 0.17, 0.405, 0.595, 0.77)
trans <- "identity"
height <- 15
vline <- c(10:18, 28:36)
estimate_label <- "Beta (95% CI) per 1-SD higher obesity trait"
estimate_vjust <- -148
estimate_hjust <- 0.47
} else if (risk_factor == "smok") {
dashed_line_place <- 1
axis_ends <- c(0.95, 1.6)
axis_xend <- 0.4
xlim_number <- c(0.3, nrow(df) +3)
ylim_number <- c(0.4, 5)
breaks_number <- c(1, 1.5)
estimate_name <- "bold(\"Odds Ratio (95% CI)\")"
col_placement <- c(0.42, 0.58, 0.78, 2.2, 3.3, 4.5)
header_placement <- c(0.42, 0.58, 0.78, 1.225, 1.67, 2.7, 3.7)
trans <- "log"
height <- 5
vline <- 30
estimate_label <- "Odds Ratio (95% CI) per 1-SD higher obesity trait"
estimate_vjust <- -41.5
estimate_hjust <- 0.44
}
plot <- ggplot(df, aes(x=unique_combinations, y=beta, ymin=beta_lci, ymax=beta_uci)) +
  geom_point(aes(color = sex_group), color = "white", shape = 18, size = 3, show.legend =F) +
  geom_vline(xintercept = vline, colour = "gray87", size = 5.1) +
  geom_segment(aes(yend = dashed_line_place, y = dashed_line_place, xend = 0, x = nrow(df) + 0.5), linetype = "dashed", size = 0.3) +
  geom_segment(aes(yend = axis_ends[1], y = axis_ends[1], xend = axis_xend, x = nrow(df) + 0.5), size = 0.3) +
  geom_segment(aes(yend = axis_ends[1], y = axis_ends[2], xend = axis_xend, x = axis_xend), size = 0.3) +
  annotate("segment", y = breaks_number, yend = breaks_number, x = rep(0, length(breaks_number)),
           xend = rep(-0.3, length(breaks_number)), size = 0.3) +
  geom_errorbar(size = 0.3, width=.3) +
  geom_point(aes(y = df$not_sig_or, color = sex_group), shape = 23, fill = "white", size = 1.65, show.legend =F) +
  geom_point(aes(y = df$sig_or, color = sex_group), shape = 18, size = 2.3, show.legend =F) +
  geom_text(aes(y = col_placement[1], x = unique_combinations), label = df$trait_name, family = "Times", size = 2.85, hjust = 0) +
  geom_text(aes(y = col_placement[2], x = unique_combinations), label = df$grs_trait_name, family = "Times", size = 2.85, hjust = 0) +
  geom_text(aes(y = col_placement[3], x = unique_combinations), label = df$sex_group_name, family = "Times", size = 2.85, hjust = 0) +
  geom_text(aes(y = col_placement[4], x = unique_combinations), label = df$ci, family = "Times", size = 2.85, hjust = 1) +
  geom_text(aes(y = col_placement[5], x = unique_combinations), label = df$print_grs_p, parse = T,
            family = "Times", size = 2.85, hjust = 1) +
  geom_text(aes(y = col_placement[6], x = unique_combinations), label = df$print_cochrans_p, parse = T,
            family = "Times", size = 2.85, hjust = 1, vjust = -0.5) +
  geom_text(aes(y = col_placement[6] + 0.01, x = unique_combinations), label = df$cochrans_p_star, family = "Times",
            size = 3.5, hjust = 0, vjust = -0.5) +
  annotate("text", y = header_placement, x = rep(nrow(df) +1, length(header_placement)),
           label = c("bold(Outcome)", "bold(\"Risk factor\")", "bold(Sex)", "bold(Estimate)", estimate_name,
                     "bold(P)", "bold(P[het])"), parse = T, size = 2.85, hjust = c(rep(0, 3), 0.5, rep(0, 3)), family = "Times", fontface = "bold") +
  ggtitle(estimate_label)+
  theme_void() +
  scale_color_brewer(palette = "Dark2") +
  scale_y_continuous(trans = trans, breaks = breaks_number) +
  theme(axis.text.y=element_blank(), axis.text = element_text(color = "black", size = 8, family = "Times")) +
  coord_flip(ylim = ylim_number, xlim = xlim_number, expand = F) +
  theme(plot.title = element_text(family = "Times", size = 8, vjust = estimate_vjust, hjust = estimate_hjust, face = "bold"),
        plot.margin=unit(c(0, 0, 1, 0), "cm"))
output_file <- paste("/Net/fs1/home/linc4222/mr.", risk_factor, ".mr.risk.estimates.all.white.sd.pdf", sep = "")
ggsave(output_file, plot, unit = "cm", height = height, width = 17.3, dpi = 800, device = "pdf")
}
|
f35277ee1fcfdb2787ffaec26df59f0458aedfc1
|
6312f6e7e2e22bb7cb7580b0b92c0a6bbeeb5627
|
/wltr_new/wltr.git/train.R
|
6d19caf5be909b12f2703430e631ce027ac19ba8
|
[] |
no_license
|
babyang/wltr
|
20708cee2661b9c6ae8b67bdf43343dfbeadac84
|
9a9a76d474aebf3fc350b9cdcf5734328b11be60
|
refs/heads/master
| 2020-05-17T02:40:21.406024
| 2014-12-02T09:30:23
| 2014-12-02T09:30:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,149
|
r
|
train.R
|
# Dependencies: glmnet for the penalized logistic regression, doMC as the
# parallel backend consumed by cv.glmnet(parallel = TRUE).
# library() rather than require(): a missing package should abort the job
# immediately instead of require() silently returning FALSE and the script
# failing later with a confusing "could not find function" error.
library(glmnet)
library(doMC)
registerDoMC(cores = 12)  # worker processes for the cross-validation folds
# Helper scripts are sourced relative to the working directory, so run this
# script from the repository root.
# (Historical developer-specific absolute paths:
#   /home/zhiyuan/Projects/wltr/... and /home/minxing/projects/zhiyuan/wltr/...)
source("./utils.R")      # provides ReadTxtData / SaveTxtData
source("./alarm_sms.R")  # provides SendSMS
ModelWithLR <- function(train) {
  # Fit a ridge-penalized (alpha = 0) logistic regression with 5-fold CV and
  # return the coefficients at the 1-SE lambda.
  #
  # train: matrix/data.frame whose LAST column is the 0/1 label and all
  #        preceding columns are features.
  # Returns: data.frame with columns `feature` and `coef`.
  num.cols <- ncol(train)
  if (num.cols < 2) {
    # BUG FIX: the original guard was `num.cols < 1` and did not stop, so
    # execution fell through to seq(num.cols - 1); with one column that is
    # seq(0) == c(1, 0), silently mis-indexing the data. We need at least
    # one feature column plus the label column, so alert and abort.
    SendSMS("训练集变量数少于1")
    stop("training set must have at least one feature column and one label column",
         call. = FALSE)
  }
  # seq_len() is safe when num.cols - 1 could be 0 (seq() is not).
  x <- as.matrix(train[, seq_len(num.cols - 1)])
  y <- as.factor(train[, num.cols])
  lr <- cv.glmnet(x, y, family = "binomial", type.measure = "auc",
                  alpha = 0, parallel = TRUE, nfolds = 5)
  # Extract coefficients at lambda.1se; row names carry the feature names.
  # Renamed local from `coef` to avoid shadowing stats::coef().
  coefs <- as.data.frame(as.matrix(coef(lr$glmnet.fit, lr$lambda.1se)))
  names(coefs) <- c("coef")
  coefs <- data.frame(feature = row.names(coefs), coefs)
  row.names(coefs) <- NULL
  return(coefs)
}
Training <- function(instances,step) {
# Bagged ridge logistic regression: fit `step` models on independently
# balanced resamples of `instances` and accumulate their coefficients.
#
# instances: data.frame of features with the 0/1 label column `y` last.
# step:      number of resample/fit rounds.
# Returns:   data.frame (feature, coef) with coefficients SUMMED over rounds.
instances.matrix <- ReSample(instances)
model.mean <- ModelWithLR(instances.matrix)
# NOTE(review): seq(step-1) yields c(1, 0) when step == 1 (two extra
# iterations instead of zero); seq_len(step - 1) would be the safe form.
for ( i in seq(step-1)) {
instances.matrix <- ReSample(instances)
model <- ModelWithLR(instances.matrix)
model.mean$coef <- model.mean$coef + model$coef
}
# NOTE(review): this line is a no-op. Given the name `model.mean`, the
# intent was presumably `model.mean$coef / step` to average the rounds —
# confirm before changing, as downstream consumers may rely on the summed
# scale (for ranking, sum and mean are equivalent).
model.mean$coef <- model.mean$coef
return(model.mean)
}
ReSample <- function(instances) {
  # Build a balanced training matrix by keeping every positive row (y == 1)
  # and down-sampling the negatives to the same count, positives first.
  positives <- subset(instances, y == 1)
  negatives <- subset(instances, y == 0)
  # Draw as many negative row indices as there are positives.
  keep <- sample(nrow(negatives), nrow(positives))
  balanced <- rbind(positives, negatives[keep, ])
  return(as.matrix(balanced))
}
MergeSample <- function(src, dst) {
  # Inner-join features (`src`) and labels (`dst`) on tradeItemId, then drop
  # the first two columns of the joined table (the join key and the column
  # immediately after it), leaving features plus label.
  joined <- merge(src, dst, by = "tradeItemId")
  joined[, -(1:2)]
}
Train <- function(args) {
  # Training driver.
  # args: character vector of
  #   [1] feature file path, [2] label file path,
  #   [3] output model path, [4] number of resample rounds.
  feature.path <- args[1]
  label.path <- args[2]
  model.path <- args[3]
  rounds <- as.numeric(args[4])
  # Load the two tab-delimited inputs, join them on tradeItemId, fit the
  # bagged model, and persist the coefficients.
  features <- ReadTxtData(feature.path)
  labels <- ReadTxtData(label.path)
  samples <- MergeSample(features, labels)
  fitted <- Training(samples, rounds)
  SaveTxtData(fitted, model.path)
}
# Script entry point: forward command-line arguments straight to Train().
args <- commandArgs(TRUE)
Train(args)
|
a936e09fbc18e8c840a8892f631b5f97fb347904
|
9efa134c757f6f8938cb17d565be9f5e87e8c8e9
|
/man/deleteGuild.Rd
|
21f87e892f27f8929951557397b53e9134f26700
|
[] |
no_license
|
bpb824/brewerydb
|
04f07279e18c63c054c62244669aeeaccacbf921
|
1fed6d68ac6a9543b8fa04c0efb11631fdc78d65
|
refs/heads/master
| 2022-01-20T05:11:49.833630
| 2019-06-23T02:37:58
| 2019-06-23T02:37:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 295
|
rd
|
deleteGuild.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Guild.R
\name{deleteGuild}
\alias{deleteGuild}
\title{Delete a Guild}
\usage{
deleteGuild(guildId)
}
\arguments{
\item{guildId}{The guildId}
}
\value{
none
}
\description{
Deletes an existing guild
}
\concept{Guild}
|
9223d9bf10031315f165f5fa19c33d9c06654bc4
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkTooltipsDisable.Rd
|
6e3fe617e533f77d1bffa80cbd25ca37725e8c6d
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 611
|
rd
|
gtkTooltipsDisable.Rd
|
\alias{gtkTooltipsDisable}
\name{gtkTooltipsDisable}
\title{gtkTooltipsDisable}
\description{
Causes all tooltips in \code{tooltips} to become inactive. Any widgets that have tips associated with that group will no longer display their tips until they are enabled again with \code{\link{gtkTooltipsEnable}}.
\strong{WARNING: \code{gtk_tooltips_disable} has been deprecated since version 2.12 and should not be used in newly-written code. }
}
\usage{gtkTooltipsDisable(object)}
\arguments{\item{\verb{object}}{a \code{\link{GtkTooltips}}.}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
0af56e631ce4e9edbed2e354938a72a2805de67f
|
d8674514f06e48dc264d82096dbd5517b115ff29
|
/script/known_filter.R
|
8c5fba65544d9cb2e7c69a332b763ddc22b0aa7d
|
[] |
no_license
|
nzhun/PAH
|
23d7414b8ea728094ce3d41b4facee4c7733f29f
|
beb8c52cca7d04b5aac6455d572998902ead9e86
|
refs/heads/master
| 2021-09-16T12:34:20.463132
| 2018-06-20T17:19:26
| 2018-06-20T17:19:26
| 107,172,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,369
|
r
|
known_filter.R
|
#setwd("~/server")
# NOTE(review): machine-specific working directory; the sourced script
# provides formatFreq_new() used below.
setwd("/home/local/ARCS/nz2274/")
source("Pipeline/NA_script/R/untils.R")
filter_allfreq_local <- function(data,freq_avg,freq_max){
# Keep only rare variants: overall ExAC frequency below `freq_avg`, and every
# ExAC sub-population plus gnomAD-exome frequency below `freq_max`.
# Additionally requires Mappability == 1 and excludes variants in
# high-identity (> 0.9) segmental duplications when those annotation
# columns are present.
#
# data:     ANNOVAR-style variant table with frequency annotation columns.
# freq_avg: threshold on ExAC_ALL.
# freq_max: threshold on each sub-population column.
# Returns the filtered data.frame.
#
# NOTE(review): which() silently drops rows for which any comparison is NA,
# so variants missing ANY frequency annotation are removed; the two
# na.pass() wrappers do not change that. Confirm this is intended for novel
# (unannotated) variants.
data <- data[which(na.pass(as.numeric(data$ExAC_ALL)< freq_avg)
&na.pass(as.numeric(data$ExAC_AMR)< freq_max)
&as.numeric(data$ExAC_AFR)< freq_max
&as.numeric(data$ExAC_NFE)< freq_max
&as.numeric(data$ExAC_FIN)< freq_max
&as.numeric(data$ExAC_SAS)< freq_max
&as.numeric(data$ExAC_EAS)< freq_max
&as.numeric(data$ExAC_OTH)< freq_max
&as.numeric(data$gnomAD_exome_ALL)<freq_max
&as.numeric(data$gnomAD_exome_EAS)<freq_max
&as.numeric(data$gnomAD_exome_NFE)<freq_max
&as.numeric(data$gnomAD_exome_FIN)<freq_max
&as.numeric(data$gnomAD_exome_OTH)<freq_max
&as.numeric(data$gnomAD_exome_ASJ)<freq_max
&as.numeric(data$gnomAD_exome_AMR)<freq_max
# & as.numeric(data$`1KGfreq`) < freq2
# &as.numeric(data$ESPfreq)< freq2
# &as.numeric(data$gnomAD_Genome_AF)< freq2
),]
# data <- data[which( as.numeric(data$AC)< 25
# &as.numeric(data$AB)>0.2
# ),]
# Require perfect mappability when the annotation column exists.
if(length(grep("Mappability",names(data)))>0){
data <- data[which(data$Mappability==1),]
}
# Exclude variants inside segmental duplications with identity score > 0.9.
# genomicSuperDups entries look like "Score:0.93-...", so the second token
# after splitting on ":" or "-" is the numeric score.
if(length(grep("genomicSuperDups",names(data)))>0){
index<-grep("Score",data$genomicSuperDups)
# NOTE(review): the next line computes the scores but discards the result
# (dead code); the same expression is recomputed inside which() below.
as.numeric(unlist(lapply(index,FUN = function(x) unlist(strsplit(x = data$genomicSuperDups[x],split = ":|-"))[2])))
dup_indexs<-index[which(as.numeric(unlist(lapply(index,FUN = function(x) unlist(strsplit(x = data$genomicSuperDups[x],split = ":|-"))[2])))>0.9)]
if(length(dup_indexs)>0){
data <- data[-dup_indexs,]
}
}
return(data)
}
fill_proband<-function(set){
# Resolve each sample ID in `set` to its full proband ID from the pedigree
# file: a pedigree entry matches when it starts with "<ID>_".
# Unmatched IDs keep the bare ID; failures are only printed for samples
# captured with an xgen kit.
pedf<-"PAH/Result/Data/source/VCX_Control.ped"
ped<-read.table(pedf,header=1,comment.char = "",check.names = F,stringsAsFactors = F,fill = T)
# Default: proband is the sample's own ID.
set$proband=set$ID;
for(i in 1:dim(set)[1]){
index<-grep(paste(set$ID[i],"_",sep=""),ped$ID)
if(length(index)>0){
# NOTE(review): if grep() returns more than one match this single-element
# assignment fails; assumes pedigree IDs match each sample at most once —
# confirm against the ped file.
set$proband[i]=ped$ID[index]
}else{
# Report only unmatched xgen-capture samples.
if(length(grep("xgen",set$CAPTURE[i],ignore.case = T))>0){
print(paste("failed",set$ID[i],set$CAPTURE[i]));
}
}
}
return (set)
}
addfamily <- function(dat, set) {
  # Annotate each variant row in `dat` with phenotype information for its
  # proband, looked up in the cohort sheet `set`.
  #
  # dat: variant table with a `proband` column.
  # set: cohort sheet with columns proband, Age_dx, TYPE, Gender, FamType.
  # Returns `dat` with added character columns age, type, gender, family.
  dat$age <- ""
  dat$type <- ""
  dat$gender <- ""
  dat$family <- ""
  for (f in unique(dat$proband)) {
    info <- set[which(set$proband == f), ]
    idx <- which(dat$proband == f)
    dat$age[idx] <- info$Age_dx
    # BUG FIX: the original second assignment wrote info$TYPE into dat$age,
    # clobbering the age just set and leaving dat$type permanently empty.
    dat$type[idx] <- info$TYPE
    dat$gender[idx] <- info$Gender
    dat$family[idx] <- info$FamType
  }
  return(dat)
}
process<-function(file,setfile,fasso,outputf){
# End-to-end filtering driver: read the variant table and cohort sheet,
# keep rare variants in the associated-gene list for cohort probands,
# apply a depth filter, annotate with family info, and write a CSV.
#
# file:    tab-delimited variant table (ANNOVAR-converted txt).
# setfile: cohort sample sheet CSV (uses `proband` column if present,
#          otherwise the first column).
# fasso:   text file whose first column lists associated gene symbols.
# outputf: output CSV path.
dat<-read.table(file,header = 1,stringsAsFactors = F,comment.char = "",check.names = F,sep="\t")
set<-read.csv(setfile,header = 1,comment.char ="",check.names = F,stringsAsFactors = F,strip.white = T)
# formatFreq_new() is provided by the sourced untils.R helper script.
dat<-formatFreq_new(dat)
filter_dat<-filter_allfreq_local(dat,0.0001,0.0001)
#set<-fill_proband(set)
#write.csv(set,file = setfile,quote = F,row.names = F)
gene_asso <- read.table(fasso,header = F,stringsAsFactors = F,check.names = F,strip.white = T)
gene_asso <- gene_asso[,1]
# Keep variants in associated genes, drop spanning-deletion alleles ("*"),
# and require cohort allele frequency below 1%.
filter_dat<-filter_dat[which(filter_dat$Gene.refGene%in%gene_asso &filter_dat$ALT!="*" & filter_dat$AF<0.01),] # &filter_dat$VQSLOD > -2.75
cohort<-c()
if(length(grep("proband",names(set)))>0){cohort<-set$proband}else{cohort<-set[,1]}
sub_dat<-filter_dat[which(filter_dat$proband%in%cohort),]
# Depth filter: require total depth > 8 (direct DP or ALT + REF reads)
# when per-individual depth columns are present.
if(length(grep("DP_ind",names(sub_dat))) >0){sub_dat<-sub_dat[which(as.numeric(sub_dat$DP_ind)>8| as.numeric(sub_dat$AD_ind)+as.numeric(sub_dat$RD_ind)>8),] }
#if(length(grep("GQ",names(sub_dat))) >0){sub_dat<-sub_dat[which(as.numeric(sub_dat$GQ)>90),] }
sub_dat<-addfamily(sub_dat,set)
write.csv(sub_dat,file = outputf,row.names = F)
}
paper_check<-function(sub_dat){
# Cross-reference variants against a curated list of published gene/sample
# pairs; flag matches in both tables and rewrite the paper CSV in place.
#
# NOTE(review): `sub_dat` is annotated locally (publish/publish_id) but the
# function never returns it, so those annotations are lost at the call
# site; only the updated paper CSV is persisted — confirm whether a
# `return(sub_dat)` was intended.
sub_dat$publish<-0;
sub_dat$publish_id<-""
paper<-read.csv("PAH/Result/Data/source/known_paper_adult.csv",header = 1,stringsAsFactors = F,strip.white = T)
paper$has=0;
for(i in 1:dim(sub_dat)[1]){
proband<-sub_dat$proband[i]
gene<-sub_dat$Gene.refGene[i]
# Published sample IDs reported for this gene.
ids<-paper$ID[which(paper$Gene==gene)]
for(id in ids){
# Substring match: the published ID may be a prefix/part of the proband ID.
a<-grep(paste(id,sep=""),proband)
if(length(a)>0){sub_dat$publish[i]=1; sub_dat$publish_id[i]=id; paper[which(paper$ID==id & paper$Gene==gene),"has"]<-1; print(paste(id,proband,i));}
}
}
# Overwrites the source file with the `has` flags added.
write.csv(paper,file = "PAH/Result/Data/source/known_paper_adult.csv",row.names = F)
}
#file="PAH/Known/PAH.known.vcf.gz.2.txt"
#setfile="PAH/Result/Data/source/PAH_pediatric_adult_list.csv"
# fasso <- "PAH/DOC/HongJian/source/PAH_associated11-13.txt"
#outputf="PAH/PAH_known.variants.csv"
# Command-line driver.
# Usage: <tab-delimited data file> <cohort_ids csv> <geneset file> [output_csv]
# The fourth argument defaults to "out.csv" when omitted.
args <- commandArgs(trailingOnly = T)
if (length(args) < 3) {
  # BUG FIX: the original condition was `length(args) < 4`, which rejected
  # 3-argument invocations before the default-output branch below could
  # ever run (the `== 3` branch was unreachable).
  stop("At least two arguments must be supplied (input file)\n\tinput format: the tab-delimited data file, cohort_ids,genesetfile,output_file ", call.=FALSE)
} else if (length(args) == 3) {
  args[4] <- "out.csv"
}
file <- args[1]
setfile <- args[2]
outputf <- args[4]
fgene <- args[3]
print("input format: the conerted_txt, cohort_ids,genesetfile,output_file")
process(file, setfile, fgene, outputf)
|
5a2b90b0341ff2cdf35d848ca8ce138b0df2d0d7
|
72eea872c42a5197b1e79a53413d718b83d6aebc
|
/R/as_openadd.R
|
d4117667e9e84841553f438a80e6448e6164bc8b
|
[
"MIT"
] |
permissive
|
sckott/openadds
|
72b7a422a5363bebca017eb8d2b31c3a5300a0c4
|
7cb6991a5206caeeb0ce9821c83c61fbc40eeb03
|
refs/heads/master
| 2021-07-11T01:33:01.977702
| 2021-03-17T16:35:47
| 2021-03-17T16:35:47
| 34,580,790
| 9
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,383
|
r
|
as_openadd.R
|
#' Coerce to openadd object
#'
#' @export
#' @param country (characater) Country name
#' @param state (characater) State (or province) name
#' @param city (characater) City name
#' @param ... ignored
#' @details This is a helper function to let the user specify what they want
#' with any combination of country, state, and city - the output of which
#' can be passed to [oa_get()] to get data.
#'
#' If your search results in more than 1 result, we stop with message to
#' refine your search.
#' @return an object of class `openadd`
#' @examples \dontrun{
#' as_openadd(country="us", state="nv", city="las_vegas")
#'
#' # too coarse, will ask you to refine your search
#' # as_openadd(country="us", state="mi", city="detroit")
#' }
as_openadd <- function(country = NULL, state = NULL, city = NULL, ...) {
  # Look up a single OpenAddresses dataset matching the given location and
  # wrap it as an `openadd` object; any other number of matches aborts.
  found <- oa_search(country, state, city)
  if (NROW(found) != 1) {
    stop("Refine your search, more than 1 result", call. = FALSE)
  }
  make_openadd(found)
}
make_openadd <- function(x) {
  # Build an `openadd` object: the dataset URL classed as "openadd", with
  # the location fields stored as attributes.
  out <- x$url
  attributes(out) <- list(class = "openadd", country = x$country,
                          state = x$state, city = x$city)
  out
}
#' @export
print.openadd <- function(x, ...) {
  # Pretty-print an `openadd` object: a header line followed by one line
  # per stored location attribute (text matches the original byte-for-byte,
  # including the "OpenAddreses" spelling).
  emit <- function(label, value) {
    cat(paste0("  <<", label, ">> ", value), sep = "\n")
  }
  cat("<<OpenAddreses>> ", sep = "\n")
  emit("country", attr(x, "country"))
  emit("state", attr(x, "state"))
  emit("city", attr(x, "city"))
}
|
107976aa2ebffac1c33900d30118ae1db701dfdb
|
64c6ff944757012350441458db8229cca0baa64f
|
/docs/Shiny_Apps/full_roster_compile/server.R
|
8d56e178add41d3dffd2b9ec288c943c0d6c20d1
|
[] |
no_license
|
noahknob/Baseball_App
|
c4a5e324a662ab9104976aa59adbfa9af33758a1
|
75c1bce369eab1c3e2b4c82aa1464593db845320
|
refs/heads/master
| 2022-02-25T09:39:02.650009
| 2019-10-26T19:10:13
| 2019-10-26T19:10:13
| 115,544,306
| 0
| 0
| null | 2018-01-29T23:48:56
| 2017-12-27T17:44:21
|
R
|
UTF-8
|
R
| false
| false
| 3,465
|
r
|
server.R
|
library(shinydashboard)
library(shiny)
library(tidyverse)
server <- function(input, output, session) {
# Shiny server for a fantasy-baseball roster builder: loads per-player
# average stats and historical season category totals from GitHub, then
# estimates the probability of winning each scoring category for the
# roster selected in the UI (normal approximation over season totals).
avg_pitcher_stats <- read_delim("https://raw.githubusercontent.com/noahknob/Baseball_App/master/data/avg_pitcher_stats.txt", delim = "\t")
avg_batter_stats <- read_delim("https://raw.githubusercontent.com/noahknob/Baseball_App/master/data/avg_batter_stats.txt",delim = "\t")
season_stats <- read_delim("https://raw.githubusercontent.com/noahknob/Baseball_App/master/data/season_stats.txt", delim = "\t")
# Round numeric columns for display: 2 dp for batters, 3 dp for pitchers.
avg_batter_stats <- avg_batter_stats %>%
mutate_if(is.double,round,digits = 2)
avg_pitcher_stats <- avg_pitcher_stats %>%
mutate_if(is.double,round,digits = 3)
# Pitchers currently selected in the SP (starter) / RP (reliever) inputs.
df_p <- reactive({
data <- avg_pitcher_stats %>%
filter(Name %in% input$SP | Name %in% input$RP )
return(data)
})
# Batters currently selected across all position inputs.
df_b <- reactive({
data <- avg_batter_stats %>%
filter(Name == input$Catcher | Name == input$FirstBase | Name == input$SecondBase | Name == input$ThirdBase |
Name == input$Shortstop | Name == input$UTL | Name %in% input$Outfield )
return(data)
})
# P(roster beats a typical season total) per batting category, treating
# historical totals as normal. NOTE(review): data_frame() is deprecated in
# tibble; tibble() is the modern equivalent.
df_b_pnorm <- reactive({
df <- data_frame(R = pnorm(sum(df_b()$R), mean = mean(season_stats$Runs), sd = sd(season_stats$Runs)),
RBI = pnorm(sum(df_b()$RBI), mean = mean(season_stats$RBI), sd = sd(season_stats$RBI)),
HR = pnorm(sum(df_b()$HR), mean = mean(season_stats$HR), sd = sd(season_stats$HR)),
SB = pnorm(sum(df_b()$SB), mean = mean(season_stats$SB), sd = sd(season_stats$SB)),
AVG = pnorm(mean(df_b()$BA), mean = mean(season_stats$AVG), sd = sd(season_stats$AVG)))
return(df)
})
# Same for pitching; ERA and WHIP use 1 - pnorm because lower is better.
df_p_pnorm <- reactive({
df <- data_frame(Wins = pnorm(sum(df_p()$W), mean = mean(season_stats$Wins), sd = sd(season_stats$Wins)),
SV = pnorm(sum(df_p()$SV), mean = mean(season_stats$Saves), sd = sd(season_stats$Saves)),
K = pnorm(sum(df_p()$K), mean = mean(season_stats$Strikeouts), sd = sd(season_stats$Strikeouts)),
ERA = 1 - pnorm(mean(df_p()$ERA), mean = mean(season_stats$ERA), sd = sd(season_stats$ERA)),
WHIP = 1 - pnorm(mean(df_p()$WHIP), mean = mean(season_stats$WHIP), sd = sd(season_stats$WHIP)))
return(df)
})
# Batting summary panel: roster totals plus win probability per category.
output$values_B <- renderPrint({
summed_df_b <- df_b()
list("Runs" = sum(summed_df_b$R), "Probability of Winning Runs" = df_b_pnorm()$R,
"RBI" = sum(summed_df_b$RBI),"Probability of Winning RBI" = df_b_pnorm()$RBI,
"HR" = sum(summed_df_b$HR), "Probability of Winning HR" = df_b_pnorm()$HR,
"SB" = sum(summed_df_b$SB), "Probability of Winning SB" = df_b_pnorm()$SB,
"AVG" = mean(summed_df_b$BA), "Probability of Winning AVG" = df_b_pnorm()$AVG)
})
# Selected-batters table.
output$table_B <- DT::renderDataTable(DT::datatable({
df_b()
}))
# Pitching summary panel: roster totals plus win probability per category.
output$values_P <- renderPrint({
summed_df_p <- df_p()
list("Wins" = sum(summed_df_p$W), "Probability of Winning Wins" = df_p_pnorm()$Wins,
"Saves" = sum(summed_df_p$SV), "Probability of Winning Saves" = df_p_pnorm()$SV,
"Strikeouts" = sum(summed_df_p$K), "Probability of Winning Ks" = df_p_pnorm()$K,
"ERA" = mean(summed_df_p$ERA), "Probability of Winning ERA" = df_p_pnorm()$ERA,
"WHIP" = mean(summed_df_p$WHIP),"Probability of Winning WHIP" = df_p_pnorm()$WHIP)
})
# Selected-pitchers table.
output$table_P <- DT::renderDataTable(DT::datatable({
df_p()
}))
}
|
cb884583d7747fe36b5df4ec67fb388779015117
|
45ab1e397b5fc69ba84c8f5dfb66c09b79bca4c6
|
/Course_III/ECONOMETRICS/pract12/task3_reverse.r
|
ac751d0d9395e713ee444512faad752cbbc6e76a
|
[
"WTFPL"
] |
permissive
|
GeorgiyDemo/FA
|
926016727afa1ce0ee49e6ca9c9a3c60c755b35f
|
9575c43fa01c261ea1ed573df9b5686b5a6f4211
|
refs/heads/master
| 2023-06-28T00:35:43.166167
| 2023-06-16T14:45:00
| 2023-06-16T14:45:00
| 203,040,913
| 46
| 65
|
WTFPL
| 2022-04-09T21:16:39
| 2019-08-18T18:19:32
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 8,236
|
r
|
task3_reverse.r
|
########### This is the wrong model, with area depending on price rather than the other way around
##### All test coefficients were analyzed here too, so it would be a pity to delete this
#install.packages("lmtest")
#install.packages("forecast")
#install.packages("tseries")
#install.packages("orcutt")
#install.packages("orcutt")
library(orcutt)
library(lmtest)
library(forecast)
library(tseries)
library(orcutt)
setwd("/Users/demg/Documents/Projects/FA/Course\ III/ECONOMETRICS/pract12")
data <- read.csv('./task3.csv', sep = ";")
# x = price, y = area (column names are in Russian in the CSV).
x <- data$Цена; x
y <- data$Площадь; y
# Simple (paired) regression model
p_many <- lm(y~x)
s_many <- summary(p_many)
s_many
# Correlation matrix
cor(data)
# Scatter plot
plot(y, x, col = 'green')
# Confidence interval
confint(p_many, level = 0.95)
####### B) Check the significance of the regression model as a whole and of each model coefficient separately. ######
determ <- s_many$r.squared
adjust_determ <- s_many$adj.r.squared
st_error <- sqrt(deviance(p_many)/df.residual(p_many))
# Mean absolute percentage (approximation) error.
approx <- sum(abs(s_many$residuals/y)) / length(y) * 100
f_test <- s_many$fstatistic[1]
# Summary table (column names are Russian identifiers, kept as-is).
compare <- data.frame(
Коэффициент_детерминации=determ,
Скорректированный_коэффициент=adjust_determ,
Стандартная_ошибка_модели=st_error,
Ошибка_аппроксимации=approx,
F_тест=f_test
)
print(compare)
# Quality verdict: the model is acceptable, R2 is not ideal
gqtest(p_many, order.by = x, fraction = 0.25) # Goldfeld-Quandt test for x
# GQ < F_crit – homoscedasticity – H0<br>
# GQ > F_crit – heteroscedasticity – Ha<br>
#GQ = 61.165, df1 = 6, df2 = 5, p-value = 0.0001618
#alternative hypothesis: variance increases from segment 1 to 2<br>
# p-value = 0.0001618 < (0.1; 0.05; 0.01), the heteroscedasticity problem is present<br>
bptest(p_many, studentize = TRUE) # Breusch-Pagan test
# Homoscedasticity – H0<br>
# Heteroscedasticity – Ha<br>
#BP = 12.493, df = 1, p-value = 0.0004086
# p-value = 0.0004086 < (0.1; 0.05; 0.01), the heteroscedasticity problem is present<br>
dw <- dwtest(p_many); dw # Durbin-Watson test
# H0: no autocorrelation<br>
# Ha: first-order autocorrelation is present<br>
#DW = 2.9609, p-value = 0.9821
# DW is not close to 0, which indicates the absence of positive autocorrelation
# p-value = 0.9821 > (0.01, 0.05, 0.1) - accept the hypothesis of no autocorrelation, reject the hypothesis that autocorrelation exists
bgtest(p_many, order = 1, order.by = NULL, type = c("Chisq", "F")) # Breusch-Godfrey test
bgtest(p_many, order = 2, order.by = NULL, type = c("Chisq", "F")) # Breusch-Godfrey test
bgtest(p_many, order = 3, order.by = NULL, type = c("Chisq", "F")) # Breusch-Godfrey test
# H0: no autocorrelation<br>
# Ha: autocorrelation of order n is present<br>
#LM test = 6.5807, df = 1, p-value = 0.01031<br>
#LM test = 25.283, df = 2, p-value = 0.01267<br>
#LM test = 25.304, df = 3, p-value = 0.03291<br>
# pv = 0.01031 > (0.01) => H0 accepted, order-1 autocorrelation is absent<br>
# pv = 0.01267 > (0.01) => H0 accepted, order-2 autocorrelation is absent<br>
# pv = 0.03291 > (0.01) => H0 accepted, order-3 autocorrelation is absent<br>
# pv = 0.01031 < (0.05, 0.1) => Ha accepted, order-1 autocorrelation is present<br>
# pv = 0.01267 < (0.05, 0.1) => Ha accepted, order-2 autocorrelation is present<br>
# pv = 0.03291 < (0.05, 0.1) => Ha accepted, order-3 autocorrelation is present<br>
# Conclusion: autocorrelation is absent at the 0.01 significance level
# At the other levels (0.05, 0.1) it is present
######################################### DID NOT LIKE THIS, WENT ON TO TRY TO ELIMINATE THE AUTOCORRELATION ########################################
# Cochrane-Orcutt-style correction: estimate rho from DW (rho = 1 - DW/2)
# and quasi-difference the series.
DW<-dw$statistic
DW
p<-1-DW/2;p
y
# NOTE(review): indices 2:20 / 1:19 hard-code n = 20 observations — confirm
# against the size of task3.csv.
y4<-y[2:20]-p*y[1:19]
y4
x4<-x[2:20]-p*x[1:19]
x4
m4<-lm(y4~x4);m4
s4<-summary(m4);s4
# Recover the original-scale intercept: a = intercept / (1 - rho).
a <- s4$coefficients[1]/(1-p);a
b <- s4$coefficients[2]; b
dwtest(m4) # Durbin-Watson test
# H0: no autocorrelation<br>
# Ha: first-order autocorrelation is present<br>
#DW = 2.6513, p-value = 0.8916
# DW is not close to 0, which indicates the absence of positive autocorrelation
# p-value = 0.8916 > (0.01, 0.05, 0.1) - accept the hypothesis of no autocorrelation, reject the hypothesis that autocorrelation exists
bgtest(m4, order = 1, order.by = NULL, type = c("Chisq", "F")) # Breusch-Godfrey test
bgtest(m4, order = 2, order.by = NULL, type = c("Chisq", "F")) # Breusch-Godfrey test
bgtest(m4, order = 3, order.by = NULL, type = c("Chisq", "F")) # Breusch-Godfrey test
# H0: no autocorrelation<br>
# Ha: autocorrelation of order n is present<br>
#LM test = 2.0347, df = 1, p-value = 0.1537
#LM test = 2.038, df = 2, p-value = 0.361
#LM test = 2.4362, df = 3, p-value = 0.4869
# pv = 0.1537 > (0.01, 0.05, 0.1) => H0 accepted, order-1 autocorrelation is absent
# pv = 0.361 > (0.01, 0.05, 0.1) => H0 accepted, order-2 autocorrelation is absent
# pv = 0.4869 > (0.01, 0.05, 0.1) => H0 accepted, order-3 autocorrelation is absent
########################################################## So it is definitely absent #######################################################
############################################# So we only need to eliminate the heteroscedasticity problem #################################
# Weighted regression: divide both variables by the fitted values to
# stabilize the residual variance.
y3<-y/predict(p_many)
x3<-x/predict(p_many)
m3<-lm(y3~x3)
s3<-summary(m3)
s3
gqtest(m3, order.by = x3, fraction = 0.25) # Goldfeld-Quandt test for x4
# GQ < F_crit – homoscedasticity – H0
# GQ > F_crit – heteroscedasticity – Ha
#GQ = 2.9575, df1 = 6, df2 = 5, p-value = 0.1272
#alternative hypothesis: variance increases from segment 1 to 2<br>
# p-value = 0.1272 > (0.1; 0.05; 0.01), the heteroscedasticity problem is absent
bptest(m3, studentize = TRUE) # Breusch-Pagan test
# Homoscedasticity – H0
# Heteroscedasticity – Ha
#BP = 3.8625, df = 1, p-value = 0.04938
# p-value = 0.04938 < 0.1: the heteroscedasticity problem is present
# p-value = 0.04938 < 0.05: the heteroscedasticity problem is present
# p-value = 0.04938 > 0.01: the heteroscedasticity problem is absent
# We will consider the heteroscedasticity problem eliminated
#######################################################################################################################################
|
953f2b84da06d0d4582180ae35c552f3c1070b0d
|
01d9837a6754cb24d10a2874b7c49f224e3b97ba
|
/lab3package/R/laboratory3.R
|
3b7fc48d69b42f6e2f68d321f14a4b986acd5f9e
|
[] |
no_license
|
tenoglez/ARP_Laboratory3
|
89f81ee4fd351214d250766d41ee018e0c7e90c3
|
e60aac5379fcbf268e2136f995b13d199439cbb2
|
refs/heads/master
| 2020-04-10T19:45:54.487373
| 2016-09-15T12:34:16
| 2016-09-15T12:34:16
| 68,100,323
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 834
|
r
|
laboratory3.R
|
#' A package for computing the notorious Dijkstra and Euclidean algorithms.
#'
#' @description The lab3package provides two categories of important functions:
#' euclidean and dijkstra. Furthermore, it contains the dataset wiki_graph for dijkstra function testing.
#'
#' @details The lab3package functions consist of the implementation of the \code{\link{euclidean}} algorithm to obtain the greatest common divisor
#' for two numeric scalars and the \code{\link{dijkstra}} algorithm to obtain the shortest path from a node in a graph
#'
#' @author Teno Gonzalez Dos Santos, Enrique Josue Alvarez Robles, Jose Luis Lopez Ruiz
#'
#' @references Dijkstra - \url{https://en.wikipedia.org/wiki/Dijkstra\%27s_algorithm}\cr
#' Euclidean - \url{https://en.wikipedia.org/wiki/Euclidean_algorithm}
#'
#'
#' @docType package
#' @name laboratory3
"_PACKAGE"
|
0be7a5f87087e59210601d81e70237cfc0117bee
|
7b82068433efacf8840c57e2c05b613dbe13d31c
|
/man/HyperparameterTuner.Rd
|
0c8263f5ddc93097ab68e351d3780708e81e6a16
|
[
"Apache-2.0"
] |
permissive
|
OwenGarrity/sagemaker-r-sdk
|
d25f0d264dcddcb6e0fa248af22d47fc22c159ce
|
3598b789af41ed21bb0bf65bd1b4dfe1469673c9
|
refs/heads/master
| 2022-12-09T04:50:07.412057
| 2020-09-19T13:02:38
| 2020-09-19T13:02:38
| 285,834,692
| 0
| 0
|
NOASSERTION
| 2020-09-19T13:02:39
| 2020-08-07T13:23:16
|
R
|
UTF-8
|
R
| false
| true
| 34,343
|
rd
|
HyperparameterTuner.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tuner.R
\name{HyperparameterTuner}
\alias{HyperparameterTuner}
\title{HyperparamerTuner}
\description{
A class for creating and interacting with Amazon SageMaker hyperparameter
tuning jobs, as well as deploying the resulting model(s).
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{TUNING_JOB_NAME_MAX_LENGTH}}{Maximumn length of sagemaker job name}
\item{\code{SAGEMAKER_ESTIMATOR_MODULE}}{Class metadata}
\item{\code{SAGEMAKER_ESTIMATOR_CLASS_NAME}}{Class metadata}
\item{\code{DEFAULT_ESTIMATOR_MODULE}}{Class metadata}
\item{\code{DEFAULT_ESTIMATOR_CLS_NAME}}{Class metadata}
}
\if{html}{\out{</div>}}
}
\section{Active bindings}{
\if{html}{\out{<div class="r6-active-bindings">}}
\describe{
\item{\code{sagemaker_session}}{Convenience method for accessing the
:class:`~sagemaker.session.Session` object associated with the estimator
for the ``HyperparameterTuner``.}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{HyperparameterTuner$new()}}
\item \href{#method-fit}{\code{HyperparameterTuner$fit()}}
\item \href{#method-attach}{\code{HyperparameterTuner$attach()}}
\item \href{#method-deploy}{\code{HyperparameterTuner$deploy()}}
\item \href{#method-stop_tunning_job}{\code{HyperparameterTuner$stop_tunning_job()}}
\item \href{#method-describe}{\code{HyperparameterTuner$describe()}}
\item \href{#method-wait}{\code{HyperparameterTuner$wait()}}
\item \href{#method-best_estimator}{\code{HyperparameterTuner$best_estimator()}}
\item \href{#method-best_training_job}{\code{HyperparameterTuner$best_training_job()}}
\item \href{#method-delete_endpoint}{\code{HyperparameterTuner$delete_endpoint()}}
\item \href{#method-hyperparameter_ranges}{\code{HyperparameterTuner$hyperparameter_ranges()}}
\item \href{#method-hyperparameter_ranges_list}{\code{HyperparameterTuner$hyperparameter_ranges_list()}}
\item \href{#method-analytics}{\code{HyperparameterTuner$analytics()}}
\item \href{#method-transfer_learning_tuner}{\code{HyperparameterTuner$transfer_learning_tuner()}}
\item \href{#method-identical_dataset_and_algorithm_tuner}{\code{HyperparameterTuner$identical_dataset_and_algorithm_tuner()}}
\item \href{#method-create}{\code{HyperparameterTuner$create()}}
\item \href{#method-.attach_estimator}{\code{HyperparameterTuner$.attach_estimator()}}
\item \href{#method-print}{\code{HyperparameterTuner$print()}}
\item \href{#method-clone}{\code{HyperparameterTuner$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
Initialize a ``HyperparameterTuner``. It takes an estimator to obtain
configuration information for training jobs that are created as the
result of a hyperparameter tuning job.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$new(
estimator,
objective_metric_name,
hyperparameter_ranges,
metric_definitions = NULL,
strategy = "Bayesian",
objective_type = "Maximize",
max_jobs = 1,
max_parallel_jobs = 1,
tags = NULL,
base_tuning_job_name = NULL,
warm_start_config = NULL,
early_stopping_type = c("Off", "Auto"),
estimator_name = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{estimator}}{(sagemaker.estimator.EstimatorBase): An estimator object
that has been initialized with the desired configuration. There
does not need to be a training job associated with this
instance.}
\item{\code{objective_metric_name}}{(str): Name of the metric for evaluating
training jobs.}
\item{\code{hyperparameter_ranges}}{(dict[str, sagemaker.parameter.ParameterRange]): Dictionary of
parameter ranges. These parameter ranges can be one
of three types: Continuous, Integer, or Categorical. The keys of
the dictionary are the names of the hyperparameter, and the
values are the appropriate parameter range class to represent
the range.}
\item{\code{metric_definitions}}{(list[dict]): A list of dictionaries that defines
the metric(s) used to evaluate the training jobs (default:
None). Each dictionary contains two keys: 'Name' for the name of
the metric, and 'Regex' for the regular expression used to
extract the metric from the logs. This should be defined only
for hyperparameter tuning jobs that don't use an Amazon
algorithm.}
\item{\code{strategy}}{(str): Strategy to be used for hyperparameter estimations
(default: 'Bayesian').}
\item{\code{objective_type}}{(str): The type of the objective metric for
evaluating training jobs. This value can be either 'Minimize' or
'Maximize' (default: 'Maximize').}
\item{\code{max_jobs}}{(int): Maximum total number of training jobs to start for
the hyperparameter tuning job (default: 1).}
\item{\code{max_parallel_jobs}}{(int): Maximum number of parallel training jobs to
start (default: 1).}
\item{\code{tags}}{(list[dict]): List of tags for labeling the tuning job
(default: None). For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.}
\item{\code{base_tuning_job_name}}{(str): Prefix for the hyperparameter tuning job
name when the :meth:`~sagemaker.tuner.HyperparameterTuner.fit`
method launches. If not specified, a default job name is
generated, based on the training image name and current
timestamp.}
\item{\code{warm_start_config}}{(sagemaker.tuner.WarmStartConfig): A
``WarmStartConfig`` object that has been initialized with the
configuration defining the nature of warm start tuning job.}
\item{\code{early_stopping_type}}{(str): Specifies whether early stopping is
enabled for the job. Can be either 'Auto' or 'Off' (default:
'Off'). If set to 'Off', early stopping will not be attempted.
If set to 'Auto', early stopping of some training jobs may
happen, but is not guaranteed to.}
\item{\code{estimator_name}}{(str): A unique name to identify an estimator within the
hyperparameter tuning job, when more than one estimator is used with
the same tuning job (default: None).}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-fit"></a>}}
\if{latex}{\out{\hypertarget{method-fit}{}}}
\subsection{Method \code{fit()}}{
Start a hyperparameter tuning job.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$fit(
inputs = NULL,
job_name = NULL,
include_cls_metadata = FALSE,
estimator_kwargs = NULL,
...
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{inputs}}{: Information about the training data. Please refer to the
``fit()`` method of the associated estimator, as this can take
any of the following forms:
* (str) - The S3 location where training data is saved.
* (dict[str, str] or dict[str, TrainingInput]) -
If using multiple channels for training data, you can specify
a dict mapping channel names to strings or
:func:`~TrainingInput` objects.
* (TrainingInput) - Channel configuration for S3 data sources that can
provide additional information about the training dataset.
See :func:`TrainingInput` for full details.
* (sagemaker.session.FileSystemInput) - channel configuration for
a file system data source that can provide additional information as well as
the path to the training dataset.
* (sagemaker.amazon.amazon_estimator.RecordSet) - A collection of
Amazon :class:~`Record` objects serialized and stored in S3.
For use with an estimator for an Amazon algorithm.
* (sagemaker.amazon.amazon_estimator.FileSystemRecordSet) -
Amazon SageMaker channel configuration for a file system data source for
Amazon algorithms.
* (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of
:class:~`sagemaker.amazon.amazon_estimator.RecordSet` objects,
where each instance is a different channel of training data.
* (list[sagemaker.amazon.amazon_estimator.FileSystemRecordSet]) - A list of
:class:~`sagemaker.amazon.amazon_estimator.FileSystemRecordSet` objects,
where each instance is a different channel of training data.}
\item{\code{job_name}}{(str): Tuning job name. If not specified, the tuner
generates a default job name, based on the training image name
and current timestamp.}
\item{\code{include_cls_metadata}}{: It can take one of the following two forms.
* (bool) - Whether or not the hyperparameter tuning job should include information
about the estimator class (default: False). This information is passed as a
hyperparameter, so if the algorithm you are using cannot handle unknown
hyperparameters (e.g. an Amazon SageMaker built-in algorithm that does not
have a custom estimator in the Python SDK), then set ``include_cls_metadata``
to ``False``.
* (dict[str, bool]) - This version should be used for tuners created via the
factory method create(), to specify the flag for each estimator provided in
the estimator_dict argument of the method. The keys would be the same
estimator names as in estimator_dict. If one estimator doesn't need the flag
set, then no need to include it in the dictionary.}
\item{\code{estimator_kwargs}}{(dict[str, dict]): Dictionary for other arguments needed for
training. Should be used only for tuners created via the factory method create().
The keys are the estimator names for the estimator_dict argument of create()
method. Each value is a dictionary for the other arguments needed for training
of the corresponding estimator.}
\item{\code{...}}{: Other arguments needed for training. Please refer to the
``fit()`` method of the associated estimator to see what other
arguments are needed.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-attach"></a>}}
\if{latex}{\out{\hypertarget{method-attach}{}}}
\subsection{Method \code{attach()}}{
Attach to an existing hyperparameter tuning job.
Create a HyperparameterTuner bound to an existing hyperparameter
tuning job. After attaching, if there exists a best training job (or any
other completed training job), that can be deployed to create an Amazon
SageMaker Endpoint and return a ``Predictor``.
The ``HyperparameterTuner`` instance could be created in one of the following two forms.
* If the 'TrainingJobDefinition' field is present in tuning job description, the tuner
will be created using the default constructor with a single estimator.
* If the 'TrainingJobDefinitions' field (list) is present in tuning job description,
the tuner will be created using the factory method ``create()`` with one or
several estimators. Each estimator corresponds to one item in the
'TrainingJobDefinitions' field, while the estimator names would come from the
'DefinitionName' field of items in the 'TrainingJobDefinitions' field. For more
details on how tuners are created from multiple estimators, see ``create()``
documentation.
For more details on 'TrainingJobDefinition' and 'TrainingJobDefinitions' fields in tuning
job description, see
https://botocore.readthedocs.io/en/latest/reference/services/sagemaker.html#SageMaker.Client.create_hyper_parameter_tuning_job
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$attach(
tuning_job_name,
sagemaker_session = NULL,
job_details = NULL,
estimator_cls = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{tuning_job_name}}{(str): The name of the hyperparameter tuning job to attach to.}
\item{\code{sagemaker_session}}{(sagemaker.session.Session): Session object which manages
interactions with Amazon SageMaker APIs and any other AWS services needed.
If not specified, one is created using the default AWS configuration chain.}
\item{\code{job_details}}{(dict): The response to a ``DescribeHyperParameterTuningJob`` call.
If not specified, the ``HyperparameterTuner`` will perform one such call with
the provided hyperparameter tuning job name.}
\item{\code{estimator_cls}}{: It can take one of the following two forms.
(str): The estimator class name associated with the training jobs, e.g.
'sagemaker.estimator.Estimator'. If not specified, the ``HyperparameterTuner``
will try to derive the correct estimator class from training job metadata,
defaulting to :class:~`Estimator` if it is unable to
determine a more specific class.
(dict[str, str]): This form should be used only when the 'TrainingJobDefinitions'
field (list) is present in tuning job description. In this scenario training
jobs could be created from different training job definitions in the
'TrainingJobDefinitions' field, each of which would be mapped to a different
estimator after the ``attach()`` call. The ``estimator_cls`` should then be a
dictionary to specify estimator class names for individual estimators as
needed. The keys should be the 'DefinitionName' value of items in
'TrainingJobDefinitions', which would be used as estimator names in the
resulting tuner instance.
# Example #1 - assuming we have the following tuning job description, which has the
# 'TrainingJobDefinition' field present using a SageMaker built-in algorithm (i.e. PCA),
# and ``attach()`` can derive the estimator class from the training image.
# So ``estimator_cls`` would not be needed.
# .. code:: R
list(
'BestTrainingJob'= 'best_training_job_name',
'TrainingJobDefinition' = list(
'AlgorithmSpecification' = list(
'TrainingImage'= '174872318107.dkr.ecr.us-west-2.amazonaws.com/pca:1'
)
)
)
#>>> my_tuner.fit()
#>>> job_name = my_tuner$latest_tuning_job$name
#Later on:
#>>> attached_tuner = HyperparameterTuner.attach(job_name)
#>>> attached_tuner.deploy()
#Example #2 - assuming we have the following tuning job description, which has a 2-item
#list for the 'TrainingJobDefinitions' field. In this case 'estimator_cls' is only
#needed for the 2nd item since the 1st item uses a SageMaker built-in algorithm
#(i.e. PCA).
#.. code:: R
list(
'BestTrainingJob' = 'best_training_job_name',
'TrainingJobDefinitions'= list(
list(
'DefinitionName'= 'estimator_pca',
'AlgorithmSpecification'= list(
'TrainingImage'= '174872318107.dkr.ecr.us-west-2.amazonaws.com/pca:1')
),
list(
'DefinitionName'= 'estimator_byoa',
'AlgorithmSpecification' = list(
'TrainingImage'= '123456789012.dkr.ecr.us-west-2.amazonaws.com/byoa:latest')
)
)
)
>>> my_tuner.fit()
>>> job_name = my_tuner.latest_tuning_job.name
Later on:
>>> attached_tuner = HyperparameterTuner.attach(
>>> job_name,
>>> estimator_cls={
>>> 'estimator_byoa': 'org.byoa.Estimator'
>>> })
>>> attached_tuner.deploy()}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
sagemaker.tuner.HyperparameterTuner: A ``HyperparameterTuner``
instance with the attached hyperparameter tuning job.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-deploy"></a>}}
\if{latex}{\out{\hypertarget{method-deploy}{}}}
\subsection{Method \code{deploy()}}{
Deploy the best trained or user specified model to an Amazon
SageMaker endpoint and return a ``sagemaker.Predictor`` object.
For more information:
http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$deploy(
initial_instance_count,
instance_type,
accelerator_type = NULL,
endpoint_name = NULL,
wait = TRUE,
model_name = NULL,
kms_key = NULL,
data_capture_config = NULL,
...
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{initial_instance_count}}{(int): Minimum number of EC2 instances to
deploy to an endpoint for prediction.}
\item{\code{instance_type}}{(str): Type of EC2 instance to deploy to an endpoint
for prediction, for example, 'ml.c4.xlarge'.}
\item{\code{accelerator_type}}{(str): Type of Elastic Inference accelerator to
attach to an endpoint for model loading and inference, for
example, 'ml.eia1.medium'. If not specified, no Elastic
Inference accelerator will be attached to the endpoint. For more
information:
https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html}
\item{\code{endpoint_name}}{(str): Name to use for creating an Amazon SageMaker
endpoint. If not specified, the name of the training job is
used.}
\item{\code{wait}}{(bool): Whether the call should wait until the deployment of
model completes (default: True).}
\item{\code{model_name}}{(str): Name to use for creating an Amazon SageMaker
model. If not specified, the name of the training job is used.}
\item{\code{kms_key}}{(str): The ARN of the KMS key that is used to encrypt the
data on the storage volume attached to the instance hosting the
endpoint.}
\item{\code{data_capture_config}}{(sagemaker.model_monitor.DataCaptureConfig): Specifies
configuration related to Endpoint data capture for use with
Amazon SageMaker Model Monitoring. Default: None.}
\item{\code{...}}{: Other arguments needed for deployment. Please refer to the
``create_model()`` method of the associated estimator to see
what other arguments are needed.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
sagemaker.predictor.Predictor: A predictor that provides a ``predict()``
method, which can be used to send requests to the Amazon SageMaker endpoint
and obtain inferences.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-stop_tunning_job"></a>}}
\if{latex}{\out{\hypertarget{method-stop_tunning_job}{}}}
\subsection{Method \code{stop_tunning_job()}}{
Stop latest running hyperparameter tuning job.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$stop_tunning_job()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-describe"></a>}}
\if{latex}{\out{\hypertarget{method-describe}{}}}
\subsection{Method \code{describe()}}{
Returns a response from the DescribeHyperParameterTuningJob API call.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$describe()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-wait"></a>}}
\if{latex}{\out{\hypertarget{method-wait}{}}}
\subsection{Method \code{wait()}}{
Wait for latest hyperparameter tuning job to finish.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$wait()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-best_estimator"></a>}}
\if{latex}{\out{\hypertarget{method-best_estimator}{}}}
\subsection{Method \code{best_estimator()}}{
Return the estimator that has best training job attached. The trained model can then
be deployed to an Amazon SageMaker endpoint and return a ``sagemaker.Predictor``
object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$best_estimator(best_training_job = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{best_training_job}}{(dict): Dictionary containing "TrainingJobName" and
"TrainingJobDefinitionName".
Example:
.. code:: R
list(
"TrainingJobName"= "my_training_job_name",
"TrainingJobDefinitionName" = "my_training_job_definition_name"
)}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
sagemaker.estimator.EstimatorBase: The estimator that has the best training job
attached.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-best_training_job"></a>}}
\if{latex}{\out{\hypertarget{method-best_training_job}{}}}
\subsection{Method \code{best_training_job()}}{
Return name of the best training job for the latest hyperparameter
tuning job.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$best_training_job()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-delete_endpoint"></a>}}
\if{latex}{\out{\hypertarget{method-delete_endpoint}{}}}
\subsection{Method \code{delete_endpoint()}}{
Delete an Amazon SageMaker endpoint.
If an endpoint name is not specified, this defaults to looking for an
endpoint that shares a name with the best training job for deletion.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$delete_endpoint(endpoint_name = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{endpoint_name}}{(str): Name of the endpoint to delete}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-hyperparameter_ranges"></a>}}
\if{latex}{\out{\hypertarget{method-hyperparameter_ranges}{}}}
\subsection{Method \code{hyperparameter_ranges()}}{
Return the hyperparameter ranges in a dictionary to be used as part
of a request for creating a hyperparameter tuning job.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$hyperparameter_ranges()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-hyperparameter_ranges_list"></a>}}
\if{latex}{\out{\hypertarget{method-hyperparameter_ranges_list}{}}}
\subsection{Method \code{hyperparameter_ranges_list()}}{
Return a dictionary of hyperparameter ranges for all estimators in ``estimator_dict``
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$hyperparameter_ranges_list()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-analytics"></a>}}
\if{latex}{\out{\hypertarget{method-analytics}{}}}
\subsection{Method \code{analytics()}}{
An instance of HyperparameterTuningJobAnalytics for this latest
tuning job of this tuner. The analytics object gives you access to tuning
results summarized into a data frame.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$analytics()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-transfer_learning_tuner"></a>}}
\if{latex}{\out{\hypertarget{method-transfer_learning_tuner}{}}}
\subsection{Method \code{transfer_learning_tuner()}}{
Creates a new ``HyperparameterTuner`` by copying the request fields
from the provided parent to the new instance of ``HyperparameterTuner``.
Followed by addition of warm start configuration with the type as
"TransferLearning" and parents as the union of provided list of
``additional_parents`` and the ``self``. Also, training image in the new
tuner's estimator is updated with the provided ``training_image``.
Examples:
>>> parent_tuner = HyperparameterTuner.attach(tuning_job_name="parent-job-1")
>>> transfer_learning_tuner = parent_tuner.transfer_learning_tuner(
>>> additional_parents={"parent-job-2"})
Later On:
>>> transfer_learning_tuner.fit(inputs={})
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$transfer_learning_tuner(
additional_parents = NULL,
estimator = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{additional_parents}}{(set{str}): Set of additional parents along with
the self to be used in warm starting}
\item{\code{estimator}}{(sagemaker.estimator.EstimatorBase): An estimator object
that has been initialized with the desired configuration. There
does not need to be a training job associated with this
instance.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
sagemaker.tuner.HyperparameterTuner: ``HyperparameterTuner``
instance which can be used to launch transfer learning tuning job.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-identical_dataset_and_algorithm_tuner"></a>}}
\if{latex}{\out{\hypertarget{method-identical_dataset_and_algorithm_tuner}{}}}
\subsection{Method \code{identical_dataset_and_algorithm_tuner()}}{
Creates a new ``HyperparameterTuner`` by copying the request fields
from the provided parent to the new instance of ``HyperparameterTuner``.
Followed by addition of warm start configuration with the type as
"IdenticalDataAndAlgorithm" and parents as the union of provided list of
``additional_parents`` and the ``self``
Examples:
>>> parent_tuner = HyperparameterTuner.attach(tuning_job_name="parent-job-1")
>>> identical_dataset_algo_tuner = parent_tuner.identical_dataset_and_algorithm_tuner(
>>> additional_parents={"parent-job-2"})
Later On:
>>> identical_dataset_algo_tuner.fit(inputs={})
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$identical_dataset_and_algorithm_tuner(
additional_parents = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{additional_parents}}{(set{str}): Set of additional parents along with
the self to be used in warm starting}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
sagemaker.tuner.HyperparameterTuner: HyperparameterTuner instance
which can be used to launch identical dataset and algorithm tuning
job.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-create"></a>}}
\if{latex}{\out{\hypertarget{method-create}{}}}
\subsection{Method \code{create()}}{
Factory method to create a ``HyperparameterTuner`` instance. It takes one or more
estimators to obtain configuration information for training jobs that are created as the
result of a hyperparameter tuning job. The estimators are provided through a dictionary
(i.e. ``estimator_dict``) with unique estimator names as the keys. For individual
estimators separate objective metric names and hyperparameter ranges should be provided in
two dictionaries, i.e. ``objective_metric_name_dict`` and ``hyperparameter_ranges_dict``,
with the same estimator names as the keys. Optional metrics definitions could also be
provided for individual estimators via another dictionary ``metric_definitions_dict``.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$create(
estimator_list,
objective_metric_name_list,
hyperparameter_ranges_list,
metric_definitions_list = NULL,
base_tuning_job_name = NULL,
strategy = "Bayesian",
objective_type = "Maximize",
max_jobs = 1,
max_parallel_jobs = 1,
tags = NULL,
warm_start_config = NULL,
early_stopping_type = "Off"
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{estimator_list}}{(dict[str, sagemaker.estimator.EstimatorBase]): Dictionary of estimator
instances that have been initialized with the desired configuration. There does not
need to be a training job associated with the estimator instances. The keys of the
dictionary would be referred to as "estimator names".}
\item{\code{objective_metric_name_list}}{(dict[str, str]): Dictionary of names of the objective
metric for evaluating training jobs. The keys are the same set of estimator names
as in ``estimator_dict``, and there must be one entry for each estimator in
``estimator_dict``.}
\item{\code{hyperparameter_ranges_list}}{(dict[str, dict[str, sagemaker.parameter.ParameterRange]]):
Dictionary of tunable hyperparameter ranges. The keys are the same set of estimator
names as in estimator_dict, and there must be one entry for each estimator in
estimator_dict. Each value is a dictionary of sagemaker.parameter.ParameterRange
instance, which can be one of three types: Continuous, Integer, or Categorical.
The keys of each ParameterRange dictionaries are the names of the hyperparameter,
and the values are the appropriate parameter range class to represent the range.}
\item{\code{metric_definitions_list}}{(dict(str, list[dict]])): Dictionary of metric definitions.
The keys are the same set or a subset of estimator names as in estimator_dict,
and there must be one entry for each estimator in estimator_dict. Each value is
a list of dictionaries that defines the metric(s) used to evaluate the training
jobs (default: None). Each of these dictionaries contains two keys: 'Name' for the
name of the metric, and 'Regex' for the regular expression used to extract the
metric from the logs. This should be defined only for hyperparameter tuning jobs
that don't use an Amazon algorithm.}
\item{\code{base_tuning_job_name}}{(str): Prefix for the hyperparameter tuning job name when the
:meth:`~sagemaker.tuner.HyperparameterTuner.fit` method launches. If not specified,
a default job name is generated, based on the training image name and current
timestamp.}
\item{\code{strategy}}{(str): Strategy to be used for hyperparameter estimations
(default: 'Bayesian').}
\item{\code{objective_type}}{(str): The type of the objective metric for evaluating training jobs.
This value can be either 'Minimize' or 'Maximize' (default: 'Maximize').}
\item{\code{max_jobs}}{(int): Maximum total number of training jobs to start for the
hyperparameter tuning job (default: 1).}
\item{\code{max_parallel_jobs}}{(int): Maximum number of parallel training jobs to start
(default: 1).}
\item{\code{tags}}{(list[dict]): List of tags for labeling the tuning job (default: None). For more,
see https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.}
\item{\code{warm_start_config}}{(sagemaker.tuner.WarmStartConfig): A ``WarmStartConfig`` object that
has been initialized with the configuration defining the nature of warm start
tuning job.}
\item{\code{early_stopping_type}}{(str): Specifies whether early stopping is enabled for the job.
Can be either 'Auto' or 'Off' (default: 'Off'). If set to 'Off', early stopping
will not be attempted. If set to 'Auto', early stopping of some training jobs may
happen, but is not guaranteed to.}
\item{\code{tuning}}{job (default: 1).}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
sagemaker.tuner.HyperparameterTuner: a new ``HyperparameterTuner`` object that can
start a hyperparameter tuning job with one or more estimators.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-.attach_estimator"></a>}}
\if{latex}{\out{\hypertarget{method-.attach_estimator}{}}}
\subsection{Method \code{.attach_estimator()}}{
Add an estimator with corresponding objective metric name, parameter ranges and metric
definitions (if applicable). This method is called by other functions and isn't required
to be called directly
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$.attach_estimator(
estimator_name,
estimator,
objective_metric_name,
hyperparameter_ranges,
metric_definitions = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{estimator_name}}{(str): A unique name to identify an estimator within the
hyperparameter tuning job, when more than one estimator is used with
the same tuning job (default: None).}
\item{\code{estimator}}{(sagemaker.estimator.EstimatorBase): An estimator object
that has been initialized with the desired configuration. There
does not need to be a training job associated with this
instance.}
\item{\code{objective_metric_name}}{(str): Name of the metric for evaluating
training jobs.}
\item{\code{hyperparameter_ranges}}{(dict[str, sagemaker.parameter.ParameterRange]): Dictionary of
parameter ranges. These parameter ranges can be one
of three types: Continuous, Integer, or Categorical. The keys of
the dictionary are the names of the hyperparameter, and the
values are the appropriate parameter range class to represent
the range.}
\item{\code{metric_definitions}}{(list[dict]): A list of dictionaries that defines
the metric(s) used to evaluate the training jobs (default:
None). Each dictionary contains two keys: 'Name' for the name of
the metric, and 'Regex' for the regular expression used to
extract the metric from the logs. This should be defined only
for hyperparameter tuning jobs that don't use an Amazon
algorithm.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-print"></a>}}
\if{latex}{\out{\hypertarget{method-print}{}}}
\subsection{Method \code{print()}}{
Printer.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$print(...)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{...}}{(ignored).}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HyperparameterTuner$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
f121b8cab980c3d88c051a53512adb8d8dea2406
|
da54c2a9cbf91a38b68b8b5424e5297ed83600fb
|
/man/inspect_panelist_preference.Rd
|
6c1e51986c75d277057586a7a45bf6aefd75812e
|
[
"MIT"
] |
permissive
|
isoletslicer/sensehubr
|
19242d0d9bedb949bb617dbb274b2d1c95410182
|
85f70435439030a6d11ec5fd1e793c68ca692ae1
|
refs/heads/master
| 2020-06-19T07:30:55.226074
| 2019-08-20T04:20:51
| 2019-08-20T04:20:51
| 196,618,502
| 0
| 0
|
NOASSERTION
| 2019-08-20T03:15:11
| 2019-07-12T17:23:46
|
R
|
UTF-8
|
R
| false
| true
| 472
|
rd
|
inspect_panelist_preference.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inspect-panelist-preference.R
\name{inspect_panelist_preference}
\alias{inspect_panelist_preference}
\title{Inspect preference}
\usage{
inspect_panelist_preference(res_preference, dimension = c(1, 2))
}
\arguments{
\item{res_preference}{output of preference analysis}
\item{dimension}{dimension to focus, integer vector of length 2}
}
\description{
Evaluate panelist in preference analysis.
}
|
41504d959b99702433255d920eea1f8190d7905d
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/A_github/sources/authors/864/jointDiag/ajd.R
|
2b1156b39fa6659be860a296d19d922845b89026
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,125
|
r
|
ajd.R
|
## a wrapper to joint approximate functions
## Run one or more joint approximate diagonalization algorithms on the set
## of matrices M.
##
## M         array/list of matrices to jointly diagonalize
## A0        initial guess forwarded to 'jedi'
## B0        initial guess forwarded to the remaining algorithms
## eps       convergence tolerance (default: machine epsilon)
## itermax   maximum number of iterations
## keepTrace whether the underlying algorithm should record its iteration trace
## methods   character vector naming the algorithm(s) to run
##
## With a single method the bare result of that algorithm is returned; with
## several methods a named list (one entry per method) is returned, keyed by
## method name. An empty 'methods' vector returns NULL, as before.
ajd <- function(M, A0 = NULL, B0 = NULL, eps = .Machine$double.eps, itermax = 200,
                keepTrace = FALSE, methods = c("jedi")) {
  # Map a method name onto the corresponding routine. An unknown name now
  # fails with an explicit message instead of the original's confusing
  # "object 'res' not found" error.
  run_one <- function(method) {
    switch(method,
           jedi   = jedi(M, A0, eps, itermax, keepTrace),
           uwedge = uwedge(M, B0, eps, itermax, keepTrace),
           jadiag = jadiag(M, B0, eps, itermax, keepTrace),
           ffdiag = ffdiag(M, B0, eps, itermax, keepTrace),
           qdiag  = qdiag(M, B0, eps, itermax, keepTrace),
           stop("unknown method: ", method, call. = FALSE))
  }
  nmeth <- length(methods)
  if (nmeth == 0) {
    # Preserve the original behaviour for an empty 'methods' vector.
    return(NULL)
  }
  if (nmeth == 1) {
    # Single method: return the bare result (no enclosing list), as before.
    return(run_one(methods))
  }
  # Several methods: a named list of results, keyed by method name.
  res <- lapply(methods, run_one)
  names(res) <- methods
  res
}
|
3064a132ee7ecc104cb7fe8013a78e24f6592485
|
5bb2c8ca2457acd0c22775175a2722c3857a8a16
|
/man/coefficients-Zelig-method.Rd
|
d3bb4ee6b1876074721c862dcc8d247871e4410e
|
[] |
no_license
|
IQSS/Zelig
|
d65dc2a72329e472df3ca255c503b2e1df737d79
|
4774793b54b61b30cc6cfc94a7548879a78700b2
|
refs/heads/master
| 2023-02-07T10:39:43.638288
| 2023-01-25T20:41:12
| 2023-01-25T20:41:12
| 14,958,190
| 115
| 52
| null | 2023-01-25T20:41:13
| 2013-12-05T15:57:10
|
R
|
UTF-8
|
R
| false
| true
| 432
|
rd
|
coefficients-Zelig-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model-zelig.R
\docType{methods}
\name{coefficients,Zelig-method}
\alias{coefficients,Zelig-method}
\title{Method for extracting estimated coefficients from Zelig objects}
\usage{
\S4method{coefficients}{Zelig}(object)
}
\arguments{
\item{object}{An Object of Class Zelig}
}
\description{
Method for extracting estimated coefficients from Zelig objects
}
|
f95e0d2ebe54469a5669f55e42d42d2068596f1e
|
6d5a7d0a5f55520fceb0a2868bc6b7fb7903075a
|
/man/as.tree.Rd
|
a7a65e07358ee33ca9d458f6af3b62106ab355b5
|
[] |
no_license
|
meta-QSAR/simple-tree
|
7dbb617aff4e637d1fcce202890f322b99364494
|
28ff7bf591d3330498a3c8a85d8ae5a3d27b37d5
|
refs/heads/master
| 2016-09-14T07:08:33.743705
| 2015-08-04T09:31:24
| 2015-08-04T09:31:24
| 58,349,277
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 648
|
rd
|
as.tree.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/class-tree.R
\name{as.tree}
\alias{as.tree}
\title{Convert a data frame to a tree object}
\usage{
as.tree(df, id.col = "id", parent.id.col = "parent.id", name.col = "name")
}
\arguments{
\item{df}{A data frame.}
\item{id.col}{Column name which contains the node IDs (default: "id").}
\item{parent.id.col}{Column name which contains the parent node IDs (default: "parent.id").}
\item{name.col}{Column name which contains the node names (default: "name").}
}
\value{
\code{as.tree} returns a tree object.
}
\description{
\code{as.tree} returns a tree object
}
|
4513860251374aec0a2f504ba33cddae5e51febd
|
3355b24230020e5fdcdef52a3d621cd0d96ee72e
|
/2PropTestMC1.R
|
088c261ac70a42165604744afc4602d55b1def6a
|
[] |
no_license
|
IKapenga/Stat1600-QGen
|
42f255fba63308b8b1376211066f2d6c01d1d329
|
aca46c451d5b45caf70f87d9a58a83bbcbe4f19a
|
refs/heads/master
| 2021-06-03T22:13:23.138457
| 2020-07-16T18:29:38
| 2020-07-16T18:29:38
| 134,891,183
| 0
| 2
| null | 2020-06-13T19:26:53
| 2018-05-25T18:26:50
|
R
|
UTF-8
|
R
| false
| false
| 4,679
|
r
|
2PropTestMC1.R
|
##### 2PropTestMC1 #####
# Writes "<title>.csv", an e-learning question-bank import file containing `n`
# randomly generated multiple-choice questions asking for the Z test statistic
# of a two-proportion hypothesis test. Each question occupies (8 + answers)
# CSV rows: a header block, one row per answer option, then hint and feedback.
# NOTE(review): `loc.path`, `e.path`, and `dat.size` are accepted but never
# used in the body, so the default (argument-free) call at the bottom works.
twoPropTestMC1= function(
title = "2PropTestMC1", # Question-bank title that will be easily viewable in e-learning
n = 200, # Number of questions to generate
type = "MC", # The question type, one of many possible types on e-learning
answers = 5, # Number of answers per MC question
points.per.q = 4, # Number of points the question is worth (on the test)
difficulty = 1, # An easily viewable difficulty level on e-learning
quest.txt1 = "A new pesticide is tested on a group of crop-destroying beetles. The sample data shows that ",
quest.txt2 = " of this first group dies as a result. A second group of beetles is dosed with a standard pesticide, and ",
quest.txt3 = " of this second group dies as a result. ",
quest.txt4 = " beetles are in the first test-pesticide group and ",
quest.txt5 = " beetles are in the second standard-pesticide group. What is the Z test statistic for a hypothesis test on the difference between proportions (first group - second)?", # The above 5 question texts are static texts for the full question
dat.size = 1, # This is the number of values to be randomly generated for the dataset
digits = 2, # This is the number of decimal places to round off the data
loc.path , # This is the local path used to store any randomly generated image files
e.path , # This is the path on e-learning used to store any above-implemented image files
hint = "You need to calculate the Z test statistic. Don't take the absolute value. Pick the closest answer.", # This is a student hint, visible to them during the exam on e-learning
feedback = "Did you use (phat1 - phat2)/SE?" # This is student feedback, visible after the exam
)
{
param <- c("NewQuestion","ID","Title","QuestionText","Points","Difficulty",
rep("Option", answers),"Hint","Feedback") # These are setting row names for the CSV file
questions <- data.frame() # This opens a data frame to store the randomly generated questions below
for(i in 1:n)
{
ID <- paste(title, i, sep = "-") # The ID of the specific question within the bank, title + question number in the loop
points <- sample(c(rep(0,answers-1),100),replace=F) # The proportion of points assigned to each possible answer, 1 if correct or 0 if incorrect
corr.ind <- 6 + which.max(points) # This is the row index of the correct answer
data1 <- sample(seq(.4, .5, 10^-digits), size = 1) # randomly generating sample proportion for sample 1
data2 <- sample(seq(.35,.45, 10^-digits), size = 1) # randomly generating sample proportion for sample 2
data3 <- sample(100:200, size = 1) # randomly generating sample size for sample 1
data4 <- sample(100:200, size = 1) # randomly generating sample size for sample 2
corr.ans <- round((data1-data2)/sqrt(data1*(1-data1)/data3+data2*(1-data2)/data4), digits) # this is the correct answer: (phat1-phat2)/SE with unpooled SE
up.min <- round(corr.ans + .05, digits) # This is the minimum value for incorrect answers above the correct answer
down.max <- round(corr.ans - .05, digits) # This is the maximum value for incorrect answers below the correct answer
# Distractors are drawn at least 0.05 away from the correct answer; the two
# outer branches guard against an empty candidate range near -1 or 4.
ans.txt <- sample(if(corr.ans < -.8){seq(up.min, 4, 10^-digits)}
else{if(corr.ans > 2.85){seq(-1, down.max, 10^-digits)}
else{c(seq(-1, down.max, 10^-digits),
seq(up.min, 4, 10^-digits))}},
size = answers) # These are randomly generated incorrect answers.
content <- c(type, ID, ID, paste(quest.txt1, data1, quest.txt2,
data2, quest.txt3, data3, quest.txt4,
data4, quest.txt5,
collapse = "", sep= ""),
points.per.q, difficulty, points, hint, feedback) # This is collecting a lot of the above information into a single vector
options <- c(rep("",6), ans.txt, rep("",2)) # This is collecting the incorrect answers above, and indexing them correctly by row
options[corr.ind] <- corr.ans # This is imputing the correct answer at the appropriate row index
questions[(1+(8+answers)*i):((8+answers)*(i+1)),1] <- param # This is indexing and storing all the row names
questions[(1+(8+answers)*i):((8+answers)*(i+1)),2] <- content # Indexing and storing all the content
questions[(1+(8+answers)*i):((8+answers)*(i+1)),3] <- options # Indexing and storing the answers, both incorrect and correct
}
# The loop index starts at i = 1, so rows 1:(8+answers) were never written;
# this subset drops those empty (NA) leading rows before export.
questions <- questions[(9+answers):((8+answers)*(n+1)),] # Storing only what's needed for e-learning upload as a CSV file
write.table(questions, sep=",", file=paste(title, ".csv", sep = ""),
row.names=F, col.names=F) # Writing the CSV file
}
twoPropTestMC1() # creating the csv file
|
957dfbbff734fa9303e60d7a2abd1bca431e9719
|
f547a9f2f59d51416399e7798c395b04c6f3713c
|
/HDT8.R
|
5d366eeec57cddffd599061d5b53d22515e46bc4
|
[] |
no_license
|
andreaeliasc/HDT8-RNA
|
e0a8fccbba125e6311b83ae6dcdca7f5d342650f
|
a0636f391058982125cb73036b08344cd688ae20
|
refs/heads/main
| 2023-04-14T22:00:08.979773
| 2021-05-04T06:01:49
| 2021-05-04T06:01:49
| 364,089,170
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,947
|
r
|
HDT8.R
|
# Package dependencies for the HDT8 neural-network exercise.
# Fix: the original first line read "ibrary(e1071)" (missing "l"), which would
# abort the whole script before any model ran. Duplicate library() calls
# (caret, e1071, cluster each appeared multiple times) have been collapsed;
# library() is idempotent, so this does not change behavior.
library(e1071)        # svm, cmeans
library(caret)        # train(), confusionMatrix()
library(rJava)
library(nnet)         # single-hidden-layer networks
library(RWeka)        # Weka MultilayerPerceptron wrapper
library(neural)
library(dummy)
library(neuralnet)
library(dplyr)
library(tidyr)
library(rpart)
library(tree)
library(rpart.plot)
library(randomForest)
library(cluster)      # silhouette computation
library(mclust)       # mixtures of Gaussians
library(fpc)          # plotcluster
library(NbClust)      # choosing the number of clusters
library(factoextra)   # clustering plots
library(corrplot)     # install.packages("corrplot")
library(ANN2)         # neuralnetwork()
## Data split and creation of the categorical outcome variable
# Load the data set; 70% of the rows will be used for training and 30% for testing.
porcentaje<-0.7
datos<-read.csv("train.csv", stringsAsFactors = FALSE)
set.seed(123)
# House categorization based on the clustering from the previous worksheet:
# add a house-type column derived from SalePrice ("3" = cheapest tier,
# "1" = most expensive tier; prices >= 756000 become NA).
datos$grupo <- ifelse(datos$SalePrice<178000, "3",
ifelse(datos$SalePrice<301000, "2",
ifelse(datos$SalePrice<756000,"1",NA)))
# Convert the group variable to a factor
datos$grupo <- as.factor(datos$grupo)
## Exploratory analysis: smoothed scatterplots of candidate predictors vs price
scatter.smooth(datos$LotFrontage, datos$SalePrice)
scatter.smooth(datos$LotArea, datos$SalePrice)
scatter.smooth(datos$GrLivArea, datos$SalePrice)
scatter.smooth(datos$YearBuilt, datos$SalePrice)
scatter.smooth(datos$BsmtUnfSF, datos$SalePrice)
scatter.smooth(datos$TotalBsmtSF, datos$SalePrice)
scatter.smooth(datos$X1stFlrSF, datos$SalePrice)
scatter.smooth(datos$GarageYrBlt, datos$SalePrice)
scatter.smooth(datos$GarageArea, datos$SalePrice)
scatter.smooth(datos$YearRemodAdd, datos$SalePrice)
scatter.smooth(datos$TotRmsAbvGrd, datos$SalePrice)
scatter.smooth(datos$MoSold, datos$SalePrice)
scatter.smooth(datos$OverallQual, datos$SalePrice)
# Keep only the variables used by the models below; drop incomplete rows
datos <- datos[,c("LotFrontage","LotArea","GrLivArea","GarageArea","YearRemodAdd","SalePrice" ,"grupo")]
datos <- na.omit(datos)
head(datos, 10)
# 70/30 train/test split by row sampling
porcentaje<-0.7
corte <- sample(nrow(datos),nrow(datos)*porcentaje)
train<-datos[corte,]
test<-datos[-corte,]
head(train)
head(test)
#-------------------------------------------------
# Neural network with nnet
#-------------------------------------------------
# Single-hidden-layer network (6 units) fit on predictor columns 1:5 with the
# class label `grupo` (column 7) as outcome.
modelo.nn2 <- nnet(grupo~.,data = train[,c(1:5,7)], size=6, rang=0.0000001,
decay=5e-4, maxit=500)
modelo.nn2
# Predict per-class scores on the test predictors
prediccion2 <- as.data.frame(predict(modelo.nn2, newdata = test[,1:5]))
# Pick, for each row, the name of the highest-scoring class column
columnaMasAlta<-apply(prediccion2, 1, function(x) colnames(prediccion2)[which.max(x)])
columnaMasAlta
test$prediccion2<-columnaMasAlta # attach the predicted class to the test set
head(test, 30)
# Confusion matrix for this model
cfm<-confusionMatrix(as.factor(test$prediccion2),test$grupo)
cfm
#-------------------------------------------------
# Neural network with RWeka (Weka MultilayerPerceptron)
#-------------------------------------------------
NB <- make_Weka_classifier("weka/classifiers/functions/MultilayerPerceptron")
NB
WOW(NB) # list the Weka options accepted by this classifier
# First fit: H = number of hidden nodes, N = training epochs (per Weka's
# MultilayerPerceptron options) -- TODO confirm G=TRUE is intended here.
nnodos='6'
modelo.bp<-NB(grupo~., data = train[,c(1:5,7)], control=Weka_control(H=nnodos, N=4000, G=TRUE), options=NULL)
# Predict on the test predictors
test$prediccionWeka<-predict(modelo.bp, newdata = test[,1:5])
head(test[,c(1:5,7,9)], 30)
# Confusion matrix for this model
cfmWeka<-confusionMatrix(test$prediccionWeka,test$grupo)
cfmWeka
# Second Weka fit: 4 hidden nodes, 1000 epochs. Note this overwrites NB,
# modelo.bp, test$prediccionWeka and cfmWeka from the first fit.
NB <- make_Weka_classifier("weka/classifiers/functions/MultilayerPerceptron")
NB
WOW(NB)
nnodos='4'
modelo.bp<-NB(as.factor(grupo)~., data=train[,c(1:5,7)], control=Weka_control(H=nnodos, N=1000, G=TRUE), options=NULL)
test$prediccionWeka<-predict(modelo.bp, newdata = test[,1:5])
cfmWeka<-confusionMatrix(test$prediccionWeka,as.factor(test$grupo))
cfmWeka
corr <- data.frame(test$SalePrice,test$prediccionWeka)
#####
#-------------------------------------------------
# Neural network with caret
#-------------------------------------------------
# Fixes vs. the original:
#  * the outcome column is `grupo`, not `group` (which does not exist in these
#    data frames and aborted the script);
#  * `test` has only the 5 predictor columns in positions 1:5 -- there is no
#    column 14, so `test[,1:14]` errored;
#  * restricting `data` to train[, c(1:5, 7)] matches the nnet/RWeka models
#    above and keeps SalePrice from leaking in as a predictor of its own tiers.
modeloCaret <- train(as.factor(grupo) ~ ., data = train[, c(1:5, 7)],
                     method = "nnet", trace = FALSE)
modeloCaret
# Predict on the test predictors; keep a standalone copy in `pc` as well
pc <- test$prediccionCaret <- predict(modeloCaret, newdata = test[, 1:5])
test$prediccionCaret
# Confusion matrix for this model
cfmCaret <- confusionMatrix(as.factor(test$prediccionCaret), as.factor(test$grupo))
cfmCaret
corrN1 <- data.frame(test$SalePrice, test$prediccionCaret)
#-------------------------------------------------
# Neural networks with ANN2 (model 1: ReLU, model 2: tanh)
#-------------------------------------------------
# NOTE(review): column 5 of `datos` is YearRemodAdd, so y_train/y_test target
# YearRemodAdd rather than the class label `grupo` (column 7). That looks
# unintentional -- confirm the intended target with the author.
# Prepare train (100 random rows) and test (remaining rows) sets
random_draw <- sample(1:nrow(datos), size = 100)
X_train <- datos[random_draw, 1:4]
y_train <- datos[random_draw, 5]
X_test <- datos[setdiff(1:nrow(datos), random_draw), 1:4]
y_test <- datos[setdiff(1:nrow(datos), random_draw), 5]
# Train neural network: two hidden layers of 5 units, ReLU activations, Adam.
# Fix: the original call contained a stray leading comma (an empty argument).
NN <- neuralnetwork(X = X_train, y = y_train, hidden.layers = c(5, 5),
                    activ.functions = "relu", optim.type = 'adam',
                    learn.rates = 0.01, val.prop = 0)
# Plot the loss during training
plot(NN)
# Make predictions on the hold-out set
y_pred <- predict(NN, newdata = X_test)
View(y_pred)
# Plot predictions; `correct` flags hold-out rows predicted exactly right
correct <- (y_test == y_pred$predictions)
plot(X_test, pch = as.numeric(y_test), col = correct + 2)
# Fix: the original passed `test$correct` to confusionMatrix(), but `correct`
# was never attached to `test` (and has a different length than test$grupo),
# so the call errored. Compare predictions to the matching y_test instead.
cfm <- confusionMatrix(as.factor(y_pred$predictions), as.factor(y_test))
cfm
# Scores for the full `test` frame; keep the highest-scoring column per row.
# Fix: the original indexed colnames(prediccion2) (the nnet scores) here.
prediccion3 <- as.data.frame(predict(NN, newdata = test[,1:4]))
columnaMasAlta <- apply(prediccion3, 1, function(x) colnames(prediccion3)[which.max(x)])
columnaMasAlta
test$prediccion3 <- columnaMasAlta # attach the prediction to the test set
head(test, 30)
# Model 2: same architecture with tanh activations
random_draw2 <- sample(1:nrow(datos), size = 100)
X_train <- datos[random_draw2, 1:4]
y_train <- datos[random_draw2, 5]
X_test <- datos[setdiff(1:nrow(datos), random_draw2), 1:4]
y_test <- datos[setdiff(1:nrow(datos), random_draw2), 5]
NN <- neuralnetwork(X = X_train, y = y_train, hidden.layers = c(5, 5),
                    activ.functions = "tanh", optim.type = 'adam',
                    learn.rates = 0.01, val.prop = 0)
# Plot the loss during training
plot(NN)
# Make predictions
y_pred <- predict(NN, newdata = X_test)
View(y_pred)
# Plot predictions
correct <- (y_test == y_pred$predictions)
plot(X_test, pch = as.numeric(y_test), col = correct + 2)
# Fixes: colnames(prediccion4) (original reused prediccion2), and prediccion4
# is now computed BEFORE the confusion matrix that referenced it.
prediccion4 <- as.data.frame(predict(NN, newdata = test[,1:4]))
columnaMasAlta <- apply(prediccion4, 1, function(x) colnames(prediccion4)[which.max(x)])
columnaMasAlta
test$prediccion4 <- columnaMasAlta # attach the prediction to the test set
head(test, 30)
cfmax <- confusionMatrix(as.factor(y_pred$predictions), as.factor(y_test))
cfmax
|
1346c54a6ab8cc64ffea6211dc47606e39cb4417
|
46b8efea7116a3808a2009faad5ed5b90238ec04
|
/man/rep_style_rel.Rd
|
770475b14f3de317c23d72433033f88cb05e303f
|
[] |
no_license
|
erge324/stylesim
|
205e49855323280f16d3c2dc8f604f3827ee1719
|
bd2ee33771ac5c7c480e34e71231f5504a1e488c
|
refs/heads/master
| 2022-02-25T08:20:50.295444
| 2016-03-04T10:57:00
| 2016-03-04T10:57:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,597
|
rd
|
rep_style_rel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fun_replicate-simulation.R
\name{rep_style_rel}
\alias{rep_style_rel}
\title{Replicate a Style Simulation and Investigate Effect on Cronbach's Alpha}
\usage{
rep_style_rel(reps = 1000, n = c(100, 1000), items = c(5, 10),
categ = c(3, 7), ndimc = 1, style = NULL, reversed = c(0, 0.5),
mu.s = c(-1, 1), var.s = c(0, 1), df = 10, sig = NULL, emp = TRUE,
...)
}
\arguments{
\item{reps}{Numeric, the desired number of replications.}
\item{n}{Numeric, the number of persons. If of length one, it's fixed to the
provided value. If of length two, it's sampled from a uniform distribution
using the two values as lower and upper limits, respectively.}
\item{items}{Numeric, the number of items. If of length one, it's fixed to
the provided value. If of length two, it's sampled from a uniform
distribution using the two values as lower and upper limits, respectively.}
\item{categ}{Numeric, the number of categories per item. If of length one,
it's fixed to the provided value. If of length two, it's sampled from a
uniform distribution using the two values as lower and upper limits,
respectively.}
\item{ndimc}{Numeric. Desired number of content-related latent variables (irrespective
of number of style-related latent variables).}
\item{style}{Parameter to specify which response style(s) influence the data,
can be either numeric or character. Users may choose one or more among
\code{"ERS1"} (e.g., 1 / 0 / 0 / 0 / 1), \code{"ERS2"} (e.g., 2 / 1 /
0 / 1 / 2), \code{"ARS"} (e.g., 0 / 0 / 0 / 1 / 1), \code{"ADRS"}
(e.g, -1 / -1 / 0 / 1 / 1), and \code{"MRS"} (e.g., 0 / 0 / 1 / 0 /
0). Alternatively, a user-specified vector of weights can be employed. Can
also be \code{NULL} indicating complete abscence of response styles.}
\item{reversed}{Numeric, the number of reverse-coded items. If of length one,
it's fixed to the provided value. If of length two, it's sampled from a
uniform distribution using the two values as lower and upper limits,
respectively.}
\item{mu.s}{Numeric, the response style mean. If of length one, it's fixed to
the provided value. If of length two, it's sampled from a uniform
distribution using the two values as lower and upper limits, respectively.}
\item{var.s}{Numeric, the response style variance. If of length one, it's
fixed to the provided value. If of length two, it's sampled from a uniform
distribution using the two values as lower and upper limits, respectively.}
\item{df}{Numeric. The df-parameter of the Wishart distribution from which
the covariance is drawn.}
\item{sig}{Numeric matrix. The variance-covariance matrix of the multivariate
distribution of thetas. If non-NULL, this overrides \code{var.s}.}
\item{emp}{Logical. If true, \code{mu.s} and \code{var.s}/\code{sig} specify the empirical not population mean and covariance matrix.}
\item{...}{Other parameters passed to \code{\link{sim_style_data}}.}
}
\value{
Returns a matrix of length \code{reps} with the following columns:
\item{bias}{\code{alpha} minus \code{true}}
\item{true}{Response style-free alpha}
\item{alpha}{Observed coefficient alpha}
\item{dep}{Response style-dependent alpha, equal to bias}
\item{ }{Further columns contain the input parameters such as the number of categories}
}
\description{
This function replicates \code{\link{sim_style_data}} and returns the observed
coefficient alpha as well as the response style-free alpha for every
replication sample.
}
\seealso{
The replicated function \code{\link{sim_style_data}}, covariate-free alpha \code{\link{alpha_cov}}
}
|
9cdc7b6e2e66260c11e0ddc3d02b1dca0be209e5
|
e9275362bc90afb1218c7035db5e993d2cf256aa
|
/statistical_rethinking/code/quadratic_curves_from_lines.r
|
bacf8f80bad2ca26716fc4117b45a185b323f91a
|
[] |
no_license
|
hanson377/textbook_notes_and_exercises
|
620cd844926a0553c3dfc46634ae991dadbe57eb
|
09c60c523ac8730c054c535515e9cda7271d642e
|
refs/heads/main
| 2023-06-05T20:57:54.007065
| 2021-07-07T11:54:15
| 2021-07-07T11:54:15
| 383,776,330
| 0
| 0
| null | 2021-07-07T11:54:16
| 2021-07-07T11:33:58
|
Python
|
UTF-8
|
R
| false
| false
| 1,716
|
r
|
quadratic_curves_from_lines.r
|
# Polynomial (quadratic and cubic) regressions of height on standardized weight
# for the Howell1 data, using quap() (quadratic posterior approximation) from
# the rethinking package (Statistical Rethinking, ch. 4).
library(rethinking)
data(Howell1)
d <- Howell1
## first, standardize metrics
d$weight_s <- (d$weight - mean(d$weight))/sd(d$weight)
d$weight_s2 <- d$weight_s^2
# Quadratic model: mu = a + b1*w + b2*w^2, with b1 constrained positive (lognormal)
m4.5 <- quap(
alist(
height ~ dnorm(mu,sigma),
mu <- a + b1*weight_s + b2*weight_s2,
a ~ dnorm(178,20),
b1 ~ dlnorm(0,1),
b2 ~ dnorm(0,1),
sigma ~ dunif(0,50)
), data=d
)
precis(m4.5)
## calculate mean relationship and the 89% intervals of the mean and the predictions
weight.seq <- seq(from=-2.2,to=2,length.out=30)
pred_dat <- list(weight_s=weight.seq,weight_s2=weight.seq^2)
mu <- link(m4.5,data=pred_dat)
mu.mean <- apply(mu,2,mean)
mu.PI <- apply(mu,2,PI,prob=0.89)
sim.height <- sim(m4.5,data=pred_dat)
height.PI <- apply(sim.height,2,PI,prob=0.89)
# Raw data, posterior-mean curve, and shaded mean/prediction intervals
plot(height~weight_s,d,col=col.alpha(rangi2,0.5))
lines(weight.seq,mu.mean)
shade(mu.PI,weight.seq)
shade(height.PI,weight.seq)
## add a cubic term alongside the quadratic
d$weight_s3 <- d$weight_s^3
m4.6 <- quap(
alist(
height ~ dnorm(mu,sigma),
mu <- a + b1*weight_s + b2*weight_s2 + b3*weight_s3,
a ~ dnorm(178,20),
b1 ~ dlnorm(0,1),
b2 ~ dnorm(0,10),
b3 ~ dnorm(0,10),
sigma ~ dunif(0,50)
), data=d
)
## calculate mean relationship and the 89% intervals of the mean and the predictions
weight.seq <- seq(from=-2.2,to=2,length.out=30)
pred_dat <- list(weight_s=weight.seq,weight_s2=weight.seq^2,weight_s3=weight.seq^3)
mu <- link(m4.6,data=pred_dat)
mu.mean <- apply(mu,2,mean)
mu.PI <- apply(mu,2,PI,prob=0.89)
sim.height <- sim(m4.6,data=pred_dat)
height.PI <- apply(sim.height,2,PI,prob=0.89)
# Same plot for the cubic fit
plot(height~weight_s,d,col=col.alpha(rangi2,0.5))
lines(weight.seq,mu.mean)
shade(mu.PI,weight.seq)
shade(height.PI,weight.seq)
|
03687b861878a70bc342de2d66146233ef9f7c06
|
47c81e91c91d6f321418042a69d5770b5aaadbdf
|
/tests/test_graphics.R
|
b1eede9699f183dba615e2b36ed48bc2ef02b369
|
[
"Apache-2.0"
] |
permissive
|
Kaggle/docker-rstats
|
f6e4c28638e5f9d33de59bcc56ac296da49f2176
|
2a42e7619ff99579011ca9cace98ee4604d9c068
|
refs/heads/main
| 2023-09-01T11:24:00.881089
| 2023-08-22T16:43:21
| 2023-08-22T16:43:21
| 33,904,503
| 135
| 103
|
Apache-2.0
| 2023-08-29T14:50:52
| 2015-04-14T01:46:50
|
R
|
UTF-8
|
R
| false
| false
| 179
|
r
|
test_graphics.R
|
# Smoke test: base graphics can render to a JPEG device on disk.
context("graphics")

test_that("plot", {
  out_file <- "/working/base_graphics_test.jpg"
  jpeg(out_file)
  plot(runif(10))
  dev.off()
  expect_true(file.exists(out_file))
})
|
c4fcebb2fb1d62914d81825ffac240fe948ac008
|
cb66ae3bf5bd2422e70df574340e0d5f5388eb8e
|
/Lorenz_test_presentation.R
|
768bce691beb7c6c470b675e90f754f3f1d63800
|
[] |
no_license
|
jvoorheis/MSA_Ineq
|
779f28947f243495d4c28b6841b56d2c51dc97e6
|
3dbec52e82d0ae86d6d88c6550aadba4b43cb81a
|
refs/heads/master
| 2016-08-02T22:44:29.331869
| 2013-12-28T07:50:20
| 2013-12-28T07:50:20
| 11,228,792
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,481
|
r
|
Lorenz_test_presentation.R
|
#load lorenz_data
library(data.table)
library(xtable)
# Load the Lorenz-curve statistics (presumably provides `lorenz_vm` with
# columns MSA, year, qm, Tp -- confirm against the .rda contents), per-capita
# income data (PersIncPC), and the helper functions used below
# (Lorenz_test_result, A_dom_B, B_dom_A, Lorenz_cross).
load("/media/john/Shared Linux_Windows Files/MSA Level Inequality/Data/lorenz_stats.rda")
source("/media/john/Shared Linux_Windows Files/MSA Level Inequality/Code/functions.r")
load("/media/john/Shared Linux_Windows Files/MSA Level Inequality/Data/PersInc.rda")
library(plyr)
#Clunky nested for-loop way of doing things
# Mean population per MSA (used later to split small vs large MSAs)
mean_pop<-aggregate(Population~MSA, data=PersIncPC, FUN=mean)
MSA.unique<-unique(lorenz_vm$MSA)
year.unique<-data.frame("uni"=c(1986, 1995, 2000, 2005, 2010))
# Accumulators, grown by append() inside the loop
MSA<-c()
year_A<-c()
year_B<-c()
ord<-c()
Delta_k<-c()
# For every MSA and every ordered year pair (A earlier than B), compute the 19
# studentized ordinate differences Delta_k = (q_A - q_B) / sqrt(V_A + V_B).
for (k in MSA.unique){
for (i in c(1986, 1995, 2000, 2005)){
temp.unique<-subset(year.unique, year.unique$uni>i)
for (j in temp.unique$uni){
year_A1 <- subset(lorenz_vm, lorenz_vm$year==i & lorenz_vm$MSA==k)
year_B1 <- subset(lorenz_vm, lorenz_vm$year==j & lorenz_vm$MSA==k)
# NOTE(review): the last two conditions are identical (both test year_B1);
# the second was presumably meant to test year_A1. Harmless given the
# equal-length check, but worth confirming.
if (length(year_B1$MSA)==length(year_A1$MSA) & length(year_B1$MSA)!=0 & length(year_B1$MSA)!=0){
if (year_A1$Tp[1]>0 & year_B1$Tp[1]>0){
Delta_k1 <- (year_A1$qm - year_B1$qm)/(sqrt(year_A1$Tp + year_B1$Tp))
}
else{
# First ordinate has non-positive variance: force its statistic to 0 and
# use ordinates 2:19 only.
Delta_k1 <- c(0, ((year_A1$qm[2:19] - year_B1$qm[2:19])/(sqrt(year_A1$Tp[2:19] + year_B1$Tp[2:19]))))
}
MSA<-append(MSA,c(rep(k, 19)))
year_A<-append(year_A, rep(i, 19))
year_B<-append(year_B, rep(j, 19))
ord<-append(ord,1:19)
Delta_k<-append(Delta_k, Delta_k1)
}
}
}
}
# One row per (MSA, year pair, ordinate)
Test_stats<-data.frame(MSA, year_A, year_B, ord, Delta_k)
# Per-(MSA, year pair) test outcome plus a dominance indicator
test_results<-ddply(Test_stats, .variables=c("MSA", "year_A", "year_B"), function(x) c("MSA"=x$MSA, "year_A"=x$year_A, "year_B"=x$year_B, "Test_result"=Lorenz_test_result(x$Delta_k), "Lorenz_dom"=A_dom_B(x$Delta_k)))
# Attach mean population to each row via a keyed data.table join on MSA
Test_stats<-data.table(Test_stats)
mean_pop<-data.table(mean_pop)
setkey(mean_pop, MSA)
setkey(Test_stats, MSA)
Test_stats<-Test_stats[mean_pop, allow.cartesian=T]
# Collapse the 19 ordinates of each (MSA, year pair) into one outcome per test
test_results_1<-aggregate(Delta_k~MSA+year_A+year_B+Population, data=Test_stats, FUN=Lorenz_test_result)
test_results_2<-aggregate(Delta_k~MSA+year_A+year_B+Population, data=Test_stats, FUN=A_dom_B)
test_results_3<-aggregate(Delta_k~MSA+year_A+year_B+Population, data=Test_stats, FUN=B_dom_A)
test_results_4<-aggregate(Delta_k~MSA+year_A+year_B+Population, data=Test_stats, FUN=Lorenz_cross)
# Share of MSAs in each outcome, for selected year pairs, printed as LaTeX
all_MSAs<-data.frame()
result_type<-c("A Dominates B", "A Dominates B", "A Dominates B", "A Dominates B", "B Dominates A", "B Dominates A","B Dominates A", "Lorenz Curves Cross", "Lorenz Curves Cross", "Lorenz Curves Cross")
year_A<-c(2000,2000,1995,1995,2000,2000,1995,2000,2000,1995)
year_B<-c(2010,2005,2010,2005,2010,2005,2010,2010,2005,2010)
test_means<-c(mean(subset(test_results_2, test_results_2$year_A==2000 & test_results_2$year_B==2010)$Delta_k),
mean(subset(test_results_2, test_results_2$year_A==2000 & test_results_2$year_B==2005)$Delta_k),
mean(subset(test_results_2, test_results_2$year_A==1995 & test_results_2$year_B==2010)$Delta_k),
mean(subset(test_results_2, test_results_2$year_A==1995 & test_results_2$year_B==2005)$Delta_k),
mean(subset(test_results_3, test_results_3$year_A==2000 & test_results_3$year_B==2010)$Delta_k),
mean(subset(test_results_3, test_results_3$year_A==2000 & test_results_3$year_B==2005)$Delta_k),
mean(subset(test_results_3, test_results_3$year_A==1995 & test_results_3$year_B==2010)$Delta_k),
mean(subset(test_results_4, test_results_4$year_A==2000 & test_results_4$year_B==2010)$Delta_k),
mean(subset(test_results_4, test_results_4$year_A==2000 & test_results_4$year_B==2005)$Delta_k),
mean(subset(test_results_4, test_results_4$year_A==1995 & test_results_4$year_B==2010)$Delta_k))
all_MSAs<-data.frame("Test Result"=result_type, year_A, year_B, "Proportion of MSAs"=test_means)
print(xtable(all_MSAs, display=c("d","s", "d", "d", "f")), include.rownames=F)
#For only large MSAs
# Restrict the dominance/crossing results to MSAs with mean population >= 1M
# (note: this overwrites test_results_2/3/4 in place).
test_results_2<-subset(test_results_2, test_results_2$Population>=1000000)
test_results_3<-subset(test_results_3, test_results_3$Population>=1000000)
test_results_4<-subset(test_results_4, test_results_4$Population>=1000000)
# Rebuild the same summary table as above for the large-MSA subset
all_MSAs<-data.frame()
result_type<-c("A Dominates B", "A Dominates B", "A Dominates B", "A Dominates B", "B Dominates A", "B Dominates A","B Dominates A", "Lorenz Curves Cross", "Lorenz Curves Cross", "Lorenz Curves Cross")
year_A<-c(2000,2000,1995,1995,2000,2000,1995,2000,2000,1995)
year_B<-c(2010,2005,2010,2005,2010,2005,2010,2010,2005,2010)
test_means<-c(mean(subset(test_results_2, test_results_2$year_A==2000 & test_results_2$year_B==2010)$Delta_k),
mean(subset(test_results_2, test_results_2$year_A==2000 & test_results_2$year_B==2005)$Delta_k),
mean(subset(test_results_2, test_results_2$year_A==1995 & test_results_2$year_B==2010)$Delta_k),
mean(subset(test_results_2, test_results_2$year_A==1995 & test_results_2$year_B==2005)$Delta_k),
mean(subset(test_results_3, test_results_3$year_A==2000 & test_results_3$year_B==2010)$Delta_k),
mean(subset(test_results_3, test_results_3$year_A==2000 & test_results_3$year_B==2005)$Delta_k),
mean(subset(test_results_3, test_results_3$year_A==1995 & test_results_3$year_B==2010)$Delta_k),
mean(subset(test_results_4, test_results_4$year_A==2000 & test_results_4$year_B==2010)$Delta_k),
mean(subset(test_results_4, test_results_4$year_A==2000 & test_results_4$year_B==2005)$Delta_k),
mean(subset(test_results_4, test_results_4$year_A==1995 & test_results_4$year_B==2010)$Delta_k))
all_MSAs<-data.frame("Test Result"=result_type, year_A, year_B, "Proportion of MSAs"=test_means)
print(xtable(all_MSAs, display=c("d","s", "d", "d", "f")), include.rownames=F)
# NOTE(review): `year_B==2012` references a year not built in the loop above
# (pairs only use 1986/1995/2000/2005/2010), and this bare `year_B` is the
# global vector, not a column of test_results_2 -- likely a stale line; verify.
unique(subset(test_results_2, test_results_2$Delta_k==1 & test_results_2$year_A==1995 & year_B==2012)$MSA)
# Test outcomes for a hand-picked list of the largest MSAs, printed as LaTeX
test_results_top<-subset(test_results_1, test_results_1$MSA=="New York-Northern New Jersey-Long Island" |
test_results_1$MSA=="Los Angeles-Long Beach-Santa Ana, CA" |
test_results_1$MSA=="Chicago-Naperville-Joliet, IL-IN-WI" |
test_results_1$MSA=="Dallas-Fort Worth-Arlington, TX" |
test_results_1$MSA=="Houston-Baytown-Sugar Land, TX" |
test_results_1$MSA=="Philadelphia-Camden-Wilmington, PA/NJ/D" |
test_results_1$MSA=="Washington, DC/MD/VA" |
test_results_1$MSA=="Miami-Fort Lauderdale-Miami Beach, FL")
x1<-test_results_top[,c(1:3,5)]
x1<-subset(x1, x1$year_A!=1986 & x1$year_B!=2000)
print(xtable(x1, display=c("d","s", "d", "d", "s")),include.rownames=FALSE)
# Same selection from the dominance results (test_results_2), plus Atlanta
# and Boston; overwrites test_results_top.
test_results_top<-subset(test_results_2, test_results_2$MSA=="New York-Northern New Jersey-Long Island" |
test_results_2$MSA=="Los Angeles-Long Beach-Santa Ana, CA" |
test_results_2$MSA=="Chicago-Naperville-Joliet, IL-IN-WI" |
test_results_2$MSA=="Dallas-Fort Worth-Arlington, TX" |
test_results_2$MSA=="Houston-Baytown-Sugar Land, TX" |
test_results_2$MSA=="Philadelphia-Camden-Wilmington, PA/NJ/D" |
test_results_2$MSA=="Washington, DC/MD/VA" |
test_results_2$MSA=="Miami-Fort Lauderdale-Miami Beach, FL" |
test_results_2$MSA=="Atlanta-Sandy Springs-Marietta, GA" |
test_results_2$MSA== "Boston-Cambridge-Quincy, MA-NH")
|
79967ebd58aeee3b540468d1a778129d10c64b39
|
e3259d8f489b093b246fe2fd0c4fb6999d6466bf
|
/man/camptoyear.Rd
|
4706f7e5539d724430e919c8da749fcf34283980
|
[] |
no_license
|
Franvgls/CampR
|
7baf0e8213993db85004b95d009bec33570c0407
|
48987b9f49ea492c043a5c5ec3b85eb76605b137
|
refs/heads/master
| 2023-09-04T00:13:54.220440
| 2023-08-22T14:20:40
| 2023-08-22T14:20:40
| 93,841,088
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 742
|
rd
|
camptoyear.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/camptoyear.R
\name{camptoyear}
\alias{camptoyear}
\title{Transforma series de nombres de campaña en años}
\usage{
camptoyear(x)
}
\arguments{
\item{x}{Vector con la serie de nombres de campaña a transformar a años}
}
\description{
Transforma series de nombres de campañas en formato Camp XYY a años. Si se incluyen códigos de 3 caracteres que no corresponden al patrón caracter-número-número, devuelve 0.
}
\examples{
camptoyear(Nsh)
}
\seealso{
Other datos_especies:
\code{\link{AbrvEsp}()},
\code{\link{BuscaAphia}()},
\code{\link{buscacod}()},
\code{\link{buscaesp}()},
\code{\link{hidrotodec}()},
\code{\link{talpes.camp}()}
}
\concept{datos_especies}
|
06cfe4859bb2db1118ec66fb92139c10d79eff01
|
5be5d6d7383922adb917c91caa03765eb68a37b9
|
/R/helper.R
|
aed9c5b04f80696b0f34f9d2d81f30c8f3cf9788
|
[] |
no_license
|
philipbarrett/debtLimit
|
f70c547bc5bc916ca847ab07c034572392ec6109
|
82c70931c8f6e0ee230e2ca3a8c2f597e86fdaae
|
refs/heads/master
| 2021-09-07T02:12:59.029093
| 2018-02-15T16:35:19
| 2018-02-15T16:35:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,928
|
r
|
helper.R
|
####################################################################################
# helper.R
#
# Various helper functions
# 01jun2017
# Philip Barrett, Washington DC
#
####################################################################################
rg.read <- function( cty = 'USA', start.date = "1960-01-01" ){
  ## Load quarterly growth and risk-free-rate series for one country and
  ## return them merged by date, with an r-minus-g column appended.
  ##
  ## cty:        country column name in the CSVs (default 'USA')
  ## start.date: keep observations on/after this date
  growth.raw <- read.csv('data/growthrates.csv')
  rate.raw <- read.csv('data/riskfreerates.csv')
  # Per-country frames; the annual risk-free rate is converted to a quarterly %
  growth <- data.frame( date=as.Date(growth.raw$DATE), gth=growth.raw[[cty]] )
  rate <- data.frame( date=as.Date(rate.raw$DATE),
                      rfr = 100 * ( (1+rate.raw[[cty]]/100) ^ .25 - 1 ) )
  # Merge by the shared date column, keeping dates on/after start.date
  out <- merge( subset( growth, date >= start.date ),
                subset( rate, date >= start.date ) )
  # R minus G, per period
  out$rmg <- out$rfr - out$gth
  return( out )
}
rg10.read <- function( cty = 'USA', start.date = "1960-01-01" ){
  ## Ten-year-rate analogue of rg.read(): merge quarterly growth with the
  ## quarterly-converted ten-year rate for `cty`, keeping dates strictly
  ## after start.date.
  rate.raw <- read.csv('data/tenyrrates.csv')
  growth.raw <- read.csv('data/growthrates.csv')
  # Per-country frames; the annual ten-year rate is converted to a quarterly %
  rate <- data.frame( date=as.Date(rate.raw$DATE),
                      rfr = 100 * ( (1+rate.raw[[cty]]/100) ^ .25 - 1 ) )
  growth <- data.frame( date=as.Date(growth.raw$DATE), gth=growth.raw[[cty]] )
  # NB: strict inequality here, unlike rg.read()'s >= -- preserved as-is
  out <- merge( subset( growth, date > start.date ),
                subset( rate, date > start.date ) )
  # R minus G, per period
  out$rmg <- out$rfr - out$gth
  return( out )
}
hist.read <- function( cty = 'USA', start.year = 1880, ltr=FALSE ){
# Reads the long-run historical data (Mauro fiscal file plus the JST* files --
# presumably the Jorda-Schularick-Taylor macrohistory database; confirm) for
# one country and returns a merged annual data frame with standardized
# columns: gth (GDP growth, %), rfr (short- or long-term rate), date (= year).
#
# cty:        country code; one of 'USA','UK'/'GBR','FRA','DEU','CAN','JPN','ITA'
# start.year: first year to keep
# ltr:        if TRUE use the long-term rate (ltrate) as rfr, else stir
cty.ifs <- switch( cty,
'USA'=111, 'UK'=112, 'GBR'=112, 'FRA'=132,
'DEU'=134, 'CAN'=156, 'JPN'=158, 'ITA'=136 )
# IFS code dictionary (NB: switch() yields NULL for unlisted countries)
mauro <- read.csv("data/mauro.csv")
names(mauro)[1] <- 'ifs'
mauro.cty <- subset(mauro, ifs==cty.ifs)
# The Mauro database
jst.gov <- read.csv('data/JSTgovernmentR2.csv')
jst.real <- read.csv('data/JSTrealR2.csv')
jst.mon <- read.csv('data/JSTmoneyR2.csv')
jst <- merge( merge( jst.gov, jst.real, by=c('ifs','year','country','iso') ),
jst.mon, by=c('ifs','year','country','iso') )
# Merge the JST data
jst.cty <- subset( jst, ifs==cty.ifs )
# Country-specific subset
out <- merge( mauro.cty, jst.cty, by=c('ifs','year'), all=TRUE )
# Annual GDP growth in percent (first year is NA by construction)
out$gth <- c( NA, ( out$gdp[-1] / out$gdp[-nrow(out)] - 1 ) * 100 )
out$rfr <- if(ltr) out$ltrate else out$stir
out$date <- out$year
# Standard format
return(subset(out,year>=start.year))
}
|
e97b33cc8c32d1871099b8d13046de8b2ab2852d
|
740c286f1328664983afd3d920bdf2d74715dc5d
|
/2017/2017_Fall_RShiny_DC_Crime/global.R
|
35714d1ffaaa8a12e513c074f38d54a096a9c080
|
[] |
no_license
|
WeihaoZeng/Work_Sample
|
8935f0c3e060ffca113ea5f380c018c3e1cc1a01
|
ed888dbfb129ed7fb562a9ea478af3b491ea3dbf
|
refs/heads/master
| 2020-04-09T05:34:47.125166
| 2019-01-08T22:27:38
| 2019-01-08T22:27:38
| 160,070,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 176
|
r
|
global.R
|
# global.R for a Shiny app: objects created here are available to both the
# UI and server (standard Shiny convention).
# Loads the Shiny and leaflet libraries.
library(shiny)
library(leaflet)
# Read the 2017 DC crime-incident data; keep text columns as character
# (not factors) so they can be filtered and matched directly.
crime <- read.csv("Crime_Incidents_in_2017.csv", header=TRUE, stringsAsFactors=FALSE)
|
454171e81e81572a674a73c598c3afae8023ed09
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/spatstat/examples/psp.Rd.R
|
442cd19f4aa519adec95502c90448f3aaedc4bc5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 335
|
r
|
psp.Rd.R
|
# Extracted example code for spatstat's psp() (create a line segment pattern).
library(spatstat)
### Name: psp
### Title: Create a Line Segment Pattern
### Aliases: psp
### Keywords: spatial datagen
### ** Examples
# Ten random segments (x0, y0, x1, y1) inside the default unit-square window
X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
# Same, but attaching a data frame of marks (one row of attributes per segment)
m <- data.frame(A=1:10, B=letters[1:10])
X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin(), marks=m)
|
78d2e13238bc3fa5bd4ca33e0071649bab178659
|
ddfd5c580e291f215eec57f18b43f37d9a017a10
|
/code/plot2.R
|
1601551468603151d22438a4476db6057a714629
|
[] |
no_license
|
yvgg/ExData_Plotting1
|
9eda40f4bd43cdee09a1a32e111653314f7af97d
|
e21975814252b977976d0b44e26198edbf8230ad
|
refs/heads/master
| 2021-01-14T08:40:04.901157
| 2014-11-06T11:57:56
| 2014-11-06T11:57:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 119
|
r
|
plot2.R
|
# Render the Global Active Power time series as a line chart to plot2.png.
# NOTE(review): `Datetime` and `Global_active_power` are not defined in this
# script -- it assumes they already exist in the workspace (presumably columns
# extracted from the household power-consumption data); confirm that the
# data-loading script is run first.
png(file = 'plot2.png')
plot(Datetime, Global_active_power, type="l", ylab='Global Active Power (Kilowatts)')
dev.off()
|
3387e4073e769b230c4a4caf941f26cd0740334f
|
75a6e5788f83437f1e3ea7eda1dc2bc452368b40
|
/plot4.R
|
484cacbb19c7b8e9f8a958e43e5c52a478e3571b
|
[] |
no_license
|
brbza/ExData_Plotting1
|
4e8c663d1b79b39b8433faed36c0daf8b60dbdfb
|
cbee4d4c811836830d626190ff645aadef2861fc
|
refs/heads/master
| 2021-01-18T08:56:48.839264
| 2015-01-11T01:42:39
| 2015-01-11T01:42:39
| 28,917,489
| 0
| 0
| null | 2015-01-07T14:19:08
| 2015-01-07T14:19:08
| null |
UTF-8
|
R
| false
| false
| 2,512
|
r
|
plot4.R
|
# Author: Carlos Barboza
# Date: 2015-01-10
# Coursera Exploratory Data Analysis Course, JHS
#
# Builds plot4.png: a 2x2 panel of household power measurements for
# 2007-02-01 and 2007-02-02.
#
# The filtered two-day data set (filtered_power_consumption.csv) is loaded
# from the working directory when present; otherwise it is produced from the
# raw file via filterByDays() and cached for later runs.
#
# Requires filter_by_days.R and household_power_consumption.txt to sit in the
# same directory as this script.

# Bring filterByDays() into scope.
source("filter_by_days.R", local=TRUE)

# Load the cached filtered data, creating the cache on the first run.
if (!file.exists("filtered_power_consumption.csv")) {
  usage <- filterByDays("household_power_consumption.txt", c("1/2/2007","2/2/2007"))
  write.table(usage, "filtered_power_consumption.csv", sep= ",", row.names=FALSE)
} else {
  usage <- read.table("filtered_power_consumption.csv", sep=",", stringsAsFactors=FALSE, header=TRUE)
}

# Open the png device (480x480 pixels, per the assignment spec).
png("plot4.png", width=480, height=480,units="px")

# Four panels, filled row by row.
par(mfrow = c(2, 2))

# Shared x axis: parse the timestamp column once and reuse it everywhere.
timestamps <- strptime(usage$Time, "%Y-%m-%d %H:%M:%S")

# Panel 1: Global Active Power over time.
plot(timestamps, usage$Global_active_power, type="n", ylab = "Global Active Power", xlab = "")
lines(timestamps, usage$Global_active_power)

# Panel 2: Voltage over time.
plot(timestamps, usage$Voltage, type="n", ylab = "Voltage", xlab = "datetime")
lines(timestamps, usage$Voltage)

# Panel 3: the three energy sub-metering series on one set of axes.
plot(timestamps, usage$Sub_metering_1, type="n", ylab = "Energy sub metering", xlab = "")
lines(timestamps, usage$Sub_metering_1)
lines(timestamps, usage$Sub_metering_2, col="red")
lines(timestamps, usage$Sub_metering_3, col="blue")
# Legend labels come from columns 7:9 of the data (the sub-metering columns).
legend("topright", bty="n", lty = c(1, 1, 1), col = c("black", "red", "blue"), legend = names(usage)[7:9])

# Panel 4: Global Reactive Power over time.
plot(timestamps, usage$Global_reactive_power, type="n", xlab = "datetime", ylab="Global_reactive_power")
lines(timestamps, usage$Global_reactive_power)

# Close the device, writing plot4.png.
dev.off()
|
7908b2b3582cb80f136bbf0a71875187baec97dd
|
2d17ffe9f953d3fe02c91406491e0ed0a00427da
|
/requirements.R
|
d48fed4d0e439d122c41851ca4bbe7b26751a3f2
|
[
"Apache-2.0"
] |
permissive
|
magnusnissel/pumpR
|
3ac7ae6048c441829722b8f1c89bcb26d42672ac
|
eea98410985b83edb5cc0642963328e109ec1175
|
refs/heads/master
| 2023-02-13T17:11:59.752463
| 2021-01-13T20:30:13
| 2021-01-13T20:30:13
| 80,954,928
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 197
|
r
|
requirements.R
|
# One-time project setup: install every package this project depends on.
#
# BUG FIX: the last two lines originally called installed.packages("ggthemes")
# and installed.packages("bookdown"). installed.packages() only *queries* the
# matrix of installed packages (its first argument is lib.loc, not a package
# name), so ggthemes and bookdown were silently never installed.
install.packages("readr")
install.packages("dplyr")
install.packages("purrr")
install.packages("lubridate")
install.packages("ggplot2")
install.packages("ggthemes")
install.packages("bookdown")
|
3350748cc98b0f8562e4d108ccdbee7e4322e184
|
fc8b4f69821d433a3a5976a6b772dfb38eae31bd
|
/man/hrdiweibull.Rd
|
1ae70adb5743b9b4c687978c0c624a59bcf140eb
|
[] |
no_license
|
cran/DiscreteInverseWeibull
|
73ee734dc9b5b4653f68fa7b571bb6d3faa14f96
|
d43d2c6765383f73a90e75c762c0be131b8d61ac
|
refs/heads/master
| 2021-01-18T21:46:15.030737
| 2016-05-01T00:44:40
| 2016-05-01T00:44:40
| 17,678,830
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 706
|
rd
|
hrdiweibull.Rd
|
\name{hrdiweibull}
\alias{hrdiweibull}
\title{
Hazard rate function
}
\description{
Hazard rate function for the discrete inverse Weibull distribution
}
\usage{
hrdiweibull(x, q, beta)
}
\arguments{
\item{x}{
a vector of values
}
\item{q}{
the value of the \eqn{q} parameter
}
\item{beta}{
the value of the \eqn{\beta} parameter
}
}
\value{
the hazard rate function computed on the \code{x} values
}
\details{
The hazard rate function is defined as \eqn{r(x)=P(X=x)/P(X\ge x)=(q^{x^{-\beta}}-q^{(x-1)^{-\beta}})/(1-q^{(x-1)^{-\beta}})}
}
\seealso{
\code{\link{ahrdiweibull}}
}
\examples{
q<-0.5
beta<-2.5
x<-1:10
hrdiweibull(x, q, beta)
}
\keyword{distribution}
|
8b62dd67fdc831fc4df8ab2f8c17a5ce8559c1fa
|
3fa4ab911dd36a07456263dda7b94542a898fa5c
|
/code/plot_series.R
|
63c26cbe7465578312d7610ef30e82bed3f86453
|
[
"MIT"
] |
permissive
|
ridwan608/conteStreamTemperature_northeast
|
0f5351227078015fe19530fe9e6e477594259c4c
|
92df915cb263e0aab6afffa51fb2fd6fbb0e8709
|
refs/heads/master
| 2023-03-17T03:17:20.263863
| 2018-07-25T15:34:23
| 2018-07-25T15:34:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 595
|
r
|
plot_series.R
|
# Produce one temperature time-series png per site south of latitude 40.7127,
# plus a CSV listing the series ids that were plotted.
# NOTE(review): output_file4, data_dir, df_values and the dplyr/ggplot2
# attaches are expected to come from the loaded .RData file or an outer
# script -- confirm before running this standalone.
load(file = paste0(output_file4, "/data_dir.RData"))
# Keep only sites below latitude 40.7127.
df_south <- df_values %>%
  dplyr::filter(latitude < 40.7127)
# Create the plots output directory on first use.
if(!file.exists(file.path(getwd(), data_dir, "plots"))) dir.create(file.path(getwd(), data_dir, "plots"))
series <- unique(df_south$series_id)
# One line plot of temp over datetime per series id.
# NOTE(review): the ggplot object is neither printed nor passed to ggsave();
# ggsave() falls back to last_plot(), which is set when a plot is printed --
# verify each saved file actually contains the intended series.
for(i in series) {
  ggplot(df_south[which(df_south$series_id == i), ], aes(datetime, temp)) + geom_line(color = "blue")
  ggsave(paste0(data_dir, "/plots/series_", i, ".png"))
}
# Record which series were processed.
df_series <- data.frame(series_id = series, stringsAsFactors = FALSE)
write.csv(df_series, file = paste0(data_dir, "/series_id.csv"), row.names = FALSE)
|
5f156bdd0da02fd4b8058c983d0731c436244f48
|
96c504984740d50f9d2446103bc5c560ec7b0271
|
/man/color.shape.Rd
|
0ca3a45f464a56975e9ae3bee545359dfccf4e59
|
[] |
no_license
|
ash129/penrose
|
0f5adfa05d18fba913b592d355e688d8bd409bca
|
c59716d2ae344f9a9612030ffc590852395b8ce1
|
refs/heads/master
| 2021-07-01T00:20:25.150513
| 2017-09-19T19:34:15
| 2017-09-19T19:34:15
| 104,007,536
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 786
|
rd
|
color.shape.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/color.shape.R
\name{color.shape}
\alias{color.shape}
\title{Give the hex color to a shape based on hsv coordinates and additional rules}
\usage{
color.shape(ang, s = 1, v = 1, rot = 0, type = "kite")
}
\arguments{
\item{ang}{numeric angle in degrees}
\item{s}{numeric saturation for color [0,1]}
\item{v}{numeric value for color [0,1]}
\item{rot}{numeric angle in degrees for rotating the hue parameter}
\item{type}{either "kite" or "dart" for some additional arbitrary color rules}
}
\value{
a character string giving the hex color code assigned to the shape
}
\description{
\code{color.shape()} takes hsv coordinates and returns the rgb hexcode,
but with some additional rules, for artistic purposes
}
|
a387e055eb02c41b7a1f464c45ca7b7044e9b060
|
49d331fb01b73b043959793a66c03d1713d8f4db
|
/UPDATED_CODE/run_simulations_main/run_all.R
|
8dce334719766b84b3dd3fd4b3921a9f1cf59d3b
|
[] |
no_license
|
ruslana-tymchyk/masters-diss
|
31287ff61631c4fb2726474a48c6ca3adc47447f
|
bec892c6e335748dedfaf0bcb69196d6b270b16d
|
refs/heads/master
| 2022-11-05T03:49:14.047210
| 2022-10-24T10:32:22
| 2022-10-24T10:32:22
| 258,590,418
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,485
|
r
|
run_all.R
|
# Entry point: source every simulation implementation, then run any of the
# simulations defined below.
# NOTE(review): setwd(wd) immediately after wd <- getwd() is a no-op, and the
# chain of relative setwd() calls leaves the process in ../univariate_ddm
# afterwards, so later relative paths (e.g. save() targets) resolve there --
# confirm that is intended.
#From this file you can run any simulation
#Import all simulation files
wd <- getwd()
setwd(wd)
# Each script is sourced from its own directory, presumably so its internal
# relative paths work -- verify against the sourced files.
setwd("../logistic")
source("logistic_main.R")
setwd("../multivariate_ddm")
source("mvt_main.R")
setwd("../univariate_ddm")
source("uni_main.R")
#-----------------------------------------------------------------------------------
#------Logistic Between Subjects----------------------------------------------------
#-----------------------------------------------------------------------------------
# Simulate between-subjects logistic data via simulate_logistic_bs()
# (defined in logistic_main.R, sourced above).
#
#Parameters
# n: number of participants (author's open question: does it need to be
#    divided by 2? -- TODO confirm against logistic_main.R)
# size: number of trials per participant
# reruns: number of datasets to be simulated (NOTE: if reruns = 1,
#    10 datasets will be produced, 1 for each probability)
#Output (fields of the result, per the author's notes):
# aov_p: p-value generated by ANOVA
# glm_p: p-value generated by GLM
# diff_props: difference in proportion of successes between groups
# mean_prop_real: mean proportion of success for both groups combined
#    (meant to converge with prob for large samples)
# g1_prop: proportion of successes for group 1
# g2_prop: proportion of successes for group 2
# n: number of participants
# size: number of trials per participant
# prob: probability of success for an experiment (same for both groups)
#-----------------------------------------------------------------------------------
logistic_data_bs <- simulate_logistic_bs(n = 10,
                                         size = 10,
                                         reruns = 1)
#-----------------------------------------------------------------------------------
#------Logistic Repeated Measures---------------------------------------------------
#-----------------------------------------------------------------------------------
# Simulate repeated-measures logistic data via simulate_logistic_rm()
# (defined in logistic_main.R, sourced above), time the run, and save the
# result to disk.
#
#Parameters
# n: number of participants
# size: number of trials per participant
# reruns: number of datasets to be simulated (NOTE: if reruns = 1,
#    10 datasets will be produced, 1 for each probability)
#Output (fields of the result, per the author's notes):
# aov_p: p-value generated by ANOVA
# glmm_p: p-value generated by GLMM
# diff_props: difference in proportion of successes between frames (gain vs loss)
# mean_prop_real: mean proportion of success for both frames combined
#    (meant to converge with prob for large samples)
# frame1_prop: proportion of successes for frame 1
# frame2_prop: proportion of successes for frame 2
# n: number of participants
# size: number of trials per participant
# prob: probability of success for an experiment (same for both groups)
#-----------------------------------------------------------------------------------
# Time the simulation; the bare difference below autoprints the elapsed time.
start_time <- Sys.time()
logistic_data_rm <- simulate_logistic_rm(n = 10,
                                         size = 10,
                                         reruns = 10)
end_time <- Sys.time()
end_time - start_time
# BUG FIX: this was save("uni_log_rm_10pp10tr10runs.rda"), which treats the
# string as the *name of an object to save* and supplies no file= argument,
# so it errored without writing anything. Save the result object explicitly.
save(logistic_data_rm, file = "uni_log_rm_10pp10tr10runs.rda")
#-------------------------------------------------------------------------
#------Univariate BS & RM - identical code for both-----------------------
#-------------------------------------------------------------------------
# One entry point drives both designs: pass reruns_bs for between-subjects
# or reruns_rm for repeated measures (both defined in uni_main.R).
#
#Parameters
# model: reruns_rm OR reruns_bs, depending on the experiment you want to run
# pp: number of participants
# n: number of trials per participant
# runs: number of datasets to be simulated (NOTE: if runs = 1,
#    10 datasets will be produced, 1 for each probability)
#-------------------------------------------------------------------------
univariate_data_bs <- simulate_uni_data(model = reruns_bs,
                                        pp = 10,
                                        n = 10,
                                        runs = 1)
# TODO (author note): update downstream code so the output file name is based
# on the model that has been run.
#-----------------------------------------------------------------------------------
#------MVT Between Subjects---------------------------------------------------------
#-----------------------------------------------------------------------------------
# Simulate between-subjects drift-diffusion (DDM) data via
# simulate_data_mvt_bs() (defined in mvt_main.R, sourced above).
#
#Parameters
# sigma: rda file with drift diffusion parameters from an original model
# mu: combinations of possible drift diffusion model values
# pp: number of participants
# size: number of trials per participant
# reruns: number of datasets to be simulated (NOTE: if reruns = 1,
#    10 datasets will be produced, 1 for each probability)
#Output (fields of the result, per the author's notes):
# aov_p: p-value generated by ANOVA
# glm_p: p-value generated by GLM
# diff_props: difference in proportion of successes between groups
# mean_prop_real: mean proportion of success for both groups combined
#    (meant to converge with prob for large samples)
# g1_prop: proportion of successes for group 1
# g2_prop: proportion of successes for group 2
# size: number of trials per participant
# pp_g1: number of participants in group1
# pp_g2: number of participants in group2
# Other: mean and sd for all ddm parameters
#-----------------------------------------------------------------------------------
multivariate_data_bs <- simulate_data_mvt_bs(pp = 10,
                                             size = 10,
                                             reruns = 1)
#-----------------------------------------------------------------------------------
#------MVT Repeated Measures--------------------------------------------------------
#-----------------------------------------------------------------------------------
# Simulate repeated-measures drift-diffusion (DDM) data via
# simulate_data_mvt_rm() (defined in mvt_main.R, sourced above).
#
#Parameters
# sigma: rda file with drift diffusion parameters from an original model
# mu: combinations of possible drift diffusion model values
# pp: number of participants
# size: number of trials per participant
# reruns: number of datasets to be simulated (NOTE: if reruns = 1,
#    10 datasets will be produced, 1 for each probability)
#Output (fields of the result, per the author's notes):
# aov_p: p-value generated by ANOVA
# glm_p: p-value generated by GLM
# glmm: p-value generated by GLMM
# diff_props: difference in proportion of successes between groups
# mean_prop_real: mean proportion of success for both groups combined
#    (meant to converge with prob for large samples)
# g1_prop: proportion of successes for group 1
# g2_prop: proportion of successes for group 2
# size: number of trials per participant (per frame or in total? -- TODO confirm)
# pp: number of participants
# Other: mean and sd for all ddm parameters
#------------------------------------------------------------------------------
multivariate_data_rm <- simulate_data_mvt_rm(pp = 10,
                                             size = 10,
                                             reruns = 1)
|
d9806018f348f70abc8f4cc678d623d33b23abc6
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.lexmodelbuildingservice/man/delete_bot_version.Rd
|
70118d607949a7bd8a3e861294ab510b3ab8d6f0
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 825
|
rd
|
delete_bot_version.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.lexmodelbuildingservice_operations.R
\name{delete_bot_version}
\alias{delete_bot_version}
\title{Deletes a specific version of a bot}
\usage{
delete_bot_version(name, version)
}
\arguments{
\item{name}{[required] The name of the bot.}
\item{version}{[required] The version of the bot to delete. You cannot delete the \code{$LATEST} version of the bot. To delete the \code{$LATEST} version, use the DeleteBot operation.}
}
\description{
Deletes a specific version of a bot. To delete all versions of a bot, use the DeleteBot operation.
}
\details{
This operation requires permissions for the \code{lex:DeleteBotVersion} action.
}
\section{Accepted Parameters}{
\preformatted{delete_bot_version(
name = "string",
version = "string"
)
}
}
|
4836b79f4b4c7d0dc28b48aa23bb043aba6b8433
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ecolottery/examples/coalesc_abc.Rd.R
|
4eb231442c2e134b2d8ea763d725b1c4f65acd19
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,474
|
r
|
coalesc_abc.Rd.R
|
# Auto-extracted example head for ecolottery::coalesc_abc (ABC estimation of
# community-assembly parameters).
library(ecolottery)
### Name: coalesc_abc
### Title: Estimation of neutral and non-neutral parameters of community
###   assembly using Approximate Bayesian Computation (ABC)
### Aliases: coalesc_abc do.simul
### Keywords: coalescent ABC niche-based dynamics neutral dynamics
### ** Examples
# Gaussian trait filter: relative weight of trait value t given an optimum
# (params[1]) and a filter width (params[2]).
filt_gaussian <- function(t, params) {
  optimum <- params[1]
  width <- params[2]
  exp(-(t - optimum)^2 / (2 * width^2))
}
# Prior ranges for the two filter parameters, one row each: topt in [0, 1],
# sigmaopt in [0.05, 1].
params <- data.frame(rbind(c(0, 1), c(0.05, 1)))
row.names(params) <- c("topt", "sigmaopt")
# Number of draws from the prior distributions; kept large for a usable fit.
nb.samp <- 10^6
## Not run:
##D # Basic summary statistics
##D f.sumstats <- function(com) array(dimnames=list(c("cwm", "cwv", "cws",
##D "cwk", "S", "Es")),
##D c(mean(com[,3]), var(com[,3]),
##D e1071::skewness(com[,3]),
##D e1071::kurtosis(com[,3]),
##D vegan::specnumber(table(com[,2])),
##D vegan::diversity(table(com[,2]))))
##D
##D # An observed community is here simulated (known parameters)
##D comm <- coalesc(J = 400, m = 0.5, theta = 50,
##D filt = function(x) filt_gaussian(x, c(0.2, 0.1)))
##D
##D # ABC estimation of the parameters based on observed community composition
##D ## Warning: this function may take a while
##D res <- coalesc_abc(comm$com, comm$pool, f.sumstats = f.sumstats,
##D filt.abc = filt_gaussian, params = params,
##D nb.samp = nb.samp, parallel = TRUE,
##D pkg = c("e1071","vegan"), method = "neuralnet")
##D plot(res$abc, param = res$par)
##D hist(res$abc)
##D
##D # Cross validation
##D ## Warning: this function is slow
##D res$cv <- abc::cv4abc(param = res$par, sumstat = res$ss, nval = 1000,
##D tols = c(0.01, 0.1, 1), method = "neuralnet")
##D plot(res$cv)
##D
##D # Multiple community option
##D # When the input is a site-species matrix, use argument multi="tab"
##D # See vignette Barro_Colorado for more details
##D
##D # When the input is a list of communities, use argument multi="seqcom"
##D comm.obs <- list()
##D
##D comm.obs[[1]] <- cbind(rep(1,400), coalesc(J = 400, m = 0.5, filt = function(x)
##D filt_gaussian(x, c(0.2, 0.1)),
##D pool = comm$pool)$com))
##D comm.obs[[2]] <- cbind(rep(2,400), coalesc(J = 400, m = 0.5, filt = function(x)
##D filt_gaussian(x, c(0.5, 0.1)),
##D pool = comm$pool)$com))
##D comm.obs[[3]] <- cbind(rep(3,400), coalesc(J = 400, m = 0.5, filt = function(x)
##D filt_gaussian(x, c(0.8, 0.1)),
##D pool = comm$pool)$com))
##D
##D comm.obs <- lapply(comm.obs, as.matrix)
##D
##D res <- coalesc_abc(comm.obs, comm$pool, multi="seqcom", f.sumstats=f.sumstats,
##D filt.abc = filt_gaussian, params = params, nb.samp = nb.samp,
##D parallel = TRUE, pkg = c("e1071","vegan"), tol = 0.1,
##D method = "neuralnet")
##D
##D lapply(res$abc, summary)
##D
## End(Not run)
|
7c159c2c36f686ed72a26293af966815b754dd20
|
27d846ed2b771abbd715e2f7e580e416f0b45f06
|
/plot1.r
|
976149b4e2dd9c02a03d07e68b6c8a9fe2533fd7
|
[] |
no_license
|
mikeaadd/ExData_Plotting1
|
9b0e2473842521755a5910f078b97efb9fa108ac
|
f9ff5812f2845ffc6eab0a2e3bd018a1c800c011
|
refs/heads/master
| 2021-01-18T01:56:30.187377
| 2015-06-08T13:59:51
| 2015-06-08T13:59:51
| 37,032,492
| 0
| 0
| null | 2015-06-07T21:13:10
| 2015-06-07T21:13:10
| null |
UTF-8
|
R
| false
| false
| 553
|
r
|
plot1.r
|
library(dplyr)
#find and clean table
setwd("/Users/josephaddonisio/Downloads/Cousera")
filepath <- "./Exploratory Data Analysis/household_power_consumption.txt"
dataset <- read.table(filepath, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
newdataset<-filter(dataset,Date == "1/2/2007" | Date =="2/2/2007")
#define variables
g.a.power <- as.numeric(newdataset$Global_active_power)
#plot histogram
png("plot1.png", width=480, height=480)
hist(g.a.power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.