blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
97edf77131d3c4cfbe98281093ecc71ce648f7ea | 6cfd36128f7021bca42efd9cc9b6ff6d0830883e | /plot4.R | 9cb0d8c1dc8c56733c8b27935fe47fca706ec180 | [] | no_license | BartvBekkum/ExData_Plotting1 | a9599a0eadd9bb013554c15a34664e16aeec6816 | 061d4f12a8c181f4b6f3b3647e36f897aa4993e7 | refs/heads/master | 2021-09-01T14:34:44.196835 | 2017-12-27T13:48:03 | 2017-12-27T13:48:03 | 115,285,777 | 0 | 0 | null | 2017-12-24T20:32:02 | 2017-12-24T20:32:01 | null | UTF-8 | R | false | false | 1,455 | r | plot4.R | ## use library
library(data.table)
library(dplyr)
library(sqldf)
## read file
# NOTE(review): hard-coded, machine-specific working directory -- the script
# only runs where this path exists.
setwd("C:/Users/bart/Documents/Coursera/4_Expl/Data/")
# Filter rows to the two target dates at read time via sqldf's SQL interface,
# so only the subset of interest is loaded into R.
mydat <- read.csv.sql("household_power_consumption.txt",
                      sql = "select * from file where Date in ('1/2/2007','2/2/2007')",
                      header = TRUE,
                      sep = ";")
# Sanity check: look for any "?" characters in the subset (presumably the
# file's missing-value marker -- confirm against the source data's codebook).
if (length(grep("\\?", mydat)) != 0) {print("nok")} else {print("ok")}
# Combine the separate Date and Time columns, then parse them into a
# single timestamp (strptime returns POSIXlt).
mydat$DateTime <- paste(mydat$Date,mydat$Time)
mydat$DateTime <- strptime(mydat$DateTime, format = "%d/%m/%Y %H:%M:%S")
##plot4
# Open a 480x480 PNG device and lay out a 2x2 grid of panels.
png('plot4.png', width = 480, height = 480)
par(mfrow=c(2,2))
##graph 1 -- global active power over time
plot(mydat$DateTime, mydat$Global_active_power,
     type = "l",
     ylab = "Global active power",
     xlab = "")
##graph 2 -- voltage over time
plot(mydat$DateTime, mydat$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
##graph 3 -- the three sub-metering series overlaid, with a legend
plot(mydat$DateTime, mydat$Sub_metering_1, type = "l", col = 1, ylab = "Energy sub metering", xlab = "")
lines(mydat$DateTime, mydat$Sub_metering_2, col = 2)
lines(mydat$DateTime, mydat$Sub_metering_3, col = 4)
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c(1, 2, 4),
       box.lty=0,
       inset = .01,
       lty=1)
##graph 4 -- global reactive power over time
plot(mydat$DateTime, mydat$Global_reactive_power,
     type = "l",
     ylab = "Global reactive power",
     xlab = "datetime")
##close
# Close the device to flush the PNG, then restore the single-panel layout.
dev.off()
par(mfrow=c(1,1))
|
c4f8614fc79ffac5f23c9b30a8fe45c8128e8e6b | 0a906cf8b1b7da2aea87de958e3662870df49727 | /ggforce/inst/testfiles/enclose_points/libFuzzer_enclose_points/enclose_points_valgrind_files/1609955644-test.R | bdbe4f09703adaebb25b401724b584ed1cf0957d | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,157 | r | 1609955644-test.R | testlist <- list(id = integer(0), x = c(1.90359856625529e+185, NaN, NaN, NaN, NaN, NaN, NaN, -5.48612930076931e+303, 2.78134231924851e-309, 0, 0, 0, 0, 9.61276249044187e+281, 0, 0, 9.61275984016214e+281, 2.35665862120477e-306, 6.65351697366701e-310, 2.11370674490681e-314, 7.50953090787226e-310, 0, 0, -2.06228041419356e+289, -2.19269565256511e+289, -1.75204598749771e-207, 2.23355193469622e+131, 2.88109526107323e+284, 2.56932150904881e-28, -5.48612407607418e+303, 1.65537435737578e-316, 2.92556071557108e+284, 27477427.2586722, 2.8810952601757e+284, 1.38523893523259e-309, 1.47154785030934e-71, 6.65340712592837e-310, 0, 0, 2.35665861844352e-306, 4.53802412334407e+279, 2.72846218802522e-310, -7.95487989975085e+304, 9.43907217312373e+281, 0, 1.390671161567e-309, 1.38940990052625e-312), y = c(2.03711628245548e-312, 3.47776224376669e-308, 1.39063016900734e+284, NaN, 9.61276248028237e+281, 9.61208401271974e+281, 7.22497126908459e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Replay the fuzzer-generated argument list above against ggforce's internal
# enclose_points() entry point (libFuzzer/valgrind regression input, per the
# file's path).
result <- do.call(ggforce:::enclose_points,testlist)
# Print the structure of the result so a crash or malformed return is visible.
str(result) |
a33b9a2ca6f89626cb5d33484c3d61da330d50c7 | f4b3a5a762e0d3cdf4f8a32d8927289df9643148 | /ui.R | aa662cdf30848364312659e9f4ddbde6ba18757c | [] | no_license | samchow/DevelopingDataProducts | eea176fbf4a357a843a1db81f547d78c77d5318e | 81bb3e3a8c77c9cee17a65399d60a81342a8eb45 | refs/heads/master | 2021-01-10T19:31:28.190255 | 2015-02-23T00:14:39 | 2015-02-23T00:14:39 | 31,157,698 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 400 | r | ui.R | library(shiny)
library(ggplot2)  # loaded for its `diamonds` dataset

dataset <- diamonds

# UI definition: a header, a sidebar with a carat slider, and a main panel
# showing the resulting price table.
shinyUI(
  pageWithSidebar(
    headerPanel("Estimate Diamond Prices for all cuts and colors"),
    sidebarPanel(
      sliderInput(
        inputId = "carat",
        label = "Diamond size (carat)",
        min = 0,
        max = 10,
        value = 3,
        step = 0.1
      ),
      helpText("Use the slider to select the size")
    ),
    mainPanel(
      dataTableOutput("mytable")
    )
  )
)
|
c15b9498afe3da7c98fac773e272acdbb1ade52f | 83d318c52ba68542677dfc854144863d5a25afc8 | /R/movr.r | 6827ce5ac9ae26a4de1dad6dc24d2479cc489be6 | [] | no_license | caesar0301/movr | bfec07420ee8d9bb3103b27ccd1a320337147533 | 4a5326a144911c26902021b5c185ecfa87b36f7e | refs/heads/master | 2022-07-04T17:44:06.635669 | 2022-06-15T12:20:32 | 2022-06-15T12:20:32 | 28,908,047 | 9 | 3 | null | null | null | null | UTF-8 | R | false | false | 281 | r | movr.r | #' movr: inspecting human mobility with R
#'
#' A package for analyzing, modeling, and visualizing
#' human mobility from temporal and spatial perspectives.
#'
#' @name movr
#' @docType package
#' @useDynLib libmovr
#' @import dplyr tidyr data.table geosphere deldir
NULL
|
1a18df52ac15a76595567720b07481312a77a14b | 85b01c6070393f012bdd4bf8d6bc36832375434a | /tests/testthat/test-goodness.of.fit.R | 2c5981d4b7cd9a620e8f500dfe2ccc8fafad5c26 | [] | no_license | AEBilgrau/GMCM | 4f0aef2910389e18d461edc2a18a9f3e0e2eebc0 | dce017d7be16c9cdf26565f78162e7dbe0619c73 | refs/heads/master | 2021-11-24T08:48:22.631145 | 2021-11-04T22:28:46 | 2021-11-04T22:28:46 | 15,970,417 | 12 | 2 | null | 2020-01-27T19:25:32 | 2014-01-16T14:41:13 | R | UTF-8 | R | false | false | 456 | r | test-goodness.of.fit.R | context("Check goodness.of.fit function")
# Fixture: a random parameter set and pseudo-observations simulated from it.
theta <- rtheta()
u <- Uhat(SimulateGMCMData(theta = theta)$u)

test_that("goodness.of.fit is working as intended", {
  # Default method: a single numeric value.
  gof_default <- goodness.of.fit(theta, u)
  expect_length(gof_default, 1)
  expect_type(gof_default, "double")

  # AIC with a custom penalty multiplier.
  gof_aic <- goodness.of.fit(theta, u, method = "AIC", k = 3)
  expect_length(gof_aic, 1)
  expect_type(gof_aic, "double")

  # BIC.
  gof_bic <- goodness.of.fit(theta, u, method = "BIC")
  expect_length(gof_bic, 1)
  expect_type(gof_bic, "double")
})
|
4a62dda12336e90332136a39a2cac2627de1aaa2 | c1f1c1d80d2ed66c782856655662b8ccd2f5eb95 | /rsWriteSPSS.R | fe584811445b9d6933675a1ea0fa490099f39b08 | [
"MIT"
] | permissive | BurninLeo/R-helpers | 7eb6b3f38fe54c5c5fa68434b21e88c8dc5a644e | 44a87582db435d5a4f150cb277640c663ef5316c | refs/heads/master | 2021-01-23T03:12:08.826889 | 2017-03-27T19:42:34 | 2017-03-27T19:42:34 | 86,055,346 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,202 | r | rsWriteSPSS.R | writeForeignMySPSS = function (df, datafile, codefile, varnames = NULL, len = 32767) {
    # Export a data frame for SPSS: writes the data as a tab-separated text
    # file (`datafile`) plus an SPSS syntax file (`codefile`) containing the
    # DATA LIST / VARIABLE LABELS / VALUE LABELS / VARIABLE LEVEL commands
    # needed to read it back. Appears to be a variant of
    # foreign's write.foreign(..., package = "SPSS") -- confirm against that
    # implementation if behavior needs to match it.
    #
    # Args:
    #   df:       data frame to export.
    #   datafile: path of the tab-separated data file to create.
    #   codefile: path of the SPSS syntax file to create.
    #   varnames: optional SPSS variable names; abbreviated from names(df)
    #             when NULL.
    #   len:      maximum string length written; longer strings are clipped.

    # Helper: wrap a value in literal double quotes for the syntax file.
    adQuote <- function (x) paste("\"", x, "\"", sep = "")
    # Last variable must not be empty for DATA LIST
    if (any(is.na(df[[length(df)]]))) {
        df$END_CASE = 0
    }
    # Number of decimal places actually used in a numeric vector, so the
    # SPSS F<w>.<d> format can be sized below.
    # http://stackoverflow.com/questions/5173692/how-to-return-number-of-decimal-places-in-r
    decimalplaces <- function(x) {
        y = x[!is.na(x)]
        if (length(y) == 0) {
            return(0)
        }
        if (any((y %% 1) != 0)) {
            # Strip trailing zeros, then split each value on the decimal
            # point; keep only entries that really split into two parts.
            info = strsplit(sub('0+$', '', as.character(y)), ".", fixed=TRUE)
            info = info[sapply(info, FUN=length) == 2]
            if (length(info) >= 2) {
                # Fractional parts sit at every 2nd position of the
                # unlisted vector; measure their widths.
                dec = nchar(unlist(info))[seq(2, length(info), 2)]
            } else {
                # NOTE(review): a column with exactly one decimal-bearing
                # value also lands here (length(info) == 1) and reports 0
                # places -- confirm this is intended.
                return(0)
            }
            return(max(dec, na.rm=T))
        } else {
            return(0)
        }
    }
    # Working copy of the data with factors replaced by their integer codes
    # (the VALUE LABELS section below maps the codes back to level text).
    dfn <- lapply(df, function(x) if (is.factor(x))
        as.numeric(x)
    else x)
    # Boolean variables (dummy coding)
    bv = sapply(dfn, is.logical)
    for (v in which(bv)) {
        dfn[[v]] = ifelse(dfn[[v]], 1, 0)
    }
    # Variable labels default to the column names.
    varlabels <- names(df)
    # Use comments where applicable
    for (i in 1:length(df)) {
        cm = comment(df[[i]])
        if (is.character(cm) && (length(cm) > 0)) {
            varlabels[i] = comment(df[[i]])
        }
    }
    # Derive <= 8 character SPSS variable names unless supplied by caller.
    if (is.null(varnames)) {
        varnames <- abbreviate(names(df), 8L)
        if (any(sapply(varnames, nchar) > 8L))
            stop("I cannot abbreviate the variable names to eight or fewer letters")
        if (any(varnames != varlabels))
            warning("some variable names were abbreviated")
    }
    # Replace characters SPSS does not allow in variable names.
    varnames <- gsub("[^[:alnum:]_\\$@#]", "\\.", varnames)
    # dl.varnames accumulates the DATA LIST entries: name + format spec.
    dl.varnames <- varnames
    # --- Character columns: normalise whitespace, clip to `len`, and give
    # them an (A<width>) string format sized to the longest value.
    chv = sapply(df, is.character)
    if (any(chv)) {
        for (v in which(chv)) {
            dfn[[v]] = gsub("\\s", " ", dfn[[v]])
        }
        lengths <- sapply(df[chv], function(v) max(nchar(v), na.rm=T))
        if (any(lengths > len)) {
            warning(paste("Clipped strings in", names(df[chv]), "to", len, "characters"))
            for (v in which(chv)) {
                df[[v]] = substr(df[[v]], start=1, stop=len)
            }
        }
        # max() over an all-NA column yields -Inf; force a minimum width 1.
        lengths[is.infinite(lengths)] = 0
        lengths[lengths < 1] = 1
        lengths <- paste("(A", lengths, ")", sep = "")
        # star <- ifelse(c(FALSE, diff(which(chv) > 1)), " *",
        dl.varnames[chv] <- paste(dl.varnames[chv], lengths)
    }
    # Dates
    is.POSIXct = function(x) {
        inherits(x, "POSIXct")
    }
    chd = sapply(df, is.POSIXct)
    if (any(chd)) {
        for (v in which(chd)) {
            # Serialise timestamps as day-month-year hh:mm:ss text.
            dfn[[v]] = format(dfn[[v]], format="%d-%m-%Y %H:%M:%S")
        }
        # NOTE(review): this `lengths` value is never used afterwards; the
        # format actually written is the literal (DATETIME) suffix below.
        lengths = rep("DATE", length(df[chd]))
        dl.varnames[chd] = paste(dl.varnames[chd], " (DATETIME)", sep="")
    }
    # decimals and bools
    # NOTE(review): nmv and dbv are computed identically; one of them was
    # presumably meant to be a different predicate (e.g. is.double).
    nmv = sapply(df, is.numeric)
    dbv = sapply(df, is.numeric)
    factors <- sapply(df, is.factor)
    nv = (nmv | dbv)
    if (any(nv)) {
        # Numeric columns get F<8+d>.<d>, sized by observed decimal places.
        decimals = sapply(df[nv], FUN=decimalplaces)
        # if (length(decimals) == 0) {
        dl.varnames[nv] = paste(dl.varnames[nv], " (F", decimals+8, ".", decimals, ")", sep="")
        # NOTE(review): length(bv) is the column count, so this condition is
        # effectively always TRUE; bv is a logical mask, which still indexes
        # correctly. Logicals were recoded 0/1 above, hence F1.0.
        if (length(bv) > 0) {
            dl.varnames[bv] = paste(dl.varnames[bv], "(F1.0)")
        }
    }
    # Anything not yet assigned a format falls back to F8.0.
    # NOTE(review): as above, length(rmv) > 0 is effectively always TRUE.
    rmv = !(chv | nv | bv | chd)
    if (length(rmv) > 0) {
        dl.varnames[rmv] = paste(dl.varnames[rmv], "(F8.0)")
    }
    # Breaks in output
    # Insert a line break after every 10th entry to keep the syntax readable.
    brv = seq(1, length(dl.varnames), 10)
    dl.varnames[brv] = paste(dl.varnames[brv], "\n", sep=" ")
    # --- Write the SPSS syntax file --------------------------------------
    cat("SET LOCALE = ENGLISH.\n", file = codefile)
    cat("DATA LIST FILE=", adQuote(datafile), " free (TAB)\n", file = codefile, append = TRUE)
    cat("/", dl.varnames, " .\n\n", file = codefile, append = TRUE)
    cat("VARIABLE LABELS\n", file = codefile, append = TRUE)
    cat(paste(varnames, adQuote(varlabels), "\n"), ".\n", file = codefile,
        append = TRUE)
    # Value labels for factor columns: level index -> quoted level text.
    if (any(factors)) {
        cat("\nVALUE LABELS\n", file = codefile, append = TRUE)
        for (v in which(factors)) {
            cat("/\n", file = codefile, append = TRUE)
            cat(varnames[v], " \n", file = codefile, append = TRUE)
            levs <- levels(df[[v]])
            cat(paste(1:length(levs), adQuote(levs), "\n", sep = " "),
                file = codefile, append = TRUE)
        }
        cat(".\n", file = codefile, append = TRUE)
    }
    # Labels stored in attr()
    # Convention here: a column carries value labels as attributes whose
    # names are the coded values themselves; a column qualifies if it has
    # an attribute literally named "1".
    attribs <- !unlist(lapply(sapply(df, FUN=attr, which="1"), FUN=is.null))
    if (any(attribs)) {
        cat("\nVALUE LABELS\n", file = codefile, append = TRUE)
        for (v in which(attribs)) {
            cat("/\n", file = codefile, append = TRUE)
            cat(varnames[v], " \n", file = codefile, append = TRUE)
            # Check labeled values
            # Collect value -> label pairs for the values actually present.
            tc = list()
            for (tcv in dimnames(table(df[[v]]))[[1]]) {
                if (!is.null(tcl <- attr(df[[v]], tcv))) {
                    tc[tcv] = tcl
                }
            }
            cat(paste(names(tc), tc, "\n", sep = " "),
                file = codefile, append = TRUE)
        }
        cat(".\n", file = codefile, append = TRUE)
    }
    # Measurement level: ordered factors -> ORDINAL.
    ordinal <- sapply(df, is.ordered)
    if (any(ordinal)) {
        tmp = varnames[ordinal]
        brv = seq(1, length(tmp), 10)
        tmp[brv] = paste(tmp[brv], "\n")
        cat(paste("\nVARIABLE LEVEL", paste(tmp, collapse=" "), "(ORDINAL).\n"),
            file = codefile, append = TRUE)
    }
    # Measurement level: numeric columns -> SCALE.
    num <- sapply(df, is.numeric)
    if (any(num)) {
        tmp = varnames[num]
        brv = seq(1, length(tmp), 10)
        tmp[brv] = paste(tmp[brv], "\n")
        cat(paste("\nVARIABLE LEVEL", paste(tmp, collapse=" "), "(SCALE).\n"),
            file = codefile, append = TRUE)
    }
    cat("\nEXECUTE.\n", file = codefile, append = TRUE)
    # --- Write the data file: tab-separated, no headers or row names -----
    write.table(dfn, file = datafile, row = FALSE, col = FALSE,
        sep = "\t", quote = F, na = "", eol = "\n", fileEncoding="UTF-8")
} |
ac61ddc68cc2382a62b7a241d87c198cc1b9ced5 | 391ad5a8f32ea0d0076f63885b995fb25603e2ad | /man/venn_plot.Rd | 160d5a4d359bf37580e1e00e1be84042567c23de | [] | no_license | cran/metan | a0d951dff5a151bd5a8b0b7a0a699fde0dddb50c | b3637d40a2fc9d955b928120d61399ba1bf6e11d | refs/heads/master | 2023-03-20T17:17:24.469173 | 2023-03-05T21:00:15 | 2023-03-05T21:00:15 | 236,625,199 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 2,829 | rd | venn_plot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/venn_plot.R
\name{venn_plot}
\alias{venn_plot}
\title{Draw Venn diagrams}
\usage{
venn_plot(
...,
names = NULL,
show_elements = FALSE,
show_sets = FALSE,
fill = ggplot_color(4),
alpha = 0.5,
stroke_color = "white",
stroke_alpha = 1,
stroke_size = 1,
stroke_linetype = "solid",
name_color = "black",
name_size = 6,
text_color = "black",
text_size = 4,
label_sep = ","
)
}
\arguments{
\item{...}{A list or a comma-separated list of vectors in the same class. If
vector contains duplicates they will be discarded. If the list doesn't have
names the sets will be named as \code{"set_1"}, "\verb{Set_2"}, \code{"Set_3"} and so on.
If vectors are given in \code{...}, the set names will be named with the names
of the objects provided.}
\item{names}{By default, the names of the sets are set as the names of the
objects in \code{...} (\code{names = NULL}). Use \code{names} to override this default.}
\item{show_elements}{Show set elements instead of count. Defaults to \code{FALSE}.}
\item{show_sets}{Show set names instead of count. Defaults to \code{FALSE}.}
\item{fill}{Filling colors in circles. Defaults to the default ggplot2 color
palette. A vector of length 1 will be recycled.}
\item{alpha}{Transparency for filling circles. Defaults to \code{0.5}.}
\item{stroke_color}{Stroke color for drawing circles.}
\item{stroke_alpha}{Transparency for drawing circles.}
\item{stroke_size}{Stroke size for drawing circles.}
\item{stroke_linetype}{Line type for drawing circles. Defaults to \code{"solid"}.}
\item{name_color}{Text color for set names. Defaults to \code{"black"}.}
\item{name_size}{Text size for set names.}
\item{text_color}{Text color for intersect contents.}
\item{text_size}{Text size for intersect contents.}
\item{label_sep}{The separator for labs when \code{show_elements = TRUE}. Defaults
to \code{","}.}
}
\value{
A ggplot object.
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#stable}{\figure{lifecycle-stable.svg}{options: alt='[Stable]'}}}{\strong{[Stable]}}
Produces ggplot2-based Venn plots for 2, 3 or 4 sets. A Venn diagram shows
all possible logical relationships between several sets of data.
}
\examples{
\donttest{
library(metan)
(A <- letters[1:4])
(B <- letters[2:5])
(C <- letters[3:7])
(D <- letters[4:12])
# create a Venn plot
venn_plot(A, B)
# Three sets
venn_plot(A, B, C)
# Four sets
venn_plot(A, B, C, D)
# Use a list
dfs <- list(A = A, B = B, C = C, D = D)
venn_plot(dfs,
show_elements = TRUE,
fill = c("red", "blue", "green", "gray"),
stroke_color = "black",
alpha = 0.8,
text_size = 8,
label_sep = ".")
}
}
\author{
Tiago Olivoto \email{tiagoolivoto@gmail.com}
}
|
aaf9678b1665d1662210f39e729e15715f8d24b9 | 3cc21e7c4a4d6ae4a1b6f6117594e52594d47edb | /R/pdrf.R | 74e9bed953a66c62e22dec5c4502ae33d812bd1d | [
"MIT"
] | permissive | AllanCameron/PDFR | abbf377c26cccadf65200df112512471a7b317fb | 955c122cd0efa0ed4c0924e1ec8ec6340207d151 | refs/heads/master | 2023-05-23T20:56:44.545742 | 2023-05-05T14:43:52 | 2023-05-05T14:43:52 | 159,937,563 | 33 | 4 | NOASSERTION | 2023-04-06T01:59:14 | 2018-12-01T11:25:38 | C++ | UTF-8 | R | false | false | 14,639 | r | pdrf.R | ##---------------------------------------------------------------------------##
#' pdfpage
#'
#' Returns contents of a pdf page
#'
#' @param pdf a valid pdf file location
#' @param page the page number to be extracted
#' @param atomic a boolean - should each letter treated individually?
#' @param table_only a boolean - return data frame alone, as opposed to list
#'
#' @return a list containing data frames
#' @export
#'
#' @examples
#'
#' head(pdfpage(pdfr_paths$leeds, page = 1))
#'
#' head(pdfpage(pdfr_paths$chestpain, page = c(1:2)))
#'
##---------------------------------------------------------------------------##
pdfpage <- function(pdf, page = 1, atomic = FALSE, table_only = TRUE)
{
  # Validate the input before touching it so a bad `pdf` fails with a clear
  # message instead of an "object 'x' not found" error further down.
  # NOTE(review): `call` here resolves to base::call (no local binding) --
  # confirm check_pdf's second argument really expects it.
  check_pdf(pdf, call)
  # Multi-page request: extract each page separately and stack the results,
  # tagging every row with its page number. The recursive calls must yield
  # data frames, so table_only is forced to TRUE here -- the original passed
  # table_only through, and cbind()/rbind() on the list return value would
  # silently corrupt the result when table_only = FALSE.
  if (is_min_length(page, 2)) {
    pages <- lapply(page, function(p) {
      cbind(
        pdfpage(pdf, p, atomic, table_only = TRUE),
        data.frame("page" = p)
      )
    })
    return(do.call(rbind, pages))
  }
  # Dispatch on input type. The branches are mutually exclusive, so an
  # else-if chain makes that explicit; `&&` short-circuits so the path
  # helpers never run on non-character input.
  if (is_raw(pdf)) {
    x <- .pdfpageraw(pdf, page, atomic)
  } else if (is_character(pdf, 1) && is_pdf_fileext(pdf[1]) && !is_fsep_path(pdf[1])) {
    # Bare file name: resolve it against the user's home directory.
    x <- .pdfpage(paste0(path.expand("~/"), pdf), page, atomic)
  } else if (is_character(pdf, 1) && is_pdf_fileext(pdf[1]) && is_fsep_path(pdf[1])) {
    x <- .pdfpage(pdf, page, atomic)
  } else {
    stop("'pdf' must be a raw vector or the path of a .pdf file", call. = FALSE)
  }
  # The native layer emits UTF-8 bytes; mark the encoding accordingly.
  Encoding(x$Elements$text) <- "UTF-8"
  # Reading order: top-to-bottom, then left-to-right.
  x$Elements <- x$Elements[order(-x$Elements$bottom, x$Elements$left), ]
  # Round coordinates and font size to one decimal place for readability.
  x$Elements$left <- round(x$Elements$left, 1)
  x$Elements$right <- round(x$Elements$right, 1)
  x$Elements$bottom <- round(x$Elements$bottom, 1)
  x$Elements$size <- round(x$Elements$size, 1)
  rownames(x$Elements) <- seq_len(nrow(x$Elements))
  .stopCpp()
  # Return just the element table by default, or the full list (including
  # the page Box) when table_only = FALSE.
  if (is_false(table_only)) return(x) else return(x$Elements)
}
##---------------------------------------------------------------------------##
#' Get a pdf's xref table as an R dataframe
#'
#' @param pdf a valid pdf file location or raw data vector
#'
#' @return a data frame showing the bytewise positions of each object in the pdf
#' @export
#'
#' @examples get_xref(pdfr_paths$leeds)
##---------------------------------------------------------------------------##
get_xref <- function(pdf)
{
  # Raw vectors and file paths are handled by separate native routines.
  if (is_raw(pdf)) {
    return(.get_xrefraw(pdf))
  }
  .get_xref(pdf)
}
##---------------------------------------------------------------------------##
#' Get the contents of a pdf object
#'
#' Returns a list consisting of a named vector representing key:value pairs
#' in a specified object. It also contains any stream data associated with
#' the object.
#'
#' @param pdf a valid pdf file location
#' @param number the object number
#'
#' @return a named vector of the dictionary and stream of the pdf object
#' @export
#'
#' @examples get_object(pdfr_paths$leeds, 1)
##---------------------------------------------------------------------------##
get_object <- function(pdf, number)
{
  # Dispatch to the raw-vector or file-path native reader.
  if (is_raw(pdf)) {
    return(.get_objraw(pdf, number))
  }
  .get_obj(pdf, number)
}
##---------------------------------------------------------------------------##
#' pdfplot
#'
#' Plots the text elements from a page as a ggplot.
#' The aim is not a complete pdf rendering but to help identify elements of
#' interest in the data frame of text elements to convert to data points.
#'
#' @param pdf a valid pdf file location
#' @param page the page number to be plotted
#' @param atomic a boolean - should each letter treated individually?
#' @param boxes Show the calculated text bounding boxes
#' @param textsize the scale of the text to be shown
#'
#' @return a ggplot
#' @export
#'
#' @examples pdfplot(pdfr_paths$leeds, 1)
##---------------------------------------------------------------------------##
pdfplot <- function(pdf, page = 1, atomic = FALSE, boxes = FALSE, textsize = 1)
{
  check_installed("ggplot2")
  # Full extraction (list form) so the page bounding Box is available too.
  x <- pdfpage(pdf, page, atomic, FALSE)
  y <- x$Elements
  # Centre of each text element's bounding box: where its label is drawn.
  y$midx <- (y$right + y$left) / 2
  y$midy <- (y$top + y$bottom) / 2
  G <- ggplot2::ggplot(
    data = y,
    # Text size is scaled relative to the page height so the rendering is
    # roughly proportional to the actual pdf page.
    ggplot2::aes(x = midx, y = midy,
                 size = I(textsize * 170 * size / (x$Box[4] - x$Box[2]))),
    lims = x$Box
  ) +
    # Draw the page itself as a white rectangle.
    ggplot2::geom_rect(ggplot2::aes(xmin = x$Box[1], ymin = x$Box[2],
                                    xmax = x$Box[3], ymax = x$Box[4]),
                       fill = "white", colour = "black", size = 0.2) +
    ggplot2::coord_equal() +
    ggplot2::scale_size_identity()
  if (is_true(boxes)) {
    # Overlay the calculated text bounding boxes.
    G <- G + ggplot2::geom_rect(ggplot2::aes(xmin = left, ymin = bottom,
                                             xmax = right, ymax = top),
                                fill = "grey", colour = "grey",
                                size = 0.2, alpha = 0.2)
  }
  # The original branched on `atomic` here, but both branches were
  # byte-identical; a single call preserves behavior for both cases.
  G + ggplot2::geom_text(ggplot2::aes(label = text), hjust = 0.5, vjust = 0.5)
}
##---------------------------------------------------------------------------##
#' Return map of glyphs from a page
#'
#' Used mainly for debugging, this function returns an R dataframe, one row for
#' each byte that may be used as a glyph. It shows the unicode number of
#' each interpreted glyph, as well as its width in text space.
#'
#' @param pdf a valid pdf file location
#' @param page the page number from which to extract glyphs
#'
#' @return a dataframe of all entries of font encoding tables with width mapping
#' @export
#'
#' @examples getglyphmap(pdfr_paths$leeds, 1)
##---------------------------------------------------------------------------##
getglyphmap <- function(pdf, page = 1)
{
  # Thin wrapper around the native glyph-map extractor.
  .getglyphmap(pdf, page)
}
##---------------------------------------------------------------------------##
#' pagestring
#'
#' Returns contents of a pdf page description program
#'
#' @param pdf a valid pdf file location
#' @param page the page number to be extracted
#'
#' @return a single string containing the page description program
#' @export
#'
#' @examples getpagestring(pdfr_paths$leeds, 1)
##---------------------------------------------------------------------------##
getpagestring <- function(pdf, page)
{
  # Validate first: the original checked only after the native calls, so an
  # invalid `pdf` surfaced as "object 'x' not found" instead of a clear error.
  # NOTE(review): `call` resolves to base::call here (no local binding) --
  # confirm check_pdf's second argument really expects it.
  check_pdf(pdf, call)
  # Dispatch on input type; `&&` short-circuits so is_pdf_fileext() never
  # runs on non-character input.
  if (is_raw(pdf)) {
    x <- .pagestringraw(pdf, page)
  } else if (is_character(pdf, 1) && is_pdf_fileext(pdf[1])) {
    x <- .pagestring(pdf, page)
  } else {
    stop("'pdf' must be a raw vector or the path of a .pdf file", call. = FALSE)
  }
  .stopCpp()
  return(x)
}
##---------------------------------------------------------------------------##
#' pdfdoc
#'
#' Returns contents of all pdf pages
#'
#' @param pdf a valid pdf file location
#'
#' @return a data frame of all text elements in a document
#' @export
#'
#' @examples pdfdoc(pdfr_paths$leeds)
##---------------------------------------------------------------------------##
pdfdoc <- function(pdf)
{
  check_pdf(pdf)
  # Dispatch on input type. (The original also computed `is_pdf` and
  # `valid_pdf_name` here; both were unused, and evaluating
  # is_pdf_fileext(pdf[1]) on a raw vector was pointless, so they are
  # removed.)
  if (is_raw(pdf)) {
    x <- .pdfdocraw(pdf)
  } else if (is_character(pdf)) {
    # A bare file name (no path separator) is resolved against the user's
    # home directory, mirroring pdfpage().
    if (!is_fsep_path(pdf[1])) {
      pdf <- paste0(path.expand("~/"), pdf)
    }
    x <- .pdfdoc(pdf)
  } else {
    stop("'pdf' must be a raw vector or the path of a .pdf file", call. = FALSE)
  }
  # Reading order: by page, then top-to-bottom, then left-to-right.
  x <- x[order(x$page, -x$bottom, x$left), ]
  # Round coordinates and font size to one decimal place for readability.
  x$left <- round(x$left, 1)
  x$right <- round(x$right, 1)
  x$bottom <- round(x$bottom, 1)
  x$size <- round(x$size, 1)
  rownames(x) <- seq_len(nrow(x))
  # The native layer emits UTF-8 bytes; mark the encoding accordingly.
  Encoding(x$text) <- "UTF-8"
  .stopCpp()
  return(x)
}
#' pdfboxes
#'
#' Plots the bounding boxes of text elements from a page as a ggplot.
#'
#' @param pdf a valid pdf file location
#' @param pagenum the page number to be plotted
#'
#' @return a ggplot
#' @export
#'
#' @examples pdfboxes(pdfr_paths$leeds, 1)
##---------------------------------------------------------------------------##
pdfboxes <- function(pdf, pagenum)
{
  # Validate the input and the plotting dependency up front; the original
  # checked only after the native extraction, so failures surfaced late
  # (or as "object 'x' not found").
  check_pdf(pdf)
  check_installed("ggplot2")
  # Dispatch: raw vector, bare file name (resolved against the home
  # directory), or full path. `&&` short-circuits so the path helpers never
  # run on non-character input.
  if (is_raw(pdf)) {
    x <- .pdfboxesRaw(pdf, pagenum)
  } else if (is_character(pdf) &&
             has_length(pdf, 1) &&
             is_pdf_fileext(pdf[1]) &&
             !is_fsep_path(pdf[1])) {
    x <- .pdfboxesString(paste0(path.expand("~/"), pdf), pagenum)
  } else if (is_character(pdf) &&
             has_length(pdf, 1) &&
             is_pdf_fileext(pdf[1]) &&
             is_fsep_path(pdf[1])) {
    x <- .pdfboxesString(pdf, pagenum)
  } else {
    stop("'pdf' must be a raw vector or the path of a .pdf file", call. = FALSE)
  }
  # One semi-transparent rectangle per detected text box, filled by box id.
  D <-
    ggplot2::ggplot(
      data = x,
      ggplot2::aes(
        xmin = xmin, ymin = ymin, xmax = xmax, ymax = ymax, fill = factor(box)
      )
    )
  print(D + ggplot2::geom_rect(alpha = 0.5))
  .stopCpp()
  # Also return the box coordinates for programmatic use.
  return(x)
}
##---------------------------------------------------------------------------##
#' pdfgraphics
#'
#' Plots the graphical elements of a pdf page as a ggplot
#'
#' @param file a valid pdf file location
#' @param pagenum the page number to be plotted
#' @param scale Scale used for linewidth and text size. Passed to
#' `ggplot2::geom_text()` size parameter as scale * size/3
#' @return a ggplot
#' @export
#'
#' @examples pdfgraphics(pdfr_paths$leeds, 1)
#'
#' @importFrom grDevices rgb
##---------------------------------------------------------------------------##
pdfgraphics <- function(file, pagenum, scale = 1) {
  rlang::check_installed("ggplot2")
  # Full page extraction (list form) so the page bounding Box is available.
  x <- pdfpage(file, pagenum, FALSE, FALSE)
  # Raw graphics paths for the page from the native layer.
  a <- .GetPaths(file, pagenum)
  # Normalise each path record into a plottable data frame: default missing
  # colours to black, encode stroke/fill as rgb strings (4th rgb argument is
  # the alpha channel, 0 or 1 from the logical flags), and recycle the
  # per-path scalars to one entry per vertex.
  dfs <- lapply(a, function(x) {
    if(has_length(x$colour, 0)) x$colour <- c(0, 0, 0)
    if(has_length(x$fill, 0)) {x$fill <- c(0, 0, 0); x$filled <- FALSE}
    # Paths carrying text are always stroked so the glyphs are visible.
    if(nchar(x$text) > 0) x$stroked <- TRUE
    x$stroke <- grDevices::rgb(x$colour[1], x$colour[2], x$colour[3], as.numeric(x$stroked))
    x$fill <- grDevices::rgb(x$fill[1], x$fill[2], x$fill[3], as.numeric(x$filled))
    x$fill <- rep_len(x$fill, length(x$X))
    x$stroke <- rep_len(x$stroke, length(x$X))
    x$filled <- rep_len(x$filled, length(x$X))
    x$text <- rep_len(x$text, length(x$X))
    x$hasText <- nchar(x$text) > 0
    x$size <- rep_len(abs(x$size), length(x$X))
    as.data.frame(x[c("X", "Y", "stroke", "fill", "size",
                      "filled", "hasText", "text")])
  })
  # Drop paths with any coordinate beyond 800 -- presumably off-page
  # artefacts; TODO confirm this threshold against typical page sizes.
  dfs <- dfs[!sapply(dfs, function(x) any(x$X > 800) | any(x$Y > 800))]
  # Tag every vertex with its path index so ggplot can group polygons.
  dfs <- mapply(function(x, y) {x$poly <- rep_len(y, length(x$X)); x},
                dfs, seq_along(dfs), SIMPLIFY = FALSE)
  d <- do.call(rbind, dfs)
  Encoding(d$text) <- "UTF-8"
  # Layers: page rectangle, filled polygons, stroked paths, then any text.
  ggplot2::ggplot(d[d$filled, ],
                  ggplot2::aes(X, Y, colour = stroke, group = poly, size = size)) +
    ggplot2::geom_rect(ggplot2::aes(xmin = x$Box[1], ymin = x$Box[2],
                                    xmax = x$Box[3], ymax = x$Box[4]),
                       fill = "white", colour = "black",
                       inherit.aes = FALSE) +
    ggplot2::geom_polygon(ggplot2::aes(fill = fill)) +
    ggplot2::geom_path(data = d[!d$filled,]) +
    ggplot2::geom_text(ggplot2::aes(label = text, size = scale * size/3),
                       data = d[d$hasText,],
                       vjust = 0, hjust = 0) +
    # Colours/sizes are already literal values, so use identity scales.
    ggplot2::scale_fill_identity() +
    ggplot2::scale_color_identity() +
    ggplot2::scale_size_identity() +
    ggplot2::coord_fixed() +
    ggplot2::theme_void()
}
##---------------------------------------------------------------------------##
#' pdfgrobs
#'
#' Plots the graphical elements of a pdf page as grobs
#'
#' @param file_name a valid pdf file location
#' @param pagenum the page number to be plotted
#' @param scale Document scale. Defaults to `dev.size()[2]/10`
#' @param enc Document encoding. Defaults to "UTF-8"
#'
#' @return invisibly returns grobs as well as drawing them
#' @export
#'
#' @examples pdfgrobs(pdfr_paths$leeds, 1)
#' @importFrom grid grid.newpage grid.draw grid.rect gpar pushViewport viewport
#' @importFrom grDevices dev.size
##---------------------------------------------------------------------------##
pdfgrobs <- function(file_name, pagenum, scale = dev.size()[2]/10, enc = "UTF-8")
{
  # Grid grobs for the page from the native layer, plus the page Box from
  # the regular extractor (needed for the aspect ratio below).
  groblist <- .GetGrobs(file_name, pagenum)
  x <- pdfpage(file_name, pagenum, FALSE, FALSE)
  width <- x$Box[3] - x$Box[1]
  height <- x$Box[4] - x$Box[2]
  # Normalise so the larger dimension is 1 (the other becomes the ratio);
  # exactly one of these branches fires, with equality handled by the first.
  if(width >= height) {height <- height / width; width <- 1;}
  if(width < height) {width <- width / height; height <- 1;}
  # Text grobs (those with a label) get the requested encoding mark and
  # have their font size multiplied by `scale`.
  for(i in seq_along(groblist))
  {
    if(!is.null(groblist[[i]]$label)) {
      Encoding(groblist[[i]]$label) <- enc
      groblist[[i]]$gp$fontsize <- scale * groblist[[i]]$gp$fontsize
    }
  }
  # Draw: gray backdrop, then a white "page" viewport with the normalised
  # aspect ratio, then every grob inside it.
  grid::grid.newpage()
  grid::grid.draw(grid::grid.rect( gp = grid::gpar(fill = "gray")))
  grid::pushViewport(grid::viewport(width = width, height = height,
                                    default.units = "snpc"))
  grid::grid.draw(grid::grid.rect(gp = grid::gpar(fill = "white")))
  lapply(groblist, grid::grid.draw)
  # Return the grobs invisibly so the call is pipe/assignment friendly.
  invisible(groblist)
}
##---------------------------------------------------------------------------##
#' draw_glyph
#'
#' Draws glyphs from a truetype font as grid grobs
#'
#' @param fontfile a raw vector representing a font file
#' @param glyph the character to be drawn. Can be text or an integer
#'
#' @return no return
#' @export
#'
#' @examples
#' \dontrun{
#' if(interactive()){
#' # ttf <- "raw vector with font file"
#' draw_glyph(ttf, "a")
#' }
#' }
#' @importFrom grid grid.newpage pushViewport viewport grid.path gpar
##---------------------------------------------------------------------------##
draw_glyph <- function(fontfile, glyph)
{
  # NOTE(review): `header` is computed but never used below -- confirm
  # whether GetFontFileHeader() is needed for a side effect or is dead code.
  header <- GetFontFileHeader(fontfile)
  cmap <- GetFontFileCMap(fontfile)
  enc <- names(cmap)
  # Pick a character map in order of preference: Unicode BMP variants
  # first, then the Mac table; abort if none is present.
  if("Unicode v2 BMP only" %in% enc) {
    cmap <- cmap[[which(enc == "Unicode v2 BMP only" )[1]]]
  } else if("Windows Unicode (BMP only)" %in% enc) {
    cmap <- cmap[[which(enc == "Windows Unicode (BMP only)" )[1]]]
  } else if("Mac" %in% enc) {
    cmap <- cmap[[which(enc == "Mac")[1]]]
  } else cli_abort("Appropriate cmap can't be found in {.arg fontfile}")
  # A text glyph is reduced to the byte value of its first character.
  # NOTE(review): for a multi-byte (non-ASCII) character, charToRaw() yields
  # several bytes, making `glyph` a vector and the == comparison below
  # vectorized -- confirm intended behavior for non-ASCII input.
  if(is_character(glyph)) glyph <- as.numeric(charToRaw(substr(glyph, 1, 1)))
  # Map the character code (cmap$first) to the font's glyph id (cmap$second).
  index <- which(cmap$first == glyph)
  if(has_length(index, 0)) {
    cli_abort("{.arg glyph} can't be found in {.arg fontfile}")
  }
  glyph <- cmap$second[index[1]]
  glyph <- GetFontFileGlyph(fontfile, glyph)
  grid::grid.newpage()
  dfs <- glyph$Contours
  # Scale the glyph's bounding box into unit coordinates with a 20% margin.
  xrange <- glyph$xmax - glyph$xmin
  yrange <- glyph$ymax - glyph$ymin
  shrink_by <- if(xrange > yrange) xrange else yrange
  shrink_by <- 1.2 * shrink_by
  grid::pushViewport(
    grid::viewport(width = xrange/shrink_by, height = yrange/shrink_by))
  # A single contour may come back as a bare data frame; normalise to list.
  if(is.data.frame(dfs)) dfs <- list(dfs)
  for(df in dfs)
  {
    if(nrow(df) == 0) next
    # Translate/scale each contour into the viewport (0.25 offset centres
    # the glyph given the 1.2x margin above).
    df$xcoords <- (df$xcoords - glyph$xmin)/(shrink_by) + 0.25
    df$ycoords <- (df$ycoords - glyph$ymin)/(shrink_by) + 0.25
    # Winding rule so holes (e.g. the bowl of "o") render correctly.
    grid::grid.path(df$xcoords, df$ycoords, id = df$shape, default.units = "snpc",
                    gp = grid::gpar(fill = "black"), rule = "winding")
  }
}
|
08870f5f0ae457ebed200e916fd3da415a70d110 | 1459bb32324b7018862d770110aff5f9e8a4e03c | /man/seade.Rd | 4e1774a113cbe1c8453ff40c44a19c880854d5c3 | [] | no_license | rdurl0/spcrimr | 8d6818a750f3daccddb7326351feecf1ba61f5d9 | 848bab7aaad3076c1ae957cccd076e89ca5f616d | refs/heads/master | 2020-06-05T11:59:45.345746 | 2020-06-01T00:10:18 | 2020-06-01T00:10:18 | 142,907,795 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 425 | rd | seade.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dados.R
\docType{data}
\name{seade}
\alias{seade}
\title{Fundacao Seade}
\format{a `tibble` with 40 rows and 2 colums
\describe{
\item{ano}{years from 1980 to 2017}
\item{data}{a vector os \code{lists} with nested data}
}}
\usage{
seade
}
\description{
Socio economic statistcs of Sao Paulo estate, from SEADE foundation.
}
\keyword{datasets}
|
a5cc7c52e745f2e30ee32084880a301425ab9d0e | 17a39baa2862be00463075c9b039c361dc137b3b | /2016_09/IntroductionToBigDataAnalysis/Quiz/Quiz_2/Quiz_2.R | b41d61e26abdaf341889b3346211da9de38b4701 | [] | no_license | tnfsh810124/Courses | 76d50bdfa35de60dfef7a80818c835856a5c0443 | 65d62fc13977622905decb30ddc16a6bbbdddaa2 | refs/heads/master | 2021-06-27T00:47:56.552817 | 2017-09-15T05:44:31 | 2017-09-15T05:44:31 | 103,616,386 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,357 | r | Quiz_2.R | library(MASS)
df <- read.csv("train_u6lujuX_CVtuZ9i.csv", header = T, sep = ",")
sapply(1:dim(df)[2], function(x) c(names(df)[x], levels(df[, x])))
nafactor <-
sapply(c(1:6, 12,13), function(x){
sapply(1:dim(df)[1], function(n){
ifelse(df[n, x] == "", NA, as.character.factor(df[n, x]))
})
})
nafactor <- as.data.frame(nafactor)
names(nafactor) <- names(df)[c(1:6, 12,13)]
for(j in 1:6){
df[, j] <- nafactor[, j]
}
for(j in 7:8){
df[, j + 5] <- nafactor[, j]
}
df[,13] <- ifelse(df[, 13]=="Y", 1, 0)
df <- na.omit(df)
test <- read.csv("test_Y3wMUE5_7gLdaTN.csv", header = T)
formula_full <- as.formula(paste0("Loan_Status ~ ", paste(names(df)[2:11], collapse = " + ")))
################################################
glm_full <- glm(formula_full, data = df, family = binomial)
prdct <- ifelse(predict(glm_full, type = "response") > .5, 1, 0)
mean(prdct== df$Loan_Status)
################################################
set.seed(87)
tr_num <- sample(dim(df)[1], 0.5*dim(df)[1])
tr <- df[-tr_num, ]
te <- df[ tr_num, ]
glm_tr <- glm(formula_full, data = tr, family = binomial)
prdct_tr <- ifelse(predict(glm_tr, type = "response") > .5, 1, 0)
mean(prdct_tr== tr$Loan_Status)
################################################
library(glmnet)
tr.mat <- model.matrix(formula_full , data = tr)
te.mat <- model.matrix(formula_full , data = te)
grid <- 10^seq(4, -2, length = 100)
mod.lasso <- cv.glmnet(tr.mat, tr$Loan_Status, alpha = 1, lambda = grid, thresh = 1e-12)
lambda.best <- mod.lasso$lambda.min
lambda.best
lasso.prd <- predict(mod.lasso, newx = te.mat, s = lambda.best)
lasso.response <- ifelse(lasso.prd >= .5, 1, 0)
mean(te$Loan_Status == lasso.response)
mod.lasso <- glmnet(model.matrix(formula_full, data = tr), tr$Loan_Status, alpha = 1)
predict(mod.lasso, s = lambda.best, type = "coefficients")
################################################
# Final model: Credit_History plus an indicator for Property_Area ==
# "Semiurban", fit on the full training data, then scored on `test` and
# written out as a submission file.
Property_AreaSemiurban <- ifelse(df$Property_Area == "Semiurban", 1, 0)
train_final <- cbind.data.frame(df$Credit_History, Property_AreaSemiurban, df$Loan_Status)
names(train_final) <- c("Credit_History", "Property_AreaSemiurban", "Loan_Status")
final <- glm(Loan_Status ~ Credit_History + Property_AreaSemiurban, data = train_final, family = "binomial")
# Rebuild the same indicator for the test set and score at a 0.5 cutoff.
Property_AreaSemiurban <- ifelse(test$Property_Area == "Semiurban", 1, 0)
test_final <- cbind.data.frame(test$Credit_History, Property_AreaSemiurban)
names(test_final) <- c("Credit_History", "Property_AreaSemiurban")
predict_final <- ifelse(predict(final,
                                newdata = test_final,
                                type = "response") >= 0.5 , 1, 0)
# NOTE(review): the next three lines look redundant -- the first cbind()
# coerces the predictions through a character matrix, the second prepends
# Loan_ID a second time, and the third drops the duplicate column. The
# net result is a two-column frame (Loan_ID, prediction-as-character).
# A single data.frame(Loan_ID = ..., prediction = ...) would be cleaner;
# verify before changing, since it alters quoting in the written CSV.
predict_final <- data.frame(cbind(test$Loan_ID, predict_final))
predict_final <- cbind.data.frame(test$Loan_ID, predict_final)
predict_final <- predict_final[, -2]
write.csv(predict_final, "result.csv")
################################################
# Alternative: fit on all predictors, inspect multicollinearity (VIF),
# then refit a reduced model and write out a second submission.
fit <- glm(Loan_Status ~. , data = df[, -1], family = "binomial")
summary(fit)
car::vif(fit)
# NOTE(review): this refit omits family = "binomial", so it is ordinary
# least squares on a 0/1 response (a linear probability model); confirm
# whether that is intentional.
fit <- glm(Loan_Status ~ Married + Credit_History + ifelse(Property_Area == "Semiurban", 1, 0)
           , data = df)
summary(fit)
# Threshold at 0.5; rows the model cannot score (NA) default to 0.
predict_final <- ifelse(predict(fit, newdata = test)<=.5, 0, 1)
predict_final[is.na(predict_final)] <- 0
# NOTE(review): cbind() of a character and a numeric vector builds a
# character matrix, so both output columns end up character/factor.
result <- as.data.frame(cbind(as.character(test$Loan_ID), as.numeric(predict_final)))
names(result) <- names(df)[c(1, 13)]
write.csv(result, "result.csv")
|
0fa32333661914c5cb404bcff9342bbb1c946942 | f0ba683353c4e3faf242e56c84defda4972686e1 | /R/model_all_json.R | ae3dc975a937108aba2419f5f9f7497e3fd24738 | [
"MIT"
] | permissive | epongpipat/eepR | bf567c666eef0417b0dece4088ec95697f02cdba | 970c4699db1e005cabd282e903706239033c7b02 | refs/heads/main | 2023-04-01T22:44:23.247733 | 2023-03-28T17:42:07 | 2023-03-28T17:42:07 | 205,262,495 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 604 | r | model_all_json.R | model_all_json_list <- function(x_path, y_path, model_path, out_path) {
n_pad <- 3
x_path <- "/Volumes/eep/"
model_path <- "/Volumes/eep/kaggle/trends-assessment-prediction/models_list.json"
stop_if_dne(x_path)
x <- read.csv(x_path)
stop_if_dne(y_path)
y <- read.csv(y_path)
stop_if_dne(model_path)
models <- read_json(model_path)
for (i in 1:length(models)) {
models[[i]]$args$x <- x
models[[i]]$args$y <- y
fun_name <- glue("{models[[i]]$pkg}::{models[[i]]$fun}")
model <- do.call(eval(parse(text = fun_name)), models[[i]]$args)
save(model, out_path)
}
}
|
2feb45a48f6b0e8c82dffb4e1e8fcb52ae8dc29d | 9bca1a47407e07d5b29c0094947d11f05fee4529 | /20210215_Dashboard_Shiny_MOOC.R | 404ce9f41c3c06ff411627c22a68cfeeb71f7e2e | [] | no_license | Cesarp27/Projet_7_Mooc_Alexia_Jerome_Cesar | 820107d052c66e2c3586ef647acdeaeaf1531837 | 72b86e68dbb3a890fedc5f571b34025ec73b9a88 | refs/heads/main | 2023-03-07T05:01:17.112704 | 2021-02-20T12:39:38 | 2021-02-20T12:39:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,628 | r | 20210215_Dashboard_Shiny_MOOC.R | # fbrisadelamontaña()
library(mongolite)
library(syuzhet)
library(shiny)
library(shinydashboard)
library(shinydashboardPlus)
library(shinyWidgets)
library(tidyverse)
library(plotly)
library(dplyr)
library(NLP)
library(tm)
library(wordcloud)
library(wordcloud2)
library(RColorBrewer)
library(fresh)
# Custom shinydashboard skin: recolour the default "light blue" status
# to teal (#008080) via the fresh package; applied in the UI with
# use_theme(mytheme).
mytheme <- create_theme(
  adminlte_color(
    light_blue = "#008080"
  )
)
#####################
# trait_sent: word/sentiment association for a vector of message bodies
#####################
# df_t : character vector of raw message bodies.
# Returns a list of four elements:
#   [[1]] sentences extracted from the messages,
#   [[2]] raw NRC sentiment matrix (one row per sentence),
#   [[3]] the eight emotion categories as sorted proportions (per, sent),
#   [[4]] negative/positive polarity as sorted proportions (per, sent).
# Fix: removed the unused local `method <- "nrc"` (never referenced).
trait_sent <- function (df_t) {
  # Newlines break syuzhet's sentence splitter, so flatten them first.
  df2 <- str_replace_all(df_t, "\n"," ")
  char_v <- get_sentences(df2)
  lang <- "french"
  mtv <- get_nrc_sentiment(char_v, language=lang)
  # Emotions (columns 1-8): column sums as proportions, sorted ascending,
  # reshaped into a (per, sent) data frame for plotting.
  mtv2 <- as.data.frame(cbind(sort(colSums(prop.table(mtv[,1:8]))))) %>%
    mutate(sent=row.names(.)) %>%
    rename(per=V1)
  # Polarity (columns 9-10: negative / positive), same treatment.
  mtv3 <- as.data.frame(cbind(sort(colSums(prop.table(mtv[,9:10]))))) %>%
    mutate(sent=row.names(.)) %>%
    rename(per=V1)
  return(list(char_v,mtv,mtv2,mtv3))
}
##################################
# Patched copy of wordcloud2::wordcloud2 so the word cloud and other
# htmlwidgets can be displayed at the same time.
#--------------------------------------------------------------------------------------------
# data: a table, or a data frame whose first two columns are word / freq.
# Returns an htmlwidget (the value is returned, not auto-printed), so it
# can be used inside renderWordcloud2 alongside other outputs.
wordcloud2a <- function (data, size = 1, minSize = 0, gridSize = 0, fontFamily = "Segoe UI",
                         fontWeight = "bold", color = "random-dark", backgroundColor = "white",
                         minRotation = -pi/4, maxRotation = pi/4, shuffle = TRUE,
                         rotateRatio = 0.4, shape = "circle", ellipticity = 0.65,
                         widgetsize = NULL, figPath = NULL, hoverFunction = NULL)
{
  # Normalise the input into a two-column (name, freq) data frame.
  if ("table" %in% class(data)) {
    dataOut = data.frame(name = names(data), freq = as.vector(data))
  }
  else {
    data = as.data.frame(data)
    dataOut = data[, 1:2]
    names(dataOut) = c("name", "freq")
  }
  # Optional mask image: validated, then inlined as a base64 data URI.
  if (!is.null(figPath)) {
    if (!file.exists(figPath)) {
      stop("cannot find fig in the figPath")
    }
    spPath = strsplit(figPath, "\\.")[[1]]
    len = length(spPath)
    figClass = spPath[len]
    if (!figClass %in% c("jpeg", "jpg", "png", "bmp", "gif")) {
      stop("file should be a jpeg, jpg, png, bmp or gif file!")
    }
    base64 = base64enc::base64encode(figPath)
    base64 = paste0("data:image/", figClass, ";base64,",
                    base64)
  }
  else {
    base64 = NULL
  }
  # Scale word sizes relative to the most frequent word.
  weightFactor = size * 180/max(dataOut$freq)
  settings <- list(word = dataOut$name, freq = dataOut$freq,
                   fontFamily = fontFamily, fontWeight = fontWeight, color = color,
                   minSize = minSize, weightFactor = weightFactor, backgroundColor = backgroundColor,
                   gridSize = gridSize, minRotation = minRotation, maxRotation = maxRotation,
                   shuffle = shuffle, rotateRatio = rotateRatio, shape = shape,
                   ellipticity = ellipticity, figBase64 = base64, hover = htmlwidgets::JS(hoverFunction))
  chart = htmlwidgets::createWidget("wordcloud2", settings,
                                    width = widgetsize[1], height = widgetsize[2], sizingPolicy = htmlwidgets::sizingPolicy(viewer.padding = 0,
                                                                                                                           browser.padding = 0, browser.fill = TRUE))
  chart
}
#-----------------------------fin de l'édition de libreirie--------------------------------------------------------------------------------------------------------------
# Dashboard layout: two tabs ("Général" overview and "Sentiment Analysis"),
# each driven by its own MOOC picker ("Mooc" / "sentid"), whose values are
# the MongoDB collection names queried in server().
ui <- dashboardPage(
  dashboardHeader(title = "FUN MOOC"),
  dashboardSidebar(
    sidebarMenu(
      menuItem("Général", tabName = "dashboard", icon = icon("dashboard")),
      menuItem("Sentiment Analysis", tabName = "donnees", icon = icon("file-code-o"))
    )
  ),
  ## Body content
  dashboardBody(
    use_theme(mytheme),
    # NOTE(review): the embedded CSS below carries an extra trailing "}"
    # before the closing quote -- harmless to browsers but worth cleaning.
    tags$head(tags$style(HTML('
      .my-class {
        font-family: "Georgia", Times, "Times New Roman", serif;
        font-weight: bold;
        font-size: 24px;
      }
      .box.box-solid.box-primary>.box-header{
      }
      .box.box-solid.box-primary{
        background:silver
      }
      }
    '))),
    tabItems(
      # First tab: global activity indicators for the selected MOOC.
      tabItem(tabName = "dashboard",
              fluidRow(
                pickerInput(
                  inputId = "Mooc", label = "Les Moocs",
                  choices = c(
                    "Introduction à la physique quantique"="messages_Physique_Quantique",
                    "Apprendre à coder avec Python"="messages_Python_vf",
                    "Les mots du pouvoir"="messages_Les_mots_du_pouvoir",
                    "Introduction à la statistique avec R"="messages_R",
                    "L'Intelligence Artificielle… avec intelligence !"="messages_IA"
                  )
                )
              ),
              fluidRow(
                # Dynamic infoBoxes (filled by reac_dfval in server()).
                infoBoxOutput("progressBox_p", width = 3),
                infoBoxOutput("progressBox2_p", width = 3),
                infoBoxOutput("approvalBox_p", width = 3),
                infoBoxOutput("approvalBox2_p", width = 3)
              ),
              fluidRow(
                # Monthly activity, top posters and word-cloud panels.
                box(
                  title = "Nombre de commentaires par mois", status = "primary",solidHeader = TRUE,
                  collapsible = TRUE,width = 4,
                  plotlyOutput("plot1_p")),
                box(
                  title = "Top 20 des utilisateurs les plus actifs", status = "primary",solidHeader = TRUE,
                  collapsible = TRUE,width = 4,
                  plotlyOutput("plot2_p")),
                box(
                  title = "Word cloud", status = "primary",solidHeader = TRUE,
                  collapsible = TRUE,width = 4,
                  wordcloud2Output("cloud")
                )
              )
      ),
      # Second tab: NRC sentiment exploration.
      tabItem(tabName = "donnees",
              fluidRow(
                pickerInput(
                  inputId = "sentid",
                  label = "Les Moocs",
                  choices = c(
                    "Introduction à la physique quantique"="messages_Physique_Quantique",
                    "Les mots du pouvoir"="messages_Les_mots_du_pouvoir",
                    "Introduction à la statistique avec R"="messages_R",
                    "L'Intelligence Artificielle… avec intelligence !"="messages_IA"
                  )
                )
              ),
              fluidRow(
                box(title = "La roue",status = "primary", solidHeader = TRUE, closable = FALSE, collapsible = TRUE, plotOutput("plot1")),
                box(title = "La jauge",status = "primary", solidHeader = TRUE, closable = FALSE, collapsible = TRUE,plotlyOutput("jauge")),
                box(title = "Le choix",status = "primary", solidHeader = TRUE, closable = FALSE, collapsible = TRUE,
                    radioGroupButtons(
                      inputId = "sentbutton",
                      label = "Examen des différents messages suivant les sentiments",
                      # Fix: "trust" was listed twice (9 entries for the 8
                      # NRC emotions), producing a duplicate button.
                      choices = c("anger", "trust", "anticipation", "disgust","fear","joy","sadness","surprise"),
                      individual = TRUE,
                      checkIcon = list(
                        yes = tags$i(class = "fa fa-circle",
                                     style = "color: steelblue"),
                        no = tags$i(class = "fa fa-circle-o",
                                    style = "color: steelblue"))
                    )
                ),
                box(title = "Les réactions", status = "primary", solidHeader = TRUE, closable = FALSE, collapsible = TRUE, collapsed = TRUE, tableOutput("table"))
              )
      )
    )
  )
)
# Shiny server: the sentiment tab is driven by reac_dfsent (collection
# chosen via input$sentid), the overview tab by reac_dfval (input$Mooc).
# Both reactives open a fresh MongoDB connection using credentials read
# from config.yml.
server <- function(input, output) {
  ###############
  # Server side of the sentiment tab
  ###############
  # Reactive: fetch all message bodies of the selected collection and run
  # the NRC word/sentiment association on them (see trait_sent above).
  reac_dfsent <- reactive({
    config <- yaml::yaml.load_file("config.yml")
    id <- input$sentid
    url <- paste("mongodb://", config$mongo$user, ":", config$mongo$password,"@127.0.0.1/bdd_grp4?authSource=admin", sep="")
    o <- mongo(id, url = url)
    plop <- o$aggregate('[{"$project": {"_id":0,"body":"$body"}}]')
    trait_sent(plop$body)
  })
  # Reactive: sample sentences scoring > 2 on the chosen sentiment.
  reac_sentbutton <- reactive ({
    df_sent <- reac_dfsent()[[2]][input$sentbutton]
    sent_items <- which(df_sent > 2)
    res_sent <- reac_dfsent()[[1]][sent_items]
    head(res_sent)
  })
  # Reactive: plotly gauge showing the positive-polarity proportion (%).
  reac_jauge <- reactive ({
    fig <- plot_ly(
      # Fix: the parentheses were misplaced -- list(x=c(0,1, y=c(0,1)))
      # nested y inside x and left domain$y undefined.
      domain = list(x = c(0, 1), y = c(0, 1)),
      value = (reac_dfsent()[4][[1]][2,1])*100,
      title = list(text="Degré de satisfaction (en %)"),
      delta = list(reference = 400, increasing = list(color = "RebeccaPurple")),
      gauge = list(
        axis = list(range = list(0, 100), tickwidth = 1, tickcolor = "darkblue"),
        bar = list(color = "darkblue"),
        bgcolor = "white",
        borderwidth = 2,
        bordercolor = "gray",
        steps = list(
          list(range = c(0, 66), color = "cyan"),
          list(range = c(66, 100), color = "royalblue"))),
      type = "indicator",
      mode = "gauge+number")
    fig <- fig %>%
      layout(margin = list(l=20,r=30), paper_bgcolor="lavender", font = list(color="darkblue", family = "Arial"))
    fig
  })
  # Sentence table output.
  output$table <- renderTable({
    reac_sentbutton()
  })
  # Gauge output.
  output$jauge <- renderPlotly(reac_jauge())
  # Polar-coordinate barplot of the eight emotion proportions.
  output$plot1 <- renderPlot({
    plot <- ggplot(reac_dfsent()[[3]],
                   aes(
                     x = sent,
                     y = per,
                     fill = sent,
                     text="sent"
                   )) +
      geom_col(width = 1, color = "white") +
      coord_polar()+ labs(
        x = "",
        y = ""#,
        #title = "Your Title",
        #subtitle = "Your Subtitle",
        #caption = "Your Caption"
      ) +
      theme_minimal()+
      theme(
        #strip.background = element_rect(fill = "grey", colour ="grey"),
        panel.background = element_rect(fill = "grey", colour ="grey"),
        panel.border = element_blank(),
        plot.background = element_rect(fill = "darkgrey", colour = "black"),
        panel.grid = element_line(size=0.5, linetype = "solid",color = "darkgrey"),
        legend.position = "none",
        axis.title.x = element_blank(),
        axis.title.y = element_blank(),
        axis.ticks = element_blank(),
        axis.text.y = element_blank(),
        axis.text.x = element_text(face = "bold", colour = "white")
      )
    plot
  })
  ################
  # Reactive: all overview-tab statistics for the selected collection.
  reac_dfval <- reactive({
    config <- yaml::yaml.load_file("config.yml")
    url <- paste("mongodb://", config$mongo$user, ":", config$mongo$password,"@127.0.0.1/bdd_grp4?authSource=admin", sep="")
    m <- mongo(input$Mooc, url = url)
    mooc_python <- function() {
      # How many posts each user made (top 20 by count).
      pub_par_ut_python <- m$aggregate('[
      {"$group": {"_id":"$username","publications": {"$sum": 1 } } },
      {"$sort": {"publications": -1 } },
      {"$limit": 20}
      ]')
      pub_par_ut_2_python <- pub_par_ut_python[-1,] # drop the first entry, which is the instructor
      # Top 20 most active users.
      plot_top_20_utilisateurs_python <- ggplot(pub_par_ut_2_python) +
        aes(x = reorder(`_id`, publications), weight = publications) +
        geom_bar(fill = "#0c4c8a") +
        coord_flip() +
        labs(title = "Top 20 des utilisateurs les plus actifs") +
        labs(x = "Utilisateurs") +
        theme_minimal()
      plot_top_20_utilisateurs_plotly_python <- ggplotly(plot_top_20_utilisateurs_python)
      # User who posted the most messages.
      ut_plus_actif_python <- pub_par_ut_python[1,1]
      # How many messages that user posted.
      num_max_pub_python <- pub_par_ut_python[1,2]
      # Total number of posts in the MOOC.
      tot_publications_python <- m$aggregate('[
      {"$group": {"_id":"$username","publications": {"$sum": 1 } } },
      {"$sort": {"publications": -1 } },
      {"$group":{"_id":"", "total":{"$sum":"$publications"}}}
      ]')
      # How many users took part in the MOOC.
      list_user_python <- m$distinct("username")
      nombre_d_utilisateurs_python <- length(list_user_python)
      # in MongoDB: db.countries.distinct('country').length
      #--------------------------------------------------------------
      # How many messages were posted per month.
      m$aggregate('[{
      "$project":
      {
      "updated_at":1,
      "username":1,
      "date": { "$dateFromString": {"dateString": "$updated_at"} }
      }
      },
      {"$group":{"_id":{ "annee":{"$year":"$date"}, "mois":{"$month":"$date"}}, "subtotal":{"$sum":1}}},
      {"$sort":{"_id":1}}
      ]')
      # Store the previous result in a variable.
      df_par_mois_python <- m$aggregate('[{
      "$project":
      {
      "updated_at":1,
      "username":1,
      "date": { "$dateFromString": {"dateString": "$updated_at"} }
      }
      },
      {"$group":{"_id":{ "annee":{"$year":"$date"}, "mois":{"$month":"$date"}}, "subtotal":{"$sum":1}}},
      {"$sort":{"_id":1}}
      ]')
      # Flatten the nested _id columns into plain ones, then build a Date
      # from the year/month pair (day fixed to 01).
      df_votes_par_mois_python <- jsonlite::flatten(df_par_mois_python) %>%
        mutate(mois_annee=as.Date(paste(`_id.annee`,`_id.mois`,'01',sep='-')))
      # Tag the frame with the MOOC name.
      df_votes_par_mois_python$MOOC <- "Python"
      # Monthly comment-count line chart.
      posts_par_mois_python <- ggplot(df_votes_par_mois_python) +
        aes(x = mois_annee, y = subtotal) +
        geom_line(size = 1L, colour = "#4292c6") +
        labs(x = "mois (2020/2021)", y = "Nombre de commentaires", title = "Nombre de commentaires par mois") +
        theme_classic()
      posts_par_mois_plotly_python <- ggplotly(posts_par_mois_python)
      # ------------------ wordcloud --------------------------------------------
      text <- m$aggregate('[{"$project": {"_id":0,"body":"$body"}}]')
      # Create a corpus and clean it (case, numbers, stopwords, punctuation).
      docs <- Corpus(VectorSource(text$body))
      toSpace <- content_transformer(function (x , pattern ) gsub(pattern, " ", x))
      docs <- tm_map(docs, toSpace, "/")
      docs <- tm_map(docs, content_transformer(tolower))
      docs <- tm_map(docs, removeNumbers)
      docs <- tm_map(docs, removeWords, stopwords("french"))
      docs <- tm_map(docs, removePunctuation)
      docs <- tm_map(docs, stripWhitespace)
      dtm <- TermDocumentMatrix(docs)
      matrix <- as.matrix(dtm)
      words <- sort(rowSums(matrix),decreasing=TRUE)
      df_p <- data.frame(word = names(words),freq=words)
      # ----------------------end of wordcloud ----------------------------------
      return(list(utilisateurs = nombre_d_utilisateurs_python,
                  plus_actif = ut_plus_actif_python,
                  publications = tot_publications_python,
                  pub_plus_actif = num_max_pub_python,
                  votes_par_mois = df_votes_par_mois_python,
                  pub_par_ut = pub_par_ut_2_python,
                  df_p = df_p))
    }
    result_python <- mooc_python()
  })
  #output$value <- renderPrint(input$Mooc)
  #output$table <- renderTable(reac_dfval())
  #----------- overview-tab outputs --------------------------------------------------------------------
  # Start of infoBoxes
  # Monthly comment counts (reac_dfval()$votes_par_mois).
  output$plot1_p <- renderPlotly({
    ggplotly(ggplot(reac_dfval()$votes_par_mois) +
               aes(x = mois_annee, y = subtotal) +
               geom_line(size = 1L, colour = "#4292c6") +
               labs(x = "mois", y = "Nombre de commentaires") +
               theme_classic())
  })
  # Top-20 posters (reac_dfval()$pub_par_ut).
  output$plot2_p <- renderPlotly({
    ggplotly(ggplot(reac_dfval()$pub_par_ut) +
               aes(x = reorder(`_id`, publications), weight = publications) +
               geom_bar(fill = "#0c4c8a") +
               coord_flip() +
               labs(x = "Utilisateurs") +
               theme_minimal())
  })
  # Word cloud of message bodies (reac_dfval()$df_p).
  output$cloud <- renderWordcloud2({
    wordcloud2a(data=reac_dfval()$df_p, size=0.4, color='random-dark', shape = 'diamond')
  })
  # Number of users.
  output$progressBox_p <- renderInfoBox({
    infoBox(
      "Nombre d'utilisateurs", reac_dfval()$utilisateurs, icon = icon("users"),
      color = "green"
    )
  })
  # Most active user.
  output$approvalBox_p <- renderInfoBox({
    infoBox(
      "Utilisateur le plus actif", reac_dfval()$plus_actif, icon = icon("send", lib = "glyphicon"),
      color = "olive"
    )
  })
  # Same as above, but with fill=TRUE
  # Total number of posts.
  output$progressBox2_p <- renderInfoBox({
    infoBox(
      "Nombre total de publications", reac_dfval()$publications, icon = icon("comments"),
      color = "green", fill = TRUE
    )
  })
  # Post count of the most active user.
  output$approvalBox2_p <- renderInfoBox({
    infoBox(
      "Nombre de posts", reac_dfval()$pub_plus_actif, icon = icon("thumbs-up", lib = "glyphicon"),
      color = "olive", fill = TRUE
    )
  })
  # end of infoBoxes
}
shinyApp(ui, server)
|
123bba70fbc295ec0c7a3db4f0dbf3b70d6f8c20 | e12bfe3d525047b48e656a9de4e95dce3fff092e | /man/plothomol.Rd | 8fdb664de4913bdf3352141c614b5fcb6fe38b30 | [] | no_license | cran/nontarget | b3b2123169554ff1bc22b971ed52528837e5293d | 2ff45421e57ebca83edf99d66b8297112647dde4 | refs/heads/master | 2021-01-15T15:47:38.391313 | 2016-09-27T16:39:53 | 2016-09-27T16:39:53 | 17,697,934 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,551 | rd | plothomol.Rd | \name{plothomol}
\alias{plothomol}
\title{
Marks homologue series peaks in a scatterplot of retention time (RT) vs. m/z
}
\description{
Given results from \code{\link[nontarget]{homol.search}}, a scatterplot of peaks within m/z and RT is generated with homologue series marked.
Herein, homologue series receive a color code based on the mean m/z differences between adjacent peaks of a series; these differences
are rounded up to the second digit.
}
\usage{
plothomol(homol, xlim = FALSE, ylim = FALSE,plotlegend=TRUE,plotdefect=FALSE)
}
\arguments{
\item{homol}{
List of type homol produced by \code{\link[nontarget]{homol.search}}.
}
\item{xlim}{
\code{xlim=c(upper bound,lower bound)}, default = FALSE.
}
\item{ylim}{
\code{ylim=c(upper bound,lower bound)}, default = FALSE.
}
\item{plotlegend}{
Should a listing of m/z differences within homologue series and the concomitant color codes be added to the plot? If not, set to FALSE.
}
\item{plotdefect}{
Plot the mass defect instead of the m/z value.
}
}
\author{
Martin Loos
}
\seealso{
\code{\link[nontarget]{homol.search}}
}
\examples{
\donttest{
data(peaklist);
data(isotopes)
homol<-homol.search(
peaklist,
isotopes,
elements=c("C","H","O"),
use_C=TRUE,
minmz=5,
maxmz=120,
minrt=2,
maxrt=2,
ppm=TRUE,
mztol=3.5,
rttol=0.5,
minlength=5,
mzfilter=FALSE,
vec_size=3E6,
spar=.45,
R2=.98,
plotit=FALSE
)
plothomol(homol,xlim=FALSE,ylim=FALSE,plotlegend=FALSE,plotdefect=FALSE);
}
}
|
93b9e3cb291626d8dd39e963cce86e42f92ae6cd | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/MCPMod/examples/planMM.Rd.R | 8cfecfc0cd498af04905a7c4df0fe8dd1310f1f0 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 846 | r | planMM.Rd.R | library(MCPMod)
### Name: planMM
### Title: Calculate planning quantities for MCPMod
### Aliases: planMM print.planMM
### Keywords: design
### ** Examples
# Example from JBS paper
# Candidate dose-response models: linear, Emax (ED50 = 25), logistic,
# exponential, and two beta models (one parameter set per matrix row).
doses <- c(0,10,25,50,100,150)
models <- list(linear = NULL, emax = 25,
               logistic = c(50, 10.88111), exponential= 85,
               betaMod=matrix(c(0.33,2.31,1.39,1.39), byrow=TRUE, nrow=2))
# Planning quantities (optimal contrasts, critical value) for MCPMod with
# n = 50 per dose group; scal = 200 is the beta-model scale parameter.
plM <- planMM(models, doses, n = rep(50,6), alpha = 0.05, scal=200)
plot(plM)
## Not run:
##D # example, where means are directly specified
##D # doses
##D dvec <- c(0, 10, 50, 100)
##D # mean vectors
##D mu1 <- c(1, 2, 2, 2)
##D mu2 <- c(1, 1, 2, 2)
##D mu3 <- c(1, 1, 1, 2)
##D mMat <- cbind(mu1, mu2, mu3)
##D dimnames(mMat)[[1]] <- dvec
##D planMM(muMat = mMat, doses = dvec, n = 30)
## End(Not run)
|
68727849f3973a324efb363428562dafe35409be | 1e7fa4b25807299f732c68fb5560f661151fc34c | /R/update_all.R | 88ce8db3dc32c97ef31717dbaa8b4e62bc1c0448 | [] | no_license | gaospecial/rvcheck | 7cb4b0da40ca91bd8a6cab6f0b44ef6a7bfd6be6 | 151bd5756dd8f53670ebb42c0997a67848598924 | refs/heads/master | 2020-04-05T00:16:36.293757 | 2019-10-10T09:43:51 | 2019-10-10T09:43:51 | 156,390,608 | 0 | 0 | null | 2019-10-10T09:24:11 | 2018-11-06T13:49:12 | R | UTF-8 | R | false | false | 3,877 | r | update_all.R | ##' update all packages
##'
##'
##' @title update_all
##' @param check_R whether check R version
##' @param which repo (CRAN, BioC, github) to update
##' @return NULL
##' @importFrom utils update.packages
##' @importFrom utils remove.packages
##' @export
##' @examples
##' \dontrun{
##' library(rvcheck)
##' update_all()
##' }
##' @author Guangchuang Yu
update_all <- function(check_R=TRUE, which=c("CRAN", "BioC", "github")) {
    ## Refuse to update packages against an outdated R installation.
    if (check_R && !check_r()$up_to_date) {
        stop("you need to upgrade your R first...")
    }
    ## Dispatch to the per-repository updaters, always in the fixed
    ## CRAN -> BioC -> github order regardless of how `which` is ordered.
    updaters <- list(CRAN = update_cran, BioC = update_bioc, github = update_github)
    for (repo in names(updaters)) {
        if (repo %in% which) {
            updaters[[repo]]()
        }
    }
    message("done....")
}
is_bioc_up_to_date <- function() {
    ## TRUE when the installed Bioconductor manager is current; the
    ## check's message output is silenced.
    status <- suppressMessages(check_bioc())
    status$up_to_date
}
update_cran <- function() {
    ## Update every outdated CRAN package; a failure is swallowed so the
    ## remaining repositories can still be processed.
    message("upgrading CRAN packages...")
    swallow <- function(e) NULL
    tryCatch(
        update.packages(ask = FALSE, checkBuilt = TRUE),
        error = swallow
    )
}
##' @importFrom utils install.packages
## Upgrade all Bioconductor packages. Three situations are distinguished
## via `flag`:
##   "No_BioC"       -- neither BiocManager nor BiocInstaller is installed,
##   "BiocInstaller" -- old R (< 3.5.0) stuck on the legacy biocLite flow,
##   "BiocManager"   -- the current manager (already installed, or migrated).
update_bioc <- function() {
    pkg <- "BiocManager"
    bioc_version <- tryCatch(packageVersion(pkg), error=function(e) NULL)
    flag <- "BiocManager"
    if (is.null(bioc_version)) {
        ## BiocManager absent: look for the legacy BiocInstaller instead.
        biocLite <- tryCatch(packageVersion("BiocInstaller"), error=function(e) NULL)
        if (is.null(biocLite)){
            flag <- "No_BioC"
        } else if (check_r()$installed_version < "R-3.5.0") {
            flag <- "BiocInstaller"
        } else {
            ## Recent R but legacy installer: migrate to BiocManager.
            message('Bioconductor has switched to a new package manager: "BiocManager".')
            message("Removing BiocInstaller and install BiocManager")
            remove.packages("BiocInstaller")
            install.packages("BiocManager")
            flag <- "BiocManager"
        }
    }
    if (flag == "No_BioC"){
        message("no Bioconductor packages found...")
    } else if (flag == "BiocInstaller") {
        ## Legacy path: ask for confirmation, then update via biocLite()
        ## from the (outdated) release matching this R version.
        message("Your R is out-dated.")
        message('Bioconductor 3.8 has switched to a new package manager: "BiocManager".')
        invisible(readline(prompt="Press [enter] to continue to update Bioconductor (outdated release that fit your R version)"))
        if ("BiocInstaller" %in% loadedNamespaces()) {
            detach("package:BiocInstaller", character.only=TRUE)
            remove.packages("BiocInstaller")
            source("https://www.bioconductor.org/biocLite.R")
        }
        suppressPackageStartupMessages(require(pkg, character.only = TRUE))
        ## eval(parse()) avoids a hard dependency on the biocLite symbol;
        ## it is resolved only when this branch actually runs.
        biocLite <- eval(parse(text="biocLite"))
        biocLite(ask=FALSE, checkBuilt=TRUE)
    } else {
        bioc <- is_bioc_up_to_date()
        if (is.na(bioc)) {
            ## The devel branch reports NA rather than TRUE/FALSE.
            message("You are using devel branch of Bioconductor...")
        } else if (!bioc) {
            ## Self-update BiocManager before updating packages with it.
            message("BiocManager is out of date...")
            message("Upgrading BiocManager...")
            if (pkg %in% loadedNamespaces())
                detach("package:BiocManager", character.only=TRUE)
            remove.packages("BiocManager")
            install.packages("BiocManager")
        }
        message("upgrading BioC packages...")
        suppressPackageStartupMessages(require(pkg, character.only = TRUE))
        install <- eval(parse(text="BiocManager::install"))
        install(ask=FALSE, checkBuilt=TRUE)
    }
}
##' @importFrom utils installed.packages
##' @importFrom utils packageDescription
## Reinstall every package whose DESCRIPTION carries GitHub metadata
## (i.e. was installed from GitHub via devtools/remotes). Each install is
## best-effort: a failure must not abort the loop.
## Returns (invisibly) a per-package list of install results / NULLs,
## mirroring the previous sapply-based return value.
update_github <- function() {
    message("upgrading github packages...")
    pkgs <- installed.packages()[, 'Package']
    install_github <- get_fun_from_pkg("devtools", "install_github")
    ## lapply instead of sapply: the result is heterogeneous (NULL or an
    ## install result), so sapply's type simplification buys nothing.
    results <- lapply(pkgs, function(pkg) {
        desc <- packageDescription(pkg)
        ## Skip packages without GitHub provenance (no recorded SHA1).
        if (length(desc) <= 1 || is.null(desc$GithubSHA1))
            return(NULL)
        tryCatch(install_github(repo=paste0(desc$GithubUsername, '/', desc$GithubRepo), checkBuilt=TRUE),
                 error=function(e) NULL)
    })
    names(results) <- pkgs
    invisible(results)
}
|
797761a0df4c6f3bb8ccf059ca3b05092c9425e3 | 4b4558c6cbc3cf2a5c0517a9d388572aa5a216ec | /Statistics/Statistical Software and Lab/0326.R | a0349c6252b2efcb87922e315abcb26d488a2c39 | [] | no_license | cow-coding/School-Project | 15269e3f7078521381453871c88ed36ff1008cff | 1e8fa3347da14f381cb73f2cd59d3ea144612bf8 | refs/heads/master | 2022-06-22T01:22:42.025813 | 2022-06-08T18:32:21 | 2022-06-08T18:32:21 | 247,887,355 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 430 | r | 0326.R | x <- c(1,2,3, 1); y <- c(1,2,3,4)
# Practice 1: vector arithmetic (elementwise 2*x + y + 1).
v <- 2 * x + y + 1
print(v)
# Operator precedence: ^ binds tighter than %%, which binds tighter than *.
3 ^ 2 %% 4
3 * 2 %% 4
log(exp(1))
# Summary helpers on a numeric vector.
range(x)
sum(x)
prod(x)
var(x)
# Sample variance computed by hand; should equal var(x).
vari <- sum((x-mean(x))^2)/(length(x)-1)
print(vari)
# Complex-number constructors (length.out, real, imaginary).
complex(real=-17,imaginary = 0)
complex(3,1)
complex(3,10,-2)
#practice2
#1
x <- c("0","21","12","16")
#2
x <- as.integer(x)
# NOTE(review): sort() does not modify x in place -- its result is
# discarded here; x <- sort(x) was probably intended.
sort(x)
#3
# Integer to logical: 0 -> FALSE, any non-zero -> TRUE.
x <- as.logical(x)
#4
y <- seq(0,30,10)
#5
# Elementwise logical comparison (logical x is coerced to 0/1 here).
answer <- x < y & x <= y
#6
ans <- rep(c(TRUE,FALSE),times = 10)
|
447d70f0cc581772556c1494666bc2da98a181f2 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Miller-Marin/trafficlight-controller/tlc01-nonuniform-depth-68/tlc01-nonuniform-depth-68.R | 9ce3836d65be4ef09043a60fc92da717e78979fe | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 78 | r | tlc01-nonuniform-depth-68.R | c1c6068afad7e1fb0c7cda18727fff3c tlc01-nonuniform-depth-68.qdimacs 24220 64770 |
e2e2125d4375b03b38573d887e11acc36f690bb8 | 575c4f1be3540cdea2e66c5aa295ca0d9b88fca7 | /Assignment1/Scripts/case1/ForcastInputData.R | 37640463619e388c5af3da908fa2c65dad26c03f | [] | no_license | wenjin-cao/INFO7390-AdvancesDataScience | a270746343724996b6e05e59f2dbf2a2f9a17845 | 27007380aa99d5b1a8fca61be8f84edc0d09dcfc | refs/heads/master | 2021-06-08T16:49:54.928572 | 2016-12-14T16:55:23 | 2016-12-14T16:55:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,371 | r | ForcastInputData.R |
# Read the hourly forecast data and break the M/D/YYYY date string into
# numeric month / day / year columns spliced in after the date column.
data <- read.csv("/Users/ling/Documents/INFO 7390 Data Science/Assignment/Assignment 1/forecastData.csv", header = TRUE)
date <- data[, 1]
hour <- data[, 2]
temperature <- data[, 3]
# Month: strip "/D/YYYY", leaving the leading month field.
month <- gsub("/[0-9]*/[0-9]*", "", date)
# Fix: this previously read as.numeric(monthL) -- `monthL` was never
# defined, which aborted the script on first run.
month <- as.numeric(month)
data <- cbind(data[, 1], month, data[, 2:3])
# Day: drop the "/YYYY" suffix, then the leading "M/".
day <- gsub("/[0-9][0-9][0-9][0-9]", "", date)
day <- gsub("[0-9]*/", "", day)
day <- as.numeric(day)
data <- cbind(data[, 1:2], day, data[, 3:4])
# Year: drop the leading "M/D/".
year <- gsub("[0-9]*/[0-9]*/", "", date)
year <- as.numeric(year)
data <- cbind(data[, 1:3], year, data[, 4:5])
# Derived features, vectorised (the originals grew vectors inside loops
# with append(), which is O(n^2)). Column 5 of `data` is the hour here.

# peakhour: 1 for hours 07..19 (inclusive), 0 otherwise.
peakhour <- ifelse(data[, 5] %% 24 < 7 | data[, 5] %% 24 > 19, 0, 1)
data <- cbind(data[, 1:5], peakhour, data[, 6])

# DayofWeek: 0-6 cycle derived from the day of month (day 1 -> 0).
DayofWeek <- (day - 1) %% 7
data <- cbind(data[, 1:5], DayofWeek, data[, 6:7])

# weekdays: 0 for the weekend codes (0 and 6), 1 otherwise.
weekdays <- ifelse(DayofWeek %in% c(0, 6), 0, 1)
data <- cbind(data[, 1:6], weekdays, data[, 7:8])

# Restore readable names (cbind auto-named the spliced vector columns
# after the deparsed expressions, e.g. "data[, 6]").
names(data)[names(data) == "data[, 6]"] <- "temperature"
names(data)[names(data) == "data[, 1]"] <- "Date"
names(data)[names(data) == "Hr"] <- "hour"
# Fix: row.names spelled out (was the partially matched "row.name=F").
write.table(data, "/Users/ling/Documents/INFO 7390 Data Science/Assignment/Assignment 1/ready-forecast-data.csv", sep = ",", row.names = FALSE)
5e0c1c8c5da008e31ee3df54bd94bf70bb2fc1a1 | eb471a25a9174128700b648696c759be24ece0a2 | /주제분석2주차_통원/주제분석2주차_통원_newdata_Datahandling2.R | 11048126e003f964b46a0a56f9eea829be3184a6 | [] | no_license | wjddn803/PSAT | a57790255069c0f5156873e51e201c1615916f1d | 940cae1df3a0dbc57b1983c0decef1ae43f65c9b | refs/heads/master | 2020-05-03T12:01:26.671101 | 2019-03-30T21:42:04 | 2019-03-30T21:42:04 | 178,614,300 | 0 | 0 | null | null | null | null | UHC | R | false | false | 12,105 | r | 주제분석2주차_통원_newdata_Datahandling2.R | getwd()
# NOTE(review): setwd() with an absolute, user-specific path makes this
# script non-portable; prefer relative paths or a project-root helper.
setwd("C:/Users/Jungwoo Lim/Documents/2018년 과제/PSAT/통원팀/Data2")
# Itinerary data: the full set plus subsets by number of layovers (via0-3).
final_total_data<-read.csv("final_total_data.csv",header=T)
final_via0_data<-read.csv("final_via0_data.csv",header=T)
final_via1_data<-read.csv("final_via1_data.csv",header=T)
final_via2_data<-read.csv("final_via2_data.csv",header=T)
final_via3_data<-read.csv("final_via3_data.csv",header=T)
# Map each iata_code to its continent.
airports_final2<-read.csv("airports_final2.csv",header=T)
airports_final2$continent<-as.character(airports_final2$continent)
# Keep the recorded continent except for US/CA airports, which are
# labelled with the *string* "NA" (not a missing value) --
# NOTE(review): confirm this is deliberate before treating the column
# as missing data downstream.
airports_final2$new_continent<-ifelse(airports_final2$iso_country!="US" & airports_final2$iso_country!="CA",
                               airports_final2$continent,"NA")
# Drop rows whose IATA code is empty or a placeholder ("-"/"0") and keep
# only the columns needed for the joins below.
airports_final2<-subset(airports_final2,subset=airports_final2$iata_code!="" & airports_final2$iata_code!="-" & airports_final2$iata_code!="0",
                 select=c("latitude_deg","longitude_deg","new_continent","iso_country","iata_code"))
airports_final3<-airports_final2
# Join onto the itinerary data, using iata_code as the key (one join per
# stopover-airport slot: Airports2/4/6/8).
# Empty airport codes become NA so left_join will not match them.
final_total_data$Airports2<-ifelse(final_total_data$Airports2=="",NA,as.character(final_total_data$Airports2))
final_total_data$Airports4<-ifelse(final_total_data$Airports4=="",NA,as.character(final_total_data$Airports4))
final_total_data$Airports6<-ifelse(final_total_data$Airports6=="",NA,as.character(final_total_data$Airports6))
final_total_data$Airports8<-ifelse(final_total_data$Airports8=="",NA,as.character(final_total_data$Airports8))
airports_final3$iata_code<-as.character(airports_final3$iata_code)
# Each join appends lat/lon/continent/country columns, which are then
# renamed by *position* (38:41, 42:45, ...) -- NOTE(review): fragile if
# the input gains or loses columns; verify positions before reuse.
final_total_data2<-left_join(final_total_data,airports_final3,by=c("Airports2"="iata_code"))
names(final_total_data2)
names(final_total_data2)[38:41]<-c("Airport2_latitude_deg","Airport2_longitude_deg","Airport2_new_continent","Airport2_iso_country")
final_total_data3<-left_join(final_total_data2,airports_final3,by=c("Airports4"="iata_code"))
names(final_total_data3)
names(final_total_data3)[42:45]<-c("Airport4_latitude_deg","Airport4_longitude_deg","Airport4_new_continent","Airport4_iso_country")
final_total_data4<-left_join(final_total_data3,airports_final3,by=c("Airports6"="iata_code"))
names(final_total_data4)
names(final_total_data4)[46:49]<-c("Airport6_latitude_deg","Airport6_longitude_deg","Airport6_new_continent","Airport6_iso_country")
final_total_data5<-left_join(final_total_data4,airports_final3,by=c("Airports8"="iata_code"))
names(final_total_data5)
names(final_total_data5)[50:53]<-c("Airport8_latitude_deg","Airport8_longitude_deg","Airport8_new_continent","Airport8_iso_country")
final_total_data6<-final_total_data5
# Coordinates are converted via as.character first -- presumably they
# arrive as factors; verify against the CSV import settings.
final_total_data6$Airport2_latitude_deg<-as.numeric(as.character(final_total_data6$Airport2_latitude_deg))
final_total_data6$Airport2_longitude_deg<-as.numeric(as.character(final_total_data6$Airport2_longitude_deg))
final_total_data6$Airport4_latitude_deg<-as.numeric(as.character(final_total_data6$Airport4_latitude_deg))
final_total_data6$Airport4_longitude_deg<-as.numeric(as.character(final_total_data6$Airport4_longitude_deg))
final_total_data6$Airport6_latitude_deg<-as.numeric(as.character(final_total_data6$Airport6_latitude_deg))
final_total_data6$Airport6_longitude_deg<-as.numeric(as.character(final_total_data6$Airport6_longitude_deg))
final_total_data6$Airport8_latitude_deg<-as.numeric(as.character(final_total_data6$Airport8_latitude_deg))
final_total_data6$Airport8_longitude_deg<-as.numeric(as.character(final_total_data6$Airport8_longitude_deg))
str(final_total_data6)
#New variable (distance)
install.packages('geosphere')
library(geosphere)
F.dat<-final_total_data6
F.dat
##### Distance columns for Airports2/4/6/8.
##### Originally four copy-pasted loops that grew a vector with y <- c(y, x)
##### and renamed columns by absolute position (54:57); replaced by one helper.

# Append a Haversine distance column (metres, as geosphere::distm returns)
# from a fixed origin to the airport coordinates stored at the given column
# positions of `dat`.
#
# dat      : data frame holding the coordinates
# lon_col  : column index of the airport longitude (degrees)
# lat_col  : column index of the airport latitude (degrees)
# new_name : name of the distance column to append
# origin   : c(lon, lat) reference point used by the original code
#            (126.6083 E, 37.4722 N -- presumably the Korean departure
#            airport; TODO confirm which one)
add_airport_distance <- function(dat, lon_col, lat_col, new_name,
                                 origin = c(126.6083, 37.4722)) {
  # vapply with a preallocated numeric(1) result instead of growing a vector;
  # rows with NA coordinates (itineraries with fewer legs) yield NA distances.
  dist_m <- vapply(seq_len(nrow(dat)), function(i) {
    geosphere::distm(origin, c(dat[i, lon_col], dat[i, lat_col]),
                     fun = geosphere::distHaversine)[1, 1]
  }, numeric(1))
  dat[[new_name]] <- dist_m
  dat
}

# Column positions follow the renames above: (lon, lat) index pairs per airport.
F.dat <- add_airport_distance(F.dat, 39, 38, "Airport2_Distance")
F.dat <- add_airport_distance(F.dat, 43, 42, "Airport4_Distance")
F.dat <- add_airport_distance(F.dat, 47, 46, "Airport6_Distance")
F.dat <- add_airport_distance(F.dat, 51, 50, "Airport8_Distance")
str(F.dat)
final_total_data7<-F.dat
names(final_total_data7)
# Column subsets for export: the full table plus one table per number of
# layovers (viaNum 0..3), each keeping only the columns that exist for that
# itinerary length.
# NOTE(review): every select list below contains BookingPrice3 TWICE (and no
# second occurrence of any other price) -- almost certainly a copy-paste typo
# that duplicates the column in the written CSVs. Not changed here because
# downstream consumers of these CSVs are not visible; verify and fix at the
# source.
final2_total_data<-subset(final_total_data7,select=c(Year, Month, Date_num,BookingAgency1,BookingPrice1,BookingPrice2,
                                                     BookingPrice3,BookingPrice3,BookingPrice4,BookingPrice5,Airports2,
                                                     Airports4,Airports6,Airports8,Staylocation,Staylocation2,Staylocation3,
                                                     StayTime1_total,StayTime2_total,StayTime3_total,
                                                     MovingTime1_total,MovingTime2_total,MovingTime3_total,MovingTime4_total,
                                                     FirstFlightDep_hr_f,FirstFlightArr_hr_f,secondFlightDep_hr_f,secondFlightArr_hr_f,
                                                     thirdFlightDep_hr_f,thirdFlightArr_hr_f,fourthFlightDep_hr_f,fourthFlightArr_hr_f,
                                                     rank2016,rank2017,Airport2_latitude_deg,Airport2_longitude_deg,Airport2_new_continent,
                                                     Airport2_iso_country,Airport2_Distance,Airport4_latitude_deg,Airport4_longitude_deg,
                                                     Airport4_new_continent,Airport4_iso_country,Airport4_Distance,Airport6_latitude_deg,
                                                     Airport6_longitude_deg,Airport6_new_continent,Airport6_iso_country,Airport6_Distance,
                                                     Airport8_latitude_deg,Airport8_longitude_deg,Airport8_new_continent,Airport8_iso_country,
                                                     Airport8_Distance))
# Direct flights (no layover): only the first airport/flight columns apply.
final2_via0_data<-subset(final_total_data7,subset=viaNum==0,select=c(Year, Month, Date_num,BookingAgency1,BookingPrice1,BookingPrice2,
                                                                     BookingPrice3,BookingPrice3,BookingPrice4,BookingPrice5,Airports2,
                                                                     MovingTime1_total,FirstFlightDep_hr_f,FirstFlightArr_hr_f,rank2016,
                                                                     rank2017,Airport2_latitude_deg,Airport2_longitude_deg,Airport2_new_continent,
                                                                     Airport2_iso_country,Airport2_Distance))
# One layover.
final2_via1_data<-subset(final_total_data7,subset=viaNum==1,select=c(Year, Month, Date_num,BookingAgency1,BookingPrice1,BookingPrice2,
                                                                     BookingPrice3,BookingPrice3,BookingPrice4,BookingPrice5,Airports2,
                                                                     Airports4,Staylocation,StayTime1_total,MovingTime1_total,MovingTime2_total,
                                                                     FirstFlightDep_hr_f,FirstFlightArr_hr_f,secondFlightDep_hr_f,secondFlightArr_hr_f,
                                                                     rank2016,rank2017,Airport2_latitude_deg,Airport2_longitude_deg,Airport2_new_continent,
                                                                     Airport2_iso_country,Airport2_Distance,Airport4_latitude_deg,Airport4_longitude_deg,
                                                                     Airport4_new_continent,Airport4_iso_country,Airport4_Distance))
# Two layovers.
final2_via2_data<-subset(final_total_data7,subset=viaNum==2,select=c(Year, Month, Date_num,BookingAgency1,BookingPrice1,BookingPrice2,
                                                                     BookingPrice3,BookingPrice3,BookingPrice4,BookingPrice5,Airports2,
                                                                     Airports4,Airports6,Staylocation,Staylocation2,StayTime1_total,StayTime2_total,
                                                                     MovingTime1_total,MovingTime2_total,MovingTime3_total,
                                                                     FirstFlightDep_hr_f,FirstFlightArr_hr_f,secondFlightDep_hr_f,secondFlightArr_hr_f,
                                                                     thirdFlightDep_hr_f,thirdFlightArr_hr_f,rank2016,rank2017,Airport2_latitude_deg,Airport2_longitude_deg,Airport2_new_continent,
                                                                     Airport2_iso_country,Airport2_Distance,Airport4_latitude_deg,Airport4_longitude_deg,
                                                                     Airport4_new_continent,Airport4_iso_country,Airport4_Distance,Airport6_latitude_deg,
                                                                     Airport6_longitude_deg,Airport6_new_continent,Airport6_iso_country,Airport6_Distance))
# Three layovers: all four airports apply.
final2_via3_data<-subset(final_total_data7,subset=viaNum==3,select=c(Year, Month, Date_num,BookingAgency1,BookingPrice1,BookingPrice2,
                                                                     BookingPrice3,BookingPrice3,BookingPrice4,BookingPrice5,Airports2,
                                                                     Airports4,Airports6,Airports8,Staylocation,Staylocation2,Staylocation3,
                                                                     StayTime1_total,StayTime2_total,StayTime3_total,
                                                                     MovingTime1_total,MovingTime2_total,MovingTime3_total,MovingTime4_total,
                                                                     FirstFlightDep_hr_f,FirstFlightArr_hr_f,secondFlightDep_hr_f,secondFlightArr_hr_f,
                                                                     thirdFlightDep_hr_f,thirdFlightArr_hr_f,fourthFlightDep_hr_f,fourthFlightArr_hr_f,
                                                                     rank2016,rank2017,Airport2_latitude_deg,Airport2_longitude_deg,Airport2_new_continent,
                                                                     Airport2_iso_country,Airport2_Distance,Airport4_latitude_deg,Airport4_longitude_deg,
                                                                     Airport4_new_continent,Airport4_iso_country,Airport4_Distance,Airport6_latitude_deg,
                                                                     Airport6_longitude_deg,Airport6_new_continent,Airport6_iso_country,Airport6_Distance,
                                                                     Airport8_latitude_deg,Airport8_longitude_deg,Airport8_new_continent,Airport8_iso_country,
                                                                     Airport8_Distance))
# NOTE(review): hard-coded absolute Windows paths -- these writes break on any
# other machine; consider file.path() relative to a configurable output dir.
write.csv(final2_total_data,file="C:/Users/Jungwoo Lim/Documents/2018년 과제/PSAT/통원팀/Data2/final_total_data.csv")
write.csv(final2_via0_data,file="C:/Users/Jungwoo Lim/Documents/2018년 과제/PSAT/통원팀/Data2/final_via0_data.csv")
write.csv(final2_via1_data,file="C:/Users/Jungwoo Lim/Documents/2018년 과제/PSAT/통원팀/Data2/final_via1_data.csv")
write.csv(final2_via2_data,file="C:/Users/Jungwoo Lim/Documents/2018년 과제/PSAT/통원팀/Data2/final_via2_data.csv")
write.csv(final2_via3_data,file="C:/Users/Jungwoo Lim/Documents/2018년 과제/PSAT/통원팀/Data2/final_via3_data.csv")
|
09c94039970c12088fe4af9081b8084c23275151 | 2fda626e4c9d843c222aa550e5fa07ea621bde8f | /ChIPseq/heatmaps/plot_heatmaps_differential.R | a1c833aba06bb8dd9f89fcc85d76660fe6c4daa3 | [
"MIT"
] | permissive | hodgeslab/workflows | 3419be8398bbceb51cf0507c29698a9b478e28db | 88929670fb5b19d143d8aebab6fd59410b567edc | refs/heads/master | 2022-11-04T04:30:08.470156 | 2022-10-07T17:41:34 | 2022-10-07T17:41:34 | 65,153,657 | 3 | 6 | null | null | null | null | UTF-8 | R | false | false | 3,637 | r | plot_heatmaps_differential.R | #!/usr/bin/env Rscript
# File to make peak heatmaps based on a matrix of read densities across peaks
# output from bwtool. If you use this code, please cite the following:
# Hodges et al., Nat Struct Mol Biol 25(1): 61-72 (2018), PMID: 29323272.
#
# Always ensure that tile_size and max_dist match those used in the matrix
# calculation performed by bwtool.
# NOTE(review): RColorBrewer and reshape are loaded but not used anywhere in
# this script -- candidates for removal.
library(RColorBrewer)
library(reshape)
# Command-line interface:
#   args[1] control sample name, args[2] treatment sample name,
#   args[3] DESeq2 size-factor file, args[4] peak-set / file-name prefix,
#   args[5:7] (optional) low/mid/high heatmap colors.
args <- commandArgs(TRUE)
c_name <- args[1]
t_name <- args[2]
size_fn <- args[3]
bed_name <- args[4]
# if colors are not defined, use defaults (white -> light gray -> black)
if(length(args) == 7) {
  c1 <- args[5]
  c2 <- args[6]
  c3 <- args[7]
} else {
  c1 <- "#ffffff"
  c2 <- "#e0e0e0"
  c3 <- "#000000"
}
options(bitmapType = "cairo")  # render bitmaps without an X11 display (headless servers)
tile_size <- 10 # for bwtool tile-averages
max_dist <- 4000
# define colormap: a 64-step ramp through the three anchor colors
color_len <- 64
colormap <- colorRampPalette(c(c1,c2,c3))(color_len)
# read in size factors previously output from DESeq2 script
size_factors <- read.delim(size_fn,row.names=1,header=FALSE)
colnames(size_factors) <- "value"
# per-replicate normalization factors, matched by "<sample>_rep<k>" row names
size_c1 <- size_factors$value[row.names(size_factors) == paste0(c_name,"_rep1")]
size_c2 <- size_factors$value[row.names(size_factors) == paste0(c_name,"_rep2")]
size_t1 <- size_factors$value[row.names(size_factors) == paste0(t_name,"_rep1")]
size_t2 <- size_factors$value[row.names(size_factors) == paste0(t_name,"_rep2")]
# Helper functions used below.

# Rotate a matrix 90 degrees clockwise (reverse each column, then transpose)
# so image() draws rows in the same top-to-bottom order as the input.
rotate <- function(mat) {
  flipped <- apply(mat, 2, rev)  # reverse every column
  t(flipped)
}
# Read one bwtool tab-delimited matrix file, treat missing values as 0 signal,
# and return the signal columns (everything after the first 6 annotation
# columns) as a matrix.
proc_file <- function(fn) {
  tbl <- read.table(fn, header = FALSE, row.names = NULL, sep = "\t")
  tbl[is.na(tbl)] <- 0                 # missing signal -> 0
  as.matrix(tbl[, -(1:6)])             # drop the 6 leading annotation columns
}
# Load the four per-replicate signal matrices written by bwtool
# (one row per peak, one column per tile across the peak).
# control 1
fn <- paste0(bed_name,"_",c_name,"_rep1.txt")
img_c1 <- proc_file(fn)
# control 2
fn <- paste0(bed_name,"_",c_name,"_rep2.txt")
img_c2 <- proc_file(fn)
# treat 1
fn <- paste0(bed_name,"_",t_name,"_rep1.txt")
img_t1 <- proc_file(fn)
# treat 2
fn <- paste0(bed_name,"_",t_name,"_rep2.txt")
img_t2 <- proc_file(fn)
# average across both replicates, adjusted by size factor
img_c <- (img_c1/size_c1 + img_c2/size_c2)/2
img_t <- (img_t1/size_t1 + img_t2/size_t2)/2
# identify peak: within middle 50%, sum both control and treat, find peak, and recenter
# start_pos is the summit index RELATIVE to the 25% offset; slicing
# [start_pos, start_pos + 0.5*ncol) below therefore places the summit at the
# midpoint of the extracted half-width window.
start_pos <- t(apply(img_c + img_t,1,function(x) {
  open_range = c(round(0.25*length(x)),round(0.75*length(x)))
  peaki = which.max(x[open_range[1]:open_range[2]])
}))
# NOTE(review): img_c2/img_t2 are reused here, clobbering the replicate-2
# matrices loaded above (harmless at this point, but confusing to readers).
img_c2 <- matrix(NA,nrow = nrow(img_c),ncol = round(0.5*ncol(img_c)))
img_t2 <- matrix(NA,nrow = nrow(img_t),ncol = round(0.5*ncol(img_t)))
for(j in 1:nrow(img_c)) {
  img_c2[j,] <- img_c[j, start_pos[j]:(start_pos[j]+round(0.5*ncol(img_c))-1)]
  img_t2[j,] <- img_t[j, start_pos[j]:(start_pos[j]+round(0.5*ncol(img_t))-1)]
}
img_c <- img_c2
img_t <- img_t2
# Per-row contrast scaling: map each peak's 15th..99th percentile of the
# combined control+treatment signal onto [0, 1]. (Matrix minus a vector
# recycles down columns, so low_lim/high_lim apply row-wise as intended.)
img_tot <- cbind(img_c, img_t)
low_lim <- apply(img_tot,1,function(x) { as.numeric(quantile(x,0.15)) } )
high_lim <- apply(img_tot,1,function(x) { as.numeric(quantile(x,0.99)) } )
img_tot <- (img_tot-low_lim)/(high_lim-low_lim)
img_c <- img_tot[,1:ncol(img_c)]
img_t <- img_tot[,(ncol(img_c)+1):(ncol(img_c)+ncol(img_t))]
# write images; the final break at 100 makes values > 1 saturate into the top
# color (values < 0 fall outside the breaks and are drawn as NA -- TODO confirm
# this is intended)
out_file <- paste0(bed_name,'_', c_name, '.tif') # 100 pixels wide (spanning max_dist), and
tiff(out_file,w=100,h=round(dim(img_c)[1]/1)) # each vertical pixel represents 1 site
par(mar = c(0,0,0,0))
image(rotate(as.matrix(img_c)), col=colormap,axes = FALSE,breaks=c(seq(0,1,length.out=color_len),100))
dev.off()
out_file <- paste0(bed_name,'_', t_name, '.tif') # 100 pixels wide (spanning max_dist), and
tiff(out_file,w=100,h=round(dim(img_t)[1]/1)) # each vertical pixel represents 1 site
par(mar = c(0,0,0,0))
image(rotate(as.matrix(img_t)), col=colormap,axes = FALSE,breaks=c(seq(0,1,length.out=color_len),100))
dev.off()
|
dd7c022d21652d01011d6616bdfada961ebb868a | 736d35dfbe750b872865ff123a863529b07ccf98 | /tests/testthat/test-stations.R | aca0b3018a60d6c6640c772aaa4ca1c304988beb | [] | no_license | seananderson/glmmfields | 27e9dc767c8fd128ec986e52007b49e73defa501 | 92b44f1499834e5815823bde243183967990f1c1 | refs/heads/master | 2023-03-21T23:27:11.484397 | 2023-03-10T19:43:54 | 2023-03-10T19:43:54 | 73,511,815 | 42 | 10 | null | 2023-09-08T21:53:53 | 2016-11-11T21:12:07 | R | UTF-8 | R | false | false | 2,536 | r | test-stations.R | set.seed(42)
# Shared fixture for all tests in this file: simulate a random-field data set
# with 30 stations per draw and 8 knots; the tests below index time slices 1
# and 2 (n_draws = 2). df = 1000 makes the MVT field effectively Gaussian.
s <- sim_glmmfields(
  df = 1000, n_draws = 2, gp_theta = 1.5,
  gp_sigma = 0.3, sd_obs = 0.1, n_knots = 8, n_data_points = 30
)
# Shuffling the row order within one time slice must not change the fitted
# predictions: with identical seeds both fits should agree station-by-station.
test_that("Stations in second time slice can be in different order from first time slice", {
  skip_on_cran()
  skip_on_travis()
  skip_on_appveyor()
  d <- s$dat
  d$ID <- seq_len(nrow(d))  # remember original row identity
  suppressWarnings({
    m <- glmmfields(y ~ 0,
      data = d, time = "time",
      lat = "lat", lon = "lon", nknots = 8,
      iter = 400, chains = 2, seed = 1
    )
  })
  d$pred <- predict(m)$estimate
  d2 <- d
  # 30 == n_data_points used in the simulation above
  d2[d2$time == 2, ] <- d2[d2$time == 2, ][sample(seq_len(30), size = 30), ] # scramble time 2
  suppressWarnings({
    m2 <- glmmfields(y ~ 0,
      data = d2, time = "time",
      lat = "lat", lon = "lon", nknots = 8,
      iter = 400, chains = 2, seed = 1
    )
  })
  d2$pred <- predict(m2)$estimate
  d2 <- dplyr::arrange(d2, ID)  # restore original order before comparing
  plot(d2$pred, d$pred)         # visual aid when run interactively
  expect_equal(d2$pred, d$pred, tolerance = 0.000001)
})
# Removing two stations entirely should barely change the predictions for the
# stations that remain (hence the looser tolerance than the reordering test).
test_that("Stations in second time slice introduce new stations", {
  skip_on_cran()
  skip_on_travis()
  skip_on_appveyor()
  d <- s$dat
  d$ID <- seq_len(nrow(d))  # remember original row identity
  suppressWarnings({
    m <- glmmfields(y ~ 0,
      data = d, time = "time",
      lat = "lat", lon = "lon", nknots = 8,
      iter = 800, chains = 2, seed = 1
    )
  })
  d2 <- d[-c(2, 10), ]  # drop two stations from the data
  suppressWarnings({
    m2 <- glmmfields(y ~ 0,
      data = d2, time = "time",
      lat = "lat", lon = "lon", nknots = 8,
      iter = 800, chains = 2, seed = 1
    )
  })
  d2$pred <- predict(m2)$estimate
  d$pred <- predict(m)$estimate
  d <- dplyr::filter(d, ID %in% d2$ID)  # compare only stations present in both fits
  plot(d2$pred, d$pred)
  expect_equal(d2$pred, d$pred, tolerance = .02)
})
# Swapping the order of the two time slices (after removing some stations)
# should not change the predictions.
test_that("Ordering of time slices doesn't matter if stations aren't always present", {
  skip_on_cran()
  skip_on_travis()
  skip_on_appveyor()
  d <- s$dat
  d$ID <- seq_len(nrow(d))
  d <- d[-c(2, 10), ]  # drop two stations so slices differ in membership
  suppressWarnings({
    m <- glmmfields(y ~ 0,
      data = d, time = "time",
      lat = "lat", lon = "lon", nknots = 8,
      iter = 800, chains = 2, seed = 1, cores = 1
    )
  })
  # NOTE(review): sd/sd2 capture stan_data but are never used afterwards, and
  # `sd` shadows stats::sd inside this test -- candidates for removal.
  sd <- m$stan_data
  d2 <- rbind(d[d$time == 2, ], d[d$time == 1, ])  # time slice 2 first
  suppressWarnings({
    m2 <- glmmfields(y ~ 0,
      data = d2, time = "time",
      lat = "lat", lon = "lon", nknots = 8,
      iter = 800, chains = 2, seed = 1, cores = 1
    )
  })
  sd2 <- m2$stan_data
  d2$pred <- predict(m2)$estimate
  d$pred <- predict(m)$estimate
  d2 <- dplyr::arrange(d2, ID)  # restore original row order before comparing
  plot(d2$pred, d$pred)
  expect_equal(d2$pred, d$pred, tolerance = .01)
})
|
2b6770240d246c87619a1f4f345d844886cf9f56 | 30365be9723f65eff45bab5bb936fd289158310a | /man/MakeGuideTablePlots.Rd | 395e954a2b484e739e4d5a71718a0a8104c78a8b | [] | no_license | pginart/SNPFISHAnalysis | 188f1ada11a62f31efdadc22db0d892432105b41 | b19c0bfc42934abf7d55f622fa1a44dfccb0de05 | refs/heads/master | 2021-01-10T05:35:15.860439 | 2016-03-31T21:05:47 | 2016-03-31T21:05:47 | 55,182,985 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 509 | rd | MakeGuideTablePlots.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MakeGuideTablePlots.R
\name{MakeGuideTablePlots}
\alias{MakeGuideTablePlots}
\title{Makes a standard set of graphs for guide table}
\usage{
MakeGuideTablePlots(exp.data, guide.table, outdir)
}
\arguments{
\item{guide.table}{SNP Table for guide probe}
\item{outdir}{directory in which to output SNP plots}
\item{exp.data}{experiment data extracted from the YAML file}
}
\description{
Makes a standard set of graphs for guide table
}
|
c292ac71f9b9069fb6aabe6e033b85da45238952 | 62869f3fcec8a6e08a4a1e290f7fa1399d17fb35 | /cor.test_byID.R | 41d9d14ac046ee87f1cd108cb54d59d65ecea11a | [] | no_license | Bwarule/Data-Exploration | abe2a8b8ccc52184df7ff87dd7602c472a6411d0 | 9936fc582a3ab374cc6663205c6af9decccbc674 | refs/heads/master | 2020-05-15T04:01:10.150284 | 2018-05-17T06:41:54 | 2018-05-17T06:41:54 | 41,899,672 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 478 | r | cor.test_byID.R | ## still need convert into function
## Per-species Pearson correlation test between Sepal.Length and Sepal.Width.
## Rewritten as a function (as the old "still need convert into function" TODO
## requested): replaces the 1:length() loop that grew final_data with rbind()
## on every iteration by lapply() + a single do.call(rbind, ...).

# Run cor.test() within each level of `group` in `data` and return one row
# per level: the group label (Species_type) and the test's p-value.
cor_test_by_group <- function(data, group, x_col, y_col) {
  group_levels <- as.character(unique(data[[group]]))
  rows <- lapply(group_levels, function(g) {
    sub <- data[data[[group]] == g, , drop = FALSE]
    p <- cor.test(sub[[x_col]], sub[[y_col]])$p.value
    data.frame(Species_type = g, p_value = p)
  })
  do.call(rbind, rows)
}

selectD <- unique(iris$Species)  # kept for compatibility with the old script
final_data <- cor_test_by_group(iris, "Species", "Sepal.Length", "Sepal.Width")
e63c8a5a6659828db8425bcb2b8f1372374ec6a2 | 79716a2b60c5567933b631ff11f67ef42f266d65 | /demo1.R | c8fda691191d11e2e5393072845ed9b82bf25866 | [] | no_license | Thossakrai/MLCourse | c49eee29bf7e1b1127269b736a41db7d19840224 | 549514e4ce52461702b0121d625a20a1b38acba0 | refs/heads/master | 2020-11-27T21:00:08.159341 | 2019-12-22T02:37:52 | 2019-12-22T02:37:52 | 229,599,207 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,568 | r | demo1.R | # subject_name <- c("John Doe", "Jane Doe", "Steve Graves")
# temperature <- c(98.1, 98.6, 101.4)
# flu_status <- c(FALSE, FALSE, TRUE)
#
# typeof(flu_status[1])
#
# cat("Hello world")
# print something onthe console
# hello <- function() cat("Hello!!!")
# hello()
# elasticband <- data.frame(stretch=c(46,54,48,50,44,42,52),
# distance=c(148,182,173,166,109,141,166))
# library(dslabs)
# x <- c("sidfn", "test")
# class(x)
#
# install.packages("RWeka")
# library(RWeka)
# vector
# subject_name <- c("John Doe", "Jane Doe", "Steve Graves")
# temperature <- c(98.1, 98.6, 101.4)
# flu_status <- c(FALSE, FALSE, TRUE)
#
# gender <- factor(c("MALE", "MALE", "FEMALE"))
# factor
# blood <- factor(c("O", "AB", "A"), levels = c("A", "B", "AB", "O"))
# symptoms <- factor(c("SEVERE", "MILD", "MODERATE"), levels = c("MILD", "MODERATE", "SEVERE"), ordered = TRUE)
# symptoms > "MODERATE"
# list
# subject1 <- list(fullname = subject_name[1], temperature = temperature[1], flu_status = flu_status[1],
# gender = gender[1], blood = blood[1], symptoms = symptoms[1])
# subject1$temperature
# data frame
# pt_data <- data.frame(subject_name, temperature, flu_status, gender, blood, symptoms, stringsAsFactors = FALSE)
#
# pt_data[1, 2]
# pt_data[c(1, 3), c(2,4)]
# pt_data[,1]
# pt_data[2,]
# pt_data[c(1, 3), c("temperature", "gender")]
# pt_data[-2, c(-1, -3, -5, -6)]
# matrix
# m <- matrix(c(1, 2, 3, 4), nrow = 2)
# m1 <- matrix(c(1,2, 3, 4, 5, 6), nrow = 2)
# m1
# m2 <- matrix(c(1, 2, 3, 4, 5, 6), ncol = 2)
# m2
# m2[1,]
# m2[, 1]
# array (more dimensions than matrix)
# str(usedcars)
# summary(usedcars$year)
#
# mn <- mean(c(36000, 44000, 56000))
# mn
# q <- quantile(usedcars$price, seq(0, 10, 1))
# q
#
# hist(usedcars$price, main = "Histogram of used cars prices")
# var(usedcars$price)
# var(usedcars$mileage)
# sd(usedcars$price)
# sd(usedcars$mileage)
# table(usedcars$model)
# plot(x = usedcars$mileage, y = usedcars$price, main = "Price vs. Mileage", xlab = "Used card odometer", ylab = "used cars price")
# install.packages("gmodels")
# library(gmodels)
# usedcars$conservative <- usedcars$color %in% c("Black", "Gray", "Silver", "White")
# CrossTable(x= usedcars$model, y = usedcars$conservative, chisq= TRUE)
# grape <- sqrt((9 - 6) ** 2 + (5 - 4) ** 2)
# nuts <- sqrt((3 - 6) ** 2 + (6 - 4) ** 2)
# orange <- sqrt((8 - 6) ** 2 +( 3 - 4) ** 2)
# fish <- sqrt((3 - 6) ** 2 + (1 - 4) ** 2)
# grape
# nuts
# orange
# fish
# str(wbcd)
# wbcd <- wbcd[-1] #drop the id feature
# wbcd$diagnosis <- factor(wbcd$diagnosis, levels = c("B", "M"), labels = c("Benign", "Malignant"))
# round(prop.table(table(wbcd$diagnosis))*100, digits = 1)
# summary(wbcd[c("radius_mean", "area_mean", "smoothness_mean")])
# Min-max normalize a numeric vector onto [0, 1].
# As in the original: a constant vector yields NaN (0/0) and any NA makes the
# whole result NA via range(). Fixes the non-idiomatic explicit return() and
# computes min/max in a single range() call.
normalize <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Min-max normalize the 30 numeric feature columns (2:31) of `wbcd`
# (breast-cancer data; the loading/cleaning code is commented out above).
wbcd_n <- as.data.frame(lapply(wbcd[2:31], normalize))
summary(wbcd_n$area_mean)
# wbcd_train <- wbcd_n[1:469,]
# wbcd_test <- wbcd_n[470:569, ]
# install.packages("class")
# wbcd_train_labels = wbcd[1:469, 1]
# wbcd_test_labels = wbcd[470:569, 1]
library(class)
# wbcd_test_pred <- knn(train = wbcd_train, test = wbcd_test, cl = wbcd_train_labels, k = 3)
# Alternative scaling: z-score standardization of all feature columns.
wbcd_z <- as.data.frame(scale(wbcd[-1]))
summary(wbcd_z$area_mean)
# First 469 rows as training set, remaining 100 as test set (assumes the data
# are already shuffled -- TODO confirm).
wbcd_train_z <- wbcd_z[1:469,]
wbcd_test_z <- wbcd_z[470:569, ]
# NOTE(review): likely bug -- these "labels" come from wbcd_z, i.e. the SCALED
# FEATURE matrix (column 1 is a scaled numeric feature), not the diagnosis
# column. The commented-out non-z version above uses wbcd[1:469, 1] instead;
# the same should probably apply here.
wbcd_train_labels_z = wbcd_z[1:469, 1]
wbcd_test_labels_z = wbcd_z[470:569, 1]
wbcd_test_z_pred <- knn(train = wbcd_train_z, test = wbcd_test_z, cl = wbcd_train_labels_z, k = 3)
# CrossTable(x = wbcd_test_labels_z, y = wbcd_test_z_pred)
|
a2ed70948ad3c820a5d8db141e42b7b24768a6eb | d5d199df5d5d1d9196bceb9711696ebcd77299df | /Programming Coursework/R Coursework(UMD)/Lab 5 Q2 - R code.R | cc0bf99cfdd3b04c29f72a4b00703cc558258002 | [] | no_license | JulioClaros321/JulioClaros321.github.io | c42d576a7544b32906e35e7cab4ae7cff0d00486 | ad6a03fedcc943a23ddeaa7a63b8cb436ef166c7 | refs/heads/master | 2023-08-08T00:28:44.990148 | 2023-07-24T15:33:23 | 2023-07-24T15:33:23 | 237,238,959 | 2 | 2 | null | 2023-03-07T23:54:52 | 2020-01-30T15:08:46 | Python | UTF-8 | R | false | false | 887 | r | Lab 5 Q2 - R code.R | ##
## Simulate tire mileage 1000 times for two brands (normal; brand 1:
## mean 36500, brand 2: mean 40000; sd 5000 miles each) and price a promotion
## that refunds $0.01 (brand 1) or $0.05 (brand 2) per mile a tire falls
## short of 30000 miles. The mean refund is the expected promotion cost per
## tire; we also estimate the probability a refund exceeds $50.
## (The original header said "500 times", but the code has always drawn 1000.)
set.seed(105)
sim <- as.integer(rnorm(1000, mean = 36500, sd = 5000))
sim2 <- as.integer(rnorm(1000, mean = 40000, sd = 5000))
# Miles relative to the 30000-mile refund threshold; negative = shortfall.
# (Note: `diff` shadows base::diff within this script.)
diff <- as.integer(sim - 30000)
diff2 <- as.integer(sim2 - 30000)
# Refund per tire: only tires that fall short of 30000 miles pay out.
payoff <- as.integer(ifelse(diff < 0, abs(diff) * 0.01, 0))
# BUGFIX: the condition previously tested `diff < 0` (brand 1's shortfall)
# while the refund amount used diff2 (brand 2); a brand-2 refund must be
# gated on diff2 < 0.
payoff2 <- as.integer(ifelse(diff2 < 0, abs(diff2) * 0.05, 0))
pertire <- mean(payoff)    # expected promotion cost per brand-1 tire
pertire2 <- mean(payoff2)  # expected promotion cost per brand-2 tire
## Expected cost of promotion per tire
print(pertire)
print(pertire2)
# Monte-Carlo probability that a single refund exceeds $50.
morethan50 <- sum(payoff > 50)
more <- sum(payoff2 > 50)
probformorethan50 <- morethan50 / 1000
prob <- more / 1000
## Probability of refund more than $50 for a tire
print(probformorethan50)
print(prob)
|
0bfbcc167a4500318175cb292ee3ae764f78200d | 7a91b2ef3a5157e9ddf115be9eac0ff06e787108 | /man/GO_hs_1.Rd | 381210d67156c1845e8c0372748e292b41055513 | [] | no_license | SHerresthal/DESeqtools | 5980491993158fc7df4dc5c39a45feac2d8f3033 | 5b52b6c996c4347f40825c586f9ad43ac0430242 | refs/heads/master | 2020-04-22T00:45:31.437119 | 2019-09-27T12:17:35 | 2019-09-27T12:17:35 | 169,991,773 | 3 | 1 | null | 2019-09-20T15:09:20 | 2019-02-10T15:18:59 | HTML | UTF-8 | R | false | true | 712 | rd | GO_hs_1.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{GO_hs_1}
\alias{GO_hs_1}
\title{List of GO terms (human) - Part 1}
\format{A data frame
\describe{
\item{GENEID}{Ontology term}
\item{SYMBOL}{Entrez ID}
\item{TERM}{GO term description}
\item{DEFINITION}{GO term definition}
\item{DOMAIN}{GO domain. biological_process, cellular_component or molecular_function}
}}
\usage{
GO_hs_1
}
\description{
A dataset containing GO term description, the definition and the domain for human gene IDs.
Needs to be combined with GO_hs_2 to get the full dataframe.
}
\references{
original file GO_hg38p12_ensembl181121.txt
}
\keyword{gene}
\keyword{ontology}
|
a4f4544778f582e83511a4a9b27a1762f32329f5 | 688185e8e8df9b6e3c4a31fc2d43064f460665f1 | /R/emuR-create_DBconfigFromTextGrid.R | 16e57592f7053ca733a751b6f1731cdcab6635c5 | [] | no_license | IPS-LMU/emuR | 4b084971c56e4fed9032e40999eeeacfeb4896e8 | eb703f23c8295c76952aa786d149c67a7b2df9b2 | refs/heads/master | 2023-06-09T03:51:37.328416 | 2023-05-26T11:17:13 | 2023-05-26T11:17:13 | 21,941,175 | 17 | 22 | null | 2023-05-29T12:35:55 | 2014-07-17T12:32:58 | R | UTF-8 | R | false | false | 3,363 | r | emuR-create_DBconfigFromTextGrid.R | ## Create emuDB DBconfig object from a TextGrid file
##
## Parses the tier structure of a Praat TextGrid and derives the database
## configuration (DBconfig): one SEGMENT/EVENT level definition per tier plus
## a default EMU-webApp perspective; track and link definitions start empty.
##
## @param tgPath path to TextGrid file
## @param dbName name of the database
## @param basePath project base path (NOTE(review): currently unused inside
##   this function; kept so existing calls remain valid)
## @param tierNames character vector containing names of tiers to extract and
##   convert. If NULL (the default) all tiers are converted.
## @return named list representing the database schema (emuDB.schema.db)
## @import stringr uuid wrassp RSQLite
## @keywords internal
##
create_DBconfigFromTextGrid <- function(tgPath, dbName, basePath, tierNames = NULL) {
  # --- check parameters ---
  if (is.null(tgPath)) {
    stop("Argument tgPath (path to TextGrid file) must not be NULL\n")
  }
  if (is.null(dbName)) {
    stop("Argument dbName (name of new DB) must not be NULL\n")
  }
  # --- parse TextGrid ---
  # sampleRate / name / annotates do not influence the derived schema, so
  # dummy values are passed here.
  tgAnnotDFs <- TextGridToBundleAnnotDFs(tgPath,
                                         sampleRate = 2000,
                                         name = "tmpBundleName",
                                         annotates = "tmpBundleName.wav")
  # keep only the explicitly requested tiers, if a selection was given
  if (!is.null(tierNames)) {
    tgAnnotDFs$items <- dplyr::filter(tgAnnotDFs$items, .data$level %in% tierNames)
    tgAnnotDFs$labels <- dplyr::filter(tgAnnotDFs$labels, .data$name %in% tierNames)
  }
  # one representative row per level/tier
  levels <- dplyr::distinct(tgAnnotDFs$items, .data$level, .keep_all = TRUE)
  # --- level definitions + default display order for the webApp ---
  # (was 1:nrow(...) plus a redundant manually incremented levIdx counter;
  # levIdx always equaled lineIdx because the else branch stops)
  levelDefinitions <- list()
  defaultLvlOrder <- list()
  for (lineIdx in seq_len(nrow(levels))) {
    lev <- levels[lineIdx, ]
    if (lev$type == 'SEGMENT' || lev$type == 'EVENT') {
      defaultLvlOrder[[length(defaultLvlOrder) + 1L]] <- lev$level
    } else {
      # TextGrids only yield interval (SEGMENT) and point (EVENT) tiers, so
      # anything else indicates a parsing problem.
      # (Original message was built with mismatched quotes and leaked literal
      # '"' characters and line breaks into the error text.)
      stop("Found levelDefinition that is not of type SEGMENT|EVENT ",
           "while parsing TextGrid...this should not occur! This ",
           "TextGrid file caused the problem: ", tgPath)
    }
    # each level gets a single STRING attribute named like the level itself
    levelDefinitions[[lineIdx]] <- list(name = lev$level,
                                        type = lev$type,
                                        attributeDefinitions = list(list(name = lev$level,
                                                                         type = "STRING")))
  }
  # --- EMU-webApp configuration: a single default perspective ---
  sc <- list(order = c("OSCI", "SPEC"),
             assign = list(),
             contourLims = list())
  defPersp <- list(name = 'default',
                   signalCanvases = sc,
                   levelCanvases = list(order = defaultLvlOrder),
                   twoDimCanvases = list(order = list()))
  waCfg <- list(perspectives = list(defPersp),
                activeButtons = list(saveBundle = TRUE,
                                     showHierarchy = TRUE))
  # --- assemble and return the full schema ---
  list(name = dbName,
       UUID = uuid::UUIDgenerate(),
       mediafileExtension = 'wav',
       ssffTrackDefinitions = list(),
       levelDefinitions = levelDefinitions,
       linkDefinitions = list(),
       EMUwebAppConfig = waCfg)
}
# FOR DEVELOPMENT
# library('testthat')
# test_file('tests/testthat/test_aaa_initData.R')
# test_file('tests/testthat/test_emuR-create_DBconfigFromTextGrid.R')
|
618e7612ed6e8945a2fdb0c6d65a64b88d325e4c | 6d0a2dae1fdaf0cbab6e5cb38819f24b4f1d616f | /data/caret_models/LogitBoost.R | ec959bea3cdb9278ed38e53ef923f2d65dc9e322 | [
"MIT"
] | permissive | BTopcuoglu/code_review | 38e89878828c93fc3f6456384652363d4eec822e | 6092b77b623a21129936ab5f421839b20ad3f7e3 | refs/heads/master | 2020-04-23T04:36:52.320488 | 2019-02-27T14:37:25 | 2019-02-27T14:37:25 | 170,912,886 | 0 | 6 | MIT | 2019-02-27T14:14:58 | 2019-02-15T18:51:16 | R | UTF-8 | R | false | false | 4,688 | r | LogitBoost.R | modelInfo <- list(label = "Boosted Logistic Regression",
library = "caTools",
loop = function(grid) {
## Get the largest value of ncomp to fit the "full" model
loop <- grid[which.max(grid$nIter),,drop = FALSE]
submodels <- grid[-which.max(grid$nIter),,drop = FALSE]
## This needs to be excased in a list in case there are more
## than one tuning parameter
submodels <- list(submodels)
list(loop = loop, submodels = submodels)
},
type = "Classification",
parameters = data.frame(parameter = 'nIter',
class = 'numeric',
label = '# Boosting Iterations'),
grid = function(x, y, len = NULL, search = "grid") {
if(search == "grid") {
out <- data.frame(nIter = 1 + ((1:len)*10))
} else {
out <- data.frame(nIter = unique(sample(1:100, size = len, replace = TRUE)))
}
out
},
fit = function(x, y, wts, param, lev, last, classProbs, ...) {
## There is another package with a function called `LogitBoost`
## so we call using the namespace
caTools::LogitBoost(as.matrix(x), y, nIter = param$nIter)
},
predict = function(modelFit, newdata, submodels = NULL) {
## This model was fit with the maximum value of nIter
out <- caTools::predict.LogitBoost(modelFit, newdata, type="class")
## submodels contains one of the elements of 'submodels'. In this
## case, 'submodels' is a data frame with the other values of
## nIter. We loop over these to get the other predictions.
if(!is.null(submodels))
{
## Save _all_ the predictions in a list
tmp <- out
out <- vector(mode = "list", length = nrow(submodels) + 1)
out[[1]] <- tmp
for(j in seq(along = submodels$nIter))
{
out[[j+1]] <- caTools::predict.LogitBoost(modelFit,
newdata,
nIter = submodels$nIter[j])
}
}
out
},
prob = function(modelFit, newdata, submodels = NULL) {
out <- caTools::predict.LogitBoost(modelFit, newdata, type = "raw")
## I've seen them not be on [0, 1]
out <- t(apply(out, 1, function(x) x/sum(x)))
if(!is.null(submodels))
{
tmp <- vector(mode = "list", length = nrow(submodels) + 1)
tmp[[1]] <- out
for(j in seq(along = submodels$nIter))
{
tmpProb <- caTools::predict.LogitBoost(modelFit,
newdata,
type = "raw",
nIter = submodels$nIter[j])
tmpProb <- out <- t(apply(tmpProb, 1, function(x) x/sum(x)))
tmp[[j+1]] <- as.data.frame(tmpProb[, modelFit$obsLevels,drop = FALSE])
}
out <- tmp
}
out
},
predictors = function(x, ...) {
if(!is.null(x$xNames))
{
out <- unique(x$xNames[x$Stump[, "feature"]])
} else out <- NA
out
},
levels = function(x) x$obsLevels,
tags = c("Ensemble Model", "Boosting", "Implicit Feature Selection",
"Tree-Based Model", "Logistic Regression"),
sort = function(x) x[order(x[,1]),])
|
7168eb2854c6643e5aa5e00f8f0b905454ca47a1 | ad6ca1838f4edcc3925bd5a7aa77f8a1744b46c8 | /man/pgsm.Rd | f489cab874226678b3d10757c09c098dedeb092e | [] | no_license | cran/ForestFit | 74482a6a2aee5979f0b3cde9ca837afbe7df2f6a | a23d6772bb45572effce397be551851050a33f72 | refs/heads/master | 2023-03-11T18:18:42.583265 | 2023-02-28T04:02:29 | 2023-02-28T04:02:29 | 207,810,340 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,559 | rd | pgsm.Rd | \name{pgsm}
\alias{pgsm}
\title{Computing cumulative distribution function of the gamma shape mixture model}
\description{Computes cumulative distribution function (cdf) of the gamma shape mixture (GSM) model. The general form for the cdf of the GSM model is given by
\deqn{F(x,{\Theta}) = \sum_{j=1}^{K}\omega_j F(x,j,\beta),}
where
\deqn{F(x,j,\beta) = \int_{0}^{x} \frac{\beta^j}{\Gamma(j)} y^{j-1} \exp\bigl( -\beta y\bigr) dy,}
in which \eqn{\Theta=(\omega_1,\dots,\omega_K, \beta)^T} is the parameter vector and known constant \eqn{K} is the number of components. The vector of mixing parameters is given by \eqn{\omega=(\omega_1,\dots,\omega_K)^T} where \eqn{\omega_j}s sum to one, i.e., \eqn{\sum_{j=1}^{K}\omega_j=1}. Here \eqn{\beta} is the rate parameter that is equal for all components.}
\usage{pgsm(data, omega, beta, log.p = FALSE, lower.tail = TRUE)}
\arguments{
\item{data}{Vector of observations.}
\item{omega}{Vector of the mixing parameters.}
\item{beta}{The rate parameter.}
\item{log.p}{If \code{TRUE}, then log(cdf) is returned.}
\item{lower.tail}{If \code{FALSE}, then \code{1-cdf} is returned.}
}
%\details{}
\value{
A vector of the same length as \code{data}, giving the cdf of the GSM model.
}
\references{
S. Venturini, F. Dominici, and G. Parmigiani, 2008. Gamma shape mixtures for heavy-tailed distributions, \emph{The Annals of Applied Statistics}, 2(2), 756–776.}
\author{Mahdi Teimouri}
\examples{
data<-seq(0,20,0.1)
omega<-c(0.05, 0.1, 0.15, 0.2, 0.25, 0.25)
beta<-2
pgsm(data, omega, beta)
}
|
53e5b80de32850f6ea0ce638794526f049b3c3f5 | a8d781d3c638037542b62135421f2ffeed3a58f0 | /Vorjahresklausur-sport-blut.R | ee6b817a109e2553a2540f14964b910b778ffc0d | [] | no_license | paulzwoelfer/AKIT2 | ffc90ca6001d42fef20ccfdacbb6e8e99a47842d | a587822c1ed9176cd077d9a36879ae80762fcf2b | refs/heads/master | 2022-01-16T21:03:35.944779 | 2019-06-26T17:56:28 | 2019-06-26T17:56:28 | null | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 13,127 | r | Vorjahresklausur-sport-blut.R | # AKIT2 SS18, Hauptklausur, 15.6.2018
library(ggplot2)
library(car)
library(corrplot)
library(effects)
library(pwr)
library(ROCR)
library(runjags)
library(coda)
rjags::load.module("glm")   # JAGS glm module for GLM-style nodes
library(akit2)
# NOTE(review): hard-coded absolute Windows path -- breaks on other machines.
df <- read.csv('C:\\Users\\Dominik\\Downloads\\sport-blut.csv')
# We collected data on over 200 athletes. Question: what does the athletes'
# haemoglobin concentration depend on?
#
# Relevant columns in the data set:
# - hg ... haemoglobin concentration (unit: g/dl)
# - wcc ... white blood cell count (unit: 1/nl)
# - ht ... height (cm)
# - sex ... female (f) or male (m)
# - lbm ... lean body mass, also called fat-free mass (kg)
# - pcbfat ... body fat percentage (%)
# - sport ... type of sport
summary(df)
describe(df)   # descriptive statistics; describe() comes from the akit2 course package
# response: y = hg
# group whose variance is modelled separately: sex
mapply(hist,as.data.frame(df),main=colnames(df))   # quick marginal histograms
# watch wcc: a few unusually high values
# pcbfat may need a log transform
plot(df$pcbfat, df$hg)
# z-transform the predictors (we always do this for the Bayesian models!)
dfz = prepare.df.bayes(df, drop.originals = TRUE)
summary(dfz)
#Modell mit 2 Gruppen und vier Variablen
#Wenn Interaktion gefragt ist
#bei den Variablen hinzufuegen.
#zwei Variablen: beta.interaktion*wcc*variable 2 -> interkation von wcc und variable 2
#einer Variable und einer Gruppe: beta[gruppe[i]]*wcc[i] -> nicht ganzs sicher ob das stimmt
#Folgende Variablen sind zum ersetzen
#sex = ist die Gruppenvariable nach der die Varianz gefragt ist was später auch Simga im Monitor darstellt
#sport = 2 Gruppenvariable
#hg = stellt die abhaengige Variable dar
#wcc = erste metrische Variable
#pcbfat = zweite metrische Varibale
#lbm = dritte metrische Varibale
#ht = vierte metrische Variable
#--------------------------Beginn von Modell definition---------------------------------#
modell = "
data {
N <- length(hg[])
Nsex <- max(sex)
Nsport <- max(sport)
}
model {
for (i in 1:N) {
hg[i] ~ dnorm(mu[i], 1/sigma[sex[i]]^2) #!!Grup-pe1 ACHTUNG: hier könnte sein das die Gruppe 2 genommen werden muss jenachdem wir man oben die Gruppe mit der Varianz definiert
mu[i] <- interceptsport[sport[i]] + # Gruppe fuer sport / intercept
interceptsex[sex[i]] + # Gruppe fuer sex /intercept
beta.wcc*wcc[i] +
beta.pcbfat*pcbfat[i] +
beta.lbm*lbm[i] +
beta.ht*ht[i]
#----------------------------Beginn von Vorhersage-----------------------------------#
#hg.hat[i] ~ dnorm(mu[i], 1/sigma[sex[i]]^2) #ist gleich erste Zeile im Modell
}
#-----------------------------Ende der Vorhersage------------------------------------#
#-----------------------------------Priors-------------------------------------------#
for(l in 1:Nsex){
sigma[l]~dexp(3/1)
}
#--------------------------ACHTUNG Partial Pooling-----------------------------------#
#interceptsex[l]~dnorm(0,1)
#sigma[l]~dexp(3/1)
#wenn in der Fragestellung nach der Gruppe gefragt wird die Pooling verlangt, dann gehört
#diese Funktion in die forschleife des Partial-Poolings
#------------------------------------------------------------------------------------#
beta.wcc ~ dnorm(0,1/1^2)
beta.pcbfat ~ dnorm(0,1/1^2)
beta.lbm ~ dnorm(0,1/1^2)
beta.ht ~ dnorm(0,1/1^2)
for(l in 1:Nsex)
{
interceptsex[l] ~ dnorm(0, 1/1^2)
}
# sex hat nur 2 Werte: kein Pooling
# alle sexs bekommen das selbe sgima & intercept
#VT: wenn 7 sexs wären, dann muesste man sonst 7 Zeilen fuer intercept und 7 fuer sigma schreiben
#--------------------------------Ende von Priors-------------------------------------#
#---------------------------Partial-Pooling fuer sport-------------------------------#
interceptsport.mu ~ dnorm(0,1/1^2)
interceptsport.sigma ~ dexp(1)
for (d in 1:Nsport) {
interceptsport[d] ~ dnorm(interceptsport.mu, 1/interceptsport.sigma^2)
}
# stabilere Faktoren ausrechnen (Gruppen beeinflussen sich gegeNsporteitig)
for (l in 1:Nsex) {
for (d in 1:Nsport) {
mtx[l,d] <- interceptsex[l] +interceptsport[d]
}
}
intercept <- mean(mtx[1:Nsex,1:Nsport])
for(l in 1:Nsex)
{
alphasex[l] <- mean(mtx[l,1:Nsport]) - intercept
}
for (d in 1:Nsport)
{
Gammasport[d] <- mean(mtx[1:Nsex,d]) - intercept # mtx ist definiert mit mtx[l,d]
}
#------------------------Ende Partial-Pooling fuer sport-----------------------------#
}
"
#Modell fuer Variablen aufrufen
modell.fit = run.jags(model=modell,
data=dfz,
burnin = 5000,
monitor = c("intercept", "alphasex", "Gammasport", "sigma",
"beta.wcc", "beta.pcbfat", "beta.lbm", "beta.ht"),
n.chains = 3,
sample= 10000,
thin=2,
inits = list(list(.RNG.name="base::Mersenne-Twister", .RNG.seed=456),
list(.RNG.name="base::Super-Duper", .RNG.seed=123),
list(.RNG.name="base::Wichmann-Hill", .RNG.seed=789)),
method = "parallel")
summary(df)
fit.samples = as.matrix(modell.fit$mcmc)
fit.summary = view(modell.fit)
#Wenn MC%ofSD um 1 dann mcmc machen bzw wenn SSeff unter 10%
diagMCMC(modell.fit$mcmc,"beta.lbm")
diagMCMC(modell.fit$mcmc,"beta.ht")
#Schauen noch gut aus.
#------------------------------------Interpretation----------------------------------#
# Variable directory (monitored quantity -> data column):
#   "N" / "hg"                                   ... sample size / response (hemoglobin)
#   "Nsex" / "interceptsex" / "alphasex"         ... sex groups
#   "Nsport" / "interceptsport" / "Gammasport" / "mu" ... sport groups / linear predictor
#   "beta.wcc" / "wcc", "beta.pcbfat" / "pcbfat",
#   "beta.lbm" / "lbm", "beta.ht" / "ht"         ... slopes of the metric predictors
# Open questions kept from the original author (for review):
#   Why is sigma the variance term here -- is that always the case?
#   When do we look at the overall "intercept"?
# Question 1: How large is the difference between men and women?
# Do the two groups also have different variances?
plotcoef(modell.fit, c("sex"))
table(df$sex) # find out which numeric code corresponds to which sex level
# Posterior difference of the two sex effects (on the z-scale).
diff = (fit.samples[,"alphasex[1]"]-fit.samples[,"alphasex[2]"])
plotPost(diff, compVal = 0)
# Back-transform to the original hg scale (g/dl) by multiplying with sd(hg).
diff_z = (diff*sd(df$hg))
plotPost(diff_z, compVal = 0)
# Notes from the original run (re-check if the model is re-fit):
# back-transformed, the sexes differ by about -1.51 g/dl; the interval
# excludes 0, i.e. the difference is credible ("significant");
# 95% HDI of the difference: [-1.99, -0.979].
# Same comparison for the two group standard deviations.
diff = (fit.samples[,"sigma[1]"]-fit.samples[,"sigma[2]"])
plotPost(diff, compVal = 0)
diff_z = (diff*sd(df$hg))
plotPost(diff_z, compVal = 0)
# Variance difference about -0.137; 0 lies inside the interval -> not credible.
# Question 2: What influence do white blood cells, height, lean body mass and
# percent body fat have on the hemoglobin concentration?
# Slope of wcc, rescaled to original units (g/dl per unit wcc).
plotPost((fit.samples[, "beta.wcc"]*sd(df$hg)/sd(df$wcc)), compVal = 0)
# Original note: per 1-unit increase of wcc, hg changes by about 0.096 (95% level).
mean(fit.samples[,"beta.wcc"]*sd(df$hg))
sd(df$wcc)
# Per 1 sd increase of wcc (~1.8 1/nl), hg increases by ~0.17 g/dl.
# 0 is excluded -> credible effect.
plotPost((fit.samples[, "beta.ht"]*sd(df$hg)/sd(df$ht)), compVal = 0)
# Per 1-unit increase of ht, hg changes by about -0.0188 (95% level).
mean(fit.samples[,"beta.ht"]*sd(df$hg))
sd(df$ht)
# Per 1 sd increase of ht (~9.7 cm), hg decreases by ~0.16 g/dl.
# 0 is included -> not credible.
plotPost((fit.samples[, "beta.lbm"]*sd(df$hg)/sd(df$lbm)), compVal = 0)
# Per 1-unit increase of lbm, hg changes by about +0.0213 (95% level).
mean(fit.samples[,"beta.lbm"]*sd(df$hg))
sd(df$lbm)
# Per 1 sd increase of lbm (~13 kg), hg increases by ~0.3 g/dl.
# 0 is included -> not credible.
plotPost((fit.samples[, "beta.pcbfat"]*sd(df$hg)/sd(df$pcbfat)), compVal = 0)
# Per 1-unit increase of pcbfat, hg changes by about -0.00103 (95% level).
mean(fit.samples[,"beta.pcbfat"]*sd(df$hg))
sd(df$pcbfat)
# Per 1 sd increase of pcbfat (~6.2%), hg changes by ~+0.00916 g/dl.
# 0 is included -> not credible.
# Summary: only beta.wcc is credible at the 95% level.
# Question 3: Is there a sport whose hemoglobin concentration deviates from the
# others at the 75% credibility level?
plotcoef(modell.fit, c("sport"))
table(df$sport)
# Compare sport 1 (B_Ball) against sport 4 (Netball).
diff = (fit.samples[,"Gammasport[1]"]-fit.samples[,"Gammasport[4]"])
plotPost(diff, compVal = 0, credMass = 0.75)
diff_z = (diff*sd(df$hg))
plotPost(diff_z, compVal = 0, credMass = 0.75)
# 1=B_Ball minus 4=Netball ~ 0.46: basketball shows a ~0.46 g/dl higher hg
# concentration than netball (75% HDI: [0.232, 0.77]); 0 excluded -> credible.
# Compare sport 8 against sport 2.
diff = (fit.samples[,"Gammasport[8]"]-fit.samples[,"Gammasport[2]"])
plotPost(diff, compVal = 0, credMass = 0.75)
diff_z = (diff*sd(df$hg))
plotPost(diff_z, compVal = 0, credMass = 0.75)
# Difference ~0.148 (75% HDI: [-0.217, 0.441]); 0 included -> not credible.
# NOTE(review): the code computes Gammasport[8] - Gammasport[2], while the
# original note read "Field higher than T_Sprnt" -- the two group labels may
# be swapped; verify the level coding against table(df$sport).
# Frage 4: Hat das Modell eine gute Vorhersagekraft bzw. ist das Modell eine gute
# Beschreibung des vorhandenen Datensatzes?
#--------------------------Beginn von Modell definition---------------------------------#
modellp = "
data {
N <- length(hg[])
Nsex <- max(sex)
Nsport <- max(sport)
}
model {
for (i in 1:N) {
hg[i] ~ dnorm(mu[i], 1/sigma[sex[i]]^2) #!!Grup-pe1 ACHTUNG: hier könnte sein das die Gruppe 2 genommen werden muss jenachdem wir man oben die Gruppe mit der Varianz definiert
mu[i] <- interceptsport[sport[i]] + # Gruppe fuer sport / intercept
interceptsex[sex[i]] + # Gruppe fuer sex /intercept
beta.wcc*wcc[i] +
beta.pcbfat*pcbfat[i] +
beta.lbm*lbm[i] +
beta.ht*ht[i]
#----------------------------Beginn von Vorhersage-----------------------------------#
hg.hat[i] ~ dnorm(mu[i], 1/sigma[sex[i]]^2) #ist gleich erste Zeile im Modell
}
#-----------------------------Ende der Vorhersage------------------------------------#
#-----------------------------------Priors-------------------------------------------#
for(l in 1:Nsex){
sigma[l]~dexp(3/1)
}
#--------------------------ACHTUNG Partial Pooling-----------------------------------#
#interceptsex[l]~dnorm(0,1)
#sigma[l]~dexp(3/1)
#wenn in der Fragestellung nach der Gruppe gefragt wird die Pooling verlangt, dann gehört
#diese Funktion in die forschleife des Partial-Poolings
#------------------------------------------------------------------------------------#
beta.wcc ~ dnorm(0,1/1^2)
beta.pcbfat ~ dnorm(0,1/1^2)
beta.lbm ~ dnorm(0,1/1^2)
beta.ht ~ dnorm(0,1/1^2)
for(l in 1:Nsex)
{
interceptsex[l] ~ dnorm(0, 1/1^2)
}
# sex hat nur 2 Werte: kein Pooling
# alle sexs bekommen das selbe sgima & intercept
#VT: wenn 7 sexs wären, dann muesste man sonst 7 Zeilen fuer intercept und 7 fuer sigma schreiben
#--------------------------------Ende von Priors-------------------------------------#
#---------------------------Partial-Pooling fuer sport-------------------------------#
interceptsport.mu ~ dnorm(0,1/1^2)
interceptsport.sigma ~ dexp(1)
for (d in 1:Nsport) {
interceptsport[d] ~ dnorm(interceptsport.mu, 1/interceptsport.sigma^2)
}
# stabilere Faktoren ausrechnen (Gruppen beeinflussen sich gegeNsporteitig)
for (l in 1:Nsex) {
for (d in 1:Nsport) {
mtx[l,d] <- interceptsex[l] +interceptsport[d]
}
}
intercept <- mean(mtx[1:Nsex,1:Nsport])
for(l in 1:Nsex)
{
alphasex[l] <- mean(mtx[l,1:Nsport]) - intercept
}
for (d in 1:Nsport)
{
Gammasport[d] <- mean(mtx[1:Nsex,d]) - intercept # mtx ist definiert mit mtx[l,d]
}
#------------------------Ende Partial-Pooling fuer sport-----------------------------#
}
"
#Modell fuer Variablen aufrufen
modell.pred = run.jags(model=modellp,
data=dfz,
burnin = 5000,
monitor = c("hg.hat"),
n.chains = 3,
sample= 10000,
thin=2,
inits = list(list(.RNG.name="base::Mersenne-Twister", .RNG.seed=456),
list(.RNG.name="base::Super-Duper", .RNG.seed=123),
list(.RNG.name="base::Wichmann-Hill", .RNG.seed=789)),
method = "parallel")
summary(df)
pred.samples = as.matrix(modell.pred$mcmc)
pred.summary = view(modell.pred)
sum(pred.summary[, "MC%ofSD"] >= 1)
#Keine MC%ofSD Werte über 1. Gut!
# Frage 5: Welche Hemoglobin-Konzentration wird für eine Tennis-Spielerin mit 175cm GröÃe,
# 60 kg Magermasse und ansonsten mittleren Werten vorhergesagt?
# Geben Sie auch ein 75%-Intervall an.
# Relevante Daten im Datensatz:
# - hg ... Hemoglobin-Konzentration (Einheit: g/dl)
# - wcc ... Anzahl weiße Blutkörperchen (Einheit: 1/nl)
# - ht ... Größe (cm)
# - sex ... Frau (f) oder Mann (m)
# - lbm ... Magermasse des Körpers, auch fettfreie Masse genannt (kg)
# - pcbfat ... Anteil Körperfett (%)
# - sport ... Sportart |
c2fbc2a20c922f4f63f122829ae50cb51731fd22 | ffbee4997633add5acd7ff18dde63bdfbb79639f | /cat.r | e0baeee937b26d9c57e290a9b293255e3d699990 | [
"MIT"
] | permissive | ozjimbob/aaqfx-render | 0835a63c5c5f7f74bdaa73af664ae5d13e8e3db4 | eebad98fe176d2abaa28b6c043a0c7852deb518d | refs/heads/master | 2021-07-13T06:49:30.959138 | 2018-10-30T23:12:37 | 2018-10-30T23:12:37 | 126,103,666 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,349 | r | cat.r | #!/usr/bin/Rscript
# Build a JSON index ("layers.json") describing the time span covered by each
# rendered map tile. This first section handles the radar tiles.
library(tidyverse)
library(rjson)
# All tile files produced by the renderer.
fl = list.files("tiles")
of = list()
# Radar update interval in minutes per radar site id ("02" -> 6, "49" -> 10).
rad_tab=data.frame(rad=c("02","49"),time=c(6,10))
# Radar tiles are the files whose names start with "rad".
rdr = fl[substr(fl,1,3)=="rad"]
idx = 1
for(i in rdr){
# Fixed-position filename fields -- assumes layout rad_YYYYMMDDHHMM_SS...
# (chars 5-16 = timestamp, chars 18-19 = site id); TODO confirm naming scheme.
time = substr(i,5,16)
site = substr(i,18,19)
# Look up this site's update interval (minutes).
interval = rad_tab[rad_tab$rad==as.character(site),]$time
filename = i
# The timestamp in the name is the END of the covered interval; the start is
# one interval earlier (+1 s so adjacent intervals do not overlap).
ftime = as.POSIXct(time,format="%Y%m%d%H%M")
stime = ftime - interval*60 + 1
thisdf = data.frame(prod="radar",site=site,end_time=as.character(ftime),start_time=as.character(stime),layer=filename,stringsAsFactors=FALSE)
of[[idx]]=thisdf
idx=idx+1
}
# One row per radar tile.
of = bind_rows(of)
# Re-shape the flat table into a nested list: product -> site -> vectors of
# start/end times and layer file names (serialised to JSON further below).
ol2 = list()
idx=1
unq_prods = unique(of$prod)
for(tprod in unq_prods){
print(tprod)
tof = filter(of,prod==tprod)
ol2[[tprod]]=list()
unq_sites = unique(tof$site)
idx=1
for(tsite in unq_sites){
print(tsite)
sub_site = filter(tof,site==tsite)
ol2[[tprod]][[idx]]=list()
ol2[[tprod]][[idx]]$id = tsite
ol2[[tprod]][[idx]]$start_time = sub_site$start_time
ol2[[tprod]][[idx]]$end_time = sub_site$end_time
ol2[[tprod]][[idx]]$layer = sub_site$layer
idx=idx+1
}
}
# Himawari
# Second section: satellite (Himawari) tiles -- same index structure as the
# radar section above, but with a fixed 10-minute interval.
of=list()
rdr = fl[substr(fl,1,4)=="hima"]
idx = 1
for(i in rdr){
# Fixed-position filename fields -- assumes layout hima_YYYYMMDDHHMM_<site>;
# TODO confirm naming scheme.
time = substr(i,6,17)
site = substr(i,19,nchar(i))
print(site)
print(time)
# Himawari images arrive every 10 minutes.
interval = 10
filename = i
# End time from the filename; start time one interval earlier (+1 s so
# adjacent intervals do not overlap).
ftime = as.POSIXct(time,format="%Y%m%d%H%M")
stime = ftime - interval*60 + 1
print(filename)
thisdf = data.frame(prod="satellite",site=site,end_time=as.character(ftime),start_time=as.character(stime),layer=filename,stringsAsFactors=FALSE)
of[[idx]]=thisdf
idx=idx+1
}
of = bind_rows(of)
# Nest the satellite table the same way: product -> site -> vectors.
ol3 = list()
idx=1
unq_prods = unique(of$prod)
for(tprod in unq_prods){
print(tprod)
tof = filter(of,prod==tprod)
ol3[[tprod]]=list()
unq_sites = unique(tof$site)
idx=1
for(tsite in unq_sites){
print(tsite)
sub_site = filter(tof,site==tsite)
ol3[[tprod]][[idx]]=list()
ol3[[tprod]][[idx]]$id = tsite
ol3[[tprod]][[idx]]$start_time = sub_site$start_time
ol3[[tprod]][[idx]]$end_time = sub_site$end_time
ol3[[tprod]][[idx]]$layer = sub_site$layer
idx=idx+1
}
}
# Merge the radar and satellite indices and write the combined JSON file.
ol2 = c(ol2,ol3)
out=toJSON(as.list(ol2))
cat(out,file="tiles/layers.json")
# Extract the model time steps from the merged AQFx NetCDF and write them as a
# JSON array of ISO-8601 timestamps (UTC) for the front end.
library(ncdf4)
# NOTE(review): the NetCDF handle is never closed (no nc_close in this script).
data = nc_open("aqfx_done/merge_vtas.nc")
tl=ncvar_get(data,"time")
# Time variable is seconds since the Unix epoch.
tl=format(as.POSIXct(tl,origin="1970-1-1 00:00:00",tz="UTC"),format="%Y-%m-%dT%H:%M:%S.0Z")
out=toJSON(tl)
cat(out,file="tiles/aqfx_times.json")
|
57ade4c21f881cbb267a402349015bf23c8450bf | 8fbfade7dc137150a897611100dde49944fd0cb6 | /Plot1.R | e263de284f026c4decf786bb7b6d60312cb0ff88 | [] | no_license | jrajkamal/exp2 | 2c153637428d10ddc45ca3a8a472c4245b8ccc97 | c1a58670aed83ace045e9f969484b0d93ced5f1e | refs/heads/master | 2020-05-25T10:21:57.770071 | 2014-08-24T18:46:30 | 2014-08-24T18:46:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 508 | r | Plot1.R |
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
Emissions <- aggregate(NEI[, 'Emissions'], by=list(NEI$year), FUN=sum)
Emissions$PM <- round(Emissions[,2]/1000,2)
# total emissions from PM2.5 decreased in the United States from 1999 to 2008?
png(filename='plot1.png')
barplot(Emissions$PM, names.arg=Emissions$Group.1,
main=expression('Total Emission of PM'[2.5]),
xlab='Year', ylab=expression(paste('PM', ''[2.5], ' in Kilotons')))
dev.off() |
5478091c140676103a07d0735b9905e991f836a1 | d576e398ebc2ba1cfadb21237b0cf0de8ddde491 | /R/RExam/Exam_Level3_sol.R | 10eec23c41f573484efd4f5cbf690eb68c44f9f5 | [] | no_license | nobel861017/R_course_2018 | 8527fc94a12d354268f6e9c01d228b96e84d2502 | a6378bcd2bb56dd524f1c033fe3b8879217393fe | refs/heads/master | 2020-03-23T11:33:16.821773 | 2018-07-19T01:44:33 | 2018-07-19T01:44:33 | 141,509,916 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 514 | r | Exam_Level3_sol.R | # 1. 請載入 dplyr, 與 head(airquality) 5%
# Exam solutions, level 3: dplyr exercises on the built-in airquality data.
# Task 1 (5%): load dplyr and show head(airquality).
library(dplyr)
head(airquality)
# Task 2 (5%): use filter() to keep rows with airquality$Temp > 90.
# Note: this overwrites airquality in the session, so later steps see the subset.
airquality <- filter(airquality, Temp > 90)
# Task 3 (10%): with summarise() and group_by(), report the mean Wind per Month.
summarise(group_by(airquality, Month), Wind_mean = mean(Wind))
# Task 4 (10%): combine tasks 2 and 3 into one pipeline using %>%.
# (The extra filter is redundant here because airquality is already filtered,
# but it is harmless and matches the exam wording.)
airquality %>%
  filter(Temp > 90) %>%
  group_by(Month) %>%
  summarise(Wind_mean = mean(Wind))
|
27e022ae163a98d4b2f2d291850033c44b6f84bb | 0443357f09d74ef05cb479eae3447ecaf5fff67a | /HartigEtAl-PLOSone2014/R-Code-FiguresAndCalculations/Fig.2-6-AnalyticalPIPs/fig6.R | 581cf23bcc275c3a9059068ea9fb57d88dd1cfbb | [] | no_license | florianhartig/EvolutionOfRelativeNonlinearity | ba30a6ea6dec18d8903f0f5ff2088a0b7f41f7fd | 6c0e4b799dc8bae4993c05bbc82c0748b135d516 | refs/heads/master | 2021-01-13T01:36:40.448450 | 2015-10-03T10:06:38 | 2015-10-03T10:06:38 | 18,670,116 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,097 | r | fig6.R | ################################################################################
#
# http://florianhartig.wordpress.com/
################################################################################
# NOTE(review): clears the whole workspace and changes to a machine-specific
# absolute path -- this script only runs as-is on the original author's machine.
rm(list=ls(all=TRUE))
setwd("/Users/Florian/Home/Projekte/Papers_published/14-HartigEtAl-PLOSone-SympatricEvolutionOfRNC/Code/Final/Figures/Fig.2-AnalyticalPIPs")
# Presumably defines mss, mssmod, create_PIP, rescale_colors and basecol used
# below -- TODO confirm against functions.R.
source("functions.R")
require(fields)
require(graphics)
# Pairwise invasibility plot (PIP) data for the modified MSS population model.
invasibility_MSSmod = create_PIP(popfun = mssmod)
graphics.off()
# Output device: PDF (the png variant was left commented out by the author).
#png(width= 1000, height = 340, res = 300)
pdf("fig6-raw.pdf", width = 20, height = 7)
scaling = 2
#png("fig6.png", width= 1500, height = 520)
par(mfrow=c(1,3), mar = c(3*scaling,3*scaling,3*scaling,3*scaling) )
labcex = scaling
maincex = scaling
par(cex.axis = scaling)
plussize = 1.5*scaling
mpg = c(2*scaling, 1,0)
color = gray(0:30/30)
x=seq(0,2,by=0.02)
bval <- 10^(seq(-0.5,1,length.out=30))
plot(x, mss(x, b=6), type = "n",xlab = "N/K", ylab = "Reproductive rate", main="Original MSS", lty = 3 , lwd = 2, col = color[1]
, cex.lab = labcex, cex.main= maincex, mgp = mpg, font.main = 1)
for (i in 1:length(bval)){
lines(x, mss(x, b=bval[i]) , col = color[i], lty = 1, lwd = 1.5)
}
lines(x, mss(x, b=bval[9]) , col = "darkred", lty = 2, lwd = 2)
lines(x, mss(x, b=bval[24]) , col = "darkgreen", lty = 2, lwd = 2)
abline(v=1, lwd = 0.5, , lty = 1)
abline(h=1, lwd = 0.5, , lty = 1)
#legend("topright", bg="white", cex = 0.7,inset=0.0, legend = round(bval, digits = 1), lwd = 1.5, col = color, merge = TRUE)
plot(x, mssmod(x, b=6), type = "n",xlab = "N/K", ylab = "Reproductive rate", main="Modified MSS", lty = 3 , lwd = 2, col = color[1]
, cex.lab = labcex, cex.main= maincex, mgp = mpg, font.main = 1)
for (i in 1:length(bval)){
lines(x, mssmod(x, b=bval[i]) , col = color[i], lty = 1, lwd = 1.5)
}
lines(x, mssmod(x, b=bval[9]) , col = "darkred", lty = 2, lwd = 2)
lines(x, mssmod(x, b=bval[24]) , col = "darkgreen", lty = 2, lwd = 2)
abline(v=1, lwd = 0.5, , lty = 1)
abline(h=1, lwd = 0.5, , lty = 1)
#legend("topright", bg="white", cex = 0.7,inset=0.0, legend = round(bval, digits = 1), lwd = 1.5, col = color, merge = TRUE)
globalmin = min(invasibility_MSSmod[[2]])
globalmax = max(invasibility_MSSmod[[2]])
zlimits = c(globalmin, globalmax)
col = rescale_colors(basecol, 0.67, 3, 3)
image(log10(invasibility_MSSmod[[1]]),log10(invasibility_MSSmod[[1]]),invasibility_MSSmod[[2]], axes = F, zlim = zlimits,
main = "Invasibility modified MSS", font.main = 1, xlab = "Resident density-compensation strategy b",ylab = "Invading density-compensation strategy b",
col = col , cex.lab = labcex, cex.main= maincex, mgp = mpg)
axis(side = 1, at = log10(c(seq(from = 0.1, to = 1, by = 0.1), seq(from = 2, to = 9, by = 1), seq(from = 10 ,to = 50, by = 10) )), labels = F)
axis(side = 1, at = log10(c(0.3, 1, 3, 10)), labels = c("0.3", "1", "3", "10"), lwd.ticks = 2, tcl = - 0.5 )
axis(side = 2, at = log10(c(seq(from = 0.1, to = 1, by = 0.1), seq(from = 2, to = 9, by = 1), seq(from = 10 ,to = 50, by = 10) )), labels = F )
axis(side = 2, at = log10(c(0.3, 1, 3, 10)), labels = c("0.3", "1", "3", "10"), lwd.ticks = 2, tcl = - 0.5 )
image.plot(log10(invasibility_MSSmod[[1]]),log10(invasibility_MSSmod[[1]]),invasibility_MSSmod[[2]], legend.only = T, zlim = zlimits, col = col)
#contour(log10(invasibility_MSSmod[[1]]),log10(invasibility_MSSmod[[1]]),invasibility_MSSmod[[2]], add = T, nlevels = 1, lty = 3, lwd = 2, drawlabels = F)
image(log10(invasibility_MSSmod[[1]]),log10(invasibility_MSSmod[[1]]),ifelse(invasibility_MSSmod[[2]]<0.000001,1,NA), add =T, col = "#00000030")
bcri = log10(2.51)
#abline(v = bcri, lwd = 1, lty = 2)
#lines(c(bcri + 0.05, bcri-0.05), c(bcri,bcri))
#abline(h = bcri, lwd = 1, lty = 2)
text(log10(1.1), log10(0.4), "-", cex = plussize)
text(log10(6), log10(11), "-", cex = plussize)
text(log10(0.5), log10(4), "+", cex = plussize)
text(log10(10), log10(1), "+", cex = plussize)
#contour(log10(bvalues), log10(bvalues), mutual, add = T, levels = c(1000), lty = 2, lwd = 1, drawlabels = F)
dev.off()
|
48325a030a440ad448ed62d959a7f1563ddff4b2 | 0100e7895e366970d60609e5160e8315fd9f6c36 | /man/NeticaNode-class.Rd | 2ac6d8f7ccfe199413f6af7e52e3086956e0640f | [
"Artistic-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ralmond/RNetica | 1f79bdab40d06287dbfd4d85424a794461e0043c | 1a822b70a03ac2be587322c58a4cdf247f60381a | refs/heads/master | 2023-07-21T03:42:29.750254 | 2023-07-17T20:34:27 | 2023-07-17T20:34:27 | 239,856,123 | 2 | 2 | NOASSERTION | 2023-04-12T00:00:03 | 2020-02-11T20:19:09 | C | UTF-8 | R | false | false | 7,869 | rd | NeticaNode-class.Rd | \name{NeticaNode-class}
\Rdversion{1.1}
\docType{class}
\alias{NeticaNode-class}
\alias{Compare,NeticaNode,ANY-method}
\alias{print,NeticaNode-method}
\alias{toString,NeticaNode-method}
\alias{as.character,NeticaNode-method}
\alias{is.element,NeticaNode,list-method}
\title{Class \code{"NeticaNode"}}
\description{
This object is returned by various RNetica functions which create or
find nodes in a \code{\linkS4class{NeticaBN}} network. A \code{NeticaNode}
object represents a node object inside of Netica's memory. The
function \code{is.active()} tests whether the node is still a valid
reference.
}
\section{Extends}{
All reference classes extend and inherit methods from
\code{"\linkS4class{envRefClass}"}. Note that because this is a
reference class unlike traditional S3 and S4 classes it can be
destructively modified. Also fields (slots) are accessed using the
\sQuote{$} operator.
}
\section{Methods}{
\describe{
\item{[<-}{\code{signature(x = "NeticaNode")}: Sets conditional
probabliity table for node, see \link{Extract.NeticaNode}. }
\item{[}{\code{signature(x = "NeticaNode")}: Gets conditional
probabliity table for node, see \link{Extract.NeticaNode}. }
\item{[[}{\code{signature(x = "NeticaNode")}: Gets conditional
probabliity table for node, see \link{Extract.NeticaNode}. }
\item{Compare}{\code{signature(e1 = "NeticaNode", e2 = "ANY")}:
Tests two nodes for equality }
\item{is.element}{\code{signature(el = "NeticaNode", set = "list")}:
Checks to see if \var{el} is in list of nodes.}
\item{print}{\code{signature(x = "NeticaNode")}: Makes printed
representation. }
\item{toString}{\code{signature(x = "NeticaNode")}: Makes character
representation. }
}
}
\details{
This is an object of class \code{NeticaNode}. It consists of a name,
and an pointer to a Netica node in the workspace. The function
\code{\link{is.active}()} tests the state of that handle and returns
\code{FALSE} if the node is no longer in active memory (usually
because of a call to \code{DeleteNode()} or \code{DeleteNetwork()}.
\code{NeticaNode}s come in two types: discrete and continuous (see
\code{\link{is.discrete}()}). The two types give slightly different
meanings to the \code{\link{NodeStates}()} and
\code{\link{NodeLevels}()} attributes of the node. The printed
representation shows whether the node is discrete, continuous or
inactive (deleted).
\code{NeticaNode} objects are created at two different times. First,
when the user creates a node in a network using the
\code{\link{NewContinuousNode}()} or \code{\link{NewDiscreteNode}()}
functions. The second is when a user first reads the network in from a
file using \code{\link{ReadNetworks}} and then subsequently searches
for the node using \code{\link{NetworkFindNode}}. Note that this
latter means that there may be nodes in the Netica network for which
no R object has yet been created. When \code{NeticaNode} objects are
created, they are cached in the \code{\linkS4class{NeticaBN}} object.
Cached objects can be referenced by the \code{nodes} field of the
\code{NeticaBN} object (which is an R
\code{\link[base]{environment}}). Thus, the expressions
\code{\var{net}$nodes$\var{nodename}} and
\code{\var{net}$nodes[[\var{nodename}]]} both reference a node with
the Netica name \code{\var{nodename}} in the network
\code{\var{net}}. Note that both of these expressions will yield
\code{NULL} if no R object has yet been created for the node. The
function \code{\link{NetworkAllNodes}(\var{net})} will as a side
effect create node objects for all of the nodes in \code{\var{net}}.
The function \code{\link[base]{match}} (and consequently \code{\%in\%}
does not like it when the first argument is a node. To get around
this problem, wrap the node in a list. I've added a method for the
function \code{\link[base]{is.element}} which does this
automatically.
}
\references{
\newcommand{\nref}{\href{http://norsys.com/onLineAPIManual/functions/#1.html}{#1()}}
\url{http://norsys.com/onLurl/Manual/index.html}:
\nref{AddNodeToNodeset_bn}, \nref{RemoveNodeFromNodeset_bn},
\nref{IsNodeInNodeset_bn}
\nref{GetNodeUserData_bn}, \nref{SetNodeUserData_bn} (these are used
to maintain the back pointers to the R object).
}
\author{
Russell Almond
}
\note{
\code{NeticaNode} objects are all rendered inactive when
\code{\link{StopNetica}()} is called, therefore they do not persist
across R sessions. Generally speaking, the network should be saved,
using \code{\link{WriteNetworks}()} and then reloaded in the new
session using \code{\link{ReadNetworks}()}. The node objects should
then be recreated via a call to \code{\link{NetworkFindNode}()} or
\code{\link{NetworkAllNodes}()}.
}
\seealso{
Its container class can be found in \code{\linkS4class{NeticaBN}}.
The help file \code{\link{Extract.NeticaNode}} explains the principle
methods of referencing the conditional probability table.
\code{\link{NetworkFindNode}()},
\code{\link{is.active}()}, \code{\link{is.discrete}()},
\code{\link{NewContinuousNode}()}, \code{\link{NewDiscreteNode}()},
\code{\link{DeleteNodes}()}, \code{\link{NodeName}()},
\code{\link{NodeStates}()}, \code{\link{NodeLevels}()},
}
\examples{
sess <- NeticaSession()
startSession(sess)
nety <- CreateNetwork("yNode",sess)
node1 <- NewContinuousNode(nety,"aNode")
stopifnot(is.NeticaNode(node1))
stopifnot(is.active(node1))
stopifnot(node1$Name=="aNode")
node2 <- NetworkFindNode(nety,"aNode")
stopifnot(node2$Name=="aNode")
stopifnot(node1==node2)
NodeName(node1) <- "Unused"
stopifnot(node1==node2)
node1$Name == node2$Name
noded <- DeleteNodes(node1)
stopifnot(!is.active(node1))
stopifnot(!is.active(node2))
stopifnot(noded$Name=="Unused")
stopifnot(noded == node1)
node1 == node2
DeleteNetwork(nety)
stopSession(sess)
}
\keyword{classes}
\section{Fields}{
Note these should be regarded as read-only from user code.
\describe{
\item{\code{Name}:}{Object of class \code{character} giving the
Netica name of the node. Must follow the \code{\link{IDname}}
rules. This should not be modified by user code, use
\code{\link{NodeName}} instead.}
\item{\code{Netica_Node}:}{Object of class \code{externalptr} giving
the address of the node in Netica's memory space. }
\item{\code{Net}:}{Object of class \code{\linkS4class{NeticaBN}}, a
back reference to the network in which this node resides. }
\item{\code{discrete}:}{Object of class \code{logical} true if the
node is discrete and false otherwise. }
}
}
\section{Class-Based Methods}{
\describe{
\item{\code{show()}:}{ Prints a description of the node. }
\item{\code{isActive()}:}{ Returns true if the object currently
points to a Netica node, and false if it does not. }
\item{\code{clearErrors(severity)}:}{ Calls \code{clearErrors} on
the \code{Net$Session} object. }
\item{\code{reportErrors(maxreport, clear, call)}:}{ Calls
\code{reportErrors} on the \code{Net$Session} object. Returns an
object of class \code{\link{NeticaCondition}} if there was a
message, or \code{NULL} if not.}
\item{\code{signalErrors(maxreport, clear, call)}:}{ Calls
\code{signalErrors} on the \code{Net$Session} object. If there was a
problem, the appropriate condition is signaled, see
\code{\link{NeticaCondition}}. }
    \item{\code{initialize(Name, Net, discrete, ...)}:}{ Initialization
function. Should not be called directly by user code. Use
\code{\link{NewDiscreteNode}} or \code{\link{NewContinuousNode}}
instead. }
    \item{\code{deactivate()}:}{ Recursively deactivates all nodes
contained by this network. Should not be called by user code. }
}
}
|
167f205a218dbdcc868f2e259918a1493e6ad60b | 74e7b6122fb9b6f12ee0f91778d9558e157b093c | /man/ipolate.Rd | 0b32134ab702749c69868ead9f00e2e84ffc38a7 | [] | no_license | ckhead/HeadR | 3072ca2b9fe3166c1be41d3c86d50305f2c910e2 | ea0233831f7c38356866268b4ddd5e61968c2349 | refs/heads/main | 2023-08-21T21:16:38.601165 | 2021-05-28T21:25:52 | 2021-05-28T21:25:52 | 339,247,597 | 0 | 1 | null | 2023-08-08T03:45:37 | 2021-02-16T00:44:36 | R | UTF-8 | R | false | true | 433 | rd | ipolate.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ipolate.R
\name{ipolate}
\alias{ipolate}
\title{Linear interpolation of missing data}
\usage{
ipolate(xvar, yvar)
}
\arguments{
\item{xvar}{the variable to interpolate using (usually year)}
\item{yvar}{the name of the variable with missing data}
}
\description{
Fills in missing (\code{NA}) values of \code{yvar} by linear interpolation
over \code{xvar}.
}
\details{
\code{ipolate} resembles the Stata command of the same name; note that this
function has had very little testing.
}
|
be59c9009eb39bd1857a20d6248504e09c780ca8 | 6d7d91556d188b8786475d7a22064058fe33fe28 | /R/calc_climate_index.R | 992b80f4891d6487ff509d64bc40207f966d9828 | [] | no_license | ditianETH/leafnp | 22ff49bb84417b4fffa27eb4285cbfa6802ce4b3 | 9192c2a2e8e7c35c768b6d2a9e063a1cac289586 | refs/heads/main | 2023-06-20T04:28:30.881917 | 2021-07-14T07:23:13 | 2021-07-14T07:23:13 | 388,098,627 | 0 | 0 | null | 2021-07-21T11:45:21 | 2021-07-21T11:45:21 | null | UTF-8 | R | false | false | 3,228 | r | calc_climate_index.R | ## mean annual temperature
# Mean annual temperature: average of the daily temperature column over the
# whole record. Extra arguments (e.g. na.rm = TRUE) are forwarded to mean().
calc_climate_index_mat <- function(df, ...){
  mean(df$temp, ...)
}
## mean temperature during growing season
calc_climate_index_matgs <- function(df, temp_base = 5.0, ...){
  # Growing-season mean temperature: average over days with temp >= temp_base.
  # which() drops NA temperatures, matching dplyr::filter()'s behaviour.
  growing_temps <- df$temp[which(df$temp >= temp_base)]
  mean(growing_temps, ...)
}
## Minimal monthly temperature (coldest month temperature)
calc_climate_index_tmonthmin <- function(df, ...){
  # Coldest-month temperature: mean temp per calendar month (pooled across
  # all years), then the minimum of those monthly means. ... is forwarded to
  # the per-month mean() (e.g. na.rm = TRUE).
  month_of <- as.POSIXlt(df$date)$mon + 1  # 1..12, same as lubridate::month()
  min(tapply(df$temp, month_of, mean, ...))
}
## Maximal monthly temperature (warmest month temperature)
calc_climate_index_tmonthmax <- function(df, ...){
  # Warmest-month temperature: mean temp per calendar month (pooled across
  # all years), then the maximum of those monthly means. ... is forwarded to
  # the per-month mean() (e.g. na.rm = TRUE).
  month_of <- as.POSIXlt(df$date)$mon + 1  # 1..12, same as lubridate::month()
  max(tapply(df$temp, month_of, mean, ...))
}
## Number of days with daily temperature above 0˚C (TMP0nb) or 5˚C (TMP5nb)
# Number of growing-season days per year: days with daily temperature at or
# above temp_base (default 5 degC), averaged over all years in the record.
#
# Fix: the previous version filtered first and then counted rows per year, so
# a year containing no day >= temp_base dropped out of the grouping entirely;
# the across-year mean was biased upward, and became NaN when no year
# qualified at all. Counting over the full record keeps those years as 0.
# ... is kept for interface compatibility (it was unused before as well).
calc_climate_index_ndaysgs <- function(df, temp_base = 5.0, ...){
  year_of <- as.POSIXlt(df$date)$year + 1900
  # Per-year count of qualifying days; NA temperatures are ignored, matching
  # the row-dropping behaviour of the old dplyr::filter() step.
  ndays_per_year <- tapply(df$temp >= temp_base, year_of, sum, na.rm = TRUE)
  mean(ndays_per_year)
}
## annual mean daily irradiance (PPFD)
calc_climate_index_mai <- function(df, ...){
  # Mean annual daily irradiance (PPFD); ... is forwarded to mean().
  mean(df$ppfd, ...)
}
## growing season mean daily irradiance
calc_climate_index_maigs <- function(df, temp_base = 5.0, ...){
  # Growing-season mean daily irradiance: PPFD averaged over days with
  # temp >= temp_base. which() drops NA temperatures like dplyr::filter().
  mean(df$ppfd[which(df$temp >= temp_base)], ...)
}
## mean annual summed precipitation
calc_climate_index_map <- function(df, ...){
  # Mean annual precipitation: rain + snow (presumably a per-second rate --
  # the conversion multiplies by 86400 s to get a daily total; TODO confirm
  # units), summed per calendar year, then averaged across years.
  # ... reaches the yearly sum() (e.g. na.rm = TRUE), as in the original.
  prec_daily <- (df$rain + df$snow) * (60 * 60 * 24)
  year_of <- as.POSIXlt(df$date)$year + 1900
  mean(tapply(prec_daily, year_of, sum, ...))
}
## Precipitation of Driest Month
calc_climate_index_pmonthmin <- function(df, ...){
  # Precipitation of the driest month: sum (rain + snow) per (year, month),
  # average those monthly sums across years for each calendar month, then
  # return the smallest of the monthly means. ... is forwarded to both the
  # per-month sum and the across-year mean, as in the original pipeline.
  prec_daily <- (df$rain + df$snow) * (60 * 60 * 24)
  stamp <- as.POSIXlt(df$date)
  # aggregate() drops empty (year, month) combinations, like dplyr::group_by.
  per_year_month <- aggregate(prec_daily,
                              by = list(year = stamp$year + 1900,
                                        month = stamp$mon + 1),
                              FUN = sum, ...)
  monthly_means <- tapply(per_year_month$x, per_year_month$month, mean, ...)
  min(monthly_means)
}
## mean growing season summed precipitation (mm)
calc_climate_index_mapgs <- function(df, temp_base = 5.0, ...){
  # Growing-season precipitation (mm): keep days with temp >= temp_base
  # (which() drops NA temps, like dplyr::filter()), sum rain + snow per year,
  # then average across years. Years without any qualifying day drop out of
  # the grouping, matching the original behaviour.
  keep <- which(df$temp >= temp_base)
  prec_daily <- (df$rain[keep] + df$snow[keep]) * (60 * 60 * 24)
  year_of <- as.POSIXlt(df$date[keep])$year + 1900
  mean(tapply(prec_daily, year_of, sum, ...))
}
## mean daytime VPD during the growing season
calc_climate_index_mavgs <- function(df, temp_base = 5.0, ...){
  # Growing-season mean VPD: per-year mean of vpd over days with
  # temp >= temp_base, then the across-year average. ... reaches only the
  # inner (per-year) mean, as in the original.
  keep <- which(df$temp >= temp_base)
  year_of <- as.POSIXlt(df$date[keep])$year + 1900
  mean(tapply(df$vpd[keep], year_of, mean, ...))
}
## mean daytime VPD
calc_climate_index_mav <- function(df, ...){
  # Mean daytime VPD over the whole record; ... is forwarded to mean().
  mean(df$vpd, ...)
}
## potential evapotranspiration from SOFUN! |
e6ce4150c59e7c6b1b3385d4434c726839fe6a5d | 0bd59749f0e9cf9011c72ddc87c380cb5150db77 | /FIGSHARE_Code_Fahrrad_Berlin/Praezision/Praezision.R | 14e4012b957c384d97f848ad3fb2a375aa4f1a96 | [] | no_license | LiliaMilo/GPS | b8efee5eee47fdb3ef416f4d43e92a1beac8c36e | 167dc01de319ce121fc8401881d0018deaf0fd0f | refs/heads/main | 2022-12-30T18:03:16.399537 | 2020-10-22T10:26:17 | 2020-10-22T10:26:17 | 306,300,432 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,501 | r | Praezision.R | ########################################################################
### library
########################################################################
library(XML)
library(OpenStreetMap)
library(lubridate)
library(ggmap)
library(ggplot2)
library(raster)
library(sp)
###########################################################################################################
### How precise is a GPS point, depending on the number of decimal places?
###########################################################################################################
# Original note: lat ~ 111 m step north-south, lon ~ 70 m step east-west at
# this distance from the equator (per coordinate decimal used below).
# Example point: the Berliner Dom.
bsp_berlinerdom=data.frame(lon=13.401111,lat=52.519354)
# Square spanned by 3 decimal places (corner path plus closing edge).
bsp3.1=data.frame(lon=c(13.401,13.401,13.4019,13.4019),lat=c(52.519,52.5199,52.5199,52.519))
bsp3.2=data.frame(lon=c(13.402,13.401),lat=c(52.519,52.519))
# Square spanned by 4 decimal places.
bsp4.1=data.frame(lon=c(13.4011,13.4011,13.40119,13.4011),lat=c(52.5193,52.51939,52.51939,52.5193))
bsp4.2=data.frame(lon=c(13.40119,13.4011),lat=c(52.5193,52.5193))
# Map section around the Berliner Dom.
map <- get_map(c(left = 13.3950001, bottom = 52.517001, right = 13.404909, top = 52.52691))
ggmap(map)+
geom_point(data = bsp_berlinerdom,
aes(lon,lat), size=1, alpha=0.7,colour = "red")+
labs(x = "Longitude", y = "Latitude",title = "Berliner Dom")+
geom_line(data = bsp3.1,
aes(lon,lat), size=1, alpha=0.7,colour = "green")+
geom_line(data = bsp3.2,
aes(lon,lat), size=1, alpha=0.7,colour = "green")+
geom_line(data = bsp4.1,
aes(lon,lat), size=1, alpha=0.7,colour = "black")+
geom_line(data = bsp4.2,
aes(lon,lat), size=1, alpha=0.7,colour = "black")+
theme(axis.text=element_text(size=8),
axis.title=element_text(size=10),
title =element_text(size=12))
#Gormannstraße Mulackstraße
bsp_kreuzung=data.frame(lon=13.404667,lat=52.527551)
bsp3.1_kreuzung=data.frame(lon=c(13.404,13.404,13.4049,13.4049),lat=c(52.527,52.5279,52.5279,52.527))
bsp3.2_kreuzung=data.frame(lon=c(13.4049,13.404),lat=c(52.527,52.527))
bsp4.1_kreuzung=data.frame(lon=c(13.4046,13.4046,13.40469,13.40469),lat=c(52.5275,52.52759,52.52759,52.5275))
bsp4.2_kreuzung=data.frame(lon=c(13.40469,13.4046),lat=c(52.5275,52.5275))
map <- get_map(c(left = 13.40200, bottom = 52.52521, right = 13.40846, top = 52.53011),maptype="hybrid")
css_typ_title = list(theme(axis.text=element_text(size=32,face = "bold"),
axis.title.x=element_text(size=35,face="plain",hjust = 0.5,vjust=-2),
axis.title.y=element_text(size=35,face="plain",hjust = 0.5,vjust=3),
title=element_text(size=36,face="plain",hjust = 0.5),
plot.margin = (unit(c(.5, .5, 1, 1), "cm"))))
plot= ggmap(map)+
geom_point(data = bsp_kreuzung,
aes(lon,lat), size=0.8, alpha=0.7,colour = "red")+
labs(x = "Longitude", y = "Latitude",title = "Gormann- Mulackstraße",
colour = "Legend")+
geom_line(data = bsp3.1_kreuzung,
aes(lon,lat), size=0.8, alpha=0.7,colour = "darkgreen")+
geom_line(data = bsp3.2_kreuzung,
aes(lon,lat), size=0.8, alpha=0.7,colour = "darkgreen")+
geom_line(data = bsp4.1_kreuzung,
aes(lon,lat), size=0.8, alpha=0.7,colour = "black")+
geom_line(data = bsp4.2_kreuzung,
aes(lon,lat), size=0.8, alpha=0.7,colour = "black")+
css_typ_title
plot
|
1b89c470cd87f0ab71af6e26a0b94ad3d2752e6d | 0f64ac5e3d3cf43124dcb4917a4154829e7bb535 | /utils/abbrevList.R | 9c15713790971c185afe6f7aa7173a649553f37d | [] | no_license | wactbprot/r4vl | 8e1d6b920dfd91d22a01c8e270d8810f02cea27c | a34b1fa9951926796186189202750c71e7883f8d | refs/heads/master | 2016-09-11T02:22:39.828280 | 2014-10-07T14:37:55 | 2014-10-07T14:37:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,329 | r | abbrevList.R | abbrevList <- function(ccc){
    ## Build a list `a` of short-hand aliases into the nested
    ## Calibration document `ccc`, plus a flag `dataAvailable` that
    ## records whether measurement values are present.
    ## NOTE(review, translated from German): the function
    ## checkSetList() is still waiting to be used here.
    a <- list()
    a$dataAvailable <- FALSE
    a$c <- ccc$Calibration
    a$cs <- a$c$Standard
    a$ct <- a$c$Type
    a$cy <- a$c$Year
    a$csi <- a$c$Sign
    a$cp <- a$c$Presettings
    a$cpt <- a$cp$ToDo
    a$cc <- a$c$Constants
    a$cm <- a$c$Measurement
    a$cms <- a$cm$Standard
    a$cmv <- a$cm$Values
    a$cma <- a$cm$AuxValues ## present since 4/11
    a$cmco <- a$cm$CalibrationObject
    a$cmco1 <- a$cm$CalibrationObject[[1]] ## customer device
    ### TODO (translated): still split the Co[2...N] objects explicitly here
    ### CE3-specific fields
    if(a$cs =="CE3"){
        a$cmsc <- a$cm$SequenceControl
        a$cmscok <- a$cmsc$operationKind
        a$cmscg <- a$cmsc$Gas
        a$cmscp <- a$cmsc$calPort
    }
    ### SE1-specific fields
    if(a$cs =="SE1"){
        a$cmag <- a$cma$Gas
    }
    ### VG-specific fields
    if(a$cs =="FRS5|SE2" |
       a$cs =="DKM|FRS5" |
       a$cs == "FRS5" |
       a$cs == "DKM"){
        if(!(length(a$cm$SequenceControl) == 0)){
            a$cmsc <- a$cm$SequenceControl
        }
    }
    ## a$cmsc only exists if one of the branches above assigned it
    if((is.list(a$cmsc) )){
        a$cmscoi <- a$cmsc$outIndex
        }
    ## measurement values present -> flag data as available
    if(length(a$cmv) > 0){
        a$dataAvailable <- TRUE
    }
    a$ca <- a$c$Analysis
    a$cav <- a$ca$Values
    a$cr <- a$c$Result
    return(a)
}
|
5f1e5e39de96e70cbba220a24c123e292086f52f | c5448803c6435f5ced328f1d3bc30fc56a4e70a3 | /Documents/Big Data Analytics/DA and E/jupyter/fma/man/copper1.Rd | a891e6b99900e735e65134054e6fad7856202784 | [] | no_license | JohannesKokozela/Research_Project | 0028dc0531e68d80b0ad2bee7b2e0f3a7f7596ba | a1b462d3f77ae6437d3648ab3f45fd76ce72ebbe | refs/heads/master | 2021-07-13T01:25:59.689912 | 2017-10-14T13:41:07 | 2017-10-14T13:41:07 | 106,183,621 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 382 | rd | copper1.Rd | \name{copper1}
\alias{copper1}
\title{Copper prices}
\description{Monthly copper prices for 28 consecutive months (in constant 1997 dollars).}
\usage{copper1}
\format{Time series data}
\source{Makridakis, Wheelwright and Hyndman (1998) \emph{Forecasting: methods and applications}, John Wiley & Sons: New York.
Chapter 9.}
\keyword{datasets}
\examples{plot(copper1)
}
|
71de68775ac144e031bc7a16adbee8088c467108 | 7b93f65c8d36750afdd5add6b6676b1523e818ec | /R/DiscoveringStatUsingR/chap03/3_6.R | c2a39a579689cb73da922a164b12771bab06f111 | [] | no_license | DaisukeYamato/book | 9cbb3620e5f297832a7c0dc4b35f38b61a6395ea | 0308106dedf8a23b491aada942f781d2ab000b69 | refs/heads/master | 2021-01-22T13:13:02.365196 | 2015-08-26T00:12:56 | 2015-08-26T00:12:56 | 40,760,927 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 72 | r | 3_6.R | # chap 3.6
install.packages("Rcmdr", dependencies = TRUE)
library(Rcmdr) |
c6176af26fcce784e75f8fee79e4087f6032ad68 | 75380f9c27f6f8d3726f1f9aade2b32dba726035 | /naive_b.R | 289bcbd2fb7d850a1d7c7d7674903cf690975a38 | [] | no_license | v7t-codes/ML_assignment | 311d41613183a6f2bc62dcb62648ae45e021d9f9 | 105a3b233d3097dae6aba0a2a71c404329746634 | refs/heads/master | 2023-05-26T07:49:04.524788 | 2017-07-11T15:05:05 | 2017-07-11T15:05:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 684 | r | naive_b.R | library(MASS)
library(klaR)
library("e1071")
library("caret")

## Train/test split on iris (150 rows, 50 per species).
## Training set: rows 1-40, 51-90, 101-140 (40 rows per species).
train_ <- iris[-c(41:50, 91:100, 141:150), ] ## splitting the data set by removing the test data
## Test set: the exact complement of the training rows, i.e. rows
## 41-50, 91-100 and 141-150 (10 rows per species).
## BUGFIX: the original index vector -c(1:40, 50:90, 100:140) was off
## by one -- it silently dropped rows 50 and 100 from BOTH sets and
## left only 28 (instead of 30) test rows.
test <- iris[c(41:50, 91:100, 141:150), ]
## subsetting with row indices preserves the data frame structure

## Separate the features (columns 1-4) from the target (column 5, Species).
x1 = train_[,-5]
y1 = train_$Species

## Fit a naive Bayes classifier with 5-fold cross-validation.
model = train(x1, y1, 'nb', trControl = trainControl(method = 'cv', number = 5))
model ## the fitted naive Bayes model

## Predict on the held-out rows and show the confusion matrix.
x_test = test[,-5]
predict(model$finalModel, x_test) ## predicting on the test data
y_test = test$Species
table(predict(model$finalModel, x_test)$class, y_test)
d8b3b92a86d820ddfc23f79c0a36c989df04f342 | f49e4098e3c70fe664c17958f7fddaede5927f49 | /Assignment3/rankall.r | f9743c1c209bd0a4dae31006e22b02fa5936a267 | [] | no_license | rizwandel/Coursera-R-Programming-Assignment1 | e9e90440912801dacd26b936ca8c826c5a6cd87e | 331189e7494132a8c09e403d24d1def214547c52 | refs/heads/master | 2020-03-19T10:21:24.463925 | 2014-09-02T03:22:18 | 2014-09-02T03:22:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,799 | r | rankall.r | rankall <- function(outcome, num = "best") {
setwd("d:/r programming")
options(stringsAsFactors=FALSE)
data <- read.csv("outcome-of-care-measures.csv",na.strings= "Not Available", colClasses = "character")
valid_outcome=list("heart attack","heart failure", "pneumonia")
if (! outcome %in% valid_outcome){
stop("invalid outcome")
}
colnames(data)[11] <- "heart attack"
colnames(data)[17] <- "heart failure"
colnames(data)[23] <- "pneumonia"
data[,2]<-as.character(data[, 2])
data[, 7]<-as.character(data[, 7])
data[, 11] <- as.numeric(data[, 11])
data[, 17]<-as.numeric(data[, 17])
data[, 23]<-as.numeric(data[, 23])
df=data.frame(data$"Hospital.Name", data$"State", data[[outcome]])
colnames(df)[1]<-"Hospital.Name"
colnames(df)[2]<- "State"
colnames(df)[3]<- "Rate"
df[, 3]<-as.numeric(df[, 3])
clean=df[ ! is.na( df[, 3] ) , ]
state=unique(clean$State)
#order(states)
#state_abc=(sort(states))
result=data.frame(NULL)
for(states in seq_along(state)){
get_state=subset(clean, clean$State==state[states])
count=as.numeric(nrow(get_state))
get_state[,3]<-as.numeric(get_state[,3])
get_state[,1]<-as.character(get_state[,1])
sorted=get_state[ order(get_state[,3], get_state[,1]), ]
sorted[,1]<-as.character(sorted[,1])
ranked=order(sorted[,3])
if (num>count){
hospital=("NA")
}
if (num%in%ranked){
hospital=(sorted[num, 1])
}
if (num=="best"){
best=which.min(ranked)
hospital=(sorted[best,1])
}
if(num=="worst"){
worst=which.max(ranked)
hospital=sorted[worst,1]
}
event=data.frame(hospital)
result=rbind(result, event)
}
y=cbind(result, state)
final=y[order(y[,2],y[,1]),]
print(final)
}
|
5c5aa492dc639c296f88273f6e2d92e94a3da94a | ccb1bef7728e7886ea0b8a5cf0729973f50f2021 | /man/GeomHurricane.Rd | beaa0ebc58756b5d8213fd5d30baff32e253a0fa | [] | no_license | DYZI/Hurricane | 5e6f0f89798cba76805d384be97ff046272b3786 | 54b97c84fd6765b8f738098b6b8230194accc586 | refs/heads/master | 2020-05-02T20:44:15.500523 | 2019-03-31T01:07:50 | 2019-03-31T01:07:50 | 178,200,234 | 0 | 2 | null | null | null | null | UTF-8 | R | false | true | 549 | rd | GeomHurricane.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_hurricane.R
\docType{data}
\name{GeomHurricane}
\alias{GeomHurricane}
\title{GeomHurricane}
\format{An object of class \code{GeomHurricane} (inherits from \code{GeomPolygon}, \code{Geom}, \code{ggproto}) of length 4.}
\usage{
GeomHurricane
}
\arguments{
\item{arc_step}{Resolution of the arcs in the wind_radii graph.}
\item{scale_radii}{Scaling factor applied to the radius size.}
}
\description{
ggproto object to display the hurricane radii
plots.
}
\keyword{datasets}
|
dbf3467e77067f8542929d6449854fa0b8795206 | 625803c3ef1f7dcbae875823c3e53e6338d64880 | /Build_Ranger.R | 5c2ceb0d3012224be26944e54dbe76422aa29e18 | [] | no_license | Anderson-Lab/capstone-spring-2018-team-2 | 6201ce70448ac8070907a6d049cd8fdb21e76ad4 | f0f9be5afd814c73deb1096ef37fafb4d0f85f84 | refs/heads/master | 2021-03-24T13:51:17.292772 | 2018-05-06T14:11:06 | 2018-05-06T14:11:06 | 119,429,367 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,275 | r | Build_Ranger.R | library(ranger)
library(caret)
library(dplyr)
source("Join_Data.R")
source('baseline.R')
load('meta.rda')

## --- Data preparation -------------------------------------------------
# Get data: join MEPS tables, keep policy holders with private insurance
# (Join_MEPS / Private_Filter / Age.to.Cat come from the sourced files).
meps <- Join_MEPS()
meps.p <- meps[meps$PHOLDER == 1,]
#mepsPublic<-Public_Filter(meps.p)
mepsPrivate<-Private_Filter(meps.p)
# Get vars: restrict to respondents older than 40 and build case
# weights from the hospitalization count (floor of .3 for non-events)
mepsPrivate <- mepsPrivate[mepsPrivate$AGE15X > 40,]
mepsPrivate$w <- mepsPrivate$IPDIS15
mepsPrivate[mepsPrivate$w<1, 'w']<- .3
mepsPrivate$age.cat <- Age.to.Cat(mepsPrivate, 'AGE15X')
# variable groups: plan design, preventive-care behaviors, controls
plan.dsn <- c('HOSPINSX','ANNDEDCT', 'HSAACCT', 'PLANMETL')
behaviors <- c('BPCHEK53', 'CHOLCK53', 'NOFAT53', 'CHECK53', 'ASPRIN53', 'PAPSMR53',
               'BRSTEX53', 'MAMOGR53', 'CLNTST53')
controls <- c('PHOLDER', 'CHBMIX42','BMINDX53','ADGENH42', 'age.cat', 'FAMINC15',
              'COBRA', 'OOPPREM', 'PREGNT31', 'PREGNT42', 'PREGNT53')
target <- 'IPDIS15'
weights <- 'w'
vars <- c(target, plan.dsn, behaviors, controls, weights)
predVars <- c(plan.dsn, behaviors, controls)
ordered <- c('PLANMETL', 'ADGENH42', 'age.cat', behaviors)
factors <- c('IPDIS15', 'HOSPINSX', 'HSAACCT','COBRA', 'PREGNT53')
#Set target to binary (any hospitalization -> 1)
mepsPrivate$IPDIS15[mepsPrivate$IPDIS15>1] <- 1
#Coerce to fewer factors: clamp negative codes to 0, then cast
mepsPrivate$ANNDEDCT <- as.numeric(mepsPrivate$ANNDEDCT)
for(variable in c(plan.dsn, behaviors)){
  mepsPrivate[mepsPrivate[,variable] < 0, variable] <- 0
}
for(factor in factors){
  mepsPrivate[,factor] <- as.factor(mepsPrivate[, factor])
}
for(factor in ordered){
  mepsPrivate[,factor] <- as.ordered(mepsPrivate[, factor])
}
## --- Train/test split and model fit -----------------------------------
#split: 80/20 stratified on the target
trainidx <- createDataPartition(mepsPrivate$IPDIS15, p=.8, list = FALSE)
train <- mepsPrivate[trainidx,vars]
y.test <- mepsPrivate[-trainidx,target]
x.test <- mepsPrivate[-trainidx,-which(names(meps) == target)]
#ds <- downSample(train, train[,target], list = FALSE)
f <- formula(paste(target, paste(predVars, collapse = '+' ), sep = '~'))
# weighted probability random forest (ranger)
fit <- ranger(formula = f,
              data = train,
              case.weights = train$w,
              num.trees = 2500,
              importance = 'impurity',
              min.node.size = 150,
              probability = TRUE,
              classification = TRUE,
              sample.fraction = .7)
#save(fit, file = "r-shiny/template/data/ranger_hosp_fit.rda")
## --- Performance metrics ----------------------------------------------
# get model performance metrics
preds.train <- as.data.frame(predict(fit, train[,predVars])$predictions)
preds.test <- as.data.frame(predict(fit, x.test)$predictions)
#save(preds.test, file = "r-shiny/template/data/ranger_hosp_preds.rda")
save(y.test, file = "r-shiny/template/data/y.test.rda")
classNames <- c('NoHosp', 'Hosp')
levels(train[,target])<-classNames
levels(y.test)<-classNames
colnames(preds.train)<-classNames
colnames(preds.test)<-classNames
# probability threshold for calling an observation "Hosp"
cutOff = .7
train.Results<-data.frame(preds.train,
                          obs = train[,target],
                          pred = ifelse(preds.train[,classNames[1]] < cutOff, classNames[1], classNames[2]))
test.Results<-data.frame(preds.test,
                         obs = y.test,
                         pred = ifelse(preds.test[,classNames[1]] < cutOff, classNames[1], classNames[2]))
# test Results
levels(train.Results$pred) <- classNames
levels(test.Results$pred) <- classNames
twoClassSummary(train.Results, lev = classNames)
twoClassSummary(test.Results, lev = classNames)
# baseline comparison model (from baseline.R)
base.preds <- as.data.frame(baseline(train[,target], length(y.test), TRUE))
colnames(base.preds) <- 'NoHosp'
base.preds$Hosp <- 1-base.preds$NoHosp
base.Results<-data.frame(base.preds,
                         obs = y.test,
                         pred = ifelse(base.preds[,classNames[1]] < cutOff, classNames[1], classNames[2]))
levels(base.Results$pred) <- c('Hosp', 'NoHosp')
levels(base.Results$pred) <- levels(base.Results$pred)[c('NoHosp', 'Hosp')]
twoClassSummary(base.Results, lev = classNames)
confusionMatrix(base.preds, y.test)
confusionMatrix(test.Results$pred, y.test)
## --- Performance plots -------------------------------------------------
# Performance Plots
library(ROCR)
## find the best cut off: overlay sensitivity and specificity vs cutoff
pred <- prediction( preds.train[,1], train[,target])
plot(performance(pred, "sens" , x.measure = "cutoff"), col = 'red', ylab= NULL, main="Optimal Cutoff")
par(mar=c(4,4,4,4))
par(new=T)
plot(performance(pred, "spec" , x.measure = "cutoff"),add = TRUE, col = 'blue', xlab = NULL)
axis(side = 4, at = .5, labels = 'specificity', padj = 1 )
legend(.3, .9, legend=c("Sensitivity", "Specificity"),
       col=c("red", "blue"), lty=1, cex=0.8)
#x<-locator()
# ROC curve and accuracy-vs-cutoff curve
plot(performance(pred, "tpr" , x.measure = "fpr"), col = 'red', ylab= NULL)
abline(0,1)
plot(performance(pred, "acc" , x.measure = "cutoff"), col = 'red', ylab= NULL)
performance(pred, "auc")
## --- Variable importance -----------------------------------------------
#Var Imp Plots
#x<-fit$variable.importance
#x<-x[order(x)]
#par(mar = c(4,10,4,4))
#barplot(x, horiz = TRUE, las = 1)
imp <- fit$variable.importance
#imp <- imp[imp > 50]
library(data.table)
imp.dt<-setDT(as.data.frame(imp), keep.rownames = TRUE)[]
imp.dt.top <- head(arrange(imp.dt,desc(imp)), n = 10)
save(imp.dt, file = "r-shiny/template/data/ranger_imp.rda") # save the data to the r-shiny directory so that var.importance interactivity can be added
library(ggplot2)
# horizontal bar chart of the 10 most important predictors
ggplot(data=imp.dt.top, aes(x=reorder(rn,imp), y=imp)) +
  geom_bar(stat="identity", fill = "dodgerblue3", color="black") +
  ggtitle('Variable Importance: Gini Impurity') +
  xlab('Variables') +
  ylab('Relative Importance')+
  coord_flip()
|
610c77fa72bdce881c4318824f0a0f1feab2ce12 | 5ece49c446470c728ba11b66edd8c15604e81f3d | /man/remove_weak_snvs.Rd | 548d66c4055ed4f5762ed5a697ce5829a99c230e | [] | no_license | Romualdi-Lab/rbrewerix | 654d8593ed0d34a4153481d8bfd8177433862b3b | 7f01405b014c57b32a7fdb0508fd42c6215f40b1 | refs/heads/master | 2023-04-17T13:12:13.385876 | 2020-05-25T09:25:06 | 2020-05-25T09:25:06 | 266,130,463 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 461 | rd | remove_weak_snvs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remove_weak_snvs.R
\name{remove_weak_snvs}
\alias{remove_weak_snvs}
\title{Remove weak SNVs}
\usage{
remove_weak_snvs(LOI, thr = 0.2, odepth = 20, min_ac = 3)
}
\arguments{
\item{LOI}{An object obtained by read_guess_loi_tavle_v3}
\item{thr}{threshold}
\item{odepth}{overall depth}
\item{min_ac}{minimum allele count}
}
\value{
A GuessLoi object
}
\description{
Remove weak SNVs
}
|
8a21c08ed84033980f4759330e3ad3620aced80a | e0876c92bc406d0140748838f35a2aee63460e4e | /r-basics/assessment_2.R | 5c2c28da8cedc9da59dd12102fe1e5c6db278cbf | [] | no_license | orsdanilo/harvardx-ds | 84d1aa10f9f487ae9bd71eb893fa7db70466d0dd | 742a0afbf5ba2339c0c4c408a99552655c9c26bc | refs/heads/master | 2022-12-03T21:11:41.546768 | 2020-08-02T16:57:15 | 2020-08-02T16:57:15 | 256,887,722 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 359 | r | assessment_2.R | # Question 1
x <- c(2, 43, 27, 96, 18)
sort(x)
order(x)
rank(x)
# Question 2
min(x)
which.min(x)
max(x)
which.max(x)
# Question 3
name <- c("Mandi", "Amy", "Nicole", "Olivia")
distance <- c(0.8, 3.1, 2.8, 4.0)
time <- c(10, 30, 40, 50)
time <- time/60
speed <- distance/time
runner_df <- data.frame(name = name, hours = time, speed = speed)
runner_df
|
bcc357b6080cd4cd78788960f26075cf315ff426 | d9935521bc28411edf083b422a70a0e42cad0e9b | /source/cptac_processing.R | 1f1b238fbf52b4a8d3e89c423e81df32ecfd22c4 | [] | no_license | NicolasHousset/FocusHydrophil | cc0d5d77470c11087b55eabbb5b3da660e8d2894 | 8784f09a9c5a373cbba871b3e223e1b9c2c06491 | refs/heads/master | 2016-09-01T19:44:20.937676 | 2014-03-18T13:26:14 | 2014-03-18T13:26:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,191 | r | cptac_processing.R | # First part of the R code generating the graphs of the BBC 2013 poster
# This part handles the processing of the different text files
# The CPTAC data is made of 3 laboratories running in triplicate 7 samples
# QC1 containing only UPS protein we discard it. QC2 contains only yeast so it's interesting to keep it
# Sample A to E contain mostly yeast, with increasing concentrations of UPS48, a human protein
library(data.table)
inputPath <- "//uv2522.ugent.be/compomics/Andrea/CPTAC/"
outputPath <- "//uv2522.ugent.be/compomics/Nicolas/CPTAC/"
inputPath <- "/mnt/compomics/Andrea/CPTAC/"
outputPath <- "/mnt/compomics/Nicolas/CPTAC/"
# For one raw file name, join the MGF spectrum metadata, the Mascot
# rank-1 identifications and the Percolator rescoring results into a
# single data.table (one row per rank-1 PSM), tagged with its origin.
# Relies on the globals `inputPath` and data.table set up in the
# preamble above.
joinFiles <- function(fileName,...){
  # --- MGF: read line-by-line (sep="\n" keeps every line in V1) ---
  inputFile1 <- paste0(fileName, ".mgf")
  mgfDT <- fread(input = paste0(inputPath, inputFile1), sep="\n", header = FALSE)
  # pull the TITLE=, RTINSECONDS= and PEPMASS= header lines into one table
  rtDT <- data.table(mgfDT[grepl("TITLE=", V1)][, sub("TITLE=", "", V1)])
  rtDT[, rtsec := mgfDT[grepl("RTINSECONDS=", V1)][, sub("RTINSECONDS=", "",V1)]]
  rtDT[, pepmass := mgfDT[grepl("PEPMASS=", V1)][, sub("PEPMASS=", "",V1)]]
  # Some very rare cases where there is no intensity recorded
  rtDT <- rtDT[grepl(" ", pepmass)]
  # PEPMASS holds "m/z intensity"; split it into the two fields
  rtDT[, splitLine := strsplit(pepmass, " ")]
  rtDT[, pepMZ := lapply(splitLine, "[[", 1)]
  rtDT[, MS1_Intensity := lapply(splitLine, "[[", 2)]
  rtDT[, spectrum_id := V1]
  rtDT[, V1 := NULL]
  rtDT <- rtDT[, list(spectrum_id, rtsec, pepMZ, MS1_Intensity)]
  rtDT[, rtsec := as.double(rtsec)]
  rtDT[, pepMZ := as.double(pepMZ)]
  rtDT[, MS1_Intensity := as.double(MS1_Intensity)]
  # --- Mascot: keep rank-1 identifications only ---
  # the appended "_" makes the pattern also match ids that END in _rank1
  inputFile2 <- paste0(fileName,"_SvenSPyeast.dat.MASCOT")
  mascotDT <- fread(input = paste0(inputPath, inputFile2), header = TRUE, sep = "\t")
  mascotDT <- mascotDT[grepl("_rank1_", paste0(spectrum_id, "_"))]
  mascotDT[, spectrum_id := sub("_rank1", "", spectrum_id)]
  mascotDT[, query := as.character(query)]
  # keyed join of spectrum metadata onto Mascot hits by spectrum_id
  setkey(rtDT, spectrum_id)
  setkey(mascotDT, spectrum_id)
  outputDT <- rtDT[mascotDT]
  # --- Percolator: export has either 6 or 7 columns ---
  inputFile3 <- paste0(fileName, "_SvenSPyeast.dat.PERCOLATOR.csv")
  percolatorDT <- data.table(read.table(file = paste0(inputPath, inputFile3),
                                        header = FALSE, row.names = NULL,
                                        sep = "\t", fill = TRUE))
  if(NCOL(percolatorDT)==6){
    percolatorDT <- percolatorDT[, list(V1,V2,V3,V4,V5,V6)]
    # multiple proteins make new lines with a different structure
    # flag a PSM as multi-protein when the NEXT row is a continuation row
    percolatorDT[, multi_pro := !grepl("query", V1)]
    percolatorDT[1:(NROW(percolatorDT)-1), multi_pro := percolatorDT[2:NROW(percolatorDT), multi_pro]]
    percolatorDT <- percolatorDT[grepl("query", V1)]
  }else{
    # 7-column layout: a non-empty 7th field marks multi-protein PSMs
    percolatorDT <- percolatorDT[, list(V1,V2,V3,V4,V5,V6,V7)]
    percolatorDT <- percolatorDT[grepl("query", V1)]
    percolatorDT[, multi_pro := (V7!="")]
  }
  percolatorDT <- percolatorDT[, list(V1,V2,V3,V4,V5,V6, multi_pro)]
  percolatorDT[, query := as.character(sub("query:","",sub(";rank:1","",V1)))] # extracting the query number
  # rename V1..V6 to descriptive names
  # NOTE(review): eval(parse(...)) could be replaced by setnames(); kept as-is
  list_var=list("PSMId","score_percolator","q_value_percolator","pep_percolator","peptide","protein")
  for(i in 1:6){
    eval(parse(text=paste0("percolatorDT[,", list_var[i], ":= V", i, "]")))
    eval(parse(text=paste0("percolatorDT[,V", i, ":= NULL]")))
  }
  # keyed join of Percolator results onto the spectrum/Mascot table by query
  setkey(outputDT, query)
  setkey(percolatorDT, query)
  # NOTE(review): `test` is computed but unused (same join as the next line)
  test <- percolatorDT[outputDT]
  outputDT <- percolatorDT[outputDT]
  outputDT[, fileOrigin := fileName]
  return(outputDT)
}
inputList <- list(
"20080311_CPTAC6_04_6QC2",
"20080311_CPTAC6_07_6A005",
"20080311_CPTAC6_10_6B019",
"20080311_CPTAC6_13_6C012",
"20080311_CPTAC6_16_6D014",
"20080311_CPTAC6_19_6E010",
"20080311_CPTAC6_22_6QC1",
"20080313_CPTAC6_04_6QC2",
"20080313_CPTAC6_07_6A005",
"20080313_CPTAC6_10_6B019",
"20080313_CPTAC6_13_6C012",
"20080313_CPTAC6_16_6D014",
"20080313_CPTAC6_19_6E010",
"20080313_CPTAC6_22_6QC1",
"20080315_CPTAC6_04_6QC2",
"20080315_CPTAC6_07_6A005",
"20080315_CPTAC6_10_6B019",
"20080315_CPTAC6_13_6C012",
"20080315_CPTAC6_16_6D014",
"20080315_CPTAC6_19_6E010",
"20080315_CPTAC6_22_6QC1",
"mam_042408o_CPTAC_study6_6QC2",
"mam_042408o_CPTAC_study6_6A018",
"mam_042408o_CPTAC_study6_6B011",
"mam_042408o_CPTAC_study6_6C008",
"mam_042408o_CPTAC_study6_6D004",
"mam_042408o_CPTAC_study6_6E004",
"mam_042408o_CPTAC_study6_6QC1",
"mam_050108o_CPTAC_study6_6QC2",
"mam_050108o_CPTAC_study6_6A018",
"mam_050108o_CPTAC_study6_6B011",
"mam_050108o_CPTAC_study6_6C008",
"mam_050108o_CPTAC_study6_6D004",
"mam_050108o_CPTAC_study6_6E004",
"mam_050108o_CPTAC_study6_6QC1",
"mam_050108o_CPTAC_study6_6QC2_080504134857",
"mam_050108o_CPTAC_study6_6A018_080504183404",
"mam_050108o_CPTAC_study6_6B011_080504231912",
"mam_050108o_CPTAC_study6_6C008_080505040419",
"mam_050108o_CPTAC_study6_6D004_080505084927",
"mam_050108o_CPTAC_study6_6E004_080505133441",
"mam_050108o_CPTAC_study6_6QC1_080505181949",
"Orbi2_study6a_W080314_6QC2_yeast_ft8_pc",
"Orbi2_study6a_W080314_6B007_yeast_S48_ft8_pc",
"Orbi2_study6a_W080314_6C001_yeast_S48_ft8_pc",
"Orbi2_study6a_W080314_6D007_yeast_S48_ft8_pc",
"Orbi2_study6a_W080314_6E008_yeast_S48_ft8_pc",
"Orbi2_study6a_W080314_6QC1_sigma48_ft8_pc",
"Orbi2_study6b_W080321_6QC2_yeast_ft8_pc_01",
"Orbi2_study6b_W080321_6A013_yeast_S48_ft8_pc_01",
"Orbi2_study6b_W080321_6B007_yeast_S48_ft8_pc_01",
"Orbi2_study6b_W080321_6D007_yeast_S48_ft8_pc_01",
"Orbi2_study6b_W080321_6E008_yeast_S48_ft8_pc_01",
"Orbi2_study6b_W080321_6QC1_sigma48_ft8_pc_01",
"Orbi2_study6b_W080321_6QC2_yeast_ft8_pc_02",
"Orbi2_study6b_W080321_6A013_yeast_S48_ft8_pc_02",
"Orbi2_study6b_W080321_6B007_yeast_S48_ft8_pc_02",
"Orbi2_study6b_W080321_6C001_yeast_S48_ft8_pc_02",
"Orbi2_study6b_W080321_6D007_yeast_S48_ft8_pc_02",
"Orbi2_study6b_W080321_6E008_yeast_S48_ft8_pc_02",
"Orbi2_study6b_W080321_6QC1_sigma48_ft8_pc_02"
)
# Process every raw file and stack the per-file tables into one data.table.
result <- data.table(NULL)
for(fileName in inputList){
  print(fileName)  # progress indicator
  # NOTE(review): rbind-in-a-loop copies `result` every iteration;
  # rbindlist(lapply(inputList, joinFiles)) would avoid that.
  result <- rbind(result, joinFiles(fileName))
}
# persist the combined table both as compressed RData and as CSV
save(list = c("result"), file=paste0(outputPath, "CPTAC_processed.RData"),
     compress = "gzip", compression_level = 1)
write.csv(result, file=paste0(outputPath, "CPTAC_processed_V2.csv"))
84208c06937ec9553436ffabdede54b1a52d28c5 | 9952bbc11691a7c0c505bba4a4513610d8a542ad | /man/linf.Rd | bae95aba2ec7e7de0cdc18e9c08b5c9e743571ef | [] | no_license | benjilu/balancer | 9a6cbc24f4a4f87d2c9053791f0ce6befab09bd4 | 8af3cd9151c3d5156b6ee44b1252f86e16d489d9 | refs/heads/master | 2022-12-03T02:55:10.339926 | 2020-08-08T05:10:48 | 2020-08-08T05:10:48 | 274,989,853 | 0 | 0 | null | 2020-06-25T18:39:35 | 2020-06-25T18:39:34 | null | UTF-8 | R | false | true | 189 | rd | linf.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/balance_funcs.R
\name{linf}
\alias{linf}
\title{Infinity norm}
\usage{
linf(x)
}
\description{
Infinity norm
}
|
e104eb1f831ac9c384bf5499405995869b781cbb | effe14a2cd10c729731f08b501fdb9ff0b065791 | /cran/paws.customer.engagement/man/connect_update_user_security_profiles.Rd | af3b554d551bb5be1a637607888ddfe12966c72e | [
"Apache-2.0"
] | permissive | peoplecure/paws | 8fccc08d40093bb25e2fdf66dd5e38820f6d335a | 89f044704ef832a85a71249ce008f01821b1cf88 | refs/heads/master | 2020-06-02T16:00:40.294628 | 2019-06-08T23:00:39 | 2019-06-08T23:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,265 | rd | connect_update_user_security_profiles.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect_operations.R
\name{connect_update_user_security_profiles}
\alias{connect_update_user_security_profiles}
\title{Updates the security profiles assigned to the user}
\usage{
connect_update_user_security_profiles(SecurityProfileIds, UserId,
InstanceId)
}
\arguments{
\item{SecurityProfileIds}{[required] The identifiers for the security profiles to assign to the user.}
\item{UserId}{[required] The identifier of the user account to assign the security profiles.}
\item{InstanceId}{[required] The identifier for your Amazon Connect instance. To find the ID of your
instance, open the AWS console and select Amazon Connect. Select the
alias of the instance in the Instance alias column. The instance ID is
displayed in the Overview section of your instance settings. For
example, the instance ID is the set of characters at the end of the
instance ARN, after instance/, such as
10a4c4eb-f57e-4d4c-b602-bf39176ced07.}
}
\description{
Updates the security profiles assigned to the user.
}
\section{Request syntax}{
\preformatted{svc$update_user_security_profiles(
SecurityProfileIds = list(
"string"
),
UserId = "string",
InstanceId = "string"
)
}
}
\keyword{internal}
|
1ec0aae114a2f8f7bf9a207ccad981276525e083 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/mathgraph/examples/getpath.Rd.R | 063de5a2713e16c21d43ec871131dc194c78f7cb | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 398 | r | getpath.Rd.R | library(mathgraph)
### Name: getpath
### Title: Find a Path in a Mathematical Graph
### Aliases: getpath getpath.mathgraph getpath.incidmat getpath.adjamat
### getpath.default
### Keywords: math
### ** Examples
getpath(mathgraph(~ 1:3 / 3:5), 1, 5) # returns a path
getpath(mathgraph(~ 1:3 / 3:5), 1, 4) # no path, returns NULL
getpath(mathgraph(~ 1:3 / 3:5), 1, 1) # returns mathgraph()
|
76631748b1c93a1c2d56def676e9e9ae04106c52 | 52bbf633b0060e54570ef30d787d33e315375aeb | /man/update_price_info.Rd | 08a4ab06e7b29eefe7f513fbda51d088166eb299 | [] | no_license | shawnlinxl/ptdash | c8553bb480032ddb7e59cd2effb8b95b2d50075b | ab1b8c7f6f8c6fe7705a85b1ff43c23441cd0268 | refs/heads/master | 2021-01-01T16:14:24.715024 | 2017-08-24T02:10:56 | 2017-08-24T02:10:56 | 97,790,210 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 470 | rd | update_price_info.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/update_price_info.R
\name{update_price_info}
\alias{update_price_info}
\title{Create a daily return series.}
\usage{
update_price_info(dir_log_file = NULL)
}
\arguments{
\item{dir_log_file}{Directory of the log file. If not provided, the log
file is looked for at the default location.}
}
\description{
Create a daily return series based on the return on the historical date
instead of the current NAV.
}
|
9961887f8dedf096ca8982719d59cc5b378ebb77 | 704684551a7be001763bb41b16d410ea952a49df | /plot2.R | bd1d68c7b48580678eaea282596d995aba44565d | [] | no_license | sarahpenir/ExData_Plotting1 | 564d5fb8e6ef244300f9446a8acb517d4d0d01ad | bec66f1f7667357428567a4e408e1f67251b5019 | refs/heads/master | 2021-01-23T23:53:16.390428 | 2018-02-25T03:53:22 | 2018-02-25T03:53:22 | 122,743,081 | 0 | 0 | null | 2018-02-24T13:31:56 | 2018-02-24T13:31:56 | null | UTF-8 | R | false | false | 754 | r | plot2.R | ## Exploratory Data Analysis - Week 1
## Plot 2
# Load the data
# (Date/Time read as character, the 7 measurement columns as numeric;
#  '?' marks missing values)
table <- read.table("./household_power_consumption.txt", header = TRUE,
                    sep = ";", stringsAsFactors = FALSE,
                    colClasses=c("character","character", rep("numeric",7)), na.strings=c('?'))
# Fix date and time: combine the Date and Time columns into one
# POSIXlt DateTime column
table$DateTime <- strptime(paste(table$Date, table$Time), format="%d/%m/%Y %H:%M:%S")
# keep only the two days 2007-02-01 and 2007-02-02
table_subset <- table[table$DateTime>= as.POSIXlt("2007-02-01") & table$DateTime<as.POSIXlt("2007-02-03"),]
# Plot global active power over time as a line chart into plot2.png
png("plot2.png", width=480, height=480, units="px", bg="transparent")
plot(table_subset$DateTime, table_subset$Global_active_power, main="",
     ylab='Global Active Power (kilowatts)', type='l',xlab="")
dev.off()
|
9189f7aa5c7e014baa8e5c03231c9c3aa628e2b6 | 1eed97c0cc56a10d720a61ad0d208518a8e96568 | /post_processing/plotting_functions.R | f56e8efca956fb1c211bfd2ed0f7cb570a428224 | [] | no_license | llavin13/dispatch_RA_model | 846da29f6321c928bdca7b5a492c1dab1d43846a | 23a460c47ba7fe6a42e7888f8f1074bb7e780983 | refs/heads/master | 2021-07-13T13:43:58.198635 | 2020-03-19T13:45:30 | 2020-03-19T13:45:30 | 188,279,480 | 0 | 0 | null | 2020-07-30T18:08:17 | 2019-05-23T17:36:11 | Python | UTF-8 | R | false | false | 18,390 | r | plotting_functions.R | # Created on Thu Apr 18 08:46:48 2019
# @author: bsergi
library(ggplot2)
library(openxlsx)
library(plyr)
library(reshape2)
## Working directory and inputs ####
#baseWD <- "/Users/Cartographer/GAMS/dispatch_RA-master"
baseWD <- "C:/Users/llavi/Desktop/research/dispatch_RA-master"
setwd(paste(baseWD, "post_processing", sep="/"))
## Load model results ####
# note: need to change loop to include multiple days when running
# Read model outputs ("results") and model inputs ("inputs") for each
# requested date, stack them across days, and hand the combined tables
# to the three plotting helpers (plotLMPs, plotReserves, plotDispatch)
# defined elsewhere in this file. Relies on the global `baseWD` set in
# the preamble above.
processResults <- function(dates, plotTitle){
  setwd(baseWD)
  for(i in 1:length(dates)){
    date <- dates[i]
    # per-day folders are named "m.d.yyyy" (no zero padding)
    dateString <- paste(as.numeric(format(date, "%m")), as.numeric(format(date, "%d")), as.numeric(format(date, "%Y")), sep=".")
    setwd(paste(baseWD, dateString, "results", sep="/"))
    # LMP, reserves, and VRE results
    modelLMPtemp <- read.csv("zonal_prices.csv")
    reservestemp <- read.csv("reserve_segment_commit.csv")
    VREtemp <- read.csv("renewable_generation.csv")
    dispatchTemp <- read.csv("generator_dispatch.csv")
    modelLMPtemp$date <- date; reservestemp$date <- date; VREtemp$date <- date; dispatchTemp$date <- date
    # zonal loads, ordc shape, and generator types
    setwd(paste(baseWD, dateString, "inputs", sep="/"))
    zonalLoadtemp <- read.csv("timepoints_zonal.csv")
    ordctemp <- read.csv("full_ordc.csv")
    gensTemp <- read.csv("PJM_generators_full.csv")
    gensTemp <- gensTemp[,c("Name", "Zone", "Category")] # subset generator columns
    zonalLoadtemp$date <- date; ordctemp$date <- date
    # first day initialises the accumulators; later days are appended
    if(i == 1){
      modelLMP <- modelLMPtemp
      VRE <- VREtemp
      reserves <- reservestemp
      dispatch <- dispatchTemp
      zonalLoad <- zonalLoadtemp
      ordc <- ordctemp
      gens <- gensTemp
    } else{
      modelLMP <- rbind(modelLMP, modelLMPtemp)
      VRE <- rbind(VRE, VREtemp)
      reserves <- rbind(reserves, reservestemp)
      zonalLoad <- rbind(zonalLoad, zonalLoadtemp)
      ordc <- rbind(ordc, ordctemp)
      dispatch <- rbind(dispatch, dispatchTemp)
      gens <- rbind(gens, gensTemp)
      # remove duplicate generations
      gens <- gens[!duplicated(gens),]
    }
    rm(modelLMPtemp); rm(VREtemp); rm(reservestemp); rm(zonalLoadtemp); rm(ordctemp); rm(gensTemp)
  }
  # subset to reserve segments 1-10 and hours 1-24
  ordc_segments <- 10
  hours <- 24
  plotLMPs(dates, modelLMP, zonalLoad, plotTitle)
  plotReserves(ordc_segments, hours, dates, reserves, ordc, plotTitle)
  plotDispatch(dates, dispatch, VRE, gens, hours, plotTitle)
}
## LMPs ####
# Read historically reported PJM LMPs from lmp_historical.csv and
# subset them to the requested `dates` (a vector of POSIX dates).
# Returns a data frame with added `datetime`, `date` and `hour` columns.
readPJM_LMPs <- function(dates){
  setwd(paste(baseWD, "post_processing", sep="/"))
  reportedLMPs <- read.csv("lmp_historical.csv")
  # parse the "m/d/Y H:M" hour-ending timestamps
  reportedLMPs$datetime <- as.POSIXct(reportedLMPs[,"Local.Datetime..Hour.Ending."], format="%m/%d/%Y %H:%M")
  #reportedLMPs$datetime <- reportedLMPs$datetime + 1*3600 #get the times in same
  reportedLMPs$date <-format(reportedLMPs$datetime, "%m-%d-%y")
  reportedLMPs$hour <- as.numeric(format(reportedLMPs$datetime, "%H"))
  # keep only rows whose date matches one of the requested dates
  reportedLMPsub <- reportedLMPs[reportedLMPs$date %in% format(dates, "%m-%d-%y"),]
  reportedLMPsub$date <- as.POSIXct(reportedLMPsub$date, format="%m-%d-%y")
  return(reportedLMPsub)
}
# Load the comparison run's zonal prices and loads for each day in `dates`,
# then format them into a single LMP table (including a load-weighted PJM-wide
# series) tagged with a `source` label so plotLMPs() can overlay it.
# NOTE(review): despite the name, the active paths point at the
# "Jan2014_withORDC" run (the "No ORDC input files" paths are commented out)
# and the source tag reads "model: updated zones" -- confirm which comparison
# case is actually intended.
loadNoORDC <- function(dates){
setwd(baseWD)
for(i in 1:length(dates)){
date <- dates[i]
# "<m>.<d>.<Y>" run-folder name for this day
dateString <- paste(as.numeric(format(date, "%m")), as.numeric(format(date, "%d")), as.numeric(format(date, "%Y")), sep=".")
#setwd(paste(baseWD, "No ORDC input files", dateString, "results", sep="/"))
setwd(paste(baseWD, "Jan2014_withORDC", dateString, "results", sep="/"))
# LMP and zonal loads for no_ordc case
modelLMPtemp <- read.csv("zonal_prices.csv")
modelLMPtemp$date <- date
#setwd(paste(baseWD, "No ORDC input files", dateString, "inputs", sep="/"))
setwd(paste(baseWD, "Jan2014_withORDC", dateString, "inputs", sep="/"))
zonalLoadtemp <- read.csv("timepoints_zonal.csv")
zonalLoadtemp$date <- date
# accumulate one data.frame across days
if(i == 1){
modelLMP <- modelLMPtemp
zonalLoad <- zonalLoadtemp
} else{
modelLMP <- rbind(modelLMP, modelLMPtemp)
zonalLoad <- rbind(zonalLoad, zonalLoadtemp)
}
}
#formatting of model LMP
colnames(modelLMP)[1] <- "Node"
# attach gross load so the PJM-wide average below can be load-weighted
modelLMP <- merge(modelLMP, zonalLoad[,c("date", "timepoint", "zone", "gross_load")],
by.x=c("date", "hour", "Node"), by.y=c("date", "timepoint", "zone"), all=T, sort=F)
# calculated weighted average across zones for PJM-wide LMP
PJM_LMP <- ddply(modelLMP, ~ date + hour, summarize, LMP = sum(gross_load * LMP / sum(gross_load)), gross_load = sum(gross_load))
PJM_LMP$Node <- "PJM"
PJM_LMP <- PJM_LMP[,c("date", "hour", "Node", "LMP", "gross_load")]
modelLMP <- rbind(modelLMP, PJM_LMP)
# rename model zones to match PJM's reported node labels
modelLMP$Node <- mapvalues(modelLMP$Node, from=c("DC_BGE_PEP", "PA_METED_PPL"), to=c("BGE", "PPL"))
# merge reported and modeled data
#modelLMP$source <- "model: no ORDC"
modelLMP$source <- "model: updated zones"
modelLMP$datetime <- with(modelLMP, paste(date, hour))
modelLMP$datetime <- as.POSIXct(modelLMP$datetime, format = "%Y-%m-%d %H")
return(modelLMP)
}
# Plot modeled (with and without ORDC) vs. PJM-reported LMPs by zone and save
# the figure to <baseWD>/post_processing/figures/LMPs <plotTitle>.png.
#
# Args:
#   dates: POSIXct vector of modeled days.
#   modelLMP: model zonal price table (first column = zone; hour, LMP, date).
#   zonalLoad: timepoints_zonal.csv rows (date, timepoint, zone, gross_load).
#   plotTitle: label appended to the output file name.
plotLMPs <- function(dates, modelLMP, zonalLoad, plotTitle){
reportedLMPsub <- readPJM_LMPs(dates)
#formatting of model LMP
colnames(modelLMP)[1] <- "Node"
# attach zonal gross load so a load-weighted PJM-wide average can be formed
modelLMP <- merge(modelLMP, zonalLoad[,c("date", "timepoint", "zone", "gross_load")],
by.x=c("date", "hour", "Node"), by.y=c("date", "timepoint", "zone"), all=T, sort=F)
# calculated weighted average across zones for PJM-wide LMP
PJM_LMP <- ddply(modelLMP, ~ date + hour, summarize, LMP = sum(gross_load * LMP / sum(gross_load)), gross_load = sum(gross_load))
PJM_LMP$Node <- "PJM"
PJM_LMP <- PJM_LMP[,c("date", "hour", "Node", "LMP", "gross_load")]
modelLMP <- rbind(modelLMP, PJM_LMP)
# rename model zones to PJM's reported node labels
modelLMP$Node <- mapvalues(modelLMP$Node, from=c("DC_BGE_PEP", "PA_METED_PPL"), to=c("BGE", "PPL"))
reportedLMPsub <- reportedLMPsub[, c("date", "hour", "Price.Node.Name", "Price...MWh")]
reportedLMPsub$gross_load <- NA
colnames(reportedLMPsub) <- c("date", "hour", "Node", "LMP", "gross_load")
reportedLMPsub$Node <- mapvalues(reportedLMPsub$Node,
from=c("PJM-RTO ZONE", "DOMINION HUB", "EASTERN HUB", "WESTERN HUB"),
to=c("PJM", "VA_DOM", "EAST", "WEST"))
# merge reported and modeled data
modelLMP$source <- "model"
reportedLMPsub$source <- "reported"
fullLMP <- rbind(modelLMP, reportedLMPsub)
fullLMP$datetime <- with(fullLMP, paste(date, hour))
fullLMP$datetime <- as.POSIXct(fullLMP$datetime, format = "%Y-%m-%d %H")
fullLMP$source <- mapvalues(fullLMP$source, from="model", to="model: ORDC")
# add the comparison run loaded by loadNoORDC() as a third source series
noORDCresults <- loadNoORDC(dates)
fullLMP <- rbind(fullLMP, noORDCresults)
#write.csv(fullLMP, file = "LMPprint.csv")
# Keep the plot object and pass it to ggsave() explicitly: inside a function
# the plot is never auto-printed, so last_plot() (ggsave's default) would be
# whatever plot happened to be displayed last, not this one.
p <- ggplot(data=fullLMP, aes(x=datetime, y=LMP, colour=Node, linetype=source)) + facet_wrap(~Node) +
geom_line() + theme_classic() + xlab("") + ylab("LMP ($ per MWh)") +
guides(colour=guide_legend(title="Zone"),
linetype=guide_legend(title="")) + coord_cartesian(ylim=c(0, 1000)) + # truncate price spikes at $1000
scale_linetype_manual(values=c(2,3,1)) +
theme(text=element_text(size=12),
axis.text=element_text(size=10))
setwd(paste(baseWD, "post_processing", "figures", sep="/"))
ggsave(paste0("LMPs ", plotTitle, ".png"), plot = p, width=10, height=6)
}
## Reserve pricing ####
# Return the rows of `reserves` whose ORDC segment sets the reserve price
# (the marginal segment in each date/hour: used, and not fully passed by
# total procurement). `plotTitle` is accepted for a signature consistent with
# plotReserves() but is not used here.
# NOTE(review): the rep()-based timepoint/segment assignment assumes the rows
# of `reserves` arrive in exactly segment-major, hour-minor order per day --
# confirm against reserve_segment_commit.csv before reusing elsewhere.
getReservePrices <- function(ordc_segments, hours, dates, reserves, ordc, plotTitle){
# enumerate the "segment,hour" labels used in the raw output's X column
reserve_set <- as.data.frame(expand.grid(segment=1:ordc_segments, hour=1:hours))
reserve_set <- reserve_set[with(reserve_set, order(segment, hour)),]
reserve_set$set <- factor(with(reserve_set, paste(segment, hour, sep=",")))
reserves <- reserves[reserves$X %in% reserve_set$set,]
# recover numeric hour and segment indices from the row order
reserves$timepoint <- rep(1:hours, ordc_segments*length(dates))
reserves$segments <- rep(rep(1:ordc_segments, each=hours), length(dates))
#reserves <- merge(reserves, reserve_set, by.x="X", by.y="segment", all=T)
reserves$X <- NULL
# attach the ORDC segment definitions (MW width, Price) per date/hour/segment
reserves <- merge(reserves, ordc, by=c("date", "timepoint", "segments"), all.x=T)
reserves <- reserves[with(reserves, order(date, timepoint, segments)),]
# per date/hour: running curve width, running procurement, and total procured
reserves <- ddply(reserves, ~ date + timepoint, transform,
cumulativeReserve = cumsum(MW),
cumulativeProcured = cumsum(MW.on.reserve.segment),
procured = sum(MW.on.reserve.segment))
# a segment is price-setting unless it is empty or fully passed by procurement
reserves$priceFlag <- ifelse(reserves$procured > reserves$cumulativeReserve | reserves$MW.on.reserve.segment == 0, F, T)
reserves$datetime <- with(reserves, paste(date, timepoint))
reserves$datetime <- as.POSIXct(reserves$datetime, format = "%Y-%m-%d %H")
procured <- reserves[reserves$priceFlag,]
return(procured)
}
# reformat and merge (add to function later)
# Plot hourly reserve procurement stacked by ORDC segment, with the marginal
# reserve price overlaid on a secondary axis, and save the figure to
# <baseWD>/post_processing/figures/Reserves <plotTitle>.png.
# Arguments mirror getReservePrices(); this function repeats that processing
# inline because it also needs the full `reserves` table for the bars.
# NOTE(review): the rep()-based timepoint/segment assignment assumes the rows
# of `reserves` arrive in exactly segment-major, hour-minor order per day.
plotReserves <- function(ordc_segments, hours, dates, reserves, ordc, plotTitle){
# enumerate the "segment,hour" labels used in the raw output's X column
reserve_set <- as.data.frame(expand.grid(segment=1:ordc_segments, hour=1:hours))
reserve_set <- reserve_set[with(reserve_set, order(segment, hour)),]
reserve_set$set <- factor(with(reserve_set, paste(segment, hour, sep=",")))
reserves <- reserves[reserves$X %in% reserve_set$set,]
# recover numeric hour and segment indices from the row order
reserves$timepoint <- rep(1:hours, ordc_segments*length(dates))
reserves$segments <- rep(rep(1:ordc_segments, each=hours), length(dates))
#reserves <- merge(reserves, reserve_set, by.x="X", by.y="segment", all=T)
reserves$X <- NULL
# attach the ORDC segment definitions (MW width, Price)
reserves <- merge(reserves, ordc, by=c("date", "timepoint", "segments"), all.x=T)
reserves <- reserves[with(reserves, order(date, timepoint, segments)),]
reserves <- ddply(reserves, ~ date + timepoint, transform,
cumulativeReserve = cumsum(MW),
cumulativeProcured = cumsum(MW.on.reserve.segment),
procured = sum(MW.on.reserve.segment))
# a segment sets the price if it is used and not fully passed by procurement
reserves$priceFlag <- ifelse(reserves$procured > reserves$cumulativeReserve | reserves$MW.on.reserve.segment == 0, F, T)
reserves$datetime <- with(reserves, paste(date, timepoint))
reserves$datetime <- as.POSIXct(reserves$datetime, format = "%Y-%m-%d %H")
procured <- reserves[reserves$priceFlag!=0,]
# scale for secondary price axis
scale <- max(procured$Price) / max(reserves$cumulativeProcured)
# Keep the plot object and pass it to ggsave() explicitly: inside a function
# the plot is never auto-printed, so last_plot() (ggsave's default) would be
# whatever plot happened to be displayed last, not this one.
p <- ggplot(reserves, aes(x=datetime, y=MW.on.reserve.segment, fill=segments)) + geom_bar(stat='identity', size=0) +
geom_line(data=procured, aes(x=datetime, y=Price/scale ), colour='red') +
#geom_point(data=procured, aes(x=timepoint, y=Price*scale), colour='red', shape=4) +
scale_y_continuous(sec.axis = sec_axis(~.*scale, name="Reserve price ($ per MW)")) +
coord_cartesian(ylim=c(0, max(reserves$cumulativeProcured)*1.05)) +
xlab("") + ylab("Reserves procured (MW)") +
guides(fill=guide_legend(title="ORDC\nsegment"),
shape=guide_legend()) +
scale_fill_gradient(breaks=rev(c(1:10))) + theme_classic() +
theme(text=element_text(size=12),
axis.text=element_text(size=10))
setwd(paste(baseWD, "post_processing", "figures", sep="/"))
ggsave(paste0("Reserves ", plotTitle, ".png"), plot = p, width=10)
}
## Generation dispatch ####
# Plot hourly generation by fuel category, per zone and PJM-wide, and save the
# figure to <baseWD>/post_processing/figures/dispatch <plotTitle>.png, then
# save the workspace image under <baseWD>/post_processing.
#
# Args:
#   dates: POSIXct vector of the modeled days (kept for a signature
#          consistent with the other plot helpers; not used directly).
#   dispatch: generator_dispatch.csv rows (one row per "<zone>-<plant>" id,
#             one column per hour, plus a trailing `date` column).
#   VRE: renewable_generation.csv rows in wide form (date, timepoint, zone, ...).
#   gens: generator attribute table with at least Name and Category columns.
#   hours: number of hourly columns to keep from `dispatch`.
#   plotTitle: label used in the figure title and output file name.
plotDispatch <- function(dates, dispatch, VRE, gens, hours, plotTitle){
# keep the first `hours` hourly columns plus the trailing `date` column
dispatch <- dispatch[, c(1:(hours+1), dim(dispatch)[2])]
colnames(dispatch) <- c("id", 0:(hours-1), "date")
# split the "<zone>-<plant>" id into its two parts
dispatch$zone <- gsub("-[[:print:]]*", "", dispatch[,1])
dispatch$plant <- gsub("[[:print:]]*-", "", dispatch[,1])
dispatch[,"id"] <- NULL
# wide (one column per hour) -> long (one row per plant-hour)
dispatch <- melt(dispatch, id.vars=c("date", "zone", "plant"))
colnames(dispatch) <- c("date", "zone", "plant", "hour", "MW")
# match with fuel type
dispatch <- merge(dispatch, gens[,c("Name", "Category")], by.x="plant", by.y="Name", all.x=T)
# summarize by fuel type
fuelDispatch <- ddply(dispatch, ~ date + hour + zone + Category, summarize, MW = sum(MW))
fuelDispatch$zone <- factor(fuelDispatch$zone)
# add in renewable generation and curtailment (reported per zone/timepoint)
VRE <- melt(VRE, id.vars = c("date", "timepoint", "zone"))
colnames(VRE) <- c("date", "hour", "zone", "Category", "MW")
VRE$hour <- factor(VRE$hour - 1) # timepoints are 1-based; dispatch hours are 0-based
fuelDispatch <- rbind(fuelDispatch, VRE)
fuelDispatch$datetime <- as.POSIXct(with(fuelDispatch, paste(date, hour)), format = "%Y-%m-%d %H")
# fix the stacking order of the area plot (curtailment on top, hydro at the bottom)
fuelDispatch$Category <- factor(fuelDispatch$Category, levels = c("curtailment", "DR", "wind", "solar", "DS",
"CT", "CC", "ST", "NU", "HD", NA))
# calculate PJM-wide totals and append them as a pseudo-zone
PJM_dispatch <- ddply(fuelDispatch, ~ datetime + Category, summarize, MW = sum(MW))
PJM_dispatch$zone <- "All PJM"
fuelDispatch <- fuelDispatch[,c("datetime", "Category", "MW", "zone")]
fuelDispatch <- rbind(fuelDispatch, PJM_dispatch)
# (removed: an unused `curtailedPower` subset and a bare `fuelDispatch` line,
# which is a no-op inside a function -- auto-printing only happens at top level)
fuelDispatch$Category <- droplevels(fuelDispatch$Category)
# Keep the plot object and pass it to ggsave() explicitly: inside a function
# the plot is never auto-printed, so last_plot() (ggsave's default) would be
# whatever plot happened to be displayed last, not this one.
p <- ggplot(data=fuelDispatch, aes(x=datetime, y=MW/1E3, fill=Category)) + geom_area() + facet_wrap(~zone, nrow=3, scales = "free") +
theme_classic() + ylab("GW") + guides(fill=guide_legend(title="")) + xlab("") +
scale_x_datetime() +
ggtitle(paste("Generation by fuel for", plotTitle))
setwd(paste(baseWD, "post_processing", "figures", sep="/"))
ggsave(paste0("dispatch ", plotTitle, ".png"), plot = p, width=12, height=12)
setwd(paste(baseWD, "post_processing", sep="/"))
# Save relative to the directory set just above, so the script is not tied to
# one user's machine (was a hard-coded "C:/Users/llavi/..." absolute path).
save.image("Results both ORDC.RData")
}
## Main ####
# Run the full results pipeline for the two study weeks. Keep each week's date
# vector in its own variable: the price-delta section below references
# `dates1` (January) and `dates2` (October), which were previously never
# defined anywhere in this file.
dates1 <- seq(as.POSIXct("1/4/2014", format = "%m/%d/%Y"), by="day", length.out=7)
processResults(dates1, plotTitle="Jan. 4-10, 2014")
dates2 <- seq(as.POSIXct("10/19/2017", format = "%m/%d/%Y"), by="day", length.out=7)
processResults(dates2, plotTitle="Oct 19-25, 2017")
# preserve the old name for any downstream code that still uses `dates`
# (it previously held the last-assigned, i.e. October, vector)
dates <- dates2
### CODE ADDED FROM BRIAN 5.21.19 ###
## Price deltas ####
# load prices
# NOTE(review): `dates1` and `dates2` are presumably the January and October
# date vectors -- confirm they are defined upstream before this section runs.
pjmLMPs1 <- readPJM_LMPs(dates1)
pjmLMPs2 <- readPJM_LMPs(dates2)
# some formatting of pricing data
# function to calculate zonal differences
# Compute hourly price differences ("from" minus "to") across each mapped pair
# of zones, for both PJM-reported and modeled prices.
#
# Args:
#   modelPrices: model LMP table (col 1 = zone; date, hour in 1-24, LMP).
#   actualPrices: output of readPJM_LMPs() (PJM node names, datetime, $/MWh).
#   zoneMapping: data.frame with columns `from` and `to` naming zone pairs.
# Returns a data.frame keyed by datetime/from/to with both sides' actual and
# modeled prices plus delta_actual and delta_model columns.
calcZonalDifference <- function(modelPrices, actualPrices, zoneMapping){
  # --- reported prices: keep the columns we need and switch to model zone names
  actual <- actualPrices[, c("Price.Node.Name", "datetime", "Price...MWh")]
  colnames(actual) <- c("node", "datetime", "price")
  actual$node <- mapvalues(actual$node,
                           from=c("DOMINION HUB", "EASTERN HUB", "WESTERN HUB", "BGE", "PPL"),
                           to=c("VA_DOM", "EAST", "WEST", "DC_BGE_PEP", "PA_METED_PPL"))
  # the RTO-wide price has no single zone, so it cannot enter a zonal difference
  actual <- actual[actual$node != "PJM-RTO ZONE", ]
  # --- modeled prices: align hour convention with PJM and build a datetime key
  model <- modelPrices
  model$hour <- model$hour - 1          # model hours are 1-24; PJM uses 0-23
  model <- model[model$hour < 24, ]     # drop the smoothing hours appended past each day
  model$datetime <- as.POSIXct(paste(model$date, model$hour), format = "%Y-%m-%d %H")
  colnames(model)[1] <- "node"
  model <- model[, c("node", "datetime", "LMP")]
  # --- attach prices for each side of every zone pair; the merge order fixes
  # the .x (= "from" side) / .y (= "to" side) suffixes selected below
  deltas <- merge(zoneMapping, actual, by.x="from", by.y="node", all.x=T)
  deltas <- merge(deltas, actual, by.x=c("to", "datetime"), by.y=c("node", "datetime"), all.x=T)
  deltas <- merge(deltas, model, by.x=c("from", "datetime"), by.y=c("node", "datetime"), all.x=T)
  deltas <- merge(deltas, model, by.x=c("to", "datetime"), by.y=c("node", "datetime"), all.x=T)
  deltas <- deltas[with(deltas, order(datetime, from, to)), ]
  deltas <- deltas[, c("datetime", "from", "to", "price.x", "price.y", "LMP.x", "LMP.y")]
  colnames(deltas) <- c("datetime", "from", "to", "price_actual_from", "price_actual_to", "price_model_from", "price_model_to")
  # difference convention: from-zone minus to-zone
  deltas$delta_actual <- deltas$price_actual_from - deltas$price_actual_to
  deltas$delta_model <- deltas$price_model_from - deltas$price_model_to
  deltas
}
# function to plot price deltas
# Plot actual vs. modeled hourly price differentials for every zone interface
# and save the figure to <baseWD>/post_processing/figures/<plotTitle>.
#
# Args:
#   priceDeltas: output of calcZonalDifference().
#   plotTitle: output file name (including extension), passed to ggsave().
plotDeltas <- function(priceDeltas, plotTitle){
priceDeltas <- priceDeltas[,c("datetime", "from", "to", "delta_actual", "delta_model")]
# long format: one row per datetime/interface/series for ggplot
priceDeltas <- melt(priceDeltas, id.vars = c("datetime", "from", "to"))
priceDeltas$interface <- with(priceDeltas, paste(from, "to", to))
# Keep the plot object and pass it to ggsave() explicitly: inside a function
# the plot is never auto-printed, so last_plot() (ggsave's default) would be
# whatever plot happened to be displayed last, not this one.
p <- ggplot(priceDeltas, aes(x=datetime, y=value, linetype=variable, color=variable)) + geom_line() +
facet_wrap(~interface) + xlab("") + ylab("Price differential ($ per MWh)") + # typo fix: was "differntial"
guides(linetype=guide_legend(title=""), color=guide_legend(title="")) + #theme_classic() +
scale_color_manual(breaks = c("delta_actual", "delta_model"), values=c("blue", "red"), labels = c("actual", "model")) +
scale_linetype_manual(breaks = c("delta_actual", "delta_model"), values=c(1,2), labels = c("actual", "model")) +
theme(legend.position = "top")
setwd(paste(baseWD, "post_processing", "figures", sep="/"))
ggsave(plotTitle, plot = p, width=8, height=6)
}
# call price delta functions
# Transmission linkages
# EAST_to_PA_METED_PPL
# WEST_to_DC_BGE_PEP
# WEST_to_VA_DOM
# WEST_to_PA_METED_PPL
# DC_BGE_PEP_to_VA_DOM
# DC_BGE_PEP_to_PA_METED_PPL
# one row per modeled transmission interface (from-zone, to-zone)
txMapping <- data.frame(from=c("EAST", "WEST", "WEST", "WEST", "DC_BGE_PEP", "DC_BGE_PEP"),
to=c("PA_METED_PPL", "DC_BGE_PEP", "VA_DOM", "PA_METED_PPL", "VA_DOM", "PA_METED_PPL"))
# NOTE(review): `modelLMPs1`/`modelLMPs2` are never created in this file --
# presumably the model LMP tables for the January and October runs; confirm
# where they are meant to come from (e.g. a saved workspace) before running.
priceDeltas1 <- calcZonalDifference(modelLMPs1, pjmLMPs1, txMapping)
plotDeltas(priceDeltas1, "Price deltas - January.pdf")
priceDeltas2 <- calcZonalDifference(modelLMPs2, pjmLMPs2, txMapping)
plotDeltas(priceDeltas2, "Price deltas - October.pdf")
## Save ####
# NOTE(review): plotDeltas() leaves the working directory at
# <baseWD>/post_processing/figures, so Results.RData is written there.
save.image("Results.RData")
|
b186f38798478387d03f99377bb4a483593b1f6b | 1d960c31be6bd0f629ebb41cbaa834ba3a2a93d7 | /man/reduceNbTimes.Rd | 937e0e62463efcc93575ab96e2979a77d0f10c08 | [] | no_license | cran/kmlShape | 04c89c8ab9160f9a171885bb013d4ab37fc4b7bd | 821047dcd8ddd8309736ec9d6a9c14c8605bee25 | refs/heads/master | 2021-01-10T13:17:09.852222 | 2016-03-05T00:22:43 | 2016-03-05T00:22:43 | 53,172,084 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,658 | rd | reduceNbTimes.Rd | \name{reduceNbTimes}
\alias{reduceNbTimes}
\title{
~ Function: reduceNbTimes ~
}
\description{
\code{reduceNbTimes} simplifies trajectories (in long format) by reducing their number of points.
}
\usage{
reduceNbTimes(trajLong, nbPoints, spar=NA)
}
\arguments{
\item{trajLong}{[\code{data.frame}]: \code{data.frame} that hold the
trajectories in long format. The data.frame has to be
(no choice!) in the following format: the first column should be the individual
indentifiant. The second should be the times at which the measurement
are made. The third one should be the measurement.}
% \item{idCol}{[\code{character}]: Name of the column that hold the
% individual's indentifiant.}
% \item{timesCol}{[\code{character}]: Name of the column that hold the
% times at which the measurement are made.}
% \item{varyingCol}{[\code{character}]: Name of the column that hold the
% measurement.}
\item{nbPoints}{[\code{numeric}]: fixe the number of that the
simplified trajectories should have.}
\item{spar}{[\code{numeric}]: smoothing parameter that is used if the
trajectories shall be smoothed before being simplified.}
}
\details{
\code{reduceNbTimes} simplifies trajectories by reducing their number of points.
The trajectories should be in long format.
If a value is given to \code{spar} (different from NA), trajectories are smoothed using \code{\link[stats]{smooth.spline}}.
The reduction of the number of points is done using a variation of the
\link[=DouglasPeucker]{Douglas-Peucker} algorithm based on the number of points
instead of an epsilon.
}
\value{
A \code{data.frame} holding the simplified trajectories, in long format.
}
\author{
Christophe Genolini
}
\seealso{
\code{\link{reduceNbTimes}}, \code{\link{DouglasPeuckerEpsilon}}, \code{\link{DouglasPeuckerNbPoints}}
}
\examples{
require(lattice)
### Some artificial data
g <- function(x)dnorm(x,3)+dnorm(x,7)+x/10
dn <- data.frame(id=rep(1:20,each=101),
times=rep((0:100)/10,times=20),
traj=rep(g((0:100)/10),20)+rep(runif(20),each=101)+rnorm(20*101,,0.1))
xyplot(traj ~ times, data=dn, groups=id,type="l")
### Reduction to 50 points
dn2 <- reduceNbTimes(trajLong=dn,nbPoints=50)
xyplot(traj ~ times, data=dn2, groups=id,type="l")
### Reduction to 20 points
dn3 <- reduceNbTimes(trajLong=dn,nbPoints=20)
xyplot(traj ~ times, data=dn3, groups=id,type="l")
### Smoothing then reduction to 20 points
dn4 <- reduceNbTimes(trajLong=dn,nbPoints=20,spar=0.5)
xyplot(traj ~ times, data=dn4, groups=id,type="l")
} |
86acecb5cd4012795aacf6b75226ff374fd0442f | 9ba6209679be367a8342419a0f58c85a7e7795a0 | /bak/1480894522.R | cca60f993f48142f90b1f721f216596a617cd22e | [] | no_license | sulaxd/data4course | c88eaa6793a40535df02620094c702f249ff9393 | 793da50f804f11be699bd9cc6f861fa3c4cc5912 | refs/heads/master | 2020-06-16T14:46:34.085007 | 2019-07-17T13:56:22 | 2019-07-17T13:56:22 | 195,613,181 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 219 | r | 1480894522.R | fit_pam1 <- pam(newiris, 3)
# Cross-tabulate true iris species against the 3 PAM clusters
# (assumes `fit_pam1` was created above with cluster::pam and that `newiris`
# is a label-free copy of iris -- confirm upstream).
table(iris$Species, fit_pam1$cluster)
# sepal scatter colored by assigned cluster, with the cluster medoids overlaid
plot(newiris[c("Sepal.Length", "Sepal.Width")], col=fit_pam1$cluster)
points(fit_pam1$medoids[,c("Sepal.Length", "Sepal.Width")], col=1:3, pch=8, cex=2)
|
761ad4793bc05460a0fb34494994a1b8993b6b83 | 2477d74e904cd1e64e8a4108082957ee4d733132 | /R/distance_parser.R | 8d2ea48a3ad8a07c0be0c4140398d20c03212fe7 | [] | no_license | joranE/fispdfparsr | 9a9e6a8f9bc67ea7c335a98c34799a42b6bb7171 | 45c2dbc7f0b4879b7d1d5e0958ef54b4da9cdb01 | refs/heads/master | 2021-09-09T01:21:20.457795 | 2021-09-03T18:05:18 | 2021-09-03T18:05:18 | 75,509,565 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,754 | r | distance_parser.R | #' Parse Distance Race PDFs
#'
#' Convert FIS distance result PDFs into a format more
#' suitable for analysis. All times are converted to seconds.
#'
#' @return A data.frame; specifically a \code{\link[dplyr]{tbl_df}}
#'
#' @param file file path to PDF of distance results
#' @param race_distance numeric; race distance in km
#' @param long_mass boolean; flag for handling long mass start races
#' @param edit boolean; if TRUE, interactively review each parsed table and
#'   edit (1), keep (2) or drop (3) it before cleaning
#' @param opa boolean; if TRUE, clean the tables as OPA Cup results
#' @param ... further arguments passed on to \code{parse_pdf}
#' @export
#' @import tidyr
#' @examples
#' \dontrun{
#' dst <- parse_dst_pdf(file = system.file("example_pdfs/dst_example1.pdf",
#' package = "fispdfparsr"),15)
#' }
parse_dst_pdf <- function(file = NULL,race_distance,long_mass = FALSE,edit = FALSE,opa = FALSE,...){
  if (is.null(file)){
    stop("Must provide file path for race PDF.")
  }
  if (is.null(race_distance)){
    stop("Must provide race distance (in km).")
  }
  #Read tables from final PDF
  tbls <- parse_pdf(file = file,method = "stream",output = "matrix",...)
  if (edit){
    # Review the tables interactively. Dropped tables are only *marked* here
    # and removed after the loop: deleting tbls[[i]] while iterating over
    # seq_along(tbls) shifts the remaining elements, so later iterations
    # would review the wrong table and finally index past the end of the list.
    keep <- rep(TRUE, length(tbls))
    for (i in seq_along(tbls)){
      print(tbls[[i]])
      cat("\n")
      choice <- readline(prompt = "Edit (1), ok (2) or drop (3)? ")
      if (choice == "2") next
      if (choice == "3"){
        keep[i] <- FALSE
      } else {
        # `edit` in call position resolves to utils::edit(); R skips the
        # logical `edit` argument when looking up a function by this name
        tbls[[i]] <- edit(tbls[[i]])
      }
    }
    tbls <- tbls[keep]
  }
  if (opa){
    result <- dst_clean_opa(tbls = tbls,race_distance = race_distance)
  }else {
    #Escape hatch for 30k/50k mass start races that are more similar to
    #stage races in that they have bonus seconds at splits
    if (long_mass){
      result <- dst_clean_mass(tbls = tbls,race_distance = race_distance)
    }else{
      result <- dst_clean(tbls = tbls,race_distance = race_distance)
    }
  }
  result
}
|
ce586e85f13668875e3f9fa77a85277f3f01c67b | 42a2ec53d27a7856122ffc079634174f01dffcd3 | /creating_boundness_metric.R | 2b1a7d38f77fe1f5ad36332ef9df1a57ef3e4dfd | [
"CC-BY-4.0"
] | permissive | OlenaShcherbakova/Sociodemographic_factors_complexity | 84481c368369e4882951321164b363670797c074 | 559f590633699ae8f4c926d4da43d8d1a4eb7716 | refs/heads/main | 2023-06-23T23:39:26.516716 | 2023-06-15T07:05:39 | 2023-06-15T07:05:39 | 552,730,271 | 1 | 0 | CC-BY-4.0 | 2023-06-13T08:17:36 | 2022-10-17T06:23:59 | R | UTF-8 | R | false | false | 1,495 | r | creating_boundness_metric.R | #boundness/fusion
#Script was written by Hedvig Skirgård
# Computes a per-language "boundness" (fusion) score from Grambank features:
# the mean value of the features flagged as fusion-relevant, plus a
# standardized version, written to output/Bound_morph/bound_morph_score.tsv.
source("requirements.R")
OUTPUTDIR1 <- file.path('.', "output", "Bound_morph")
# create output dir if it does not exist.
if (!dir.exists(OUTPUTDIR1)) {
dir.create(OUTPUTDIR1)
}
# skip the whole computation if the score file was already produced
if (!file.exists(here(OUTPUTDIR1, "bound_morph_score.tsv"))) {
GB_wide <-
read_tsv(file.path("data", "GB_wide", "GB_wide_strict.tsv"),
col_types = WIDE_COLSPEC)
#read in sheet with scores for whether a feature denotes fusion
GB_fusion_points <-
data.table::fread(
file.path("data", "GB_wide", "parameters.csv"),
encoding = 'UTF-8',
quote = "\"",
header = TRUE,
sep = ","
) %>%
dplyr::select(Parameter_ID = ID, Fusion = boundness, informativity) %>%
mutate(Fusion = as.numeric(Fusion))
# mean value over the fusion-flagged features, per language
df_morph_count <- GB_wide %>%
filter(na_prop <= 0.25) %>% #exclude languages with more than 25% missing data
dplyr::select(-na_prop) %>%
reshape2::melt(id.vars = "Language_ID") %>%
dplyr::rename(Parameter_ID = variable) %>%
inner_join(GB_fusion_points, by = "Parameter_ID") %>%
filter(Fusion == 1) %>%
filter(!is.na(value)) %>%
group_by(Language_ID) %>%
dplyr::summarise(mean_morph = mean(value)) %>%
dplyr::select(Language_ID, boundness = mean_morph)
# add a z-scored copy of the boundness score
boundness_st = scale(df_morph_count$boundness)
df_morph_count <- cbind(df_morph_count, boundness_st)
df_morph_count %>%
write_tsv(file.path(OUTPUTDIR1, "bound_morph_score.tsv"))
}
9691519e85b5b89cbf63316a87dadbed6dfec34a | 4f5c3e1f7e1ee109e2346d953c00d24b3992e9f9 | /concord_GUI.R | 51caf273e55df431c0a41dd0a6ff78c9c44fabce | [] | no_license | dependency-injection/NLP | adf7656a31adb4e4cf8a96223d6499ef0d21f011 | 3ec3263d533a56d1129a5165073b0ccea3312d10 | refs/heads/master | 2021-01-19T16:51:15.964491 | 2015-04-01T17:45:14 | 2015-04-01T17:45:14 | 33,262,281 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 586 | r | concord_GUI.R | window<-gwindow("Providing the context of a word")
# gWidgets GUI for showing the context (concordance) of a word in a text.
# Assumes `window` (a gwindow) was created just above this excerpt.
mainwindow<-ggroup(horizontal=FALSE,container=window)
# row 1: name of the text/corpus to traverse
windowgroup1<-ggroup(horizontal=TRUE,container=mainwindow)
label1<-glabel("Text to traverse ",container=windowgroup1)
text1<-gedit(container=windowgroup1)
# row 2: the word whose context is wanted
windowgroup2<-ggroup(horizontal=TRUE,container=mainwindow)
label2<-glabel("Word for context",container=windowgroup2)
text2<-gedit(container=windowgroup2)
# submit button: read both fields and run the concordance routine
# (concord.R is expected to define `concordanc` -- confirm)
butto<-gbutton("submitbutton",container=mainwindow,handler=function(h,...)
{
texname<-svalue(text1)
word<-svalue(text2)
source("concord.R")
concordanc(texname,word)
})
2740acfbe9e199a31aacd77bccfd577484ec6d9b | 6b7eb9e5757a9f5208f8783b552de4dd31647637 | /man/cancer.Rd | e8a25688339f8150c3fcac77a87e8fa2d699bf37 | [] | no_license | cran/PairViz | 03ccc39bd477f25a716ced15334efb91e6b529db | a82c3fc5e3bbdee4f7d50b7cc0fb6f6c00767925 | refs/heads/master | 2023-04-13T19:26:36.396111 | 2022-08-12T14:00:05 | 2022-08-12T14:00:05 | 17,681,472 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 992 | rd | cancer.Rd | \name{cancer}
\alias{cancer}
\docType{data}
\title{Cancer Survival data}
\description{
Patients with advanced cancers of the stomach, bronchus, colon, ovary or breast were treated with ascorbate. The purpose of the study was to determine if the survival times differ with respect to the organ affected by the cancer.
}
\format{This data frame contains the following columns:
\describe{
\item{\code{Survival}}{time in days}
\item{\code{Organ}}{Organ affected by the cancer}
}}
\usage{data(cancer)}
\references{
Cameron, E. and Pauling, L. (1978) Supplemental ascorbate in the supportive treatment of cancer: re-evaluation of prolongation of survival times in terminal human cancer. Proceedings of the National Academy of Science USA, 75, 4538-4542.
Also found in: Manly, B.F.J. (1986) Multivariate Statistical Methods: A Primer, New York: Chapman and Hall, 11. Also found in: Hand, D.J., et al. (1994) A Handbook of Small Data Sets, London: Chapman and Hall, 255.
}
\keyword{datasets} |
598df3000e065ec3acab7e93299f0536a5ba2f14 | ba8b66796420d7943610661838d8a047ad293e8f | /task12 (L1-regularization).r | be7143a67d027e28d2313d25c3b9f05726431690 | [] | no_license | ktrndy/home_task_applied_statistics | 20fe4b7a78154c92b8c953e6bbc9772b5ea2320b | 366b2480d0156eadb779500b8ece167baabcddd1 | refs/heads/main | 2023-06-07T21:57:36.592873 | 2021-06-27T09:37:57 | 2021-06-27T09:37:57 | 369,649,392 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,461 | r | task12 (L1-regularization).r | library(glmnet)
#TASK 1
#Redo the previous homework with an L1 penalty.
set.seed(7) #fix the random seed
X=rnorm(200)
Z=rnorm(200)
Y=Z+X^2+2*sin(3*X) #the true relationship
data_matrix=cbind(poly(X,degree = 10, raw = T)) #assume Y is a degree-10 polynomial in X
#L1 regularization
cross_validation=cv.glmnet(x = data_matrix, y = Y, alpha = 1, nfolds = 10) #L1 penalty, hence alpha=1 rather than 0
plot(cross_validation)
l=cross_validation$lambda.min
model_L1=glmnet(x = data_matrix, y = Y, alpha = 1, lambda = l) #fit with the selected lambda
coef(model_L1)
predict_L1=predict(model_L1, newx = data_matrix)
plot(X,Y, col=rgb(0,0,0,0.3),pch=16)
lines(X[order(X)],predict_L1[order(X)], type = "l", col=4,lwd=3, lty=2) #looks nice, similar to the plain unregularized model, but there must be a catch
#compute R^2 on fresh data (for L2 it was about 0.3743894) and plot
X1=rnorm(1000)
Z1=rnorm(1000)
Y1=Z1+X1^2+2*sin(3*X1)
data_matrix1=data.frame(poly(X1,degree = 10, raw = T))
predict1=predict(model_L1, newx = as.matrix(data_matrix1))
1-sum((predict1-Y1)^2)/sum((Y1-mean(Y1))^2) #-3.164544 -- poor, worse than with L2 regularization
plot(X1,Y1, col=rgb(0,0,0,0.3),pch=16)
lines(X1[order(X1)],predict1[order(X1)], type = "l", col=2,lwd=3) #very poor fit; the curve (except its left part) resembles the unregularized model from the previous homework
#TASK 2
#Try to understand what is happening. To do so: look carefully at the size of the lambda penalty in the L1 and the L2 case.
#lambda for L2 from the previous homework: l=1.962212
#lambda for L1: l=0.002363491, which is tiny, so the objective minimized for beta is close to the ordinary unpenalized one
#TASK 3
#Apply the same lambda grid to the L2-regularization homework by passing it into cross-validation manually. Finally, show that regularization is not perfect and that what we got in the previous homework was misleading :)
model_L2=glmnet(x = data_matrix, y = Y, alpha = 0, lambda = l)
predict_L2=predict(model_L2, newx = data_matrix)
plot(X,Y, col=rgb(0,0,0,0.3),pch=16)
lines(X[order(X)],predict_L2[order(X)], type = "l", col=4,lwd=3)
#compute R^2 and plot
predict2=predict(model_L2, newx = as.matrix(data_matrix1))
plot(X1,Y1, col=rgb(0,0,0,0.3),pch=16)
lines(X1[order(X1)],predict2[order(X1)], type = "l", col=2,lwd=3) #even closer to the ordinary unpenalized fit
1-sum((predict2-Y1)^2)/sum((Y1-mean(Y1))^2) #0.229146 -- not too bad here, though
#For our data L2 works and the results are decent, but they depend on the lambda obtained from cross-validation; with the wrong lambda the result can be quite poor.
#L1 does not fit at all (R^2 < 0). From theory: the L1 penalty only charges features with large coefficients and, unlike L2, can zero coefficients out entirely. Here the degree 6, 7 and 8 terms were zeroed, losing a good part of the explanatory power.
coef(model_L1)
cross_validation1=cv.glmnet(x = data_matrix, y = Y, alpha = 0, nfolds = 100, lambda=c(0,exp(seq(from=-10, to = 1, length.out = 100))))
plot(cross_validation1)
l1=cross_validation1$lambda.min
model_L21=glmnet(x = data_matrix, y = Y, alpha = 0, lambda = l1)
predict21=predict(model_L21, newx = as.matrix(data_matrix1))
1-sum((predict21-Y1)^2)/sum((Y1-mean(Y1))^2)
#Found the global minimum of the CV curve and the out-of-sample fit still collapsed -- the regularization did not work out.
dccd5fa8944975d42ea5e014864a0f2117c2d787 | 093af3a4b9c72bf4444040d8416176026c967ebb | /part2.r | b8e9b6248d7449c1bb7ffb4b127a48d15319926d | [] | no_license | Nathan-Lovell/DataScience-Nathan-Lovell | a5dddc6b2af39635061cd6079a5677a610091eb8 | 433dea45b3ebd9eb955af41f93fbcdad2834428a | refs/heads/master | 2020-08-11T09:12:27.196613 | 2019-12-19T23:01:27 | 2019-12-19T23:01:27 | 214,536,018 | 0 | 0 | null | 2019-10-11T22:06:08 | 2019-10-11T22:06:08 | null | UTF-8 | R | false | false | 2,220 | r | part2.r | ## ----echo=FALSE, message=FALSE, error=FALSE, warning=FALSE, results='hide'----
# Load a package, installing it first if it is not already available.
# `library_name` is a character scalar, e.g. include("knitr").
include <- function(library_name){
  # installed.packages() returns a matrix; compare against the package names
  # in its rownames rather than against every cell (versions, paths, ...)
  if( !(library_name %in% rownames(installed.packages())) )
    install.packages(library_name)
  library(library_name, character.only=TRUE)
}
include("tidyverse")
include("knitr")
include("jsonlite")
# re-run deliverable 1: purl() extracts the R code from the Rmd, source()
# executes it (this is expected to define objects such as `Review` used below)
purl("https://raw.githubusercontent.com/Nathan-Lovell/DataScience-Nathan-Lovell/master/deliverable1.Rmd", output = "part1for2.r") # produces r source from rmd
source("part1for2.r") # executes the source
## ----echo=FALSE, message=FALSE, results='hide'---------------------------
# JSON reviews pre-converted to CSV (fromJSON choked on the raw file; see note below)
Amazon_json <- read_csv("convertcsv.csv")
#Amazon_Ratings <- read_csv("https://raw.githubusercontent.com/NathanLovell/DataScience-Nathan-Lovell/master/5000_Rows.csv")
#Amazon_json <- fromJSON("5000_json.json", flatten=TRUE) <- formatting error and cannot run. Must convert to csv
## ------------------------------------------------------------------------
colnames(Amazon_json)
## ------------------------------------------------------------------------
# tidy subset of the JSON review fields; `helpful/0` is the helpful-vote count
# and `helpful/1` the total votes ("reviewerer_id" keeps the original column
# name, typo and all, so downstream references still match)
json_reviews <- tibble(
reviewerer_id=Amazon_json$reviewerID, product_id=Amazon_json$asin, helpful_vote=Amazon_json$`helpful/0`, total_vote=Amazon_json$`helpful/1`, review_title=Amazon_json$summary, review_text=Amazon_json$reviewText, review_time=Amazon_json$unixReviewTime
)
colnames(json_reviews)
## ----echo=FALSE, message=FALSE, error=FALSE, warning=FALSE, results='hide'----
# `Review` comes from the sourced deliverable-1 script -- confirm it is defined there
only_verified <- filter(Review, verified_purchase == "Y")
no_verified <- filter(Review, verified_purchase== "N")
## ------------------------------------------------------------------------
# star-rating distributions as a share of reviews, verified vs. non-verified
ggplot(only_verified, aes(x=star_rating)) + geom_bar(aes(y=(..count..)/sum(..count..))) + coord_cartesian(ylim=c(0, .60)) + labs(title="Verified Purchase Reviews", x="Number of Stars", y="% of Reviews")
ggplot(no_verified, aes(x=star_rating)) + geom_bar(aes(y=(..count..)/sum(..count..))) + coord_cartesian(ylim=c(0, .60)) + labs(title="Non-Verified Purchase Reviews", x="Number of Stars", y="% of Reviews")
## ------------------------------------------------------------------------
# the named `formula` argument means `Review` binds positionally to lm's `data`
simple_model <- lm(Review, formula= as.numeric(star_rating) ~ as.numeric(verified_purchase))
summary(simple_model)
|
6ac3ec33143f5fee2024fd0e43da5e471490d313 | 75ee2ae5008e27c56633dd177d3a486fb65d2c33 | /Multi Linear Model for 50 startups.R | fb572df8c6f97ddb47246fba027c41e973bfd229 | [] | no_license | anilkrishna1000/Multi-Linear-regression-alagarithm-EXCELR-data-set-In-Python-and-R-Languages | dd36e96e169b65cc0f837c0cf3f9c9e2347bfa7e | 7f293e70bd57a5e828b0e10de79608e62a1e5bc6 | refs/heads/master | 2022-12-06T00:21:30.188384 | 2020-08-18T05:26:27 | 2020-08-18T05:26:27 | 288,362,462 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,944 | r | Multi Linear Model for 50 startups.R |
# Exploratory look at the 50-startups data (assumes `startup_50` was loaded
# before this point, e.g. via a data import).
View(startup_50)
# NOTE(review): installing a package inside an analysis script re-installs it
# on every run; consider guarding with requireNamespace().
install.packages("data.table")
library(data.table)
summary(startup_50)
# find out the variance of each department
var(startup_50$R.D.Spend)
var(startup_50$Administration)
var(startup_50$Marketing.Spend)
var(startup_50$Profit)
# find out the standard deviation
sd(startup_50$R.D.Spend)
sd(startup_50$Administration)
sd(startup_50$Marketing.Spend)
sd(startup_50$Profit)
# Show the distinct states present in the data.
# FIX: the original called unique(state), but no object named `state`
# exists in this script; the column lives in startup_50$State.
unique(startup_50$State) # Checking how many states are in the data
# One-hot encode State into three 0/1 dummy columns.
startup_50 <- cbind(startup_50,ifelse(startup_50$State=="New York",1,0), ifelse(startup_50$State=="California",1,0), ifelse(startup_50$State=="Florida",1,0))
# Renaming the dummy columns.
# NOTE(review): setnames() assumes the new columns are called V2..V4;
# confirm that cbind() on this object actually produces those names.
setnames(startup_50, 'V2','New York')
setnames(startup_50, 'V3','California')
setnames(startup_50, 'V4','Florida')
# Plotting the data on a scatterplot matrix.
plot(startup_50) # In this plot we are plotting dummy which seems no relative
## removing the State Column ###
test=data.frame(startup_50)
test1=test[,-4]
View(test1)
plot(test1)# After removing the state Column see the plot
## after seeing scatter Finding the Correlation##
library(corpcor)
# Partial correlations between all numeric variables.
cor2pcor(cor(test1))
## Creating Model##
colnames(startup_50)
# Full model: Profit regressed on the three spend/administration columns.
Profit_Model <- lm(Profit~`R.D.Spend`+Administration+`Marketing.Spend`, data = startup_50)
summary(Profit_Model) # P value for administration & Marketing.spend more Than 0.05
## so check the Influence records
library(car)
## Loading required package: carData
influenceIndexPlot(Profit_Model)
influencePlot(Profit_Model,id.n=3) ## Here cooks Graphs also P value is More tan 0.05 So Double Confirmed
## The Influence rows are 50 & 49 which is seeing in Cooks Graph
# Refit without the two influential observations (rows 49 and 50).
Profit_Model_Inf <- lm(Profit~`R.D.Spend`+`Administration`+`Marketing.Spend`, data = startup_50[-c(50,49),])
summary(Profit_Model_Inf)
## Variance influence factor to Check the Coliniarity Between the Variables
# NOTE(review): the backticked names `R&D Spend` / `Marketing Spend` below
# differ from the dot-names (`R.D.Spend`, `Marketing.Spend`) used everywhere
# else in this script; if the columns were dot-renamed on import these two
# lines will fail -- confirm the actual column names.
Profit_Model <- lm(Profit~`R&D Spend`+Administration+`Marketing Spend`, data = startup_50)
class(startup_50$`Marketing Spend`)
vif(Profit_Model)
summary(Profit_Model)
## vif>10 then there exists collinearity among all the variables
## Added Variable plot to check correlation b/n variables and o/p variable
avPlots(Profit_Model)
## Creating final Model for Administration data
Profit_Model_Revised <- lm(Profit~`R.D.Spend`+Administration+`Marketing.Spend`+`State`, data = startup_50)
library(MASS)
# Stepwise selection by AIC to drop uninformative predictors.
stepAIC(Profit_Model_Revised)
Profit_Model_Final <- lm(Profit~`R.D.Spend`+`Marketing.Spend`, data = startup_50)
summary(Profit_Model_Final)# R^2 value is 95% So Our Model is too sufficient and P value is also less than 0.05
### Here Administration variable coliniarity to the marketing.spend so ignore that as the input variable
## consider only independent variable as R.D. spend and Marketing.spend
### Conclusion: if we want to Predict the Profit for 50 startup the independent variables consideration is R.D.Spend and Marketing.Spend
547f274d422f45dfecb2cf8b0c138df592a22e5f | c39292d8414028bb7c3b4ead6b90aac5546a5fda | /man/compare_runtimes.Rd | 3e6696335cfe024f1743f2e0e00bcc0e96633f8c | [] | no_license | GenomicsNX/CellTypeSpecificMethylationAnalysis | c4c3653b57dd511a8bc18090f6e4e2b0f1a23eea | b6a47a63bdd9f9c4f089b24e3c5c9afb63d407fe | refs/heads/master | 2023-03-04T06:48:05.263427 | 2021-02-13T04:49:05 | 2021-02-13T04:49:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,517 | rd | compare_runtimes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runtime_comparison.r
\name{compare_runtimes}
\alias{compare_runtimes}
\title{Compare method runtimes across various input sizes}
\usage{
compare_runtimes(
sample.sizes,
number.sites,
methods,
method.names,
number.replications,
number.cell.types,
random_seed
)
}
\arguments{
\item{sample.sizes}{Numeric vector for sample sizes to simulate.}
\item{number.sites}{Numeric vector for number of features/sites to simulate.}
\item{methods}{Character vector of method wrapper names to benchmark.
Examples of wrappers (tca.mle, tca.gmm, celldmc) are above. They
must have the same input arguments. Function names must
match to be called correctly.}
\item{method.names}{Pretty names of those listed in methods argument for plotting}
\item{number.replications}{Integer indicating how many replications of
each experiment to run. Same input is run for
each replicate. This is meant to account for
variability in runtime due to the system we are
running on.}
\item{number.cell.types}{Integer indicating number of cell types to simulate.}
\item{random_seed}{Integer to set seed for simulation to ensure replicability}
}
\value{
A list of dataframes. First dataframe with columns indicating the method,
replicate number, sample size, number of sites, number of sources, and
total runtime for each experiment. Second dataframe summarizes replicates.
}
\description{
Compare method runtimes across various input sizes
}
|
aa49dd6d9d2b21f8d2576c760061bd2c926709c9 | 4d36492368e067bdc821b2ee8bc5a9d524458c9a | /man/filter_rules.Rd | a22ba6f8143868b53a719e4368ef3b4c4d365814 | [] | no_license | cran/processcheckR | 27e177bb799cb9ebfe4708ea56c9bb99cbd1685f | bb4e4523ae7b65f4f37adcf3eb1380735f8887a3 | refs/heads/master | 2022-10-15T01:29:09.022611 | 2022-10-03T08:40:08 | 2022-10-03T08:40:08 | 152,093,595 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,635 | rd | filter_rules.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter_rules.R
\name{filter_rules}
\alias{filter_rules}
\alias{filter_rules.log}
\title{Filter Using Declarative Rules}
\usage{
filter_rules(log, ..., eventlog = deprecated())
\method{filter_rules}{log}(log, ..., eventlog = deprecated())
}
\arguments{
\item{log}{\code{\link[bupaR]{log}}: Object of class \code{\link[bupaR]{log}} or derivatives (\code{\link[bupaR]{grouped_log}}, \code{\link[bupaR]{eventlog}}, \code{\link[bupaR]{activitylog}}, etc.).}
\item{...}{Name-rule pairs created by rule functions.}
\item{eventlog}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}; please use \code{log} instead.}
}
\value{
A filtered log (of same type as input) that satisfied the specified rules.
}
\description{
This function can be used to filter event data using declaritive rules or constraint templates.
It needs a \code{log} (object of class \code{\link[bupaR]{log}} or derivatives, e.g. \code{\link[bupaR]{grouped_log}},
\code{\link[bupaR]{eventlog}}, \code{\link[bupaR]{activitylog}}, etc.). and a set of \code{rules}.
Rules can be made with the following templates:
\itemize{
\item \emph{Cardinality}:
\itemize{
\item \code{\link{absent}}: Check if the specified activity is absent from a case,
\item \code{\link{contains}}: Check if the specified activity is present (contained) in a case,
\item \code{\link{contains_between}}: Check if the specified activity is present (contained) in a case between the minimum and maximum number of times,
\item \code{\link{contains_exactly}}: Check if the specified activity is present (contained) in a case for exactly \code{n} times.
}
\item \emph{Relation}:
\itemize{
\item \code{\link{ends}}: Check if cases end with the specified activity,
\item \code{\link{starts}}: Check if cases start with the specified activity.
\item \code{\link{precedence}}: Check for precedence between two activities,
\item \code{\link{response}}: Check for response between two activities,
\item \code{\link{responded_existence}}: Check for responded existence between two activities,
\item \code{\link{succession}}: Check for succession between two activities.
}
\item \emph{Exclusiveness}:
\itemize{
\item \code{\link{and}}: Check for co-existence of two activities,
\item \code{\link{xor}}: Check for exclusiveness of two activities.
}
}
}
\details{
The rules or constraint templates in this package are (partially) based on \emph{DecSerFlow} (\emph{Declarative Service Flow Language}).
For more information, see the \strong{References} below.
\subsection{Grouped Logs}{
When applied to a \code{\link[bupaR]{grouped_log}}, the grouping variables are ignored but retained in the returned log.
}
}
\section{Methods (by class)}{
\itemize{
\item \code{filter_rules(log)}: Filter a \code{\link[bupaR]{log}} using declaritive rules.
}}
\examples{
library(bupaR)
library(eventdataR)
# Filter where Blood test precedes MRI SCAN and Registration is the start of the case.
patients \%>\%
filter_rules(precedence("Blood test","MRI SCAN"),
starts("Registration"))
}
\references{
van der Aalst, W. M. P., & Pesic, M. (2006). DecSerFlow: Towards a Truly Declarative Service Flow Language.
In M. Bravetti, M. Núñez, & G. Zavattaro (Eds.), Proceedings of the 3rd International Workshop on Web Services and Formal Methods (Vol. 4184, pp. 1–23).
Springer. \doi{10.1007/11841197_1}
}
\seealso{
\code{\link{check_rules}}
}
|
74214ff00ee6c7f07d1961de13b60ea5ea554f6a | 739f77c7e3952bf827d4a1c4e36b9610e57045b4 | /sesame-lvl3betas.R | f0cd7d99febd208f511270e6c38f5ca5d889c193 | [] | no_license | dmiller15/sesame-docker | ed00a5280692a31c34cf752c4909a2a09166da3e | ff4536a80748b445f24ae1267ff4b9e69aa537a9 | refs/heads/master | 2020-03-10T14:46:13.659391 | 2018-10-24T21:56:32 | 2018-10-24T21:56:32 | 129,434,549 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 637 | r | sesame-lvl3betas.R | #!/usr/bin/env Rscript
# Usage: Rscript sesame-lvl3betas.R <idat_dir/> <basename>
library(sesame)
# Take a folder and an IDAT R/G basename from the command line and write the
# level-3 beta values to "<basename>-level3betas-gdcrerun.txt".
# @param args[1]: the folder containing the R&G IDAT files (must end in "/",
#                 because it is concatenated with args[2] by paste0 below)
# @param args[2]: the basename for the R&G IDAT files
# @return (file) tab-separated probe-id / beta-value table
args = commandArgs(trailingOnly=TRUE)
# sesame resolves its reference annotation under SESAMEHOME.
Sys.setenv(SESAMEHOME='/home/sesame-refs/')
# readIDATs() returns a list of signal sets; a single sample is expected.
sset <- readIDATs(paste0(args[1],args[2]))[[1]]
# Background-correct (noob), then correct red/green dye bias (type-I norm).
sset.nb <- noob(sset)
sset.db <- dyeBiasCorrTypeINorm(sset.nb)
level3.betas <- getBetas(sset.db)
write.table(level3.betas,file=paste0(args[2],"-level3betas-gdcrerun.txt"),col.names = FALSE,quote = FALSE,sep = '\t')
|
4441cf209fdd2ef8d354b393c217cdc66d8bb2b2 | fde86e5c9d878baef1d85fe684dd6738f23163e9 | /functions2.R | 4fbb63e1c5df6bd096416481f4d8a18cf43e4afb | [] | no_license | tsoleary/season_adapt | 3c203dba2d76776601a9c485c07da14498fb1d69 | 429be6ac2982869455455b7e9e3d818f44a28854 | refs/heads/master | 2020-08-29T18:29:40.497647 | 2019-12-13T20:43:51 | 2019-12-13T20:43:51 | 218,127,805 | 1 | 0 | null | 2019-11-22T19:36:25 | 2019-10-28T19:22:59 | R | UTF-8 | R | false | false | 14,054 | r | functions2.R | # Seasonal adaptation model functions ------------------------------------------
# Function to initialise population --------------------------------------------
# Build a random diploid starting population.
#
# @param L        Number of loci per chromosome.
# @param pop_size Number of diploid individuals (so 2 * pop_size chromosomes).
# @param prob_0, prob_1 Sampling probabilities of the 0 and 1 alleles at
#   every position.
# @return A tibble with one row per chromosome and columns:
#   individual, chr, locus_01 ... locus_L.
#   NOTE(review): cbind() of the character identifier vectors with the
#   integer genome matrix coerces everything to character, so the locus
#   columns hold "0"/"1" strings -- downstream code converts back with
#   as.numeric().
init_pop <- function(L, pop_size, prob_0 = 0.5, prob_1 = 0.5) {
  # initialize diploid chromosomes with a random population
  genome <- matrix(sample(0:1, 2*L*pop_size,
                          replace = TRUE,
                          prob = c(prob_0, prob_1)),
                   nrow = pop_size * 2,
                   ncol = L)
  # name the loci, the individuals, and their chromosome pairs
  colnames(genome) <- paste("locus",
                            str_pad(1:L, 2, "0", side = "left"), sep = "_")
  individual <- paste("indiv",
                      str_pad(rep(1:pop_size, each = 2), 6, "0", side = "left"),
                      sep = "_")
  chr <- paste("chr", rep(1:2, times = pop_size), sep = "_")
  # bind the matrix and names together
  return(as_tibble(cbind(individual, chr, genome)))
}
# Function to calculate fitness -------------------------------------------------
# Seasonal fitness score for every individual.
#
# @param genomes Tibble of chromosomes (two rows per individual) with
#   character "0"/"1" locus_* columns plus individual/chr identifiers.
# @param d Heterozygote contribution: each heterozygous locus adds d to BOTH
#   the winter and the summer score.
# @param y NOTE(review): this parameter is never used -- the local `y`
#   assigned inside the run-length loop below shadows it.
# @return Tibble with columns individual, geno_0, geno_1, geno_2,
#   f_winter, f_summer.  Requires rlist::list.append() to be attached.
fitness_func <- function(genomes, d, y) {
  # creates the diploid genotypes: 0 (homo 00), 1 (het), or 2 (homo 11)
  genotype <- genomes %>%
    pivot_longer(cols = contains("locus"),
                 names_to = "loci",
                 values_to = "allele") %>%
    group_by(individual, loci) %>%
    summarize(geno = sum(as.numeric(allele)))
  # count of each genotype for each loci in an individual
  geno_count <- genotype %>%
    group_by(individual, geno, .drop = FALSE) %>%
    count()
  # seasonal score for each individual: winter counts 0/0 loci, summer 1/1
  z_score <- geno_count %>%
    pivot_wider(names_from = geno, values_from = n) %>%
    rename("geno_0" = "0", "geno_1" = "1", "geno_2" = "2") %>%
    mutate(z_winter = geno_0, # + geno_1 * d,
           z_summer = geno_2) # + geno_1 * d)
  # heterozygote effect: zero out missing genotype counts, then each het
  # locus adds d to both seasonal scores
  z_score[is.na(z_score)] <- 0
  z_score["z_winter"] <- z_score["z_winter"] + z_score["geno_1"]*d
  z_score["z_summer"] <- z_score["z_summer"] + z_score["geno_1"]*d
  # mutational interaction effects: per chromosome, the longest run of
  # consecutive 0s (resp. 1s) is added to the winter (resp. summer) score.
  # NOTE(review): growing vectors with list.append() in a loop is O(n^2);
  # also, when a chromosome lacks one allele entirely, tapply() returns a
  # length-1 result, so y[1]/y[2] can mislabel the run or be NA, and
  # sum(..., NA) below then propagates NA -- confirm this is intended.
  long_seqs_1 <- c()
  long_seqs_0 <- c()
  for (i in 1:nrow(genomes)){
    temp <- as.numeric(genomes[i,3:ncol(genomes)])
    new_temp.rle <- rle(temp)
    by_season <- tapply(new_temp.rle$lengths, new_temp.rle$values, max)
    y <- unname(by_season)
    long_seqs_0 <- list.append(long_seqs_0, y[1])
    long_seqs_1 <- list.append(long_seqs_1, y[2])
  }
  # z_score has one row per individual; chromosomes j and j+1 belong to
  # individual i (same sorted order as group_by(individual) above)
  j <- 1
  for (i in 1:nrow(z_score)){
    z_score[i, "z_winter"] <- z_score[i, "z_winter"] + sum(long_seqs_0[j],long_seqs_0[j+1])
    z_score[i, "z_summer"] <- z_score[i, "z_summer"] + sum(long_seqs_1[j],long_seqs_1[j+1])
    j <- j + 2
  }
  # fitness of each individual
  fitness <- rename(z_score, f_winter = z_winter, f_summer = z_summer)
  return(fitness)
}
# Crossover function that takes a single individual ----------------------------
# Recombine the two chromosomes of one individual and return one gamete.
#
# @param parent Individual identifier (matched against genomes$individual).
# @return A one-row tibble of locus_* columns: one of the two recombined
#   chromosomes, chosen uniformly at random.
# NOTE(review): reads the globals `genomes` and `cross_prob` instead of
# taking them as arguments (the commented-out parameters at the call sites
# in cross_over_parents() suggest they once were arguments).
cross_over <- function(parent) {
  loci <- genomes %>%
    filter(.$individual == as.character(parent)) %>%
    select(contains("locus"))
  # determine the location(s) for crossover: each locus is independently a
  # breakpoint with probability cross_prob
  cross <- sample(0:1,
                  ncol(loci),
                  prob = c(1 - cross_prob, cross_prob),
                  replace = TRUE)
  cross_locs <- which(cross == 1)
  # do crossover at each location: swap the two chromosomes' tails from the
  # breakpoint to the end
  for (cross_loc in cross_locs){
    cross_genome <- loci[,cross_loc:ncol(loci)]
    cross_genome[c(1,2), ] <- cross_genome[c(2,1), ]
    loci[,cross_loc:ncol(loci)] <- cross_genome
  }
  pick_chr <- sample(c(1,2), 1)
  # return one of the two recombined chromosomes at random
  return(loci[pick_chr, ])
}
# cross over parents at the same time ------------------------------------------
# Build one offspring: recombine each parent into one gamete apiece and
# label the two resulting chromosome rows chr_1 / chr_2.
cross_over_parents <- function(parent1, parent2) {
  offspring <- rbind(cross_over(parent1), cross_over(parent2))
  bind_cols("chr" = c("chr_1", "chr_2"), offspring)
}
# uniform crossover function ---------------------------------------------------
# Uniform crossover: each locus of the gamete is drawn independently from
# chromosome 1 or 2 of the parent with equal probability.
#
# @param parent Individual identifier (matched against genomes$individual).
# @return Named numeric vector of allele values, one entry per locus.
# NOTE(review): reads the global `genomes`; `cross_prob` is not used here.
cross_over_uniform <- function(parent) {
  loci <- genomes %>%
    filter(.$individual == as.character(parent)) %>%
    select(contains("locus"))
  # for each locus, pick which chromosome (1 or 2) donates the allele
  cross <- sample(1:2,
                  ncol(loci),
                  prob = c(0.5, 0.5),
                  replace = TRUE)
  new_chr <- vector(mode = "double", length = ncol(loci))
  for (i in 1:length(cross)){
    new_chr[i] <- as.numeric(loci[cross[i], i])
  }
  names(new_chr) <- names(loci)
  return(new_chr)
}
# cross over parents at the same time ------------------------------------------
# Uniform-crossover analogue of cross_over_parents(): one gamete per parent,
# rows labelled chr_1 / chr_2.
cross_over_parents_uniform <- function(parent1, parent2) {
  gamete_one <- cross_over_uniform(parent1)
  gamete_two <- cross_over_uniform(parent2)
  bind_cols("chr" = c("chr_1", "chr_2"), as_tibble(rbind(gamete_one, gamete_two)))
}
# Parent selection -------------------------------------------------------------
# Sample one pair of parents per individual, weighted by seasonal fitness.
#
# @param genomes     Unused here; kept so the call signature stays the same.
# @param fitness_all Tibble with columns individual, f_summer, f_winter.
# @param season      "summer" uses f_summer as weights; any other value
#                    uses f_winter.
# @return Tibble with columns p1, p2 (one row per row of fitness_all); the
#   two parents within a pair are always distinct (replace = FALSE).
parent_selection <- function(genomes, fitness_all, season){
  if (season == "summer"){
    fit <- fitness_all$f_summer
  } else {
    fit <- fitness_all$f_winter
  }
  # Draw all pairs up front (same sequence of sample() calls as the original
  # row-by-row loop, so results under a fixed seed are unchanged), then build
  # the result once instead of growing a tibble row by row (which is O(n^2)).
  pairs <- vapply(
    seq_len(nrow(fitness_all)),
    function(i) sample(fitness_all$individual, 2, prob = fit, replace = FALSE),
    character(2)
  )
  tibble(p1 = pairs[1, ], p2 = pairs[2, ])
}
# Mutation on the entire population each generation before crossover -----------
# Apply random point mutations across every chromosome.
#
# @param genomes  Tibble with locus_* columns plus identifier columns.
# @param mut_prob Per-site mutation probability.
# @return Tibble of the same shape with (numeric) mutated locus values.
mutate_genome <- function(genomes, mut_prob) {
  # multiply the mutation probabilty by two (because half the time 0's will
  # be mutated to 0's and same for 1's)
  mut_prob_2 <- mut_prob * 2
  # total number of chromosomes and loci in the genome
  total_chr <- nrow(genomes)
  genome_length <- sum(grepl("locus", colnames(genomes)))
  # determine mutation positions with a random uniform distribution
  mut_pos <- matrix(runif(genome_length * total_chr),
                    nrow = total_chr,
                    ncol = genome_length) < mut_prob_2
  # mutation values (with zeros where the original values are located)
  # NOTE(review): only genome_length random draws are made here and matrix()
  # recycles them column-major over all total_chr x genome_length cells, so
  # the replacement values form a repeating tile rather than independent
  # draws per cell -- confirm whether sample() should draw
  # total_chr * genome_length values instead.
  mut_mat <- matrix(sample(0:1, genome_length, replace = TRUE),
                    nrow = total_chr,
                    ncol = genome_length) * mut_pos
  # original values (with zeros where the mutated values are located)
  org_mat <- matrix(as.numeric(as.matrix(select(genomes, contains("locus")))),
                    nrow = total_chr,
                    ncol = genome_length) * !mut_pos
  # combine matrices by addition
  new_mat <- mut_mat + org_mat
  # add indiv and chr identifiers to dataframe
  indiv_chr <- select(genomes, -contains("locus"))
  df <- cbind(indiv_chr, new_mat)
  # keep column names the same for the loci
  colnames(df) <- colnames(genomes)
  return(as_tibble(df))
}
# Calculate loci specific frequencies ------------------------------------------
# Frequency of the "1" (summer) allele at each locus across the population.
#
# @param genomes  Tibble of chromosomes with locus_* columns ("0"/"1" values).
# @param pop_size Number of diploid individuals (2 * pop_size chromosomes).
# @return Tibble with columns loci and freq_1.
get_freqs <- function(genomes, pop_size){
  allele_long <- pivot_longer(genomes,
                              cols = contains("locus"),
                              names_to = "loci",
                              values_to = "allele")
  by_locus <- group_by(allele_long, loci)
  summarize(by_locus, freq_1 = sum(as.numeric(allele)) / (pop_size * 2))
}
# Plotting allele frequencies over time ----------------------------------------
# Plot summer-allele frequency trajectories across generations.
#
# @param loci_freq      Long tibble with columns loci, genz (generation
#                       number) and freqs (summer-allele frequency).
# @param type           "loci" (default) draws one line per locus;
#                       "avg" draws the across-loci mean frequency.
# @param figure_caption Caption text (no default, so it must be supplied).
# @param fig_title      Plot title.
# @return A ggplot object (returned, not printed).
plot_freq <- function(loci_freq, type = "loci", figure_caption,
                      fig_title = "Loci specific allele frequencies over time"){
  if (type == "avg"){
    # average the per-locus frequencies within each generation
    avg_freq <- loci_freq %>%
      group_by(genz) %>%
      summarize(freqs = mean(freqs))
    g <- ggplot(avg_freq, mapping = aes(x = genz, y = freqs)) +
      geom_line() +
      labs(title = fig_title,
           caption = figure_caption) +
      xlab("Generations") +
      ylab("Freq of Summer Allele") +
      ylim(0,1) +
      theme_classic() +
      theme(legend.position = "none")
  } else {
    # one trajectory per locus, coloured by locus (legend suppressed)
    plot_df <- loci_freq
    g <- ggplot(plot_df, mapping = aes(x = genz, y = freqs, color = loci)) +
      geom_line() +
      labs(title = fig_title,
           caption = figure_caption)+
      xlab("Generations") +
      ylab("Freq of Summer Allele") +
      ylim(0,1) +
      theme_classic() +
      theme(legend.position = "none")
  }
  return(g)
}
# Simulation -------------------------------------------------------------------
# Run the seasonal-adaptation simulation with point crossover
# (cross_over_parents) and write per-generation allele frequencies to a CSV
# file whose name encodes the parameter values.
#
# @param L, pop_size        Loci per chromosome; number of individuals.
# @param d                  Heterozygote effect (passed to fitness_func).
# @param y                  Passed to fitness_func (see the NOTE there --
#                           currently unused inside it).
# @param cross_prob         Per-locus crossover probability.
# @param mut_prob           Per-site mutation probability.
# @param years, generations Years to simulate; generations per year.
# @param seasonal_balance   Generations before generations/seasonal_balance
#                           are "summer", the rest "winter".
# @param rep                Replicate number; only used in the output file
#                           name (NOTE: shadows base::rep inside this body).
# @return NULL; side effects: sets the globals `cross_prob` and `genomes`
#   via <<- (cross_over() reads them), prints progress every generation,
#   and writes "results_G_..._num_rep_<rep>.csv".
run_simulation <- function(L, pop_size, d, y, cross_prob, mut_prob, years,
                           generations, seasonal_balance, rep) {
  # Initialize Population
  individual <- paste("indiv",
                      str_pad(rep(1:pop_size, each = 2),
                              6, "0", side = "left"),
                      sep = "_")
  # export to globals: cross_over() reads genomes and cross_prob directly
  cross_prob <<- cross_prob
  genomes <<- init_pop(L, pop_size)
  freq_df <- get_freqs(genomes, pop_size)
  G <- 1
  for (year in 1:years){
    for (generation in 1:generations){
      if (generation < generations / seasonal_balance){
        season <- "summer"
      } else {
        season <- "winter"
      }
      # create an empty population data frame with all zeros
      # NOTE(review): this new_pop is immediately overwritten by map2_df
      # below, so this init_pop() call appears redundant.
      new_pop <- init_pop(L, pop_size, prob_0 = 1, prob_1 = 0)
      fitness_all <- fitness_func(genomes, d, y)
      df <- parent_selection(genomes, fitness_all, season)
      new_pop <- map2_df(df[[1]], df[[2]], cross_over_parents)
      genomes <- cbind(individual, new_pop)
      # record the summer-allele frequency of this generation under a
      # freq_G.<G> column
      freq_temp <- get_freqs(genomes, pop_size)
      freq_df <- full_join(freq_df, freq_temp, by = "loci")
      colnames(freq_df)[which(colnames(freq_df) == "freq_1")] <-
        paste0("freq_G.", G)
      # mutate genomes for the next generation (updates the global)
      genomes <<- mutate_genome(genomes, mut_prob)
      # print information to keep track of simulation progress
      print(paste("year", year, "generation", generation, "season", season,
                  "total generation", G))
      G <- G + 1
    }
  }
  # NOTE(review): hard-coded positional rename of the first two frequency
  # columns -- re-check if the column layout of freq_df ever changes.
  colnames(freq_df)[2:3] <- c("freq_G.0", "freq_G.1")
  # reshape wide generation columns into long (loci, genz, freqs) form
  loci_freq <- freq_df %>%
    pivot_longer(cols = contains("freq"),
                 names_to = "genz",
                 values_to = "freqs")
  loci_freq$genz <- as.numeric(str_extract(loci_freq$genz, "[:digit:]+"))
  # caption <<- paste(paste("Generations per year", generations),
  #                   paste("Pop size", pop_size),
  #                   paste("Seasonal Balance", seasonal_balance),
  #                   paste("Number of Loci", L),
  #                   paste("Dominance", d),
  #                   paste("Epistasis", y), sep = "; ")
  #
  # g1 <- plot_freq(loci_freq, figure_caption = caption)
  # print(g1)
  # g2 <- plot_freq(loci_freq, type = "avg", figure_caption = caption)
  # print(g2)
  # encode every parameter value in the output file name
  file_names <- paste("results",
                      "G", generations,
                      "Ps", pop_size,
                      "Sb", seasonal_balance,
                      "L", L,
                      "d", d,
                      "y", y,
                      "c", cross_prob,
                      "num_rep", rep,
                      sep = "_")
  write.csv(loci_freq, paste0(file_names, ".csv"), row.names = FALSE)
  #return(loci_freq)
}
# simulation with uniform crossover --------------------------------------------
# Identical to run_simulation() except that offspring are produced with
# uniform crossover (cross_over_parents_uniform) instead of point crossover.
# NOTE(review): this function duplicates run_simulation() line for line;
# consider refactoring the crossover function into a parameter.
#
# @param L, pop_size        Loci per chromosome; number of individuals.
# @param d, y               Passed to fitness_func (y currently unused there).
# @param cross_prob         Exported to a global (unused by uniform crossover).
# @param mut_prob           Per-site mutation probability.
# @param years, generations Years to simulate; generations per year.
# @param seasonal_balance   Generations before generations/seasonal_balance
#                           are "summer", the rest "winter".
# @param rep                Replicate number, used only in the file name.
# @return NULL; side effects: sets globals via <<-, prints progress, and
#   writes "results_G_..._num_rep_<rep>.csv".
run_simulation_uniform <- function(L, pop_size, d, y, cross_prob, mut_prob,
                                   years, generations, seasonal_balance, rep) {
  # Initialize Population
  individual <- paste("indiv",
                      str_pad(rep(1:pop_size, each = 2),
                              6, "0", side = "left"),
                      sep = "_")
  # export to globals: cross_over_uniform() reads genomes directly
  cross_prob <<- cross_prob
  genomes <<- init_pop(L, pop_size)
  freq_df <- get_freqs(genomes, pop_size)
  G <- 1
  for (year in 1:years){
    for (generation in 1:generations){
      if (generation < generations / seasonal_balance){
        season <- "summer"
      } else {
        season <- "winter"
      }
      # create an empty population data frame with all zeros
      # NOTE(review): immediately overwritten by map2_df below.
      new_pop <- init_pop(L, pop_size, prob_0 = 1, prob_1 = 0)
      fitness_all <- fitness_func(genomes, d, y)
      df <- parent_selection(genomes, fitness_all, season)
      new_pop <- map2_df(df[[1]], df[[2]], cross_over_parents_uniform)
      genomes <- cbind(individual, new_pop)
      # record this generation's summer-allele frequency
      freq_temp <- get_freqs(genomes, pop_size)
      freq_df <- full_join(freq_df, freq_temp, by = "loci")
      colnames(freq_df)[which(colnames(freq_df) == "freq_1")] <-
        paste0("freq_G.", G)
      # mutate genomes for the next generation (updates the global)
      genomes <<- mutate_genome(genomes, mut_prob)
      # print information to keep track of simulation progress
      print(paste("year", year, "generation", generation, "season", season,
                  "total generation", G))
      G <- G + 1
    }
  }
  colnames(freq_df)[2:3] <- c("freq_G.0", "freq_G.1")
  # reshape wide generation columns into long (loci, genz, freqs) form
  loci_freq <- freq_df %>%
    pivot_longer(cols = contains("freq"),
                 names_to = "genz",
                 values_to = "freqs")
  loci_freq$genz <- as.numeric(str_extract(loci_freq$genz, "[:digit:]+"))
  # caption <<- paste(paste("Generations per year", generations),
  #                   paste("Pop size", pop_size),
  #                   paste("Seasonal Balance", seasonal_balance),
  #                   paste("Number of Loci", L),
  #                   paste("Dominance", d),
  #                   paste("Epistasis", y), sep = "; ")
  #
  # g1 <- plot_freq(loci_freq, figure_caption = caption)
  # print(g1)
  # g2 <- plot_freq(loci_freq, type = "avg", figure_caption = caption)
  # print(g2)
  # encode every parameter value in the output file name
  file_names <- paste("results",
                      "G", generations,
                      "Ps", pop_size,
                      "Sb", seasonal_balance,
                      "L", L,
                      "d", d,
                      "y", y,
                      "c", cross_prob,
                      "num_rep", rep,
                      sep = "_")
  write.csv(loci_freq, paste0(file_names, ".csv"), row.names = FALSE)
  #return(loci_freq)
}
|
c4c8aa639f384a4e9002190db6c3e6dba06361c0 | b5eb2352d35fbd125b47a39e00774b73576cfb03 | /R/airport_delay.R | 2169cc4ad66af0ec81cbab95e37607798f8ee94c | [] | no_license | joshu107/732A94_Lab7 | a1dcb0d0be076df863d68d9dd216c81dd7519a78 | 68949a6486a5b1c109c56957175d8b851c53689c | refs/heads/master | 2021-01-11T01:32:10.905567 | 2016-10-12T10:03:28 | 2016-10-12T10:03:28 | 70,693,421 | 0 | 1 | null | 2016-10-12T11:31:24 | 2016-10-12T11:31:23 | null | UTF-8 | R | false | false | 1,139 | r | airport_delay.R | library(dplyr)
library(nycflights13)
library(maps)
# Prepare airport data for join: key column ID = FAA code; drop fields that
# are not needed for plotting (altitude, time zone, DST).
airport <- nycflights13::airports %>%
  mutate(ID = faa) %>%
  select(-faa, -alt, -tz, -dst)
# calculate mean departure delay per origin (the three NYC airports), then
# attach coordinates; right_join keeps every airport, leaving NA delay for
# airports that never appear as an origin
dep <- nycflights13::flights %>%
  group_by(origin) %>%
  summarise(avg_dep_delay = mean(dep_delay, na.rm = TRUE)) %>%
  arrange(avg_dep_delay) %>%
  mutate(ID = origin) %>%
  select(-origin) %>%
  right_join(airport, by = 'ID')
# calculate mean arrival delay per destination, joined the same way
arr <- nycflights13::flights %>%
  group_by(dest) %>%
  summarise(avg_arr_delay = mean(arr_delay, na.rm = TRUE)) %>%
  arrange(avg_arr_delay)%>%
  mutate(ID = dest) %>%
  select(-dest) %>%
  right_join(airport, by = 'ID')
# Base map of the contiguous US, then overlay circles whose area scales
# with the average delay (red = arrival, dark blue = departure).
# NOTE(review): sqrt() of a negative average delay (early flights) yields
# NaN with a warning, so such airports are silently not drawn -- confirm
# that is acceptable.  Also note the typo "departute" in the title string.
map(database = 'state')
symbols(arr$lon, arr$lat,
        bg = '#e2373f', fg = '#ffffff',
        circles = sqrt(arr$avg_arr_delay),
        inches = 0.1, add = TRUE, lwd = 0.5,
        main = '')
symbols(dep$lon, dep$lat,
        bg = '#233d4e', fg = '#ffffff',
        circles = sqrt(dep$avg_dep_delay),
        inches = 0.1, add = TRUE, lwd = 0.5,
        main = 'Average departute and arrival delay of US flights')
|
16bc20dfd52c566242eb91ded55637a51d010336 | 446373433355171cdb65266ac3b24d03e884bb5d | /man/qgis_scatter3dplot.Rd | 7e0212ca27b1803c1d32e5572b687e85c6d2ea50 | [
"MIT"
] | permissive | VB6Hobbyst7/r_package_qgis | 233a49cbdb590ebc5b38d197cd38441888c8a6f3 | 8a5130ad98c4405085a09913b535a94b4a2a4fc3 | refs/heads/master | 2023-06-27T11:52:21.538634 | 2021-08-01T01:05:01 | 2021-08-01T01:05:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,508 | rd | qgis_scatter3dplot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qgis_scatter3dplot.R
\name{qgis_scatter3dplot}
\alias{qgis_scatter3dplot}
\title{QGIS algorithm Vector layer scatterplot 3D}
\usage{
qgis_scatter3dplot(
INPUT = qgisprocess::qgis_default_value(),
XFIELD = qgisprocess::qgis_default_value(),
YFIELD = qgisprocess::qgis_default_value(),
ZFIELD = qgisprocess::qgis_default_value(),
OUTPUT = qgisprocess::qgis_default_value(),
...,
.complete_output = TRUE
)
}
\arguments{
\item{INPUT}{\code{source} - Input layer. Path to a vector layer.}
\item{XFIELD}{\code{field} - X attribute. The name of an existing field. ; delimited list of existing field names.}
\item{YFIELD}{\code{field} - Y attribute. The name of an existing field. ; delimited list of existing field names.}
\item{ZFIELD}{\code{field} - Z attribute. The name of an existing field. ; delimited list of existing field names.}
\item{OUTPUT}{\code{fileDestination} - Histogram. Path for new file.}
\item{...}{further parameters passed to \code{qgisprocess::qgis_run_algorithm()}}
\item{.complete_output}{logical specifing if complete out of \code{qgisprocess::qgis_run_algorithm()} should be used (\code{TRUE}) or first output (most likely the main) should read (\code{FALSE}). Default value is \code{TRUE}.}
}
\description{
QGIS Algorithm provided by QGIS Vector layer scatterplot 3D (qgis:scatter3dplot)
}
\details{
\subsection{Outputs description}{
\itemize{
\item OUTPUT - outputHtml - Histogram
}
}
}
|
cca618df2b5536818ede754fac17bdb8adc0a763 | 843202ff6b26a57d4975d58b123340ab44a1ab41 | /Bagging, RF _ Boosting after PCA.R | 19aff05f0e63931263221fe0234a757200a94f4e | [] | no_license | weiyaom/R-Predicted-returning-rate-of-patients | ecc81a8ac4fc1c89148511da8533d9a46e3d3e1a | a57e5973d44623c6b06b0ae113b0b1b0d6960bb6 | refs/heads/master | 2020-07-22T11:31:05.554809 | 2020-01-25T22:00:23 | 2020-01-25T22:00:23 | 207,185,872 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,221 | r | Bagging, RF _ Boosting after PCA.R | library(readr)
# na.strings lists the strings inside the file body that should be read as NA
Hospital_Train<-read.csv("5.3_Hospitals_train_cleaned.csv",na.strings = c('#N/A',' ','','#VALUE!'))
Hospital_Test<-read.csv("5.3_Hospitals_test_cleaned.csv",na.strings = c('#N/A',' ','','#VALUE!'))
# quick sanity checks on the two files
nrow(Hospital_Train)
nrow(Hospital_Test)
colnames(Hospital_Train)
colnames(Hospital_Test)
# the test file has no label; add a placeholder so both frames share the
# same columns and can be stacked and preprocessed identically
Hospital_Test$RETURN = "Unknown"
dt_combined = rbind(Hospital_Train,Hospital_Test)
## Type conversion: columns read with the wrong type are coerced here.
# (1)-(10): integer-coded categorical variables -> factor
dt_combined$WEEKDAY_ARR <- as.factor(dt_combined$WEEKDAY_ARR)
dt_combined$WEEKDAY_DEP <- as.factor(dt_combined$WEEKDAY_DEP)
dt_combined$HOUR_ARR <- as.factor(dt_combined$HOUR_ARR)
dt_combined$HOUR_DEP <- as.factor(dt_combined$HOUR_DEP)
dt_combined$MONTH_ARR <- as.factor(dt_combined$MONTH_ARR)
dt_combined$MONTH_DEP <- as.factor(dt_combined$MONTH_DEP)
dt_combined$SAME_DAY <- as.factor(dt_combined$SAME_DAY)
dt_combined$CONSULT_ORDER <- as.factor(dt_combined$CONSULT_ORDER)
dt_combined$CONSULT_CHARGE <- as.factor(dt_combined$CONSULT_CHARGE)
dt_combined$CONSULT_IN_ED <- as.factor(dt_combined$CONSULT_IN_ED)
# (11) CHARGES: 'factor' -> 'numeric'
# FIX: as.numeric() applied directly to a factor returns the internal level
# codes (1, 2, 3, ...), not the charge amounts.  Converting through
# character parses the printed values instead; any entry that is not a
# valid number becomes NA (with a coercion warning), which is the correct
# outcome here.
dt_combined$CHARGES <- as.numeric(as.character(dt_combined$CHARGES))
# Remove one of ArriveTime/DepartTime: DepartTime.
# NOTE(review): the original comment says only the DEP columns are dropped,
# but the ARR columns are removed as well -- confirm which time features
# were meant to survive.
dt_combined$WEEKDAY_DEP = NULL
dt_combined$HOUR_DEP = NULL
dt_combined$MONTH_DEP = NULL
dt_combined$WEEKDAY_ARR = NULL
dt_combined$HOUR_ARR = NULL
dt_combined$MONTH_ARR = NULL
# Recode the label as a 0/1 factor.  The placeholder "Unknown" rows from
# the test file map to 0 here; they are split back out below into dt_pred,
# so their label value is never used for training.
dt_combined$RETURN = as.factor(ifelse(dt_combined$RETURN=='Yes',1,0))
# combination 1
# Ed_result, Charges, Financial class, Age, Gender, Acuity_arr, Dc_result, PC (all others)
# NOTE(review): the column indices below are positional and only valid
# after the deletions above -- re-check them if any column is added/removed.
X <- model.matrix( ~ .-1, dt_combined[,c(2,5,6,8,12:19)])
# combination 2
# PC all variables that step() not choose
# X <- model.matrix( ~ .-1, dt_combined[,c(2,11,14:17,19,20)])
# combination 3
#Ed_result, Charges, Financial class, Age, Gender, Acuity_arr, PC (all others)
# X <- model.matrix( ~ .-1, dt_combined[,c(2,5,6,8,11:19)])
# PCA on the dummy-encoded matrix; keep the first two principal components
# as derived features on dt_combined
PC_X = prcomp(X)
summary(PC_X)
plot(PC_X)
PC1 = PC_X$x[,1]
PC2 = PC_X$x[,2]
dt_combined$PC = PC1
dt_combined$PC2 = PC2
#-----------------------
set.seed(1234)
# split combined data back into labelled rows (dt) and the unlabelled
# prediction set (dt_pred); then hold out 20% of labelled rows for testing
dt = dt_combined[1:nrow(Hospital_Train),]
dt_pred = dt_combined[(nrow(Hospital_Train)+1):nrow(dt_combined),]
# Split the data: train + valid + test
index_test = sample(nrow(dt),0.2*nrow(dt))
dt_test = dt[index_test,]
dt_rest = dt[-index_test,]
# combination 1
################################################################################################
# Features: ED_result, Charges, Financial class, Age, Gender, Acuity_arr,
# Dc_result, plus PC (first principal component of the remaining variables).
library(randomForest)
# Random forest, 200 trees.
# FIX: the original spelled the argument `mytry`; randomForest() silently
# absorbs unknown arguments through `...`, so mtry fell back to its default
# (sqrt(p)) instead of 5. Spelled correctly it is actually applied.
rf.200 = randomForest(RETURN ~ ED_RESULT + CHARGES + FINANCIAL_CLASS + AGE +
                        GENDER + ACUITY_ARR + DC_RESULT + PC,
                      data=dt_rest, ntree=200, mtry=5, importance=TRUE)
importance(rf.200)
varImpPlot(rf.200)
# Hold-out accuracy of the forest on the validation split.
rf_200_pre = predict(rf.200,newdata = dt_test)
acc_rf_200 = sum(ifelse(rf_200_pre==dt_test$RETURN,1,0))/nrow(dt_test)
acc_rf_200
# Ed_result, Charges, Financial class, Age, Gender, Acuity_arr, Dc_result, PC (all others)
# Gradient boosting on the same feature set; bernoulli loss needs a 0/1
# response, hence the factor -> 0/1 conversion in the formula.
library(gbm)
boost_rest=gbm(as.numeric(RETURN)-1 ~ ED_RESULT + CHARGES + FINANCIAL_CLASS + AGE +
                 GENDER + ACUITY_ARR + DC_RESULT + PC,
               data = dt_rest, n.trees=200, distribution="bernoulli")
summary(boost_rest)
boost_preds=predict(boost_rest,newdata=dt_test,n.trees=200,type="response")
# Threshold the predicted probabilities at 0.5.
boost_preds_class = ifelse(boost_preds>0.5,'1',"0")
# FIX: the dimension names were swapped - the FIRST argument of table() is
# the predicted class, the second the actual class.
table_preds_pca = table(boost_preds_class,dt_test$RETURN,dnn=c('Pred','Actual'))
# Accuracy = diagonal of the 2x2 confusion matrix over the total
# (assumes both classes occur in the predictions).
acc_preds = (table_preds_pca[1]+table_preds_pca[4])/sum(table_preds_pca)
acc_preds
# combination 2
##################################################################################################
# Features: the variables NOT selected by step() in an earlier pass, plus PC.
library(randomForest) # 200 trees:
# FIX: `mytry` -> `mtry` (the typo was silently swallowed by `...`, so the
# intended mtry=5 was never applied).
rf.200 = randomForest(RETURN ~ ED_RESULT + FINANCIAL_CLASS + GENDER + ETHNICITY + RACE +
                        ACUITY_ARR + CONSULT_ORDER + RISK + ADMIT_RESULT + AGE + SAME_DAY + PC,
                      data=dt_rest, ntree=200, mtry=5, importance=TRUE)
importance(rf.200)
varImpPlot(rf.200)
# Hold-out accuracy of the forest.
rf_200_pre = predict(rf.200,newdata = dt_test)
acc_rf_200 = sum(ifelse(rf_200_pre==dt_test$RETURN,1,0))/nrow(dt_test)
acc_rf_200
# Gradient boosting on the same feature set (bernoulli = 0/1 response).
library(gbm)
boost_rest=gbm(as.numeric(RETURN)-1 ~ ED_RESULT + FINANCIAL_CLASS + GENDER + ETHNICITY + RACE +
                 ACUITY_ARR + CONSULT_ORDER + RISK + ADMIT_RESULT + AGE + SAME_DAY + PC,
               data = dt_rest, n.trees=200, distribution="bernoulli")
summary(boost_rest)
boost_preds=predict(boost_rest,newdata=dt_test,n.trees=200,type="response")
boost_preds_class = ifelse(boost_preds>0.5,'1',"0")
# FIX: dimension names were swapped - first table() argument is the
# prediction, second the actual class.
table_preds_pca = table(boost_preds_class,dt_test$RETURN,dnn=c('Pred','Actual'))
acc_preds = (table_preds_pca[1]+table_preds_pca[4])/sum(table_preds_pca)
acc_preds
# Bagging: a random forest with mtry equal to the number of predictors (12),
# so every split considers all variables.
library(randomForest)
bag.200=randomForest(RETURN ~ ED_RESULT + FINANCIAL_CLASS + GENDER + ETHNICITY + RACE +
                       ACUITY_ARR + CONSULT_ORDER + RISK + ADMIT_RESULT + AGE + SAME_DAY + PC,
                     data=dt_rest,ntree=200,mtry=12,importance=TRUE)
bag.200
importance(bag.200)
varImpPlot(bag.200)
# Hold-out accuracy of the bagged model.
bag_200_pre = predict(bag.200,newdata = dt_test)
acc_bag_200 = sum(ifelse(bag_200_pre==dt_test$RETURN,1,0))/nrow(dt_test)
acc_bag_200
# combination 3
##################################################################################################
# Features: as combination 1 but without DC_RESULT.
library(randomForest)
# FIX: `mytry` -> `mtry` (typo silently swallowed by `...`; mtry=5 was never
# actually applied in the original run).
rf.200 = randomForest(RETURN ~ ED_RESULT + CHARGES + FINANCIAL_CLASS + AGE +
                        GENDER + ACUITY_ARR + PC,
                      data=dt_rest, ntree=200, mtry=5, importance=TRUE)
importance(rf.200)
varImpPlot(rf.200)
# Hold-out accuracy of the forest.
rf_200_pre = predict(rf.200,newdata = dt_test)
acc_rf_200 = sum(ifelse(rf_200_pre==dt_test$RETURN,1,0))/nrow(dt_test)
acc_rf_200
# Gradient boosting on the same feature set.
library(gbm)
boost_rest=gbm(as.numeric(RETURN)-1 ~ ED_RESULT + CHARGES + FINANCIAL_CLASS + AGE +
                 GENDER + ACUITY_ARR + PC, data = dt_rest, n.trees=200, distribution="bernoulli")
summary(boost_rest)
boost_preds=predict(boost_rest,newdata=dt_test,n.trees=200,type="response")
boost_preds_class = ifelse(boost_preds>0.5,'1',"0")
# FIX: dimension names were swapped - first table() argument is the
# prediction, second the actual class.
table_preds_pca = table(boost_preds_class,dt_test$RETURN,dnn=c('Pred','Actual'))
acc_preds = (table_preds_pca[1]+table_preds_pca[4])/sum(table_preds_pca)
acc_preds
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Scoring the held-back test set with the boosted model (kept for reference):
#Test_pred = predict(boost_rest,newdata = dt_pred,n.trees=200,type="response")
#result = ifelse(Test_pred>0.5,'YES',"NO")
#write.csv(result,'5.12 PCB Final.csv')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Test_prediction with the random forest (kept for reference):
#Test_pred = predict(rf.200,newdata = dt_pred)
#result = ifelse(Test_pred=='1','Yes','No')
#write.csv(result,'5.12 RFPC.csv')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
c199b6f233fde729b1f0734ec3dbc85da9a5294b | 4a9747c24015dd65a3bf460a62d01109a8058c43 | /R/hhcartr_export_predict.R | 0045a82f67d885ef136427bded7398825d2a1e4c | [] | no_license | cran/hhcartr | 76b301038254e80ed795316949aa9163b686b20c | 55a061c1fe0ce90e8298372f39c40d6e5d8cc1fb | refs/heads/master | 2023-06-07T04:26:11.104290 | 2021-07-02T06:00:08 | 2021-07-02T06:00:08 | 382,391,912 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,791 | r | hhcartr_export_predict.R | # source: hhcartr_export_predict.R
#################################################################################################
#'
#' predict - Create generic S3method to make predictions via predict.hhcartr.
#' Needs export entry in the NAMESPACE file.
#'
#' This function creates a generic S3method predict which is used to call predict.hhcartr when
#' an object of type hhcartr passed to the predict function, i.e. an object that is returned
#' from the fit() function. The object created from the predict function supports the accuracy and
#' predictions methods. The accuracy method returns the accuracy achieved on the test_data and the
#' method predictions returns the actual predictions made on the test_data.
#'
#' @param object Unused parameter.
#' @param ... Unused parameter.
#' @param test_data The test dataset the user wants to make predictions on.
#'
#' @return exposes the accuracy() and predictions() methods.
#'
#' @example man/examples/predict.R
#'
#' @export
predict.hhcartr <- function(object, ..., test_data){
  # Settings saved by fit() in the package environment; both are NA until
  # fit() has been run, so fail fast in that case. Scalar `||` makes the
  # guard's single-value intent explicit.
  useIdentity <- pkg.env$useIdentity
  classify <- pkg.env$classify
  if(is.na(useIdentity) || is.na(classify)){
    stop("hhcartr(predict.hhcartr) Run the fit() function before trying to make predictions.")
  }
  # Validate the test data - the response (y) must be the last column.
  # FIX: the original wrote `test_data[,1:ncol(test_data) - 1]`; because `:`
  # binds tighter than `-`, that is c(0, 1, ..., ncol-1) and only selected the
  # right columns because R silently drops zero indices. Parenthesised so the
  # intent ("all but the last column") is stated directly.
  hhcart_verify_input_data(test_data[, 1:(ncol(test_data) - 1)],
                           as.factor(test_data[, ncol(test_data)]),
                           classify = classify)
  # Make sure the response column is a factor.
  test_data[, ncol(test_data)] <- as.factor(test_data[, ncol(test_data)])
  # Run the fitted trees over the test set.
  prediction_output <- make_predictions(object,
                                        test_data,
                                        useIdentity,
                                        classify,
                                        objectid = 999999)
  # make_predictions() returns: [[1]] per-tree accuracy, [[2]] misclassification
  # rate, [[3]] per-row predictions for each tree.
  stats <- prediction_output[[1]]
  preds <- prediction_output[[3]]
  # One row per tree/fold with its accuracy rounded to two decimals.
  df <- data.frame()
  for (i in seq_along(stats)){
    nRow <- data.frame(Fold = i, Accuracy = round(stats[[i]], 2))
    df <- rbind(df, nRow)
  }
  # Report the per-fold results and the overall mean accuracy.
  msg <- "Predicting on the Test data of the %s dataset..."
  msgs <- sprintf(msg, get_data_description())
  message(msgs)
  msg <- "Test Data Accuracy: Mean accuracy-[%s]"
  msgs <- sprintf(msg, round(mean(df$Accuracy), 2))
  message(msgs)
  # Expose accessors rather than raw data; the returned object supports
  # accuracy() and predictions().
  parms <- list(
    accuracy = function(){
      return(df)
    },
    predictions = function(){
      return(preds)
    }
  )
  # Tag the list so it can be recognised downstream.
  class(parms) <- append(class(parms), "predict")
  return(parms)
}
|
d251187e87dcdfee6c7c9ef4b089e493aa27f682 | b4638dd481c4e17e2b01f1d28a06e6fb9ca5ccbe | /switch_to_av/av_share_plot.R | 38e3d8d751bc0580cfa87326aa4d39ba93611749 | [] | no_license | cllorca1/land_use_transport_analysis | d3f81ae1c0acc656b92ea6617df1b2fef00e2036 | 7f1b3a619484caef2733f6f9c02e9862892c649f | refs/heads/master | 2021-06-04T20:29:47.073581 | 2021-05-27T08:38:37 | 2021-05-27T08:38:37 | 142,977,282 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 402 | r | av_share_plot.R |
# Share of autonomous vehicles over time, from the SILO AV-ownership output.
av_fleet <- read_csv("c:/models/silo/muc/scenOutput/AVA_none/siloResults/avOwnership.csv")
# Line plot of the AV fraction of the total auto fleet per simulation year,
# with vertical x-axis labels for readability.
ggplot(av_fleet, aes(x = year, y = avs/autos)) +
  geom_line(size = 2) +
  xlab("Year") +
  ylab("Share of autonomous vehicles") +
  theme_bw() +
  theme(axis.text.x = element_text(angle = 90))
# Save the most recently displayed plot as the paper figure.
ggsave("C:/projects/Papers/2020_cities/figs/av_share.pdf", width = 8, units = "cm", height = 7, scale = 1.3)
|
54591beba464976688774c775ecb8c2185c3a776 | 93d3f810a4169d7bd993641e6f776af1616dd79e | /MCPcounter/workflow/docker/mcpcounter/bin/mcpcounter.R | 49fee8aeb4b62fc52286aeb3a37a61f074afd2cc | [] | no_license | CRI-iAtlas/iatlas-workflows | 699c3b897e580a391e9700b48b91b64e2a03eb55 | d94151d3aaadee96b52f27e4ce84692c0366fe75 | refs/heads/develop | 2023-05-24T11:08:56.155005 | 2023-05-23T15:21:58 | 2023-05-23T15:21:58 | 156,773,901 | 2 | 4 | null | 2023-05-22T21:19:12 | 2018-11-08T21:54:27 | Common Workflow Language | UTF-8 | R | false | false | 2,277 | r | mcpcounter.R | library(MCPcounter)
library(argparse)
library(readr)
library(tibble)
library(magrittr)
# Command-line interface ------------------------------------------------------
parser = ArgumentParser(description = "Deconvolute tumor samples with MCPcounter")
parser$add_argument(
  "--input_expression_file",
  type = "character",
  required = TRUE,
  help = "Path to input matrix of microarray expression data. Tab separated file with features in rows and samples in columns."
)
parser$add_argument(
  "--output_file",
  default = "./output_file.tsv",
  type = "character",
  help = "Path to output file."
)
parser$add_argument(
  "--features_type",
  default = "affy133P2_probesets",
  type = "character",
  help = "Type of identifiers for expression features. Defaults to 'affy133P2_probesets' for Affymetrix Human Genome 133 Plus 2.0 probesets. Other options are 'HUGO_symbols' (Official gene symbols) or 'ENTREZ_ID' (Entrez Gene ID)"
)
parser$add_argument(
  "--input_probeset_file",
  type = "character",
  help = "Path to input table of gene data. Tab separated file of probesets transcriptomic markers and corresponding cell populations. Fetched from github by a call to read.table by default, but can also be a data.frame"
)
parser$add_argument(
  "--input_gene_file",
  type = "character",
  help = "Path to input table of gene data. Tab separated file of genes transcriptomic markers (HUGO symbols or ENTREZ_ID) and corresponding cell populations. Fetched from github by a call to read.table by default, but can also be a data.frame"
)
args = parser$parse_args()
# Read the expression matrix: the first column carries the feature IDs and
# becomes the rownames; all remaining columns are samples.
expression <- args$input_expression_file %>%
  readr::read_tsv() %>%
  as.data.frame() %>%
  tibble::column_to_rownames(., colnames(.)[[1]]) %>%
  as.matrix()
# Assemble the MCPcounter argument list; the optional marker tables are only
# added when the corresponding flag was supplied on the command line.
arg_list = list("expression" = expression, "featuresType" = args$features_type)
# NOTE(review): tsv_file_to_matrix() is not defined in this file and does not
# look like an MCPcounter export - presumably it comes from a sourced helper;
# confirm it is in scope before using --input_probeset_file/--input_gene_file.
if(!is.null(args$input_probeset_file)){
  probesets <- tsv_file_to_matrix(args$input_probeset_file)
  arg_list[['probesets']] <- probesets
}
if(!is.null(args$input_gene_file)){
  genes <- tsv_file_to_matrix(args$input_gene_file)
  arg_list[['genes']] <- genes
}
# Run the deconvolution, echo the result to the console (print() returns its
# argument, so it can sit inside the pipe) and write it out as TSV.
result <-
  do.call(MCPcounter::MCPcounter.estimate, arg_list) %>%
  as.data.frame() %>%
  tibble::rownames_to_column("feature") %>%
  dplyr::as_tibble() %>%
  print() %>%
  readr::write_tsv(args$output_file)
34ecdd3ecb7c480d1d0b8f509559cd374b520aed | 6fdbd78c7eddcbc9093226af7a7bd513da3f97bf | /Rscripts/Ivan_vowpal_wabbit.R | e7f697e5493f0a5c2b28d97ff5bd19c37bfe6a7e | [] | no_license | enfeizhan/Melbourne_Datathon_2015_Kaggle | a904c3679ad384ccd085dcfc9fc3b17ec5745c53 | a60676e19313b3e6fe3947a1531cde7542c9b9c5 | refs/heads/master | 2020-12-31T02:23:27.564502 | 2015-12-10T11:48:13 | 2015-12-10T11:48:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,176 | r | Ivan_vowpal_wabbit.R | # install.packages("devtools")
# devtools::install_github("JohnLangford/vowpal_wabbit", subdir = "R/r.vw")
# NOTE(review): machine-specific working directory plus a clear-the-workspace
# call; fine for a personal analysis script, but both should be removed before
# sharing or automating.
setwd('/Users/ivanliu/Google Drive/Melbourne Datathon/Melbourne_Datathon_2015_Kaggle/vowpal_wabbit')
rm(list=ls()); gc()
# r.vw provides vw(); the sourced script below supplies helpers like to_vw().
require(data.table);library(r.vw);library(ggplot2);library(pROC)
# Presumably provides the `total` (train) and `test` data frames used below -
# they are not created anywhere else in this script.
load('../data/9_train_validation_test_20151122.RData');ls()
source('../Rscripts/Ivan_vowpal_wabbit_func.R')
# setwd where the data would be
# Feature columns are 3 .. (last-1); the final column is the 0/1 target.
feat <- names(total)[c(3:(ncol(total)-1))]; target <- 'flag_class'
# Convert both sets to vowpal wabbit's text input format.
train_dt <- to_vw(total, feat, target, 'data/train_dt.vw') # total
test_dt <- to_vw(test, feat, target, 'data/test_dt.vw') # test
# Plain-text label file used for external AUC evaluation.
write.table(test_dt$flag_class, file='data/test_labels.txt', row.names = F, col.names = F, quote = F)
# File paths shared by the model runs below.
training_data='data/train_dt.vw'
test_data='data/test_dt.vw'
test_labels = "data/test_labels.txt"
out_probs = "predictions/sub.txt"
model = "models/mdl.vw"
# AUC using perf - Download at: osmot.cs.cornell.edu/kddcup/software.html
# Shows files in the working directory: /data
list.files('data/')
# Grid 1: learning rate x VW option strings, fitted in a plain loop.
grid = expand.grid(eta=c(0.5, 1),
                   extra=c('--holdout_period 10000 --normalized --adaptive --invariant',
                           '--nn 30 --holdout_period 10000 --normalized --adaptive --invariant',
                           '-q:: --holdout_period 10000 --normalized --adaptive --invariant'))
# FIX: use seq_len() rather than 1:nrow(grid); the latter yields c(1, 0) and
# runs bogus iterations if the grid is ever empty.
for(i in seq_len(nrow(grid))){
  g = grid[i, ]
  out_probs = paste0("predictions/submission_vw_20151202_NoReg_", g[['eta']], "_", i,".txt")
  model = paste0("models/mdl",i,".vw")
  # out_probs = paste0("predictions/submission_vw_20151126_0.25_1.txt")
  # NOTE(review): training_data is passed as BOTH the training and the
  # validation/prediction file here (the pROC grid below uses test_data
  # instead) - confirm this is intentional before treating the output files
  # as submissions.
  auc = vw(training_data, training_data, loss = "logistic",
           model, b = 30, learning_rate = g[['eta']],
           passes = 20, l1=NULL, l2=NULL, early_terminate = 2,
           link_function = "--link=logistic", extra = g[['extra']],
           out_probs = out_probs, validation_labels = test_labels, verbose = TRUE,
           do_evaluation = F, use_perf=FALSE, plot_roc=F)
  #extra='--decay_learning_rate 0.9 --ksvm --kernel linear -q ::'
  # print(auc)
  # Results noted from earlier runs:
  # [1] 0.7404759
  # 0.7749233 'nn 80'
}
# AUC using pROC - Saving plots to disk
### create a parameter grid
grid = expand.grid(l1=c(1e-06),
                   l2=c(1e-06),
                   eta=c(0.1, 0.2),
                   ps=c(12,18),
                   extra=c('--nn 120', '--nn 80'))
cat('Running grid search\n')
# All ROC curves go into one multi-page PDF.
pdf('output/ROCs.pdf')
# One AUC per grid row; seq_len() is safe even for an empty grid
# (1:nrow(grid) would give c(1, 0)).
aucs <- lapply(seq_len(nrow(grid)), function(i){
  g = grid[i, ]
  auc = vw(training_data=training_data, # files relative paths
           validation_data=test_data,
           validation_labels=test_labels, model=model,
           # grid options
           loss='logistic', b=30, learning_rate=g[['eta']],
           passes=g[['ps']], l1=g[['l1']], l2=g[['l2']],
           early_terminate=2, extra=g[['extra']],
           # ROC-AUC related options
           use_perf=FALSE, plot_roc=TRUE,
           do_evaluation = TRUE # If false doesn't compute AUC, use only for prediction
           )
  auc
})
dev.off()
results = cbind(iter=seq_len(nrow(grid)), grid, auc=do.call(rbind, aucs))
print(results)
# Results recorded from earlier runs, kept for reference:
#   iter    l1    l2  eta ps    extra       auc
# 1    1 1e-06 1e-06 0.05  6  --nn 30 0.7403335
# 2    2 1e-06 1e-06 0.15  6  --nn 30 0.7604067
# 3    3 1e-06 1e-06 0.05 12  --nn 30 0.7403335
# 4    4 1e-06 1e-06 0.15 12  --nn 30 0.7654396
# 5    5 1e-06 1e-06 0.05  6  --nn 80 0.7403404
# 6    6 1e-06 1e-06 0.15  6  --nn 80 0.7652404
# 7    7 1e-06 1e-06 0.05 12  --nn 80 0.7403404
# 8    8 1e-06 1e-06 0.15 12  --nn 80 0.7702607
# 1    1 1e-06 1e-06 0.1  12 --nn 120 0.7661254
# 2    2 1e-06 1e-06 0.2  12 --nn 120 0.7736231
# 3    3 1e-06 1e-06 0.1  18 --nn 120 0.7695463
# 4    4 1e-06 1e-06 0.2  18 --nn 120 0.7747579
# 5    5 1e-06 1e-06 0.1  12  --nn 80 0.7645808
# 6    6 1e-06 1e-06 0.2  12  --nn 80 0.7728860
# 7    7 1e-06 1e-06 0.1  18  --nn 80 0.7678433
# 8    8 1e-06 1e-06 0.2  18  --nn 80 0.7741317
# Summary plot: one point per grid iteration, coloured by the extra options.
p = ggplot(results, aes(iter, auc, color=extra)) +
  geom_point(size=3) +
  theme_bw() +
  # FIX: labs() takes named arguments, not a list; labs(list(...)) was an old
  # ggplot2 (<2.0) idiom that no longer sets the labels in current versions.
  labs(x='Iteration', y='AUC',
       title='Logistic regression results')
print(p)
ggsave('output/results_plot.png', plot=p)
279951810c9db5145ebc6966972729cc48525b4a | 9044cb01fd36302a17c4aa21d5887a75bfb55331 | /arules.R | 36ec01f203872372b0f621d38ad6897259c34b57 | [] | no_license | scarlettswerdlow/open_payments | 30a1dad8e141965f66d6e09302cfc2e47daf7d99 | 3d7da4c1af08bd65904bdf17ed70c1333c5a42b2 | refs/heads/master | 2021-01-10T04:03:09.007139 | 2015-05-31T20:43:52 | 2015-05-31T20:43:52 | 36,332,008 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,962 | r | arules.R | ###############################################################################
#                                                                             #
#  Big Data                                                                   #
#  Project: Association rules                                                 #
#  Coded by Scarlett Swerdlow                                                 #
#  scarlettswerdlow@uchicago.edu                                              #
#  May 26, 2015                                                               #
#                                                                             #
###############################################################################
###############
#  CONSTANTS  #
###############
# Project working directory and the raw Open Payments extract analysed below.
WD <- "~/Google Drive/Grad school/Courses/BUS41201 Big Data/project/Big Data Final Project/"
FN <- "data/OPPR_ALL_DTL_GNRL_09302014.csv"
#################
#  SOURCE CODE  #
#################
# loadData(), graphNetwork() and graphNei() are defined in the sourced files,
# not here.
setwd(WD)
source("code/data.R")
source("code/arules_starter.R")
general_pmts <- loadData(WD, FN)
# Build two networks from the payments: manufacturers linked via shared
# physicians, and physicians linked via shared manufacturers (the latter with
# explicit support/confidence thresholds s and c).
manu_network <- graphNetwork(general_pmts, "manu", "phys_id")
phys_network <- graphNetwork(general_pmts, "phys_id", "manu", s=.01, c=.9)
# Graph manufacturers network
manug <- manu_network$network
V(manug)$color <- "turquoise"
par(mar=c(0,0,0,0)+.01)
plot(manug, vertex.label=NA, vertex.size=3, edge.curved=F)
# Graph physician network
physg <- phys_network$network
V(physg)$color <- "pink"
par(mar=c(0,0,0,0)+.01)
plot(physg, vertex.label=NA, vertex.size=3, edge.curved=F)
# Graph neighborhood around the manufacturer with most degrees and most
# betweenness (elements $d and $b of the network object).
graphNei(manug, 2, labels(manu_network$d[6]), T)
graphNei(manug, 2, labels(manu_network$b[6]), T)
# Same neighborhoods for the PHYSICIAN with most degrees and betweenness
# (the original comment said "manufacturer" - copy/paste slip).
# These networks are so dense that the neighborhood plots are not helpful.
graphNei(physg, 2, labels(phys_network$d[6]), F)
graphNei(physg, 2, labels(phys_network$b[6]), F)
|
4eb3c284372f1e514f52c1dcffe77e0e16dcd479 | 9a05161b8cdfe1d6226501b167ee467ea93e4642 | /Shiny_app/FA_model.R | fd963c1203e1c28167b729cb3abba81e4419c216 | [] | no_license | Philipp-Neubauer/AdaptiveActivityModel | 21142922b81de0e3b8c1284e93d4b6e4b6da8a5a | e790c52c65d42a6011d26aebdcba1a876312b82b | refs/heads/master | 2020-09-27T11:45:22.394394 | 2019-05-20T10:23:13 | 2019-05-20T10:23:13 | 66,080,774 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 12,683 | r | FA_model.R | require(dplyr)
require(purrr)
require(tidyr)
require(ggplot2)
require(cowplot)
# Length from weight, assuming the cubic condition relation w = 0.01 * l^3.
lw <- function(w) {
  (w / 0.01)^(1 / 3)
}
# Weight from length; inverse of lw().
wl <- function(l) {
  0.01 * l^3
}
# Rescale a vector so that its maximum is 1.
sc <- function(x) {
  x / max(x)
}
# Log-odds transform of a probability.
logit <- function(p) {
  log(p / (1 - p))
}
# Logistic switch in m around mstar, with the transition axis rotated by
# atan(b) and shifted by a; c controls the steepness.
inv_logit3 <- function(m, mstar, a, b, c) {
  theta <- atan(b)
  z <- (m - mstar) * cos(theta) - a * sin(theta)
  1 / (1 + exp(-(c * z)))
}
# Activity model
# Closed-form equilibrium activity level (tau) for a consumer of mass m,
# balancing temperature-dependent energy gain against activity cost and
# mortality. The final one-line expression is the function's return value; it
# is machine-generated computer-algebra output (note the verbatim spacing) and
# should not be edited term by term.
# NOTE(review): the defaults here (phi=10, beta=0.75) differ from model_out()'s
# (phi=0.15, beta=0.2); get_taus() always passes explicit values, so the
# defaults only matter for direct calls - confirm before relying on them.
eval_tau_eq_temp <- function(Ea,
                             temp,
                             temp_ref=15,
                             gamma=50,
                             delta=2,
                             phi=10,
                             h=30,
                             beta=0.75,
                             k=2,
                             p=0.8,
                             q=0.9,
                             n=0.8,
                             m=100,
                             M=0.2,
                             v=1){
  # Arrhenius-type temperature correction: equals 1 at temp == temp_ref.
  # Temperatures are shifted so temp_ref maps onto 288.2 K; 8.6173324e-5 is
  # the Boltzmann constant in eV/K.
  tc = exp(Ea*((temp+(288.2-temp_ref))-288.2)/(8.6173324*10^(-5)*(temp+(288.2-temp_ref))*288.2))
  # Analytical solution for tau, vectorised over temp and m.
  -(M*delta*h*k*m^(p + q + 1)*tc - h*k*m^(n + p + q)*tc*v - sqrt(-((beta - 1)*h*k*m^(n + 2*p + 3*q) + h*k*m^(n + 2*p + 3*q)*phi)*tc^2*v^2 - ((beta - 1)*delta*gamma*k*m^(3*p + 2*q + 1) + delta*gamma*k*m^(3*p + 2*q + 1)*phi)*M^2*tc + (((beta - 1)*delta*h*k*m^(2*p + 3*q + 1) + delta*h*k*m^(2*p + 3*q + 1)*phi)*tc^2 + (gamma*h*m^(3*p + 3*q)*phi^2 + (beta - 1)*gamma*k*m^(n + 3*p + 2*q) + (beta^2 - 2*beta + 1)*gamma*h*m^(3*p + 3*q) + (2*(beta - 1)*gamma*h*m^(3*p + 3*q) + gamma*k*m^(n + 3*p + 2*q))*phi)*tc)*M*v)*h)/(M*delta*gamma*k*m^(2*p + 1) - ((beta - 1)*gamma*h*m^(2*p + q) + gamma*h*m^(2*p + q)*phi + gamma*k*m^(n + 2*p))*v)
}
# Energy-budget outputs for an individual of mass m at activity level tau_max
# and temperature temp. Returns a one-row-per-input data frame of feeding
# level, intake, metabolic costs, growth surplus, efficiency and predation
# rate. (The `r` argument is accepted for interface compatibility but is not
# used in this body.)
model_out <- function(tau_max,
                      temp,
                      temp_ref=15,
                      Ea,
                      r=0.2,
                      gamma=50,
                      delta=2,
                      phi=0.15,
                      h=30,
                      beta=0.2,
                      k=2,
                      p=0.8,
                      q=0.9,
                      n=0.8,
                      m=100){
  # Arrhenius-style temperature correction; equals 1 at temp == temp_ref.
  tc <- exp(Ea*((temp+(288.2-temp_ref))-288.2)/(8.6173324*10^(-5)*(temp+(288.2-temp_ref))*288.2))
  # Functional response: realised feeding level in [0, 1].
  feeding <- tau_max*gamma*m^p/(tau_max*gamma*m^p+tc*h*m^q)
  # Intake after subtracting the phi and beta loss fractions.
  intake <- (1-phi-beta)*feeding*tc*h*m^q
  # Maintenance: standard metabolism plus activity-dependent cost.
  maintenance <- k*tc*m^n + tau_max*delta*k*tc*m
  # Energy available for growth, and its efficiency per unit feeding
  # (negative efficiencies are floored at zero).
  surplus <- intake - maintenance
  growth_eff <- surplus/feeding
  growth_eff[growth_eff < 0] <- 0
  # Predation exposure scales with feeding effort.
  risk <- feeding/phi
  # Total metabolism including the beta-scaled feeding cost.
  total_met <- beta*feeding*tc*h*m^q + maintenance
  data_frame(`Feeding level` = feeding,
             Consumption = intake,
             `C used for Metabolism` = maintenance,
             `C for growth` = surplus,
             Efficiency = growth_eff,
             `Predation rate` = risk,
             Metabolism = total_met,
             Std = k*tc*m^n)
}
# Integrate individual growth through time for every temperature and find, per
# temperature, the candidate maturation size (column of the l-grid) that
# maximises lifetime reproductive output R0.
#
# temp: vector of temperatures; l: number of candidate maturation sizes;
# lm: candidate maturation lengths (wl(lm) seeds the start weight);
# v: parameter list consumed by get_taus() (also read for v$v, v$M, v$nu);
# dt: number of time steps spanning [0, tmax].
# Returns list(winfs, growth, R0s).
# NOTE: uses shiny's withProgress()/incProgress(), so it must be called from
# within a shiny reactive context; also depends on reshape2 and dplyr.
model_out_growth <- function(temp,
                             temp_ref=15,
                             l,
                             lm,
                             Ea,
                             gamma=50,
                             delta=2,
                             phi=0.15,
                             h=30,
                             beta=0.2,
                             k=2,
                             p=0.8,
                             q=0.9,
                             n=0.8,
                             tmax=10,
                             slope=0.05,
                             tr = 1,
                             v=NULL,
                             dt=100){
  #browser()
  temps = length(temp)
  # Arrhenius-style temperature correction (1 at temp_ref).
  tc = exp(Ea*((temp+(288.2-temp_ref))-288.2)/(8.6173324*10^(-5)*(temp+(288.2-temp_ref))*288.2))
  ts <- seq(0,tmax,l=dt)
  withProgress(message = 'Calculating Winf', value = 0, {
    # State arrays indexed [temperature, maturation-size column, time step]:
    # weight s, reproductive allocation, cumulative R0 and cumulative hazard.
    s <- array(0, c(temps,l,dt))
    allocs <- array(0, c(temps,l,dt))
    R0 <- array(0, c(temps,l,dt))
    surv <- array(0, c(temps,l,dt))
    s[,,1] <- min(wl(lm))
    dts <- (tmax/(dt-1))
    # Forward-Euler integration over time.
    for(t in 2:dt) {
      # Realised activity at the current weights.
      tm1 <- get_taus(v,1,10,temp,s[,,t-1])
      # Net energy gain: assimilated intake minus standard + activity costs.
      Es <- (1-phi-beta)*(tm1*gamma*s[,,t-1]^p/(tm1*gamma*s[,,t-1]^p+tc*h*s[,,t-1]^q))*tc*h*s[,,t-1]^q -k*tc*s[,,t-1]^n-tm1*delta*k*tc*s[,,t-1]
      # Allocation to reproduction: a logistic switch around each candidate
      # maturation size, made monotone in time via pmax with the prior step.
      allocs[,,t] <- pmax(allocs[,,t-1],t(apply(lw(s[,,t-1]),1,inv_logit3,lm,ts[t],slope,tr)))
      # Growth uses the un-allocated share of the surplus.
      s[,,t] <- s[,,t-1]+dts*(1-allocs[,,t])*Es
      # Cumulative mortality hazard: activity-dependent (tau * v$v) plus
      # background v$M, size-scaled by v$nu.
      surv[,,t] <- surv[,,t-1] + dts*(tm1*v$v+v$M)*s[,,t]^v$nu
      # Survival-discounted reproductive output.
      R0[,,t] <- R0[,,t-1] + dts*allocs[,,t]*Es*exp(-surv[,,t])
      incProgress(dts/tmax, detail = paste("Time", round(ts[t],2)))
    }
    ls <- lw(s)
    # Per temperature, pick the maturation column with the highest final R0
    # (NA when every column is NaN).
    opt <- apply(R0[,,dt],1,function(x) ifelse(any(!is.nan(x)),which.max(x),NA))
    #browser()
    # Reduce the 3-d arrays to the optimal column's trajectory per temperature.
    s=t(sapply(1:temps,function(x) s[x,opt[x],]))
    allocs=t(sapply(1:temps,function(x) allocs[x,opt[x],]))
    R00s=t(sapply(1:temps,function(x) R0[x,opt[x],]))
    # Reshape each trajectory matrix into a long data frame keyed by
    # Temperature and time.
    lss <- reshape2::melt(s)
    colnames(lss) <- c('Temperature','t','size')
    lss$opt <- opt[lss$Temperature]
    lss$Temperature <- temp[lss$Temperature]
    lss$t <- ts[lss$t]
    alloc <- reshape2::melt(allocs)
    colnames(alloc) <- c('Temperature','t','allocs')
    alloc$opt <- opt[alloc$Temperature]
    alloc$Temperature <- temp[alloc$Temperature]
    alloc$t <- ts[alloc$t]
    R0s <- reshape2::melt(R00s)
    colnames(R0s) <- c('Temperature','t','R0')
    R0s$opt <- opt[R0s$Temperature]
    R0s$Temperature <- temp[R0s$Temperature]
    R0s$t <- ts[R0s$t]
    #browser()
    growth <- inner_join(inner_join(lss,alloc),R0s) %>% arrange(t,Temperature)
    # Approximate asymptotic size per temperature: the point where allocation
    # crosses 0.5 (NA when it never gets within 0.05 of that).
    winfs <- growth %>% group_by(Temperature) %>%
      summarise(l=size[ifelse(any(abs(allocs-0.5)<0.05),which.min(abs(allocs-0.5)),NA)-1],
                t=t[ifelse(any(abs(allocs-0.5)<0.05),which.min(abs(allocs-0.5)),NA)-1],
                opt=unique(opt))
    # Central-difference sensitivity of final R0 to temperature, scaled by the
    # size at allocation crossover.
    # NOTE(review): `tau` below is assigned but never used, and 0.04 (and the
    # factor 10 in L) are unexplained magic constants - confirm their units.
    G <- rep(NA,length(unique(winfs$Temperature)))
    for(t in 2:(length(winfs$Temperature)-1)) {
      tau = winfs$Temperature[t]
      this.l <- winfs$l[t]
      if (is.na(this.l)) next
      tPM <- R0[t,opt[t],dt]
      lPM <- R0[t-1,opt[t],dt]
      nPM <- R0[t+1,opt[t],dt]
      sl <- abs((nPM-lPM)/(winfs$Temperature[t+1]-winfs$Temperature[t-1]))
      G[t] <- 0.04*this.l*(sl/tPM)
    }
    #sG <- sign(G)
    winfs$G <- G/winfs$t ### why divide?
    winfs$L <- 10*(lw(winfs$l + winfs$G)-lw(winfs$l))
  })
  list(winfs=winfs, growth=growth, R0s = data.frame(R0=sapply(1:nrow(R0[,,dt]),function(x) R0[x,opt[x],dt]),
                                                    Temperature = winfs$Temperature))
}
# Companion to model_out_growth(): integrates growth over a grid of
# temperatures x gamma values (thermal and attack-rate "reaction norms") with
# a single maturation size mstar, and returns the length/allocation
# trajectories along the reference temperature and the reference gamma.
# NOTE: like model_out_growth(), requires a shiny reactive context
# (withProgress/incProgress) plus reshape2 and dplyr.
model_out_growth_check <- function(temp,
                                   temp_ref=15,
                                   Ea,
                                   gamma=50,
                                   delta=2,
                                   phi=0.15,
                                   h=30,
                                   beta=0.2,
                                   k=2,
                                   p=0.8,
                                   q=0.9,
                                   n=0.8,
                                   mstar=1000,
                                   tmax=10,
                                   slope=0.05,
                                   tr = 1,
                                   v=NULL,
                                   dt=100,
                                   lm=NULL){
  temps = length(temp)
  # Arrhenius-style temperature correction (1 at temp_ref).
  tc = exp(Ea*((temp+(288.2-temp_ref))-288.2)/(8.6173324*10^(-5)*(temp+(288.2-temp_ref))*288.2))
  ts <- seq(0,tmax,l=dt)
  withProgress(message = 'Calculating Norms', value = 0, {
    # State arrays indexed [temperature, gamma value, time step].
    s <- array(0, c(temps,length(gamma),dt))
    allocs <- array(0, c(temps,length(gamma),dt))
    s[,,1] <- min(wl(lm))
    dts <- (tmax/(dt-1))
    gammas <- matrix(gamma,temps,length(gamma),byrow = T)
    gs <- length(gamma)
    # Forward-Euler integration over time.
    for(t in 2:dt) {
      # Activity per gamma column: get_taus() reads gamma from the parameter
      # list, so clone v with the column's gamma substituted in.
      tm1 <- sapply(1:gs,function(g) {
        w <- v
        w$gamma <- gamma[g]
        get_taus(w,1,10,temp,s[,g,t-1])
      })
      # Feeding level and net energy gain, as in model_out_growth().
      f <- (tm1*gammas*s[,,t-1]^p/(tm1*gammas*s[,,t-1]^p+tc*h*s[,,t-1]^q))
      Es <- (1-phi-beta)*f*tc*h*s[,,t-1]^q-k*tc*s[,,t-1]^n-tm1*delta*k*tc*s[,,t-1]
      # Monotone logistic allocation switch around the single size mstar.
      allocs[,,t] <- pmax(allocs[,,t-1],t(apply(lw(s[,,t-1]),1,inv_logit3,mstar,ts[t],slope,tr)))
      s[,,t] <- s[,,t-1]+dts*(1-allocs[,,t])*Es
      incProgress(dts/tmax, detail = paste("Time", round(ts[t],2)))
    }
    ls <- lw(s)
    #browser()
    # Reference slices: the temperature closest to temp_ref and the gamma
    # closest to the baseline v$gamma.
    ref = which.min(abs(temp-temp_ref))
    refg = which.min(abs(gamma-v$gamma))
    # Trajectories across gamma at the reference temperature, and across
    # temperature at the reference gamma.
    gls=t(sapply(1:gs,function(x) ls[ref,x,]))
    gallocs=t(sapply(1:gs,function(x) allocs[ref,x,]))
    tls=t(sapply(1:temps,function(x) ls[x,refg,]))
    tallocs=t(sapply(1:temps,function(x) allocs[x,refg,]))
    # Long-format data frames for each slice.
    lss <- reshape2::melt(tls)
    colnames(lss) <- c('Temperature','t','t_length')
    lss$Temperature <- temp[lss$Temperature]
    lss$Gamma <- gamma[refg]
    lss$t <- ts[lss$t]
    alloc <- reshape2::melt(tallocs)
    colnames(alloc) <- c('Temperature','t','allocs')
    alloc$Temperature <- temp[alloc$Temperature]
    alloc$Gamma <- gamma[refg]
    alloc$t <- ts[alloc$t]
    lgs <- reshape2::melt(gls)
    colnames(lgs) <- c('Gamma','t','g_length')
    lgs$Temperature <- temp[ref]
    lgs$Gamma <- gamma[lgs$Gamma]
    lgs$t <- ts[lgs$t]
    galloc <- reshape2::melt(gallocs)
    colnames(galloc) <- c('Gamma','t','allocs')
    galloc$Temperature <- temp[ref]
    galloc$Gamma <- gamma[galloc$Gamma]
    galloc$t <- ts[galloc$t]
    #browser()
  })
  # Joined length + allocation trajectories for each slice.
  list(t_growth = inner_join(lss,alloc) %>% arrange(t,Temperature),
       g_growth = inner_join(lgs,galloc) %>% arrange(t,Temperature))
}
# Oxygen supply rate (per year, /1000 rescaled) as a function of dissolved
# oxygen O2 and temperature T. The thermal term peaks at T == Topt (where it
# equals delta) and vanishes at T == Tmax; the O2 term is a saturating curve
# that is 0 at O2crit and exactly 0.5 at O2 == P50.
# NOTE: `T` as a parameter name shadows the TRUE shorthand inside this body.
O2_supply <- function(O2 = 1:100, O2crit = 20, P50 = 40, Tmax = 30, Topt = 15,
                      T, omega = 1.870, delta = 1038) {
  # Thermal performance curve, normalised so its peak value is delta.
  level <- delta * ((Tmax - T) / (Tmax - Topt))^omega *
    exp(-omega * (Tmax - T) / (Tmax - Topt)) / exp(-omega)
  # Saturating oxygen response; the -(P50 - O2crit)/log(0.5) scale pins the
  # half-saturation point at P50.
  uptake_frac <- 1 - exp(-(O2 - O2crit) / (-(P50 - O2crit) / log(0.5)))
  365 * 24 * level * uptake_frac / 1000
}
# plot(O2_supply(level=200,P50 = 10),t='l',xlab='Disolved O2',ylab='02 supply')
# Relative oxygen availability correction: declines exponentially with
# warming and equals 1 at temp == Tref.
O2_fact <- function(temp, Tref = 15) {
  decay_rate <- 0.01851
  exp(-decay_rate * (temp - Tref))
}
# Oxygen-limited ceiling on the activity level tau: the largest tau for which
# metabolic demand can be met by the oxygen supply f (typically the output of
# O2_supply()). Like eval_tau_eq_temp(), the final one-line expression is
# machine-generated computer-algebra output and is the function's return
# value; do not edit it term by term.
eval_tau_max_temp <- function(f=O2_supply(),
                              Ea = 0.52,
                              temp=seq(5,10,l=100),
                              temp_ref=15,
                              omega = 0.4,
                              gamma=50,
                              delta=2,
                              h=30,
                              phi=0.15,
                              beta=0.25,
                              k=2,
                              p=0.8,
                              q=0.9,
                              n=0.8,
                              m=100){
  # Arrhenius-type temperature correction (1 at temp == temp_ref; 8.6173324e-5
  # is the Boltzmann constant in eV/K).
  tc = exp(Ea*((temp+(288.2-temp_ref))-288.2)/(8.6173324*10^(-5)*(temp+(288.2-temp_ref))*288.2))
  # Analytical solution for the maximum tau, vectorised over f/temp/m.
  -1/2*(delta*h*k*m^(q + 1)*omega*tc^2 - f*gamma*m^(p + 1) + (beta*gamma*h*m^(p + q) + gamma*k*m^(n + p))*omega*tc - sqrt(delta^2*h^2*k^2*m^(2*q + 2)*omega^2*tc^4 + 2*(beta*delta*gamma*h^2*k*m^(p + 2*q + 1) - delta*gamma*h*k^2*m^(n + p + q + 1))*omega^2*tc^3 + f^2*gamma^2*m^(2*p + 2) - 2*(beta*f*gamma^2*h*m^(2*p + q + 1) + f*gamma^2*k*m^(n + 2*p + 1))*omega*tc + (2*delta*f*gamma*h*k*m^(p + q + 2)*omega + (beta^2*gamma^2*h^2*m^(2*p + 2*q) + 2*beta*gamma^2*h*k*m^(n + 2*p + q) + gamma^2*k^2*m^(2*n + 2*p))*omega^2)*tc^2))*m^(-p - 1)/(delta*gamma*k*omega*tc)
}
# Realised activity level tau for body masses m at temperature temp_in, given
# the parameter list v: the energetically optimal tau capped by the
# oxygen-limited maximum, clamped into [0, 1].
# (tau_uc and O2_in are accepted for interface compatibility but are not used
# in this body.)
get_taus <- function(v,tau_uc,O2_in,temp_in,m=10^seq(0,6,l=1000)){
  # Temperature-corrected dissolved oxygen (reference temperature 5), fed into
  # the supply curve parameterised from v.
  oxy_scale <- O2_fact(temp_in, 5)
  supply <- O2_supply(O2 = 10 * oxy_scale, Topt = v$Topt, O2crit = v$O2crit,
                      Tmax = v$temp[length(v$temp)], T = temp_in,
                      delta = v$lO, omega = v$shape, P50 = v$P50)
  # Upper bound on tau imposed by the oxygen supply.
  tau_ceiling <- eval_tau_max_temp(f = supply, temp = temp_in, Ea = v$Ea,
                                   omega = v$omega, gamma = v$gamma,
                                   delta = v$delta, phi = v$phi, h = v$h,
                                   beta = v$beta, k = v$k, p = v$p, q = v$q,
                                   n = v$n, m = m)
  # Energetically optimal tau from the equilibrium solution.
  tau_opt <- eval_tau_eq_temp(temp = temp_in, Ea = v$Ea, gamma = v$gamma,
                              delta = v$delta, phi = v$phi, h = v$h,
                              beta = v$beta, k = v$k, p = v$p, q = v$q,
                              n = v$n, m = m, M = v$M, v = v$v)
  # Take whichever constraint binds, then clamp into [0, 1].
  tau_real <- pmin(tau_opt, tau_ceiling)
  tau_real[tau_real < 0] <- 0
  tau_real[tau_real > 1] <- 1
  tau_real
}
|
e5b2b539c9acfe746a031c778ca2c13bef3199f9 | f89c621ea18f70f6ac2462eae1c43122b9a26d28 | /drill/swirl_ggplot2_extras.R | 116878d7e827de3f036231a33f9dd845adbe3979 | [] | no_license | smstaneva/datasciencecoursera | 5c828ac4467ffa76286eafd0709edca2d842ea52 | 5a74432696ee41e543f9e04faa59b9b0503d44ef | refs/heads/master | 2021-09-11T19:32:07.391859 | 2018-04-11T14:39:46 | 2018-04-11T14:39:46 | 75,734,746 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,162 | r | swirl_ggplot2_extras.R | str(diamonds)
# Transcript of the swirl "GGPlot2 Extras" lesson, cleaned up so it can be
# sourced as a script.
# FIX: expressions that continued onto a new line STARTING with `+` were
# parsed as separate statements (a leading `+` begins a new expression in R),
# so those geom_smooth()/facet_grid() layers were silently dropped; the `+`
# now ends the preceding line.
# NOTE(review): `brk`, `counts` and `myd` are variables supplied by the swirl
# lesson environment - they are not defined in this file; confirm they exist
# before sourcing standalone.
library(ggplot2)  # the lesson had ggplot2 attached already
qplot(price, data = diamonds)
range(diamonds$price)
qplot(price, data = diamonds, binwidth = 18497/30)
brk
counts
qplot(price, data = diamonds, binwidth = 18497/30, fill = cut)
qplot(price, data = diamonds, geom = "density")
qplot(price, data = diamonds, geom = "density", color = cut)
qplot(carat, price, data = diamonds)
qplot(carat, price, data = diamonds, shape = cut)
qplot(carat, price, data = diamonds, color = cut)
qplot(carat, price, data = diamonds, color = cut) + geom_smooth(method = "lm")
qplot(carat, price, data = diamonds, color = cut, facets = .~ cut) +
  geom_smooth(method = "lm")
g <- ggplot(diamonds, aes(depth, price))
summary(g)
g + geom_point(alpha = 1/3)
cutpoints <- quantile(diamonds$carat, seq(0, 1, length = 4), na.rm = TRUE)
cutpoints
diamonds$car2 <- cut(diamonds$carat, cutpoints)
g <- ggplot(diamonds, aes(depth, price))
g + geom_point(alpha = 1/3) + facet_grid(cut ~ car2)
diamonds[myd,]  # NOTE(review): `myd` comes from the swirl lesson
g + geom_point(alpha = 1/3) +
  facet_grid(cut ~ car2) +
  geom_smooth(method = "lm", size = 3, color = "pink")
ggplot(diamonds, aes(carat, price)) + geom_boxplot() + facet_grid(.~cut)
636453efd08089c64b4ab489a8343bb1711fd13e | 70ceafccb7ed3005e64521551eae6657385118e5 | /R-Portable/library/survival/tests/detail.R | 3b1f6b788bc96eafc5f8c4051620bcaa62b06920 | [
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] | permissive | ksasso/Electron_ShinyApp_Deployment | 6e63686b27bc38607bca1e5b50ed7cd58f6a4a3b | 1402f4d6bbb4a415bce07ebfddc8b76704f11f97 | refs/heads/master | 2023-07-06T11:48:21.413305 | 2020-04-30T12:53:11 | 2020-04-30T12:53:11 | 120,221,429 | 100 | 24 | CC0-1.0 | 2023-06-25T03:17:43 | 2018-02-04T20:42:11 | HTML | UTF-8 | R | false | false | 2,665 | r | detail.R | # A short test on coxph.detail, to ensure that the computed hazard is
# equal to the theoretical value
library(survival)
# Helper: compare two objects as plain vectors (strips names/dims first).
aeq <- function(a,b) all.equal(as.vector(a), as.vector(b))
# taken from book4.R
# Small counting-process data set on (start, stop] intervals with one binary
# covariate x; event = 1 marks a death, 0 a censored observation.
test2 <- data.frame(start=c(1, 2, 5, 2, 1, 7, 3, 4, 8, 8),
                    stop =c(2, 3, 6, 7, 8, 9, 9, 9,14,17),
                    event=c(1, 1, 1, 1, 1, 1, 1, 0, 0, 0),
                    x    =c(1, 0, 0, 1, 0, 1, 1, 1, 0, 0) )
# Hand-derived Cox model quantities for the test2 data at coefficient `beta`,
# matching the worked example in the book (appendix E.2.1). Each closed-form
# term corresponds to one of the six event times (2, 3, 6, 7, 8, 9), with the
# last time a tie shared 0.5/0.5 between two subjects.
# Returns the log-likelihood, score u, information imat, risk-weighted means
# xbar, baseline hazard (shifted to covariate value newx), martingale
# residuals, score residuals and Schoenfeld residuals.
byhand <- function(beta, newx=0) {
    r <- exp(beta)
    # Partial log-likelihood: 4 events with x=1, minus the log risk-set sums.
    loglik <- 4*beta - (log(r+1) + log(r+2) + 2*log(3*r+2) + 2*log(3*r+1) +
                    log(2*r +2))
    # Score (first derivative of loglik in beta).
    u <- 1/(r+1) + 1/(3*r+1) + 2*(1/(3*r+2) + 1/(2*r+2)) -
             ( r/(r+2) +3*r/(3*r+2) + 3*r/(3*r+1))
    # Observed information (negative second derivative).
    imat <- r*(1/(r+1)^2 + 2/(r+2)^2 + 6/(3*r+2)^2 +
            6/(3*r+1)^2 + 6/(3*r+2)^2 + 4/(2*r +2)^2)
    # Baseline hazard increments: 1 / (risk-set weight) at each event time.
    hazard <-c( 1/(r+1), 1/(r+2), 1/(3*r+2), 1/(3*r+1), 1/(3*r+1),
            1/(3*r+2), 1/(2*r +2) )
    # The matrix of weights, one row per obs, one col per time
    #   deaths at 2,3,6,7,8,9
    wtmat <- matrix(c(1,0,0,0,1, 0, 0,0,0,0,
                      0,1,0,1,1, 0, 0,0,0,0,
                      0,0,1,1,1, 0, 1,1,0,0,
                      0,0,0,1,1, 0, 1,1,0,0,
                      0,0,0,0,1, 1, 1,1,0,0,
                      0,0,0,0,0, 1, 1,1,1,1,
                      0,0,0,0,0,.5,.5,1,1,1), ncol=7)
    # Scale each subject's row by its risk score exp(beta * x).
    wtmat <- diag(c(r,1,1,r,1,r,r,r,1,1)) %*% wtmat
    x <- c(1,0,0,1,0,1,1,1,0,0)
    status <- c(1,1,1,1,1,1,1,0,0,0)
    # Risk-weighted covariate mean at each event time.
    xbar <- colSums(wtmat*x)/ colSums(wtmat)
    # NOTE(review): n is computed but never used below.
    n <- length(x)
    # Table of sums for score and Schoenfeld resids
    hazmat <- wtmat %*% diag(hazard) #each subject's hazard over time
    # Martingale increments dM = observed - expected.
    dM <- -hazmat  #Expected part
    for (i in 1:5) dM[i,i] <- dM[i,i] +1  #observed
    dM[6:7,6:7] <- dM[6:7,6:7] +.5    # observed
    mart <- rowSums(dM)
    # Table of sums for score and Schoenfeld resids
    # Looks like the last table of appendix E.2.1 of the book
    resid <- dM * outer(x, xbar, '-')
    score <- rowSums(resid)
    scho <- colSums(resid)
    # We need to add the ties back up (they are symmetric)
    scho[6:7] <- rep(mean(scho[6:7]), 2)
    list(loglik=loglik, u=u, imat=imat, xbar=xbar, haz=hazard* exp(beta*newx),
         mart=mart, score=score, rmat=resid,
         scho=scho)
}
# The actual coefficient of the fit is close to zero.  Using a larger
# number pushes the test harder, but it should still work without
# the init and iter arguments, i.e., for any coefficient.
# iter=0 freezes the coefficient at init=-1 so byhand() can be evaluated at
# a known beta.
fit1 <- coxph(Surv(start, stop, event) ~x, test2,init=-1, iter=0)
temp <- coxph.detail(fit1)
temp2 <- byhand(fit1$coef, fit1$means)
# coxph.detail()'s per-event hazard must equal the hand computation; the two
# tied contributions at the last event time are collapsed with sum().
aeq(temp$haz, c(temp2$haz[1:5], sum(temp2$haz[6:7])))
|
90ec2e535e5a2ff8796469d00c6a87789978671f | c51347680754745733293e00aacf7b633334c1fc | /man/turtle.Rd | fa69caedf883471e9d4db51d8c5d1e19a9c08d04 | [] | no_license | cran/YplantQMC | 771c341d00e410a0e61dbdadc02af8866d5cd198 | dc62bfc247ba9d6dd92498e8afa00d511a36e00e | refs/heads/master | 2021-01-21T21:47:33.241377 | 2016-05-23T06:34:50 | 2016-05-23T06:34:50 | 17,694,152 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 473 | rd | turtle.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/yplantqmc-package.R
\docType{data}
\name{turtle}
\alias{turtle}
\title{A turtle sky with 58 points}
\format{A data frame with 59 observations on the following 2 variables.
\describe{ \item{altitude}{a numeric vector}
\item{azimuth}{a numeric vector} }}
\description{
These are the angles used in \code{\link{STARbar}} when \code{integration =
"Turtlesky"}.
}
\keyword{datasets}
|
188c645aa022d84b3f710166b10cc0306e9275b7 | 5051798cfdbaacea22c0d7073bbf889e7fd192ba | /task_ggplot.R | baa27de09349d144654f24f25f95f21d8d71d9ef | [] | no_license | leticiaamarcal/Predicting-Customer-Preferences | ceafae5df9fa0974f15b64520fd446e88265e414 | ca99e1efcc4838c60cafeada43770368c3baeebe | refs/heads/master | 2020-08-08T03:08:38.169752 | 2019-10-14T10:02:52 | 2019-10-14T10:02:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 159 | r | task_ggplot.R | #ler arquivo
# Read the Blackwell historical sample (original comment "ler arquivo" =
# "read file").
blackwell <- read.csv("C:/Users/letic/Desktop/Ubiqum/Blackwell_Hist_Sample.csv")
# Load ggplot2 and its bundled midwest demographics data set.
library(ggplot2)
data("midwest", package = "ggplot2")
|
c86e6309c5327f7b33c0e2fd10ddfea35be14ebd | e0d35b1a16ce9451e4033042896f56ade5ce0d05 | /man/food_coded.Rd | 06e6f07a4a3f585632c5b42d13689fa0a2f51108 | [] | no_license | jnumainville/JoeNumainvilleTools | 769c1df0ff72a2babc9808bd40fb663dbd5559b9 | 4338f1d4f14d8393be8d9c189b30bd8cdba80517 | refs/heads/master | 2021-01-24T02:45:51.470895 | 2018-02-26T19:43:56 | 2018-02-26T19:43:56 | 122,859,378 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 537 | rd | food_coded.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/food_coded_info.R
\docType{data}
\name{food_coded}
\alias{food_coded}
\title{food_coded contains data on college students and their food choices}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 125 rows and 61 columns.}
\source{
\href{https://www.kaggle.com/borapajo/food-choices}{Kaggle}
}
\usage{
food_coded
}
\description{
food_coded contains data on college students and their food choices
}
\keyword{datasets}
|
5a8ca0eca43177f72871833f9f09e7ec6270382f | 452b64470dfd99bea03e9ff070a342c09d74b5cc | /code/experiment_5.R | 74ef833ebe7985c64973f4494eb6d1cbe2beaf9f | [] | no_license | ma-hei/thesis | a21a346352a0d010b080c52d7cb8bf0dc02204d0 | b6517e6f0f83c65905decd22bf59341ef0a44654 | refs/heads/master | 2020-04-04T05:53:42.607816 | 2016-10-19T16:01:40 | 2016-10-19T16:01:40 | 55,417,618 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,270 | r | experiment_5.R | load('../data/mix_dataset.Rda')
# Problem dimensions: drugs index column 1 and targets column 2 of mix_dataset
# (loaded above from mix_dataset.Rda).
n_drugs = length(unique(mix_dataset[,1]))
n_targets = length(unique(mix_dataset[,2]))
# Pairwise similarity matrices.  Helper functions (make_adjacency_mat*,
# get_folds, get_mf_cv, get_libmf_prediction, train_crf_row,
# make_crf_predictions_row, get_metrics) are defined elsewhere in this project.
drug_sim = read.table('../data/mix_dataset_drug_drug_sim.txt')
drug_sim = as.matrix(drug_sim)
target_sim = read.table('../data/mix_dataset_target_target_sim.txt')
target_adj_mat = make_adjacency_mat_targets(target_sim, 0.5)
drug_adj_mat = make_adjacency_mat(drug_sim)
test_folds = get_folds(mix_dataset, 5)
# Accumulators for test-set predictions (NA = not yet predicted).
crf_predictions = rep(NA, nrow(mix_dataset))
mf_predictions = rep(NA, nrow(mix_dataset))
# NOTE(review): only fold 1 of the 5-fold split is processed in this script.
i = 1
test_ind = test_folds[[i]]
train_ind = setdiff(1:nrow(mix_dataset),test_ind)
# Drug x target interaction matrix; -1 marks unobserved/test entries.
dt_mat = matrix(-1, nrow = n_drugs, ncol = n_targets)
dt_mat[mix_dataset[train_ind,c(1,2)]] = mix_dataset[train_ind,3]
cat('getting MF cv-prediction on training data..\n')
mf_preds_train = get_mf_cv(dt_mat, 400)
mf_preds_train_mat = mf_preds_train[[2]]
cat('getting MF predictions for complete matrix..\n')
mf_preds_all = get_libmf_prediction(dt_mat, 400)
sim_mat = drug_sim
adj_mat = drug_adj_mat
# Train one CRF per target column, using the matrix-factorization predictions
# as the per-drug feature.
for (t in 1:n_targets){
#for (t in c(65,171)){
cat('fold ',i,', target ',t,'... ',length(which(dt_mat[,t]>=0)),' observations\n')
mf_pred_train_col_t = mf_preds_train_mat[which(!is.na(mf_preds_train_mat[,t])),t]
adj_mat_train_col_t = make_training_adj_mat_for_column(dt_mat, sim_mat, t)
training_vals_col_t = dt_mat[which(dt_mat[,t]>=0),t]
cat(length(which(adj_mat_train_col_t>0)),'\n')
# Smaller learning rate for well-observed targets.
if (length(which(dt_mat[,t]>=0))>500){
eta = 0.001
} else{
eta = 0.01
}
params = train_crf_row(y = training_vals_col_t, X = mf_pred_train_col_t, adj_mat = adj_mat_train_col_t, crf_iters = 1000, eta = eta)
cat('learned parameters: ', params[[1]], params[[2]],'\n')
# Test rows of this fold that belong to target t.
inds = which(mix_dataset[test_ind,2] == t)
labels_test_col = mix_dataset[test_ind[inds], 3]
mf_prediction_col = mf_preds_all[,t]
mf_prediction_test_col = mf_preds_all[cbind(mix_dataset[test_ind[inds],1],mix_dataset[test_ind[inds],2])]
mf_predictions[test_ind[inds]] = mf_prediction_test_col
cat('making crf predictions..\n')
crf_prediction_col = make_crf_predictions_row(params[[1]], params[[2]], column = dt_mat[,t], adj_mat = adj_mat, X = mf_prediction_col)
crf_prediction_test_col = crf_prediction_col[mix_dataset[test_ind[inds],1]]
crf_predictions[test_ind[inds]] = crf_prediction_test_col
# Per-target metrics (the 7 is a threshold passed to get_metrics -- confirm
# its meaning there), followed by running totals over all targets so far.
mf_metrics = get_metrics(mf_prediction_test_col, labels_test_col, 7)
crf_metrics = get_metrics(crf_prediction_test_col, labels_test_col, 7)
cat('target rmse (mf, crf): ',mf_metrics[[1]],', ',crf_metrics[[1]],'\n')
cat('target auc (mf, crf): ',mf_metrics[[2]],', ',crf_metrics[[2]],'\n')
cat('target aupr (mf, crf): ',mf_metrics[[3]],', ',crf_metrics[[3]],'\n')
inds = which(!is.na(mf_predictions))
mf_metrics = get_metrics(mf_predictions[inds], mix_dataset[inds,3], 7)
crf_metrics = get_metrics(crf_predictions[inds], mix_dataset[inds,3], 7)
cat('all test rmse (mf, crf) so far: ',round(mf_metrics[[1]], digits = 3),', ',round(crf_metrics[[1]], digits = 3),'\n')
cat('all test auc (mf, crf) so far: ',round(mf_metrics[[2]], digits = 3),', ',round(crf_metrics[[2]], digits = 3),'\n')
cat('all test aupr (mf, crf) so far: ',round(mf_metrics[[3]], digits = 3),', ',round(crf_metrics[[3]], digits = 3),'\n\n')
} |
ad21117baa38fd33a65e0d7ac13a0b93cd83c04c | 6bd27d402982693463c41b47d47b1c94c287a2af | /data_analysis_rich.R | ba03c8ccc302dbbb03f8895a14c0eaf593d6d1de | [] | no_license | RSchwinn/fdic | 1ec8ae1972e2f8fc52fc3e49740f39a44d29fba9 | 144dd9aefba77af3f288312a7c25b0111050670a | refs/heads/master | 2020-03-25T21:14:49.356341 | 2018-11-09T18:04:51 | 2018-11-09T18:04:51 | 144,166,869 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,623 | r | data_analysis_rich.R | # Always start here (loads the data) ----
# Exploratory plots of the FDIC bank data.  `definitions` is read here but
# never used in this script.
library(tidyverse)
df = readRDS("../data/fdic/working_df.RDS")
definitions = read_csv("even_better_chosen_list.csv")
# Rich's sample plot ----
# Quick look at the largest institutions by total assets.
df %>%
select(name, rssdhcr, fed_rssd, asset, date) %>%
arrange(desc(asset)) %>%
head()
# Use fed_rssd 852218 to filter by individual bank
# Only use rssdhcr if you are interested in seeing all banks under a given holding company.
df_1 = df %>%
filter(fed_rssd == 852218)
# fixes scientific notation
options(scipen = 99)
library(scales)
# Deposits vs. assets for the selected bank, with a smoother; title is the
# bank's name taken from the last row.
ggplot(data = df_1,
mapping = aes(x = dep, y = asset)) +
geom_point(alpha = 0.5)+
geom_smooth(alpha = 0.1)+
scale_x_continuous(label = comma)+
scale_y_continuous(label = comma)+
xlab("Deposits ($1000)")+
ylab("Assets ($1000)")+
theme_test()+
ggtitle(df_1$name[nrow(df_1)])
# Vicky's visualization ----
# Banks with cb == 1 (presumably the community-bank flag) in the latest quarter.
recent_df = df %>%
filter( date == "2018-03-31",
cb == 1
)%>%
select(name, rssdhcr, dep, fed_rssd, asset, date)
# Distribution of assets: dot plot and normal Q-Q plot.
ggplot(data = recent_df,
mapping = aes(x=asset))+
geom_dotplot()
ggplot(data = recent_df,
mapping = aes(sample=asset))+
geom_qq()
library(plotly)
# Interactive histogram of assets.
p = ggplot(data = recent_df,
mapping = aes(x=asset))+
geom_histogram(bins = 100)
ggplotly(p)
# Interactive deposits-vs-assets scatter with bank-name tooltips.
# NOTE(review): the trailing "+" after theme_test() makes ggplotly(p) part of
# this assignment (evaluated with the *previous* value of p), so the final
# statement likely does not render the intended plot -- verify.
p = ggplot(data = recent_df,
mapping = aes(x = dep, y = asset, text = paste("Bank:", name))) +
geom_point(alpha = 0.5)+
scale_x_continuous(label = comma)+
scale_y_continuous(label = comma)+
xlab("Deposits ($1000)")+
ylab("Assets ($1000)")+
theme_test()+
ggplotly(p)
|
57bfc142b9b8d87810964f94cae2e1c2c8c5b6ce | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/rccmisc/tests/test-specify_missing.R | 3da8a63fbe3b9b8e9ec3d086b35a15da4e8c1352 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 337 | r | test-specify_missing.R |
# Unit tests for specify_missing(): listed values are replaced by NA, and for
# character input empty / blank-only strings are treated as missing.
context("specify_missing")
test_that("specify_missing", {
# Numeric: unchanged without extra arguments; a listed value becomes NA.
expect_that(specify_missing(1:8), is_equivalent_to(1:8))
expect_that(specify_missing(1:8, 5), is_equivalent_to(c(1:4, NA, 6:8)))
# Character: "" and whitespace-only strings become NA; trailing blanks inside
# non-empty strings ("apa ") are preserved.
expect_that(specify_missing(c(NA, "", "apa ", " ", "hej")),
is_equivalent_to(c(NA, NA, "apa ", NA, "hej")))
})
|
c8310ab60e015ec824dc0469742b57664f4d0fd0 | af0df2be1822e2ed328f8bc1fffd0410d0e954ac | /Max_Likelihood_Est.R | 02cc4cb176a6dfde978044a968117fb6661bcea0 | [] | no_license | dabaja/StatFinDat | 8a08d729595f0a77e693acfb44bd33a259c8157d | 7537bcf39d0c53aaec202d66557dd9483fe2976b | refs/heads/master | 2021-07-04T04:30:34.762642 | 2017-09-28T18:06:57 | 2017-09-28T18:06:57 | 103,696,231 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 211 | r | Max_Likelihood_Est.R | #Maximum Likelihood Estimators
#The case of a GEV
# Simulate 500 draws from a generalized extreme value distribution and fit it
# by maximum likelihood.  rgev/gev.ml/rpareto/gpd.ml come from an attached
# extreme-value package loaded elsewhere -- confirm which one before running.
X <- rgev(500, lambda = 3.5, xi= 0.4)
gev.ml(X) # ignore warnings
#The case of a GPD
# Same exercise for the generalized Pareto distribution; keep only the
# parameter estimates from the fit object.
Y <- rpareto(500, lambda = 3.5, xi= 0.4)
gpd.ml(Y)$param.est
|
1a5e51d09423dc5d7b5abf479ec2653ff099d83d | c089d92398b2e8f8ab05972acf2c8028d0c5dc34 | /Bayesian1/postcoin3.R | ab27003aa85f6f882a28e4e14b3c8757f50c8229 | [] | no_license | Robbie-E/practice_R | 26038d84b120e3ef8ae894335d5c6e425b90f426 | a37c57c51432d997ee82113517fda63336ef83d3 | refs/heads/main | 2023-06-01T02:52:18.071692 | 2021-06-02T08:14:01 | 2021-06-02T08:14:01 | 357,104,520 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 836 | r | postcoin3.R | post.coin3 <- function(guess, heads, prior1, prior2, nflips=4) {
  #guess is an outcome (fair, head-loaded, tail-loaded)
  #heads is number of heads after nflips flips of coin
  #prior1 is unconditional probability that coin is fair
  #prior2 is unconditional probability that coin is hload
  # Per-flip head probabilities under each hypothesis:
  # fair = 0.5, head-loaded = 0.7, tail-loaded = 0.3.
  prob1 <- 0.5
  prob2 <- 0.7
  prob3 <- 0.3
  # Marginal likelihood of the observed head count: binomial likelihood under
  # each hypothesis, weighted by its prior (tail-loaded gets 1-prior1-prior2).
  normf <- (dbinom(heads,size=nflips,prob=prob1)*prior1)+(dbinom(heads,size=nflips,prob=prob2)*prior2)+(dbinom(heads,size=nflips,prob=prob3)*(1-prior1-prior2))
  # Bayes' rule: posterior probability of the guessed hypothesis.
  # NOTE(review): if `guess` is none of 'fair'/'hload'/'tload', `post` is
  # never assigned and the final line errors.
  if(identical(guess,'fair')){
    post <- (dbinom(heads,size=nflips,prob=prob1)*prior1)/normf
  }
  if(identical(guess,'hload')){
    post <- (dbinom(heads,size=nflips,prob=prob2)*prior2)/normf
  }
  if(identical(guess,'tload')){
    post <- (dbinom(heads,size=nflips,prob=prob3)*(1-prior1-prior2))/normf
  }
  post
} |
d08480643cfdcd9c6488450ff8e1c7e46039a914 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Devore7/examples/ex13.49.Rd.R | fda320e10d0152a3c30fbd8b0f846e631d955797 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 160 | r | ex13.49.Rd.R | library(Devore7)
### Name: ex13.49
### Title: R Data set: ex13.49
### Aliases: ex13.49
### Keywords: datasets
### ** Examples
# Auto-generated example: load the ex13.49 data set and show its structure.
data(ex13.49)
str(ex13.49)
|
0839ab39a947c1fe41ab4992ae406a25ac05142d | 5901b07a2c6bc8f2e8fb433e4388abf8bc4afd11 | /R/fsia.r | 6d1ad19f8f511d71ffc26b7fd8d46ad407cb08b2 | [] | no_license | cran/fsia | f7a3a5b6c68f08701af145ced244d996669a5614 | 766dc7451a5b00e2c9951daec0eb40b220c5e269 | refs/heads/master | 2021-01-19T08:45:08.804806 | 2017-06-23T11:55:29 | 2017-06-23T11:55:29 | 29,202,646 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,326 | r | fsia.r |
# Read a FormScanner CSV export.
#
# file      : path to a semicolon-separated file (read via read.csv2; empty
#             cells become NA, strings are kept as character).
# col.names : optional replacement column names.
# conc      : columns (positions or names) whose values are concatenated,
#             row-wise, into the first of them; the remaining conc columns
#             are removed.
# id        : name of the column to rename to "id" (used by the other
#             functions in this file).
# dummy     : columns (positions or names) holding "|"-separated multiple
#             responses; for each such column with more than one distinct
#             response, 0/1 indicator columns named "<col>.<response>" are
#             appended to the data.
# Returns the processed data frame.
read.formscanner<-function(file,col.names=NULL,conc=NULL,id=NULL,dummy=NULL)
{
data<-read.csv2(file,as.is=TRUE,na.strings = "")
if (!is.null(col.names)) colnames(data)<-col.names
if (!is.null(conc)) {
# Concatenate the conc columns into the first one.  paste() treats the
# zero-length initial tmp as "" so this builds row-wise concatenations.
tmp<-c()
for (i in conc) tmp<-paste(tmp,data[,i],sep="")
data[,conc[1]]<-tmp
# Drop the remaining conc columns.  Guarded so that a single conc column no
# longer evaluates 2:length(conc) == c(2, 1), which used to delete the
# freshly concatenated column as well.
if (length(conc)>1) {
if (is.numeric(conc)) data<-data[,-(conc[2:length(conc)])]
else for (i in conc[2:length(conc)]) data[,i]<-NULL
}
}
if (!is.null(id)) colnames(data)[colnames(data)==id]<-"id"
if (!is.null(dummy)) {
if (is.numeric(dummy)) dummy<-colnames(data)[dummy]
for (i in dummy) {
# Split "|"-separated responses and build one indicator per distinct option.
dsp<-strsplit(data[,i],"[|]")
opt<-sort(unique(unlist(dsp)))
if (length(opt)>1) {
dummydata<-matrix(unlist(lapply(dsp,FUN=function(x,opt) opt%in%x, opt=opt)),ncol=length(opt),byrow=TRUE)*1
colnames(dummydata)<-paste(i,opt,sep=".")
data<-cbind(data,dummydata)
}
}
}
return(data)
}
# Attach an answer key to the data.
#
# Exactly one of keyline / keyfile / keydata must be given:
#   keyline : row number(s) of the data holding the key; those rows are
#             removed from the data.
#   keyfile : path to a semicolon-separated CSV file with the key.
#   keydata : a data frame with the key.
# obj may be the raw data frame returned by read.formscanner(), or a list
# already holding $data (and possibly $weights).  Returns a list with $data,
# $key and, when present, $weights.
addkey<-function(obj,keyline=NULL,keyfile=NULL,keydata=NULL)
{
# Count the key sources supplied.  (The previous check
# `(!is.null(a) + !is.null(b) + !is.null(c)) > 1` never triggered: unary `!`
# binds more loosely than `+` in R, so the whole sum was negated first and the
# comparison of a logical with 1 was always FALSE.)
nkey<-sum(!is.null(keyline), !is.null(keyfile), !is.null(keydata))
if (nkey==0) stop("Specify keyline or keyfile or keydata.\n")
if (nkey>1) stop("Specify only one key.\n")
# A bare data frame (no $key/$weights yet) is itself the data.
if (is.null(obj$key) && is.null(obj$weights)) data<-obj else data<-obj$data
weights<-obj$weights
if (!is.null(keyline)) {
key<-data[keyline,]
data<-data[-keyline,]
}
if (!is.null(keyfile)) key<-read.csv2(keyfile)
if (!is.null(keydata)) key<-keydata
# Keep only key columns present in the data; drop = FALSE so a single-item
# key stays a data frame (required by resp2binary()/resp2scores()).
sel<-colnames(key)[colnames(key)%in%colnames(data)]
key<-key[,sel,drop=FALSE]
if (is.null(weights)) obj<-list(data=data,key=key)
else obj<-list(data=data,key=key,weights=weights)
return(obj)
}
# Attach response weights to the data.
#
# Exactly one of weightsfile / weightsdata must be given.  The weights table
# must contain a "response" column (turned into row names so scores can be
# looked up by response label) plus one column per item.  obj may be the raw
# data frame returned by read.formscanner() or a list already holding $data
# (and possibly $key).  Returns a list with $data, $weights and, when
# present, $key.
addweights<-function(obj,weightsfile=NULL,weightsdata=NULL)
{
# Count the sources supplied.  (The previous `(!a + !b) > 1` test never
# fired: unary `!` binds more loosely than `+`, so the sum was negated first
# and a logical compared with 1 is never greater.)
nw<-sum(!is.null(weightsfile), !is.null(weightsdata))
if (nw==0) stop("Specify weightsfile or weightsdata.\n")
if (nw>1) stop("Specify either weightsfile or weightsdata.\n")
# A bare data frame (no $key/$weights yet) is itself the data.
if (is.null(obj$key) && is.null(obj$weights)) data<-obj else data<-obj$data
key<-obj$key
if (!is.null(weightsfile)) weights<-read.csv2(weightsfile)
if (!is.null(weightsdata)) weights<-weightsdata
rownames(weights)<-weights$response
# Keep only weight columns matching the data; drop = FALSE preserves the
# data-frame shape (and its response row names) for a single-item table.
sel<-colnames(weights)[colnames(weights)%in%colnames(data)]
weights<-weights[,sel,drop=FALSE]
if (is.null(key)) obj<-list(data=data,weights=weights)
else obj<-list(data=data,key=key,weights=weights)
return(obj)
}
# Score responses as correct (1) / incorrect (0) against the key.
#
# obj     : list with $data (responses) and $key (a one-row data frame, as
#           set up by addkey()).
# columns : item columns of obj$data, by position or by name.
# Returns obj$data with the selected columns replaced by 0/1 scores; a
# comparison against an NA response stays NA.
resp2binary<-function(obj,columns)
{
key<-obj$key
if (is.null(key)) stop("key is required.\n")
if (is.numeric(columns)) item<-colnames(obj$data)[columns]
else item<-columns
data<-obj$data
out<-matrix(NA,nrow(data),length(columns))
# seq_along() instead of 1:length(columns): the latter yields c(1, 0) for an
# empty selection and then indexes item[1] out of range.
for (i in seq_along(columns)) {
out[,i]<-(as.character(data[,item[i]])==as.character(key[,item[i]]))*1
}
data[,columns]<-out
return(data)
}
# Convert raw item responses to weighted scores.
#
# obj must contain $data (responses) and $weights; $weights is a data frame
# of per-response score weights whose row names are the response labels (as
# set up by addweights()).  `columns` selects the item columns of obj$data,
# by position or by name.  Returns obj$data with the selected columns
# replaced by numeric scores.
resp2scores<-function(obj,columns)
{
weights<-obj$weights
if (is.null(weights)) stop("weights are required.\n")
# Translate column positions into item names (used to index the weights).
if (is.numeric(columns)) item<-colnames(obj$data)[columns]
else item<-columns
data<-obj$data
out<-matrix(NA,nrow(data),length(columns))
# Single-row weights: one weight per item, applied to correct answers only,
# so an answer key is required.
if (nrow(weights)==1) {
key<-obj$key
if (is.null(key)) stop("key is required.\n")
for (i in 1:length(columns)) {
# 1 for a correct response, times that item's weight.
# NOTE(review): the weight is taken by position (weights[1,i]), not by item
# name -- this assumes the weights columns are ordered like `columns`; verify.
out[,i]<-(as.character(data[,item[i]])==as.character(key[,item[i]]))*1*weights[1,i]
}
}
# Multi-row weights: each possible response has its own weight; multiple
# responses separated by "|" are split and their weights summed per person.
if (nrow(weights)>1) {
for (i in 1:length(columns)) {
datsp<-strsplit(data[,item[i]],"[|]")
for (j in 1:length(datsp)) out[j,i]<-sum(weights[datsp[[j]],item[i]])
}
}
data[,columns]<-out
return(data)
}
# Frequency tables (counts or percentages) for the selected items.
#
# obj     : either a raw response data frame, or a list with $data and
#           optionally $key / $weights (as returned by addkey/addweights).
# columns : item columns, by position or by name.
# perc    : if TRUE, report percentages of respondents instead of counts.
# Returns an object of class "frlist": one element per item, holding the item
# name, its frequency table and the key response (NULL when there is no key).
freq<-function(obj,columns,perc=FALSE)
{
# A bare data frame (no $key/$weights) is used as the data itself.
data <- if (is.null(obj$key) && is.null(obj$weights)) obj else obj$data
item <- if (is.numeric(columns)) colnames(data)[columns] else columns
key <- if (!is.null(obj$key)) as.matrix(obj$key[,item]) else obj$key
out <- vector("list", length(columns))
for (j in seq_along(columns)) {
tab <- table(data[, columns[j]])
if (perc) {
# Percentage of all respondents, rounded to two decimals.
tab <- round(tab / nrow(data) * 100, 2)
}
out[[j]] <- list(item = item[j], tab = tab, key = key[j])
}
class(out) <- "frlist"
return(out)
}
# Print method for "frlist" objects (the list returned by freq()): one header
# per item followed by its frequency table, with the key response flagged by
# a trailing "*".
print.frlist<-function(x, ...)
{
for (i in 1:length(x)) {
cat("\n============== ")
cat(x[[i]]$item)
cat(" ==============\n")
tab<-x[[i]]$tab
# Append "*" to the table name matching the item key (no-op when the key is
# NULL or absent from the table).
names(tab)[names(tab)==x[[i]]$key]<-paste(names(tab)[names(tab)==x[[i]]$key],"*",sep="")
print(tab)
}
cat("\n")
}
# Plot method for "frlist" objects: one bar plot of response frequencies per
# item.  The bar matching the key response is drawn in palette colour 3 and
# all others in colour 2.  With display = TRUE the counts are written above
# the bars; ask = TRUE pauses between successive plots.
plot.frlist<-function(x, display=TRUE, ask=TRUE, ...)
{
devAskNewPage(ask = ask)
for (i in 1:length(x)) {
tab<-x[[i]]$tab
colour<-rep(2,dim(tab))
if (!is.null(x[[i]]$key)) colour[names(tab)==x[[i]]$key]<-3
# Extra 20% headroom on the y axis leaves space for the count labels.
bp<-barplot(tab,col=colour,main=x[[i]]$item,ylim=c(0,max(tab)*1.2))
if (display) {
# Write each count just above its bar.
text(x=bp,y=(tab+max(tab)*0.02),labels=tab,adj = c(0.5, 0))
}
}
devAskNewPage(ask = FALSE)
}
# Per-person score report: total score, maximum attainable score, and the
# proportion attained across the selected items.
#
# obj     : list with $data and, depending on `weights`, $key and/or $weights.
# columns : item columns of obj$data, by position or by name.
# weights : if TRUE score with resp2scores(), otherwise 0/1 via resp2binary().
# Returns a data frame with one row per person.
person.stat<-function(obj,columns,weights=FALSE)
{
# Resolve numeric column positions to item names (needed for the weights lookup).
item <- if (is.numeric(columns)) colnames(obj$data)[columns] else columns
# Turn the raw responses into per-item scores.
scored <- if (weights) resp2scores(obj = obj, columns = columns) else resp2binary(obj = obj, columns = columns)
total <- rowSums(scored[, columns], na.rm = TRUE)
# Maximum attainable total: the sum of each item's positive weights, or
# simply the number of items for 0/1 scoring.
if (weights) {
maxtot <- sum(apply(obj$weights[, item], 2, FUN = function(w) sum(w[w > 0])))
} else {
maxtot <- length(columns)
}
# Identify persons by the "id" column when present, else by row names.
if ("id" %in% colnames(obj$data)) {
res <- data.frame(id = obj$data$id, score = total, max = maxtot, perc = round(total / maxtot, 2))
} else {
res <- data.frame(rownames = rownames(obj$data), score = total, max = maxtot, perc = round(total / maxtot, 2))
}
res
}
# Per-item score report: total score, maximum attainable score and average
# score per person for each selected item.
#
# obj     : list with $data and, depending on `weights`, $key and/or $weights.
# columns : item columns of obj$data, by position or by name.
# weights : if TRUE score with resp2scores(), otherwise 0/1 via resp2binary().
# Returns a data frame with one row per item.
item.stat<-function(obj,columns,weights=FALSE)
{
item <- if (is.numeric(columns)) colnames(obj$data)[columns] else columns
scored <- if (weights) resp2scores(obj = obj, columns = columns) else resp2binary(obj = obj, columns = columns)
total <- colSums(scored[, columns], na.rm = TRUE)
n_persons <- nrow(scored)
# Maximum attainable per item: positive weights times the number of persons,
# or just the number of persons for 0/1 scoring.
if (weights) {
maxscore <- apply(obj$weights[, item], 2, FUN = function(w) sum(w[w > 0])) * n_persons
} else {
maxscore <- n_persons
}
# NOTE: perc divides by the number of persons (as in the original code),
# i.e. it is the mean score per person, not score/max.
res <- data.frame(item = names(total), score = total, max = maxscore, perc = round(total / n_persons, 2))
rownames(res) <- NULL
res
}
# Draw a text-based report of the responses given by the persons in `whichid`
# to the selected items: one column per person, correct answers in palette
# colour 3 and incorrect ones in colour 2.
#
# obj     : list with $data and, depending on `weights`, $key and/or $weights.
# columns : item columns of obj$data (positions or names).
# whichid : values of the "id" column identifying the persons to display.
# grid    : draw horizontal separator lines between the items.
# itemlab : optional item labels (defaults to the item names).
# weights : if TRUE, responses are annotated with their score weights.
report<-function(obj,columns,whichid,grid=TRUE,main="",las=0,itemlab=NULL,weights=FALSE)
{
if (!any(colnames(obj$data)=="id")) stop("id variable is missing. Select id in function read.formscanner.\n")
if (is.numeric(columns)) item<-colnames(obj$data)[columns]
else item<-columns
n<-length(columns)
# Response matrix restricted to the requested persons.
resp<-as.matrix(obj$data[obj$data$id%in%whichid,columns])
if (is.null(itemlab)) itemlab <- item
nid<-length(whichid)
# Case 1: unweighted report -- one column per person plus a "key" column.
if (!weights) {
if (!is.null(obj$key)) key<-as.matrix(obj$key[,item])
else key<-obj$key
# Empty canvas: x = item label, persons, key; y = items from top to bottom.
plot(1,ylim=c(0,n),xlim=c(0.5,nid+2+0.5),type="n",xaxt="n",yaxt="n",bty="n",ann=FALSE,main=main,las=las)
axis(1,at=1:(nid+2),labels=c("item",whichid,"key"),tick=FALSE)
text(1,n:1-0.5,itemlab)
for (i in seq_along(whichid)) {
colour<-rep(2,n)
colour[resp[i,]==key]<-3
text(i+1,n:1-0.5,resp[i,],col=colour)
}
text(nid+2,n:1-0.5,key)
}
if (weights) {
# Case 2a: one weight per item -- show "response=weight" pairs, with the
# weight zeroed for incorrect responses.
if (nrow(obj$weights)==1) {
if (!is.null(obj$key)) key<-as.matrix(obj$key[,item])
else key<-obj$key
plot(1,ylim=c(0,n),xlim=c(0.5,nid+2+0.5),type="n",xaxt="n",yaxt="n",bty="n",ann=FALSE,main=main,las=las)
axis(1,at=1:(nid+2),labels=c("item",whichid,"weights"),tick=FALSE)
text(1,n:1-0.5,itemlab)
for (i in seq_along(whichid)) {
colour<-rep(2,n)
colour[resp[i,]==key]<-3
wght<-obj$weights[,item]
wght[resp[i,]!=key]<-0
text(i+1,n:1-0.5,paste(resp[i,],wght,sep="="),col=colour)
}
text(nid+2,n:1-0.5,paste(key,obj$weights[,item],sep="="))
}
# Case 2b: per-response weights -- "|"-separated multiple responses are
# split and each shown with its own weight.
if (nrow(obj$weights)>1) {
plot(1,ylim=c(0,n),xlim=c(0.5,nid+2+0.5),type="n",xaxt="n",yaxt="n",bty="n",ann=FALSE,main=main,las=las)
axis(1,at=1:(nid+2),labels=c("item",whichid,"weights"),tick=FALSE)
text(1,n:1-0.5,itemlab)
weights<-as.matrix(obj$weights[,item])
# NOTE(review): item labels are written twice at x = 1 (itemlab above and
# `item` here) -- possibly unintended.
text(1,n:1-0.5,item)
for (j in 1:length(columns)) {
datsp<-strsplit(resp[,j],"[|]")
for (i in 1:length(datsp)) {
respij<-datsp[[i]]
respij<-paste(respij,weights[respij,item[j]],sep="=")
# NOTE(review): the result of this bare paste() is discarded.
paste(respij,collapse="; ")
text(i+1,n-j+0.5,paste(respij,collapse="; "))
wght<-paste(rownames(weights),weights[,item[j]],sep="=")
text(nid+2,n-j+0.5,paste(wght,collapse="; "))
}
}
}
}
# Horizontal grid lines separating the items.
if (grid) abline(h=(0:n))
}
|
82b1c784d0a10bf82859ab2182e5906b37f19f68 | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.customer.engagement/man/connect_create_routing_profile.Rd | 53911d16d3f4022d3e2540ea10eac99c4b94a3a5 | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 1,755 | rd | connect_create_routing_profile.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect_operations.R
\name{connect_create_routing_profile}
\alias{connect_create_routing_profile}
\title{Creates a new routing profile}
\usage{
connect_create_routing_profile(InstanceId, Name, Description,
DefaultOutboundQueueId, QueueConfigs, MediaConcurrencies, Tags)
}
\arguments{
\item{InstanceId}{[required] The identifier of the Amazon Connect instance.}
\item{Name}{[required] The name of the routing profile. Must not be more than 127 characters.}
\item{Description}{[required] Description of the routing profile. Must not be more than 250
characters.}
\item{DefaultOutboundQueueId}{[required] The default outbound queue for the routing profile.}
\item{QueueConfigs}{The inbound queues associated with the routing profile. If no queue is
added, the agent can only make outbound calls.}
\item{MediaConcurrencies}{[required] The channels agents can handle in the Contact Control Panel (CCP) for
this routing profile.}
\item{Tags}{One or more tags.}
}
\value{
A list with the following syntax:\preformatted{list(
RoutingProfileArn = "string",
RoutingProfileId = "string"
)
}
}
\description{
Creates a new routing profile.
}
\section{Request syntax}{
\preformatted{svc$create_routing_profile(
InstanceId = "string",
Name = "string",
Description = "string",
DefaultOutboundQueueId = "string",
QueueConfigs = list(
list(
QueueReference = list(
QueueId = "string",
Channel = "VOICE"|"CHAT"|"TASK"
),
Priority = 123,
Delay = 123
)
),
MediaConcurrencies = list(
list(
Channel = "VOICE"|"CHAT"|"TASK",
Concurrency = 123
)
),
Tags = list(
"string"
)
)
}
}
\keyword{internal}
|
57727f90420e2d3660f5721d5bb8ccdeea83a461 | b895212edafe2b1916667c2bea6683224d9b614d | /predicthigh.R | dec6acd327202d562aa4214bc6f6d94cb887c61c | [] | no_license | andrewmahurin/equityproject | 253de037c9d3bfac818a74daa5fd8b1565472d40 | 6653ce48d0335ae3802150ec64c51a17c6b88ce5 | refs/heads/master | 2021-01-15T11:48:26.212053 | 2014-05-17T19:14:44 | 2014-05-17T19:14:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,603 | r | predicthigh.R | source("~/program/getdata.r")
# Evaluate a fitted lm's linear predictor by hand.
#
# NOTE(review): the predictors (openchange, close, dailyhigh, ...) are NOT
# taken from the model -- they are free variables resolved in the calling /
# global environment (loaded by getdata.r), and `highmodel`, the default for
# `model`, is not defined in this file.  The coefficient positions [1]..[12]
# must line up with the lm formulas used below; verify if a formula changes.
fitmodel = function (model = highmodel){
( model$coefficients[1] * 1 +
model$coefficients[2] * (openchange) +
model$coefficients[3] * (twodaychange) +
model$coefficients[4] * (log(close)) +
model$coefficients[5] * (close) +
model$coefficients[6] * (dailyhigh) +
model$coefficients[7] * (dailylow) +
model$coefficients[8] * (monthlyhigh) +
model$coefficients[9] * (monthlylow) +
model$coefficients[10] * (weeklychange) +
model$coefficients[11] * (monthlychange) +
model$coefficients[12] * (threemonthchange) )
}
# Like fitmodel(), but for the 14-coefficient second-stage model that also
# includes the first-stage predictions fitlow1 and fithigh1 (again resolved
# as globals from the surrounding script, not taken from the model object).
fitmodel1 = function (model = highmodel){
( model$coefficients[1] * 1 +
model$coefficients[2] * (fitlow1) +
model$coefficients[3] * (fithigh1) +
model$coefficients[4] * (openchange) +
model$coefficients[5] * (twodaychange) +
model$coefficients[6] * (log(close)) +
model$coefficients[7] * (close) +
model$coefficients[8] * (dailyhigh) +
model$coefficients[9] * (dailylow) +
model$coefficients[10] * (monthlyhigh) +
model$coefficients[11] * (monthlylow) +
model$coefficients[12] * (weeklychange) +
model$coefficients[13] * (monthlychange) +
model$coefficients[14] * (threemonthchange) )
}
# Regress the intraday high on 1-day lags of the price features (the lag1 /
# lag2 / lag5 helpers and the data frame `x` come from getdata.r, sourced at
# the top of this file).
model=lm(intradayhigh~
lag1(openchange) + lag1(twodaychange) +
lag1(log(close))
+ lag1(close)
+ lag1(dailyhigh) + lag1(dailylow)
+ lag1(monthlyhigh) + lag1(monthlylow)
+ lag1(weeklychange) + lag1(monthlychange) + lag1(threemonthchange)
, data= x);
print(summary(model))
# Manual prediction from the current (unlagged) features; compare the tail
# with fitted() values of the lagged model.
fithigh1 = fitmodel(model)
tail(fithigh1)
tail(fitted(model))
# Weekly-low model on 5-day lags of the same features.
model=lm(weeklylow~
lag5(openchange) + lag5(twodaychange) +
lag5(log(close))
+ lag5(close)
+ lag5(dailyhigh) + lag5(dailylow)
+ lag5(monthlyhigh) + lag5(monthlylow)
+ lag5(weeklychange) + lag5(monthlychange) + lag5(threemonthchange)
, data= x);
print(summary(model))
# NOTE(review): fitlow1 is computed from the weekly-low model via fitmodel(),
# which evaluates the *unlagged* regressors -- confirm this is intended.
fitlow1 = fitmodel(model)
sd(model$residuals)
sd(intradaylow)
tail(fitlow1)
tail(fitted(model))
hist(tail(dailylow, 60))
tail(intraday)
# Second-stage model: intraday low on the first-stage predictions plus 2-day
# lags of the raw features (14 coefficients, matching fitmodel1()).
model2 = (lm(intradaylow ~lag5(fitlow1) + lag1(fithigh1)+
lag2(openchange) + lag2(twodaychange) +
lag2(log(close))
+ lag2(close)
+ lag2(dailyhigh) + lag2(dailylow)
+ lag2(monthlyhigh) + lag2(monthlylow)
+ lag2(weeklychange) + lag2(monthlychange) + lag2(threemonthchange)
, data = x)
)
summary(model2)
model2$coefficients
fitlow2 = fitmodel1(model2)
tail(fitlow2)
tail(fitted(model2))
|
89875d39601f6d820f4514680feab33cfd2fb0b7 | 59ea89f1162f8048d9f7f10f6e6a3a1567c56607 | /rstudio/pam_peak_analysis.R | 7629f9f4f64d0615e191905e5f3c78d15458ac70 | [] | no_license | elshafeh/own | a9b8199efb3511aa1b30b53755be9337d572b116 | ef3c4e1a444b1231e3357c4b25b0ba1ba85267d6 | refs/heads/master | 2023-09-03T01:23:35.888318 | 2021-11-03T09:56:33 | 2021-11-03T09:56:33 | 314,668,569 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,530 | r | pam_peak_analysis.R | library(dae);library(nlme);library(effects);
library(psych);library(interplot);library(plyr);
library(devtools);library(ez);library(Rmisc);
library(wesanderson)
library(lme4);library(lsmeans);library(plotly);
library(ggplot2);library(ggpubr);library(dplyr)
library(ggthemes);library(extrafont)
library(car);library(ggplot2)
library(optimx);library(simr)
library(tidyverse)
library(hrbrthemes)
library(viridis);library(afex)
library(multcomp);library(emmeans);
library(gridExtra)
# Start from a clean workspace -- the script is meant to run standalone.
rm(list=ls())
# Plot constants (error-bar widths, point sizes, font size).
erbar_w <- .6; erbar_s <- .8; pd <- position_dodge(erbar_w+.1)
scat_s <- 1.5;mean_s <- 5; font_s <- 16
# Per-subject alpha-peak table (comma-separated, despite the .txt extension).
dir_file <- "/Users/heshamelshafei/gitHub/own/doc/"
fname <- paste0(dir_file,"pam_alpha_peak.txt")
sub_table <- read.table(fname,sep = ',',header=T)
# Design variables as factors (sub = subject; mod/hemi/wind/cue/pos are
# presumably modality, hemisphere, time window, cue and position -- confirm
# against the data file).
sub_table$sub <- as.factor(sub_table$sub)
sub_table$mod <- as.factor(sub_table$mod)
sub_table$hemi <- as.factor(sub_table$hemi)
sub_table$wind <- as.factor(sub_table$wind)
sub_table$cue <- as.factor(sub_table$cue)
sub_table$cue_cat <- as.factor(sub_table$cue_cat)
sub_table$pos <- as.factor(sub_table$pos)
# Ordered levels so contrasts follow the experimental ordering.
sub_table$wind <- ordered(sub_table$wind, levels = c("precue", "cuetarget"))
sub_table$cue <- ordered(sub_table$cue, levels = c("left", "right","unf"))
# Mixed model: peak ~ full mod x hemi x wind interaction, random subject intercept.
model_glm <- lme4::lmer(peak ~ (mod+hemi+wind)^3 + (1|sub), data =sub_table)
model_anova <- Anova(model_glm,type=2,test.statistic=c("F"))
print(model_anova)
emmeans(model_glm, pairwise ~ hemi|mod)
# Cue-target window only: mod x pos x cue.
ct_table <- sub_table[sub_table$wind == "cuetarget",]
model_glm <- lme4::lmer(peak ~ (mod+pos+cue)^3 + (1|sub), data =ct_table)
model_anova <- Anova(model_glm,type=2,test.statistic=c("F"))
print(model_anova)
# Auditory trials only within the cue-target window: cue x pos.
ct_table <- sub_table[sub_table$wind == "cuetarget" & sub_table$mod =="aud",]
model_glm <- lme4::lmer(peak ~ (cue+pos)^2 + (1|sub), data =ct_table)
model_anova <- Anova(model_glm,type=2,test.statistic=c("F"))
print(model_anova)
emmeans(model_glm, pairwise ~ pos|cue)
# Boxplots of alpha peak by mod/hemi, faceted by window and cue.
map_name <- c("#70ba8d","#7098ba")
ggplot(sub_table, aes(x = mod, y = peak, fill = hemi)) +
geom_boxplot(outlier.shape = NA, alpha = .5, width = .35, colour = "black")+
scale_colour_manual(values= map_name)+
scale_fill_manual(values = map_name)+
ggtitle("")+
scale_y_continuous(name = "alpha peak",limits = c(5,15))+
scale_x_discrete(name = "")+
theme_pubclean(base_size = 18,base_family = "Calibri")+
facet_wrap(~ wind~cue)
|
35bd8029791d591ba5b06ed3b74bf96d2a80620f | 9a46431a4ff2ccdfd963d71767e9a9365f721c51 | /mini_cygnet_rstan.R | 3f905b8e5c0b1f220fd8a5c07616986a0538ebce | [] | no_license | samcarlos/Badge_WORTH | 35f03916581c1d9fe5a9382202f9f99f62a8ba6b | 6c5411815848e7b9e67ea8d6a1e9cde2a6f296d3 | refs/heads/master | 2021-01-10T20:22:54.765402 | 2015-03-11T21:37:58 | 2015-03-11T21:37:58 | 31,980,542 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 979 | r | mini_cygnet_rstan.R | library(rstan)
# Stan model: estimate how much the Aston Martin badge adds to the price of a
# Toyota iQ.  The Cygnet (a rebadged iQ) price minus the iQ price is split
# into a badge premium and an interior-upgrade premium, where the latter is
# estimated from the Mini vs. upgraded Mini price gap scaled by beta_ratio.
# The string below is Stan code -- its lines must not be edited casually.
badge_worth="
data{
real cygnet;
real iq;
real mini;
real miniR;
}
parameters{
real pcygnet;
real piq;
real pmini;
real pminiR;
real<lower=0,upper=1> beta_ratio;
}
transformed parameters{
real interior_premium;
interior_premium <- pminiR-pmini;
}
model{
pcygnet~normal(cygnet,500);
piq~normal(iq,1500);
pmini~normal(mini,1000);
pminiR~normal(miniR,1500);
beta_ratio~normal(.6,.1);
}
generated quantities{
real badge_premium;
badge_premium<-pcygnet-piq-beta_ratio*interior_premium;
}
"
# Observed list prices (GBP) supplied to the model as data.
car.price.list=list(cygnet=23950,iq=7990, mini=17000, miniR=28990)
# Single-chain MCMC fit of the model defined above.
stan.out=stan(model_name="badge_worth", model_code=badge_worth,data=car.price.list, iter=5000, chains=1, verbose=TRUE)
stan.mat=as.matrix(stan.out)
# Normalised histogram of the badge-premium posterior.
# NOTE(review): stan.mat[,7] selects badge_premium by column position --
# confirm the posterior matrix column order before relying on it.
h = hist(stan.mat[,7], breaks = 50, plot=FALSE)
h$counts=h$counts/sum(h$counts)
plot(h, ylab="Empirical Probability", xlab="Estimated Badge in English Pounds", main="Histogram of Aston Martin Badge Worth on Toyota IQ")
hist(stan.mat[,7])
|
d9e44133126f71607544c414359cee86185ed999 | 2422a6753ca50c7422799abc02bc91e2bacc92b6 | /nfl team wins scraping.R | 205e3d523b0bd0a52b6070ca16247f6144889ad1 | [] | no_license | dantok18/YUSAG | a1ecb51e5fdac503b38ed585051119a284ba131e | 2e52b9566e5bab0ef0eb42abff3edf9fc687394e | refs/heads/master | 2021-05-13T19:14:40.186324 | 2018-01-10T01:35:30 | 2018-01-10T01:35:30 | 116,886,631 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 810 | r | nfl team wins scraping.R | library(XML)
library(RCurl)
# Scrape season-level NFL standings (2002-2017) from pro-football-reference
# and collect team wins/losses/points into one data frame.
i <- 2002
for(y in i:2017)
{
u <- paste0('https://www.pro-football-reference.com/years/',toString(y),'/#all_team_stats')
newu <- getURL(u)
# The first two HTML tables on the page hold the two conferences' standings.
data1 <- readHTMLTable(newu)[[1]]
data2 <- readHTMLTable(newu)[[2]]
data <- rbind(data1,data2)
# Drop the conference header rows (team names with "FC" in characters 2-3,
# i.e. "AFC ..." / "NFC ...").
data <- data[substring(data$Tm,2,3) != 'FC', ]
data$Tm <- as.character(data$Tm)
# Columns 2-8 are numeric stats read in as factors; convert via character.
for(j in 2:8){
data[, j] <- as.numeric(as.character(data[,j]))
}
# Accumulate one block of rows per season.
# NOTE(review): growing `tot` with rbind inside the loop is quadratic, but
# harmless for 16 seasons.
if(y==i)
{
tot <- data.frame(data$Tm,data$W,data$L,data$PF,data$PA,data$PD)
}
if(y > i)
{
temp <-data.frame(data$Tm,data$W,data$L,data$PF,data$PA,data$PD)
tot <- rbind(tot,temp)
}
# Progress indicator.
print(y)
}
####
# Wins vs. point differential across all scraped seasons.
plot(tot$data.PD,tot$data.W,xlab = 'Point Differential',ylab = 'Wins',col = 'blue',main="NFL Wins vs Point Differential 2002-2017",pch=20)
|
23fdf4541e4ddecf7ef95e79deb287cc2cdd57e8 | e76a2bf9abb63b93d858ea49e7f886489bc4a7e1 | /R/tirgol r 1.R | dbcf8b10b0dd64617a75aa290ff004784b992a30 | [] | no_license | DANIELH2/DataScience | d8b76816d068bb13ac3976f3bc5f5e2ebc2fcffe | 658bf2786f1c159cd6529a2592ef803cc3e22239 | refs/heads/master | 2021-06-21T05:20:24.895993 | 2021-04-21T08:14:22 | 2021-04-21T08:14:22 | 212,381,115 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 897 | r | tirgol r 1.R | df = iris
df
# Minima/maxima/means of the iris measurement columns.
# NOTE(review): df$ngth is almost certainly a typo (Petal.Length?); it
# evaluates to NULL and is silently dropped from this min().
min(df$Sepal.Length,df$Sepal.Width,df$ngth,df$Petal.Width)
max(df$Sepal.Length)
max(df$Sepal.Width)
max(df$Petal.Length)
max(df$Petal.Width)
mean(df$Sepal.Length)
mean(df$Sepal.Width)
mean(df$Petal.Length)
mean(df$Petal.Width)
# Element-wise transformations on mtcars columns.
cf = mtcars
cf
sqrt(cf$mpg)
log(cf$disp)
cf$wt^3
# Character vectors vs. paste() with a separator.
s1<-c("age","gender","height","weight")
s1
s1 <- paste("age","gender","height","weight",sep="+")
s1
# 3x4 matrix filled column-wise; row, column and overall means.
m1<-matrix(c(4,7,-8,3,0,-2,1,-5,12,-3,6,9),ncol=4)
m1
rowMeans(m1)
colMeans(m1)
mean(m1)
# Reverse alphabetical ordering of the letters.
az<-LETTERS
za<-order(az,decreasing =TRUE)
az[za]
# Loop exercise: deterministic break at 8 (repeated three times verbatim;
# `y` is assigned but never used).
y<-1
for (x in 1:10) {
print(x)
if(x==8)
break
}
for (x in 1:10) {
print(x)
if(x==8)
break
}
for (x in 1:10) {
print(x)
if(x==8)
break
}
# Random draws until an 8 appears: bounded to 40 tries, then unbounded.
for (i in 1:40) {
x <- sample(x=1:10,size =1)
print(x)
if(x==8){
break
}
}
x<-0
while(x!=8){
x<-sample(x=1:10,size =1)
print(x)
}
|
f187b37190434cdf13e11fa5f8408ccf5345c359 | e622fa6a472d3c2c3a7c23b1c6f0edbf850c242a | /cognitives.R | 015d75f17b30635784b1393a6e236788c23e52b5 | [] | no_license | hddsilva/participant_subsets | f35d5bfbe3a6f8e76150adf6566c15a4606008fd | 1ad99d26689406b8e6e630c18f72c631f20ca299 | refs/heads/main | 2023-08-17T09:25:35.679793 | 2021-10-01T19:56:12 | 2021-10-01T19:56:12 | 412,597,900 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 935 | r | cognitives.R | #Creates a dataframe of all cognitive assessments
library(dplyr)
# Read the lookup table matching the pattern on disk.
# NOTE(review): dir() may return several matching files, in which case
# read.delim() would fail; confirm only one lookup_table_20* file exists.
lookup_table <- read.delim(dir("data_categories/lookup_table/",
full.names=T, pattern="^lookup_table_20"),header=TRUE, sep="\t")
# `data` is expected to exist already (created by an earlier script in this
# project); normalise the test date to Date class.
data$cogtest_date <- as.Date(as.character(data$cogtest_date),"%Y-%m-%d")
#Create cognitive assessment table
cognitives <- data %>%
select(-childsex, -childsex.factor, -childdob) %>%
left_join(lookup_table, by = "record_id") %>%
group_by(record_id) %>%
# Keep only the cognitive-assessment events.
filter(grepl("cog",redcap_event_name)) %>%
# Age at testing in days -- childdob presumably comes from the joined lookup
# table, since the original column was dropped above; verify.
mutate(dob_cog_gap = abs(difftime(childdob,cogtest_date, units="days"))) %>%
select(record_id,
cogtest_completed:cognitive_test_information_complete,
wasi_completed:wasiii_complete,
wisc_completed:ranras_complete,
dob_cog_gap)
# Date-stamped tab-separated export.
write.table(cognitives, file=paste("data_categories/cognitives/cognitives_",Sys.Date(),".txt",sep=""), sep="\t", row.names = FALSE)
|
ccf2c0237cbd875bda5f748b5658244747d0fd58 | 9ca187e11f931f782ef2ddd8323637685ba0ce37 | /man/vdj.stats.Rd | a1431970b192217d650b5b65ffdf620b2a240808 | [] | no_license | weiliuyuan/iCellR | 320cf17970543478c6c1a0d1629fe456698da84b | d5a191957ebf1519c60bc773ed95112dca8e03ae | refs/heads/master | 2020-05-30T07:30:14.823648 | 2019-05-28T15:47:36 | 2019-05-28T15:47:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 565 | rd | vdj.stats.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/F042vdj.stats.R
\name{vdj.stats}
\alias{vdj.stats}
\title{Summary statistics for VDJ data}
\usage{
vdj.stats(vdj.data = "VDJ_analysis_ready.tsv")
}
\arguments{
\item{vdj.data}{Path to a tab-separated file of VDJ analysis results, as in the usage (default \code{"VDJ_analysis_ready.tsv"}).}
}
\value{
An object of class iCellR
}
\description{
This function reads a tab-separated file of VDJ analysis results and computes summary statistics.
}
\examples{
\dontrun{
my.vdj.stats <- vdj.stats(vdj.data = "VDJ_analysis_ready.tsv")
}
}
|
a4377f9f28f5a23f4b00ce19e19e9f390ce00341 | e69d99866cb5f5267d9c71391ffcacdabf5e5806 | /man/titanicgrp.rd | 360ccd39bb993e4c888dc1d938f57c265669f3bc | [] | no_license | cran/LOGIT | b2d3d4407fe11a846a3b9e523da95148c46f074c | 9deb1d08174d6e37f228674bf8121ddb19406215 | refs/heads/master | 2016-08-11T15:20:24.938606 | 2016-02-06T11:20:21 | 2016-02-06T11:20:21 | 48,082,940 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,225 | rd | titanicgrp.rd | \name{titanicgrp}
\alias{titanicgrp}
\docType{data}
\title{titanicgrp}
\description{
The data is an grouped version of the 1912 Titanic passenger survival
log,
}
\usage{data(titanicgrp)}
\format{
A data frame with 12 observations on the following 5 variables.
\describe{
\item{\code{survive}}{number of passengers who survived}
\item{\code{cases}}{number of passengers with same pattern of covariates}
\item{\code{age}}{1=adult; 0=child}
\item{\code{sex}}{1=male; 0=female}
\item{\code{class}}{ticket class 1= 1st class; 2= second class; 3= third class}
}
}
\details{
titanicgrp is saved as a data frame.
Used to assess risk ratios
}
\source{
Found in many other texts
}
\references{
Hilbe, Joseph M (2015), Practical Guide to Logistic Regression, Chapman & Hall/CRC.
Hilbe, Joseph M (2014), Modeling Count Data, Cambridge University Press.
Hilbe, Joseph M (2007, 2011), Negative Binomial Regression, Cambridge University Press.
Hilbe, Joseph M (2009), Logistic Regression Models, Chapman & Hall/CRC.
}
\examples{
library(MASS) # if not automatically loaded
# LOGISTIC REGRESSION
library(LOGIT)
data(titanicgrp)
tg <- titanicgrp
head(tg)
tg$died <- tg$cases - tg$survive
summary(mylr <- glm( cbind(survive, died) ~ age + sex + factor(class),
family=binomial, data=tg))
toOR(mylr)
P__disp(mylr)
# SCALED LOGISTIC REGRESSION
summary(myqr <- glm( cbind(survive, died) ~ age + sex + factor(class),
family=quasibinomial, data=tg))
toOR(myqr)
# POISSON REGRESSION
# library(COUNT)
data(titanicgrp)
titanicgrp$class <- as.factor(titanicgrp$class)
titanicgrp$logcases <- log(titanicgrp$cases)
glmpr <- glm(survive ~ age + sex + class + offset(logcases), family= poisson, data=titanicgrp)
summary(glmpr)
exp(coef(glmpr))
#lcases <- log(titanicgrp$cases)
#nb2o <- nbinomial(survive ~ age + sex + factor(class),
# formula2 =~ age + sex,
# offset = lcases,
# mean.link="log",
# scale.link="log_s",
# data=titanicgrp)
#summary(nb2o)
#exp(coef(nb2o))
}
\keyword{datasets}
|
570e59351a5c2a9d7d969a0f52269261b71f19a6 | cb1e1d51055460841ca024c33681cb2e30c590a7 | /R/RCPmod.R | f4561e69e61c609e126f79754056a484122b245a | [] | no_license | cran/RCPmod | fe9569983cb33b5652f7addbdcdcb1f688f62f00 | ccef326318838b8663b5a924c5de137b3436ed3c | refs/heads/master | 2022-11-11T10:44:08.599637 | 2022-10-25T20:47:47 | 2022-10-25T20:47:47 | 17,681,879 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 93,577 | r | RCPmod.R | # This is package RCPmod
".onAttach" <-
function( libname, pkgname)
{
	# Startup banner shown whenever the package is attached.
	packageStartupMessage( "Welcome to RCPmod. To fit RCPmodels see ?regimix")
}
".onLoad" <-
function (libname, pkgname){
# Generic DLL loader: find every shared library under the package's libs/
# directory (honouring any sub-architecture) and load it with library.dynam.
	dll.path <- file.path( libname, pkgname, 'libs')
	# Append the sub-architecture directory (e.g. x64) when one is in use.
	if( nzchar( subarch <- .Platform$r_arch))
		dll.path <- file.path( dll.path, subarch)
	# Regex matching the platform's dynamic-library extension at end-of-name.
	this.ext <- paste( sub( '.', '[.]', .Platform$dynlib.ext, fixed=TRUE), '$', sep='')
	dlls <- dir( dll.path, pattern=this.ext, full.names=FALSE)
	names( dlls) <- dlls
	# Load each DLL found (extension stripped, as library.dynam expects).
	if( length( dlls))
		lapply( dlls, function( x) library.dynam( sub( this.ext, '', x), package=pkgname, lib.loc=libname))
}
"additive.logistic" <-
function(x)
{
	# Additive logistic transform: maps x in R^(G-1) onto the G-simplex.
	# The final element is the remainder, so the result always sums to 1.
	ex <- exp( x)
	probs <- ex / (1 + sum( ex))
	c( probs, 1 - sum( probs))
}
"AIC.regimix" <-
function (object, ..., k = 2)
{
    # Akaike information criterion for a fitted regimix object. k is the
    # per-parameter penalty; k = NULL falls back to the classical value 2.
    if (is.null(k))
        k <- 2
    nPar <- length(unlist(object$coefs))
    -2 * object$logl + k * nPar
}
"BIC.regimix" <-
function (object, ...)
{
    # Bayesian information criterion: like AIC but with a log(n)
    # per-parameter penalty, n being the number of sites on the fit.
    nPar <- length(unlist(object$coefs))
    -2 * object$logl + log(object$n) * nPar
}
"calcInfoCrit" <-
function( ret)
{
	# Attach BIC and AIC to a partially-built regimix result list (uses
	# ret$coefs, ret$logl and ret$n). An ICL variant based on the posterior
	# probabilities was present here once but is disabled.
	nPar <- length( unlist( ret$coefs))
	ret$BIC <- -2 * ret$logl + log( ret$n) * nPar
	ret$AIC <- -2 * ret$logl + 2 * nPar
	return( ret)
}
"calcPostProbs" <-
function( pis, logCondDens)
{
	# Posterior RCP membership probabilities per site, computed on the log
	# scale with the log-sum-exp trick for numerical stability.
	lpp <- log( pis) + logCondDens
	rowMax <- apply( lpp, 1, max)
	# log of the row-wise normalising constants.
	logNorm <- rowMax + log( rowSums( exp( lpp - rowMax)))
	exp( lpp - logNorm)
}
"check.outcomes1" <-
function( outs)
{
	# Species (column) labels must be unique; return the species count when
	# they are and FALSE otherwise.
	nms <- colnames( outs)
	if( anyDuplicated( nms))
		return( FALSE)
	length( nms)
}
"clean.data" <-
function( data, form1, form2){
	# Build model frames for the RCP part (form1) and, optionally, the
	# species part (form2) of the model, keeping only rows complete for BOTH.
	# Returns list(ids, mf.X, mf.W) where ids are the retained row names.
	mf.X <- model.frame(form1, data = data, na.action = na.exclude)
	if( !is.null( form2)){
		mf.W <- model.frame(form2, data = data, na.action = na.exclude)
		# Row names are unique within each frame, so a name that is duplicated
		# in the concatenation must occur in both frames; the duplicated()
		# positions fall in the mf.X part, so ids follow mf.X's row order.
		ids <- c( rownames( mf.W), rownames( mf.X))[duplicated( c( rownames( mf.W), rownames( mf.X)))] #those rows of data that are good for both parts of the model.
		mf.X <- mf.X[rownames( mf.X) %in% ids,, drop=FALSE]
		mf.W <- mf.W[rownames( mf.W) %in% ids,, drop=FALSE]
	}
	else{
		# No species-level formula: every complete row of mf.X is usable.
		mf.W <- NULL
		ids <- rownames( mf.X)
	}
	res <- list(ids=ids, mf.X=mf.X, mf.W=mf.W)
	return( res)
}
"coef.regimix" <-
function (object, ...)
{
	# coef method for regimix fits: reshape the flat coefficient vectors
	# stored on the object into labelled vectors/matrices.
	# NOTE(review): this function mixes object$coefs and object$coef; the
	# latter works only via R's partial matching of list names ($coef ->
	# $coefs). Behaviour is preserved here, but it is fragile.
	res <- list()
	# Species intercepts.
	res$alpha <- object$coefs$alpha
	names( res$alpha) <- object$names$spp
	# tau: (nRCP-1) x S matrix of species-by-RCP deviations.
	if( !is.null( object$coef$tau)){
		res$tau <- matrix(object$coefs$tau, nrow = object$nRCP - 1, ncol = object$S)
		colnames( res$tau) <- object$names$spp
	}
	# beta: (nRCP-1) x p.x matrix for the RCP (mixing) model covariates.
	if( !is.null( object$coef$beta)){
		res$beta <- matrix(object$coefs$beta, nrow = object$nRCP - 1, ncol = object$p.x)
		colnames( res$beta) <- object$names$Xvars
	}
	# gamma: S x p.w matrix for the species-level covariates.
	if( !is.null( object$coef$gamma)){
		res$gamma <- matrix( object$coef$gamma, nrow=object$S, ncol=object$p.w)
		colnames( res$gamma) <- object$names$Wvars
		rownames( res$gamma) <- object$names$spp
	}
	# Per-species log-dispersions (NegBin/Tweedie/Normal fits only).
	if( !is.null( object$coef$disp)){
		res$logDisp <- object$coef$disp
		names( res$logDisp) <- object$names$spp
	}
	return(res)
}
"cooks.distance.regimix" <-
function( model, ..., oosSize=1, times=model$n, mc.cores=1, quiet=FALSE)
{
	# Leave-out cross-validation diagnostics for a regimix fit: refit the
	# model `times` times with `oosSize` sites given zero weight, and record
	# out-of-sample species predictions, a Cook's-distance-like change in the
	# fitted RCP probabilities, and the predictive log-likelihood of the
	# held-out sites. Requires the fit to carry titbits (titbits=TRUE).
	if (oosSize > model$n %/% 2)
		stop("Out of sample is more than half the size of the data! This is almost certainly an error. Please set `oosSize' to something smaller.")
	if (is.null(model$titbits))
		stop("Model doesn't contain all information required for cross validation. Please supply model with titbits (from titbits=TRUE in regimix call)")
	if ( !quiet)
		pb <- txtProgressBar(min = 1, max = times, style = 3, char = "><(('> ")
	# One cross-validation replicate: x is the replicate index.
	funny <- function(x) {
		if (!quiet)
			setTxtProgressBar(pb, x)
		# With the defaults (oosSize=1, times=n) this is exact leave-one-out:
		# replicate x leaves out site x; otherwise sample the hold-out set.
		if( oosSize!=1 | times!=model$n) #do we need to sample?
			OOBag <- sample(1:model$n, oosSize, replace = FALSE)
		else
			OOBag <- x
		inBag <- (1:model$n)[!(1:model$n) %in% OOBag]
		# Held-out sites are removed by zeroing their weights, not by subsetting.
		new.wts <- model$titbits$wts
		new.wts[OOBag] <- 0
		control <- model$titbits$control
		control$quiet <- TRUE
		control$trace <- 0
		control$optimise <- TRUE
		# Refit from the full-data estimates as starting values.
		tmpmodel <- regimix.fit(outcomes = model$titbits$Y,
			W = model$titbits$W, X = model$titbits$X, offy = model$titbits$offset,
			wts = new.wts, disty = model$titbits$disty, nRCP = model$nRCP,
			power = model$titbits$power, inits = unlist(model$coef),
			control = control, n = model$n, S = model$S, p.x = model$p.x,
			p.w = model$p.w)
		# Out-of-sample species-level predictions (mixture-averaged means).
		OOSppPreds <- matrix(NA, nrow = tmpmodel$n, ncol = tmpmodel$S)
		for (ss in 1:tmpmodel$S)
			OOSppPreds[OOBag, ss] <- rowSums(tmpmodel$mus[OOBag, ss,] * tmpmodel$pis[OOBag, , drop=FALSE])
		# Cook's-distance analogue: mean change in fitted RCP probabilities
		# over the retained (in-bag) sites.
		newPis <- tmpmodel$pis
		r.negi <- model$pis - newPis
		r.negi[OOBag,] <- NA
		r.negi <- colMeans( r.negi, na.rm=TRUE)
		#great lengths to calc pred logl...
		#great lengths indeed...
		# Re-evaluate the likelihood (no optimisation) at the refitted
		# parameters to get per-site log-likelihood contributions.
		alpha.score <- as.numeric(rep(NA, model$S))
		tau.score <- as.numeric(matrix(NA, ncol = model$S, nrow = model$nRCP - 1))
		beta.score <- as.numeric(matrix(NA, ncol = ncol(model$titbits$X), nrow = model$nRCP - 1))
		if( model$p.w > 0){
			gamma.score <- as.numeric(matrix( NA, nrow=model$S, ncol=model$p.w))
			gamma <- tmpmodel$coef$gamma
			W <- model$titbits$W
		}
		else
			gamma.score <- W <- gamma <- -999999	# -999999 is the package's "absent" sentinel
		if( model$titbits$disty %in% 3:5){
			disp.score <- as.numeric( rep( NA, model$S))
			disp <- coef( model)$logDisp
		}
		else
			disp.score <- -999999
		scoreContri <- -999999
		#model quantities
		logCondDens <- as.numeric(matrix(NA, nrow = model$n, ncol = model$nRCP))
		logls <- as.numeric(rep(NA, model$n))
		conv <- as.integer(0)
		tmplogl <- .Call( "RCP_C", as.numeric( model$titbits$Y), as.numeric(model$titbits$X), as.numeric( model$titbits$W), as.numeric(model$titbits$offset), as.numeric(model$titbits$wts),
			as.integer(model$S), as.integer(model$nRCP), as.integer(model$p.x), as.integer(model$p.w), as.integer(model$n), as.integer( model$titbits$disty),
			as.numeric( tmpmodel$coef$alpha), as.numeric( tmpmodel$coef$tau), as.numeric( tmpmodel$coef$beta), as.numeric( gamma), as.numeric( tmpmodel$coef$disp), as.numeric( model$titbits$power),
			as.numeric(model$titbits$control$penalty), as.numeric(model$titbits$control$penalty.tau), as.numeric(model$titbits$control$penalty.gamma), as.numeric(model$titbits$control$penalty.disp[1]), as.numeric(model$titbits$control$penalty.disp[2]),
			alpha.score, tau.score, beta.score, gamma.score, disp.score, scoreContri,
			as.numeric( tmpmodel$pis), as.numeric( tmpmodel$mus), logCondDens, logls,
			as.integer(model$titbits$control$maxit), as.integer(model$titbits$control$trace), as.integer(model$titbits$control$nreport), as.numeric(model$titbits$control$abstol), as.numeric(model$titbits$control$reltol), as.integer(conv),
			as.integer(FALSE), as.integer(TRUE), as.integer(FALSE), as.integer(FALSE), as.integer(FALSE), PACKAGE = "RCPmod")
		# Keep only the held-out sites' log-likelihood contributions.
		ret.logl <- rep( NA, model$n)
		ret.logl[OOBag] <- logls[OOBag]
		return( list( OOSppPreds=OOSppPreds, cooksDist=r.negi, predLogL=ret.logl))
	}
	if (!quiet & mc.cores>1 & Sys.info()['sysname'] != "Windows")
		message("Progress bar may not be monotonic due to the vaguaries of parallelisation")
	tmp <- parallel::mclapply(1:times, funny, mc.cores = mc.cores)
	if (!quiet)
		message("")
	# Assemble replicate results into arrays/matrices for the return object.
	cooksD <- t( sapply( tmp, function(x) x$cooksDist))
	OOpreds <- array(NA, dim = c(model$n, model$S, times), dimnames = list(rownames(model$titbits$X), colnames(model$titbits$Y), paste("CVset", 1:times, sep = "")))
	for (bb in 1:times)
		OOpreds[, , bb] <- tmp[[bb]]$OOSppPreds
	logls <- sapply( tmp, function(x) x$predLogL)
	colnames( logls) <- rownames( cooksD) <- paste( "OOS",1:times,sep="_")
	ret <- list(Y = model$titbits$Y, CV = OOpreds, cooksD=cooksD, predLogL=logls)
	class(ret) <- "regiCooksD"
	return(ret)
}
"extractAIC.regimix" <-
function (fit, scale = 1, k = 2, ...)
{
    # extractAIC method for regimix fits: returns c(edf, AIC).
    # BUG FIX: the original body referenced `object`, which is not a
    # parameter of this function (the argument is `fit`), so the method
    # always failed. The parameter count and log-likelihood are now taken
    # directly from the fit, consistent with AIC.regimix.
    edf <- length(unlist(fit$coefs))
    if (is.null(k))
        k <- 2
    aic <- -2 * fit$logl + k * edf
    return(c(edf, aic))
}
"get.dist" <-
function( disty.cases, dist1)
{
	# Map a distribution name onto its internal integer code; abort with an
	# informative message (listing the available options) when unknown.
	err <- paste( c( "Distribution not implemented. Options are: ", disty.cases, "-- Exitting Now"), collapse=" ")
	code <- switch( dist1,
		Bernoulli = 1,
		Poisson = 2,
		NegBin = 3,
		Tweedie = 4,
		Normal = 5,
		stop( err))
	code
}
"get.long.names" <-
function( object)
{
	#function to get the names of columns for the vcov matrix or the regiboot matrix
	#defining the column names... Trickier than you might expect
	# Builds one label per scalar parameter, ordered alpha, tau, beta, gamma,
	# logDisp, matching the layout of unlist(coef(object)).
	coef.obj <- coef( object)
	# One "<spp>_alpha" label per species intercept.
	colnammy <- paste( names( coef.obj$alpha), "alpha", sep="_")
	# tau labels: species crossed with RCP index, column-major over tau.
	if( "tau" %in% names( coef.obj))
		colnammy <- c( colnammy, paste( paste( rep( colnames( coef.obj$tau), each=nrow( coef.obj$tau)), paste( "tau", 1:nrow( coef.obj$tau), sep="_"), sep="_")))
	# beta labels: X covariate crossed with RCP index.
	if( "beta" %in% names( coef.obj))
		colnammy <- c( colnammy, paste( paste( rep( colnames( coef.obj$beta), each=nrow( coef.obj$beta)), paste( "beta", 1:nrow( coef.obj$beta), sep="_"), sep="_")))
	# gamma labels: "<spp>_gamma_<Wvar>".
	if( "gamma" %in% names( coef.obj))
		colnammy <- c( colnammy, paste( paste( rep( rownames( coef.obj$gamma), times=ncol( coef.obj$gamma)), "gamma", sep="_"), rep( colnames( coef.obj$gamma), each=nrow( coef.obj$gamma)), sep="_"))
	# One "<spp>_logDisp" label per species dispersion (when present).
	if( "logDisp" %in% names( coef.obj))
		colnammy <- c( colnammy, paste( names( coef.obj$logDisp), "logDisp", sep="_"))
	return( colnammy)
}
"get.offset" <-
function( mf, mf.X, mf.W)
{
	# Return the model-frame offset when one is present and non-trivial;
	# otherwise a zero offset of length nrow(mf.X). Note any(NULL != 0) is
	# FALSE, so a missing offset falls through to the zero default.
	supplied <- model.offset( mf)
	if( any( supplied != 0))
		return( supplied)
	rep( 0, nrow( mf.X))
}
"get.power" <-
function( disty, power, S)
{
	# Tweedie power parameters: only meaningful for disty == 4 (Tweedie).
	# A scalar power is recycled to one value per species; any other length
	# mismatch is an error. Non-Tweedie fits get the -999999 sentinel.
	if( disty != 4)
		return( -999999)
	if( length( power) == 1)
		power <- rep( power, S)
	if( length( power) != S)
		stop( "Power parameter(s) not properly specified, exitting now")
	power
}
"get.residuals" <-
function( site.logls, outcomes, dist, coef, nRCP, type="deviance", powers=NULL, quiet=FALSE, nsim=1000, X, W, offy)
{
	# Site-level residuals for a regimix fit. Only "deviance" residuals are
	# usable; the "RQR" (randomised quantile residual) branch is unfinished
	# (see NOTE below) and will fail if requested.
	if( ! type %in% c("deviance","RQR"))
		stop( "Unknown type of residual requested. Only deviance and RQR (for randomised quantile residuals) are implemented\n")
	if( type=="deviance"){
		# Deviance-style residual from each site's log-likelihood contribution;
		# sign and centring are undefined for a multi-species mixture (see
		# messages below), so these suit simulation envelopes only.
		resids <- sqrt( -2*site.logls)
		if( !quiet){
			message( "The sign of the deviance residuals is unknown -- what does sign mean for multiple species? Their mean is also unknown -- what is a saturated model in a mixture model?")
			message( "This is not a problem if you are just looking for an over-all fit diagnostic using simulation envelopes (cf normal and half normal plots).")
			message( "It is a problem however, when you try to see how residuals vary with covariates etc.. but the meaning of these plots needs to be considered carefully as the residuals are for multiple species anyway.")
		}
	}
	if( type=="RQR"){
		# NOTE(review): this branch is incomplete/broken: `n.sim` and `pwers`
		# are undefined (presumably `nsim` and `powers` were intended), `sims`
		# is computed but never used, only site ii==1 is handled, and `resids`
		# is never assigned -- the return() below would fail. Left untouched.
		ii <- 1
		X1 <- kronecker( rep( 1, nsim), X[ii,])
		W1 <- kronecker( rep( 1, nsim), W[ii,])
		sims <- simRCPdata( nRCP=nRCP, S=length( coef$alpha), n=n.sim, p.x=ncol( X), p.w=ncol( W), alpha=coef$alpha, tau=coef$tau, beta=coef$beta, gamma=coef$gamma, logDisps=coef$disp, powers=pwers, X=X1, W=W1, offset=offy,dist=dist)
	}
	return( resids)
}
"get.start.vals" <-
function( outcomes, W, X, offy, wts, disty, G, S, power, inits, quiet=FALSE)
{
	# Starting values for the regimix optimiser: alpha (species intercepts),
	# tau (species-by-RCP deviations), beta (RCP model, started at zero),
	# gamma (species covariates) and disp (log-dispersions, disty > 2 only).
	# inits[1] selects the strategy ("random", "random2", "hclust",
	# "noPreClust"); any other value is treated as a full numeric vector of
	# user-supplied (unchecked) starting values. -999999 is the package's
	# "absent" sentinel throughout.
	if( !quiet)
		message( "Obtaining starting values...")
	alpha <- rep( -999999, S)
	tau <- matrix( -999999, nrow=G, ncol=S)
	if( length( W) != 1 & !is.null( W)) gamma <- matrix( -999999, nrow=S, ncol=ncol( W)) else gamma <- -999999
	beta <- matrix( -999999, nrow=G-1, ncol=ncol( X))
	if( disty>2) disp <- rep( -999999, S) else disp <- -999999
	if (inits[1] %in% c("random","random2","hclust","noPreClust")) {
		# Pre-cluster sites to get provisional RCP memberships (hard groups
		# from Ward clustering, or soft Dirichlet draws for "noPreClust").
		if( !inits[1] %in% "noPreClust"){
			tmp <- dist(outcomes, method = "manhattan")
			tmp1 <- hclust(tmp, method = "ward.D2")
			tmpGrp <- cutree(tmp1, G)
			tmpX <- model.matrix( ~-1+as.factor( tmpGrp))
		}
		else{
			tmpX <- scotts.rdirichlet( n=nrow( X), alpha=rep( 5, G))
		}
		if( length( W) != 1)
			df <- cbind( tmpX, W)
		else
			df <- tmpX
		# Decreasing ridge-penalty path for glmnet; only one value is used.
		lambda.seq <- sort( unique( c( seq( from=1/0.001, to=1, length=25), seq( from=1/0.1, to=1, length=10))), decreasing=TRUE)#1/seq( from=0.001, to=1, length=100)
		if( disty == 1)
			fam <- "binomial"
		if( disty == 2 | disty == 3)
			fam <- "poisson"
		if( disty == 5)
			fam <- "gaussian"
		# Per-species penalised GLM of outcome on group indicators (+ W),
		# giving provisional alpha, tau, gamma (and dispersion) estimates.
		for( ss in 1:ncol( outcomes)){
			if( disty != 4){
				tmp.fm <- glmnet::glmnet(y=outcomes[,ss], x=df, family=fam, offset=offy, weights=wts,
					alpha=0, #ridge penalty
					lambda=lambda.seq, #the range of penalties, note that only one will be used
					standardize=FALSE, #don't standardize the covariates (they are already standardised)
					intercept=FALSE) #don't give me an intercept
				locat.s <- 1/1
				my.coefs <- glmnet::coef.glmnet( tmp.fm, s=locat.s)
				if( any( is.na( my.coefs))){ #just in case the model is so badly posed that mild penalisation doesn't work...
					# Fall back to the heaviest penalty on the path that still
					# yields finite coefficients.
					my.coefs <- glmnet::coef.glmnet( tmp.fm, s=lambda.seq)
					lastID <- apply( my.coefs, 2, function(x) !any( is.na( x)))
					lastID <- tail( (1:length( lastID))[lastID], 1)
					my.coefs <- my.coefs[,lastID]
				}
			}
			else{ #Tweedie needs an unconstrained fit. May cause problems in some cases, especially if there is quasi-separation...
				df3 <- as.data.frame( cbind( y=outcomes[,ss], offy=offy, df))
				colnames( df3)[-(1:2)] <- c( paste( "grp", 1:G, sep=""), paste( "w",1:ncol( W), sep=""))
				tmp.fm1 <- fishMod::tglm( y~-1+.-offy+offset( offy), wts=wts, data=df3, p=power[ss], vcov=FALSE, residuals=FALSE, trace=0)
				my.coefs <- c( NA, tmp.fm1$coef)
				disp[ss] <- log( tmp.fm1$coef["phi"])
				my.coefs <- my.coefs[names( my.coefs) != "phi"]
			}
			# Decompose group coefficients into overall intercept + deviations.
			alpha[ss] <- mean( my.coefs[1+1:G])
			tau[,ss] <- my.coefs[1+1:G] - alpha[ss]
			if( length( W) != 1)
				gamma[ss,] <- my.coefs[-(1:(G+1))]
			if( disty == 3){
				# Method-of-moments NegBin theta, capped at 2 for stability;
				# stored as log(1/theta).
				tmp <- MASS::theta.mm( outcomes[,ss], as.numeric( predict( tmp.fm, s=locat.s, type="response", newx=df, newoffset=offy)), weights=wts, dfr=nrow(outcomes), eps=1e-4)
				if( tmp>2)
					tmp <- 2
				disp[ss] <- log( 1/tmp)
			}
			if( disty == 5){
				preds <- as.numeric( predict(tmp.fm, s=locat.s, type="link", newx=df, newoffset=offy))
				disp[ss] <- log( sqrt( sum((outcomes[,ss] - preds)^2)/nrow( outcomes))) #should be something like the resid standard Deviation.
			}
		}
	}
	tau <- tau[1:(G-1),] #get rid of redundant parmaeters
	#beta stuff in here...
	beta <- matrix(0, ncol = ncol(X), nrow = G - 1)
	#this code is a nice idea but glmnet uses a softmax link function, not an additive logistic...
	#	tmp.fm <- glmnet( y=as.factor( tmpGrp), x=X[,-1], family="multinomial", alpha=0, lambda=1/seq( from=1,to=10,length=25), standardize=FALSE, intercept=TRUE)
	#	beta <- t( sapply( coef( tmp.fm, s=locat.s), as.numeric))
	#	beta <- beta[1:(G-1),]
	#################
	#### Important magic number
	# Scale of the random jitter added below.
	my.sd <- mult <- 0.3
	if (inits[1] == "hclust" & !quiet)
		message( "Obtaining initial values for species' model from clustering algorithm -- no random component")
	if (inits[1] == "random") {
		# Fixed-scale jitter on all parameter groups.
		alpha <- alpha + rnorm(S, sd = my.sd)
		tau <- tau + as.numeric(matrix(rnorm((G - 1) * S, sd = my.sd), ncol = G - 1))
		beta <- beta + as.numeric(matrix(rnorm((G - 1) * ncol(X), mean = 0, sd = my.sd), ncol = ncol(X), nrow = G - 1))
		if( length( W) != 1 & !is.null( W))
			gamma <- gamma + as.numeric( matrix( rnorm( S*ncol(W), mean=0, my.sd), ncol=ncol( W), nrow=S))
		if( disty > 2)
			disp <- disp + rnorm( S, sd=my.sd)
	}
	if (inits[1] == "random2") {
		# Jitter scaled to the spread of each parameter group (fallbacks keep
		# the sd positive when the spread is degenerate, e.g. G == 2).
		my.sd <- mult*sd( alpha); if( is.na( my.sd)) my.sd <- 0.1
		alpha <- alpha + rnorm(S, sd = my.sd)
		my.sd <- mult*sd( tau); if( is.na( my.sd)) my.sd <- 0.1
		tau <- tau + as.numeric(matrix(rnorm((G - 1) * S, sd = my.sd), ncol = G - 1))
		my.sd <- mult*apply( beta[,-1,drop=FALSE], 2, sd)
		if( any( is.na( my.sd)) | any( my.sd== 0) | any( is.na( my.sd))) #na condition for G=2 groups
			my.sd <- cbind( rep( 0.1, (G-1)), #for the intercepts
				0.1*matrix( rep( 1/apply( X[,-1,drop=FALSE], 2, function(x) sd(x)), each=G-1), nrow=G-1, ncol=ncol( X)-1)) #for the covariates
		beta <- beta + as.numeric( matrix( rnorm((G - 1) * ncol(X), mean = 0, sd = my.sd), ncol = ncol(X), nrow = G - 1))
		if( length( W) != 1 & !is.null( W)){
			my.sd <- mult*sd( gamma); if( is.na( my.sd) | my.sd==0) my.sd <- 0.1
			gamma <- gamma + as.numeric( matrix( rnorm( S*ncol(W), mean=0, my.sd), ncol=ncol( W), nrow=S))
		}
		if( disty > 2){
			my.sd <- mult*sd( disp); if( is.na( my.sd) | my.sd==0) my.sd <- 0.1
			disp <- disp + as.numeric( rnorm( S, mean=0, my.sd))
#			message( "My Starting Dispersions Are: ", disp,"\n")
		}
	}
	# inits was not a recognised strategy keyword: alpha is still at the
	# sentinel, so unpack inits as a user-supplied numeric start vector.
	if( any( alpha == -999999)) {
		if( !quiet)
			message("Using supplied initial values (unchecked). Responsibility is entirely the users!")
		start <- 0
		alpha <- inits[start+1:S]
		start <- start + S
		tau <- inits[start + 1:((G - 1) * S)]
		start <- start + (G-1)*S
		beta <- inits[start + 1:((G - 1) * ncol(X))]
		start <- start + (G-1)*ncol(X)
		if( length( W) != 1 & !is.null( W)){
			gamma <- inits[start+ 1:(S*ncol(W))]
			start <- start + S*ncol(W)
		}
		if( disty %in% 3:5)
			disp <- inits[start+1:S]
	}
	res <- list()
	res$alpha <- as.numeric( alpha)
	res$tau <- as.numeric( tau)
	res$beta <- as.numeric( beta)
	res$gamma <- as.numeric( gamma)
	res$disp <- as.numeric( disp)
	return( res)
}
"get.titbits" <-
function( titbits, outcomes, X, W, offset, wts, form.RCP, form.spp, control, dist, p.w, power)
{
	# Assemble the "titbits" (data and settings) stored on a fitted regimix
	# object. titbits is either TRUE (keep everything) or a character vector
	# naming the pieces to keep.
	# BUG FIX: the original else-branch overwrote the request vector with an
	# empty list *before* testing membership ('"Y" %in% titbits' etc.), so a
	# character request never kept anything. The request is now saved first.
	# isTRUE() also avoids the length > 1 condition error that
	# 'titbits == TRUE' raises for character vectors in R >= 4.2.
	if( isTRUE( titbits))
		titbits <- list( Y = outcomes, X = X, W = W, offset = offset, wts=wts, form.RCP = form.RCP, form.spp = form.spp, control = control, dist = dist, power=power)
	else{
		requested <- titbits
		titbits <- list()
		if( "Y" %in% requested)
			titbits$Y <- outcomes
		if( "X" %in% requested)
			titbits$X <- X
		if( "W" %in% requested)
			titbits$W <- W
		if( "offset" %in% requested)
			titbits$offset <- offset
		if( "wts" %in% requested)
			titbits$wts <- wts
		if( "form.RCP" %in% requested)
			titbits$form.RCP <- form.RCP
		if( "form.spp" %in% requested)
			titbits$form.spp <- form.spp
		if( "control" %in% requested)
			titbits$control <- control
		if( "dist" %in% requested)
			titbits$dist <- dist
		if( "power" %in% requested)
			titbits$power <- power
	}
	# No species-level covariates: the W matrix is meaningless, drop it.
	if( p.w==0 & "W" %in% names( titbits))
		titbits$W <- NULL
	# Align the two formula environments so later evaluation is consistent.
	if( p.w!=0 & "form.spp" %in% names( titbits))
		environment( titbits$form.spp) <- environment( titbits$form.RCP)
	return( titbits)
}
"get.W" <-
function( form.spp, mf.W)
{
	# Species-level design matrix. Constant (intercept) columns are dropped
	# because the intercept is handled separately (alpha). The sentinel
	# -999999 flags "no species formula supplied".
	if( is.null( form.spp))
		return( -999999)
	rhs <- form.spp
	if( length( rhs) > 2)
		rhs[[2]] <- NULL	# strip the response from a two-sided formula
	desMat <- model.matrix( rhs, mf.W)
	isConst <- apply( desMat, 2, function( col) all( col == 1))
	desMat[, !isConst, drop=FALSE]
}
"get.wts" <-
function ( mf)
{
	# Observation weights from the model frame; when none were supplied,
	# every observation gets weight 1.
	wts <- model.weights( mf)
	if( is.null( wts))
		wts <- rep( 1, nrow( mf))
	return( wts)
}
"get.X" <-
function( form.RCP, mf.X)
{
	# Design matrix for the RCP (mixing) model. The response is stripped
	# from the formula first. A rough scale check messages (but does not
	# stop) when any non-intercept covariate looks unstandardised.
	rhs <- form.RCP
	rhs[[2]] <- NULL
	rhs <- as.formula( rhs)
	X <- model.matrix( rhs, mf.X)
	nonInt <- X[, !grepl( "(Intercep)", colnames( X)), drop=FALSE]
	sds <- apply( nonInt, 2, sd)
	eps <- 2
	if( any( sds*eps > 2 & sds != 0)){
		message( "##At least one of the covariates has non-standardised (approx.) scaling.")
		message( "##Please consider rescaling to avoid numerical issues.")
		message( "##The function should still run, but results may be unstable.")
		message( "##See ?regimix details section.")
	}
	return( X)
}
"globCIFinder" <-
function( x, en, alpha, nsim)
{
	# Scale the pointwise envelope `en` so that it has *global* (simultaneous)
	# coverage over the nsim simulated curves in x: solve globErrorFn(c) = 0
	# for the multiplier c, then return the scaled envelope en*c.
	# Works for both the upper and lower envelope (alpha > 0.5 vs <= 0.5).
	c <- uniroot( f=globErrorFn, interval=c(0.1,5), x=x, en=en, alpha=alpha, nsim=nsim)$root
	return( en*c)
}
"globErrorFn" <-
function( c1, x, en, alpha, nsim)
{
	# Root function for the global-envelope multiplier c1: the proportion of
	# simulated curves (columns of x) that escape the scaled envelope c1*en,
	# minus the target tail probability. alpha > 0.5 targets the upper
	# envelope (escape above), otherwise the lower envelope (escape below).
	if( alpha > 0.5){
		escaped <- apply( x, 2, function( col) any( col - c1*en > 0))
		return( sum( escaped)/nsim - (1-alpha))
	}
	escaped <- apply( x, 2, function( col) any( col - c1*en < 0))
	sum( escaped)/nsim - alpha
}
"inv.logit" <-
function(x)
{
	# Inverse logit (logistic) transform: maps the real line onto (0, 1).
	expEta <- exp( x)
	expEta / (1 + expEta)
}
"logLik.regimix" <-
function (object, ...)
{
    # The maximised (penalised) log-likelihood stored on the fitted object.
    object$logl
}
"my.rmvnorm" <-
function (n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)),
    method = c("eigen", "svd", "chol"))
{
    # Draw n multivariate-normal deviates with the given mean vector and
    # covariance matrix sigma, via an eigen, SVD or (pivoted) Cholesky
    # factorisation of sigma. Returns an n x length(mean) matrix.
    # NOTE(review): this mirrors mvtnorm::rmvnorm -- presumably copied in to
    # avoid the dependency; confirm before modifying.
    if (!isSymmetric(sigma, tol = sqrt(.Machine$double.eps),
        check.attributes = FALSE)) {
        stop("sigma must be a symmetric matrix")
    }
    if (length(mean) != nrow(sigma)) {
        stop("mean and sigma have non-conforming size")
    }
    # Numerical symmetry check, ignoring dimnames.
    sigma1 <- sigma
    dimnames(sigma1) <- NULL
    if (!isTRUE(all.equal(sigma1, t(sigma1)))) {
        warning("sigma is numerically not symmetric")
    }
    method <- match.arg(method)
    if (method == "eigen") {
        # Symmetric square root of sigma from its eigendecomposition.
        ev <- eigen(sigma, symmetric = TRUE)
        if (!all(ev$values >= -sqrt(.Machine$double.eps) * abs(ev$values[1]))) {
            warning("sigma is numerically not positive definite")
        }
        retval <- ev$vectors %*% diag(sqrt(ev$values), length(ev$values)) %*%
            t(ev$vectors)
    }
    else if (method == "svd") {
        # Square-root factor from the singular value decomposition.
        sigsvd <- svd(sigma)
        if (!all(sigsvd$d >= -sqrt(.Machine$double.eps) * abs(sigsvd$d[1]))) {
            warning("sigma is numerically not positive definite")
        }
        retval <- t(sigsvd$v %*% (t(sigsvd$u) * sqrt(sigsvd$d)))
    }
    else if (method == "chol") {
        # Pivoted Cholesky; columns re-ordered back to the original order.
        retval <- chol(sigma, pivot = TRUE)
        o <- order(attr(retval, "pivot"))
        retval <- retval[, o]
    }
    # Transform iid standard normals by the factor, then shift by the mean.
    retval <- matrix(rnorm(n * ncol(sigma)), nrow = n) %*% retval
    retval <- sweep(retval, 2, mean, "+")
    colnames(retval) <- names(mean)
    retval
}
"nd2" <-
function(x0, f, m=NULL, D.accur=4, eps=NULL, mc.cores=getOption("mc.cores", 4L), ...) {
	# Accurate first-order numerical derivatives of f at x0 by central finite
	# differences, parallelised over the elements of x0 via parallel::mclapply.
	# Stencil weights from Fornberg and Sloan (Acta Numerica, 1994, Table 1,
	# p. 213). Returns a length(x0) x m matrix: row ii holds the derivatives
	# of all m outputs of f with respect to x0[ii].
	# x0: point of evaluation; f: R^n -> R^m (extra args via ...);
	# m: output dimension of f (found by one evaluation when NULL);
	# D.accur: 2- or 4-point central stencil; eps: fixed step size (a
	# machine-precision-based per-element step is used when NULL);
	# mc.cores: number of cores for mclapply.
	D.n <- length(x0)
	if (is.null(m)) {
		D.f0 <- f(x0, ...)
		m <- length(D.f0)
	}
	if (D.accur == 2) {
		D.w <- tcrossprod(rep(1, m), c(-1/2, 1/2))
		D.co <- c(-1, 1)
	}
	else {
		D.w <- tcrossprod(rep(1, m), c(1/12, -2/3, 2/3, -1/12))
		D.co <- c(-2, -1, 1, 2)
	}
	D.n.c <- length(D.co)
	if (is.null(eps)) {
		macheps <- .Machine$double.eps
		D.h <- macheps^(1/3)*abs(x0)
	}
	else
		# BUG FIX: was rep(eps, D.accur). D.h is indexed by the parameter
		# index ii in 1:D.n, so it must have length D.n; with more than
		# D.accur parameters the old code produced NA steps (NA derivatives).
		D.h <- rep(eps, D.n)
	# Derivative of all outputs of f with respect to parameter ii.
	mc.fun <- function(ii) {
		D.temp.f <- matrix(0, m, D.n.c)
		for (jj in 1:D.n.c) {
			D.xd <- x0 + D.h[ii]*D.co[jj]*(1:D.n == ii)
			D.temp.f[, jj] <- f(D.xd, ...)
		}
		rowSums(D.w*D.temp.f)/D.h[ii]
	}
	derivs <- parallel::mclapply(seq_len(D.n), mc.fun, mc.cores = mc.cores)
	do.call("rbind", derivs)
}
"noRCPfit" <-
function( outcomes, W, X, offy, wts, disty, nRCP, power, inits, control, n, S, p.x, p.w)
{
	# Null (one-group) model: fit each species independently with an
	# intercept (+ W covariates) and no RCP structure. Used as the nRCP==1
	# reference fit; returns an object shaped like a regimix fit, with the
	# RCP-specific pieces (tau, beta, scores, ...) set to NULL.
	beta <- tau <- NULL
	# -999999 is the package's sentinel for "no W matrix supplied".
	if( all(W==-999999)){
		W <- matrix( 1, ncol=1, nrow=n)
		gamma <- NULL
	}
	else{
		W <- cbind( 1, W)	# prepend the intercept column
		gamma <- matrix( NA, nrow=S, ncol=p.w)
	}
	logls <- alpha <- rep( 0, S)
	if( disty>2) disp <- rep( NA, S) else disp <- NULL
	mus <- array( NA, dim=c( n, S, nRCP)) #container for the fitted spp model
	# One GLM-type fit per species; family chosen by the disty code.
	for( ss in 1:ncol( outcomes)){
		if( disty == 1){ #Bernoulli
			tmp.fm <- glm( cbind( outcomes[,ss], 1-outcomes[,ss]) ~ -1+W, family=binomial(), offset=offy, weights=wts)
			logls[ss] <- sum( dbinom( outcomes[,ss], size=1, prob=tmp.fm$fitted, log=TRUE))
		}
		if( disty==2){ #Poisson
			tmp.fm <- glm( outcomes[,ss] ~ -1+W, family=poisson(), offset=offy, weights=wts)
			logls[ss] <- sum( dpois( outcomes[,ss], lambda=tmp.fm$fitted, log=TRUE))
		}
		if( disty==3){ #NegBin
			df3 <- as.data.frame( cbind( y=outcomes[,ss], offy=offy, W))
			tmp.fm <- MASS::glm.nb( y~.-1-offy+offset(offy), data=df3, weights=wts)
			logls[ss] <- sum( dnbinom( x=outcomes[,ss], size=tmp.fm$theta, mu=tmp.fm$fitted, log = TRUE))
			# Dispersion stored as log(1/theta) throughout the package.
			disp[ss] <- log( 1/tmp.fm$theta)
		}
		if( disty==4){ #Tweedie
			df3 <- as.data.frame( cbind( y=outcomes[,ss], offy=offy, W))
			tmp.fm <- fishMod::tglm( y~.-1-offy+offset(offy), wts=wts, data=df3, p=power[ss], vcov=FALSE, residuals=FALSE, trace=0)
			logls[ss] <- sum( fishMod::dTweedie( y=outcomes[,ss], mu=tmp.fm$fitted, phi=tmp.fm$coef["phi"], p=power[ss], LOG=TRUE))
			disp[ss] <- log( tmp.fm$coef["phi"])
			# Drop phi so only location coefficients remain below.
			tmp.fm$coef <- tmp.fm$coef[names( tmp.fm$coef) != "phi"]
		}
		if( disty==5){ #Normal
			tmp.fm <- lm( outcomes[,ss] ~-1+W, offset=offy, weights=wts)
			disp[ss] <- log(summary(tmp.fm)$sigma)
			# NOTE(review): the constants sqrt(2*pi) and exp(disp) are *added*
			# per observation here rather than appearing inside the density;
			# this does not look like a plain Gaussian log-likelihood --
			# confirm against the C code's normal logl before relying on it.
			logls[ss] <- sum( sqrt(2*pi)+exp(disp[ss])+dnorm( outcomes[,ss], mean=tmp.fm$fitted, sd=exp(disp[ss]), log=TRUE))
		}
		alpha[ss] <- tmp.fm$coef[1]
		if( p.w>0)
			gamma[ss,] <- tmp.fm$coef[-1]
		mus[,ss,1] <- fitted( tmp.fm)
	}
	logl <- sum( logls)
	#add on the penalties
	#no penalty for pi, as log(1)=0
	#no penalty for tau as all zero
	#gamma
	# Gaussian ridge penalty on gamma, matching the penalised fit.
	if( !is.null( gamma))
		logl <- logl - sum( (gamma^2)/(2*control$penalty.gamma*control$penalty.gamma))
	#disp
	# Gaussian penalty on log-dispersion around penalty.disp[1].
	if( !is.null( disp))
		logl <- logl - sum( ((disp-control$penalty.disp[1])^2)/(2*control$penalty.disp[2]*control$penalty.disp[2]))
	ret <- list()
	ret$pis <- matrix(1, ncol = nRCP, nrow=n)
	ret$mus <- array( mus, dim=c(n,S,nRCP))
	ret$coefs <- list(alpha = alpha, tau = NULL, beta = NULL, gamma=gamma, disp=disp)
	ret$scores <- NULL
	ret$logCondDens <- NULL
	ret$conv <- NULL
	ret$S <- S; ret$nRCP <- nRCP; ret$p.x <- p.x; ret$p.w <- p.w; ret$n <- n
	ret$start.vals <- NULL
	ret$logl <- logl
	ret$logl.sites <- NULL #for residuals
	return( ret)
}
# Fit the RCP mixture model for all non-Tweedie error distributions by
# delegating to the compiled routine "RCP_C".  The C code performs the
# quasi-Newton optimisation and overwrites the parameter vectors, score
# containers and model-quantity buffers in place, returning the
# (penalised) log-likelihood.
#
# Arguments:
#   outcomes    n x S matrix of species observations
#   X, W        design matrices for the RCP part and the species part
#   offy, wts   model offset and observation weights
#   S, nRCP     number of species and number of RCP groups
#   p.x, p.w    number of columns of X and of W (0 if no species model)
#   n           number of sites
#   disty       distribution code (1=Bernoulli, 2=Poisson, 3=NegBin,
#               4=Tweedie -- handled elsewhere, 5=Normal)
#   start.vals  list of starting values (alpha, tau, beta, gamma, disp)
#   power       Tweedie power parameter(s); passed through unused here
#   control     optimisation/penalty settings (see set.control)
# Returns: a list with fitted pis/mus, coefficients, dimension names,
#   scores, convergence code, log-likelihood and per-site log-likelihoods.
"notTweedieOptimise" <-
function( outcomes, X, W, offy, wts, S, nRCP, p.x, p.w, n, disty, start.vals, power, control)
{
	inits <- c(start.vals$alpha, start.vals$tau, start.vals$beta, start.vals$gamma, start.vals$disp)
	alpha <- start.vals$alpha; tau <- as.numeric( start.vals$tau); beta <- as.numeric( start.vals$beta); gamma <- as.numeric( start.vals$gamma); disp <- start.vals$disp
	#scores
	#NA containers are filled in by the C code; -999999 flags "not used"
	alpha.score <- as.numeric(rep(NA, S))
	tau.score <- as.numeric(matrix(NA, ncol = S, nrow = nRCP - 1))
	beta.score <- as.numeric(matrix(NA, ncol = ncol(X), nrow = nRCP - 1))
	if( p.w > 0)
		gamma.score <- as.numeric(matrix( NA, nrow=S, ncol=ncol(W)))
	else
		gamma.score <- -999999
	if( disty %in% 3:5)
		disp.score <- as.numeric( rep( NA, S))
	else
		disp.score <- -999999
	scoreContri <- -999999#as.numeric(matrix(NA, ncol = length(inits), nrow = n))
	#model quantities
	pis <- as.numeric(matrix(NA, nrow = n, ncol = nRCP)) #container for the fitted RCP model
	mus <- as.numeric(array( NA, dim=c( n, S, nRCP))) #container for the fitted spp model
	logCondDens <- as.numeric(matrix(NA, nrow = n, ncol = nRCP))
	logls <- as.numeric(rep(NA, n))
	conv <- as.integer(0)
	#the C routine updates alpha/tau/beta/gamma/disp and all containers in place
	tmp <- .Call( "RCP_C", as.numeric(outcomes), as.numeric(X), as.numeric(W), as.numeric( offy), as.numeric( wts),
		as.integer(S), as.integer(nRCP), as.integer(p.x), as.integer(p.w), as.integer(n), as.integer( disty),
		alpha, tau, beta, gamma, disp, power,
		as.numeric(control$penalty), as.numeric(control$penalty.tau), as.numeric( control$penalty.gamma), as.numeric( control$penalty.disp[1]), as.numeric( control$penalty.disp[2]),
		alpha.score, tau.score, beta.score, gamma.score, disp.score, scoreContri,
		pis, mus, logCondDens, logls,
		as.integer(control$maxit), as.integer(control$trace), as.integer(control$nreport), as.numeric(control$abstol), as.numeric(control$reltol), as.integer(conv),
		as.integer( control$optimise), as.integer(control$loglOnly), as.integer( control$derivOnly), as.integer( TRUE), as.integer( FALSE), PACKAGE = "RCPmod")
	ret <- list()
	ret$pis <- matrix(pis, ncol = nRCP)
	ret$mus <- array( mus, dim=c(n,S,nRCP))
	ret$coefs <- list(alpha = alpha, tau = tau, beta = beta, gamma=gamma, disp=disp)
	#strip out the -999999 "not used" placeholders
	if( any( ret$coefs$gamma==-999999, na.rm=TRUE))
		ret$coefs$gamma <- NULL
	if( any( ret$coefs$disp==-999999, na.rm=TRUE))
		ret$coefs$disp <- NULL
	ret$names <- list( spp=colnames( outcomes), RCPs=paste( "RCP", 1:nRCP, sep=""), Xvars=colnames( X))
	if( p.w>0)
		ret$names$Wvars <- colnames( W)
	else
		ret$names$Wvars <- NA
	ret$scores <- list(alpha = alpha.score, tau = tau.score, beta = beta.score, gamma = gamma.score, disp=disp.score)
	if( any( ret$scores$gamma==-999999, na.rm=TRUE))
		ret$scores$gamma <- NULL
	if( any( ret$scores$disp==-999999, na.rm=TRUE))
		ret$scores$disp <- NULL
	ret$logCondDens <- matrix(logCondDens, ncol = nRCP)
	if( control$optimise)
		ret$conv <- conv
	else
		ret$conv <- "not optimised"
	ret$S <- S; ret$nRCP <- nRCP; ret$p.x <- p.x; ret$p.w <- p.w; ret$n <- n
	ret$start.vals <- inits
	ret$logl <- tmp
	ret$logl.sites <- logls #for residuals
	return( ret)
}
# Permute the RCP labels of a fitted model so that they best match the
# labels used to simulate the data.  Label switching is inherent to
# mixture models, so fitted group k need not correspond to simulated
# group k; the best permutation maximises the classification rate between
# the simulated labels and the hard (max posterior) classification.
#
# Arguments:
#   fm      a fitted regimix object (uses postProbs, coefs, pis, mus, nRCP)
#   simDat  simulated data with the true labels in attr( simDat, "RCP")
# Returns: fm with its RCP labels permuted throughout, scores and vcov
#   invalidated (set NULL), the permutation used (fm$perm) and the best
#   and runner-up classification rates.
"orderFitted" <-
function( fm, simDat)
{
	RCPs <- attr( simDat, "RCP")
	posts <- fm$postProbs
	perms <- gtools::permutations( length( unique( RCPs)), length( unique( RCPs)))
	#one classification rate per candidate permutation; permutations are the
	#ROWS of perms, so the container must have nrow( perms) slots (the old
	#code used ncol( perms) and relied on silent vector extension)
	classErr <- rep( NA, nrow( perms))
	for( ii in 1:nrow( perms)){
		postsTMP <- posts[,perms[ii,]]
		postsTMP <- apply( postsTMP, 1, which.max)
		my.tab <- table( RCPs, postsTMP)
		classErr[ii] <- sum( diag( my.tab)) / sum( my.tab)
	}
	perms <- perms[which.max( classErr),]
	#coefs -- rebuild the full coefficient matrices (adding the implied last
	#row), permute their rows, then re-express relative to the last RCP
	tau <- matrix( fm$coefs$tau, nrow=fm$nRCP-1, ncol=fm$S)
	tau <- rbind( tau, -colSums( tau))
	tau <- tau[perms,]
	beta <- matrix( fm$coefs$beta, nrow=fm$nRCP-1, ncol=fm$p.x)
	beta <- rbind( beta, 0)
	beta <- beta[perms,]
	beta <- beta - rep( beta[fm$nRCP,], each=fm$nRCP)
	fm$coefs$tau <- as.numeric( tau[-fm$nRCP,])
	#bug fix: this previously wrote to fm$coef$beta -- `$<-` matches exactly,
	#so a new `coef` element was created and coefs$beta stayed unpermuted
	fm$coefs$beta <- as.numeric( beta[-fm$nRCP,])
	#scores are no longer valid for the permuted parameterisation
	fm$scores <- NULL
	#pis
	fm$pis <- fm$pis[,perms]
	#postProbs
	fm$postProbs <- fm$postProbs[,perms]
	#mus
	fm$mus <- fm$mus[,,perms]
	#vcov no longer valid either
	fm$vcov <- NULL
	#record the permutation applied
	fm$perm <- perms
	#classification rates of the best and second-best permutations
	fm$classErr <- max( classErr)
	fm$classErrRunnerUp <- max( classErr[-(which.max( classErr))])
	return( fm)
}
# Permute the RCP labels of new.fm so that they best agree with a reference
# classification -- either the posterior probabilities of another fitted
# model (fm) or a vector of known labels (RCPs).  Used to resolve label
# switching between fits (e.g. bootstrap refits against the ML fit).
#
# Arguments:
#   new.fm  the fitted regimix object whose labels are to be permuted
#   fm      the reference fit; its postProbs define the target classes
#   RCPs    alternatively, known group labels (one per site) which are
#           converted to 0/1 indicator columns in fm$postProbs
#   sample  optional row index to subset fm$postProbs before matching
# Returns: new.fm with labels permuted (coefs/pis/postProbs/mus), scores
#   and vcov invalidated, plus the permutation and classification rates.
"orderPost" <-
function( new.fm=NULL, fm, RCPs=NULL, sample=NULL)
{
	G1 <- G2 <- NULL
	if( !is.null( new.fm))
		G <- G1 <- new.fm$nRCP
	if( !is.null( RCPs))
		G <- G2 <- length( unique( RCPs))
	if( sum( !is.null( c(G1,G2))) != 1){
		message( "Problem with ordering -- provide new.fm *or* RCPs, but not both!")
		return( NULL)
	}
	perms <- gtools::permutations( G, G)
	if( !is.null( RCPs)){
		#turn the known labels into hard 0/1 posterior probabilities
		fm$postProbs <- matrix( 0, nrow=nrow( fm$postProbs), ncol=ncol( fm$postProbs))
		for( ii in 1:fm$nRCP)
			fm$postProbs[,ii] <- ifelse( RCPs==ii, 1, 0)
	}
	if( !is.null( sample))
		fm$postProbs <- fm$postProbs[sample,]
	#one agreement score per candidate permutation; permutations are the
	#ROWS of perms (the old code sized this by ncol( perms) and relied on
	#silent vector extension)
	classErr <- rep( NA, nrow( perms))
	for( ii in 1:nrow( perms)){
		my.tab <- t(fm$postProbs) %*% new.fm$postProbs[,perms[ii,]]
		classErr[ii] <- sum( diag( my.tab)) / sum( my.tab)
	}
	perms <- perms[which.max( classErr),]
	#coefs -- rebuild full matrices, permute rows, re-reference to last RCP
	tau <- matrix( new.fm$coefs$tau, nrow=new.fm$nRCP-1, ncol=new.fm$S)
	tau <- rbind( tau, -colSums( tau))
	tau <- tau[perms,]
	new.fm$coefs$tau <- as.numeric( tau[-new.fm$nRCP,])
	beta <- matrix( new.fm$coefs$beta, nrow=new.fm$nRCP-1, ncol=new.fm$p.x)
	beta <- rbind( beta, 0)
	beta <- beta[perms,]
	#bug fix: `each=` was hard-coded to 3; beta has nRCP rows stored
	#column-major, so the reference row must be repeated each=nRCP times
	#(cf. the same calculation in orderFitted)
	beta <- beta - rep( beta[new.fm$nRCP,], each=new.fm$nRCP)
	new.fm$coefs$beta <- as.numeric( beta[-new.fm$nRCP,])
	#scores no longer valid for the permuted parameterisation
	new.fm$scores <- NULL
	#pis
	new.fm$pis <- new.fm$pis[,perms]
	#postProbs
	new.fm$postProbs <- new.fm$postProbs[,perms]
	#mus
	new.fm$mus <- new.fm$mus[,,perms]
	#vcov no longer valid either
	new.fm$vcov <- NULL
	#record the permutation applied
	new.fm$perm <- perms
	#classification rates of the best and second-best permutations
	new.fm$classErr <- max( classErr)
	new.fm$classErrRunnerUp <- max( classErr[-(which.max( classErr))])
	return( new.fm)
}
# Plot method for regimix objects.  Two residual diagnostics:
#   type="deviance": simulation envelope for sorted deviance residuals --
#     nsim data sets are simulated from the fitted model, their residuals
#     computed via the compiled "RCP_C" routine, and pointwise plus global
#     confidence envelopes drawn (Tukey mean-difference style).
#   type="RQR": normal QQ plot and residual-vs-fitted plot of randomised
#     quantile residuals, optionally for a subset of species.
#
# Arguments:
#   x            a fitted regimix object (needs titbits stored)
#   ...          further arguments; `main` and `sub` are picked up for RQR
#   type         "RQR" or "deviance"
#   nsim         number of simulated data sets for the deviance envelope
#   alpha.conf   confidence level(s) for the envelopes
#   quiet        suppress the progress bar and messages?
#   species      "AllSpecies" or a subset of x$names$spp (RQR only)
#   fitted.scale axis scale for the RQR fitted values: "response", "log"
#                or "logit"
# Returns: NULL (deviance) or nothing useful (RQR); called for the plots.
"plot.regimix" <-
function (x, ..., type="RQR", nsim = 100, alpha.conf = c(0.9, 0.95, 0.99), quiet=FALSE, species="AllSpecies", fitted.scale="response")
{
	if( ! type %in% c("RQR","deviance"))
		stop( "Unknown type of residuals. Options are 'RQR' and 'deviance'.\n")
	if( ! all( species %in% c("AllSpecies",x$names$spp)))
		stop( "Unknown species. Options are 'AllSpecies' or any one of the species names as supplied (and stored in x$names$spp)")
	if( type=="deviance"){
		obs.resid <- residuals( x, type="deviance")
		#grey shades for the nested confidence polygons
		shad <- rev(seq(from = 0.8, to = 0.5, length = length(alpha.conf)))
		allResids <- matrix(NA, nrow = x$n, ncol = nsim)
		#unpack the model ingredients stored in titbits
		X <- x$titbits$X
		p.x <- ncol( X)
		if( inherits( x$titbits$form.spp, "formula")){
			form.W <- x$titbits$form.spp
			W <- x$titbits$W
			p.w <- ncol( W)
		}
		else{
			form.W <- NULL
			W <- -999999
			p.w <- 0
		}
		offy <- x$titbits$offset
		wts <- x$titbits$wts
		Y <- x$titbits$Y
		disty <- x$titbits$disty
		power <- x$titbits$power
		S <- x$S
		nRCP <- x$nRCP
		p.x <- x$p.x
		p.w <- x$p.w
		n <- x$n
		disty <- x$titbits$disty
		control <- x$titbits$control
		#buffers for the C call; -999999 flags values the C code ignores
		pis <- as.numeric( matrix( -999999, nrow = n, ncol = nRCP))
		mus <- as.numeric( array( -999999, dim=c( n, S, nRCP)))
		logCondDens <- as.numeric( matrix( -999999, nrow = n, ncol = nRCP))
		logls <- as.numeric(rep(-999999, n))
		alpha.score <- as.numeric(rep(-999999, S))
		tau.score <- as.numeric(matrix(-999999, nrow = nRCP - 1, ncol = S))
		beta.score <- as.numeric(matrix(-999999, nrow = nRCP - 1, ncol = p.x))
		if( p.w > 0)
			gamma.score <- as.numeric( matrix( -999999, nrow = S, ncol = p.w))
		else
			gamma.score <- -999999
		if( !is.null( x$coef$disp))
			disp.score <- as.numeric( rep( -999999, S))
		else
			disp.score <- -999999
		conv <- FALSE
		alpha = x$coefs$alpha
		tau <- x$coefs$tau
		beta <- x$coefs$beta
		if( !is.null( form.W))
			gamma <- x$coefs$gamma
		else
			gamma <- -999999
		if( any( !is.null( x$coef$disp)))
			disp <- x$coef$disp
		else
			disp <- -999999
		scoreContri <- as.numeric(matrix(NA, ncol = length(unlist(x$coef)), nrow = x$n))
		if( !quiet)
			pb <- txtProgressBar(min = 1, max = nsim, style = 3, char = "><(('> ")
		#simulate nsim data sets at the ML estimates; for each, evaluate the
		#per-site log-likelihoods (logls, filled in place by RCP_C) and turn
		#them into deviance residuals
		for (s in 1:nsim) {
			if( !quiet)
				setTxtProgressBar(pb, s)
			newy <- as.matrix( simRCPdata( nRCP=nRCP, S=S, n=n, p.x=p.x, p.w=p.w, alpha=alpha, tau=tau, beta=beta, gamma=gamma, logDisps=disp, powers=power, X=X, W=W, offset=offy, dist=x$dist))
			tmp <- .Call( "RCP_C", as.numeric(newy[, 1:S]), as.numeric(X), as.numeric(W), as.numeric( offy), as.numeric( wts),
				as.integer(S), as.integer(nRCP), as.integer(p.x), as.integer(p.w), as.integer(n), as.integer( disty),
				alpha, tau, beta, gamma, disp, power,
				as.numeric(control$penalty), as.numeric(control$penalty.tau), as.numeric( control$penalty.gamma), as.numeric( control$penalty.disp[1]), as.numeric( control$penalty.disp[2]),
				alpha.score, tau.score, beta.score, gamma.score, disp.score, scoreContri,
				pis, mus, logCondDens, logls,
				as.integer(control$maxit), as.integer(control$trace), as.integer(control$nreport), as.numeric(control$abstol), as.numeric(control$reltol), as.integer(conv),
				as.integer( FALSE), as.integer( TRUE), as.integer( FALSE), as.integer( TRUE), as.integer( FALSE), PACKAGE = "RCPmod")
			allResids[, s] <- get.residuals( logls, Y, x$dist, x$coef, nRCP, type="deviance", powers=power, quiet=TRUE)
		}
		if( !quiet)
			message("")
		#pointwise envelope: quantiles of the sorted simulated residuals
		allResidsSort <- apply(allResids, 2, sort)
		quants <- c(0.5, (1 - alpha.conf)/2, alpha.conf + (1 - alpha.conf)/2)
		envel <- t(apply(allResidsSort, 1, quantile, probs = quants, na.rm = TRUE))
		sort.resid <- sort(obs.resid)
		empQuant <- envel[, 1]
		diff <- sweep(envel[, -1], 1, empQuant, "-")
		realMeans <- (sort.resid + empQuant)/2
		realDiff <- sort.resid - empQuant
		par(mfrow = c(1, 2))
		plot(rep(realMeans, 1 + 2 * length(alpha.conf)), c(diff,
			realDiff), sub = "Pointwise Confidence",
			ylab = "Observed - Expected", xlab = "(Observed+Expected)/2",
			type = "n")
		for (aa in length(alpha.conf):1) polygon(c(realMeans, rev(realMeans)),
			c(diff[, aa], rev(diff[, aa + length(alpha.conf)])),
			col = grey(shad[aa]), border = NA)
		points(realMeans, realDiff, pch = 20)
		abline(h = 0)
		#global envelope: widen each band until it simultaneously contains
		#the requested proportion of whole simulated curves
		globEnvel <- envel
		for (ii in 2:(length(alpha.conf) + 1)) globEnvel[, ii] <- globCIFinder(x = allResidsSort, en = envel[, ii], alpha = quants[ii], nsim = nsim)
		for (ii in 1 + (length(alpha.conf) + 1):(2 * length(alpha.conf))) globEnvel[, ii] <- globCIFinder(x = allResidsSort, en = envel[, ii], alpha = quants[ii], nsim = nsim)
		empQuant <- globEnvel[, 1]
		diff <- sweep(globEnvel[, -1], 1, empQuant, "-")
		realMeans <- (sort.resid + empQuant)/2
		realDiff <- sort.resid - empQuant
		plot(rep(realMeans, 1 + 2 * length(alpha.conf)), c(diff,
			realDiff), sub = "Global Confidence",
			ylab = "Observed - Expected", xlab = "(Observed+Expected)/2",
			type = "n")
		for (aa in length(alpha.conf):1)
			polygon(c(realMeans, rev(realMeans)), c(diff[, aa], rev(diff[, aa + length(alpha.conf)])), col = grey(shad[aa]), border = NA)
		points(realMeans, realDiff, pch = 20)
		abline(h = 0)
		return(NULL)
	}
	if( type=="RQR"){
		obs.resid <- residuals( x, type="RQR", quiet=quiet)
		S <- x$S
		sppID <- rep( TRUE, S)
		if( species != "AllSpecies"){
			sppID <- x$names$spp %in% species
			obs.resid <- obs.resid[,sppID, drop=FALSE]
			S <- ncol( obs.resid)
		}
		#infinite RQRs can occur for observations on the edge of the model
		#(see residuals.regimix); they cannot be plotted
		if( sum( obs.resid==Inf | obs.resid==-Inf) > 0){
			message( "Infinite residuals removed from residual plots:", sum( obs.resid==Inf | obs.resid==-Inf), "in total.")
			obs.resid[obs.resid==Inf | obs.resid==-Inf] <- NA
		}
		spp.cols <- rep( 1:S, each=x$n)
		#pick up main/sub titles from ... if the caller supplied them
		main <- match.call( expand.dots=TRUE)$main
		if( is.null( main)){
			if( species=="AllSpecies")
				main <- "All Residuals"
			else
				if( length( species)==1)
					main=species
				else
					main=""
		}
		sub <- match.call( expand.dots=TRUE)$sub
		if( is.null( sub))
			sub <- "Colours separate species"
		par( mfrow=c(1,2))
		qqnorm(obs.resid, col=spp.cols, pch=20, main=main, sub=sub)
#		qqline( obs.resid)	#this doesn't actually poduce a y=x line. It is only(?) appropriate if the scales of the two sets are different.
		abline( 0,1,lwd=2)
		#fitted values: mixture mean for each site/species, i.e. the
		#pi-weighted sum of the per-RCP means
		preds <- matrix( NA, nrow=x$n, ncol=S)
		for( ii in 1:x$n){
			preds[ii,] <- rowSums( x$mu[ii,sppID,] * matrix( rep( x$pi[ii,], each=S), nrow=S, ncol=x$nRCP))
		}
		switch( fitted.scale,
			log = { loggy <- "x"},
			logit = { loggy <- ""; preds <- log( preds / (1-preds))},
			{loggy <- ""})
		plot( preds, obs.resid, xlab="Fitted", ylab="RQR", main="Residual versus Fitted", sub="Colours separate species", pch=20, col=rep( 1:S, each=x$n), log=loggy)
		abline( h=0)
	}
}
# Plot method for registab (stability) objects.  Left panel: distance of
# each RCP's predictions from the full-model predictions as observations
# are removed.  Right panel: histograms (drawn as shaded rectangle columns)
# of the out-of-sample predictive log-likelihoods for each holdout size,
# with the full-model per-site log-likelihoods at 0 removed and the mean
# traced as a line.
#
# Arguments:
#   x        a registab object (oosSizeRange, disty, predlogls, logl.sites,
#            nRCP elements are used)
#   y        unused; present for the plot generic's signature
#   minWidth minimum half-width basis for the histogram columns
#   ncuts    suggested number of histogram breaks
#   ylimmo   y-axis limits for the right panel (computed if NULL)
#   ...      unused
# Returns: invisible(TRUE); called for the plots.
"plot.registab" <-
function(x, y, minWidth=1, ncuts=111, ylimmo=NULL, ...)
{
	par(mfrow = c(1, 2))
	matplot(c(0, x$oosSizeRange), rbind(0, x$disty), type = "b",
		ylab = "Distance from Full Model Predictions", xlab = "Number of Obs Removed",
		main = "Stability of Group Predictions", col = 1:x$nRCP,
		pch = as.character(1:x$nRCP), lty = 1)
	legend("topleft", bty = "n", lty = 1, pch = as.character(1:x$nRCP),
		col = 1:x$nRCP, legend = paste("RCP ", 1:x$nRCP, sep = ""))
	#column width: half the smallest gap between holdout sizes, at least minWidth
	oosDiffs <- diff( c(0,x$oosSizeRange))
	oosWidth <- max( minWidth, min( oosDiffs)) / 2
	#one histogram of predictive log-likelihoods per holdout size
	histy <- list()
	for( ii in 1:length( x$oosSizeRange)){
		tmp <- na.exclude( as.numeric( x$predlogls[ii,,]))
		histy[[ii]] <- hist( tmp, breaks=ncuts, plot=FALSE)
	}
	max.dens <- max( sapply( sapply( histy, function(x) x$density), max))
	if( is.null( ylimmo))
		ylimmo <- range( sapply( histy, function(x) x$breaks))
	plot( 0, 0, ylab = "Pred LogL (OOS)", xlab = "Number of Obs Removed", main = "Stability of Pred Logl", xlim = c(0-oosWidth, max(x$oosSizeRange)+oosWidth), ylim=ylimmo, type = "n")
	#draw each histogram as a column of rectangles, shaded by density (blue ramp)
	for( ii in 1:length( x$oosSizeRange))
		for( jj in 1:length( histy[[ii]]$density))
			rect( xleft=x$oosSizeRange[ii]-oosWidth, xright=x$oosSizeRange[ii]+oosWidth, ybottom=histy[[ii]]$breaks[jj], ytop=histy[[ii]]$breaks[jj+1], col=rgb( colorRamp( c("#E6FFFF","blue"))(histy[[ii]]$density[jj]/max.dens), maxColorValue=255), border=NA)
	#the full-model (nothing removed) per-site log-likelihoods, in red, at x=0
	tmp <- na.exclude( as.numeric( x$logl.sites))
	histy <- hist( tmp, breaks=ncuts, plot=FALSE)
	for( jj in 1:length( histy$density))
		rect( xleft=0-oosWidth, xright=0+oosWidth, ybottom=histy$breaks[jj], ytop=histy$breaks[jj+1], col=rgb( colorRamp( c("#FFE6FF","red"))(histy$density[jj]/max( histy$density)), maxColorValue=255), border=NA)
	#trace the mean predictive log-likelihood across holdout sizes
	lines(c(0, x$oosSizeRange), c(mean(x$logl.sites), apply(x$predlogls,
		1, mean, na.rm = TRUE)), lwd = 2, col = "black")
	invisible(TRUE)
}
# Predict method for regimix objects: point predictions of the RCP
# membership probabilities (pis) for the fitted or new covariate data,
# optionally with bootstrap-based standard errors and confidence
# intervals.  Bootstrap coefficient samples come either from a parametric
# bootstrap (object2 NULL, nboot > 0) or from a supplied regiboot object.
# The heavy lifting is done by the compiled "RCP_predict_C" routine,
# optionally split across mc.cores via parallel::mclapply.
#
# Arguments:
#   object   a fitted regimix object (titbits must be stored)
#   object2  optional regiboot matrix of bootstrapped coefficients
#   newdata  optional data frame of covariates to predict at
#   nboot    number of parametric bootstrap samples (ignored if object2)
#   alpha    confidence level for the bootstrap intervals
#   mc.cores number of cores (reverts to 1 on Windows)
# Returns: a matrix of point predictions, or (when bootstrapping) a list
#   with ptPreds, bootPreds, bootSEs and bootCIs.
"predict.regimix" <-
function (object, object2 = NULL, ..., newdata = NULL, nboot = 0,
	alpha = 0.95, mc.cores = 1)
{
	if (is.null(newdata)) {
		X <- object$titbits$X
		if ( inherits(object$titbits$form.spp,"formula")) {
			form.W <- object$titbits$form.spp
			W <- object$titbits$W
			p.w <- ncol(W)
		}
		else {
			form.W <- NULL
			W <- -999999
			p.w <- 0
		}
	}
	else {
		#NOTE(review): object$titbit (sic) only works via `$` partial
		#matching of `titbits` -- confirm and consider spelling it out
		form.X <- as.formula(object$titbit$form.RCP)
		if (length(form.X) == 3)
			form.X[[2]] <- NULL	#drop the response from the formula
		X <- model.matrix(form.X, model.frame(form.X, data = as.data.frame(newdata)))
		if (inherits(object$titbits$form.spp, "formula")) {
			W <- model.matrix(object$titbits$form.spp, model.frame(object$titbits$form.spp,
				data = as.data.frame(newdata)))
			p.w <- ncol(W)
		}
		else {
			form.W <- NULL
			W <- -999999
			p.w <- 0
		}
	}
	offy <- rep(0, nrow(X))	#predictions are made at zero offset
	S <- object$S
	G <- object$nRCP
	n <- nrow(X)
	p.x <- object$p.x
	p.w <- object$p.w
	if (is.null(object2)) {
		if (nboot > 0) {
			if( !object$titbits$control$quiet)
				message("Using a parametric bootstrap based on the ML estimates and their vcov")
			my.nboot <- nboot
		}
		else
			my.nboot <- 0
		#NOTE(review): `mf` is not defined in this function; this only works
		#because regibootParametric never evaluates its mf argument (lazy
		#evaluation) -- confirm and tidy
		allCoBoot <- regibootParametric(fm = object, mf = mf,
			nboot = my.nboot)
	}
	else {
		if( !object$titbits$control$quiet)
			message("Using supplied regiboot object (non-parametric bootstrap)")
		allCoBoot <- as.matrix(object2)
		nboot <- nrow(object2)
	}
	if (is.null(allCoBoot))
		return(NULL)	#e.g. parametric bootstrap requested without a vcov
	#slice the bootstrap coefficient matrix into its alpha/tau/beta parts
	alphaBoot <- allCoBoot[, 1:S,drop=FALSE]
	tauBoot <- allCoBoot[, S + 1:((G - 1) * S),drop=FALSE]
	betaBoot <- allCoBoot[, S + (G - 1) * S + 1:((G - 1) * p.x),drop=FALSE]
	#the c(NA, ...)[-1] dance coerces each coefficient set to a bare numeric
	alphaIn <- c(NA, as.numeric(object$coefs$alpha))
	alphaIn <- alphaIn[-1]
	tauIn <- c(NA, as.numeric(object$coef$tau))
	tauIn <- tauIn[-1]
	betaIn <- c(NA, as.numeric(object$coef$beta))
	betaIn <- betaIn[-1]
	if (inherits(object$titbits$form.spp,"formula")) {
		gammaIn <- c(NA, as.numeric(object$coef$gamma))
		gammaIn <- gammaIn[-1]
	}
	else gammaIn <- -999999
	if (any(!is.null(object$coef$disp))) {
		dispIn <- c(NA, as.numeric(object$coef$disp))
		dispIn <- dispIn[-1]
	}
	else dispIn <- -999999
	powerIn <- c(NA, as.numeric(object$titbits$power))
	powerIn <- powerIn[-1]
	predCol <- G
	ptPreds <- as.numeric(matrix(NA, nrow = n, ncol = predCol))
	bootPreds <- as.numeric(array(NA, c(n, predCol, nboot)))
	conc <- as.numeric(NA)
	mysd <- as.numeric(NA)
	outcomes <- matrix(NA, nrow = nrow(X), ncol = S)
	myContr <- object$titbits$control
	nam <- paste("RCP", 1:G, sep = "_")
	#worker: evaluate predictions for one segment of the bootstrap samples
	#(or just the point predictions, when segments flags no bootstrap)
	boot.funny <- function(seg) {
		if (any(segments <= 0)) {
			nboot <- 0
			bootSampsToUse <- 1
		}
		else {
			nboot <- segments[seg]
			bootSampsToUse <- (sum( segments[1:seg])-segments[seg]+1):sum(segments[1:seg])
		}
		bootPreds <- as.numeric(array(NA, c(n, predCol, nboot)))
		tmp <- .Call( "RCP_predict_C", as.numeric(-999999), as.numeric(X),
			as.numeric(W), as.numeric(offy), as.numeric(object$titbits$wts),
			as.integer(S), as.integer(G), as.integer(p.x), as.integer(p.w),
			as.integer(n), as.integer(object$titbits$disty),
			as.numeric(alphaIn), as.numeric(tauIn), as.numeric(betaIn),
			as.numeric(gammaIn), as.numeric(dispIn), as.numeric(powerIn),
			as.numeric(myContr$penalty), as.numeric(myContr$penalty.tau),
			as.numeric(myContr$penalty.gamma), as.numeric(myContr$penalty.disp[1]),
			as.numeric(myContr$penalty.disp[2]), as.numeric(alphaBoot[bootSampsToUse,]),
			as.numeric(tauBoot[bootSampsToUse,]), as.numeric(betaBoot[bootSampsToUse,]),
			as.integer(nboot), as.numeric(ptPreds), as.numeric(bootPreds), as.integer(1),
			PACKAGE = "RCPmod")
		if (nboot == 0) {
			ret <- matrix(ptPreds, nrow = nrow(X), ncol = predCol)
			colnames(ret) <- nam
			return(ret)
		}
		bootPreds <- matrix(bootPreds, nrow = nrow(X) * predCol,
			ncol = nboot)
		return(bootPreds)
	}
	segments <- -999999	#flags "point predictions only" to boot.funny
	ret <- list()
	ptPreds <- boot.funny(1)
	if (nboot > 0) {
		if (Sys.info()["sysname"] == "Windows") {
			if( !object$titbits$control$quiet)
				message("Parallelised version of function not available for Windows machines. Reverting to single processor.")
			mc.cores <- 1
		}
		#split the bootstrap samples as evenly as possible over the cores
		segments <- rep(nboot%/%mc.cores, mc.cores)
		if( nboot %% mc.cores > 0)
			segments[1:(nboot%%mc.cores)] <- segments[1:(nboot%%mc.cores)] + 1
		tmp <- parallel::mclapply(1:mc.cores, boot.funny, mc.cores = mc.cores)
		bootPreds <- do.call("cbind", tmp)
		bPreds <- list()
		#bootstrap mean, standard error and percentile CIs
		row.exp <- rowMeans(bootPreds)
		tmp <- matrix(row.exp, nrow = nrow(X), ncol = predCol)
		bPreds$fit <- tmp
		tmp <- sweep(bootPreds, 1, row.exp, "-")
		tmp <- tmp^2
		tmp <- sqrt(rowSums(tmp)/(nboot - 1))
		tmp <- matrix(tmp, nrow = nrow(X), ncol = predCol)
		bPreds$ses <- tmp
		colnames(bPreds$fit) <- colnames(bPreds$ses) <- nam
		tmp.fun <- function(x) return(quantile(bootPreds[x, ],
			probs = c(0, alpha) + (1 - alpha)/2, na.rm = TRUE))
		tmp1 <- parallel::mclapply(1:nrow(bootPreds), tmp.fun,
			mc.cores = mc.cores)
		tmp1 <- do.call("rbind", tmp1)
		tmp1 <- array(tmp1, c(nrow(X), predCol, 2), dimnames = list(NULL,
			NULL, NULL))
		bPreds$cis <- tmp1[, 1:predCol, ]
		dimnames(bPreds$cis) <- list(NULL, nam, c("lower", "upper"))
		ret <- list(ptPreds = ptPreds, bootPreds = bPreds$fit,
			bootSEs = bPreds$ses, bootCIs = bPreds$cis)
	}
	else ret <- ptPreds
	gc()
	return(ret)
}
# Print a short console summary of the data and model structure.
# Emits nothing (and returns NULL) when quiet is TRUE.
#
# Arguments:
#   data        the full data set as supplied by the user
#   dat         cleaned data list; dat$ids indexes the complete cases
#   S           number of species
#   form.RCP    formula for the (latent) RCP part of the model
#   form.spp    formula for the species part, or NULL if there is none
#   disty.cases character vector of supported distribution names
#   disty       integer index into disty.cases
#   quiet       suppress all output?
"print.data.summ" <-
function( data, dat, S, form.RCP, form.spp, disty.cases, disty, quiet=FALSE)
{
	if( quiet)
		return( NULL)
	message("There are ", length( dat$ids), " fully present observations and ", nrow( data), " observations in total")
	message("There are ", S, " species")
	#drop the LHS of the RCP formula before displaying it
	form.RCP[[2]] <- NULL
	message("The model for the (latent) RCP classes is: ", paste( deparse( form.RCP), collapse=" "))
	if( is.null( form.spp))
		message("There is NO model for each species (apart from intercept(s))")
	else
		message("The model for each species is: ", paste( deparse( form.spp), collapse=" "))
	message("The error distribution is: ", disty.cases[disty])
}
# Print method for regimix objects: shows the call, the error distribution
# and the estimated coefficients, and invisibly returns that summary list.
"print.regimix" <-
function (x, ...)
{
	ret <- list( Call=x$call, Distribution=x$dist, coef=coef( x))
	print( ret)
	invisible( ret)
}
# Bootstrap the coefficients of a fitted regimix model by refitting with
# perturbed observation weights.  Two schemes:
#   "SimpleBoot": case resampling -- weights are the multiplicities of a
#     with-replacement sample of the sites.
#   "BayesBoot": Bayesian bootstrap -- weights are n times a draw from a
#     flat Dirichlet (Rubin, 1981).
# Each refit can be started from the ML estimates (MLstart=TRUE) or from
# random starts (in which case the samples are label-matched to the
# original fit via orderPost).  Refits run sequentially on Windows or when
# mc.cores==1, otherwise via parallel::mclapply.
#
# Arguments:
#   object     a fitted regimix object (titbits must be stored)
#   nboot      number of bootstrap samples
#   type       "BayesBoot" or "SimpleBoot"
#   mc.cores   number of cores for the refits
#   quiet      suppress the progress bar / messages?
#   orderSamps relabel each sample against the original fit?
#   MLstart    start each refit from the ML estimates?
# Returns: an nboot x nparam matrix of coefficients, class "regiboot".
"regiboot" <-
function (object, nboot=1000, type="BayesBoot", mc.cores=1, quiet=FALSE, orderSamps=FALSE, MLstart=TRUE)
{
	if (nboot < 1)
		stop( "No Boostrap samples requested. Please set nboot to something > 1.")
	if( ! type %in% c("BayesBoot","SimpleBoot"))
		stop( "Unknown boostrap type, choices are BayesBoot and SimpleBoot.")
	n.reorder <- 0
	object$titbits$control$optimise <- TRUE	#just in case it was turned off (see regimix.multfit)
#	object$titbits$control$reltol <- max(1e-05, object$titbits$control$reltol)
#	if( object$p.w>0)
#		orig.data <- data.frame( cbind( object$titbits$Y, object$titbits$X, object$titbits$W, offset=object$titbits$offset, weights=rep(0,nrow(object$titbits$Y))))
#	else
#		orig.data <- data.frame( cbind( object$titbits$Y, object$titbits$X, offset=object$titbits$offset), weights=rep(0,nrow(object$titbits$Y)))
	if( !quiet)
		pb <- txtProgressBar(min = 1, max = nboot, style = 3, char = "><(('> ")
	#build the nboot x n matrix of case weights for the refits
	if( type == "SimpleBoot"){
		#with-replacement sample of sites; weights are the multiplicities
		all.wts <- matrix( sample( 1:object$n, nboot*object$n, replace=TRUE), nrow=nboot, ncol=object$n)
		tmp <- apply( all.wts, 1, table)
		all.wts <- matrix( 0, nrow=nboot, ncol=object$n)
		for( ii in 1:length( tmp))
			all.wts[ii, as.numeric( names( tmp[[ii]]))] <- tmp[[ii]]
	}
	if( type == "BayesBoot")
		all.wts <- object$n * gtools::rdirichlet( nboot, rep( 1, object$n))
	if( MLstart)
		my.inits <- unlist( object$coef)
	else{
		my.inits <- "random"
		orderSamps <- TRUE	#random starts can label-switch, so always reorder
	}
	#worker: refit with the dummy-th weight vector and return the coefficients
	my.fun <- function( dummy){
		if( !quiet)
			setTxtProgressBar(pb, dummy)
		dumbOut <- capture.output(
			samp.object <- regimix.fit( outcomes=object$titbits$Y, W=object$titbits$W, X=object$titbits$X, offy=object$titbits$offset, wts=object$titbits$wts * all.wts[dummy,,drop=TRUE], disty=object$titbits$disty, nRCP=object$nRCP, power=object$titbits$power, inits=my.inits, control=object$titbits$control, n=object$n, S=object$S, p.x=object$p.x, p.w=object$p.w))
		if( orderSamps)
			samp.object <- orderPost( samp.object, object)
		return( unlist( samp.object$coef))
	}
	flag <- TRUE
	tmpOldQuiet <- object$titbits$control$quiet
	object$titbits$control$quiet <- TRUE	#silence the individual refits
	if( Sys.info()['sysname'] == "Windows" | mc.cores==1){
		boot.estis <- matrix(NA, nrow = nboot, ncol = length(unlist(object$coef)))
		for (ii in 1:nboot) {
			if( !quiet)
				setTxtProgressBar(pb, ii)
			boot.estis[ii, ] <- my.fun( ii)
		}
		flag <- FALSE
	}
	if( flag){	#has this already been done sequencially?
		if( !quiet)
			message( "Progress bar may not be monotonic due to the vaguaries of parallelisation")
		tmp <- parallel::mclapply( 1:nboot, my.fun, mc.silent=quiet, mc.cores=mc.cores)
#		if( !quiet)
#			message("")
		boot.estis <- do.call( "rbind", tmp)
	}
	object$titbits$control$quiet <- tmpOldQuiet
	if( !quiet)
		message( "")
	colnames( boot.estis) <- get.long.names( object)
	class( boot.estis) <- "regiboot"
	return( boot.estis)
}
# Parametric bootstrap of the model coefficients: draw nboot samples from
# a multivariate normal centred at the ML estimates with covariance
# fm$vcov.  With nboot <= 0, just return the point estimates as a one-row
# matrix.  fm$vcov must already have been computed (see vcov.regimix);
# if it is missing, a message is printed and NULL returned.  The mf
# argument is accepted but never used.
"regibootParametric" <-
function( fm, mf, nboot)
{
	if( nboot <= 0)
		return( matrix( unlist( fm$coef), nrow=1))
	if( is.null( fm$vcov)){
		message( "An estimate of the variance matrix for regression parameters is required. Please run fm$vcov <- vcov(), see ?vcov.regimix for help")
		return( NULL)
	}
	my.rmvnorm( n=nboot, mean=as.numeric( unlist( fm$coefs)), sigma=fm$vcov, method='eigen')
}
# Fit a Region of Common Profile (RCP) mixture model.  Sites are grouped
# into nRCP latent classes whose membership probabilities depend on the
# covariates in form.RCP; species' conditional means may additionally
# depend on the covariates in form.spp.  Uses non-standard evaluation
# (match.call/model.frame) to pull data/offset/weights from the caller.
#
# Arguments:
#   form.RCP formula for the latent classes (response = species matrix);
#            required -- the function returns NULL without it
#   form.spp optional formula for the per-species model
#   data     data frame holding all variables
#   nRCP     number of latent classes
#   dist     error distribution: "Bernoulli", "Poisson", "NegBin",
#            "Tweedie" or "Normal"
#   offset, weights  the usual model offset / observation weights
#   control  list of fitting options (completed by set.control)
#   inits    starting-value specification (e.g. "random2") or values
#   titbits  store model ingredients for later methods (predict/plot/...)?
#   power    Tweedie power parameter
# Returns: an object of class "regimix", or NULL on input errors.
"regimix" <-
function (form.RCP = NULL, form.spp = NULL, data, nRCP = 3, dist="Bernoulli", offset=NULL, weights=NULL, control = list(), inits="random2", titbits = TRUE, power=1.6)
{
	#the control parameters
	control <- set.control( control)
	if( !control$quiet)
		message( "RCP modelling")
	call <- match.call()
	if( !is.null(form.RCP))
		form.RCP <- as.formula( form.RCP)
	else{
		if( !control$quiet)
			message( "There is no RCP model! Please provide a model (intercept at least) -- exitting now")
		return( NULL)
	}
	if( !is.null( form.spp))
		form.spp <- as.formula( form.spp)
	#rebuild the call as a model.frame() call evaluated in the caller's
	#frame, so data/offset/weights are resolved where the user supplied them
	mf <- match.call(expand.dots = FALSE)
	m <- match(c("data","offset","weights"), names(mf), 0L)
	mf <- mf[c(1L, m)]
	mf$drop.unused.levels <- TRUE
	mf$na.action <- "na.exclude"
	mf[[1L]] <- quote(stats::model.frame)
	mf <- eval(mf, parent.frame())
	##data <- as.data.frame(data)
	#get the data model frames and strip out any NAs
	dat <- clean.data( mf, form.RCP, form.spp)
	#get the outcomes
	outcomes <- model.response(dat$mf.X)
	S <- check.outcomes1(outcomes)
	if (!S) {
		if( !control$quiet)
			message("Two species have the same name -- exitting now")
		return(NULL)
	}
	if( !control$quiet)
		message( "There are: ", nRCP, "RCPs to group the sites into")
	#get the design matrix for RCP part of model
	X <- get.X(form.RCP, dat$mf.X)
	p.x <- ncol( X)
	#get design matrix for spp part of the model -- if there is one
	W <- get.W( form.spp, dat$mf.W)
	if( all( W != -999999))	#-999999 flags "no species model"
		p.w <- ncol( W)
	else
		p.w <- 0
	#get offset (if not specified then it will be zeros)
	offy <- get.offset( mf, dat$mf.X, dat$mf.W)
	#get model wts (if not specified then it will be ones)
	wts <- get.wts( mf)
	#get distribution
	disty.cases <- c("Bernoulli","Poisson","NegBin","Tweedie","Normal")
	disty <- get.dist( disty.cases, dist)
	#get power params for Tweedie
	power <- get.power( disty, power, S)
	#summarising data to console
	print.data.summ( data, dat, S, form.RCP, form.spp, disty.cases, disty, control$quiet)
	tmp <- regimix.fit( outcomes, W, X, offy, wts, disty, nRCP, power, inits, control, nrow( X), S, p.x, p.w)
	tmp$dist <- disty.cases[disty]
	#calculate the posterior probs
	if( nRCP>1)
		tmp$postProbs <- calcPostProbs( tmp$pis, tmp$logCondDens)
	else
		tmp$postProbs <- rep( 1, nrow( X))
	#Residuals --not calculating residuals here. Need to call residuals.regimix
	#Information criteria
	tmp <- calcInfoCrit( tmp)
	#titbits object, if wanted/needed.
	tmp$titbits <- get.titbits( titbits, outcomes, X, W, offy, wts, form.RCP, form.spp, control, dist, p.w=p.w, power)
	tmp$titbits$disty <- disty
	#the last bit of the regimix object puzzle
	tmp$call <- call
	gc()
	tmp <- tmp[sort( names( tmp))]
	class(tmp) <- "regimix"
	return(tmp)
	#documentation needs to be adjusted to fit new model.
}
# Dispatch the actual model fitting.  The degenerate single-RCP case (no
# dependence on environment) is handled separately; otherwise starting
# values are computed and the model handed to the appropriate optimiser
# (the Tweedie distribution needs its own, more convoluted, routine).
#
# Arguments mirror regimix()'s internals: outcomes (n x S matrix), design
# matrices W (species) and X (RCP), offset offy, weights wts, distribution
# code disty, number of groups nRCP, Tweedie power, initial values inits,
# control list and the dimensions n, S, p.x, p.w.
# Returns: the fitted-model list produced by the chosen fitting routine.
"regimix.fit" <-
function( outcomes, W, X, offy, wts, disty, nRCP, power, inits, control, n, S, p.x, p.w){
	#just one RCP type -- ie no dependence on environment
	if( nRCP==1)
		return( noRCPfit(outcomes, W, X, offy, wts, disty, nRCP, power, inits, control, n, S, p.x, p.w))
	#initial values
	start.vals <- get.start.vals( outcomes, W, X, offy, wts, disty, nRCP, S, power, inits, control$quiet)
	#doing the optimisation
	if( !control$quiet)
		message( "Quasi-Newton Optimisation")
	#(removed the unused local `optimiseDisp` that the old code set here)
	if( disty != 4)	#not Tweedie
		return( notTweedieOptimise( outcomes, X, W, offy, wts, S, nRCP, p.x, p.w, nrow( X), disty, start.vals, power, control))
	#Tweedie -- quite convoluted in comparison
	return( TweedieOptimise( outcomes, X, W, offy, wts, S, nRCP, p.x, p.w, nrow( X), disty, start.vals, power, control))
}
# Fit the RCP mixture model from nstart different starting values and
# return all fits in a list, so the user can pick the best local optimum.
# The data/design preparation is identical to regimix(); each refit is run
# silently (console output captured) and the fits may be distributed over
# mc.cores via parallel::mclapply.  Arguments are as for regimix(), plus:
#
#   nstart   number of (random) starts
#   mc.cores number of cores for the refits
# Returns: a list of nstart objects of class "regimix" (or NULL on
#   input errors).
"regimix.multifit" <-
function (form.RCP = NULL, form.spp = NULL, data, nRCP = 3, dist="Bernoulli", offset=NULL, weights=NULL, control = list(), inits = "random2", titbits = FALSE, power=1.6, nstart=10, mc.cores=1)
{
	#the control parameters
	control <- set.control( control)
	if( !control$quiet)
		message( "RCP modelling")
	call <- match.call()
	if( !is.null(form.RCP))
		form.RCP <- as.formula( form.RCP)
	else{
		if( !control$quiet)
			message( "There is no RCP model! Please provide a model (intercept at least) -- exitting now")
		return( NULL)
	}
	if( !is.null( form.spp))
		form.spp <- as.formula( form.spp)
	#rebuild the call as a model.frame() call evaluated in the caller's
	#frame, so data/offset/weights are resolved where the user supplied them
	mf <- match.call(expand.dots = FALSE)
	m <- match(c("data","offset","weights"), names(mf), 0L)
	mf <- mf[c(1L, m)]
	mf$drop.unused.levels <- TRUE
	mf$na.action <- "na.exclude"
	mf[[1L]] <- quote(stats::model.frame)
	mf <- eval(mf, parent.frame())
	##data <- as.data.frame(data)
	#get the data model frames and strip out any NAs
	dat <- clean.data( mf, form.RCP, form.spp)
	#get the outcomes
	outcomes <- model.response(dat$mf.X)
	S <- check.outcomes1(outcomes)
	if (!S) {
		if( !control$quiet)
			message("Two species have the same name -- exitting now")
		return(NULL)
	}
	if( !control$quiet)
		message( "There are: ", nRCP, "RCPs to group the sites into")
	#get the design matrix for RCP part of model
	X <- get.X(form.RCP, dat$mf.X)
	p.x <- ncol( X)
	#get design matrix for spp part of the model -- if there is one
	W <- get.W( form.spp, dat$mf.W)
	if( all( W != -999999))	#-999999 flags "no species model"
		p.w <- ncol( W)
	else
		p.w <- 0
	#get offset (if not specified then it will be zeros)
	offy <- get.offset( mf, dat$mf.X, dat$mf.W)
	#get model wts (if not specified then it will be ones)
	wts <- get.wts( mf)
	#get distribution
	disty.cases <- c("Bernoulli","Poisson","NegBin","Tweedie","Normal")
	disty <- get.dist( disty.cases, dist)
	#get power params for Tweedie
	#NOTE(review): regimix() calls get.power( disty, power, S) here -- the
	#missing S argument looks like an inconsistency; confirm against
	#get.power's definition
	power <- get.power( disty, power)
	#summarising data to console
	print.data.summ( data, dat, S, form.RCP, form.spp, disty.cases, disty, control$quiet)
	#worker: one complete fit from one starting configuration
	tmp.fun <- function(x){
		if( !control$quiet & nstart>1)
			setTxtProgressBar(pb, x)
		tmpQuiet <- control$quiet
		control$quiet <- TRUE	#silence the individual fit
		dumbOut <- capture.output( tmp <- regimix.fit( outcomes, W, X, offy, wts, disty, nRCP, power, inits, control, nrow(X), S, p.x, p.w))
		control$quiet <- tmpQuiet
		tmp$dist <- disty.cases[disty]
		#calculate the posterior probs
		if( nRCP>1)
			tmp$postProbs <- calcPostProbs( tmp$pis, tmp$logCondDens)
		else
			tmp$postProbs <- rep( 1, nrow( X))
		#Residuals --not calculating residuals here. Need to call residuals.regimix
		#Information criteria
		tmp <- calcInfoCrit( tmp)
		#titbits object, if wanted/needed.
		tmp$titbits <- get.titbits( titbits, outcomes, X, W, offy, wts, form.RCP, form.spp, control, dist, p.w=p.w, power)
		tmp$titbits$disty <- disty
		#the last bit of the regimix object puzzle
		tmp$call <- call
		class(tmp) <- "regimix"
		return( tmp)
	}
#	require( parallel)
	if( !control$quiet & nstart>1)
		pb <- txtProgressBar(min = 1, max = nstart, style = 3, char = "><(('> ")
	#Fit the model many times
	many.starts <- parallel::mclapply(1:nstart, tmp.fun, mc.cores=mc.cores)
	if( !control$quiet)
		message("")
	return(many.starts)
}
# Residuals for a fitted regimix model.
#   type="deviance": sqrt( -2 * per-site log-likelihood) -- sign and
#     centring are undefined (see the messages below); intended for
#     simulation-envelope diagnostics only.
#   type="RQR": randomised quantile residuals per site and species,
#     computed analytically from the mixture CDF (pi-weighted CDFs of the
#     component distributions) and mapped through qnorm.
# NOTE(review): the "RQR.sim" branch below is unreachable -- the guard at
#   the top only admits "deviance" and "RQR" -- and it references `fm` and
#   `mc.cores`, neither of which is defined in this function.  Confirm
#   whether it should be exposed (and repaired) or removed.
#
# Arguments:
#   object  a fitted regimix object (titbits must be stored for RQR)
#   ...     unused
#   type    "deviance" or "RQR"
#   quiet   suppress informational messages?
# Returns: a vector (deviance) or n x S matrix (RQR) of residuals.
"residuals.regimix" <-
function( object, ..., type="RQR", quiet=FALSE)
{
	if( ! type %in% c("deviance","RQR"))
		stop( "Unknown type of residual requested. Only deviance and RQR (for randomised quantile residuals) are implemented\n")
	if( type=="deviance"){
		resids <- sqrt( -2*object$logl.sites)
		if( !quiet){
			message( "The sign of the deviance residuals is unknown -- what does sign mean for multiple species? Their mean is also unknown -- what is a saturated model in a mixture model?")
			message( "This is not a problem if you are just looking for an over-all fit diagnostic using simulation envelopes (cf normal and half normal plots).")
			message( "It is a problem however, when you try to see how residuals vary with covariates etc.. but the meaning of these plots needs to be considered carefully as the residuals are for multiple species anyway.")
		}
	}
	if( type=="RQR"){
		resids <- matrix( NA, nrow=object$n, ncol=object$S)
		#per-distribution CDF, uniform argument signature (y, mu, logdisp, power)
		switch( object$dist,
			Bernoulli = { fn <- function(y,mu,logdisp,power) pbinom( q=y, size=1, prob=mu, lower.tail=TRUE)},
			Poisson = { fn <- function(y,mu,logdisp,power) ppois( q=y, lambda=mu, lower.tail=TRUE)},
			NegBin = { fn <- function(y,mu,logdisp,power) pnbinom( q=y, mu=mu, size=1/exp( logdisp), lower.tail=TRUE)},
			Tweedie = { fn <- function(y,mu,logdisp,power) fishMod::pTweedie( q=y, mu=mu, phi=exp( logdisp), p=power)},#CHECK!!!
			Normal = { fn <- function(y,mu,logdisp,power) pnorm( q=y, mean=mu, sd=exp( logdisp), lower.tail=TRUE)})
		for( ss in 1:object$S){
			if( all( object$titbits$power==-999999)) tmpPow <- NULL else tmpPow <- object$titbits$power[ss]
			#discrete distributions: randomise between the mixture CDF at y-1 and at y
			if( object$dist %in% c("Bernoulli","Poisson","NegBin")){
				tmpLower <- fn( object$titbits$Y[,ss]-1, object$mus[,ss,], object$coef$disp[ss], tmpPow)
				tmpUpper <- fn( object$titbits$Y[,ss], object$mus[,ss,], object$coef$disp[ss], tmpPow)
				tmpLower <- rowSums( tmpLower * object$pis)
				tmpLower <- ifelse( tmpLower<0, 0, tmpLower) #get rid of numerical errors for really small negative values
				tmpLower <- ifelse( tmpLower>1, 1, tmpLower) #get rid of numerical errors for 1+epsilon.
				tmpUpper <- rowSums( tmpUpper * object$pis)
				tmpUpper <- ifelse( tmpUpper<0, 0, tmpUpper) #get rid of numerical errors for really small negative values
				tmpUpper <- ifelse( tmpUpper>1, 1, tmpUpper) #get rid of numerical errors for 1+epsilon.
				resids[,ss] <- runif( object$n, min=tmpLower, max=tmpUpper)
				resids[,ss] <- qnorm( resids[,ss])
			}
			#Tweedie: continuous for y>0, point mass at zero randomised over [0, F(0)]
			if( object$dist == "Tweedie"){
				nonzero <- object$titbits$Y[,ss]>0
				tmpObs <- matrix( rep( object$titbits$Y[,ss], object$nRCP), ncol=object$nRCP)
				tmp <- matrix( fn( as.numeric( tmpObs[nonzero,]), as.numeric( object$mus[nonzero,ss,]), object$coefs$disp[ss], object$titbits$power[ss]), ncol=object$nRCP)
				tmp <- rowSums( tmp * object$pis[nonzero,])
				resids[nonzero,ss] <- qnorm( tmp)
				tmp <- matrix( fn( as.numeric( tmpObs[!nonzero,]), as.numeric( object$mus[!nonzero,ss,]), object$coefs$disp[ss], object$titbits$power[ss]), ncol=object$nRCP)
				tmp <- rowSums( tmp * object$pis[!nonzero,])
				resids[!nonzero,ss] <- qnorm( runif( sum( !nonzero), min=0, max=tmp))
			}
			#Normal: fully continuous, no randomisation needed
			if( object$dist == "Normal"){
				tmp <- fn( object$titbits$Y[,ss], object$mus[,ss,], object$coef$disp[ss], object$titbits$power[ss])
				tmp <- rowSums( tmp * object$pis)
				resids[,ss] <- qnorm( tmp)
			}
		}
		if( !quiet & sum( resids==Inf | resids==-Inf)>0)
			message( "Some residuals, well",sum( resids==Inf | resids==-Inf), "to be precise, are very large (infinite actually).\nThese observations lie right on the edge of the realistic range of the model for the data (maybe even over the edge).")
	}
	if( type=="RQR.sim"){
		nsim <- 1000
		if( is.null( mc.cores))
			mc.cores <- getOption("mc.cores", 4)
		resids <- matrix( NA, nrow=object$n, ncol=object$S)
		#worker: Monte Carlo RQR for site ii -- simulate nsim replicates at
		#that site's covariates and randomise within the empirical CDF step
		RQR.fun <- function(ii){
			if( !quiet)
				setTxtProgressBar(pb, ii)
			X1 <- kronecker( matrix( 1, ncol=1, nrow=nsim), fm$titbits$X[ii,,drop=FALSE])
			W1 <- kronecker( matrix( 1, ncol=1, nrow=nsim), fm$titbits$W[ii,,drop=FALSE])
			sims <- simRCPdata( nRCP=object$nRCP, S=object$S, n=nsim, p.x=object$p.x, p.w=object$p.w, alpha=object$coef$alpha, tau=object$coef$tau, beta=object$coef$beta, gamma=object$coef$gamma, logDisps=object$coef$disp, powers=object$titbits$power, X=X1, W=W1, offset=object$titbits$offset,dist=object$dist)
			sims <- sims[,1:object$S]
			yi <- object$titbits$Y[ii,,drop=FALSE]
			many_yi <- matrix( rep( yi, each=nsim), ncol=object$S)
			F_i <- colMeans( sims <= many_yi)
			F_i_minus <- colMeans( sims < many_yi)
			r_i <- runif( object$S, min=F_i_minus, max=F_i)
			return( qnorm( r_i))
		}
		if( !quiet)
			pb <- txtProgressBar(min = 1, max = object$n, style = 3, char = "><(('> ")
		if( Sys.info()['sysname'] == "Windows" | mc.cores==1)
			resids <- lapply( 1:object$n, RQR.fun)
		else
			resids <- parallel::mclapply( 1:object$n, RQR.fun, mc.cores=mc.cores)
		if( !quiet)
			message("")
		resids <- matrix( unlist( resids), nrow=object$n, ncol=object$S, byrow=TRUE)
		if( !quiet & sum( resids==Inf | resids==-Inf)>0)
			message( "Some residuals, well",sum( resids==Inf | resids==-Inf), "to be precise, are very large (infinite actually).\nThese observations lie right on the edge of the Monte Carlo approximation to the distribution function.\nThis may be remedied by getting a better approximation (increasing nsim).")
	}
	return( resids)
}
# Draw n samples from a Dirichlet distribution with parameter vector alpha
# (adapted from gtools::rdirichlet).  Each row of the returned
# n x length(alpha) matrix is one sample: independent Gamma(alpha[j], 1)
# draws normalised to sum to one.
"scotts.rdirichlet" <-
function (n, alpha)
{
	k <- length(alpha)
	#alpha recycles across the k*n gamma draws, giving one draw per
	#(sample, component) pair when filled by row
	gams <- matrix( rgamma( k * n, alpha), ncol = k, byrow = TRUE)
	rowTotals <- gams %*% rep(1, k)
	gams / as.vector( rowTotals)
}
"set.control" <-
function(control)
{
if (!("maxit" %in% names(control)))
control$maxit <- 500
if( !("quiet" %in% names( control)))
control$quiet <- FALSE
if (!("trace" %in% names(control)))
control$trace <- 1
if( control$quiet)
control$trace <- 0 #for no tracing
if (!("nreport" %in% names(control)))
control$nreport <- 10
if (!("abstol" %in% names(control)))
control$abstol <- 1e-05
if (!("reltol" %in% names(control)))
control$reltol <- sqrt(.Machine$double.eps)
if (!("optimise" %in% names( control)))
control$optimise <- TRUE
if (!("loglOnly" %in% names(control)))
control$loglOnly <- TRUE
if (!("derivOnly" %in% names( control)))
control$derivOnly <- TRUE
if (!("penalty" %in% names(control)))
control$penalty <- 0.01
else
if (control$penalty < 0) {
message("Supplied penalty for pis is negative, reverting to the default")
penalty <- 0.01
}
if (!("penalty.tau" %in% names( control)))
control$penalty.tau <- 10
else
if (control$penalty.tau <= 0) {
message("Supplied penalty for taus is negative, reverting to the default")
control$penalty.tau <- 10
}
if( !("penalty.gamma" %in% names( control)))
control$penalty.gamma <- 10
else
if( control$penalty.gamma <=0){
message("Supplied penalty for gammas is negative, reverting to the default")
control$penalty.gamma <- 10
}
if( !("penalty.disp" %in% names( control)))
control$penalty.disp <- c( 10, sqrt( 10)) #the mu and sd of a log-normal
else
if( control$penalty.disp[2] <= 0 | length( control$penalty.disp) != 2) {
message("Supplied penalty parameters for the dispersions is illogical, reverting to the default")
control$penalty.disp <- c( 10, sqrt( 10))
}
return( control)
}
"simRCPdata" <-
function (nRCP=3, S=20, n=200, p.x=3, p.w=0, alpha=NULL, tau=NULL, beta=NULL, gamma=NULL, logDisps=NULL, powers=NULL, X=NULL, W=NULL, offset=NULL, dist="Bernoulli")
{
if (is.null(alpha) | length(alpha) != S) {
message("Random alpha from normal (-1,0.5) distribution")
alpha <- rnorm(S,-1,0.5)
}
if (is.null(tau) | length(tau) != (nRCP - 1) * S) {
message("Random tau from standard normal")
tau <- rnorm( (nRCP-1)*S)
}
tau <- matrix(as.numeric(tau), nrow = nRCP - 1)
if (is.null(beta) | length(beta) != (nRCP - 1) * p.x) {
message("Random values for beta")
beta <- rnorm( p.x*(nRCP-1))#as.numeric(c(0, 0, 0.4, 0, -0.2, 1))
}
beta <- matrix(as.numeric(beta), nrow = nRCP - 1)
if( ( is.null(gamma) | length( gamma) != S * p.w)){
if( p.w != 0){
message("Random values for gamma")
gamma <- rnorm( p.w*S)
gamma <- matrix( as.numeric( gamma), nrow=S, ncol=p.w)
}
else
gamma <- NULL
}
else
gamma <- matrix( as.numeric( gamma), nrow=S)
if( dist == "NegBin" & (is.null( logDisps) | length( logDisps) != S)){
message( "Random values for overdispersions")
logDisps <- log( 1 + rgamma( n=S, shape=1, scale=0.75))
}
if( dist=="Tweedie" & (is.null( logDisps) | length( logDisps) != S)){
message( "Random values for species' dispersion parameters")
logDisps <- log( 1 + rgamma( n=S, shape=1, scale=0.75))
}
if( dist=="Tweedie" & (is.null( powers) | length( powers) != S)) {
message( "Power parameter assigned to 1.6 for each species")
powers <- rep( 1.6, S)
}
if( dist=="Normal" & (is.null( logDisps) | length( logDisps) != S)){
message( "Random values for species' variance parameters")
logDisps <- log( 1 + rgamma( n+S, shape=1, scale=0.75))
}
sppNames <- paste("spp", 1:S, sep = "")
if (is.null(X)) {
message("creating a RCP-level design matrix with random numbers")
X <- cbind(1, matrix(runif(n * (p.x - 1), min = -10, max = 10), nrow = n))
if( p.x > 1)
colnames(X) <- c("intercept", paste("x", 1:(p.x - 1), sep = ""))
else
colnames(X) <- "intercept"
}
if( p.w>0)
if( is.null( W)){
message("Creating a species-level design matrix with random factor levels")
W <- matrix(sample( c(0,1), size=(n*p.w), replace=TRUE), nrow=n, ncol=p.w)
colnames(W) <- c(paste("w", 1:p.w, sep = ""))
}
if( is.null( offset))
offset <- rep( 0, n)
if( !dist%in%c("Bernoulli","Poisson","NegBin","Tweedie","Normal")){
message( "Distribution not found, please choose from c('Bernoulli','Poisson','NegBin','Tweedie','Normal')")
return( NA)
}
etaPi <- X %*% t(beta)
pis <- t(apply(etaPi, 1, additive.logistic))
habis <- apply(pis, 1, function(x) sample(1:nRCP, 1, FALSE, x))
tau <- rbind(tau, -colSums(tau))
etaMu <- tau + rep(alpha, each = nRCP)
etaMu1 <- array( rep( offset, each=nRCP*S), dim=c(nRCP,S,n))
if( p.w > 0){
etaMu2 <- W %*% t( gamma)
for( hh in 1:nRCP)
etaMu1[hh,,] <- etaMu1[hh,,] + t( etaMu2)
}
for( hh in 1:nRCP)
etaMu1[hh,,] <- etaMu1[hh,,] + rep( etaMu[hh,], times=n)
etaMu <- etaMu1
if( dist=="Bernoulli")
mu <- inv.logit(etaMu)
if( dist %in% c("Poisson","NegBin","Tweedie"))
mu <- exp( etaMu)
if( dist == "Normal")
mu <- etaMu
fitted <- matrix( NA, nrow=n, ncol=S)
for( ii in 1:n)
fitted[ii,] <- mu[habis[ii], ,ii]
if( dist=="Bernoulli")
outcomes <- matrix(rbinom(n * S, 1, as.numeric( fitted)), nrow = n, ncol = S)
if( dist=="Poisson")
outcomes <- matrix(rpois(n * S, lambda=as.numeric( fitted)), nrow = n, ncol = S)
if( dist=="NegBin")
outcomes <- matrix(rnbinom(n * S, mu=as.numeric( fitted), size=1/rep(exp( logDisps), each=n)), nrow = n, ncol = S)
if( dist=="Tweedie")
outcomes <- matrix( fishMod::rTweedie( n * S, mu=as.numeric( fitted), phi=rep( exp( logDisps), each=n), p=rep( powers, each=n)), nrow=n, ncol=S)
if( dist=="Normal")
outcomes <- matrix( rnorm( n=n*S, mean=as.numeric( fitted), sd=rep( exp( logDisps), each=n)), nrow=n, ncol=S)
colnames(outcomes) <- paste("spp", 1:S, sep = "")
if( !all( offset==0))
res <- as.data.frame(cbind(outcomes, X, W, offset))
else
res <- as.data.frame(cbind(outcomes, X, W))
attr(res, "RCPs") <- habis
attr(res, "pis") <- pis
attr(res, "alpha") <- alpha
attr(res, "tau") <- tau[-nRCP, ]
attr(res, "beta") <- beta
attr(res, "gamma") <- gamma
attr(res, "logDisps") <- logDisps
attr(res, "mu") <- mu
return(res)
}
"stability.regimix" <-
function( model, oosSizeRange=NULL, times=model$n, mc.cores=1, quiet=FALSE, doPlot=TRUE)
{
if( is.null( oosSizeRange))
oosSizeRange <- round( seq( from=1, to=model$n%/%5, length=10))
if( any( oosSizeRange < 1))
stop( "Silly number of RCPs. Specified range is: ", oosSizeRange, " and they should all be >= 1")
disty <- matrix( NA, nrow=length( oosSizeRange), ncol=model$nRCP)
predlogls <- array( NA, dim=c(length( oosSizeRange), model$n, times)) #matrix( NA, nrow=length( oosSizeRange), ncol=times)
for( ii in oosSizeRange){
tmp <- cooks.distance( model, oosSize=ii, times=times, mc.cores=mc.cores, quiet=quiet)
disty[oosSizeRange==ii,] <- colMeans( abs( tmp$cooksD))
predlogls[oosSizeRange==ii,,] <- tmp$predLogL
#predlogls[oosSizeRange==ii,] <- colMeans( tmp$predLogL, na.rm=TRUE)
}
ret <- list( oosSizeRange=oosSizeRange, disty=disty, nRCP=model$nRCP,n=model$n, predlogls=predlogls, logl.sites=model$logl.sites)
class( ret) <- "registab"
if( doPlot)
plot( ret)
invisible( ret)
}
"summary.regimix" <-
function (object, ...)
{
if (is.null(object$vcov)) {
object$vcov <- matrix(NA, nrow = length(unlist(object$coef)),
ncol = length(unlist(object$coef)))
stop("No variance matrix has been supplied")
}
message("Standard errors for alpha, tau and (probably) gamma parameters may be (are likely to be) misleading")
res <- cbind(unlist(object$coefs), sqrt(diag(object$vcov)))
res <- cbind(res, res[, 1]/res[, 2])
res <- cbind(res, 2 * (1 - pnorm(abs(res[, 3]))))
colnames(res) <- c("Estimate", "SE", "z-score", "p")
return(res)
}
"TweedieOptimise" <-
function( outcomes, X, W, offy, wts, S, nRCP, p.x, p.w, n, disty, start.vals, power, control)
{
Tw.phi.func <- function( phi1, spp3){
disp3 <- disp
disp3[spp3] <- phi1
tmp1 <- .Call( "RCP_C", as.numeric(outcomes), as.numeric(X), as.numeric(W), as.numeric( offy), as.numeric( wts),
as.integer(S), as.integer(nRCP), as.integer(p.x), as.integer(p.w), as.integer(n), as.integer( disty),
alpha, tau, beta, gamma, disp3, power,
as.numeric(control$penalty), as.numeric(control$penalty.tau), as.numeric( control$penalty.gamma), as.numeric( control$penalty.disp[1]), as.numeric( control$penalty.disp[2]),
alpha.score, tau.score, beta.score, gamma.score, disp.score, scoreContri,
pis, mus, logCondDens, logls,
as.integer(control$maxit), as.integer(control$trace), as.integer(control$nreport), as.numeric(control$abstol), as.numeric(control$reltol), as.integer(conv),
as.integer(FALSE), as.integer(TRUE), as.integer( FALSE), as.integer( TRUE), as.integer( FALSE), PACKAGE = "RCPmod")
return( -as.numeric( tmp1))
}
Tw.phi.func.grad <- function( phi1, spp3){
disp3 <- disp
disp3[spp3] <- phi1
tmp.disp.score <- rep( -99999, S)
tmp1 <- .Call( "RCP_C", as.numeric(outcomes), as.numeric(X), as.numeric(W), as.numeric( offy), as.numeric( wts),
as.integer(S), as.integer(nRCP), as.integer(p.x), as.integer(p.w), as.integer(n), as.integer( disty),
alpha, tau, beta, gamma, disp3, power,
as.numeric(control$penalty), as.numeric(control$penalty.tau), as.numeric( control$penalty.gamma), as.numeric( control$penalty.disp[1]), as.numeric( control$penalty.disp[2]),
alpha.score, tau.score, beta.score, gamma.score, tmp.disp.score, scoreContri,
pis, mus, logCondDens, logls,
as.integer(control$maxit), as.integer(control$trace), as.integer(control$nreport), as.numeric(control$abstol), as.numeric(control$reltol), as.integer(conv),
as.integer(FALSE), as.integer(FALSE), as.integer(TRUE), as.integer( TRUE), as.integer( FALSE), PACKAGE = "RCPmod")
return( -as.numeric( tmp.disp.score[spp3]))
}
inits <- c(start.vals$alpha, start.vals$tau, start.vals$beta, start.vals$gamma, start.vals$disp)
alpha <- start.vals$alpha; tau <- as.numeric( start.vals$tau); beta <- as.numeric( start.vals$beta); gamma <- as.numeric( start.vals$gamma); disp <- start.vals$disp
#scores
alpha.score <- as.numeric(rep(NA, S))
tau.score <- as.numeric(matrix(NA, ncol = S, nrow = nRCP - 1))
beta.score <- as.numeric(matrix(NA, ncol = ncol(X), nrow = nRCP - 1))
if( p.w > 0)
gamma.score <- as.numeric(matrix( NA, nrow=S, ncol=ncol(W)))
else
gamma.score <- -999999
if( disty %in% 3:5)
disp.score <- as.numeric( rep( NA, S))
else
disp.score <- -999999
scoreContri <- -999999 #as.numeric(matrix(NA, ncol = length(inits), nrow = n))
#model quantities
pis <- as.numeric(matrix(NA, nrow = n, ncol = nRCP)) #container for the fitted RCP model
mus <- as.numeric(array( NA, dim=c( n, S, nRCP))) #container for the fitted spp model
logCondDens <- as.numeric(matrix(NA, nrow = n, ncol = nRCP))
logls <- as.numeric(rep(NA, n))
conv <- as.integer(0)
optimiseDisp <- FALSE
kount <- 1
tmp.new <- tmp.old <- -999999
if( control$optimise){
while( (abs( abs( tmp.new - tmp.old) / ( abs( tmp.old) + control$reltol)) > control$reltol | kount==1) & (kount < 15)){
kount <- kount + 1
tmp.old <- tmp.new
message( "Updating Location Parameters: ", appendLF=FALSE)
tmp <- .Call( "RCP_C", as.numeric(outcomes), as.numeric(X), as.numeric(W), as.numeric( offy), as.numeric( wts),
as.integer(S), as.integer(nRCP), as.integer(p.x), as.integer(p.w), as.integer(n), as.integer( disty),
alpha, tau, beta, gamma, disp, power,
as.numeric(control$penalty), as.numeric(control$penalty.tau), as.numeric( control$penalty.gamma), as.numeric( control$penalty.disp[1]), as.numeric( control$penalty.disp[2]),
alpha.score, tau.score, beta.score, gamma.score, disp.score, scoreContri,
pis, mus, logCondDens, logls,
as.integer(control$maxit), as.integer(control$trace), as.integer(control$nreport), as.numeric(control$abstol), as.numeric(control$reltol), as.integer(conv),
as.integer(control$optimise), as.integer(TRUE), as.integer( FALSE), as.integer(optimiseDisp), as.integer( FALSE), PACKAGE = "RCPmod")
message( "Updating Dispersion Parameters: ", appendLF=FALSE)
for( ii in 1:S){
tmp1 <- nlminb( disp[ii], Tw.phi.func, Tw.phi.func.grad, spp3=ii, control=list( trace=0))
disp[ii] <- tmp1$par
message( tmp1$objective, " ")
}
message( "")
tmp.new <- -tmp1$objective
}
}
tmp <- .Call( "RCP_C", as.numeric(outcomes), as.numeric(X), as.numeric(W), as.numeric( offy), as.numeric( wts),
as.integer(S), as.integer(nRCP), as.integer(p.x), as.integer(p.w), as.integer(n), as.integer( disty),
alpha, tau, beta, gamma, disp, power,
as.numeric(control$penalty), as.numeric(control$penalty.tau), as.numeric( control$penalty.gamma), as.numeric( control$penalty.disp[1]), as.numeric( control$penalty.disp[2]),
alpha.score, tau.score, beta.score, gamma.score, disp.score, scoreContri,
pis, mus, logCondDens, logls,
as.integer(control$maxit), as.integer(control$trace), as.integer(control$nreport), as.numeric(control$abstol), as.numeric(control$reltol), as.integer(conv),
as.integer(FALSE), as.integer( TRUE), as.integer(TRUE), as.integer(TRUE), as.integer( FALSE), PACKAGE = "RCPmod")
ret <- list()
ret$pis <- matrix(pis, ncol = nRCP)
ret$mus <- array( mus, dim=c(n,S,nRCP))
ret$coefs <- list(alpha = alpha, tau = tau, beta = beta, gamma=gamma, disp=disp)
if( any( ret$coefs$gamma==-999999, na.rm=TRUE))
ret$coefs$gamma <- NULL
if( any( ret$coefs$disp==-999999, na.rm=TRUE))
ret$coefs$disp <- NULL
ret$names <- list( spp=colnames( outcomes), RCPs=paste( "RCP", 1:nRCP, sep=""), Xvars=colnames( X))
if( p.w>0)
ret$names$Wvars <- colnames( W)
else
ret$names$Wvars <- NA
ret$scores <- list(alpha = alpha.score, tau = tau.score, beta = beta.score, gamma = gamma.score, disp=disp.score)
if( any( ret$scores$gamma==-999999, na.rm=TRUE))
ret$scores$gamma <- NULL
if( any( ret$scores$disp==-999999, na.rm=TRUE))
ret$scores$disp <- NULL
ret$logCondDens <- matrix(logCondDens, ncol = nRCP)
if( control$optimise)
ret$conv <- conv
else
ret$conv <- "not optimised"
ret$S <- S; ret$nRCP <- nRCP; ret$p.x <- p.x; ret$p.w <- p.w; ret$n <- n
ret$start.vals <- inits
ret$logl <- tmp
ret$logl.sites <- logls #for residuals
return( ret)
}
"vcov.regimix" <-
function (object, ..., object2=NULL, method = "FiniteDifference", nboot = 1000, mc.cores=1, D.accuracy=2)
{
if( method %in% c("simple","Richardson"))
method <- "FiniteDifference"
if (!method %in% c("FiniteDifference", "BayesBoot", "SimpleBoot", "EmpiricalInfo")) {
error("Unknown method to calculate variance matrix, viable options are: 'FiniteDifference' (numerical), 'BayesBoot' (bootstrap), 'SimpleBoot' (bootstrap)', and 'EmpiricalInfo'.")
return(NULL)
}
if( Sys.info()['sysname'] == "Windows")
mc.cores <- 1
X <- object$titbits$X
p.x <- ncol( X)
if( inherits( object$titbits$form.spp, "formula")){
form.W <- object$titbits$form.spp
W <- object$titbits$W
p.w <- ncol( W)
}
else{
form.W <- NULL
W <- -999999
p.w <- 0
}
offy <- object$titbits$offset
wts <- object$titbits$wts
Y <- object$titbits$Y
disty <- object$titbits$disty
power <- object$titbits$power
S <- object$S
nRCP <- object$nRCP
p.x <- object$p.x
p.w <- object$p.w
n <- object$n
disty <- object$titbits$disty
control <- object$titbits$control
pis <- as.numeric( matrix( -999999, nrow = n, ncol = nRCP))
mus <- as.numeric( array( -999999, dim=c( n, S, nRCP)))
logCondDens <- as.numeric( matrix( -999999, nrow = n, ncol = nRCP))
logls <- as.numeric(rep(-999999, n))
alpha.score <- as.numeric(rep(-999999, S))
tau.score <- as.numeric(matrix(-999999, nrow = nRCP - 1, ncol = S))
beta.score <- as.numeric(matrix(-999999, nrow = nRCP - 1, ncol = p.x))
if( p.w > 0)
gamma.score <- as.numeric( matrix( -999999, nrow = S, ncol = p.w))
else
gamma.score <- -999999
if( !is.null( object$coef$disp))
disp.score <- as.numeric( rep( -999999, S))
else
disp.score <- -999999
conv <- FALSE
if (method %in% c("FiniteDifference")) {
my.fun <- function(x) {
start <- 0
alpha <- x[start + 1:S]
start <- start + S
tau <- x[start + 1:((nRCP - 1) * S)]
start <- start + (nRCP-1)*S
beta <- x[start + 1:((nRCP - 1) * p.x)]
start <- start + (nRCP-1)*p.x
if( p.w > 0){
gamma <- x[start + 1:(S*p.w)]
start <- start + S*p.w
}
else
gamma <- -999999
if( any( !is.null( object$coef$disp)))
disp <- x[start + 1:S]
else
disp <- -999999
scoreContri <- -999999
tmp <- .Call( "RCP_C", as.numeric(Y), as.numeric(X), as.numeric(W), as.numeric( offy), as.numeric( wts),
as.integer(S), as.integer(nRCP), as.integer(p.x), as.integer(p.w), as.integer(n), as.integer( disty),
alpha, tau, beta, gamma, disp, power,
as.numeric(control$penalty), as.numeric(control$penalty.tau), as.numeric( control$penalty.gamma), as.numeric( control$penalty.disp[1]), as.numeric( control$penalty.disp[2]),
alpha.score, tau.score, beta.score, gamma.score, disp.score, scoreContri,
pis, mus, logCondDens, logls,
as.integer(control$maxit), as.integer(control$trace), as.integer(control$nreport), as.numeric(control$abstol), as.numeric(control$reltol), as.integer(conv),
as.integer( FALSE), as.integer( FALSE), as.integer( TRUE), as.integer( TRUE), as.integer( FALSE), PACKAGE = "RCPmod")
tmp1 <- c(alpha.score, tau.score, beta.score)
if( p.w > 0)#class( object$titbits$form.spp) == "formula")
tmp1 <- c( tmp1, gamma.score)
if( !is.null( object$coef$disp))
tmp1 <- c( tmp1, disp.score)
return(tmp1)
}
hess <- nd2(x0=unlist( object$coefs), f=my.fun, mc.cores=mc.cores, D.accur=D.accuracy)#numDeriv::jacobian(my.fun, unlist(object$coefs), method = method)
vcov.mat <- try( -solve(hess))
if( inherits( vcov.mat, 'try-error')){
attr(vcov.mat, "hess") <- hess
warning( "Hessian appears to be singular and its inverse (the vcov matrix) cannot be calculated\nThe Hessian is returned as an attribute of the result (for diagnostics).\nMy deepest sympathies. You could try changing the specification of the model, increasing the penalties, or getting more data.")
}
else
vcov.mat <- ( vcov.mat + t(vcov.mat)) / 2 #to ensure symmetry
}
if( method %in% c( "BayesBoot","SimpleBoot")){
object$titbits$control$optimise <- TRUE #just in case it was turned off (see regimix.multfit)
if( is.null( object2))
coefMat <- regiboot( object, nboot=nboot, type=method, mc.cores=mc.cores, quiet=TRUE, orderSamps=FALSE)
else
coefMat <- object2
vcov.mat <- cov( coefMat)
}
if( method=="EmpiricalInfo"){
message( "Information approximated by empirical methods. I have not been able to get this to work, even for simulated data. I hope that you are feeling brave!")
alpha <- object$coef$alpha
tau <- object$coef$tau
beta <- object$coef$beta
if( p.w > 0)
gamma <- object$coef$gamma
else
gamma <- -999999
if( any( !is.null( object$coef$disp)))
disp <- object$coef$disp
else
disp <- -999999
scoreContri <- as.numeric( matrix( NA, nrow=n, ncol=length( unlist( object$coef))))
tmp <- .Call( "RCP_C", as.numeric(Y), as.numeric(X), as.numeric(W), as.numeric( offy), as.numeric( wts),
as.integer(S), as.integer(nRCP), as.integer(p.x), as.integer(p.w), as.integer(n), as.integer( disty),
alpha, tau, beta, gamma, disp, power,
as.numeric(control$penalty), as.numeric(control$penalty.tau), as.numeric( control$penalty.gamma), as.numeric( control$penalty.disp[1]), as.numeric( control$penalty.disp[2]),
alpha.score, tau.score, beta.score, gamma.score, disp.score, scoreContri,
pis, mus, logCondDens, logls,
as.integer(control$maxit), as.integer(control$trace), as.integer(control$nreport), as.numeric(control$abstol), as.numeric(control$reltol), as.integer(conv),
as.integer( FALSE), as.integer( FALSE), as.integer( TRUE), as.integer( TRUE), as.integer( TRUE), PACKAGE = "RCPmod")
scoreContri <- matrix( scoreContri, nrow=n)
summy <- matrix( 0, ncol=ncol( scoreContri), nrow=ncol( scoreContri))
for( ii in 1:n){
summy <- summy + scoreContri[ii,] %o% scoreContri[ii,]
}
tmp <- colSums( scoreContri)
tmp <- tmp %o% tmp / n
emp.info <- summy - tmp
# diag( emp.info) <- diag( emp.info) + 0.00001 #makes it invertable but not realistic.
vcov.mat <- try( solve( emp.info))
if( inherits( vcov.mat, 'try-error')){
attr(vcov.mat, "hess") <- emp.info
warning( "Empirical information matrix (average of the cross-products of the scores for each observation) appears to be singular and its inverse (the vcov matrix) cannot be calculated\nThe empirical inverse is returned as an attribute of the result (for diagnostics).\nMy deepest sympathies. You could try changing the specification of the model, increasing the penalties, or getting more data. Note that you have chosen to use method=\"EmpricalInfo\", which is likely to cause heartache (albeit computationally thrifty heartache) -- try other methods (and probably do that first).")
}
else
vcov.mat <- ( vcov.mat + t(vcov.mat)) / 2 #to ensure symmetry
}
return(vcov.mat)
}
# MVB's workaround for the spurious CRAN check NOTE "no visible binding for
# global variable ...": declare all such names via globalVariables().
globalVariables( package="RCPmod",
names=c( ".Traceback"
,"dll.path"
,"libname"
,"pkgname"
,"subarch"
,"r_arch"
,"this.ext"
,"dynlib.ext"
,"dlls"
,"x"
,"tmp"
,"p"
,"object"
,"coefs"
,"k"
,"star.ic"
,"logl"
,"n"
,"ret"
,"logPostProbs"
,"pis"
,"logCondDens"
,"mset"
,"logSums"
,"postProbs"
,"nam"
,"outs"
,"mf.X"
,"form1"
,"data"
,"form2"
,"mf.W"
,"ids"
,"res"
,"alpha"
,"spp"
,"tau"
,"nRCP"
,"S"
,"p.x"
,"Xvars"
,"p.w"
,"Wvars"
,"disp"
,"logDisp"
,"oosSize"
,"model"
,"titbits"
,"quiet"
,"pb"
,"txtProgressBar"
,"times"
,"funny"
,"setTxtProgressBar"
,"OOBag"
,"inBag"
,"new.wts"
,"wts"
,"control"
,"tmpmodel"
,"Y"
,"W"
,"X"
,"disty"
,"OOSppPreds"
,"ss"
,"mus"
,"newPis"
,"r.negi"
,"alpha.score"
,"tau.score"
,"beta.score"
,"gamma.score"
,"disp.score"
,"scoreContri"
,"logls"
,"conv"
,"tmplogl"
,"penalty"
,"penalty.tau"
,"penalty.gamma"
,"penalty.disp"
,"maxit"
,"nreport"
,"abstol"
,"reltol"
,"ret.logl"
,"mc.cores"
,"parallel"
,"cooksD"
,"cooksDist"
,"OOpreds"
,"bb"
,"predLogL"
,"edf"
,"fit"
,"aic"
,"error.msg"
,"disty.cases"
,"dist1"
,"coef.obj"
,"colnammy"
,"offy"
,"mf"
,"type"
,"resids"
,"site.logls"
,"ii"
,"X1"
,"nsim"
,"W1"
,"sims"
,"n.sim"
,"pwers"
,"G"
,"inits"
,"outcomes"
,"tmp1"
,"tmpGrp"
,"tmpX"
,"lambda.seq"
,"fam"
,"tmp.fm"
,"locat.s"
,"my.coefs"
,"lastID"
,"tail"
,"df3"
,"tmp.fm1"
,"fishMod"
,"y"
,"."
,"MASS"
,"preds"
,"my.sd"
,"mult"
,"form.RCP"
,"form.spp"
,"form.W"
,"tmp.fun"
,"intercepts"
,"form.X"
,"eps"
,"en"
,"root"
,"c1"
,"eta"
,"mu"
,"double.eps"
,"sigma1"
,"method"
,"ev"
,"values"
,"retval"
,"vectors"
,"sigsvd"
,"d"
,"v"
,"u"
,"o"
,"D.n"
,"x0"
,"m"
,"D.f0"
,"f"
,"..."
,"D.accur"
,"D.w"
,"D.co"
,"D.n.c"
,"macheps"
,"D.h"
,"D.deriv"
,"mc.fun"
,"D.temp.f"
,"jj"
,"D.xd"
,"tmp.fun.vals"
,"theta"
,"scores"
,"start.vals"
,"logl.sites"
,"loglOnly"
,"derivOnly"
,"RCPs"
,"simDat"
,"posts"
,"fm"
,"perms"
,"gtools"
,"classErr"
,"classErrRunnerUp"
,"postsTMP"
,"my.tab"
,"perm"
,"G1"
,"G2"
,"new.fm"
,"species"
,"obs.resid"
,"shad"
,"alpha.conf"
,"allResids"
,"s"
,"newy"
,"allResidsSort"
,"quants"
,"envel"
,"sort.resid"
,"empQuant"
,"realMeans"
,"realDiff"
,"aa"
,"grey"
,"globEnvel"
,"sppID"
,"spp.cols"
,"main"
,"fitted.scale"
,"loggy"
,"oosSizeRange"
,"oosDiffs"
,"oosWidth"
,"minWidth"
,"histy"
,"predlogls"
,"ncuts"
,"max.dens"
,"ylimmo"
,"breaks"
,"rgb"
,"colorRamp"
,"newdata"
,"titbit"
,"object2"
,"nboot"
,"my.nboot"
,"allCoBoot"
,"alphaBoot"
,"tauBoot"
,"betaBoot"
,"alphaIn"
,"tauIn"
,"betaIn"
,"gammaIn"
,"dispIn"
,"powerIn"
,"predCol"
,"ptPreds"
,"bootPreds"
,"conc"
,"mysd"
,"myContr"
,"boot.funny"
,"bootSampsToUse"
,"seg"
,"bPreds"
,"row.exp"
,"ses"
,"cis"
,"n.tot"
,"dat"
,"Call"
,"Distribution"
,"n.reorder"
,"all.wts"
,"MLstart"
,"my.inits"
,"orderSamps"
,"my.fun"
,"dummy"
,"dumbOut"
,"capture.output"
,"samp.object"
,"flag"
,"tmpOldQuiet"
,"boot.estis"
,"drop.unused.levels"
,"stats"
,"optimiseDisp"
,"nstart"
,"tmpQuiet"
,"many.starts"
,"fn"
,"logdisp"
,"tmpPow"
,"tmpLower"
,"tmpUpper"
,"nonzero"
,"tmpObs"
,"RQR.fun"
,"yi"
,"many_yi"
,"F_i"
,"F_i_minus"
,"r_i"
,"len"
,"sm"
,"logDisps"
,"powers"
,"sppNames"
,"etaPi"
,"habis"
,"etaMu"
,"etaMu1"
,"etaMu2"
,"hh"
,"doPlot"
,"Tw.phi.func"
,"disp3"
,"spp3"
,"phi1"
,"Tw.phi.func.grad"
,"tmp.disp.score"
,"kount"
,"tmp.new"
,"tmp.old"
,"objective"
,"error"
,"hess"
,"D.accuracy"
,"vcov.mat"
,"coefMat"
,"summy"
,"emp.info"
))
|
083feed2496817d6b3066facd52c9f824dc96bcb | 942d40cffac9a26fb6a195d0b535eeb4ecc9c786 | /man/format_output.Rd | c876afb70c87b0ba839ecb4c2d14542973be5b57 | [
"MIT"
] | permissive | willpearse/squire | 7652a94bd2c75e75fbe24ff3e421c87ac02fa018 | c33ff50849f2d0886423bc7887e972ac3e0f99ec | refs/heads/master | 2022-11-06T14:11:54.940845 | 2020-06-16T15:28:19 | 2020-06-16T15:28:19 | 270,962,087 | 0 | 0 | MIT | 2020-06-09T09:39:49 | 2020-06-09T09:39:48 | null | UTF-8 | R | false | true | 1,157 | rd | format_output.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_output.R
\name{format_output}
\alias{format_output}
\title{Format model output as data.frame}
\usage{
format_output(
x,
var_select = NULL,
reduce_age = TRUE,
combine_compartments = TRUE,
date_0 = NULL
)
}
\arguments{
\item{x}{squire_simulation object}
\item{var_select}{Vector of compartment names, e.g. \code{c("S", "R")}. In
addition a number of summary compartment can be requested. These include:
\itemize{
\item{"deaths"}{ Daily Deaths }
\item{"infections"}{ Daily Infections }
\item{"hospital_occupancy"}{ Occupied Hospital Beds }
\item{"ICU_occupancy"}{ Occupied ICU Beds }
\item{"hospital_demand}{ Required Hospital Beds }
\item{"ICU_demand}{ Required ICU Beds }
}}
\item{reduce_age}{Collapse age-dimension, calculating the total in the
compartment.}
\item{combine_compartments}{Collapse compartments of same type together
(e.g. E1 and E2 -> E)}
\item{date_0}{Date of time 0, if specified a date column will be added}
}
\value{
Formatted long data.frame
}
\description{
Format model output as data.frame
}
|
57e4d8ded4f12ef112da882edda4d90321bac1e6 | 9aeff507412b57718da6db67e708bdf04aa83228 | /R/transformdata.back.R | 0f999ed2320aa30db9009bc2ed57b2aa0dad94d1 | [] | no_license | lozalojo/mem | df00ae00aa190e91d3217ddc60736eb86894f037 | e8bbdcc1df8e31cbeb036ac5037f79b7375d976c | refs/heads/master | 2023-07-03T13:44:40.371634 | 2023-06-21T06:21:11 | 2023-06-21T06:21:11 | 47,120,918 | 11 | 3 | null | 2023-06-21T06:21:12 | 2015-11-30T13:39:39 | R | UTF-8 | R | false | false | 8,298 | r | transformdata.back.R | #' @title Data transformation
#'
#' @description
#' Function \code{transformdata.back} transforms data from week,rate1,...,rateN to year,week,rate
#' format.
#'
#' @name transformdata.back
#'
#' @param i.data Data frame of input data.
#' @param i.name Name of the column that contains the values.
#' @param i.cutoff.original Cutoff point between seasons when they have two years
#' @param i.range.x.final Range of the surveillance period in the output dataset
#' @param i.fun Summarising function used to collapse duplicated year/week values (defaults to \code{mean}).
#'
#' @return
#' \code{transformdata.back} returns a data.frame with three columns, year, week and rate.
#'
#' @details
#' Transforms data from the season in each column format (the one that uses \link{mem})
#' to the format year, week, rate in a 3 columns data.frame.
#'
#' Allows to set the cutoff point to separate between two seasons when one season has
#' two different years.
#'
#' @examples
#' # Castilla y Leon Influenza Rates data
#' data(flucyl)
#' # Transform data
#' newdata <- transformdata.back(flucyl)$data
#' @author Jose E. Lozano \email{lozalojo@@gmail.com}
#'
#' @references
#' Vega T, Lozano JE, Ortiz de Lejarazu R, Gutierrez Perez M. Modelling influenza epidemic - can we
#' detect the beginning and predict the intensity and duration? Int Congr Ser. 2004 Jun;1263:281-3.
#'
#' Vega T, Lozano JE, Meerhoff T, Snacken R, Mott J, Ortiz de Lejarazu R, et al. Influenza surveillance
#' in Europe: establishing epidemic thresholds by the moving epidemic method. Influenza Other Respir
#' Viruses. 2013 Jul;7(4):546-58. DOI:10.1111/j.1750-2659.2012.00422.x.
#'
#' Vega T, Lozano JE, Meerhoff T, Snacken R, Beaute J, Jorgensen P, et al. Influenza surveillance in
#' Europe: comparing intensity levels calculated using the moving epidemic method. Influenza Other
#' Respir Viruses. 2015 Sep;9(5):234-46. DOI:10.1111/irv.12330.
#'
#' Lozano JE. lozalojo/mem: Second release of the MEM R library. Zenodo [Internet]. [cited 2017 Feb 1];
#' Available from: \url{https://zenodo.org/record/165983}. DOI:10.5281/zenodo.165983
#'
#' @keywords influenza
#'
#' @export
# @importFrom stats aggregate
#' @importFrom tidyr extract gather
#' @importFrom dplyr %>% filter group_by summarise arrange
transformdata.back <- function(i.data, i.name = "rates", i.cutoff.original = NA, i.range.x.final = NA, i.fun = mean) {
  # Convert mem wide-format data (one column per season, rows indexed by
  # surveillance week) back to a long data.frame with columns year, week and
  # <i.name>. Weeks below i.cutoff.original belong to the second year of a
  # two-year season; i.range.x.final fixes the week range of the output.
  #
  # Default cutoff: the smallest week number found among the first rows
  # (assumes row names are surveillance week numbers).
  if (is.na(i.cutoff.original)) i.cutoff.original <- min(as.numeric(rownames(i.data)[1:(min(3, NROW(i.data)))]))
  if (i.cutoff.original < 1) i.cutoff.original <- 1
  if (i.cutoff.original > 53) i.cutoff.original <- 53
  # Default output range: first week of the first rows to last week of the
  # last rows; coerced into [1, 53] and forced to be a non-degenerate range.
  if (any(is.na(i.range.x.final)) | !is.numeric(i.range.x.final) | length(i.range.x.final) != 2) i.range.x.final <- c(min(as.numeric(rownames(i.data)[1:(min(3, NROW(i.data)))])), max(as.numeric(rownames(i.data)[(max(1, NROW(i.data) - 2)):NROW(i.data)])))
  if (i.range.x.final[1] < 1) i.range.x.final[1] <- 1
  if (i.range.x.final[1] > 53) i.range.x.final[1] <- 53
  if (i.range.x.final[2] < 1) i.range.x.final[2] <- 1
  if (i.range.x.final[2] > 53) i.range.x.final[2] <- 53
  if (i.range.x.final[1] == i.range.x.final[2]) i.range.x.final[2] <- i.range.x.final[2] - 1
  if (i.range.x.final[2] == 0) i.range.x.final[2] <- 53
  n.seasons <- NCOL(i.data)
  # First: analize names of seasons and seasons with week 53
  # if (n.seasons>1){
  # seasons<-data.frame(names(i.data),matrix(stringr:: str_match(names(i.data),"(\\d{4})(?:.*(\\d{4}))?(?:.*\\(.*(\\d{1,}).*\\))?"),nrow=n.seasons,byrow=F)[,-1],stringsAsFactors = F)
  # }else{
  # seasons<-data.frame(t(c(names(i.data),stringr:: str_match(names(i.data),"(\\d{4})(?:.*(\\d{4}))?(?:.*\\(.*(\\d{1,}).*\\))?")[-1])),stringsAsFactors = F)
  # }
  # names(seasons)<-c("column","anioi","aniof","aniow")
  # Changed dependency of stringr for tydir builtin function extract
  # The regex pulls the start year (anioi), optional end year (aniof) and an
  # optional trailing number (aniow) -- apparently the part number of a
  # season split into several pieces -- out of each column name.
  column <- NULL
  seasons <- data.frame(column = names(i.data), stringsAsFactors = F) %>%
    extract(column, into = c("anioi", "aniof", "aniow"), regex = "^[^\\d]*(\\d{4})(?:[^\\d]*(\\d{4}))?(?:[^\\d]*(\\d{1,}))?[^\\d]*$", remove = F)
  seasons[is.na(seasons)] <- ""
  seasons$aniof[seasons$aniof == ""] <- seasons$anioi[seasons$aniof == ""]
  # Rebuild a canonical season label: "YYYY/YYYY" plus "(part)" if present.
  seasonsname <- seasons$anioi
  seasonsname[seasons$aniof != ""] <- paste(seasonsname[seasons$aniof != ""], seasons$aniof[seasons$aniof != ""], sep = "/")
  seasonsname[seasons$aniow != ""] <- paste(seasonsname[seasons$aniow != ""], "(", seasons$aniow[seasons$aniow != ""], ")", sep = "")
  seasons$season <- seasonsname
  rm("seasonsname")
  names(i.data) <- seasons$season
  i.data$week <- as.numeric(row.names(i.data))
  # Second: Transform the data, summarize (to avoid duplicates) and remove na's
  # data.out.2<-reshape2::melt(i.data, "week", variable="season", value.name = "data", na.rm = T)
  # replace melt with gather
  season <- data <- week <- NULL
  data.out <- i.data %>%
    gather(season, data, -week, na.rm = T)
  # adds year, based in the i.cutoff.original value
  data.out$year <- NA
  data.out$year[data.out$week < i.cutoff.original] <- as.numeric(substr(data.out$season, 6, 9))[data.out$week < i.cutoff.original]
  data.out$year[data.out$week >= i.cutoff.original] <- as.numeric(substr(data.out$season, 1, 4))[data.out$week >= i.cutoff.original]
  data.out$season <- NULL
  # we aggregate in case data comes from two sources, for example when there are two parts of the same epidemic, notated as (1) and (2)
  # data.out<-aggregate(data ~ year + week, data=data.out, FUN=i.fun, na.rm=T)
  year <- week <- NULL
  data.out <- data.out %>%
    filter(!is.na(year) & !is.na(week)) %>%
    group_by(year, week) %>%
    summarise(data = i.fun(data, na.rm = T)) %>%
    arrange(year, week)
  # Third: create the structure of the final dataset, considering the i.range.x.final
  week.f <- i.range.x.final[1]
  week.l <- i.range.x.final[2]
  if (week.f > week.l) {
    # Surveillance period spans the year boundary: seasons run week.f..52/53
    # then 1..week.l; seasons containing a week 53 get the longer template.
    i.range.x.values.52 <- data.frame(week = c(week.f:52, 1:week.l), week.no = 1:(52 - week.f + 1 + week.l))
    i.range.x.values.53 <- data.frame(week = c(week.f:53, 1:week.l), week.no = 1:(53 - week.f + 1 + week.l))
    data.out$season <- ""
    data.out$season[data.out$week < week.f] <- paste(data.out$year - 1, data.out$year, sep = "/")[data.out$week < week.f]
    data.out$season[data.out$week >= week.f] <- paste(data.out$year, data.out$year + 1, sep = "/")[data.out$week >= week.f]
    seasons.all <- unique(data.out$season)
    seasons.53 <- unique(subset(data.out, data.out$week == 53 & !is.na(data.out$data))$season)
    seasons.52 <- seasons.all[!(seasons.all %in% seasons.53)]
    data.scheme <- rbind(
      merge(data.frame(season = seasons.52, stringsAsFactors = F), i.range.x.values.52, stringsAsFactors = F),
      merge(data.frame(season = seasons.53, stringsAsFactors = F), i.range.x.values.53, stringsAsFactors = F)
    )
    data.scheme$year <- NA
    data.scheme$year[data.scheme$week < week.f] <- as.numeric(substr(data.scheme$season, 6, 9))[data.scheme$week < week.f]
    data.scheme$year[data.scheme$week >= week.f] <- as.numeric(substr(data.scheme$season, 1, 4))[data.scheme$week >= week.f]
  } else {
    # Surveillance period within a single calendar year.
    i.range.x.values.52 <- data.frame(week = week.f:min(52, week.l), week.no = 1:(min(52, week.l) - week.f + 1))
    i.range.x.values.53 <- data.frame(week = week.f:week.l, week.no = 1:(week.l - week.f + 1))
    data.out$season <- ""
    data.out$season <- paste(data.out$year, data.out$year, sep = "/")
    seasons.all <- unique(data.out$season)
    seasons.53 <- unique(subset(data.out, data.out$week == 53 & !is.na(data.out$data))$season)
    seasons.52 <- seasons.all[!(seasons.all %in% seasons.53)]
    data.scheme <- rbind(
      merge(data.frame(season = seasons.52, stringsAsFactors = F), i.range.x.values.52, stringsAsFactors = F),
      merge(data.frame(season = seasons.53, stringsAsFactors = F), i.range.x.values.53, stringsAsFactors = F)
    )
    data.scheme$year <- NA
    data.scheme$year <- as.numeric(substr(data.scheme$season, 1, 4))
  }
  # Merge the observed values onto the full season/week template, order by
  # year*100+week and rename the value column to i.name.
  data.final <- merge(data.scheme, data.out, by = c("season", "year", "week"), all.x = T)
  data.final$yrweek <- data.final$year * 100 + data.final$week
  data.final$week.no <- NULL
  data.final <- data.final[order(data.final$yrweek), ]
  names(data.final)[names(data.final) == "data"] <- i.name
  transformdata.back.output <- list(data = data.final)
  transformdata.back.output$call <- match.call()
  return(transformdata.back.output)
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.