blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cc742b32ba479cef166565fc0f59d447c57d3ada
|
396df2552224ffcb0294fe6e297b231aa2e59e68
|
/_working/0129b-fuel-prices.R
|
8a7e24104ac271533f61dd360ed0f89ea8c24439
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
ellisp/blog-source
|
d072bed980a5074d6c7fac03be3635f70ab5f098
|
1227f83df23af06da5280214ac7f2e0182be5707
|
refs/heads/master
| 2023-09-05T07:04:53.114901
| 2023-08-27T21:27:55
| 2023-08-27T21:27:55
| 122,695,494
| 17
| 8
| null | 2023-08-27T21:15:33
| 2018-02-24T02:36:45
|
HTML
|
UTF-8
|
R
| false
| false
| 4,106
|
r
|
0129b-fuel-prices.R
|
# Fuel-price analysis: compare regional 91-octane petrol prices in NZ with
# Auckland, before and after the Auckland regional fuel tax (1 July 2018).
library(tidyverse)
library(scales)
library(openxlsx)
library(forecast)
library(nlme)

# download manually from https://www.dropbox.com/s/i75ha9n1jc0vm2c/fuel%20prices.xlsx?dl=0
# edit the "Central Plateau" sheet by setting the whole "Date" column to be in date format
fn <- "fuel prices data 2.xlsx"
sn <- getSheetNames(fn)
# keep only the regional sheets, which sit contiguously from Auckland to Fiordland
sn <- sn[which(sn == "Auckland"):which(sn == "Fiordland")]

# read every regional sheet into a preallocated list (no growing in the loop)
fuel_orig <- vector("list", length(sn))
for (i in seq_along(sn)) {
  tmp <- read.xlsx(fn, sheet = sn[i], cols = 1:7,
                   detectDates = TRUE, na.strings = c("NA", "n/a"))
  tmp[, "region"] <- sn[i]
  # heading of column 1 is missing for the Marlborough and Fiordland sheets, so we fix it
  names(tmp)[1] <- "Company"
  fuel_orig[[i]] <- tmp
}
fuel_df <- do.call("rbind", fuel_orig)
summary(fuel_df)

south_island <- c("Canterbury", "Nelson", "Otago", "Southland", "West Coast", "Marlborough", "Fiordland")
big_four <- c("CALTEX", "Z ENERGY", "BP", "MOBIL")

# long format: one row per company x date x region x fuel type
fuel_tidy <- fuel_df %>%
  select(-LPG) %>%
  gather(fueltype, value, -Company, -Date, -region) %>%
  filter(!is.na(value)) %>%
  mutate(island = ifelse(region %in% south_island, "South", "North"),
         company_type = ifelse(Company %in% big_four, "Big Four", "Smaller")) %>%
  mutate(region = fct_reorder(region, as.numeric(as.factor(island))),
         Company = fct_reorder(Company, value))

# regional trimmed-mean price of 91 octane per day
# (was `tr = 0.2`, which relied on partial argument matching for `trim`)
p91 <- fuel_tidy %>%
  filter(fueltype == "91") %>%
  group_by(region, island, Date) %>%
  summarise(value = mean(value, trim = 0.2)) %>%
  ungroup()

# each region's price relative to Auckland on the same day
p91_rel <- p91 %>%
  group_by(Date) %>%
  mutate(Auckland = value[region == "Auckland"]) %>%
  filter(! region %in% c("Auckland", "Wairarapa")) %>%
  mutate(perc_of_auck = value / Auckland) %>%
  ungroup()

svg("../img/0129b-petrol-comp-auckland.svg", 10, 8)
ggplot() +
  # shade the period from 1 July 2018 (regional fuel tax) onwards;
  # annotate() draws a one-off layer, replacing the previous
  # geom_rect(data = data.frame("hello world")) placeholder hack
  annotate("rect", xmin = as.Date("2018-07-01"), xmax = Inf, ymin = -Inf, ymax = Inf,
           fill = "blue", alpha = 0.1) +
  geom_ribbon(data = p91_rel, aes(x = Date, ymin = Auckland, ymax = value), fill = "grey", alpha = 0.5) +
  geom_line(data = p91_rel, aes(x = Date, y = Auckland), colour = "grey50") +
  geom_line(data = p91_rel, aes(x = Date, y = value, colour = island), size = 1.2) +
  facet_wrap(~region, ncol = 3) +
  scale_y_continuous("Price of 91 octane petrol compared to in Auckland\n", label = dollar) +
  labs(x = "2018; grey line shows Auckland",
       caption = "Source: pricewatch.co.nz, collated by @Economissive")
dev.off()

# Data on the difference between Auckland's average price and those in other areas:
diff_data <- fuel_tidy %>%
  filter(fueltype == "91" & company_type == "Big Four") %>%
  group_by(Date) %>%
  summarise(auck_v_rest =
              mean(value[region == "Auckland"]) -
              mean(value[region != "Auckland"]),
            auck_v_si =
              mean(value[region == "Auckland"]) -
              mean(value[island == "South"]),
            auck_v_ni =
              mean(value[region == "Auckland"]) -
              mean(value[island == "North" & region != "Auckland"])) %>%
  # indicator for observations on/after the tax start date
  mutate(post_tax = as.integer(Date >= as.Date("2018-07-01"))) %>%
  gather(comparison, value, -Date, -post_tax) %>%
  mutate(comparison = case_when(
    comparison == "auck_v_si"   ~ "Compared to South Island",
    comparison == "auck_v_ni"   ~ "Compared to rest of North island",
    comparison == "auck_v_rest" ~ "Compared to all NZ except Auckland"))

svg("../img/0129b-auck-minus-rest.svg", 9, 5)
ggplot(diff_data, aes(x = Date, y = value)) +
  facet_wrap(~comparison, ncol = 3) +
  geom_line() +
  # separate linear trends before and after the tax
  geom_smooth(aes(group = post_tax), method = "lm") +
  scale_y_continuous("Average price of 91 octane petrol in Auckland\nminus average price in comparison area",
                     label = dollar) +
  labs(x = "Date in 2018\nAverage prices have not been weighted by population or sales",
       caption = "Source: pricewatch.co.nz, collated by @Economissive") +
  ggtitle("Fuel prices in Auckland compared to three other comparison areas",
          "Restricted to prices from BP, Caltex, Mobil and Z Energy")
dev.off()

convert_pngs("0129b")
|
dd028762e5d2588a23ef48def96fef93fdef4322
|
0f8a97baf9c9373ea62476abbce05bf8f89a9363
|
/furnaceCycle.R
|
ebc916fa1ab0aad2e8a84d87ae3a6bc2f4f5201f
|
[] |
no_license
|
tastyCanOfMalk/historical.MAL
|
642d5b3fcef9a020f48d92d0593f6a72cf9dceaf
|
7f34f2496e4ef70182415a85df1a7c614f73d268
|
refs/heads/master
| 2020-04-20T09:35:06.681740
| 2019-07-24T13:59:06
| 2019-07-24T13:59:06
| 168,768,666
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,454
|
r
|
furnaceCycle.R
|
# Assign per-furnace cycle numbers (and human names) to non-aluminum heats.
# Expects a data frame `x` with columns request, furnace.cycle and alloy
# created earlier in the pipeline (TODO confirm upstream schema).

# since aluminum uses a different furnace, change all to NA
x$furnace.cycle[x$alloy == "aluminum"] <- NA

# working copy: first letter of furnace.cycle identifies the furnace
xx <- x %>%
  select(request, furnace.cycle, alloy) %>%
  filter(alloy != "aluminum") %>%
  mutate(furnace = str_sub(furnace.cycle, 1, 1),
         cycle = NA,
         name = NA)

# inspect rows with missing furnace.cycle
xx[is.na(xx$furnace.cycle), ]

# fill missing furnace codes down from the previous row
# (guard i > 1: a missing value in the very first row has no predecessor,
# and the original code would have errored on xx$furnace[[0]])
for (i in seq_len(nrow(xx))) {
  if (i > 1 && is.na(xx$furnace[[i]])) {
    xx$furnace[[i]] <- xx$furnace[[i - 1]]
  }
}

# number the cycles per furnace run: increment while the furnace code
# repeats, restart at 1 whenever it changes
# (first row is always cycle 1; set once, not on every loop iteration)
xx$cycle[[1]] <- 1
cycle.counter <- 1
for (i in seq_len(nrow(xx))[-1]) {
  if (xx$furnace[[i]] != xx$furnace[[i - 1]]) {
    cycle.counter <- 1
  } else {
    cycle.counter <- cycle.counter + 1
  }
  xx$cycle[[i]] <- cycle.counter
}

# add some personality to the furnace names
if (!require(lexicon)) install.packages("lexicon")
library(lexicon)
data("common_names")
# shuffled pool of names; one name per cycle
# NOTE: this shadows base::names in the global env (harmless for calls to
# names(), since function lookup skips non-function objects)
names <- sample(common_names)

name.counter <- 0
for (i in seq_len(nrow(xx))) {
  if (xx$cycle[[i]] == 1) {
    # a new cycle starts: move to the next name in the shuffled pool
    name.counter <- name.counter + 1
  }
  xx$name[[i]] <- names[[name.counter]]
}

x <- full_join(x, xx)
|
533036a9d615ac81085b0287b86ca4c79143b9cc
|
8f501777660f04ddadf06400074bc6b412c90fb9
|
/IsoriX/R/create_aliens.R
|
29c3b59d4ad3de9ce9746da0a69132a30b18c8e6
|
[] |
no_license
|
PhDMeiwp/IsoriX_project
|
db0e323fd2822a98cf16c4708fc9ef31df85b9f8
|
14510f948a3497a99554e80d563a9131d40550c0
|
refs/heads/master
| 2020-03-09T18:24:08.128374
| 2017-08-14T08:23:56
| 2017-08-14T08:23:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,439
|
r
|
create_aliens.R
|
#' Simulate datasets for calibrations or assignments
#'
#' This function allows to simulate data so to provide examples for the
#' calibration and for the assignment procedure. We name the simulated
#' individuals 'Aliens' so to make it clear that the data we use to illustrate
#' our package are not real data.
#'
#' The isotopic values for the organisms are assumed to be linearly related to
#' the ones from the environment. The linear function can be parameterized using
#' the first argument of the function (\code{calib_fn}). With this function the
#' user can simulate data for different sites.
#'
#' The number and locations of sites can be controlled in two ways. A first
#' possibility is to use the argument \code{n_sites}. The sites will then be
#' selected randomly among the locations present in the isoscape (argument
#' \code{isoscape}) provided to this function. An alternative possibility is to
#' provide a data frame containing three columns (\code{siteID}, \code{long} and
#' \code{lat}) to input the coordinates of the sampling sites manually.
#'
#' Irrespectively of how locations are chosen, a random number of observations
#' will be drawn, at each site, according to a uniform distribution bounded by
#' the values of the argument \code{min_n_samples} and \code{max_n_samples}.
#'
#' From the selected coordinates, the isotope values for the environment are
#' directly extracted from the corresponding point predictions stored in the
#' isoscape object. No uncertainty is considered during this process. Then the
#' linear calibration defines the means of the isotope values for the simulated
#' organisms. The actual values are then drawn from a Gaussian distribution
#' centered around such mean and a variance defined by the residual variance
#' (\code{resid_var}) input within the list \code{calib_fn}.
#'
#' @param calib_fn A \var{list} containing the parameter values describing the
#'   relationship between the isotope values in the environment and those in the
#'   simulated organisms. This list must contain three parameters: the
#'   intercept, the slope, and the residual variance.
#'
#' @param isoscape The output of the function \code{\link{isoscape}}
#'
#' @param coordinates An optional \var{data.frame} with columns \code{siteID},
#'   \code{long} and \code{lat}
#'
#' @param elevation_raster A \var{RasterLayer} containing an elevation raster
#'
#' @param n_sites The number of sites from which the simulated organisms
#'   originate (\var{integer})
#'
#' @param min_n_samples The minimal number of observations (\var{integer}) per
#'   site
#'
#' @param max_n_samples The maximal number of observations (\var{integer}) per
#'   site
#'
#' @return This functions returns a \var{data.frame} (see example for column
#'   names)
#'
#' @seealso \code{\link{calibfit}} for a calibration based on simulated data
#'
#' \code{\link{isofind}} for an assignment based on simulated data
#'
#' \code{\link{IsoriX}} for the complete work-flow of our package
#' @keywords simulate simulation
#' @examples
#'
#' ## The examples below will only be run if sufficient time is allowed
#' ## You can change that by typing e.g. IsoriX.options(example_maxtime = XX)
#' ## if you want to allow for examples taking up to ca. XX seconds to run
#' ## (so don't write XX but put a number instead!)
#'
#' if(IsoriX.getOption("example_maxtime") > 30) {
#'
#' ## We fit the models for Germany
#' GNIPDataDEagg <- prepdata(data = GNIPDataDE)
#'
#' GermanFit <- isofit(iso.data = GNIPDataDEagg)
#'
#' ## We build the isoscapes
#' isoscape <- isoscape(elevation.raster = ElevRasterDE, isofit = GermanFit)
#'
#' ## We create a simulated dataset with 25 sites and 5 observations per site
#' Aliens <- create_aliens(calib_fn = list(intercept = 3, slope = 0.5, resid_var = 5),
#'                         isoscape = isoscape,
#'                         elevation_raster = ElevRasterDE,
#'                         n_sites = 25,
#'                         min_n_samples = 5,
#'                         max_n_samples = 5)
#'
#' ## We display the simulated dataset
#' Aliens
#'
#' ## We plot the relationship between the environmental isotope values
#' ## and those from the simulated organisms
#' plot(tissue.value ~ env.value, data = Aliens, ylab = "Tissue", xlab = "Environment")
#' abline(3, 0.5, col = "blue") ## the true relationship
#'
#' ## We create a simulated dataset with 2 sites imputing coordinates manually
#' Aliens2 <- create_aliens(calib_fn = list(intercept = 3, slope = 0.5, resid_var = 5),
#'                          isoscape = isoscape,
#'                          coordinates = data.frame(siteID = c("Berlin", "Bielefeld"),
#'                                                   long = c(13.52134, 8.49914),
#'                                                   lat = c(52.50598, 52.03485)),
#'                          elevation_raster = ElevRasterDE,
#'                          n_sites = 25,
#'                          min_n_samples = 5,
#'                          max_n_samples = 5)
#'
#' head(Aliens2)
#'
#' }
#'
#' @export
create_aliens <- function(calib_fn = list(intercept = 3, slope = 0.5, resid_var = 5),
                          isoscape = NULL,
                          coordinates = NA,
                          elevation_raster = NULL,
                          n_sites = 1,
                          min_n_samples = 1,
                          max_n_samples = 10) {

  ## Complete the arguments
  .CompleteArgs(create_aliens)

  ## Choose locations for the aliens: either sample cells from the isoscape,
  ## or use the coordinates supplied by the user
  if (length(coordinates) == 1 && is.na(coordinates)) {
    LocationData <- data.frame(siteID = sample(seq_len(raster::ncell(isoscape$isoscape$mean)), n_sites, replace = FALSE))
    xy <- raster::xyFromCell(isoscape$isoscape$mean, LocationData$siteID)
    LocationData$long <- xy[, "x"]
    LocationData$lat <- xy[, "y"]
  } else {
    if (!all(c("siteID", "long", "lat") %in% colnames(coordinates))) {
      stop("the argument coordinates must contain the columns 'siteID', 'long' and 'lat'")
    }
    if (n_sites != nrow(coordinates)) {
      ## bug fix: was warnings(), which ignores its argument and just prints
      ## past warnings; warning() actually emits the message
      warning("the argument coordinates has been used so the argument 'n_sites' has been ignored")
    }
    LocationData <- coordinates
    xy <- coordinates[, c("long", "lat")]
    n_sites <- nrow(coordinates)
  }

  ## Elevation at each site, and a per-site sample size drawn uniformly
  ## between min_n_samples and max_n_samples
  LocationData$elev <- raster::extract(x = elevation_raster, y = xy)
  LocationData$n.samples <- round(stats::runif(n = n_sites, min = min_n_samples, max = max_n_samples))

  ## Predict environmental values at the locations
  LocationData$env.value <- raster::extract(isoscape$isoscape$mean, xy)

  ## Replicate the dataset per animal
  AlienData <- LocationData[rep(seq_len(nrow(LocationData)), times = LocationData$n.samples), ]
  AlienData$animalID <- factor(paste("Alien", seq_len(nrow(AlienData)), sep = "_"))

  ## Turning siteID into a factor
  AlienData$siteID <- factor(AlienData$siteID)

  ## Predict the tissue value for each animal: linear calibration gives the
  ## mean, resid_var the Gaussian noise around it
  AlienData$tissue.value <- stats::rnorm(n = nrow(AlienData),
                                         mean = rep(calib_fn$intercept + LocationData$env.value * calib_fn$slope,
                                                    times = LocationData$n.samples),
                                         sd = sqrt(calib_fn$resid_var))

  ## Cleanup and return
  rownames(AlienData) <- NULL
  return(AlienData[, c("siteID", "long", "lat", "elev", "animalID", "env.value", "tissue.value")])
}
|
31b2be8bac49fe470ff7ac43b941c1a2e63db36c
|
4700c8fa4b68ee1b2e02be10bbab72d60daed6c5
|
/R/beta.ab.R
|
aaa8ea31ead615ef4990751ef61cb917024b6aba
|
[] |
no_license
|
alyssamv/iAdapt
|
95e1b1826ccd40c64a7701b08c329e05d0bbe531
|
65c3912d66e77ee4172aadb9ebdc3205a85408fd
|
refs/heads/master
| 2021-08-07T12:15:23.490531
| 2019-08-28T15:07:33
| 2019-08-28T15:07:33
| 155,378,228
| 1
| 0
| null | 2019-08-13T22:52:33
| 2018-10-30T12:02:32
|
R
|
UTF-8
|
R
| false
| false
| 1,155
|
r
|
beta.ab.R
|
#' @title Generates parameters for the beta distribution
#'
#' @description Function \code{beta.ab()} searches a grid of alpha values for
#' the (alpha, beta) pair whose beta distribution matches the requested mean
#' and comes closest to the requested variance.
#'
#' @return List with elements \code{a} and \code{b}: the alpha and beta values
#' for generating a beta random variable for a dose.
#'
#' @param m Vector of mean efficacies per dose. Values range from 0 - 100. (e.g, T cell persistence - values b/w 5 and 80 per cent)
#' @param v Vector of efficacy variances per dose. Values range from 0 - 1. (e.g., 0.01)
#'
#' @export
#'
#' @keywords internal
beta.ab <- function(m, v) {
  # Candidate alpha values; for each, beta is forced by the target mean
  # (mean of Beta(a, b) is a / (a + b) = m).
  alpha_grid <- seq(0.5, 20, 0.01)
  beta_grid <- alpha_grid * (1 - m) / m
  # Variance implied by each candidate pair.
  ab_sum <- alpha_grid + beta_grid
  var_grid <- alpha_grid * beta_grid / ((ab_sum + 1) * ab_sum^2)
  # Keep the pair(s) whose implied variance is closest to the target.
  deviation <- abs(var_grid - v)
  best <- which(deviation == min(deviation))
  list(a = alpha_grid[best],
       b = beta_grid[best])
}
|
c34ec91b0e8af7fd5bfe9c2187775e472008e891
|
cea3466a2947e429a4f4fff5a65df740241c8190
|
/R/pdims.R
|
4df3830cfd54da8a57a8dcbd5b89fcfe73468a68
|
[
"MIT"
] |
permissive
|
cran/term
|
73420e2257b6a0ed12a063a0133ec1e25ba4aa3d
|
167b06989f44e37d0dd592b0a7ff5470edd91b65
|
refs/heads/master
| 2022-10-08T15:13:04.078807
| 2022-09-29T15:20:11
| 2022-09-29T15:20:11
| 236,950,026
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 908
|
r
|
pdims.R
|
#' @export
universals::pdims
#' @details Errors if the parameter dimensions are invalid or inconsistent.
#'
#' A named list of the dimensions of each parameter can be converted
#' into the equivalent [term-vector()] using [term()].
#'
#' @inherit universals::pdims
#' @export
#'
#' @examples
#' pdims(term("alpha[1]", "alpha[3]", "beta[1,1]", "beta[2,1]"))
pdims.term <- function(x, ...) {
  # Validate before computing: no missing terms, no conflicting dimensions.
  if (anyNA(x)) {
    abort_chk("`x` must not have any missing values")
  }
  if (is_inconsistent_terms(x)) {
    abort_chk("`x` must have terms with consistent parameter dimensions")
  }
  # Group terms by parameter name and take the maximum index per parameter.
  lapply(split(x, pars_terms(x)), max_index)
}
#' @details Errors if the parameter dimensions are inconsistent.
#'
#' @inherit universals::pdims
#' @export
#'
#' @examples
#' pdims(as_term_rcrd(term("alpha[1]", "alpha[3]", "beta[1,1]", "beta[2,1]")))
pdims.term_rcrd <- function(x, ...) {
  # Coerce to a term vector and delegate to the term method.
  pdims(as_term(x))
}
|
4c444b331e62b884eab2ee851fe51319cc66469c
|
c723b14038ea8628ceb74ce04e291c281cc976d4
|
/R/ltx_sideways.R
|
dde8212a496d1627c40706f66b0e52c9d054405a
|
[
"MIT"
] |
permissive
|
hokerl/ltxtab
|
a14b12dc3951789aa4e7f6a6fddb28267f56a1c0
|
483d2678d533b78f8620f347859572ea257e9717
|
refs/heads/master
| 2021-10-19T08:29:44.423944
| 2019-02-19T14:48:27
| 2019-02-19T14:48:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 370
|
r
|
ltx_sideways.R
|
#' Rotate LaTeX cell contents
#'
#' Wraps the contents of one table cell in a LaTeX \code{sideways}
#' environment so it is typeset rotated.
#'
#' @param df table data (coerced to a data frame)
#' @param row row number (or row name)
#' @param col column number (or column name)
#'
#' @return table data as a data frame, with the selected cell wrapped in
#'   \code{\\begin{sideways}...\\end{sideways}}
#' @export
#'
#' @examples \dontrun{
#' ltx_sideways(df, "test", 1)
#' }
ltx_sideways <- function(df, row, col){
  df <- as.data.frame(df)
  df[row, col] <- paste0("\\begin{sideways}", df[row, col], "\\end{sideways}")
  return(df)
}
|
561508c4fd7cd93ab3758020f8ce937af39d5be8
|
06d57535f6d974c4923a0882e967c2fd829f058e
|
/ClassPerformance/server.R
|
75762126da0cd753fef6b7e8a8a977c0b6d60ea9
|
[
"MIT"
] |
permissive
|
dhadka/investr
|
01cd1e1e0a659df627f1fc516dad16db6df0e9fd
|
a1705768196aaa8b0ced1d43b2c4d6895acf6eca
|
refs/heads/master
| 2016-09-10T18:58:29.947885
| 2014-12-12T17:46:57
| 2014-12-12T17:46:57
| 23,683,643
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,082
|
r
|
server.R
|
# Copyright (c) 2014 David Hadka
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
library(shiny)
library(quantmod)
library(stringr)
# Download monthly adjusted quotes for a set of ticker symbols and merge
# them into one series per column.
#
# Args:
#   symbols:  character vector of ticker symbols
#   ratio:    target allocation per symbol (defaults to equal weights)
#   duration: look-back window in days (used only for the default `from`)
#   from, to: date range of quotes to fetch
#   src:      quantmod data source
#
# Returns a list with elements: symbols, ratio, and quotes (merged monthly
# adjusted prices with NAs carried forward).
lookupSymbols <- function(symbols, ratio=rep(1, length(symbols)), duration=10*365, from=Sys.Date()-duration, to=Sys.Date(), src="yahoo") {
  result <- list()
  for (i in seq_along(symbols)) {
    quotes <- getSymbols(symbols[i], from=from, to=to, warnings=FALSE, src=src, auto.assign=FALSE)
    # Bug fix: the original tested is.null(data), which resolved to the base
    # `data` function (never NULL), so the check always passed. Test the
    # fetched quotes instead.
    if (!is.null(quotes)) {
      quotes <- to.monthly(quotes)
      result <- append(result, list(list(symbol=symbols[i], quotes=quotes, ratio=ratio[i])))
    }
  }
  merged <- do.call(merge, lapply(result, function(e) Ad(e$quotes)))
  merged <- na.locf(merged)
  colnames(merged) <- sapply(result, function(e) e$symbol)
  list(symbols=sapply(result, function(e) e$symbol),
       ratio=sapply(result, function(e) e$ratio),
       quotes=merged)
}
# Total market value: quantity times per-share price, summed over holdings.
# Prices that are NA (no quote) count as zero.
to.total <- function(qty, per.share) {
  price <- as.numeric(per.share)
  price[is.na(price)] <- 0
  sum(qty * price)
}
# Convert a dollar amount into share quantities, split across holdings
# according to `ratio`. Holdings with no price (NA or zero) receive zero
# weight; if every weight is zero, fall back to equal weights.
to.qty <- function(amount, ratio, per.share) {
  price <- as.numeric(per.share)
  price[is.na(price)] <- 0
  # Zero out weights for unpriceable holdings, then normalize.
  w <- ratio
  w[price == 0] <- 0
  total_w <- sum(w)
  if (total_w == 0) {
    w <- rep(1 / length(w), length(w))
  } else {
    w <- w / total_w
  }
  shares <- amount * w / price
  # 0/0 divisions (zero weight at zero price) become NaN; treat as no shares.
  shares[is.nan(shares)] <- 0
  shares
}
# Simulate investing `initial` dollars plus `monthly` contributions into the
# symbols in `data` (output of lookupSymbols), allocated by `ratio`, with
# optional annual rebalancing in the anniversary month of the first quote.
#
# Returns a list of xts series: portfolio `value` and cumulative `contribution`.
invest <- function(data, ratio=data$ratio, rebalance=TRUE, initial=100000, monthly=0) {
  quotes <- data$quotes
  symbols <- data$symbols
  M <- length(symbols)
  N <- nrow(quotes)
  value <- rep(0, N)
  contribution <- rep(0, N)
  qty <- rep(0, M)
  # Normalize the allocation; fall back to equal weights when it is not usable.
  if (sum(ratio) <= 0) {
    ratio <- rep(1/M, M)
    # typo fixed: was "postitive"
    warning("Ratio values are not positive, defaulting to 1/M")
  } else {
    ratio <- ratio / sum(ratio)
  }
  # Initial purchase at the first month's prices.
  qty <- qty + to.qty(initial, ratio, quotes[1,])
  value[1] <- to.total(qty, quotes[1,])
  contribution[1] <- initial
  # seq_len(N)[-1] is empty when N == 1 (the old 2:N would run backwards).
  for (i in seq_len(N)[-1]) {
    qty <- qty + to.qty(monthly, ratio, quotes[i,])
    value[i] <- to.total(qty, quotes[i,])
    contribution[i] <- contribution[i-1] + monthly
    # Rebalance once a year, in the same calendar month as the first quote.
    if (rebalance && format(time(quotes)[i], "%m") == format(time(quotes)[1], "%m")) {
      qty <- to.qty(value[i], ratio, quotes[i,])
    }
  }
  list(value=xts(value, index(quotes)), contribution=xts(contribution, index(quotes)))
}
# Percent gain of an invest() result: growth over contributions, as a
# percentage of total contributions.
to.percent <- function(result) {
  gain <- result$value - result$contribution
  100 * gain / result$contribution
}
# Extract the portfolio value series from an invest() result.
to.value <- function(result) {
  result[["value"]]
}
# Extract the cumulative contribution series from an invest() result.
to.contribution <- function(result) {
  result[["contribution"]]
}
# TODO: Since Google/Yahoo do not currently support exporting market indices,
# we have to instead rely on mutual funds to represent each asset class.

# Mutual-fund tickers standing in for the broad asset classes.
class.symbols <- c(
  "FMAGX",
  "FVDFX",
  "FDEGX",
  "FSMVX",
  "WSMGX",
  "VISVX",
  "FOSFX",
  "FSTGX",
  "FHIGX",
  "PCRIX")

# Display names, parallel to class.symbols
# (assignment fixed from `=` to `<-` for consistency with the rest of the file).
class.names <- c(
  "Large Growth",
  "Large Value",
  "Mid Growth",
  "Mid Value",
  "Small Growth",
  "Small Value",
  "International",
  "Gov't Bond",
  "Muni Bond",
  "Commodities")

# Mutual-fund tickers standing in for the market sectors.
sector.symbols <- c(
  "FSTCX",
  "FSCPX",
  "FDFAX",
  "FSENX",
  "FSAGX",
  "FSVLX",
  "FRESX",
  "FSPHX",
  "FCYIX",
  "FSRFX",
  "FSDPX",
  "FSPTX",
  "FSUTX")

# Display names, parallel to sector.symbols.
sector.names <- c(
  "Communications",
  "Consumer Cyclical",
  "Consumer Defensive",
  "Energy",
  "Precious Metals",
  "Financial",
  "Real Estate",
  "Healthcare",
  "Industrial",
  "Transportation",
  "Natural Resources",
  "Technology",
  "Utilities")

# Quote window: roughly the last `years` years, starting from 1 January.
years <- 10
current.year <- format(Sys.Date(), "%Y")
start <- as.Date(paste0(current.year, "-01-01")) - years * 365
end <- Sys.Date()

# Fetch the monthly quote histories once at app startup (network access).
class.quotes <- lookupSymbols(class.symbols, from = start, to = end)
sector.quotes <- lookupSymbols(sector.symbols, from = start, to = end)
# Shiny server: shows either a performance plot or a ranking table for a
# group of funds (asset classes or sectors), driven by the UI inputs.
# NOTE(review): the third argument is conventionally named `session`; it is
# called `server` here and never used — confirm before renaming.
shinyServer(function(input, output, server) {
# Switch the main panel between the plot and the table.
output$ui <- renderUI({
if (input$plot) {
plotOutput("plot")
} else {
tableOutput("table")
}
})
# One line per fund: rolling 12-month percent change of a 100%-in-that-fund
# portfolio simulated by invest().
output$plot <- renderPlot({
# Pick the fund group selected in the UI.
if (input$view == "Class") {
names <- class.names
quotes <- class.quotes
} else {
names <- sector.names
quotes <- sector.quotes
}
data <- NULL
for (j in 1:length(names)) {
# Allocate everything to fund j only.
ratio <- rep(0, length(names))
ratio[j] <- 1
result <- invest(quotes, ratio)
value <- as.numeric(result$value)
contribution <- as.numeric(result$contribution)
# Centered rolling window: percent change across each 12-month span.
entry <- rollapply(result$value, 12, function(x) {
x <- as.numeric(x)
100*(x[length(x)]-x[1])/x[1]
}, align="center")
entry <- na.omit(entry)
# Accumulate one merged series per fund.
if (is.null(data)) {
data <- entry
} else {
data <- merge(data, entry)
}
}
matplot(index(data), data, type='l', ylab="Percent Change", xlab="Date", lwd=2)
grid(nx=NA, ny=NULL)
abline(h=0)
# NOTE(review): col recycles over 6 values and lty over 5, so with 10+
# funds several lines share a colour/line-type combination.
legend("topleft", names, col=1:6, lty=1:5, cex=0.7, lwd=2)
})
# Ranking table: funds ordered best-to-worst, either per calendar year or
# per cumulative look-back period.
output$table <- renderTable({
if (input$view == "Class") {
names <- class.names
quotes <- class.quotes
} else {
names <- sector.names
quotes <- sector.quotes
}
if (!input$cumulative) {
# Year-by-year returns: one row of `data` per calendar year in the window.
data <- matrix(0, nrow=years, ncol=length(names))
for (j in 1:length(names)) {
ratio <- rep(0, length(names))
ratio[j] <- 1
result <- invest(quotes, ratio)
value <- as.numeric(result$value)
contribution <- as.numeric(result$contribution)
# Percent change over each consecutive 12-month block of the monthly series.
data[,j] <- sapply(seq(12,length(value),12), function(i) 100*(value[i]-value[i-11])/value[i-11])
}
colnames(data) <- names
# NOTE(review): year labels are hard-coded around 2013 rather than derived
# from `current.year` computed above — they go stale; confirm and fix.
rownames(data) <- sprintf("%d", (2013-years+1):2013)
# Build an HTML cell per fund, ranked best-to-worst within each year.
output <- matrix("", nrow=length(names), ncol=years)
for (i in 1:years) {
ordering <- rev(order(data[i,]))
for (j in 1:length(names)) {
output[j,i] <- paste("<div style=\"text-align: center\">", names[ordering[j]], "<br><font size=\"-2\">", round(data[i,ordering[j]], 1), "%</font></div>", sep="")
}
}
colnames(output) <- sprintf("%d", (2013-years+1):2013)
} else {
# Cumulative returns over fixed look-back periods ending at the last month.
periods <- 6
period.names <- c("Last Month", "Last 3 Months", "Last 6 Months", "Last Year", "Last 5 Years", "Last 10 Years")
data <- matrix(0, nrow=periods, ncol=length(names))
for (j in 1:length(names)) {
ratio <- rep(0, length(names))
ratio[j] <- 1
result <- invest(quotes, ratio)
value <- as.numeric(result$value)
contribution <- as.numeric(result$contribution)
N <- length(value)
data[1,j] <- 100*(value[N]-value[N-1])/value[N-1]
data[2,j] <- 100*(value[N]-value[N-3])/value[N-3]
data[3,j] <- 100*(value[N]-value[N-6])/value[N-6]
data[4,j] <- 100*(value[N]-value[N-12])/value[N-12]
data[5,j] <- 100*(value[N]-value[N-5*12])/value[N-5*12]
data[6,j] <- 100*(value[N]-value[1])/value[1]
}
colnames(data) <- names
rownames(data) <- period.names
# Same HTML ranking layout as above, one column per look-back period.
output <- matrix("", nrow=length(names), ncol=periods)
for (i in 1:periods) {
ordering <- rev(order(data[i,]))
for (j in 1:length(names)) {
output[j,i] <- paste("<div style=\"text-align: center;\">", names[ordering[j]], "<br><font size=\"-2\">", round(data[i,ordering[j]], 1), "%</font></div>", sep="")
}
}
colnames(output) <- period.names
}
output
},
include.rownames = FALSE,
# Cells contain raw HTML; pass them through unescaped.
sanitize.text.function = function(x) x,
sanitize.colnames.function = function(x) paste("<div style=\"text-align: center\">", x, "</div>", sep=""))
})
|
210ec52c51ecc2f89cbd8f35e2f236c3672ba53d
|
b2a1bbadcfdec95b35702a96bb5817578bc64093
|
/CaretProjectVisSVM.R
|
2a66a38b673b8cba95c9c3aea8c23205aa5018a5
|
[] |
no_license
|
tenfoldpaper/caret_project
|
35c2d01d5f0ec7c329d88b335368c0f052ef9db6
|
96e99383cc5cebdd8eef83f73a60fd483a3a86f1
|
refs/heads/master
| 2020-03-06T19:00:04.889269
| 2018-03-30T16:45:52
| 2018-03-30T16:45:52
| 127,018,560
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,896
|
r
|
CaretProjectVisSVM.R
|
# SVM visualisation written by Seongjin Bien, TquanT 2018
library(shiny)
library(ggplot2)
# UI: sidebar with controls for the train/test split and the SVM cost values
# to overlay, plus text panes showing one test observation, its predicted
# quality, and its actual quality; the main panel holds the accuracy plot.
ui <- fluidPage(
titlePanel("Graphs"),
sidebarLayout(
sidebarPanel(
# Train/test split of the data set.
# NOTE(review): "80/20" and "50/50" sum to 100 but "64/33" does not —
# presumably meant "67/33" to match the "oneThird" value; confirm.
radioButtons("vsize", "Select the training sample size:",
c("80/20" = "eightyTwenty",
"64/33" = "oneThird",
"50/50" = "halfHalf")),
# Cost (C) values to draw; up to five can be selected at once.
checkboxGroupInput("vc", "Select Cost value(s):",
choiceNames = list("2", "4", "6", "8", "10"),
choiceValues = list("2", "4", "6", "8", "10"),
selected = "2"
),
print("Test data details:"),
verbatimTextOutput("testData"),
print("Test data quality prediction:"),
verbatimTextOutput("testQuality"),
print("Test data real quality:"),
verbatimTextOutput("realQuality"),
# Draws a fresh random observation from the held-out test set.
actionButton("randomize", "Randomize data!")
),
mainPanel(
plotOutput("svmplot1")
)
)
)
# Shiny server. Relies on objects created outside the app: the caret fits
# svmET.train / svmOT.train / svmHH.train and the test set eightTwoTest.white.
server <- function(input, output){
  # Reactive accessors for the UI controls.
  vsizeget <- reactive(input$vsize)
  vcget <- reactive(input$vc)

  # Tuning grids (sigma x C x Accuracy) for each train/test split.
  svmETdf <- svmET.train$results
  svmOTdf <- svmOT.train$results
  svmHHdf <- svmHH.train$results

  # Currently displayed test observation. Starting from row 1 means the text
  # panels are populated immediately; the original defined its outputs inside
  # observeEvent(), so they stayed blank until the first button press even
  # though an initial observation had been computed.
  testData <- reactiveVal(eightTwoTest.white[1, ])

  # Draw a new random test observation on each button press.
  observeEvent(input$randomize, {
    testData(eightTwoTest.white[sample(seq_len(nrow(eightTwoTest.white)), 1), ])
  })

  output$testData <- renderPrint(
    print(testData())
  )
  output$testQuality <- renderPrint(
    print(predict(svmET.train, testData()))
  )
  output$realQuality <- renderPrint(
    print(testData()$quality)
  )

  # One accuracy-vs-sigma curve per selected cost value. Replaces the five
  # copy-pasted geom_line()/geom_point() pairs per branch of the original
  # (which relied on subset(df, C == NA) returning zero rows for unselected
  # slots). The y-limits per split are kept from the original.
  accuracy_plot <- function(df, ymin) {
    ggplot(subset(df, C %in% vcget()),
           aes(x = sigma, y = Accuracy, color = C, group = C)) +
      geom_line() +
      geom_point() +
      ylim(ymin, 0.6510)
  }

  output$svmplot1 <- renderPlot(
    switch(vsizeget(),
           eightyTwenty = accuracy_plot(svmETdf, 0.6080),
           oneThird     = accuracy_plot(svmOTdf, 0.5080),
           halfHalf     = accuracy_plot(svmHHdf, 0.5080))
  )
}
shinyApp(ui=ui, server=server)
|
2b2af28122276fb3211cb48967d28cfdcff79083
|
608ec1c8c815281933c66861fca044b294e310d2
|
/packages.R
|
35973e9a5437567969ac93c2890e439d906101fe
|
[] |
no_license
|
nohturfft/BDiB_2021_ChIP_Seq
|
18ea7c3fac7aa4961550e83865a98477d6150796
|
758b3e29b0222e104abf57f1c4a85b724f2fcaa2
|
refs/heads/main
| 2023-03-23T12:22:26.621345
| 2021-03-11T17:15:46
| 2021-03-11T17:15:46
| 346,429,977
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,544
|
r
|
packages.R
|
#################
# Load packages #
#################
# Loads every package required by the ChIP-seq analysis scripts; run once at
# the top of a session. Help for specific packages can be obtained as shown
# in the code chunk below (remove the '#' to run the code).
# help(package="ChIPpeakAnno")
# help(package="biomaRt")
library(readr)
requireNamespace("knitr")
library(scales)
# ChIPpeakAnno package (https://doi.org/doi:10.18129/B9.bioc.ChIPpeakAnno)
# Annotation of peaks identified by ChIP-seq or other experiments that produce
# large numbers of chromosome coordinates.
library(ChIPpeakAnno)
# biomaRt package (https://doi.org/doi:10.18129/B9.bioc.biomaRt)
# "Interface to BioMart databases (e.g. Ensembl, COSMIC, Wormbase and Gramene)".
# Used to map gene identifiers.
library(biomaRt)
# org.Hs.eg.db package (https://doi.org/doi:10.18129/B9.bioc.org.Hs.eg.db)
# A package with human genome annotation data.
library(org.Hs.eg.db)
# rstudioapi package (https://cran.r-project.org/package=rstudioapi)
# The 'rstudioapi' package is useful for programmatically obtaining the current
# script name and its parent directory.
requireNamespace("rstudioapi")
# pryr package (https://cran.r-project.org/package=pryr)
# We will use a function from the 'pryr' package to determine the
# size of a data object.
requireNamespace("pryr")
# magrittr package (https://cran.r-project.org/package=magrittr)
# "Provides a mechanism for chaining commands with a new forward-pipe operator, %>%.
# This operator will forward a value, or the result of an expression, into the next
# function call/expression."
library(magrittr)
|
06d2e50c0c7eb9112a0fdb569a62d160c76f0bfe
|
2c116d1ab776aa297d80e864fb4c7b9b9e44f62d
|
/r-package-setup.R
|
83ffe57723f6c7cdf0e2a860288bef44d09cf7ea
|
[] |
no_license
|
jaybee84/r-packages
|
b07a9e9d4273ef2b7fbb1dfb82046749ee553406
|
d1cd077323c7540478c3c2f05c1a5d6bb037cfc8
|
refs/heads/master
| 2020-08-28T05:46:08.928340
| 2019-10-25T18:45:49
| 2019-10-25T18:45:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 748
|
r
|
r-package-setup.R
|
###############################
#### Set up an R package ####
###############################
# Walkthrough of the usethis/devtools workflow for creating a new package.
# Intended to be run interactively, step by step -- not sourced as a script.
library("usethis")
library("devtools")
## Create package structure
create_package("mypackage")
## Helper functions to customize package, e.g.:
use_mit_license("My Name")
## Check package with devtools
check()
## Add a new R file within R/
use_r("create_message.R") # write a function in this file
## Add roxygen documentation. In RStudio: Code -> Insert Roxygen Skeleton (or
## Ctrl+Alt+Shift+R)
## Edit roxygen comments
## Generate documentation and updated NAMESPACE:
document()
## Declare a dependency for your package: edit the DESCRIPTION or use
## usethis::use_package()
use_package("glue", "Imports")
## Install your package
install()
|
2d3474129a85b9c51bd591474b480c9b4a4c4270
|
d1ed29eb17fd79cd5adfa4276254c973f3d3c60a
|
/man/dpr_create_package.Rd
|
3eadc8775fce3742ca49718ee125ecd95dfa813d
|
[
"MIT"
] |
permissive
|
BYUIDSS/DataPushR
|
f7527a6ab34a1de08e2e71b508a89bb21b519073
|
561365687a35b030ea57303ecf9a99d8f8d3af22
|
refs/heads/master
| 2021-02-06T00:26:01.292411
| 2020-05-29T19:51:35
| 2020-05-29T19:51:35
| 243,853,077
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 984
|
rd
|
dpr_create_package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dpr_create_package.R
\name{dpr_create_package}
\alias{dpr_create_package}
\title{Create an R package for data}
\usage{
dpr_create_package(
package_name,
export_folder = getwd(),
git_remote,
list_data = NULL
)
}
\arguments{
\item{package_name}{is the name of the created data R package.}
\item{export_folder}{is the base folder where the package folder will be created.}
\item{git_remote}{is the `HTTPS` url of the GitHub remote.}
\item{list_data}{is a list object of named objects that can be written to a csv. If NULL then no data writing actions happen.}
}
\description{
This function automates the process of building a Github R package with the desired data stored in the `raw-data` folder.
}
\examples{
dd <- read_csv(system.file("extdata", "Draft_vietnam.csv", package = "DataPushR"))
dpr_create_package(list_data = list(dat_draft = dd), package_name = "Test3", export_folder = getwd())
}
|
144486447fd65249f9940ecd65926105fde65048
|
b05693137745a6706707b786d94fa01124864ca8
|
/man/safely.Rd
|
db075ca685773922466da65c9c35cd542baddae4
|
[
"MIT"
] |
permissive
|
BB1464/purrr
|
f19ea507aacba57ae636eab26192498bd094183c
|
5aca9df41452f272fcef792dbc6d584be8be7167
|
refs/heads/master
| 2023-05-09T07:58:33.385849
| 2021-04-12T07:24:19
| 2021-04-12T07:24:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,329
|
rd
|
safely.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/output.R
\name{safely}
\alias{safely}
\alias{quietly}
\alias{possibly}
\alias{auto_browse}
\title{Capture side effects.}
\usage{
safely(.f, otherwise = NULL, quiet = TRUE)
quietly(.f)
possibly(.f, otherwise, quiet = TRUE)
auto_browse(.f)
}
\arguments{
\item{.f}{A function, formula, or vector (not necessarily atomic).
If a \strong{function}, it is used as is.
If a \strong{formula}, e.g. \code{~ .x + 2}, it is converted to a function. There
are three ways to refer to the arguments:
\itemize{
\item For a single argument function, use \code{.}
\item For a two argument function, use \code{.x} and \code{.y}
\item For more arguments, use \code{..1}, \code{..2}, \code{..3} etc
}
This syntax allows you to create very compact anonymous
functions. Note that formula functions conceptually take dots
(that's why you can use \code{..1} etc). They silently ignore
additional arguments that are not used in the formula expression.
If \strong{character vector}, \strong{numeric vector}, or \strong{list}, it is
converted to an extractor function. Character vectors index by
name and numeric vectors index by position; use a list to index
by position and name at different levels. If a component is not
present, the value of \code{.default} will be returned.}
\item{otherwise}{Default value to use when an error occurs.}
\item{quiet}{Hide errors (\code{TRUE}, the default), or display them
as they occur?}
}
\value{
\code{safely}: wrapped function instead returns a list with
components \code{result} and \code{error}. If an error occurred, \code{error} is
an \code{error} object and \code{result} has a default value (\code{otherwise}).
Else \code{error} is \code{NULL}.
\code{quietly}: wrapped function instead returns a list with components
\code{result}, \code{output}, \code{messages} and \code{warnings}.
\code{possibly}: wrapped function uses a default value (\code{otherwise})
whenever an error occurs.
}
\description{
These functions wrap functions so that instead of generating side effects
through printed output, messages, warnings, and errors, they return enhanced
output. They are all adverbs because they modify the action of a verb (a
function).
}
\details{
If you would like to include a function created with \code{safely}, \code{slowly}, or
\code{insistently} in a package, see \link{faq-adverbs-export}.
}
\examples{
safe_log <- safely(log)
safe_log(10)
safe_log("a")
list("a", 10, 100) \%>\%
map(safe_log) \%>\%
transpose()
# This is a bit easier to work with if you supply a default value
# of the same type and use the simplify argument to transpose():
safe_log <- safely(log, otherwise = NA_real_)
list("a", 10, 100) \%>\%
map(safe_log) \%>\%
transpose() \%>\%
simplify_all()
# To replace errors with a default value, use possibly().
list("a", 10, 100) \%>\%
map_dbl(possibly(log, NA_real_))
# For interactive usage, auto_browse() is useful because it automatically
# starts a browser() in the right place.
f <- function(x) {
y <- 20
if (x > 5) {
stop("!")
} else {
x
}
}
if (interactive()) {
map(1:6, auto_browse(f))
}
# It doesn't make sense to use auto_browse with primitive functions,
# because they are implemented in C so there's no useful environment
# for you to interact with.
}
|
1d45f084b5735b79cd48f775981ebf6cb893c5ef
|
2af16e2c4eb8f17acd739349d3b7f8eab00124d1
|
/R/mainKmeans.R
|
414a034054f55703b636e56df4a0511ea3dc7f44
|
[] |
no_license
|
boulbi777/k-means-clustering-using-cpp
|
6e954daa3dd096ae82cb4fab32099a809740930d
|
8613b45c25c4794be0b6048740f01d2b9c47f9cb
|
refs/heads/master
| 2022-12-12T22:13:10.754701
| 2020-09-06T13:37:36
| 2020-09-06T13:37:36
| 293,289,606
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,601
|
r
|
mainKmeans.R
|
###################################################################################
##' Run K-means with multiple random restarts
##'
##' Runs the K-means algorithm from \code{nbinit} random initialisations
##' (serially or in parallel), using either the pure-R implementation
##' (\code{singleKmeans}) or the C++ one (\code{singleKmeansC}), and returns
##' the solution with the smallest within-cluster sum of squares.
##'
##' @param x numeric. Data matrix (one row per observation).
##' @param K numeric. Number of clusters.
##' @param nbinit numeric. Number of random initialisations of the algorithm.
##' @param nbCPU numeric. Number of CPUs (if nbCPU>1, the runs are parallelised).
##' @param useC boolean. If TRUE, the C++ code is called.
##'
##' @examples
##' set.seed(123)
##' ech <- simuledata(100, 3)
##'
##' set.seed(123)
##' T1 <- Sys.time()
##' res <- myKmeans(ech$x, 4, 200, 1, FALSE)
##' T2 <- Sys.time()
##' set.seed(123)
##' res2 <- myKmeans(ech$x, 4, 200, 2, TRUE)
##' T3 <- Sys.time()
##' all.equal(res, res2)
##' difftime(T3, T2)
##' difftime(T2, T1)
##'
##' @return list
##' @export
##'
myKmeans <- function(x, K, nbinit, nbCPU, useC){
  checkInputs(x, K, nbinit, nbCPU, useC)
  # One random set of K starting centers per initialisation.
  all.centers <- giveStarting(x, K, nbinit)
  all.res <- NULL
  if (nbCPU == 1){
    if (useC)
      all.res <- lapply(1:nbinit, function(it) singleKmeansC(x, all.centers[[it]]))
    else
      all.res <- lapply(all.centers, singleKmeans, x=x)
  }else{
    grappe <- makeCluster(nbCPU)
    # Register cleanup immediately after creating the cluster: if
    # clusterExport()/clusterEvalQ()/parLapply() errors, the workers are
    # still shut down. (The original registered on.exit() only after
    # parLapply(), leaking the cluster on any failure.)
    on.exit(stopCluster(grappe), add = TRUE)
    clusterExport(cl=grappe, varlist = c("x"), envir = environment())
    clusterEvalQ(grappe, {require(packageKmeans)})
    if (useC){
      all.res <- parLapply(grappe, all.centers, function(u) singleKmeansC(x, u))
    }else{
      all.res <- parLapply(grappe, all.centers, function(u) singleKmeans(x, u))
    }
  }
  # Keep the run with the lowest criterion (within-cluster sum of squares).
  all.res <- all.res[[which.min(sapply(all.res, function(u) u$criterion))]]
  if (useC) all.res$zactu <- as.numeric(all.res$zactu)
  all.res
}
# Single K-means run from a given initialisation.
#   x: data matrix (one row per observation)
#   centers: starting centers, one matrix row per cluster (K rows)
# Iterates assignment / center-update until the labels stop changing and
# returns the labels ("zactu"), final centers, and the within-cluster sum
# of squares ("criterion").
singleKmeans <- function(x, centers){
  K <- nrow(centers)

  # Index of the nearest center (squared Euclidean distance) for each row of x.
  nearest <- function(ctrs) {
    d2 <- sapply(seq_len(K), function(k) rowSums(sweep(x, 2, ctrs[k, ], "-")^2))
    apply(d2, 1, which.min)
  }

  previous <- rep(0, nrow(x))
  labels <- nearest(centers)
  while (any(previous != labels)) {
    # Recompute each center as the mean of its assigned observations.
    centers <- t(sapply(seq_len(K), function(k) colMeans(x[which(labels == k), , drop = FALSE])))
    previous <- labels
    labels <- nearest(centers)
  }

  d2 <- sapply(seq_len(K), function(k) rowSums(sweep(x, 2, centers[k, ], "-")^2))
  list(zactu = labels,
       centers = centers,
       criterion = sum(apply(d2, 1, min)))
}
|
e6507f23e98c4c2c3e9970a0a5f688bf50f55764
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PMCMR/examples/posthoc.friedman.nemenyi.test.Rd.R
|
8de72c1986c750f9d142639ea368a3712d169ff1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 956
|
r
|
posthoc.friedman.nemenyi.test.Rd.R
|
library(PMCMR)
### Name: posthoc.friedman.nemenyi.test
### Title: Pairwise post-hoc Test for Multiple Comparisons of Mean Rank
###   Sums for Unreplicated Blocked Data (Nemenyi-Test)
### Aliases: posthoc.friedman.nemenyi.test
###   posthoc.friedman.nemenyi.test.default
###   posthoc.friedman.nemenyi.test.formula
### Keywords: htest nonparametric
### ** Examples
##
## Sachs, 1997, p. 675
## Six persons (block) received six different diuretics (A to F, treatment).
## The responses are the Na-concentration (mval)
## in the urine measured 2 hours after each treatment.
##
# 6x6 matrix: rows = persons (blocks), columns = diuretics A-F.
y <- matrix(c(
3.88, 5.64, 5.76, 4.25, 5.91, 4.33, 30.58, 30.14, 16.92,
23.19, 26.74, 10.91, 25.24, 33.52, 25.45, 18.85, 20.45,
26.67, 4.44, 7.94, 4.04, 4.4, 4.23, 4.36, 29.41, 30.72,
32.92, 28.23, 23.35, 12, 38.87, 33.12, 39.15, 28.06, 38.23,
26.65),nrow=6, ncol=6,
dimnames=list(1:6,c("A","B","C","D","E","F")))
print(y)
# Omnibus test first; the Nemenyi post-hoc identifies which treatment
# pairs differ if the Friedman test is significant.
friedman.test(y)
posthoc.friedman.nemenyi.test(y)
|
72ae9e1b17f36df204d7e16f3d4bfa34999f5c5a
|
7081286a0f4ae00c3cbf4e52a1fa96ec461a02c1
|
/medical-data/SLNB/GPT-kaplan-meier3.R
|
d3378c917d1c70d438f6d80e3e27414715fd2b66
|
[
"Apache-2.0"
] |
permissive
|
kapsitis/ddgatve-stat
|
98aea8d020f2f8e7ba66559cb59dc6367f2cce60
|
86b867bb16a11da3619be149f60e4dfd6474e0bb
|
refs/heads/master
| 2023-05-04T02:45:53.879448
| 2023-04-23T13:26:08
| 2023-04-23T13:26:08
| 18,703,292
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,846
|
r
|
GPT-kaplan-meier3.R
|
# Figures planned for this project (translated from Latvian; this script
# implements item 7):
# (1) demographic pyramid
# (2) bar chart: how many patients had SLNB vs did not
# (3) SLNB performed vs not performed, by age
# (4) SLNB performed vs not performed, by stage (IA, IB, IIA, IIB, IIC)
# (5) SLNB performed vs not performed, by tumour localisation
# (6) localisation by male/female
# ***(7) Kaplan-Meier with and without ulceration (and log-rank p-value)
library(survival)
library(ggplot2)
library(survminer)
# Read data from CSV file
# NOTE(review): hard-coded absolute path; breaks on any other machine.
setwd('/Users/kapsitis/workspace-public/ddgatve-stat/medical-data/SLNB')
data <- read.csv("SLNB_MASTER_COPY3.csv", stringsAsFactors = FALSE)
# Convert date columns to Date format
# (Gads = admission date, 2-digit year; Mirsanasdatums = date of death, 4-digit year)
data$Gads <- as.Date(data$Gads, format = "%m/%d/%y")
data$Mirsanasdatums <- as.Date(data$Mirsanasdatums, format = "%m/%d/%Y")
# Create a new column 'time' as the difference in days between hospitalization and death
data$time <- as.numeric(difftime(data$Mirsanasdatums, data$Gads, units = "days"))
# Patients with no death date are censored at 3000 days.
# NOTE(review): 3000 is a hard-coded follow-up horizon -- confirm it matches
# the study's actual end of follow-up.
data$time <- ifelse(is.na(data$time), 3000, data$time)
# Create a new column 'event' to indicate death event (1 for death, 0 for censored)
# Disease-specific survival: only deaths whose cause ("Navescelonis") is
# malignant skin melanoma count as events; other deaths are censored.
# data$censor <- ifelse(is.na(data$Mirsanasdatums), 0, 1)
data$censor <- ifelse(is.na(data$Mirsanasdatums) | (data$Navescelonis != "Ļaundabīga ādas melanoma"), 0, 1)
# Create a survival object
# NOTE(review): surv_obj is unused -- km_fit below builds its own Surv().
surv_obj <- Surv(time = data$time, event = data$censor)
# Kaplan-Meier fit stratified by ulceration status.
km_fit <- survfit(Surv(time, event=data$censor) ~ ulceracija, data = data)
png(file="GPT(7)-kaplan-meier-specific-survival-by-ulceracija.png", width=500, height=700)
survival_slnb <- ggsurvplot(km_fit, data = data, risk.table = FALSE, pval = TRUE, conf.int = FALSE,
                            xlab = "Time (days)", ylab = "Survival Probability",
                            title = "Kaplan-Meier (Specific) Survival by Ulceration")
# survival_slnb$plot <- survival_slnb$plot + xlim(0, 3000) + ylim(0.7, 1)
print(survival_slnb)
dev.off()
|
8c3bf7a72901c7e3469b270437a422a783a1e938
|
40234ef2ad5efa4c566ff501f3972ab03b181bd9
|
/code/figures/Fig6_hovmoller_mgmt_decisions.R
|
06134efb87c8dc654bf00033eaba2bc192d4995d
|
[] |
no_license
|
cfree14/domoic_acid
|
63fefd3c577d0cd277747254aa50f425401c438f
|
dfe6f4d9b94ad7a71c092c92bf63100a46cb3d0c
|
refs/heads/master
| 2023-07-15T10:28:49.815164
| 2021-08-25T22:31:47
| 2021-08-25T22:31:47
| 279,933,613
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,946
|
r
|
Fig6_hovmoller_mgmt_decisions.R
|
# Clear workspace
rm(list = ls())
# Setup
################################################################################
# Packages
library(sf)
library(raster)
library(tidyverse)
library(lubridate)
library(grid)
library(gridExtra)
# Directories
inputdir <- "input"
preddir <- "output/model_preds"
hovmollerdir <- "output/contam_events"
closuredir <- "data/closures/data"
plotdir <- "figures"
# Study species
study_species <- c("Dungeness crab", "Rock crab",
"Spiny lobster", "Razor clam")
# Read advisory/contamination overlap data
overlap_data <- readRDS(file.path(hovmollerdir, "mismatch_in_advisories_and_contamination.Rds"))
# Read and format advisories data
################################################################################
# Read closures data
closures_orig <- readRDS(file.path(closuredir, "CDFW_2015_2020_fishery_closures.Rds"))
advisories_orig <- readRDS(file.path(closuredir, "CDPH_2014_2020_health_advisories.Rds"))
# Format advisories for merging with the model predictions.
# (Removed the stray `range(a)` line: `a` is never defined anywhere in this
# script, so sourcing it errored at that point.)
advisories_use <- advisories_orig %>%
  # Reduce to study species
  filter(comm_name %in% c("Dungeness crab", "Rock crab", "Spiny lobster", "Clams")) %>%
  # TEMPORARY: rename clams to the study-species label
  mutate(comm_name=recode(comm_name, "Clams"="Razor clam")) %>%
  # Reduce to partial/full advisories
  filter(advisory %in% c("Partial", "Full")) %>%
  # Reduce to the prediction date range
  filter(date>="2014-01-01" & date <= "2020-05-19")
# Build season lines
################################################################################
# Build a key of season open/close dates for one species/fishery/region.
# Seasons open in year Y and close in year Y+1 (2013/14 .. 2019/20 seasons).
# Returns a long tibble with one row per season endpoint (open_date or
# close_date), ready to be drawn as season lines on the Hovmoller plots.
# species <- "Dungeness crab"; fishery_type <- "Commercial"; region <- "Northern"; open_date <- "12-01"; close_date <- "07-15"
build_season_key <- function(species, fishery_type, region, open_date, close_date){
  dates_open <- paste(2013:2019, open_date, sep="-") %>% ymd()
  # BUG FIX: original used paste0(2014:2020, close_date, sep="-"); paste0()
  # has no `sep` argument, so "-" was concatenated as an extra element
  # (e.g. "201407-15-") and only parsed through ymd()'s leniency.
  dates_close <- paste(2014:2020, close_date, sep="-") %>% ymd()
  season_key <- tibble(species=species,
                       fishery_type=fishery_type,
                       region=region,
                       open_date=dates_open,
                       close_date=dates_close) %>%
    mutate(line_group=1:n()) %>%
    select(species:region, line_group, everything()) %>%
    # Pivot the two date columns into endpoint/date pairs
    gather(key="endpoint", value="date", 5:ncol(.)) %>%
    arrange(species, fishery_type, region, line_group)
  return(season_key)
}
# Dungeness crab season keys (commercial/recreational x northern/central)
dcrab_comm_n_key <- build_season_key(species="Dungeness crab", fishery_type="Commercial",
                                     region="Northern", open_date="12-01", close_date="07-15")
dcrab_comm_c_key <- build_season_key(species="Dungeness crab", fishery_type="Commercial",
                                     region="Central", open_date="11-15", close_date="06-30")
dcrab_rec_n_key <- build_season_key(species="Dungeness crab", fishery_type="Recreational",
                                    region="Northern", open_date="11-01", close_date="07-30")
dcrab_rec_c_key <- build_season_key(species="Dungeness crab", fishery_type="Recreational",
                                    region="Central", open_date="11-01", close_date="06-30")
# Spiny lobster season keys (statewide)
lobster_comm_key <- build_season_key(species="Spiny lobster", fishery_type="Commercial",
                                     region="All", open_date="10-01", close_date="03-15")
lobster_rec_key <- build_season_key(species="Spiny lobster", fishery_type="Recreational",
                                    region="All", open_date="10-01", close_date="03-15")
# Combined season key
season_key <- bind_rows(dcrab_comm_n_key, dcrab_comm_c_key,
                        dcrab_rec_n_key, dcrab_rec_c_key,
                        lobster_comm_key, lobster_rec_key) %>%
  # Latitudes at which to draw the season lines, offset by fishery type and
  # region so the lines do not overplot each other
  mutate(lat_plot=32.5, #31.5,
         lat_plot=ifelse(fishery_type=="Commercial", lat_plot+0.3, lat_plot),
         lat_plot=ifelse(region=="Central", lat_plot-0.15, lat_plot)) %>%
  # Make new line group id (unique)
  # BUG FIX: original passed sep="-" to mutate() (which silently created a
  # useless `sep` column equal to "-") instead of to paste(); the id
  # components are now actually hyphen-separated.
  mutate(line_group=paste(species, fishery_type, region, line_group, sep="-"))
# Build sampling sites reference file
################################################################################
# Read sampling data
samples <- readRDS(file.path(inputdir, "CDPH_crab_bivalve_domoic_acid_data.Rds")) %>%
# Get/rename columns
select(comm_name, date, lat_dd, da_ppm) %>%
rename(species=comm_name) %>%
# Reduce to study species and factor
filter(species %in% study_species) %>%
mutate(species=factor(species, study_species)) %>%
# Get rid of 1900 values
filter(year(date)>=2014)
# Read and format Dungeness crab ports
dcrab_sites <- read.csv("/Users/cfree/Dropbox/Chris/UCSB/projects/dungeness/data/cdfw/landings_public/processed/dungeness_ports.csv", as.is=T) %>%
filter(da_sampling=="routine") %>%
select(port, lat_dd) %>%
rename(site_name=port) %>%
mutate(species="Dungeness crab") %>%
select(species, everything())
# Read and format razor clam sites
rclam_sites <- read.csv("input/bivalve_sampling_sites.csv", as.is=T) %>%
filter(comm_name=="Razor clam") %>%
select(comm_name, site, lat_dd) %>%
rename(site_name=site, species=comm_name)
# Build site key
site_key <- bind_rows(dcrab_sites, rclam_sites)
# Build data
################################################################################
# Model
model_key <- tibble(species = study_species,
model = c("dungeness_crab_model_rf_cda.Rds",
"rock_crab_model_brt_cda.Rds",
"spiny_lobster_model_rf_cda.Rds",
"razor_clam_model_rf_cda.Rds"))
# Plotting functions
################################################################################
# For testing
species <- "Dungeness crab"
date <- "2016-04-01"
# Base theme (fore testing) - overwritten below
base_theme <- theme(axis.text=element_text(size=6),
axis.title=element_blank(),
plot.title=element_text(size=8),
legend.text=element_text(size=6),
legend.title=element_text(size=8),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.text.y = element_text(angle = 90, hjust = 0.5))
# Plot a one-day map of predicted contamination for one species.
# `species` must match a row of `model_key`; `date` is a "YYYY-MM-DD" string
# for which a layer exists in that species' prediction raster brick on disk.
# Depends on globals: model_key, preddir, base_theme.
plot_map <- function(species, date){

  # Build data
  ######################

  # Read predictions: look up the model file for this species and open the
  # matching masked-prediction brick (one layer per date).
  species_do <- species
  model_do <- model_key %>% filter(species==species_do) %>% pull(model) %>% gsub(".Rds", "", .)
  infile <- paste0(model_do, "_predictions_range_mask.grd")
  preds <- brick(file.path(preddir, infile))

  # Format predictions: raster layer names look like "X2016.04.01", so the
  # date's hyphens are swapped for dots and an "X" is prepended. The
  # contamination probability is then binned into the expected number of
  # contaminated crabs out of a 6-crab sample (0, 1, 2, >=3).
  date_layer <- gsub("-", ".", date) %>% paste0("X", .)
  pred_ras <- preds[[date_layer]]
  pred_df <- pred_ras %>%
    as.data.frame(xy=T) %>%
    setNames(c("long_dd", "lat_dd", "pcontam")) %>%
    filter(!is.na(pcontam)) %>%
    mutate(ncrabs=cut(pcontam, breaks=c(0, 1/6, 2/6, 3/6, 999), right=F, labels=c("0", "1", "2", "≥3")))

  # Build plot
  ######################

  # Get US states and Mexico basemap layers
  usa <- rnaturalearth::ne_states(country = "United States of America", returnclass = "sf")
  mexico <- rnaturalearth::ne_countries(country="Mexico", returnclass = "sf")

  # Plot map: prediction tiles under the coastline, cropped to California,
  # species name annotated in the top-left corner.
  g1 <- ggplot() +
    # Plot p(contamination)
    geom_tile(data=pred_df, mapping=aes(x=long_dd, y=lat_dd, fill=ncrabs)) +
    # Add California and Mexico
    geom_sf(data=usa, fill="grey85", col="white", size=0.2) +
    geom_sf(data=mexico, fill="grey85", col="white", size=0.2) +
    # Crop extent
    coord_sf(xlim = c(-125, -116), ylim = c(32, 42)) +
    # Label species
    annotate("text", x=-125, y=42, label=species_do, hjust=0, fontface="bold", size=2.5) +
    # Labels
    labs(x=" ", y="") +
    # Legend
    scale_fill_manual(name="# of 6 samples contaminated", values=c("white", "pink", "coral", "darkred")) +
    # Theme
    theme_bw() + base_theme +
    theme(legend.position="none",
          axis.text.x=element_text(size=5))
  g1
}
# Plot raster
# NOTE(review): leftover debug assignment from interactive testing; unused
# once plot_raster() is called with its own `frac` argument. Safe to remove.
frac=0.1
# Hovmoller panel for one species: date x latitude tiles of predicted
# contamination, advisory periods overlaid as grey tiles, and fishery season
# lines on top. `frac` < 1 subsamples the tiles for faster interactive plots.
# Depends on globals: model_key, hovmollerdir, advisories_use, season_key,
# base_theme.
plot_raster <- function(species, frac=1){

  # Read data: load() pulls `data_hov_imputed` from the .Rdata file into this
  # function's environment.
  species_do <- species
  model_do <- model_key %>% filter(species==species_do) %>% pull(model) %>% gsub(".Rds", "", .)
  infile <- paste0(model_do, "_predictions_range_mask_hovmoller_imputed_events.Rdata")
  load(file=file.path(hovmollerdir, infile))

  # Get advisories data for this species only
  advisories_use_spp <- advisories_use %>%
    filter(comm_name==species_do)

  # Sample data if necessary and bin p(contamination) into the expected
  # number of contaminated crabs out of a 6-crab sample (0, 1, 2, >=3)
  data_hov_imputed_use <- sample_frac(data_hov_imputed, size=frac) %>%
    mutate(ncrabs=cut(pcontam_avg, breaks=c(0, 1/6, 2/6, 3/6, 999), right=F, labels=c("0", "1", "2", "≥3")))

  # Plot data
  g <- ggplot(data_hov_imputed_use, aes(x=date, y=lat_dd, fill=ncrabs)) +
    # Plot raster
    geom_tile() +
    # Plot advisories (semi-transparent grey over the affected dates/latitudes)
    geom_tile(data=advisories_use_spp, mapping=aes(x=date, y=lat_dd), fill="grey40", alpha=0.6) +
    # Plot season lines (one segment per season, by fishery type)
    geom_line(season_key %>% filter(species==species_do), inherit.aes = F,
              mapping=aes(x=date, y=lat_plot, group=line_group, color=fishery_type), lwd=0.3) +
    # Labels
    labs(x="", y="") +
    scale_y_continuous(breaks=seq(32, 42, 2), lim=c(32,42), labels = paste0(seq(32, 42, 2), "°N")) +
    scale_x_date(lim=c(ymd("2014-01-01"), NA), date_breaks = "1 year", date_labels="%Y") +
    scale_fill_manual(name="# of 6 samples contaminated", values=c("white", "pink", "coral", "darkred")) +
    scale_color_manual(name="Fishery", values=c("black", "grey40")) +
    # Theme
    theme_bw() + base_theme +
    theme(legend.position="none")
  g

  # Return
  return(g)
}
# Plot number of precautionary closures
plot_nprecautionary <- function(species){
# Subset data
sdata <- overlap_data %>%
# Reduce to species and right category
filter(comm_name==species & type=="Identifying precautionary closures") %>%
# Count by latitude
group_by(lat_dd) %>%
summarise(nevents = sum(catg_name=="Closure (low risk)"))
# Plot events
g <- ggplot(sdata, aes(x=lat_dd, y=nevents)) +
# Plot # of events
geom_area(fill="pink") +
# Flip vertical
coord_flip() +
# Labels
labs(x="", y="") +
scale_y_continuous(n.breaks=3) +
scale_x_continuous(breaks=seq(32, 42, 2), lim=c(32,42), labels = paste0(seq(32, 42, 2), "°N")) +
# Theme
theme_bw() + base_theme
g
# Return
return(g)
}
# Plot number of overlooked closures
plot_noverlooked <- function(species){
# Subset data
sdata <- overlap_data %>%
# Reduce to species and right category
filter(comm_name==species & type=="Identifying overlooked closures") %>%
# Count by latitude
group_by(lat_dd) %>%
summarise(nevents = sum(catg_name=="In season (high risk)"))
# Plot events
g <- ggplot(sdata, aes(x=lat_dd, y=nevents)) +
# Plot # of events
geom_area(fill="darkred") +
# Flip vertical
coord_flip() +
# Labels
labs(x="", y="") +
scale_y_continuous(n.breaks=3) +
scale_x_continuous(breaks=seq(32, 42, 2), lim=c(32,42), labels = paste0(seq(32, 42, 2), "°N")) +
# Theme
theme_bw() + base_theme
g
# Return
return(g)
}
# Plot data
################################################################################
# Sample data for fast plotting (kept for reference; disabled)
# data_sample <- data %>%
#   sample_frac(size=0.01)
# Base theme shared by all panels: small fonts, no grid lines, and
# y-axis tick labels rotated 90 degrees so latitude labels read vertically.
base_theme <- theme(axis.text=element_text(size=6),
                    axis.title=element_blank(),
                    plot.title=element_text(size=8),
                    legend.text=element_text(size=6),
                    legend.title=element_text(size=8),
                    panel.grid.major = element_blank(),
                    panel.grid.minor = element_blank(),
                    panel.background = element_blank(),
                    axis.text.y = element_text(angle = 90, hjust = 0.5))
# Column 1: species maps on a fixed reference date (plot_map is project-defined).
date_do <- "2016-04-01"
m1 <- plot_map(species="Dungeness crab", date=date_do)
m2 <- plot_map(species="Rock crab", date=date_do)
m3 <- plot_map(species="Spiny lobster", date=date_do)
m4 <- plot_map(species="Razor clam", date=date_do)
# Column 2: latitude-by-time rasters per species (Hovmoller panels, per the
# output filename below).
r1 <- plot_raster(species="Dungeness crab") #, frac=0.01)
r2 <- plot_raster(species="Rock crab") #, frac=0.01)
r3 <- plot_raster(species="Spiny lobster") #, frac=0.01)
r4 <- plot_raster(species="Razor clam") #, frac=0.01)
# Column 3: number of precautionary closures by latitude.
p1 <- plot_nprecautionary(species="Dungeness crab")
p2 <- plot_nprecautionary(species="Rock crab")
p3 <- plot_nprecautionary(species="Spiny lobster")
p4 <- plot_nprecautionary(species="Razor clam")
# Column 4: number of "overlooked" events by latitude (see plot_noverlooked).
o1 <- plot_noverlooked(species="Dungeness crab")
o2 <- plot_noverlooked(species="Rock crab")
o3 <- plot_noverlooked(species="Spiny lobster")
o4 <- plot_noverlooked(species="Razor clam")
# Assemble the 4x4 grid: one row per species, one column per panel type.
g <- grid.arrange(m1, r1, p1, o1,
                  m2, r2, p2, o2,
                  m3, r3, p3, o3,
                  m4, r4, p4, o4,
                  ncol=4, widths=c(0.25, 0.45, 0.15, 0.15))
#g
# Export plot
ggsave(g, filename=file.path(plotdir, "Fig6_hovmoller_mgmt_decisions.png"),
       width=6.5, height=7.5, units="in", dpi=600)
# Old attempt at converting closures to polygons for plotting
# # Read closures data
# closures_df <- readRDS(file.path(closuredir, "CDFW_2015_2020_fishery_closures.Rds")) %>%
# # Reduce closure to those that span domoic predictions
# filter(date>="2014-01-01" & date <= "2020-05-19")
#
#
# # Convert the closures to polygons
# x <- "Dungeness crab"
# closures_poly <- purrr::map(study_species, function(x){
#
# # Build data key
# date_key <- closures_df %>%
# select(date) %>%
# unique() %>%
# arrange(date) %>%
# mutate(date_id=1:n())
#
#   # Subset to species, fishery of interest, and domoic closures
# sdata_df <- closures_df %>%
# filter(comm_name==x & fishery=="Commercial") %>%
# left_join(date_key) %>%
# mutate(status_catg=ifelse(status=="Domoic acid delay", 1, 0))
#
# # Plot check
# g <- ggplot(sdata_df, aes(x=date_id, y=lat_dd, fill=status_catg)) +
# geom_raster()
# g
#
# # Convert to raster
# sdata_ras <- sdata_df %>%
# select(date_id, lat_dd, status_catg) %>%
# raster::rasterFromXYZ()
#
# # Plot check
# image(sdata_ras)
#
# # Convert to polygon
# sdata_poly <- sdata_ras %>%
# # Convert to polygons (SP) - each cell is a polygon
# rasterToPolygons() %>%
# # Convert to SF
# sf::st_as_sf() %>%
# # Dissolve into single polygon
# summarize() %>%
#     # Break into separate polygons
# sf::st_cast("POLYGON") %>%
# sf::st_cast("MULTIPOLYGON") %>%
# # Add event number
# mutate(closure_id=1:n(),
# comm_name=x) %>%
# # Arrange
# select(comm_name, closure_id)
#
# # Plot check
#
#
#
# })
#
# plot()
|
322e5aa9faa202ec6d020dff9cd3582addde5776
|
cfacbfb653f0662be0c70d2c6659c3d1d3305b71
|
/Computational-Statistics/HW3.R
|
c78fd2888e27b321fe2f15a6c68b1e3f811e41f8
|
[] |
no_license
|
ihaawesome/Graduate
|
37327af1acd4b2f2bf56648485e5a8378a2bbddd
|
a0ee4b8863b2cd03855685d17cab802e2b5898d3
|
refs/heads/master
| 2020-05-03T07:46:48.563738
| 2019-09-17T05:49:38
| 2019-09-17T05:49:38
| 178,507,439
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,774
|
r
|
HW3.R
|
library(dplyr)
library(ggplot2)
library(GenSA)
library(GA)
# (1) kmeans
# Simulate two 2-D Gaussian clusters (centered at 0 and at 1, sd = 0.5)
# and run the built-in kmeans as a reference for the hand-rolled
# implementation defined below.
set.seed(1234)
x1 = matrix(rnorm(100, sd = 0.5), ncol = 2)
x2 = matrix(rnorm(100, 1, sd = 0.5), ncol = 2)
x = rbind(x1, x2) ; colnames(x) = c("x1", "x2") ; x = as.data.frame(x)
kmean = kmeans(x, 2, nstart = 10) ; kmean
# Scatter plot colored by cluster assignment, with the two cluster
# centers overlaid as large hollow points.
ggplot() + theme_test() + theme(legend.position = "top", plot.title = element_text(hjust = 0.5)) +
  geom_point(data = data.frame(x, cluster = factor(kmean$cluster)), aes(x1, x2, color = cluster), size = 2) + scale_color_viridis_d() +
  geom_point(data = data.frame(kmean$centers), aes(x1, x2), size = 3, shape = 21, color = "black", fill = "white", stroke = 3) +
  labs(title = "Result of R kmeans function")
# function
# K-means clustering implemented from scratch (Lloyd's algorithm).
#
# Args:
#   x:       data frame of numeric observations, one row per observation
#            (at least two columns, so row subsetting keeps a data frame).
#   k:       number of clusters.
#   maxiter: maximum number of assign/update iterations per restart.
#   nstart:  number of random restarts; the solution with the smallest
#            total within-cluster sum of squares is returned.
#
# Returns a list with: cluster (assignments, length nrow(x)), centers,
# tss, wss (per cluster), tot.wss, bss, size (per cluster), niter.
mykmeans <- function(x, k, maxiter=10, nstart=1) {
  n <- nrow(x)
  # Total sum of squares; constant across restarts.
  tss <- sum(dist(x)^2) / n
  output <- vector("list", nstart)
  tot.wss.all <- numeric(nstart)  # quality of each restart
  for (ns in seq_len(nstart)) {
    # Random initialization: k distinct rows of x as starting centers.
    cl.mean <- x[sample(n, k), ]
    cl <- numeric(n)  # bug fix: was numeric(k); one label per observation
    ss <- matrix(0, nrow = n, ncol = k)
    converged <- FALSE
    niter <- 0  # bug fix: reset per restart (was carried across restarts)
    while (!converged && niter <= maxiter) {
      cl0 <- cl
      # Assignment step: label each point with its nearest center.
      # cl[i] is finalized on the last pass (j == k), once the full
      # distance row ss[i, ] reflects the current centers.
      for (j in seq_len(k)) {
        for (i in seq_len(n)) {
          ss[i, j] <- sum((x[i, ] - cl.mean[j, ])^2)
          cl[i] <- which.min(ss[i, ])
        }
      }
      # Update step: move each center to the mean of its cluster.
      spl <- split(x, cl)
      for (j in seq_len(k)) {
        cl.mean[j, ] <- apply(spl[[j]], MARGIN = 2, FUN = mean)
      }
      converged <- identical(cl, cl0)  # stop once labels are stable
      niter <- niter + 1
    }
    # Per-cluster sizes and within-cluster sums of squares.
    size <- numeric(k)
    wss <- numeric(k)
    for (j in seq_len(k)) {
      size[j] <- nrow(spl[[j]])
      wss[j] <- sum(dist(spl[[j]])^2) / size[j]
    }
    tot.wss <- sum(wss)
    tot.wss.all[ns] <- tot.wss
    output[[ns]] <- list(cluster = cl, centers = cl.mean, tss = tss,
                         wss = wss, tot.wss = tot.wss, bss = tss - tot.wss,
                         size = size, niter = niter)
  }
  # Bug fix: select the best restart. The original indexed with
  # which.min(tot.wss) -- a scalar -- which always returned restart 1.
  output[[which.min(tot.wss.all)]]
}
# Run the hand-rolled k-means on the same data and plot its clustering
# for comparison with the built-in kmeans result above.
mykmean = mykmeans(x, k=2, nstart=10) ; mykmean
data = data.frame(x, cluster = factor(mykmean$cluster))
ggplot() + theme_test() +
  geom_point(data = data, aes(x1, x2, color = cluster), size = 2) + scale_color_viridis_d() +
  geom_point(data = mykmean$centers, aes(x1, x2), size = 3, shape = 21, color = "black", fill = "white", stroke = 3) +
  theme(legend.position = "top", plot.title = element_text(hjust = 0.5)) + labs(title = "Result of mykmeans function")
# (2) baseball
baseball = read.csv("C:/Users/HG/Documents/R/data/baseball.txt", header = T, sep = " ")
# AIC of the linear model log(salary) ~ <selected covariates>.
# `par` is a real-valued vector (one entry per candidate covariate);
# entries >= 0.5 switch the corresponding column on. Relies on the
# global `baseball` data frame, whose first column is `salary`.
myaic <- function(par) {
  keep <- par >= 0.5
  response <- baseball[1]
  predictors <- baseball[-1][keep]
  fit <- lm(log(salary) ~ ., data.frame(response, predictors))
  extractAIC(fit)[2]
}
# (2-1) GenSA: simulated annealing over [0,1]^27. Each coordinate >= 0.5
# switches one covariate on (see myaic), so GenSA minimizes the AIC of
# the induced variable subset.
gensa.out <- GenSA(fn = myaic,
                   lower = rep(0, 27), upper = rep(1, 27),
                   control = list(max.time = 10, seed = 1))
gensa.out$value  # best AIC found
as.logical(gensa.out$par >= 0.5) %>% which()  # indices of selected covariates
gensa.info <- as.data.frame(gensa.out$trace.mat)
# Trace plot: current objective value (solid) vs running minimum (dashed blue)...
g1 <- ggplot(gensa.info) +
  geom_line(aes(1:length(function.value), current.minimum), color = "blue", linetype = "dashed") +
  geom_line(aes(1:length(function.value), function.value)) +
  labs(x = "iterations", y = "AIC", title = "GenSA Output") + ylim(-420, -360) + theme_test() +
  theme(plot.title = element_text(hjust = 0.5))
# ...and the annealing temperature schedule.
g2 <- ggplot(gensa.info) + geom_line(aes(1:length(temperature), temperature), linetype = "dashed") +
  labs(x = "iterations", title = "") + theme_test() +
  theme(plot.subtitle = element_text(hjust = 0.5))
library(gridExtra)
grid.arrange(g1, g2, nrow = 2)
# (2-2) GA: genetic algorithm maximizes fitness, so the AIC is negated.
ga.out <- ga(type = "real-valued",
             fitness = function(x) -myaic(x),
             lower = rep(0, 27), upper = rep(1, 27),
             popSize = 20, maxiter = 100, seed = 1)
ga.info <- summary(ga.out)
ga.info$fitness  # best fitness (negated AIC)
which(ga.info$solution[1,] >= 0.5)  # selected covariate indices
plot(ga.out, main = "GA Output")
# try ---------------------------------------------
# Alternative (stochastic) objective: here `par` is a single continuous
# value interpreted as a subset size. A random subset of as.integer(par)
# covariates is drawn, then one random add/remove move is tried and kept
# if it lowers the AIC.
# NOTE(review): because of the sample() calls, this objective is
# non-deterministic -- repeated evaluation at the same `par` returns
# different values, which both GenSA and GA implicitly assume away.
myaic <- function(par) {
  y = baseball[1]
  x = sample(baseball[-1], as.integer(par))
  current <- lm(log(salary) ~ ., data.frame(y, x))
  # Draw one candidate covariate for a local move.
  new = sample(baseball[-1], 1)
  if (!(names(new) %in% names(x))) {
    # Candidate not in the subset: try adding it.
    newdata = data.frame(y, x, new)
    add <- lm(log(salary) ~ ., newdata)
    if (extractAIC(current)[2] > extractAIC(add)[2]) { current <- add }
  }
  # NOTE(review): this condition is redundant -- it is always TRUE when
  # the first branch was not taken; a plain `else` would be equivalent.
  else if (names(new) %in% names(x)) {
    # Candidate already in the subset: try removing it.
    x = x[names(x) != names(new)]
    newdata = data.frame(y, x)
    subst <- lm(log(salary) ~ ., newdata)
    if (extractAIC(current)[2] > extractAIC(subst)[2]) { current <- subst }
  }
  myaic = extractAIC(current)[2]
  return(myaic)
}
# GenSA over subset sizes 1..26.
gensa.out <- GenSA(fn = myaic,
                   lower = 1, upper = 26,
                   control = list(maxit = 1000, seed = 123))
gensa.info = as.data.frame(gensa.out$trace.mat)
# Objective and temperature traces for the GenSA run.
ggplot(gensa.info) + geom_line(aes(1:length(function.value), function.value)) +
  labs(x = "iterations", y = "AIC", title = "GenSA Output") + ylim(-420,-390) + theme_test()
ggplot(gensa.info) + geom_line(aes(1:length(temperature), temperature), linetype = "dashed") +
  labs(x = "index", title = "Temperature") + ylim(0,1000) + theme_test()
plot(gensa.info$function.value, ylim = c(-420,-380), type = "l",
     ylab = "AIC", main = "GenSA Output")
# GA over the same range (fitness negated, since ga() maximizes).
ga.out <- ga(type = "real-valued",
             fitness = function(x) -myaic(x),
             lower = 1, upper = 26,
             popSize = 20, maxiter = 300, seed = 123) # keepBest = T, optim = T
ga.info = summary(ga.out)
plot(ga.out, main = "GA Output")
|
e18a294a9b4cd6d70da9b6fd28c11a9920a9d4e0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Scalelink/examples/FOI.Rd.R
|
eec859f2d12afe9c2d0f1d29961a8e2002a8e08e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 170
|
r
|
FOI.Rd.R
|
library(Scalelink)
### Name: FOI
### Title: File of interest
### Aliases: FOI
### Keywords: datasets
### ** Examples
data(FOI, package = "Scalelink")
summary(FOI)
|
f78745df4cbf7f28f9a8d4ba2fddd3775c2ec189
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/openintro/examples/toohey.Rd.R
|
4f4001b70b3aa49e1a30ea9af782f28a7ff2aa20
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 193
|
r
|
toohey.Rd.R
|
library(openintro)
### Name: toohey
### Title: Simulated polling data set
### Aliases: toohey
### Keywords: datasets
### ** Examples
data(toohey)
## maybe str(toohey) ; plot(toohey) ...
|
8a7a1ba2b8977b19596d74f7f6c5eb3a1bdb150c
|
26b0fcba9fde7cf9ccdb0423f768ca417a6ce4ec
|
/tests/testthat/test_assemble_bsplines.R
|
c7541a980a3de5303da8467745f00d3a6b2b5037
|
[] |
no_license
|
cran/hero
|
5334a80aae95acdffa48003ded4e35922d862d21
|
6608b2b4953706ed19d447b554d0e288ef6e33f4
|
refs/heads/master
| 2023-07-26T05:45:27.750716
| 2023-07-15T21:10:09
| 2023-07-15T22:32:24
| 155,420,413
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 597
|
r
|
test_assemble_bsplines.R
|
context("check assemble.hero_bsplines")
s = seq(0, 1, len = 101)
test_that("assemble.hero_splines matches pspline.setting", {
nk = seq(10, 25, len = 4)
o = 1:4
mc = 1:5
for (i in seq_along(nk)) {
for (j in seq_along(o)) {
for (k in seq_along(mc)) {
list1 = pspline.setting(s, knots = nk[i], p = o[j] - 1, m = mc[k])
x = bspline(knots = list1$knots, norder = o[j])
list2 = assemble(x, x = s, m = mc[k])
expect_equivalent(as.matrix(list1$B), as.matrix(list2$B))
expect_equivalent(list1$s, list2$s)
}
}
}
})
|
9c88d3f3cc131cf1cd83da3d5bced7e961b33646
|
807f70d6951c6cd928ea4b72cb3e644cf997678a
|
/man/get_project_walkthroughs.Rd
|
1ff6db7e91428309dbcdc5a01de80477ed447ff6
|
[] |
no_license
|
jonlinca/galvanizer
|
c1574365374b10676c23e05c8b298ddf2ef30980
|
b2e5a767b3a317bf18437515d8dca801668955d2
|
refs/heads/master
| 2023-05-06T20:20:05.016669
| 2021-05-28T16:19:36
| 2021-05-28T16:19:36
| 277,917,653
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,092
|
rd
|
get_project_walkthroughs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/projects.R
\name{get_project_walkthroughs}
\alias{get_project_walkthroughs}
\title{Retrieve Highbond Project - Walkthroughs / Execute Procedures}
\usage{
get_project_walkthroughs(
auth,
walkthrough_id,
fields = NULL,
pagesize = 50,
waittime = 0.2
)
}
\arguments{
\item{auth}{Highbond authentication credentials, created from \code{\link{setup_highbond}}}
\item{walkthrough_id}{Will get only one.}
\item{fields}{OPTIONAL. A character vector each field requested within the
project. NULL will default to all fields.}
\item{pagesize}{Defaults to 50. Maximum is 100.}
\item{waittime}{Time in seconds to wait between requests.}
}
\value{
A tibble of walkthroughs
}
\description{
"A walkthrough is a series of steps you perform to establish the
reliability of controls and test the design of controls. Each control you
define has a corresponding walkthrough that is used to verify that the
control is designed appropriately. In a Workplan workflow project, a
walkthrough is called an execute procedure."
}
|
9958ef9b5ec0f412d162bde492d51ce1179536a1
|
3ced8c2cb355a188ec610e7f288b2cdda2bbfcc1
|
/materyaller/kodlar/ggplot2_baslangic.r
|
01de79ba140ca00415177af3d2715a3e0acc2d7e
|
[
"MIT"
] |
permissive
|
r338/ab-2016
|
b99bf1817f340f9821260d78532f02c5ae79d807
|
9625abf936cd35a3d90f3c4602921f13241f765c
|
refs/heads/master
| 2021-01-10T04:52:15.747960
| 2016-02-02T12:31:34
| 2016-02-02T12:31:34
| 50,600,817
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,852
|
r
|
ggplot2_baslangic.r
|
## ggplot2 examples
options(stringsAsFactors=FALSE) # keep characters in data.frames as character, not factor
options(dplyr.width = Inf) # show dplyr tables at full width
options(scipen = 7) # bias away from scientific notation for decimal values
# install.packages("dplyr")
# install.packages("ggplot2")
library(dplyr)
library(ggplot2)
# First, load the helper functions (sinif_olustur is project-defined)
# Build our data set (a simulated class with exam scores -- TODO confirm)
sinif<-sinif_olustur() %>% tbl_df
# This produces an empty plot (no geom layer added yet)
ggplot(data=sinif)
# Distribution of Matematik_1 vs Matematik_2 scores
ggplot(data=sinif) + geom_point(aes(x=Matematik_1,y=Matematik_2))
# Can also be written this way (aesthetics set in the ggplot() call)
ggplot(data=sinif,aes(x=Matematik_1,y=Matematik_2)) + geom_point()
# Same scatter, split by gender (cinsiyet) and section (sube)
ggplot(data=sinif) + geom_point(aes(x=Matematik_1,y=Matematik_2,color=cinsiyet))
ggplot(data=sinif) + geom_point(aes(x=Matematik_1,y=Matematik_2,color=cinsiyet,shape=sube))
ggplot(data=sinif) + geom_point(aes(x=Matematik_1,y=Matematik_2,color=cinsiyet,shape=sube), size=4)
ggplot(data=sinif) + geom_point(aes(x=Matematik_1,y=Matematik_2,color=cinsiyet,shape=sube,alpha=Tarih_1), size=4)
# Only students scoring at least 50 in both exams
ggplot(data=sinif %>% filter(Matematik_1>=50 & Matematik_2 >= 50)) + geom_point(aes(x=Matematik_1,y=Matematik_2,color=cinsiyet,shape=sube), size=4)
# Histogram of Matematik_1 scores
ggplot(data=sinif) + geom_histogram(aes(Matematik_1),binwidth=1)
# Mean Matematik_1 per (section, gender) group, shown as a bar chart
ozet1<-sinif %>% group_by(sube,cinsiyet) %>% summarise(Matematik_1=mean(Matematik_1)) %>% ungroup %>% mutate(sube_cinsiyet=paste0(sube,"_",cinsiyet))
ggplot(data=ozet1,aes(x=sube_cinsiyet,y=Matematik_1)) + geom_bar(stat="identity",aes(fill=sube))
# Two cumulative random walks over 50 days, drawn as red/blue lines
df<-data.frame(gun=1:50,
               deger=cumsum(runif(50,0,1)),
               deger2=cumsum(runif(50,0,1))) %>% tbl_df
ggplot(data=df,aes(x=gun)) + geom_line(aes(y=deger),color="red") + geom_line(aes(y=deger2),color="blue")
|
f2f0446d220d47d065948384828230d6c1c5ffaa
|
e760ec7eff2aa44cd87e85a6fcb18b9e82240f44
|
/man/format_exp_data.Rd
|
2ea2d55885fdb1b0c4ea1f3d5248843f3c0bdb73
|
[
"MIT"
] |
permissive
|
fentouxungui/CryptDriftR
|
81ec9372c5553aae3fb20e37077fd62f24039372
|
ee708eeecfd47d9e6ea43ddcf59394f4a7f207da
|
refs/heads/master
| 2023-06-30T13:05:19.418613
| 2021-08-05T16:16:31
| 2021-08-05T16:16:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 666
|
rd
|
format_exp_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_data.R
\name{format_exp_data}
\alias{format_exp_data}
\title{Calculate proportions with 95\% credible interval (confidence interval).
If there are replicates they will be pooled(!). It will work for both count table format and list format.
For it to work for table format give time vector}
\usage{
format_exp_data(data_x, time_points)
}
\description{
Calculate proportions with 95\% credible interval (confidence interval).
If there are replicates they will be pooled(!). It will work for both count table format and list format.
For it to work for table format give time vector
}
|
014f7f4d8b2b96e838a3abaf5470415abc83faea
|
401f2375173bd502cb7419230bc4c75c530c9b6e
|
/man/monetdb.read.csv.Rd
|
09340ea86c4280f6a64672dedf1005d7446b3fef
|
[] |
no_license
|
MonetDB/MonetDBLite-R
|
cab5c16538ed215eaf5d3aa36085e6d7b0f6dfb6
|
3fa31575efd66c673b67c9ddb24e5668a397a047
|
refs/heads/master
| 2023-08-28T00:40:36.593695
| 2022-01-13T09:02:00
| 2022-01-13T09:02:00
| 103,245,827
| 19
| 8
| null | 2019-09-20T08:05:01
| 2017-09-12T08:49:13
|
C
|
UTF-8
|
R
| false
| false
| 2,390
|
rd
|
monetdb.read.csv.Rd
|
\name{monetdb.read.csv}
\alias{monetdb.read.csv}
\alias{monet.read.csv}
\title{
Import a CSV file into MonetDBLite
}
\description{
Instruct MonetDBLite to read a CSV file, optionally also create the table for it.
}
\usage{
monetdb.read.csv (conn, files, tablename, header=TRUE,
locked=FALSE, best.effort=FALSE, na.strings="", nrow.check=500, delim=",",
newline = "\\\\n", quote = "\"", col.names=NULL, lower.case.names=FALSE,
sep=delim, ...)
}
\arguments{
\item{conn}{A MonetDBLite database connection. Created using \code{\link[DBI]{dbConnect}} with the
\code{\link[MonetDBLite]{MonetDBLite}} database driver.}
\item{files}{A single string or a vector of strings containing the absolute file names of the CSV files to be imported.}
\item{tablename}{Name of the database table the CSV files should be imported in. Created if necessary.}
\item{header}{Whether or not the CSV files contain a header line.}
\item{locked}{Whether or not to disable transactions for import.
Setting this to TRUE can greatly improve the import performance.}
\item{best.effort}{Use best effort flag when reading csv files and continue importing even if parsing of fields/lines fails.}
\item{na.strings}{Which string value to interpret as \code{NA} value.}
\item{nrow.check}{Amount of rows that should be read from the CSV when the
table is being created to determine column types.}
\item{delim}{Field separator in CSV file.}
\item{newline}{Newline in CSV file, usually \\n for UNIX-like systems and \\r\\n on Windows.}
\item{quote}{Quote character(s) in CSV file.}
\item{lower.case.names}{Convert all column names to lowercase in the database?}
\item{col.names}{Optional column names in case the ones from CSV file should not be used}
\item{sep}{alias for \code{delim}}
\item{...}{Additional parameters. Currently not in use.}
}
\value{
Returns the number of rows imported if successful.
}
\seealso{
\code{dbWriteTable} in \code{\link[DBI]{DBIConnection-class}}
}
\examples{
# initiate a MonetDBLite server
library(DBI)
dbdir <- file.path( tempdir() , 'readcsv' )
con <- dbConnect( MonetDBLite::MonetDBLite() , dbdir )
# write test data to temporary CSV file
file <- tempfile()
write.table(iris, file, sep=",", row.names=FALSE)
# create table and import CSV
monetdb.read.csv(con, file, "iris")
dbDisconnect(con, shutdown=TRUE)
}
\keyword{interface}
|
12d066350579c9f3b5e866df8061ac0090ecc551
|
63a770312db431190f9bf7db60dacdb86134fa76
|
/src_raw/2.5_KEGG_DEG.R
|
253b5a43a7df115433d8cc8e983942a492c11371
|
[] |
no_license
|
zhilongjia/nCoV2019
|
0ee4aab7dcc35a4273a36dd4be97e9b6d85c55f3
|
57b616e83aa638fbfcdd4be09101e0c4331eb0e0
|
refs/heads/master
| 2023-02-20T11:17:52.551457
| 2020-07-25T08:00:21
| 2020-07-25T08:00:21
| 236,764,272
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,102
|
r
|
2.5_KEGG_DEG.R
|
# Load differential expression analysis results saved by earlier steps.
load("../results/DEA_list.RData")
load("../results/DEA_pneu_list.RData")
# Map gene symbols to Entrez IDs via org.Hs.eg.db; returns only the
# ENTREZID column of the clusterProfiler::bitr() translation table.
symbol2entrezID <- function(gene_symbols) {
    symbol_ENTREZID <- clusterProfiler::bitr(gene_symbols, fromType="SYMBOL", toType="ENTREZID", OrgDb="org.Hs.eg.db")
    return(symbol_ENTREZID$ENTREZID)
}
# Combine the limma DEG lists from both analyses into one list.
DEA_list <- c(DEA_list[["limma_DEG"]], DEA_pneu_list[["limma_DEG"]])
# NOTE(review): this first assignment of c1 is dead code -- it is
# overwritten unconditionally by the next assignment.
c1 <- c("nCoV_Heal", "pneu_Heal", "nCoV_pneu",
        "pneuVir_Heal", "pneuBac_Heal",
        "pneuBac_pneuVir", "nCoV_pneuBac", "nCoV_pneuVir")
# Contrasts actually used downstream.
c1 <- c("nCoV_Heal", "pneu_Heal", "pneuVir_Heal")
################################################################################
# Pathway & GO analysis of DEG
library(clusterProfiler)
# Convert each contrast's DEG symbols to Entrez IDs.
genesEntrezID_3g <- sapply(DEA_list[c1], symbol2entrezID )
sapply(genesEntrezID_3g, length)  # sanity check: mapped genes per contrast
# KEGG pathway enrichment, compared across the three contrasts.
genesEntrezID_3g_KEGG <- compareCluster(genesEntrezID_3g, fun='enrichKEGG')
dotplot(genesEntrezID_3g_KEGG, showCategory=10)
# GO Biological Process enrichment, compared across the same contrasts.
genesEntrezID_3g_BP <- compareCluster(genesEntrezID_3g, fun='enrichGO', OrgDb='org.Hs.eg.db', ont="BP")
dotplot(genesEntrezID_3g_BP, showCategory=20)
save.image("../results/2.0_KEGG_DEG.RData")
|
047ad96c7e2682f7efb9893a4614065deb95b5ad
|
1d023a92fe9b31ebfd21a845ba4e928dad2e848a
|
/binomial/tests/testthat/test-auxiliary.R
|
5516649df3040ff6b26e2f18241557a9cfc3fbaa
|
[] |
no_license
|
stat133-sp19/hw-stat133-jsbshin
|
4051317b9de69a4329dd10fdc7ada093479069ea
|
9d2e9a9d116f9f0f6fa7f989ba627703a76cc56d
|
refs/heads/master
| 2020-04-28T10:30:31.881685
| 2019-05-03T18:01:52
| 2019-05-03T18:01:52
| 175,203,557
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,462
|
r
|
test-auxiliary.R
|
context("test auxiliary functions")
# test aux_mean() function
test_that("test that aux_mean works",{
expect_equal(aux_mean(10, 0.5), 5)
expect_equal(aux_mean(100, 0.3), 30)
expect_equal(aux_mean(1000, 0.1), 100)
expect_error(aux_mean('a', 0.3))
expect_length(aux_mean(10,0.5),1)
expect_is(aux_mean(10,0.5),'numeric')
})
# test aux_variance() function
test_that("test that aux_variance works",{
expect_equal(aux_variance(10, 0.5), 2.5)
expect_equal(aux_variance(100, 0.3), 21)
expect_equal(aux_variance(1000, 0.1), 90)
expect_error(aux_variance('a', 0.3))
expect_length(aux_variance(10,0.5),1)
})
# test aux_mode() function
test_that("test that aux_mode works",{
expect_equal(aux_mode(10, 0.5), 5)
expect_equal(aux_mode(100, 0.3), 30)
expect_equal(aux_mode(1000, 0.1), 100)
expect_error(aux_mode('a', 0.3))
expect_length(aux_mode(10, 0.5),1)
expect_length(aux_mode(11, 0.5),2)
})
# test aux_skewness() function
test_that("test that aux_skewness works",{
expect_equal(aux_skewness(10, 0.5), 0)
expect_equal(signif(aux_skewness(10, 0.3),3), signif(0.2760262,3))
expect_error(aux_skewness('a', 0.3))
expect_length(aux_skewness(10, 0.3),1)
})
# test aux_kurtosis() function
test_that("test that aux_kurtosis works",{
expect_equal(aux_kurtosis(10, 0.5),-0.2)
expect_equal(signif(aux_kurtosis(10, 0.3),3), signif(-0.1238095,3))
expect_error(aux_kurtosis('a', 0.3))
expect_length(aux_kurtosis(10, 0.3),1)
})
|
61ba1d34596e13f87f30a92972d87e68bdfcd1f3
|
f499f99b54008f18e3aa128ff41e94748deb5626
|
/man/mlr_learners_ordinal.clm.Rd
|
86e272a42ad29506d53dd7c0bcb98a2223845626
|
[
"MIT"
] |
permissive
|
mlr-org/mlr3ordinal
|
93bb02bc10689a45ba3001c49ee892cf5dc18b32
|
026d8507baaac4e9bc44eb8c074b5b0363305122
|
refs/heads/main
| 2022-12-22T02:22:18.115673
| 2022-12-08T13:33:05
| 2022-12-08T13:33:05
| 164,665,475
| 4
| 0
|
MIT
| 2022-12-08T13:32:31
| 2019-01-08T14:24:31
|
R
|
UTF-8
|
R
| false
| true
| 410
|
rd
|
mlr_learners_ordinal.clm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LearnerOrdinalClm.R
\name{mlr_learners_ordinal.clm}
\alias{mlr_learners_ordinal.clm}
\alias{LearnerOrdinalClm}
\title{Cumulative Link Model Learner}
\format{
\link[R6:R6Class]{R6::R6Class} inheriting from \link{LearnerOrdinal}.
}
\description{
A learner for Cumulative Link Models implemented in \link[ordinal:clm]{ordinal::clm}.
}
|
c2c6a4f0789ac89178b8e5a1a3aa621ef2ade003
|
c5339895b90adeb68db2317f82057a8a70ee763a
|
/paper_examples/results/muscle/dend.R
|
ccf760ab3ea0411e872374279d2318e3ec784746
|
[
"MIT"
] |
permissive
|
PengTao-HUST/GDNB
|
966ccfd06e5fa59dca0923cb0bd59f9cc9c8743e
|
e38ad0e316a2ff4c68bed06217e685d55452a49c
|
refs/heads/master
| 2023-07-28T05:59:45.572835
| 2021-09-09T16:36:30
| 2021-09-09T16:36:30
| 402,607,387
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,831
|
r
|
dend.R
|
install.packages('pheatmap')
library(pheatmap)
install.packages("factoextra")
library(factoextra)
data = read.table('../../figures/muscle/expr.txt')
df = as.matrix(data)
#df[which(df < 5, arr.ind = T)] = 5
#df[which(df > 15, arr.ind = T)] = 15
dfs = apply(df, 1, scale)
dfs = t(dfs)
dfs[which(dfs < -2, arr.ind = T)] = -2
dfs[which(dfs > 2, arr.ind = T)] = 2
s = apply(dfs[,1:27], 1, sum)
dfs2 = dfs[order(s),]
pheatmap(dfs2,
show_rownames = F,
show_colnames = F,
cluster_cols = F,
cluster_rows=F,
filename='expr_sort.pdf',
height=6,
width=5,
scale = "none",
color =colorRampPalette(c("#8854d0", "#ffffff","#fa8231"))(100),
clustering_distance_cols = 'euclidean',
clustering_method = 'single',
)
# Label columns with sampling time points (days).
# NOTE(review): dfs_avg is not created until a few lines below
# (dfs_avg = dfs_1 + dfs_2), so running this script top-to-bottom fails
# here with "object 'dfs_avg' not found". This line presumably belongs
# after dfs_avg is built -- confirm the intended order.
colnames(dfs_avg) = c(0, 0.5, 1, 2, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5,
                      7, 7.5, 8, 8.5, 9, 9.5, 10, 11, 12, 13, 14,
                      16, 20, 30, 40)
# Re-scale each row of df, then split alternating columns
# (presumably two replicates per time point -- TODO confirm).
dfs = t(apply(df, 1, scale))
dfs_1 = dfs[,seq(1, 54,by=2)]   # odd-numbered columns
dfs_2 = dfs[,seq(2, 54,by=2)]   # even-numbered columns
dfs_avg = dfs_1 + dfs_2         # sum of the two column sets
dfs_bind = rbind(dfs_1, dfs_2)  # both column sets stacked row-wise
# Hierarchical clustering of time points, single linkage.
dfdist = dist(t(dfs_avg))
dfhc = hclust(dfdist, method='single')
fviz_dend(dfhc)
fviz_dend(dfhc, k = 4,
cex = 0.5,
k_colors = c("#2E9FDF", "#00AFBB", "#E7B800", "#FC4E07"),
color_labels_by_k = TRUE,
rect = TRUE
)
colnames(dfs_bind) = c(0, 0.5, 1, 2, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5,
7, 7.5, 8, 8.5, 9, 9.5, 10, 11, 12, 13, 14,
16, 20, 30, 40)
dfdist = dist(t(dfs_bind))
dfhc = hclust(dfdist, method='complete')
fviz_dend(dfhc)
fviz_dend(dfhc, k = 3,
cex = 1,
lwd = 0.8,
#xlab = 'Day',
ylab = '',
k_colors = c("#00AFBB", "#E7B800", "#FC4E07"),
color_labels_by_k = TRUE,
rect = F
)
|
6623d55f77a9ad5192a6fb534f110b4fa992bac3
|
b8a19cc9c443d367da8ce10c435a8c7d9bbffa9b
|
/man/plot.missingness.Rd
|
e263e2fcac9aaecf8858f1a2c655b1f3b41c7a9c
|
[
"MIT"
] |
permissive
|
g3rley/healthcareai-r
|
9b0a68cc5406f2af2a85dc5318769a94075787a6
|
9b00c1be1daaa5c387ecee910d7571684447e4ff
|
refs/heads/main
| 2023-07-21T19:08:15.174485
| 2022-09-01T17:16:55
| 2022-09-01T17:16:55
| 451,652,775
| 1
| 0
|
NOASSERTION
| 2022-01-24T22:20:57
| 2022-01-24T22:20:57
| null |
UTF-8
|
R
| false
| true
| 1,110
|
rd
|
plot.missingness.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/missingness.R
\name{plot.missingness}
\alias{plot.missingness}
\title{Plot missingness}
\usage{
\method{plot}{missingness}(
x,
remove_zeros = FALSE,
max_char = 40,
title = NULL,
font_size = 11,
point_size = 3,
print = TRUE,
...
)
}
\arguments{
\item{x}{Data frame from \code{\link{missingness}}}
\item{remove_zeros}{Remove variables with no missingness from the plot?
Default = FALSE}
\item{max_char}{Maximum length of variable names to leave untruncated.
Default = 40; use \code{Inf} to prevent truncation. Variable names longer
than this will be truncated to leave the beginning and end of each variable
name, bridged by " ... ".}
\item{title}{Plot title}
\item{font_size}{Relative size of all fonts in plot, default = 11}
\item{point_size}{Size of dots, default = 3}
\item{print}{Print the plot? Default = TRUE}
\item{...}{Unused}
}
\value{
A ggplot object, invisibly.
}
\description{
Plot missingness
}
\examples{
pima_diabetes \%>\%
missingness() \%>\%
plot()
}
\seealso{
\code{\link{missingness}}
}
|
744ff39998fde4b8c62f574205417d1588e6c5eb
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/tor/tests/test-list_any.R
|
d57a29d0ccf6db3f65a702ed7aec42b3acfcea22
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,304
|
r
|
test-list_any.R
|
context("list_any")
test_that("list_any with read.csv lists (file)named dataframes", {
res <- list_any(
tor_example("csv"),
utils::read.csv,
regexp = "[.]csv$"
)
expect_is(res, "list")
expect_named(res, c("csv1", "csv2"))
expect_is(res[[1]], "data.frame")
expect_is(res[[1]], "tbl")
})
test_that("list_any accepts lambda functions and formulas", {
res <- list_any(
tor_example("rdata"),
~ get(load(.x))
)
expect_is(res, "list")
expect_named(res, c("rdata1", "rdata2"))
expect_is(res[[1]], "data.frame")
expect_identical(
list_any(
tor_example("rdata"),
function(x) get(load(x))
),
res
)
})
test_that("list_any reads specific files extention in a mixed directory", {
expect_is(
list_any(
tor_example("mixed"),
utils::read.csv,
regexp = "[.]csv$"
),
"list"
)
})
test_that("list_any errs with informative message if `regexp` matches no file", {
expect_error(
list_any(
tor_example("csv"),
get(load(.)),
regexp = "[.]rdata$"
),
"Can't find.*rdata"
)
})
test_that("list_any passes arguments to the reader function via `...`", {
expect_is(
list_any(
tor_example("csv"),
read.csv
)[[2]]$y,
"factor"
)
expect_is(
list_any(
tor_example("csv"),
~ read.csv(., stringsAsFactors = FALSE)
)[[2]]$y,
"character"
)
})
test_that("list_any with emtpy path reads from working directory", {
expect_is(
list_any(, read.csv, "[.]csv")[[1]],
"data.frame"
)
})
test_that("list_any is sensitive to `ignore.case`", {
expect_named(
list_any(
tor_example("mixed"),
function(x) get(load(x)),
regexp = "[.]rdata$",
ignore.case = FALSE
),
c("lower_rdata")
)
expect_named(
list_any(
tor_example("mixed"),
function(x) get(load(x)),
regexp = "[.]rdata$",
ignore.case = TRUE
),
c("lower_rdata", "upper_rdata")
)
expect_named(
list_any(
tor_example("mixed"),
function(x) get(load(x)),
regexp = "[.]csv$",
ignore.case = TRUE,
invert = TRUE
),
c("lower_rdata", "rda", "upper_rdata")
)
})
|
b012c9edcc1e66c8cd6833ec6b5f88a5c856f4c4
|
1460dc122a0cb6584ed382135f78e78d2520030a
|
/test.R
|
56436210ec762560f744470200ff4ab3f1f6cdc7
|
[] |
no_license
|
douglasquinn/programmingPrepClass
|
ec626835ba9ce3a4f9055a1f03c9bd7854613e73
|
b5c71a1dcc9e8b37e58de1cf5dd71ecb42058f69
|
refs/heads/main
| 2023-08-04T09:58:52.990083
| 2021-09-30T23:07:15
| 2021-09-30T23:07:15
| 410,948,898
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 35
|
r
|
test.R
|
# NOTE(review): load() restores the saved objects into the environment and
# returns a character vector of their *names* -- so `stats` here holds the
# object name(s), not the data. To capture a single object directly, use
# readRDS() on an .rds file, or get() the name returned by load().
stats <- load("stats.rdata")
stats
|
50542ed24ab03e857fa761bf0a8b3c3cb405269b
|
68eaf6ab25ad2af91fbcffd994b1f0dbe710b801
|
/Unif_one_samp.r
|
966f291e38124d10317fe2072da3a5b311aaa266
|
[] |
no_license
|
idc9/FalseConfidence
|
ac13107f8d9bb6df2d93bfe19d3e6027738113a1
|
6b8a1c0d55a21a1cf97c579f02cf9ae3dbbf50fe
|
refs/heads/master
| 2020-03-23T02:39:19.631187
| 2018-07-19T02:14:50
| 2018-07-19T02:14:50
| 140,984,669
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,474
|
r
|
Unif_one_samp.r
|
library(latex2exp)
set.seed(1000)
# Sampling probability of the event P(A^c) < alpha.
# Sampling probability of the event P(A^c) < alpha for n iid observations
# with true parameter theta_0, where A is the epsilon-ball around theta_0.
# Scalar in epsilon; vectorized over epsilon via Vectorize() below.
pFun <- function(epsilon, alpha, n, theta_0) {
  lower_ratio <- (theta_0 - epsilon) / theta_0
  upper_ratio <- (theta_0 + epsilon) / theta_0
  # Posterior mass gap between the two ball boundaries.
  gap <- lower_ratio^(-n) - upper_ratio^(-n)
  term1 <- min(1 / alpha, 1 / gap) * lower_ratio^n
  term2 <- upper_ratio^n
  # Indicator that epsilon is small enough for the extra mass term.
  threshold <- theta_0 * ((1 - alpha)^(-1 / n) - 1)
  term3 <- (epsilon <= threshold) * (1 - lower_ratio^n)
  alpha * term1 + (1 - (1 - alpha) * term2) * term3
}
pFun <- Vectorize(pFun, vectorize.args = "epsilon")
# Set the global parameters.
theta_0 = 1
alpha = .5
dx = .001
pdf('Figure2.pdf', width=11, height=11, paper = "USr")
par(mfrow=c(1,3), pty = "s")
#---------------------------------------------------------------------------------------------------------------
# Produce the sample probability versus epsilon plots for various values of n.
#---------------------------------------------------------------------------------------------------------------
epsilon = seq( dx, 1, by=dx)
n_vals = c(1,5)
plot( NULL, NULL, ylab='sampling probability', ylim=0:1,
xlab=TeX('radius of $\\epsilon$-ball'), cex.lab=2, cex.main=2, cex.axis=2, xlim=range(epsilon))
# Loop over all values of n.
for(t in 1:length(n_vals)){
n = n_vals[t]
# Compute the sampling probability that the posterior P(A^c) < alpha with an emirical mean.
lines( epsilon, pFun( epsilon, alpha=alpha, n=n, theta_0=theta_0), lty=t, lwd=2)
}
legend( 'topright', bty='n', legend=c('n = 1','n = 5'), lwd=2, lty=1:2, cex=2)
#---------------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------------
# Produce the density plots for various values n.
#---------------------------------------------------------------------------------------------------------------
grid = seq( dx, 3, by=dx)
num_posterior_samples = 7
for(t in 1:length(n_vals)){
n = n_vals[t]
# Sample posterior distributions.
post_density = NULL
for(k in 1:num_posterior_samples){
# Sample data.
x_max = theta_0*rbeta( n=1, shape1=n_vals[t], shape2=1)
# Compute the posterior density.
post_density = cbind( post_density, n * x_max^(n) * grid^(-n-1) * (grid >= x_max))
}
plot( grid, post_density[,1], ylab=NA, xlab=TeX('$\\theta$ | $x_{1}^{n}$'), type='l', lwd=2,
cex.lab=2, cex.main=2, cex.axis=2, col='white', ylim=c(0,6),
main=TeX(paste0('$n = $',toString(n_vals[t]))),
panel.first=polygon( x=grid, y=c(0, post_density[c(-1,-length(grid)),1], 0), border=NA,
col=rgb(147,112,219,255/6,max=255), density=80,angle=-45))
for(k in 2:num_posterior_samples){
lines( grid, post_density[,k], lwd=2, col='white',
panel.first=polygon( x=grid, y=c(0, post_density[c(-1,-length(grid)),k], 0), border=NA,
col=rgb(147,112,219,255/6,max=255), density=80,angle=-45))
}
for(k in 1:num_posterior_samples) lines( grid, post_density[,k], lwd=1)
polygon( x=c( theta_0-.3, theta_0-.3, theta_0+.3, theta_0+.3), y=c(0,7,7,0), col='green', border=NA,
density=20, angle=-45)
}
#---------------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------------
dev.off()
|
ec58ace58e2831b4bf407ae8a56bb20f8ccd2aac
|
96dac3b379db632cc577600f1041ecafbddca400
|
/Working scripts/The final scripts (sourced scripts)/distribpart.R
|
b240f03293dcfb8c817d6dc4c9b6db3fab81c566
|
[] |
no_license
|
kaye11/Some-R-scripts
|
78e53b0c37254945120fca91255801b392835cb1
|
632b16a3269c7ce5c7c14efceb26fb02bf66eac1
|
refs/heads/master
| 2021-01-23T06:44:20.200098
| 2016-09-01T18:56:25
| 2016-09-01T18:56:25
| 21,462,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,977
|
r
|
distribpart.R
|
library(ggplot2)
library(reshape)
## always use data from t1
## NOTE(review): `t1` is assumed to already exist in the workspace (loaded by
## an earlier script); this file does not create it.
library(data.table)
NT = data.table(t1)
# Within each track A, add a per-track frame counter T = 1..N.
NT2=NT[, T := seq(from = 1L, by = 1L, length.out = .N), by = A]
##for distrib particles analysis
# Largest track id observed at each time point, used below as the "number of
# tracks" at that time.
# NOTE(review): max(A) per T only equals the track count if ids are assigned
# sequentially from 1 -- confirm.
NTS <- aggregate( A ~ T , data = NT2 , max, na.rm = TRUE )
#every min: one scatter per 60-second window over the first 10 minutes
qplot(T, A, data=NTS [NTS$T<61, ])+ labs(list(x = "Time (s)", y = "Number of Tracks"))+ geom_line()
qplot(T, A, data=NTS [NTS$T>60 & NTS$T<121, ])+ labs(list(x = "Time (s)", y = "Number of Tracks"))
qplot(T, A, data=NTS [NTS$T>120 & NTS$T<181, ])+ labs(list(x = "Time (s)", y = "Number of Tracks"))
qplot(T, A, data=NTS [NTS$T>180 & NTS$T<241, ])+ labs(list(x = "Time (s)", y = "Number of Tracks"))
qplot(T, A, data=NTS [NTS$T>240 & NTS$T<301, ])+ labs(list(x = "Time (s)", y = "Number of Tracks"))
qplot(T, A, data=NTS [NTS$T>300 & NTS$T<361, ])+ labs(list(x = "Time (s)", y = "Number of Tracks"))
qplot(T, A, data=NTS [NTS$T>360 & NTS$T<421, ])+ labs(list(x = "Time (s)", y = "Number of Tracks"))
qplot(T, A, data=NTS [NTS$T>420 & NTS$T<481, ])+ labs(list(x = "Time (s)", y = "Number of Tracks"))
qplot(T, A, data=NTS [NTS$T>480 & NTS$T<541, ])+ labs(list(x = "Time (s)", y = "Number of Tracks"))
qplot(T, A, data=NTS [NTS$T>540 & NTS$T<601, ])+ labs(list(x = "Time (s)", y = "Number of Tracks"))
# every 2min: same data in 120-second windows
qplot(T, A, data=NTS [NTS$T<121, ])+ labs(list(x = "Time (s)", y = "Number of Tracks"))+geom_line()
qplot(T, A, data=NTS [NTS$T>120 & NTS$T<241, ])+ labs(list(x = "Time (s)", y = "Number of Tracks"))
qplot(T, A, data=NTS [NTS$T>240 & NTS$T<361, ])+ labs(list(x = "Time (s)", y = "Number of Tracks"))
qplot(T, A, data=NTS [NTS$T>360 & NTS$T<481, ])+ labs(list(x = "Time (s)", y = "Number of Tracks"))
qplot(T, A, data=NTS [NTS$T>480 & NTS$T<601, ])+ labs(list(x = "Time (s)", y = "Number of Tracks"))
##if you want to check untransformed data (original t1, without the per-track counter)
TO <- aggregate( A ~ T , data = t1 , max, na.rm = TRUE )
qplot(T,A, data=TO [TO$T<121, ]) + labs(list(x = "Time (s)", y = "Number of Tracks"))
|
9f217a1e5ce77f447e58489cfdf5f432a1f7ed20
|
950fb55ee7441e4d0ddeeb858b4c47641675c5d2
|
/regression/data/data_setup.R
|
9b56e660e3d902fee4af95865245575f3ee53cd5
|
[] |
no_license
|
lmyint/shiny_education_apps
|
5a658316862c2dff16f1b8e9173603fe7fead10a
|
d78384530eeeb26956e3d274d5c44142b22ad809
|
refs/heads/master
| 2021-01-16T18:26:31.725792
| 2017-10-10T17:59:54
| 2017-10-10T17:59:54
| 100,080,427
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,456
|
r
|
data_setup.R
|
library(readr)
library(glmnet)
library(dplyr)
## Kaggle: Wisconsin breast cancer data
## Prepare the data: rename awkward columns, bin every numeric feature into
## tertiles ("low"/"medium"/"high"), split train/test, then fit a CV lasso and
## exhaustive best-subsets logistic models.
bc <- read_csv("breast-cancer-wisconsin-data.csv")
# The "concave points_*" columns contain spaces; rename so they can be used in
# model formulas without backticks.
colnames(bc)[colnames(bc)=="concave points_mean"] <- "concave_points_mean"
colnames(bc)[colnames(bc)=="concave points_se"] <- "concave_points_se"
colnames(bc)[colnames(bc)=="concave points_worst"] <- "concave_points_worst"
# Drop the first (id) column; columns 2:32 are diagnosis + 30 numeric features.
bc <- bc[,2:32]
bc_expanded <- bc
# Quick look at each feature's distribution (5 x 6 grid of density plots).
par(mfrow = c(5,6), mar = c(3,3,3,1))
for (i in 2:31) {
	plot(density(bc[[i]]), xlab = "", ylab = "", main = colnames(bc)[i])
}
# Add a categorical tertile version "<name>_cat" of every numeric feature,
# cut at the 33rd/67th percentiles.
for (i in 2:31) {
	new_col <- paste0(colnames(bc)[i], "_cat")
	bc_expanded[[new_col]] <- cut(bc[[i]], c(-Inf, quantile(bc[[i]], c(0.33,0.67,1))))
	levels(bc_expanded[[new_col]]) <- c("low", "medium", "high")
}
write_csv(bc_expanded, "breast-cancer-wisconsin-data-expanded.csv")
# Round-trip through CSV, then coerce diagnosis and all *_cat columns to factor.
dataset <- read_csv("breast-cancer-wisconsin-data-expanded.csv")
dataset$diagnosis <- as.factor(dataset$diagnosis)
dataset$diagnosis_cat <- dataset$diagnosis
factor_cols <- colnames(dataset)[grep("_cat", colnames(dataset))]
for (x in factor_cols) {
	dataset[[x]] <- as.factor(dataset[[x]])
}
## Split into training and test: seeded 80/20 random split.
set.seed(16)
n_train <- floor(nrow(dataset)*0.8)
rows_train <- sample.int(nrow(dataset), size = n_train)
rows_test <- setdiff(seq_len(nrow(dataset)), rows_train)
train <- dataset[rows_train,]
test <- dataset[rows_test,]
# Cross-validated lasso (alpha = 1) on the 30 numeric features, with lambda
# chosen by misclassification rate.
cvfit_lasso <- cv.glmnet(x = as.matrix(train[,2:31]), y = train$diagnosis, family = "binomial", alpha = 1, type.measure = "class")
par(mfrow = c(1,1))
plot(cvfit_lasso)
vars <- tail(colnames(train), -1)
## 3 vars: 17.837 sec
## 4 vars: 140.064 sec
# Exhaustive best-subsets search over all 1- to 4-variable logistic models,
# recording each model's AIC (timing notes above).
system.time({
fits <- do.call(rbind, lapply(1:4, function(i) {
	combos <- combn(vars, m = i)
	do.call(rbind, lapply(seq_len(ncol(combos)), function(j) {
		form <- paste("diagnosis ~", paste(combos[,j], collapse = "+"))
		aic <- glm(as.formula(form), data = train, family = binomial)$aic
		data.frame(num_vars = i, formula = form, aic = aic)
	}))
}))
})
# Lowest-AIC model for each subset size (top_n with n = -1 keeps the minimum).
best_fits_by_num_vars <- fits %>%
	group_by(num_vars) %>%
	top_n(n = -1, wt = aic) %>%
	as.data.frame %>%
	mutate(formula = as.character(formula))
# Predicted success probabilities from a fitted logistic regression.
#
# Args:
#   fit:  a glm fit with family = binomial.
#   data: data frame of predictors passed to predict().
# Returns: numeric vector of probabilities on (0, 1).
#
# predict.glm() returns the link (log-odds) scale by default. The original
# transform exp(logodds)/(1 + exp(logodds)) overflows to Inf/Inf = NaN for
# log-odds beyond ~709; plogis() is the numerically stable inverse logit and
# is mathematically identical.
predict_prob <- function(fit, data) {
	logodds <- predict(fit, data)
	prob <- plogis(logodds)
	return(prob)
}
# Choose the classification cut-off that maximizes accuracy on `data`.
#
# Scans thresholds 0, 0.001, ..., 1 and returns the first one with the highest
# proportion of correct classifications (predicted prob > threshold vs. the
# 0/1-coded diagnosis). Ties resolve to the smallest threshold.
optimal_prob <- function(fit, data) {
	fitted_probs <- predict_prob(fit, data)
	thresholds <- seq(0, 1, 0.001)
	observed <- as.integer(data$diagnosis) - 1
	accuracy <- vapply(thresholds, function(threshold) {
		predicted <- fitted_probs > threshold
		mean(predicted == observed)
	}, numeric(1))
	thresholds[which.max(accuracy)]
}
# 0/1 coding of the test outcome (first factor level of diagnosis -> 0).
true_outcome_test <- as.integer(test$diagnosis)-1
# Test-set accuracy of the best 1- to 4-variable subset models, each classified
# at the probability threshold that was optimal on the training set.
acc_best_subsets <- sapply(1:4, function(nv) {
	form <- filter(best_fits_by_num_vars, num_vars==nv)$formula
	## Get fit from training data
	fit <- glm(as.formula(form), data = train, family = binomial)
	opt_p <- optimal_prob(fit, train)
	## Get predicted probabilities on test
	pred_prob_test <- predict_prob(fit, test)
	## Get predicted outcomes on test
	pred_outcome_test <- pred_prob_test > opt_p
	acc <- sum(pred_outcome_test==true_outcome_test)/length(true_outcome_test)
	return(acc)
})
# Lasso test accuracy using the 1-SE lambda.
pred_outcome_lasso_test <- predict(cvfit_lasso, newx = as.matrix(test[,2:31]), s = "lambda.1se", type = "class")
acc_lasso <- sum(pred_outcome_lasso_test==test$diagnosis)/nrow(test)
# Persist everything downstream code loads from regression_data.rda.
# NOTE(review): consumers of this file depend on these exact object names.
save(train, test, true_outcome_test, acc_best_subsets, acc_lasso, predict_prob, optimal_prob, file = "regression_data.rda")
|
022aefa7da2022c06a5664262f7e1fb98e24c723
|
eb127bbb4e75966296b4a2234250ba6819e513b1
|
/code_analysis_obj/utils.R
|
7ddb68890c461f6d89b1318cd04be50647a61f53
|
[] |
no_license
|
davidchampredon/stiagent
|
29cc33cc8e1a54763ccd5f12f05949ac80354575
|
dc6cd187b7649ee4517fc27ea66aff377c8ff892
|
refs/heads/master
| 2021-01-10T12:50:45.273558
| 2016-03-21T03:45:58
| 2016-03-21T03:45:58
| 43,753,973
| 0
| 0
| null | 2015-11-18T01:53:12
| 2015-10-06T13:56:06
|
C++
|
UTF-8
|
R
| false
| false
| 2,413
|
r
|
utils.R
|
library(plyr)
library(dplyr)
library(tidyr)
get.nMC <- function(sim){
	### Return the number of Monte-Carlo iterations: the count of elements of
	### `sim` whose name contains "MC_".
	length(grep("MC_", names(sim)))
}
get.timeseries <- function(sim){
	### Retrieve the simulation time series of every Monte-Carlo iteration and
	### stack them into one data frame (column `mc` identifies the run).
	### Adds convenience columns: month/year indices, population ratios, and,
	### for every STI listed in sim[[1]]$STInames, a prevalence column
	### ("prev<STI>") and an incidence column ("inc<STI>").
	stinames <- sim[[1]]$STInames
	n.sti <- length(stinames)
	n.mc <- get.nMC(sim)
	D <- list()
	# seq_len() (not 1:n) so zero iterations / zero STIs skip cleanly.
	for(i in seq_len(n.mc)){
		# from list to data frame:
		D[[i]]<- as.data.frame(sim[[i]]$df_sim)
		D[[i]]$mc <- i
		# add useful transformed variables:
		D[[i]]$month <- ceiling(D[[i]]$time*12)
		D[[i]]$year <- ceiling(D[[i]]$time)
		D[[i]]$fem.ratio <- D[[i]]$nFemale/D[[i]]$nAlive
		D[[i]]$partn.ratio <- D[[i]]$nPartn/D[[i]]$nAlive
		D[[i]]$sp.ratio <- D[[i]]$nSp/D[[i]]$nAlive
		D[[i]]$csw.prop <- D[[i]]$nSp/D[[i]]$nAlive
		# STIs specifics:
		for(k in seq_len(n.sti)) {
			# prevalence: infected count as a proportion of the alive population
			D[[i]]$tmp <- D[[i]][,stinames[k]]/D[[i]]$nAlive
			names(D[[i]])[length(names(D[[i]]))] <- paste0("prev",stinames[k])
			# incidence: positive first differences of the count series
			D[[i]]$tmp <- c(0,pmax(0,diff(D[[i]][,stinames[k]])))
			names(D[[i]])[length(names(D[[i]]))] <- paste0("inc",stinames[k])
		}
	}
	# dplyr::rbind_all() was deprecated and later removed from dplyr;
	# bind_rows() is its direct replacement.
	return(dplyr::bind_rows(D))
}
calc.incidence.rate <- function(sim,period,stiname){
	### Calculate incident cases and incidence rate of `stiname`, aggregated by
	### `period` (a column name produced by get.timeseries, e.g. "month" or
	### "year") for every MC iteration. Returns one row per (period, mc) with
	### columns "inc.<period>.<stiname>" and "incrate.<period>.<stiname>".
	DF.all <- get.timeseries(sim)
	DF <- DF.all[,c("time",period,"mc",stiname,"nAlive")]
	z = unlist(c(DF[,stiname]))
	DF$inc <- c(0,pmax(0,diff(z)))
	# Manage the transition b/w 2 MC iterations
	# (the 'diff' did not manage that):
	DF$inc[DF$time==0] <- 0
	DF.summ <- ddply(DF,c(period,"mc"),summarize,
					 sinc = sum(inc),
					 avgpop = mean(nAlive))
	# (removed stray debug calls head()/str() -- str() printed to the console
	# on every invocation)
	DF.summ$incrate <- DF.summ$sinc / DF.summ$avgpop
	# Avoid NaN from 0/0 when nobody was alive in the period.
	DF.summ$incrate[DF.summ$avgpop == 0] <- 0
	names(DF.summ)[names(DF.summ)=="sinc"] <- paste("inc",period,stiname,sep=".")
	names(DF.summ)[names(DF.summ)=="incrate"] <- paste("incrate",period,stiname,sep=".")
	return(DF.summ)
}
get.population <- function(sim,alive.only=TRUE){
	### Retrieve the population data frame of every MC iteration and stack them
	### into one data frame, adding `mc` (iteration index) and a readable
	### `Gender` label. By default only rows with isalive > 0 are returned.
	n.mc <- get.nMC(sim)
	D <- list()
	for(i in seq_len(n.mc)){
		D[[i]] <- as.data.frame(sim[[i]]$population)
		D[[i]]$mc <- i
		# gender == 1 is labelled "male"; every other code defaults to "female"
		D[[i]]$Gender <- "female"
		D[[i]]$Gender[D[[i]]$gender==1] <- "male"
	}
	# dplyr::rbind_all() is defunct; bind_rows() is the drop-in replacement.
	D.all <- dplyr::bind_rows(D)
	if(alive.only) D.all <- subset(D.all,isalive>0)
	return(D.all)
}
|
042809a2eceb0438d709474e659bc6581e5f5b90
|
63e1231faa30a4cea6dd9f25e87c2372383aa2f4
|
/R/Data_documentation.R
|
2e12472cbd055a48075559de841226d299378a58
|
[] |
no_license
|
cran/MSEtool
|
35e4f802f1078412d5ebc2efc3149c46fc6d13a5
|
6b060d381adf2007becf5605bc295cca62f26770
|
refs/heads/master
| 2023-08-03T06:51:58.080968
| 2023-07-19T22:10:23
| 2023-07-20T01:47:18
| 145,912,213
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,215
|
r
|
Data_documentation.R
|
# This file is automatically built by build_tools/build_data.r
# Don't edit by hand!
#
#' @rdname Stock-class-objects
"Albacore"
#' @rdname Stock-class-objects
"Blue_shark"
#' @rdname Stock-class-objects
"Bluefin_tuna"
#' @rdname Stock-class-objects
"Bluefin_tuna_WAtl"
#' @rdname Stock-class-objects
"Butterfish"
#' @rdname Stock-class-objects
"Herring"
#' @rdname Stock-class-objects
"Mackerel"
#' @rdname Stock-class-objects
"Porgy"
#' @rdname Stock-class-objects
"Rockfish"
#' @rdname Stock-class-objects
"Snapper"
#' @rdname Stock-class-objects
"Sole"
#' @rdname Stock-class-objects
"Toothfish"
#' Stock class objects
#'
#' Example objects of class Stock
#'
#' @name Stock-class-objects
#' @format NULL
#' @examples
#' avail("Stock")
NULL
#' @rdname Fleet-class-objects
"DecE_Dom"
#' @rdname Fleet-class-objects
"DecE_HDom"
#' @rdname Fleet-class-objects
"DecE_NDom"
#' @rdname Fleet-class-objects
"FlatE_Dom"
#' @rdname Fleet-class-objects
"FlatE_HDom"
#' @rdname Fleet-class-objects
"FlatE_NDom"
#' @rdname Fleet-class-objects
"Generic_DecE"
#' @rdname Fleet-class-objects
"Generic_FlatE"
#' @rdname Fleet-class-objects
"Generic_Fleet"
#' @rdname Fleet-class-objects
"Generic_IncE"
#' @rdname Fleet-class-objects
"IncE_HDom"
#' @rdname Fleet-class-objects
"IncE_NDom"
#' @rdname Fleet-class-objects
"Low_Effort_Non_Target"
#' @rdname Fleet-class-objects
"Target_All_Fish"
#' @rdname Fleet-class-objects
"Targeting_Small_Fish"
#' Fleet class objects
#'
#' Example objects of class Fleet
#'
#' @name Fleet-class-objects
#' @format NULL
#' @examples
#' avail("Fleet")
NULL
#' @rdname Obs-class-objects
"Generic_Obs"
#' @rdname Obs-class-objects
"Imprecise_Biased"
#' @rdname Obs-class-objects
"Imprecise_Unbiased"
#' @rdname Obs-class-objects
"Perfect_Info"
#' @rdname Obs-class-objects
"Precise_Biased"
#' @rdname Obs-class-objects
"Precise_Unbiased"
#' Obs class objects
#'
#' Example objects of class Obs
#'
#' @name Obs-class-objects
#' @format NULL
#' @examples
#' avail("Obs")
NULL
#' @rdname Imp-class-objects
"Overages"
#' @rdname Imp-class-objects
"Perfect_Imp"
#' Imp class objects
#'
#' Example objects of class Imp
#'
#' @name Imp-class-objects
#' @format NULL
#' @examples
#' avail("Imp")
NULL
#' DataSlots
#'
#' Dataframe with details of slots in Dat object
#'
#'
"DataSlots"
#' @rdname Data-class-objects
"Atlantic_mackerel"
#' @rdname Data-class-objects
"China_rockfish"
#' @rdname Data-class-objects
"Cobia"
#' @rdname Data-class-objects
"Example_datafile"
#' @rdname Data-class-objects
"Gulf_blue_tilefish"
#' @rdname Data-class-objects
"ourReefFish"
#' @rdname Data-class-objects
"Red_snapper"
#' @rdname Data-class-objects
"Simulation_1"
#' Data class objects
#'
#' Example objects of class Data
#'
#' @name Data-class-objects
#' @format NULL
#' @examples
#' avail("Data")
NULL
#' @rdname OM-class-objects
"testOM"
#' OM class objects
#'
#' Example objects of class OM
#'
#' @name OM-class-objects
#' @format NULL
#' @examples
#' avail("OM")
NULL
#' @rdname MOM-class-objects
"Albacore_TwoFleet"
#' MOM class objects
#'
#' Example objects of class MOM
#'
#' @name MOM-class-objects
#' @format NULL
#' @examples
#' avail("MOM")
NULL
#' SimulatedData Data
#'
#' An object of class Data
#'
"SimulatedData"
#' ReqData
#'
#' Dataframe with required data slots for built-in MPs
#'
#'
"ReqData"
#' LHdatabase
#'
#' Database from the FishLife package with predicted life-history parameters for all species on FishBase
#'
#' @references Thorson, J. T., S. B. Munch, J. M. Cope, and J. Gao. 2017.
#' Predicting life history parameters for all fishes worldwide. Ecological Applications. 27(8): 2262--2276
#' @source \url{https://github.com/James-Thorson-NOAA/FishLife/}
#'
#'
"LHdatabase"
#' StockDescription
#'
#' A data.frame with description of slots for class Stock
#'
"StockDescription"
#' FleetDescription
#'
#' A data.frame with description of slots for class Fleet
#'
"FleetDescription"
#' ObsDescription
#'
#' A data.frame with description of slots for class Obs
#'
"ObsDescription"
#' ImpDescription
#'
#' A data.frame with description of slots for class Imp
#'
"ImpDescription"
#' HistDescription
#'
#' A data.frame with description of slots for class Hist
#'
"HistDescription"
#' DataDescription
#'
#' A data.frame with description of slots for class Data
#'
"DataDescription"
#' OMDescription
#'
#' A data.frame with description of slots for class OM
#'
"OMDescription"
#' MSEDescription
#'
#' A data.frame with description of slots for class MSE
#'
"MSEDescription"
#' Taxa_Table
#'
#' Database from rfishbase
#'
#' @references Carl Boettiger and Duncan Temple Lang and Peter Wainwright
#' 2012. Journal of Fish Biology
#' @source \doi{10.1111/j.1095-8649.2012.03464.x}
#'
#'
"Taxa_Table"
|
c117e8b2a3cf3e196ea6dd55f1e9a37625506eb0
|
aa6c6c778f43c75d40a40c0599ebfc2a31f5267d
|
/01_clean-data.R
|
54a26727da8b31097f03b43f844e9228ed466a9a
|
[] |
no_license
|
wbeck1990/temp-nutrient-interactions
|
5770720687244d97fd87e805d7f89297f2626c9f
|
06d5225fe83ff64ba4dff703addee06bb6c836a1
|
refs/heads/master
| 2020-04-05T02:00:19.944409
| 2018-12-18T16:36:22
| 2018-12-18T16:36:22
| 156,459,861
| 0
| 0
| null | 2018-11-06T22:59:12
| 2018-11-06T22:59:12
| null |
UTF-8
|
R
| false
| false
| 34
|
r
|
01_clean-data.R
|
# script to clean and wrangle data
|
e04adff224966124239782418087647251138bdf
|
b20188200897e1b86950b186a143b18c38217ec8
|
/bitcoinpred_old_algorithm.R
|
8e15b1aae3fa861354044f083977916249cde745
|
[] |
no_license
|
roptanov/bitcoin
|
c751de61d7bca60221c98e11d6b6b8ec95a2b4db
|
cb4b33f0161912854a4bba1934206506288ca38d
|
refs/heads/master
| 2021-08-23T12:01:45.093009
| 2017-12-04T20:40:08
| 2017-12-04T20:40:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,433
|
r
|
bitcoinpred_old_algorithm.R
|
# Predict next-period bitcoin price direction (UP/DOWN) from technical
# indicators, comparing a random forest against a pruned decision tree.
# NOTE(review): dplyr and lubridate are loaded twice below -- harmless but
# redundant.
library(dplyr)
library(rusquant)
library(quantmod)
library(rpart)
library(rpart.plot)
library(caret)
library(curl)
library(nnet)
library(lubridate)
library(dplyr)
library("e1071")
library(ggplot2)
library(lubridate)
library(xts)
coindesk = read.csv('C:/Users/Ilya/Desktop/МОР/coindesk.csv', sep = ',', header=TRUE)
# Keep rows 2000-2742 of the daily series.
bitc = coindesk[2000:2742,]
bitc$Date = ymd_hms(bitc$Date) # convert to a time series
rownames(bitc) = bitc$Date
stocks <- xts(bitc[,-1], order.by=as.Date(bitc[,1], "%Y-%m-%d %H:%M:%S"))
bitc = stocks
# Label each bar UP/DOWN by close - open.
PriceChange<- Cl(bitc)-Op(bitc)
Responce<-data.frame(ifelse(PriceChange>0,"UP", "DOWN"))
colnames(Responce) = 'Result'
compare <- data.frame(PriceChange, Responce)
# Shift the labels back one bar so each row's response is the NEXT bar's
# direction (last row gets NA and is dropped by na.omit below).
Responce <- as.character(Responce$Result)
Responce <- c(Responce, NA)
Responce <- Responce[2:length(Responce)]
Responce
bitc = na.omit(bitc)
# Technical indicators (TTR via quantmod); the lookback windows were
# presumably hand-tuned.
ADX <- ADX(bitc, n=4)
ADX <- ADX[,4]
BB <- BBands(Cl(bitc), n=8, sd = 2)
BBdiff <- BB$up-BB$dn
BBpos <- (BB$up-Cl(bitc))/BBdiff
BB <- BBands(Cl(bitc), n=9, sd = 2)
BB = na.omit(BB)
BBdiff <- BB$up-BB$dn
CCI <- CCI(Cl(bitc),n=6)
CCImove <- diff(CCI(Cl(bitc),n=32))
RSI <- RSI(Cl(bitc),n=3)
RSImove <- diff(RSI(Cl(bitc),n=18))
EMA<-EMA(Cl(bitc),n=5)
EMAcross<- Cl(bitc)-EMA(Cl(bitc),n=4)
EMAmove <- diff(Cl(bitc)-EMA(Cl(bitc),n=4))
MACD<-MACD(Cl(bitc), nFast = 4, nSlow = 8, nSig = 7)
MACDdiff<-MACD[,1]-MACD[,2]
MACD<-MACD(Cl(bitc), nFast = 9, nSlow = 16, nSig = 7)
MACDmove<-diff(MACD[,1]-MACD[,2])
# NOTE(review): Responce was built from bitc BEFORE na.omit(bitc); confirm its
# length still matches the indicator rows here, and note that under R >= 4.0
# data.frame() leaves it as character -- randomForest classification requires a
# factor response, so factor(Responce) may be needed.
data<-data.frame(Responce,ADX,BBpos,CCI,CCImove,RSI,RSImove,EMAcross,EMAmove,MACDdiff,MACDmove)
colnames(data)<-c('Responce','ADX', 'BBpos','CCI','CCImove','RSI','RSImove','EMAcross','EMAmove','MACDdiff','MACDmove')
data<-na.omit(data)
# Chronological 80/20 split (no shuffling -- train on the past, test on the
# most recent bars).
train<-data[1:c(round(nrow(data)*0.8, digits = 0)), ]
test<-data[c(round(nrow(data)*0.8, digits = 0) + 1): nrow(data), ]
set.seed(3)
library(randomForest)
# try a random forest
model = randomForest(Responce~.,data=train,ntree=25)
model.predict = predict(model,newdata=test,ntree=25)
confusionMatrix(model.predict,test$Responce)
# try a decision tree
Resptree<-rpart(Responce~.,data=train, cp=.001, method="class")
prp(Resptree,type=2,extra=8)
printcp(Resptree)
Respprunedtree<-prune(Resptree,cp= 0.001)
prp(Respprunedtree, type=2, extra=8)
confusionMatrix(predict(Respprunedtree,train,type="class"), train[,1])
confusionMatrix(predict(Respprunedtree,test,type="class"), test[,1])
|
fcebd0b8c936324b2c9dfdf30ce66ce09a3d073c
|
4b5c3f74e8ea6e6384fb4f3514e962b75af1b397
|
/modules/analise/analiseUI.R
|
f89cd9ab44516f56584e2bcac42c41de02c46fab
|
[] |
no_license
|
FelipheStival/inmetShiny
|
3d4574ef99e4a4a875872d09abc144ccbe9c7a05
|
bd9bb83a2ba90e69d96966928e260ba9ef872378
|
refs/heads/main
| 2023-05-14T23:37:14.285945
| 2021-06-06T23:25:19
| 2021-06-06T23:25:19
| 334,207,426
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,452
|
r
|
analiseUI.R
|
#==================================================================
# Analise tabela UI
#==================================================================
# Shiny tab for the analysis table: a tab box whose "Tabela sumario" panel
# holds the spinner-wrapped summary data table plus a download button for the
# same summary.
analiseUI = function() {
  tabItem(tabName = "tabelaAnalise",
          tabBox(
            width = "100%",
            selected = "Tabela sumario",
            tabPanel(
              "Tabela sumario",
              withSpinner(dataTableOutput("tabelaSumario",width = "100%",height = "80vh")),
              downloadButton("DownloadSumario", label = "Download")
            )
          )
  )
}
#==================================================================
# dados perdidos UI
#==================================================================
# Shiny tab for the missing-data ("dados perdidos") view: one full-width box
# containing the plot output, wrapped in a loading spinner.
dadosperdidosUI = function() {
  missing_data_plot <- withSpinner(
    plotOutput("dadosPerdidosPlot", width = "100%", height = "85vh")
  )
  tabItem(
    tabName = "dadosPerdidosUI",
    box(width = 12, missing_data_plot)
  )
}
#==================================================================
# Tabela menu item
#==================================================================
# Sidebar menu entry "Analise" with two sub-items that route to the table tab
# and the missing-data tab defined above in this file.
itemMenuAnalise = function() {
  # build the sidebar menu entry
  menuItem(
    text = "Analise",
    tabName = "analiseUI",
    icon = icon("search"),
    menuSubItem(
      text = "Tabela",
      tabName = "tabelaAnalise",
      icon = icon("bar-chart")
    ),
    menuSubItem(
      text = "Dados perdidos",
      tabName = "dadosPerdidosUI",
      icon = icon("bar-chart")
    )
  )
}
|
09578b7369aaf20884be16a6838f442b089ff5aa
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diceR/inst/testfiles/indicator_matrix/libFuzzer_indicator_matrix/indicator_matrix_valgrind_files/1609959467-test.R
|
9a0515496f6039581969ff1917eae0b1634e029f
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 181
|
r
|
1609959467-test.R
|
# Auto-generated fuzzer/valgrind regression input for the internal
# diceR:::indicator_matrix entry point; str() displays the result structure.
# Do not hand-edit the numeric payload.
testlist <- list(x = c(1.01670330560775e-316, 7.39437241408225e-304, -8.53897486142116e-280, 2.1238739044437e-314))
result <- do.call(diceR:::indicator_matrix,testlist)
str(result)
|
def4681c577aa1a6604154ee8ae60ede948a004c
|
e443c50f825638cace4d329e73fe6faeb9a9bad1
|
/R/MagnesRutiner/vec.from.merds.to.farm.r
|
589ad04f7e03ad65bc90640648ed8713755066fb
|
[] |
no_license
|
Kotkot/RecaSimfish
|
07b3d1773497d0887e1b4903b9449bbad46ca8a9
|
03c7a33a9a7d9419712f9b62953a4a67c4509552
|
refs/heads/master
| 2020-07-05T05:01:48.064166
| 2019-08-15T11:47:11
| 2019-08-15T11:47:11
| 202,530,801
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 558
|
r
|
vec.from.merds.to.farm.r
|
vec.from.merds.to.farm <- function(x, antall.merds, antall.farm) {
  ## Aggregate per-merd (cage) matrices up to farm level.
  ##
  ## Each merd's matrix in `x` (named list) is weighted by that merd's share of
  ## the farm total (antall.merds / antall.farm, column-matched by merd name,
  ## with NA shares treated as 0) and the weighted matrices are summed.
  ## Entries where the farm total is zero are set to NA.
  ## Returns a one-element list named "AllMerds".
  merd.weights <- antall.merds / as.vector(antall.farm)
  merd.weights[is.na(merd.weights)] <- 0
  weighted.terms <- lapply(names(x), function(merd) {
    vals <- x[[merd]]
    vals[is.na(vals)] <- 0
    vals * merd.weights[, merd]
  })
  farm.level <- Reduce(`+`, weighted.terms)
  farm.level[antall.farm == 0] <- NA
  list(AllMerds = farm.level)
}
|
66142e1ae074c3bfe986981b55ddec63f17f6ed4
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615829691-test.R
|
d760b08b9157073f9922e0d17e6d9326c17bca7f
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 431
|
r
|
1615829691-test.R
|
# Auto-generated AFL/valgrind regression input for the internal
# CNull:::communities_individual_based_sampling_beta entry point; str()
# displays the result structure. Do not hand-edit the numeric payload.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.43812608695272e-308, 2.08853788077799e-236, 2.05226840067026e-289, 3.33870925339418e-294, 1.44867561321978e+306, 1.41286214203445e-303, 1.44695764522227e-303, 1.18177156179874e-294, 1.45810387698431e-303, 1.38386568550094e-48, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(2L, 9L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)
|
53d8a015310c5c6e9c1b35742a86d04b678b8200
|
c2dd13c7cc71651643d148b47dfacab970f39736
|
/plot.R
|
9bd442e300286190b08e7b364147949c85657456
|
[] |
no_license
|
shabss/exdata.proj2
|
a0729c4f8d28acddf125c77c0e1ca60b890aa87d
|
babc594ea654592a8ea49a7aebadb1c95c7580ed
|
refs/heads/master
| 2016-09-06T04:57:48.687139
| 2014-05-28T15:07:58
| 2014-05-28T15:07:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 140
|
r
|
plot.R
|
# Run the six per-plot scripts (plot1.R.txt .. plot6.R.txt) in numeric order;
# source() evaluates each in the global environment, same as six explicit
# calls. invisible() suppresses lapply's list of return values.
invisible(lapply(sprintf("plot%d.R.txt", 1:6), source))
|
94010a034f2cbc608c7d68dafd4eca651900fddf
|
66a4d7725ab7f37d1d536bfcd284a8f6a64431b2
|
/man/parse_status_response.Rd
|
459175c0e30603e42dd69a18bab86f70170b4b59
|
[] |
no_license
|
meerapatelmd/glitter
|
6ee867162657c2c83e80a02cf683bd132fde39d9
|
0b986b74682e2870b17c9bfc01112e2cbd42d046
|
refs/heads/master
| 2023-07-29T10:40:44.255146
| 2021-09-03T05:04:15
| 2021-09-03T05:04:15
| 296,978,497
| 1
| 0
| null | 2021-03-28T20:32:04
| 2020-09-20T01:09:41
|
R
|
UTF-8
|
R
| false
| true
| 691
|
rd
|
parse_status_response.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.R
\name{parse_status_response}
\alias{parse_status_response}
\title{Parse Response to Git Status}
\usage{
parse_status_response(status_response)
}
\value{
A list of vectors that has split on the following headers: "On branch","Changes to be committed:", "Changes not staged for commit:", "Untracked files:" with headers removed.
}
\description{
Parse the status response received when calling \code{\link{status}} to filter for new, modified, deleted, staged, and unstaged files.
}
\seealso{
\code{\link[purrr]{keep}},\code{\link[purrr]{map}},\code{\link[purrr]{map2}}
\code{\link[rubix]{map_names_set}}
}
|
9e8b3874a0e39fc30019b18a16e5d3b811b75ea8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sROC/examples/kROC.Rd.R
|
2dd399c42c8e03f236e27e24300d87dce22c8c87
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 300
|
r
|
kROC.Rd.R
|
library(sROC)
### Name: kROC
### Title: Kernel Estimation for ROC Curves
### Aliases: kROC
### Keywords: nonparametric smooth
### ** Examples
## --------------------
# Extracted package example: kernel-smoothed ROC curve comparing a Gamma(2,1)
# sample (x) against a standard-normal sample (y), with the "pi_sj" bandwidth
# (presumably the Sheather-Jones plug-in -- see ?kROC) for both groups.
set.seed(100)
n <- 200
x <- rgamma(n,2,1)
y <- rnorm(n)
xy.ROC <- kROC(x,y, bw.x="pi_sj",bw.y="pi_sj")
xy.ROC
plot(xy.ROC)
|
ed3047a37e2859d2b5c867a23021761a3de74bb3
|
00a6e8378c523b048399b3a7438f0fe22a6f5d4e
|
/R/engineer.R
|
0b83bb433073836da1cbae78ea82593de6d44284
|
[] |
no_license
|
sxinger/DKD_PM_temporal
|
46c117401ff7757ab440b216e4074efd5cf0bcb4
|
dbbb35a2e18411422665958e27ecb1be7f675a62
|
refs/heads/master
| 2020-04-10T12:12:30.682542
| 2019-04-23T01:03:17
| 2019-04-23T01:03:17
| 161,014,841
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,403
|
r
|
engineer.R
|
#### Feature Engineering ####
# Adds two engineered concepts to the DKD fact stack and saves it back:
#   "fact_cnt"           - cumulative distinct (day, concept) fact count
#   "newfact_cnt_slast"  - new facts since the previous recorded day
# NOTE(review): rm(list=ls()) and setwd() in a shared script are fragile --
# they wipe the caller's workspace and assume one machine's layout.
rm(list=ls()); gc()
setwd("~/proj_dkd/DKD_PM_wip")
source("./util.R")
require_libraries(c( "Matrix"
                    ,"pROC"
                    ,"dplyr"
                    ,"tidyr"
                    ,"magrittr"
                    ))
fact_stack<-readRDS("./data2/DKD_heron_facts_prep.rda")
## add historical distinct fact counts (distinct day,concept) up to date
# NOTE(review): cumsum() and lag() below run AFTER ungroup(), so the running
# totals cross patient boundaries in the arranged frame -- confirm this is
# intended (a group_by(PATIENT_NUM) before those mutates may be missing).
add_FCNT<-fact_stack %>%
  dplyr::select(PATIENT_NUM, CONCEPT_CD, day_from_dm) %>%
  dplyr::mutate(day_from_dm = pmax(0,day_from_dm)) %>% unique %>%
  group_by(PATIENT_NUM,day_from_dm) %>%
  dplyr::mutate(fcnt_pday = n()) %>%
  ungroup %>% arrange(PATIENT_NUM,day_from_dm) %>%
  dplyr::select(PATIENT_NUM,day_from_dm,fcnt_pday) %>% unique %>%
  dplyr::mutate(VARIABLE_CATEG = "engineered",
                CONCEPT_CD = "fact_cnt",
                NVAL_NUM = cumsum(fcnt_pday)) %>%
  dplyr::mutate(NVAL_NUM_lag = lag(NVAL_NUM,n=1L,0)) %>%
  dplyr::mutate(NVAL_NUM2 = NVAL_NUM-NVAL_NUM_lag)
# Attach the cumulative count ("fact_cnt") back onto the original fact rows.
add_FCNT2<-fact_stack %>%
  dplyr::select(-VARIABLE_CATEG,-CONCEPT_CD,-NVAL_NUM) %>% unique %>%
  inner_join(add_FCNT %>% dplyr::mutate(CONCEPT_CD = "fact_cnt") %>%
               dplyr::select(PATIENT_NUM,day_from_dm,
                             VARIABLE_CATEG,CONCEPT_CD,NVAL_NUM),
             by=c("PATIENT_NUM","day_from_dm")) %>%
  dplyr::select(PATIENT_NUM, VARIABLE_CATEG, CONCEPT_CD,
                START_DATE,DM_DATE,END_DATE,
                day_from_dm,day_to_end,yr_from_dm,NVAL_NUM)
#eyeball an example
add_FCNT2 %>% filter(PATIENT_NUM==70) %>% View
# Same join for the since-last-day delta ("newfact_cnt_slast" = NVAL_NUM2).
add_FCNT3<-fact_stack %>%
  dplyr::select(-VARIABLE_CATEG,-CONCEPT_CD,-NVAL_NUM) %>% unique %>%
  inner_join(add_FCNT %>% dplyr::mutate(CONCEPT_CD = "newfact_cnt_slast") %>%
               dplyr::select(PATIENT_NUM,day_from_dm,
                             VARIABLE_CATEG,CONCEPT_CD,NVAL_NUM2) %>%
               dplyr::rename(NVAL_NUM = NVAL_NUM2),
             by=c("PATIENT_NUM","day_from_dm")) %>%
  dplyr::select(PATIENT_NUM, VARIABLE_CATEG, CONCEPT_CD, START_DATE,DM_DATE,END_DATE,
                day_from_dm,day_to_end,yr_from_dm,NVAL_NUM)
#eyeball an example
add_FCNT3 %>% filter(PATIENT_NUM==70) %>% View
## stack new features onto the original facts (gc() between the two large
## binds to release memory)
fact_stack %<>%
  bind_rows(add_FCNT2)
rm(add_FCNT2); gc()
fact_stack %<>%
  bind_rows(add_FCNT3)
rm(add_FCNT3); gc()
## update data in place (overwrites the input file)
saveRDS(fact_stack, file="./data2/DKD_heron_facts_prep.rda")
rm(list=ls())
gc()
|
b23d707a5f9d5f3a329543cf634dbe278f849c28
|
3bb6d71ba47f9d22185654565805fb69324020e3
|
/TPM_Scripts/Making_Praveen-Mean_TPM_Dataset.R
|
a86c17ba371e28b8ad52624733763a0a7b2f6301
|
[] |
no_license
|
HongyuanWu/PanCan_RNA-Seq
|
c4b908f3ea80eebfd8c6358efc6fed36e06797ac
|
c1a82e41e1a1e6549ab853fc717f6b9e7fac2c9b
|
refs/heads/master
| 2022-02-02T17:01:18.008193
| 2019-06-25T18:53:24
| 2019-06-25T18:53:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,261
|
r
|
Making_Praveen-Mean_TPM_Dataset.R
|
rm(list = ls())
library(data.table)
library(dplyr)
library(magrittr)
library(stringr)
library(doParallel)
nCores <-
detectCores() %>%
subtract(2)
cl <- makeCluster(nCores)
registerDoParallel(cl)
setwd("/Users/a703402454/Desktop/UCSC_Project_Testing/Test_Data_Set_70%/TPM_Analysis")
Tumor <-
fread("Test_Set_TCGA_TARGET_TPM+1_coding.csv",
sep = ",",
header = TRUE
)
Normal <-
fread("Test_Set_GTEX_TPM+1_coding.csv",
sep = ",",
header = TRUE
)
TumorMeta <-
fread("Tumor_Meta_Common_Primaries.csv",
sep = ",",
header = TRUE
)
NormalMeta <-
fread("Normal_Meta_Common_Primaries.csv",
sep = ",",
header = TRUE
)
allSites <- unique(NormalMeta$`_primary_site`)
normalMeans <-
foreach(site = allSites) %dopar% {
tmpNames <- NormalMeta$sample[NormalMeta$`_primary_site` == site]
means <- rowMeans(Normal[,colnames(Normal) %in% tmpNames])
return(means)
}
rm(Normal)
normalMeans %<>% do.call(rbind, .)
normalMeans %<>% colMeans
Tumor <- log(Tumor/normalMeans)
Tumor %>%
fwrite("Test_Set_TCGA_TARGET_TPM_Praveen_Mean_coding.csv",
sep = ",",
col.names = TRUE,
row.names = FALSE
)
stopCluster(cl)
|
6d5ff04b9e8b8eee6da03d51efcc5349c86efeab
|
607847657e271d3c5b505066d2983504a765e06e
|
/Code/RPackages/keyDriver/man/mergeTwoMatricesByKeepAllPrimary.Rd
|
3e94f28cae4824a658ac8264980f6199cc848002
|
[] |
no_license
|
kippjohnson/RASNetwork
|
f7b513e0925297765224516b1e5d4e9d09d42a36
|
8a1a3a7ad6a9d1929e0e1238bc521bf6e740e54f
|
refs/heads/master
| 2020-12-24T13:44:19.378865
| 2015-08-04T14:38:28
| 2015-08-04T14:38:28
| 39,347,327
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,635
|
rd
|
mergeTwoMatricesByKeepAllPrimary.Rd
|
\name{mergeTwoMatricesByKeepAllPrimary}
\alias{mergeTwoMatricesByKeepAllPrimary}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Something
%% ~~function to do ... ~~
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
mergeTwoMatricesByKeepAllPrimary(primaryMatrix, minorMatrix, missinglabel = "", keepAllPrimary = T, keepPrimaryOrder = T, keepAll = F)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{primaryMatrix}{
%% ~~Describe \code{primaryMatrix} here~~
}
\item{minorMatrix}{
%% ~~Describe \code{minorMatrix} here~~
}
\item{missinglabel}{
%% ~~Describe \code{missinglabel} here~~
}
\item{keepAllPrimary}{
%% ~~Describe \code{keepAllPrimary} here~~
}
\item{keepPrimaryOrder}{
%% ~~Describe \code{keepPrimaryOrder} here~~
}
\item{keepAll}{
%% ~~Describe \code{keepAll} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function(primaryMatrix, minorMatrix, missinglabel="", keepAllPrimary=T, keepPrimaryOrder=T, keepAll=F)
{
no.promarycols <- dim(primaryMatrix)[2]
no.mustbegenes <- dim(primaryMatrix)[1]
# we add in one more column to indicate which genes are mustbeincluded after being merged with mcg
keyword="mustbeused"
mustbeGenesMatrix = cbind(primaryMatrix, c(1:no.mustbegenes), rep(keyword, no.mustbegenes) )
if (is.null(colnames(primaryMatrix)) ){
colnames(mustbeGenesMatrix) <- c( c(1:no.promarycols), "primorder", keyword)
}else{
colnames(mustbeGenesMatrix) <- c( colnames(primaryMatrix), "primorder", keyword)
}
dim(mustbeGenesMatrix)
if(is.null(keepAllPrimary) ){ #normal merge: to have the common elements
myMatrix = merge(mustbeGenesMatrix, minorMatrix, by.x=1, by.y=1,all.x=F,sort=F,all=F)
}else{
myMatrix = merge(mustbeGenesMatrix, minorMatrix, by.x=1, by.y=1,all.x=T,sort=F,all=T)
}
dim(myMatrix)
nocols.mymatrix <- dim(myMatrix)[2]
#the mustbeused genes which are not included in minor have NAs in the column $mustbeused
#so we can use this information to figure out which mustbeused genes missing in minorMatrix
myMatrix[,nocols.mymatrix] = ifelse( is.na(myMatrix[,nocols.mymatrix]), missinglabel, as.character(myMatrix[,nocols.mymatrix]) )
orders = order( as.numeric(as.matrix(myMatrix[, no.promarycols+1])))
if (keepPrimaryOrder)
myMatrix = myMatrix[orders,]
if (is.null(keepAllPrimary) ){
selected = rep(T, dim(myMatrix)[1])
}else{
if (keepAllPrimary)
selected = !(is.na(myMatrix[, no.promarycols+2]))
else #return the row-elements in minor which are missed in primary
selected = is.na(myMatrix[, no.promarycols+2])
}
sum(selected)
#keep the primary matrix and remove the mustbeused column
myMatrix[selected, -c(no.promarycols+1, no.promarycols+2)]
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
f44735b5d628c5414bb5ca9a487e1689c02f1f90
|
6390c203df735c874044a8ffa0f3692bf6010a6a
|
/man/Warehouse.Rd
|
a4f2c604e091ecb6bf018fed1b565734acb140bd
|
[
"MIT"
] |
permissive
|
felixlindemann/HNUORTools
|
c8c61ec550e2c6673c8d3e158bd7bc21208b26ab
|
0cb22cc0da14550b2fb48c996e75dfdad6138904
|
refs/heads/master
| 2020-05-15T18:37:48.423808
| 2018-02-04T11:04:52
| 2018-02-04T11:04:52
| 16,206,897
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,971
|
rd
|
Warehouse.Rd
|
\docType{class}
\name{Warehouse}
\alias{Warehouse}
\alias{Warehouse-class}
\title{The Warehouse class}
\description{
This class is part of the \pkg{HNUORTools}. It represents
the base class for every locateable class in an
\dfn{Operations-Research (OR)}-context.
}
\details{
Find here the defined slots for this class.
}
\note{
for citing use: Felix Lindemann (2014). HNUORTools:
Operations Research Tools. R package version 1.1-0.
\url{http://felixlindemann.github.io/HNUORTools/}.
}
\section{Slots}{
\describe{ \item{\code{supply}:}{ Object of class
\code{"numeric"}, containing the amount for the supply.
The default value cannot be negative. }
\item{\code{fixcosts}:}{ Object of class
\code{"numeric"}, containing the amount for the fixcosts
The default value cannot be negative. }
\item{\code{open}:}{ Object of class \code{"logical"},
indicating if a Warehouse is used within a WLP. }
\item{\code{vrp}:}{ Object of class \code{"list"}, a List
of Tours genereated for a VRP. } \item{\code{isDummy}:}{
Object of class \code{"logical"}, indicating if a
Warehouse was added in an algorithm (e.g.
TPP-Column-Minimum-Method) in order to avoid degenerated
soulutions. } }
}
\section{Slots (from \code{\link{Node}})}{
\describe{ \item{\code{id}:}{ Object of class
\code{"character"}, containing data from id.
\strong{Should be unique}. The default value will be
caluclated randomly. } \item{\code{label}:}{ Object of
class \code{"character"}, containing the label of the
\code{\link{Warehouse}}. The default value will be
caluclated randomly. } \item{\code{x}:}{ Object of class
\code{"numeric"}, containing the x-coordinate of the
\code{\link{Warehouse}}. The default value will be
caluclated randomly. } \item{\code{y}:}{ Object of class
\code{"numeric"}, containing the y-coordinate of the
\code{\link{Warehouse}}. The default value will be
caluclated randomly. } }
}
\section{Creating objects of type \code{\link{Warehouse}}}{
\describe{ \item{Creating an \code{S4-Object}}{
\code{new("Warehouse", ...)} } \item{Converting from a
\code{\link{data.frame}}}{
\code{as.Warehouse{<data.frame>}} See also below in the
Methods-Section. } \item{Converting from a
\code{\link{list}}}{ \code{as.Warehouse{<list>}} See also
below in the Methods-Section. } }
}
\section{Methods}{
\describe{ \item{\code{as.list(Warehouse, ...)}}{
Converts a \code{\link{Warehouse}} into a
\code{\link{list}}. \code{...} are user-defined (not
used) parameters. } \item{\code{as.data.frame(Warehouse,
...)}}{ Converts a \code{\link{Warehouse}} into a
\code{\link{data.frame}}. \code{as.data.frame} accepts
the optional parameter \code{withrownames} of class
\code{"logical"} (default is \code{TRUE}). If
\code{withrownames == TRUE} the returned
\code{\link{data.frame}} will recieve the \code{id} as
rowname. \code{...} are user-defined (not used)
parameters. } \item{\code{as.Warehouse(obj)}}{ Converts
an object of class \code{\link{data.frame}} or of class
\code{\link{list}} into a \code{\link{Warehouse}}. }
\item{\code{is.Warehouse(obj)}}{ Checks if the object
\code{obj} is of type \code{\link{Warehouse}}. }
\item{\code{\link{getDistance}}}{ Calculating the
distance between two \code{\link{Node}s} (As the classes
\code{\link{Customer}} and \code{\link{Warehouse}} depend
on \code{\link{Node}}, distances can be calculated
between any of these objects). }
\item{\code{\link{getpolar}}}{ Calculating the
polar-angle to the x-Axis of a link, connecting two
\code{\link{Node}s} (As the classes
\code{\link{Customer}} and \code{\link{Warehouse}} depend
on \code{\link{Node}}, polar-angles can be calculated
between any of these objects). } }
}
\section{Derived Classes}{
None.
}
\section{To be used for}{
\describe{ \item{WLP}{ Warehouse-Location-Problem }
\item{TPP}{ Transportation Problem } \item{VRP}{ Vehicle
Routing Problem } }
}
\examples{
# create a new Warehouse with specific values
x<- new("Warehouse", x= 10, y=20, id="myid", label = "mylabel", supply = 20)
x
# create from data.frame
df<- data.frame(x=10,y=20, supply = 30)
new("Warehouse", df)
as(df, "Warehouse")
as.Warehouse(df)
#create some Warehouses
n1 <- new("Warehouse",x=10,y=20, supply = 30, id ="n1")
n2 <- new("Warehouse",x=13,y=24, supply = 40, id ="n2")
# calculate Beeline distance
#getDistance(n1,n2) # should result 5
#getDistance(n1,n2, costs = 2) # should result 10
}
\author{
Dipl. Kfm. Felix Lindemann
\email{felix.lindemann@hs-neu-ulm.de}
Wissenschaftlicher Mitarbeiter Kompetenzzentrum Logistik
Buro ZWEI, 17
Hochschule fur angewandte Wissenschaften Fachhochschule
Neu-Ulm | Neu-Ulm University Wileystr. 1
D-89231 Neu-Ulm
Phone +49(0)731-9762-1437 Web
\url{www.hs-neu-ulm.de/felix-lindemann/}
\url{http://felixlindemann.blogspot.de}
}
\seealso{
The classes are derived from this class and the following
Methods can be used with this class.:
}
|
7a1b8015fae1ce8c856c587b0b4ecbdc0a8ba914
|
a992dc5179eebb2779e63a6284ef64bf6b22904d
|
/man/memoiseCache.Rd
|
eed7d964d1c6669f61a0dc369d3ffeced09a45ad
|
[] |
no_license
|
philliplab/yasss
|
707e1029bfc6054de2d6de933761b6e044123f5b
|
20a53fda82c9019438ee4067cb7c6e87aca6aeaa
|
refs/heads/master
| 2021-06-05T09:45:23.338398
| 2020-09-07T14:39:02
| 2020-09-07T14:39:02
| 146,469,843
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,003
|
rd
|
memoiseCache.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/memoiseCache.R
\name{memoiseCache}
\alias{memoiseCache}
\title{Cache a function call with an associated seed value}
\usage{
memoiseCache(fun, args, cacheNamePrefix = NULL, seed = NULL, ...)
}
\arguments{
\item{fun}{The name of the function to execute as a character string}
\item{args}{A list of the arguments used by do.call to run fun.}
\item{cacheNamePrefix}{Optional prefix that gets added to the cacheName. Frequent usage is a diagnostic marker for OCD.}
\item{seed}{A positive integer that allows extra control over whether or not to compute or load the value from a cache.}
}
\description{
Uses simpleCache::simpleCache to implement a form of memoisation. It caches
a function's result based on the arguments provided and will reload that
value instead of recomputing the function if the arguments match.
Additionally allows the user to specify a seed value so that different
results based on the same arguments.
}
|
34c9cb33f28394e4ddefc9bf443d27c1687126b4
|
b72e0d0d9d3b25d4909c4893c0b2db16843d8e0c
|
/plans.R
|
8e3ae3df3f3cd8fdf1ddca2864b50536297315ab
|
[] |
no_license
|
zaintejani/FinTech_Impulse
|
765528d87a555049eb6bae7eb032ec3b0d43c78a
|
c290edf7622cd9dc2b97a357ebd115d025c8a378
|
refs/heads/master
| 2021-01-10T00:58:35.183111
| 2015-11-10T02:15:50
| 2015-11-10T02:15:50
| 45,881,130
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 430
|
r
|
plans.R
|
## Algorithm Skeleton:
## Account is the User.
## Tie transaction data to User using account ID variable
## Build transaction history (date: as Posixct)
## order containers by absolute freq, relative (recent) freq (date sub-filter)
## Remove lending style containers.
## Match "most preferred" containers by User to "most preferred" merchants by company (based on contracts, market share, etc)
## Promos, coupons, etc. make $$$
|
3717c67cf2ed7672083f2feb4cb794b0a7ab0617
|
13867cf2f13f520a0ab24227a47b53b5a74a2a4c
|
/Dropbox/HD-Quintana/CQuintana/Coursera/complete.R
|
0f691c87ccfaaeb6549fdcc9d28ad8dac2e28cdb
|
[] |
no_license
|
cquintanam/Coursera-Exm-2
|
7ac3c19b6e1d5a31c7d514221cd1d0ed76a7dd42
|
de11da370585b32ea57c5b69ee8fdbda34d66e34
|
refs/heads/master
| 2023-04-02T21:00:04.360529
| 2021-04-06T03:51:10
| 2021-04-06T03:51:10
| 310,116,372
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 295
|
r
|
complete.R
|
setwd("//Users//administrador//Dropbox//HD-Quintana//CQuintana//Coursera")
complete <- function(directory, files = 1:332)
{
dat <- data.frame(id = 1:332)
for(i in 1:332)
{
dat$nobs[i] <- dim(na.omit(read.csv(list.files(directory, full.names = TRUE)[i])))[1]
}
data = dat[files,]
return(data)
}
|
934cf5ba5d443fe630ea098e50ec33784d447ba1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/coreCT/examples/rootSize.Rd.R
|
b0ac175c3c88491d87f327c9054f0a7f1426f2ac
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 963
|
r
|
rootSize.Rd.R
|
library(coreCT)
### Name: rootSize
### Title: Convert a matrix of semi-processed DICOM images to root particle
### counts, volumes, and surface areas
### Aliases: rootSize
### ** Examples
ct.slope <- unique(extractHeader(core_426$hdr, "RescaleSlope"))
ct.int <- unique(extractHeader(core_426$hdr, "RescaleIntercept"))
# convert raw units to Hounsfield units
HU_426 <- lapply(core_426$img, function(x) x*ct.slope + ct.int)
rootChars <- rootSize(HU_426, pixelA = 0.0596,
diameter.classes = c(2.5, 10))
## Not run:
##D # plot using "ggplot" package after transforming with "reshape2" package
##D area.long <- reshape2::melt(rootChars, id.vars = c("depth"),
##D measure.vars = grep("Area", names(rootChars)))
##D ggplot2::ggplot(data = area.long, ggplot2::aes(y = -depth, x = value,
##D color = variable)) + ggplot2::geom_point() + ggplot2::theme_classic() +
##D ggplot2::xlab("root external surface area per slice (cm2)")
## End(Not run)
|
1dbf948c6b50186eb769ef756afb542b72280704
|
2e8fcc79e61ed9f80673834834fcf2abb4b8ac75
|
/R/zzz.R
|
49eef129bed5774dbb48ff028a3732cb62540052
|
[
"MIT"
] |
permissive
|
nickmckay/GeoChronR
|
893708e6667ee898165c208d200f002063e6d83f
|
f37236e1fa6616f55798bbd4e1530b5b564d0f53
|
refs/heads/master
| 2023-05-24T01:32:59.690518
| 2023-01-17T23:16:47
| 2023-01-17T23:16:47
| 32,468,418
| 30
| 2
|
MIT
| 2023-01-19T17:46:24
| 2015-03-18T15:50:12
|
R
|
UTF-8
|
R
| false
| false
| 199
|
r
|
zzz.R
|
.onAttach <- function(...){
if(!interactive()){
packageStartupMessage(
cat(crayon::bold(glue::glue("Welcome to geoChronR version {utils::packageVersion('geoChronR')}!")),"\n")
)
}
}
|
62ff32f1ad9d1c7b2cf7bf64c56962a28646b4b2
|
399d8ec2a319ba33da29c819964d32660ccff1c2
|
/CleanData.R
|
d5d7259aa139c31fe3b0c96b607c148cbeb85261
|
[] |
no_license
|
thuggeanalyst/University-Ranking-R-Shiny
|
90224a796ad7082a36197750f8381e417e5b5f46
|
21154e03cb2e7ba8217ce480da7332f49b853271
|
refs/heads/main
| 2023-07-01T12:47:44.463220
| 2021-07-31T18:38:35
| 2021-07-31T18:38:35
| 391,436,583
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 330
|
r
|
CleanData.R
|
na_count1 <-sapply(timesData, function(y) sum(length(which(is.na(y)))))
na_count1 <- data.frame(na_count1)
na_count1
str(timesData)
timesData$world_rank<-as.numeric(timesData$world_rank)
timesData$total_score<-as.numeric(timesData$total_score)
#timesData<-na.omit(timesData)
timesData$year<-factor(timesData$year)
|
7fef9f42f042eb1be43cd1f5975baada1bb9511f
|
29f8f3ee59c366ea408633d183614bc39b49b26d
|
/Duke_DGHI/[DUEM] US_3DUSsamplesize_code.R
|
9b5d255cc02edcf66a33f3eb534ac6a4a3060370
|
[] |
no_license
|
souzajvp/analytical_codes
|
92db345dc75f128c2f25fb7b28f0891139ffea98
|
dcc49662253ba1dbd4f54b8c4caea40232632783
|
refs/heads/master
| 2023-05-23T06:06:12.058469
| 2021-06-07T18:11:00
| 2021-06-07T18:11:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,685
|
r
|
[DUEM] US_3DUSsamplesize_code.R
|
install.packages("MKmisc")
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("limma")
library("MKmisc")
## see n2 on page 1202 of Chu and Cole (2007)
power.diagnostic.test(spec = 0.70,
delta = 0.10,
power = 0.80,
sig.level=0.05,
prev=0.14) # 40
## see n2 on page 1202 of Chu and Cole (2007)
power.diagnostic.test(spec = 0.95,
delta = 0.1,
power = 0.95,
sig.level=0.01,
prev=0.03) # 40
# power.diagnostic.test(sens = 0.99, delta = 0.13, power = 0.95) # 43
# power.diagnostic.test(sens = 0.99, delta = 0.12, power = 0.95) # 47
## see n2 on page 1202 of Chu and Cole (2007)
power.diagnostic.test(sens = 0.99,
delta = 0.15,
power = 0.95,
sig.level=0.05,
prev=1) # 40
For sample size, in the first study (adult rule), out of 3435 adults, 311 (9.1%) had abdominal injuries, and 109 (3%) required acute intervention (surgery or angiographic embolization).
a. For free fluid, LR+ was 36, LR- 0.24
b. In hypotensive patients (the sickest part of the shock population), the sensitivity was >90% (fig 4 b).
c. LR negative for abdominal organ injuries was 0.21.
d. Sensitivity/specificity for organ injuries (tables 3, 5). In general, high specificity (>95%), low sensitivity (15-38%).
e. As an estimate of the prevalence of injury, table 5 lists spleen as 7%, liver 3%, kidney 3%, small bowel 1%, free fluid 16-19%. Our numbers might be higher if our inclusion criteria focus on shock.
install.packages("kappaSize")
library(kappaSize)
PowerBinary(kappa0=0.4, kappa1=0.8, props=0.14, alpha=0.05, power=0.80);
|
5b929eba6a2e7a3766c214fd5cdc0dc2c81f6c5a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/FactoMineR/examples/print.catdes.Rd.R
|
35be80a6cf2bcd5653a41368ee0504a1689b5283
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 242
|
r
|
print.catdes.Rd.R
|
library(FactoMineR)
### Name: print.catdes
### Title: Print the catdes results
### Aliases: print.catdes
### Keywords: print
### ** Examples
## Not run:
##D data(wine)
##D res <- catdes(wine, num.var=2)
##D print(res)
## End(Not run)
|
e28d8b862c9618863a665d7eb4412631d66953ce
|
48bcbc2f996f4afe02f39b9a033810dced553c67
|
/data/OpenWeatherMap.R
|
c0c01fecdb2f3fd10faa8d33d2d681a82a6276fd
|
[] |
no_license
|
dstoiko/hackathon-datapower
|
2731eba0ca05551d6f6e2baee7787365258c89d1
|
07151b8224c0fdba4f914860db063dc121fec01b
|
refs/heads/master
| 2021-01-16T23:21:40.667280
| 2016-06-27T12:04:21
| 2016-06-27T12:04:21
| 61,956,685
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 208
|
r
|
OpenWeatherMap.R
|
### OpenWeatherMap Data API call
library(ROpenWeatherMap)
key = "5a3167b76fef776330af151a62afba29"
data=get_current_weather(api_key=key,city="paris")%>% as.data.frame()
temperatureC = data$main.temp - 273.15
|
524c2f082cab9951db964a23b7f9bb5b87076966
|
3824d9a06dede35d38c1e9a80375c131cf4e502f
|
/man/tdi.Rd
|
bf67e2c088b85e3c402b47cf85a9f3c973e5f560
|
[] |
no_license
|
king8w/diathor
|
1b8b39b21402c7fc3d49a714362b183b9ddafd0b
|
5b0e529fb24ca8f36acabd3923b6782c113c939a
|
refs/heads/master
| 2023-03-07T14:57:46.827205
| 2021-02-24T04:50:06
| 2021-02-24T04:50:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 581
|
rd
|
tdi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\encoding{UTF-8}
\name{tdi}
\alias{tdi}
\title{TDI}
\format{
A data frame with the ecological values for 3445 species
}
\source{
\url{https://link.springer.com/article/10.1007/BF00003802}
}
\usage{
data(tdi)
}
\description{
Index values for diatom species included in the TDI index
}
\references{
Kelly, M. G., & Whitton, B. A. (1995). The trophic diatom index: a new index for monitoring eutrophication in rivers. Journal of Applied Phycology, 7(4), 433-444.
}
\keyword{datasets}
|
81ac01a0c2b78f766a1fa88ad3e5e18d33ff8655
|
af28d2289d826d08e2c1e61be84e470988976848
|
/man/combinatorics.add.parameter.Rd
|
12f2b309bf998b605e9312024be8981698386142
|
[] |
no_license
|
mbich/combinatorics
|
6ed3b0f256b831f3e7337fea0bd22bf1a3e06705
|
c46212d477c02e0608ff9226697fe517f958c4af
|
refs/heads/master
| 2020-12-02T18:05:52.525420
| 2017-07-09T19:52:46
| 2017-07-09T19:52:46
| 96,470,307
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,184
|
rd
|
combinatorics.add.parameter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/combinatorics.design.add.R
\docType{methods}
\name{combinatorics.add.parameter}
\alias{combinatorics.add.parameter}
\alias{combinatorics.add.parameter,Combinatorics,character,character,numeric,missing-method}
\alias{combinatorics.add.parameter,Combinatorics,character,missing,numeric,missing-method}
\alias{combinatorics.add.parameter,Combinatorics,character,character,missing,missing-method}
\alias{combinatorics.add.parameter,Combinatorics,character,missing,missing,missing-method}
\alias{combinatorics.add.parameter,Combinatorics,character,character,numeric,vector-method}
\alias{combinatorics.add.parameter,Combinatorics,character,character,missing,vector-method}
\alias{combinatorics.add.parameter,Combinatorics,character,missing,numeric,vector-method}
\title{Add parameter in model}
\usage{
combinatorics.add.parameter(model, name, dimension, value, arrangementValues)
\S4method{combinatorics.add.parameter}{Combinatorics,character,character,numeric,missing}(model,
name, dimension, value, arrangementValues)
\S4method{combinatorics.add.parameter}{Combinatorics,character,missing,numeric,missing}(model,
name, dimension, value, arrangementValues)
\S4method{combinatorics.add.parameter}{Combinatorics,character,character,missing,missing}(model,
name, dimension, value, arrangementValues)
\S4method{combinatorics.add.parameter}{Combinatorics,character,missing,missing,missing}(model,
name, dimension, value, arrangementValues)
\S4method{combinatorics.add.parameter}{Combinatorics,character,character,numeric,vector}(model,
name, dimension, value, arrangementValues)
\S4method{combinatorics.add.parameter}{Combinatorics,character,character,missing,vector}(model,
name, dimension, value, arrangementValues)
\S4method{combinatorics.add.parameter}{Combinatorics,character,missing,numeric,vector}(model,
name, dimension, value, arrangementValues)
}
\arguments{
\item{model}{объект класса \code{"Combinatorics"}, содержащий комбинаторную модель}
\item{name}{объект класса \code{"character"}, содержащий наименование добавляемого
показателя}
\item{dimension}{объект класса \code{"character"}, содержащий размернойть добавляемого
показателя}
\item{value}{объект класса \code{"numeric"}, содержащий значение огранченного ресурса.}
\item{arrangementValues}{объект класса \code{"vector"}, содержащий числовые значения показателя для.
всех мероприятий в комбинаторной модели. Количество значений в векторе должно быть равно количеству
мероприятий в комбинаторной модели}
}
\value{
возвращает объект класса \code{"Combinatorics"} с внесёнными изменениями
}
\description{
Добавляет показатель в комбинаторную модель. При указании значения в парамете \code{"value"}
показатель используется в качестве как ограниченного ресурса
}
\section{Methods (by class)}{
\itemize{
\item \code{model = Combinatorics,name = character,dimension = character,value = numeric,arrangementValues = missing}: добавление ограниченного ресурса в комбинаторную модель
не содержащей мероприятий
\item \code{model = Combinatorics,name = character,dimension = missing,value = numeric,arrangementValues = missing}: добавление ограниченного ресурса без указания его размерности
в комбинаторную модель не содержащей мероприятий
\item \code{model = Combinatorics,name = character,dimension = character,value = missing,arrangementValues = missing}: добавление показателя в комбинаторную модель не
содержащей мероприятий
\item \code{model = Combinatorics,name = character,dimension = missing,value = missing,arrangementValues = missing}: добавление показателя без указания его размерности
в комбинаторную модель не содержащей мероприятий
\item \code{model = Combinatorics,name = character,dimension = character,value = numeric,arrangementValues = vector}: добавление ограниченного ресурса в комбинаторную модель
\item \code{model = Combinatorics,name = character,dimension = character,value = missing,arrangementValues = vector}: добавление показателя в комбинаторную модель
\item \code{model = Combinatorics,name = character,dimension = missing,value = numeric,arrangementValues = vector}: добавление ограниченного ресурса без указания его размерности
в комбинаторную модель
}}
\examples{
model <- combinatorics.model()
combinatorics.add.parameter (model, "R1", "pc", 10)
model <- combinatorics.model()
combinatorics.add.parameter (model, "R1", value=10)
model <- combinatorics.model()
combinatorics.add.parameter (model, "R1", "pc.")
model <- combinatorics.model()
combinatorics.add.parameter (model, "R1")
model <- combinatorics.example.Model2()
combinatorics.add.parameter (model, "Electricity", "Thousand kWh/hour", 75,
c(39, 24.3, 17.7, 26.7, 11.1, 10.8, 18.3, 12.3))
model <- combinatorics.example.Model2()
combinatorics.add.parameter (model, "Electricity", "Thousand kWh/hour",
arrangementValues=c(39, 24.3, 17.7, 26.7, 11.1, 10.8, 18.3, 12.3))
model <- combinatorics.example.Model2()
combinatorics.add.parameter (model, "Electricity", value=75,
arrangementValues=c(39, 24.3, 17.7, 26.7, 11.1, 10.8, 18.3, 12.3))
}
|
eaf4ad2ad3c71a6bb361823902a7009e5e39dce5
|
689635789d25e30767a562933f39fcba1cebecf1
|
/Alpha Modelling/QuantStrat/Packages/IKTrading/demo/stepwiseCorRank.R
|
369b57a8792396347907ba2d675a90f7287e2902
|
[] |
no_license
|
Bakeforfun/Quant
|
3bd41e6080d6e2eb5e70654432c4f2d9ebb5596c
|
f2874c66bfe18d7ec2e6f2701796fb59ff1a0ac8
|
refs/heads/master
| 2021-01-10T18:23:23.304878
| 2015-08-05T12:26:30
| 2015-08-05T12:26:30
| 40,109,179
| 5
| 0
| null | 2015-08-05T12:12:09
| 2015-08-03T06:43:12
|
R
|
UTF-8
|
R
| false
| false
| 3,181
|
r
|
stepwiseCorRank.R
|
stepwiseCorRank <- function(corMatrix, startNames=NULL, stepSize=1, bestHighestRank=FALSE) {
#edge cases
if(dim(corMatrix)[1] == 1) {
return(corMatrix)
} else if (dim(corMatrix)[1] == 2) {
ranks <- c(1.5, 1.5)
names(ranks) <- colnames(corMatrix)
return(ranks)
}
if(is.null(startNames)) {
corSums <- rowSums(corMatrix)
corRanks <- rank(corSums)
startNames <- names(corRanks)[corRanks <= stepSize]
}
nameList <- list()
nameList[[1]] <- startNames
rankList <- list()
rankCount <- 1
rankList[[1]] <- rep(rankCount, length(startNames))
rankedNames <- do.call(c, nameList)
while(length(rankedNames) < nrow(corMatrix)) {
rankCount <- rankCount+1
subsetCor <- corMatrix[, rankedNames]
if(class(subsetCor) != "numeric") {
subsetCor <- subsetCor[!rownames(corMatrix) %in% rankedNames,]
if(class(subsetCor) != "numeric") {
corSums <- rowSums(subsetCor)
corSumRank <- rank(corSums)
lowestCorNames <- names(corSumRank)[corSumRank <= stepSize]
nameList[[rankCount]] <- lowestCorNames
rankList[[rankCount]] <- rep(rankCount, min(stepSize, length(lowestCorNames)))
} else { #1 name remaining
nameList[[rankCount]] <- rownames(corMatrix)[!rownames(corMatrix) %in% names(subsetCor)]
rankList[[rankCount]] <- rankCount
}
} else { #first iteration, subset on first name
subsetCorRank <- rank(subsetCor)
lowestCorNames <- names(subsetCorRank)[subsetCorRank <= stepSize]
nameList[[rankCount]] <- lowestCorNames
rankList[[rankCount]] <- rep(rankCount, min(stepSize, length(lowestCorNames)))
}
rankedNames <- do.call(c, nameList)
}
ranks <- do.call(c, rankList)
names(ranks) <- rankedNames
if(bestHighestRank) {
ranks <- 1+length(ranks)-ranks
}
ranks <- ranks[colnames(corMatrix)] #return to original order
return(ranks)
}
mutualFunds <- c("VTSMX", #Vanguard Total Stock Market Index
"FDIVX", #Fidelity Diversified International Fund
"VEIEX", #Vanguard Emerging Markets Stock Index Fund
"VFISX", #Vanguard Short-Term Treasury Fund
"VBMFX", #Vanguard Total Bond Market Index Fund
"QRAAX", #Oppenheimer Commodity Strategy Total Return
"VGSIX" #Vanguard REIT Index Fund
)
#mid 1997 to end of 2012
getSymbols(mutualFunds, from="1997-06-30", to="2012-12-31")
tmp <- list()
for(fund in mutualFunds) {
tmp[[fund]] <- Ad(get(fund))
}
#always use a list hwne intending to cbind/rbind large quantities of objects
adPrices <- do.call(cbind, args = tmp)
colnames(adPrices) <- gsub(".Adjusted", "", colnames(adPrices))
adRets <- Return.calculate(adPrices)
subset <- adRets["2012"]
corMat <- cor(subset)
tmp <- list()
for(i in 1:length(mutualFunds)) {
rankRow <- stepwiseCorRank(corMat, startNames=mutualFunds[i])
tmp[[i]] <- rankRow
}
rankDemo <- do.call(rbind, tmp)
rownames(rankDemo) <- mutualFunds
origRank <- rank(rowSums(corMat))
rankDemo <- rbind(rankDemo, origRank)
rownames(rankDemo)[8] <- "Non-Sequential"
heatmap(-rankDemo, Rowv=NA, Colv=NA, col=heat.colors(8), margins=c(6,6))
|
fc1f2eb3fb4899fc9696459e1bb703b0ad4a7ef4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/bqtl/examples/residuals.bqtl.Rd.R
|
dc577bd15f2bcbdc1d3eee039a8637d61e085213
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 385
|
r
|
residuals.bqtl.Rd.R
|
library(bqtl)
### Name: residuals.bqtl
### Title: Residuals from QTL models
### Aliases: residuals.bqtl
### Keywords: methods
### ** Examples
data(little.ana.bc)
fit.pheno <- bqtl(bc.phenotype~locus(15)+locus(42),little.ana.bc)
summary(residuals(fit.pheno))
plot( fitted( fit.pheno ), residuals( fit.pheno) )
## Don't show:
rm(little.ana.bc,fit.pheno)
## End(Don't show)
|
a20e474fef7247c5e1c92be4b4ad02989b875180
|
e661887eb7058f962333e4dfb5887c42ffe4891b
|
/plot1.R
|
25ecc97457359e3ed40580396aa01687cdda25cd
|
[] |
no_license
|
Rastermyosin/ExData_Plotting1
|
31391afd587e287affe9b72dfe65184b7ba69d2e
|
17710b3653143dcbf90ac004c38bb9736b9f4ef2
|
refs/heads/master
| 2020-11-30T23:34:31.924094
| 2015-03-08T23:10:03
| 2015-03-08T23:10:03
| 31,860,771
| 0
| 0
| null | 2015-03-08T18:40:36
| 2015-03-08T18:40:36
| null |
UTF-8
|
R
| false
| false
| 705
|
r
|
plot1.R
|
############################################################
# Plot1: Histrogram of Global Active Power
############################################################
#### Load Data ####
data = read.csv(file = "./Data/household_power_consumption.txt", sep = ";")
# Subset Data #
dateTest = as.Date(data$Date,"%d/%m/%Y")
idx = dateTest >= as.Date("01/02/2007","%d/%m/%Y") & dateTest <= as.Date("02/02/2007","%d/%m/%Y")
plotData = data[idx,]
# Plot Data of interest
png(filename = "plot1.png", width = 480, height = 480)
hist(as.numeric(as.character(plotData$Global_active_power)), breaks = 12, col = "Red",
main = "Global Active power", xlab = "Global Active Power (kilowatts)")
dev.off()
# EOF
|
39991329d6484beabf9336957f54ff30d1932d6d
|
113255ebb19fac37698d5897a96660977c4a9ca6
|
/src/init_spatial.R
|
272ded801c263230ab782e0e55dc09d83cd28e25
|
[] |
no_license
|
Ludwigm6/tRacking
|
e3f4fc69dc4b4144b2c282abe91417762f336c9a
|
abeda02ae1bf1fa3225221f96dfcc702815a82a5
|
refs/heads/master
| 2020-04-16T18:21:42.382018
| 2019-02-05T17:18:55
| 2019-02-05T17:18:55
| 165,816,300
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 861
|
r
|
init_spatial.R
|
# rteu spatial init
library(rgdal)
library(mapview)
library(raster)
library(plyr)
source("~/repositories/envimaR/R/getEnvi.R")
p <- getEnvi("/home/marvin/rteu/field_test/data/")
s <- getEnvi("/home/marvin/rteu/field_test/scripts/")
# antenna as spatial
antennas <- read.csv(paste0(p$gps_data$here, "antennas.csv"))
antennas <- antennas[c(1,5,9),]
scheme <- list(antennas = antennas)
coordinates(antennas) <- ~ Longitude + Latitude
projection(antennas) <- CRS("+proj=longlat +datum=WGS84")
# reference as spatial
ref <- read.csv(paste0(p$gps_data$here, "lut_measures.csv"), stringsAsFactors = FALSE)
ref <- ref[ref$method == "RUHE",]
scheme$ref <- ref
coordinates(ref) <- ~ pos.X + pos.Y
projection(ref) <- CRS("+proj=longlat +datum=WGS84")
mapview(antennas) + mapview(ref)
scheme_sp <- list(antennas = antennas, ref = ref)
rm(antennas, ref)
|
34d5bf607a1cc3a873502a46d8f2f0a32488eb7a
|
2d39c37ead4338a40b263515a31b55ccc719fdf5
|
/R/load_chromhmm_emissions.R
|
f6d735322fc16d674a57f034ea82fe31a759a3c1
|
[
"MIT"
] |
permissive
|
csiu/hmmpickr
|
b9f6a8d831f93c0a6a3753aad3c65bb079271da5
|
eadb6476150c83086a245ae71754caa0066be2da
|
refs/heads/master
| 2021-01-19T17:10:27.779925
| 2017-03-14T07:27:33
| 2017-03-14T07:27:33
| 83,733,732
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 369
|
r
|
load_chromhmm_emissions.R
|
#' Load ChromHMM emission probabilities
#' @param filename The ChromHMM model emissions file
#' @param ... options for readr::read_tsv(...)
#' @export
load_chromhmm_emissions <- function(filename, ...) {
emissions_probs <- readr::read_tsv(filename, progress=FALSE, ...)
colnames(emissions_probs)[1] <- "state"
tidyr::gather(emissions_probs, mark, prob, -state)
}
|
cc878c16e22d2a7526064cc1927eaa0e62c3f444
|
38c16978738ffac95bfcf1e78fcb243fc4195305
|
/R/standardize.R
|
f9b86130dfd097db02761829f76fd60de288ac50
|
[] |
no_license
|
ebenmichael/balancer
|
ca3e2f733c52450d8e7b5b1a4ebd0d182713d4eb
|
55173367e2c91f1a3ce47070f8430c6686a049bd
|
refs/heads/master
| 2023-07-10T20:52:54.547666
| 2023-06-20T14:40:01
| 2023-06-20T14:40:01
| 129,783,286
| 7
| 3
| null | 2023-05-16T19:21:44
| 2018-04-16T17:47:11
|
R
|
UTF-8
|
R
| false
| false
| 15,677
|
r
|
standardize.R
|
################################################################################
## Wrapper to standardize to target means
################################################################################
#' Re-weight groups to target population means
#' @param X n x d matrix of covariates
#' @param target Vector of population means to re-weight to
#' @param Z Vector of group indicators with J levels
#' @param lambda Regularization hyper parameter, default 0
#' @param lowlim Lower limit on weights, default 0
#' @param uplim Upper limit on weights, default 1
#' @param scale_sample_size Whether to scale the dispersion penalty by the sample size of each group, default TRUE
#' @param data_in Optional list containing pre-computed objective matrix/vector and constraints (without regularization term)
#' @param verbose Whether to show messages, default TRUE
#' @param return_data Whether to return the objective matrix and vector and constraints, default TRUE
#' @param exact_global Whether to enforce exact balance for overall population
#' @param init_uniform Whether to initialize solver with uniform weights
#' @param eps_abs Absolute error tolerance for solver
#' @param eps_rel Relative error tolerance for solver
#' @param ... Extra arguments for osqp solver
#'
#' @return \itemize{
#'          \item{weights }{Estimated primal weights as an n x J matrix}
#'          \item{imbalance }{Imbalance in covariates as a d X J matrix}
#'          \item{data_out }{List containing elements of QP min 0.5 x'Px + q'x st l <= Ax <= u \itemize{
#'                  \item{P, q}{}
#'                  \item{constraints }{A, l , u}
#'}}}
#' @export
standardize <- function(X, target, Z, lambda = 0, lowlim = 0, uplim = 1,
                        scale_sample_size = TRUE,
                        data_in = NULL, verbose = TRUE, return_data = TRUE,
                        exact_global = TRUE, init_uniform = FALSE,
                        eps_abs = 1e-5, eps_rel = 1e-5, ...) {

  # convert X to a matrix
  X <- as.matrix(X)

  # split covariate rows by group
  Z_factor <- as.factor(Z)
  Xz <- split.data.frame(X, Z_factor)

  # ensure that target is a vector
  target <- c(target)

  # validate dimensions and hyperparameters before building the QP
  check_data(X, target, Z, Xz, lambda, lowlim, uplim, data_in)

  unique_Z <- levels(Z_factor)
  J <- length(unique_Z)

  # dimension of auxiliary weights (one block of d per group)
  aux_dim <- J * ncol(X)
  n <- nrow(X)
  idxs <- split(seq_len(n), Z_factor)

  # Setup the components of the QP and solve
  if (verbose) message("Creating linear term vector...")
  if (is.null(data_in$q)) {
    q <- create_q_vector(Xz, target, aux_dim)
  } else {
    q <- data_in$q
  }

  if (verbose) message("Creating quadratic term matrix...")
  if (is.null(data_in$P)) {
    P <- create_P_matrix(n, aux_dim)
  } else {
    P <- data_in$P
  }
  # add the (optionally sample-size-scaled) ridge penalty on the weights
  I0 <- create_I0_matrix(Xz, scale_sample_size, n, aux_dim)
  P <- P + lambda * I0

  if (verbose) message("Creating constraint matrix...")
  if (is.null(data_in$constraints)) {
    constraints <- create_constraints(Xz, target, Z, lowlim,
                                      uplim, exact_global, verbose)
  } else {
    constraints <- data_in$constraints
    # refresh the box constraints in case lowlim/uplim changed between calls
    constraints$l[(J + 1):(J + n)] <- lowlim
    constraints$u[(J + 1):(J + n)] <- uplim
  }

  settings <- do.call(osqp::osqpSettings,
                      c(list(verbose = verbose,
                             eps_rel = eps_rel,
                             eps_abs = eps_abs),
                        list(...)))

  if (init_uniform) {
    if (verbose) message("Initializing with uniform weights")
    # warm start the solver at uniform weights within each group
    unifw <- get_uniform_weights(Xz)
    obj <- osqp::osqp(P, q, constraints$A,
                      constraints$l, constraints$u, pars = settings)
    obj$WarmStart(x = unifw)
    solution <- obj$Solve()
  } else {
    solution <- osqp::solve_osqp(P, q, constraints$A,
                                 constraints$l, constraints$u,
                                 pars = settings)
  }

  # convert the stacked solution vector back into an n x J weight matrix
  nj <- vapply(seq_len(J), function(j) nrow(Xz[[j]]), integer(1))
  weights <- matrix(0, ncol = J, nrow = n)
  if (verbose) message("Reordering weights...")
  cumsumnj <- cumsum(c(1, nj))
  for (j in seq_len(J)) {
    weights[idxs[[j]], j] <- solution$x[cumsumnj[j]:(cumsumnj[j + 1] - 1)]
  }

  # imbalance = target minus achieved weighted means, one column per group
  imbalance <- as.matrix(target - t(X) %*% weights)

  if (return_data) {
    # return the QP pieces without the regularization term so they can be
    # reused (via data_in) with a different lambda
    data_out <- list(P = P - lambda * I0,
                     q = q, constraints = constraints)
  } else {
    data_out <- NULL
  }
  return(list(weights = weights, imbalance = imbalance, data_out = data_out))
}
#' Create diagonal regularization matrix
#' @param Xz list of J n x d matrices of covariates split by group
#' @param scale_sample_size Whether to scale the dispersion penalty by the sample size of each group
#' @param n Total number of units
#' @param aux_dim Dimension of auxiliary weights
#'
#' @return Sparse (n + aux_dim) x (n + aux_dim) diagonal matrix; the
#'   auxiliary block is all zeros so auxiliary weights are never penalized
create_I0_matrix <- function(Xz, scale_sample_size, n, aux_dim) {
  if (scale_sample_size) {
    # block diagonal with entries n_j for every unit in group j, so each
    # group's penalty is scaled by its sample size
    unit_block <- Matrix::bdiag(
      lapply(Xz, function(x) Matrix::Diagonal(nrow(x), nrow(x))))
  } else {
    # identity: every unit gets the same penalty
    unit_block <- Matrix::Diagonal(n)
  }
  # pad with a zero diagonal for the auxiliary weights
  Matrix::bdiag(unit_block, Matrix::Diagonal(aux_dim, 0))
}
#' Create the q vector for an QP that solves min_x 0.5 * x'Px + q'x
#' @param Xz list of J n x d matrices of covariates split by group
#' @param target Vector of population means to re-weight to
#' @param aux_dim Dimension of auxiliary weights
#'
#' @return Sparse vector of length n + aux_dim: the first n entries are
#'   -X %*% target (units stacked by group), the auxiliary entries are zero
create_q_vector <- function(Xz, target, aux_dim) {
  # linear term: negative inner product of each unit's covariates with the target
  q <- -c(do.call(rbind, Xz) %*% target)
  # seq_along is safe for zero-length q (1:length(q) would be c(1, 0))
  q <- Matrix::sparseVector(q, seq_along(q),
                            length(q) + aux_dim)
  return(q)
}
#' Create the P matrix for an QP that solves min_x 0.5 * x'Px + q'x
#' @param n Total number of units (primal weights)
#' @param aux_dim Dimension of auxiliary weights
#'
#' @return Sparse (n + aux_dim) x (n + aux_dim) diagonal matrix: zeros on the
#'   primal-weight block, identity on the auxiliary block
create_P_matrix <- function(n, aux_dim) {
  return(Matrix::bdiag(Matrix::Diagonal(n, 0), Matrix::Diagonal(aux_dim, 1)))
}
#' Get a set of uniform weights for initialization
#' @param Xz list of J n x d matrices of covariates split by group
#'
#' @return Numeric vector: uniform weights 1 / n_j for every unit, followed
#'   by the auxiliary weights they imply (sqrt(P)' gamma)
get_uniform_weights <- function(Xz) {
  # weight 1 / n_j for every unit in group j
  uniw <- unlist(lapply(Xz, function(x) rep(1 / nrow(x), nrow(x))))
  # auxiliary weights consistent with the uniform primal weights
  sqrtP <- Matrix::bdiag(lapply(Xz, t))
  aux_uniw <- as.numeric(sqrtP %*% uniw)
  c(uniw, aux_uniw)
}
#' Create the constraints for QP: l <= Ax <= u
#' @param Xz list of J n x d matrices of covariates split by group
#' @param target Vector of population means to re-weight to
#' @param Z Vector of group indicators
#' @param lowlim Lower limit on weights
#' @param uplim Upper limit on weights
#' @param exact_global Whether to constrain the size-weighted overall mean to equal the target
#' @param verbose Whether to show progress messages
#'
#' @return List with A, l, and u
create_constraints <- function(Xz, target, Z, lowlim, uplim, exact_global, verbose) {
  J <- length(Xz)
  nj <- vapply(seq_len(J), function(j) nrow(Xz[[j]]), integer(1))
  d <- ncol(Xz[[1]])
  cumsum_nj <- cumsum(c(1, nj))
  n <- sum(nj)
  Xzt <- lapply(Xz, t)
  # dimension of auxiliary weights
  aux_dim <- J * d

  if (verbose) message("\tx Sum to one constraint")
  # sum-to-one constraint for each group
  A1 <- Matrix::t(Matrix::bdiag(lapply(Xz, function(x) rep(1, nrow(x)))))
  A1 <- Matrix::cbind2(A1, Matrix::Matrix(0, nrow = nrow(A1), ncol = aux_dim))
  l1 <- rep(1, J)
  u1 <- rep(1, J)

  if (verbose) message("\tx Upper and lower bounds")
  # box constraints on every individual weight
  A2 <- Matrix::Diagonal(n)
  A2 <- Matrix::cbind2(A2, Matrix::Matrix(0, nrow = nrow(A2), ncol = aux_dim))
  l2 <- rep(lowlim, n)
  u2 <- rep(uplim, n)

  if (exact_global) {
    if (verbose) message("\tx Maintain overall population mean")
    # constrain the size-weighted overall mean to equal the target;
    # ncol(Xzt[[j]]) is the group size n_j
    A3 <- do.call(cbind, lapply(Xzt, function(x) x * ncol(x)))
    A3 <- Matrix::cbind2(A3, Matrix::Matrix(0, nrow = nrow(A3), ncol = aux_dim))
    l3 <- n * target
    u3 <- n * target
  } else {
    if (verbose) message("\t(SKIPPING) Maintain overall population mean")
    # zero-row blocks so the rbind/c below still line up
    A3 <- matrix(numeric(0), nrow = 0, ncol = ncol(A2))
    l3 <- numeric(0)
    u3 <- numeric(0)
  }

  if (verbose) message("\tx Fit weights to data")
  # tie the auxiliary weights to sqrt(P)'gamma
  sqrtP <- Matrix::bdiag(Xzt)
  A4 <- Matrix::cbind2(sqrtP, -Matrix::Diagonal(aux_dim))
  l4 <- rep(0, aux_dim)
  u4 <- rep(0, aux_dim)

  if (verbose) message("\tx Combining constraints")
  A <- rbind(A1, A2, A3, A4)
  l <- c(l1, l2, l3, l4)
  u <- c(u1, u2, u3, u4)
  return(list(A = A, l = l, u = u))
}
#' Check that data is in right shape and hyperparameters are feasible
#' @param X n x d matrix of covariates
#' @param target Vector of population means to re-weight to
#' @param Z Vector of group indicators with J levels
#' @param Xz list of J n x d matrices of covariates split by group
#' @param lambda Regularization hyper parameter
#' @param lowlim Lower limit on weights, default 0
#' @param uplim Upper limit on weights, default 1
#' @param data_in Optional list containing pre-computed objective matrix/vector and constraints (without regularization term)
#'
#' @return Invisibly NULL; stops with an informative error on the first
#'   violated condition
check_data <- function(X, target, Z, Xz, lambda, lowlim, uplim, data_in) {
  # NA checks
  if (any(is.na(X))) {
    stop("Covariate matrix X contains NA values.")
  }
  if (any(is.na(Z))) {
    stop("Grouping vector Z contains NA values.")
  }
  if (any(is.na(target))) {
    stop("Target vector contains NA values.")
  }

  # dimension checks
  n <- nrow(X)
  d <- ncol(X)
  J <- length(Xz)
  aux_dim <- d * J
  nj <- vapply(Xz, nrow, integer(1))
  if (length(Z) != n) {
    stop("The number of rows in covariate matrix X (", n,
         ") does not equal the dimension of the grouping vector Z (",
         length(Z), ").")
  }
  if (sum(nj) != n) {
    stop("Implied number of weights (", sum(nj),
         ") does not equal number of units (", n, ").")
  }
  if (length(target) != d) {
    stop("Target dimension (", length(target),
         ") is not equal to data dimension (", d, ").")
  }

  # optional pre-computed QP pieces must match the implied dimensions
  if (!is.null(data_in$q)) {
    if (length(data_in$q) != n + aux_dim) {
      stop("data_in$q vectors should have dimension ", n + aux_dim)
    }
  }
  if (!is.null(data_in$P)) {
    if (dim(data_in$P)[1] != dim(data_in$P)[2]) {
      stop("data_in$P matrix must be square")
    }
    if (dim(data_in$P)[1] != n + aux_dim) {
      stop("data_in$P should have ", n + aux_dim,
           " rows and columns")
    }
  }
  if (!is.null(data_in$constraints)) {
    if (length(data_in$constraints$l) != length(data_in$constraints$u)) {
      stop("data_in$constraints$l and data_in$constraints$u",
           " must have the same dimension")
    }
    if (length(data_in$constraints$l) != J + n + d + aux_dim) {
      stop("data_in$constraints$l must have dimension ",
           J + n + d + aux_dim)
    }
    if (nrow(data_in$constraints$A) != length(data_in$constraints$l)) {
      stop("The number of rows in data_in$constraints$A must be ",
           "the same as the dimension of data_in$constraints$l")
    }
    if (ncol(data_in$constraints$A) != n + aux_dim) {
      stop("The number of columns in data_in$constraints$A must be ",
           n + aux_dim)
    }
  }

  # hyperparameters are feasible
  if (lambda < 0) {
    stop("lambda must be >= 0")
  }
  if (lowlim > uplim) {
    stop("Lower threshold must be lower than upper threshold")
  }
  # the box [lowlim, uplim] must contain uniform weights for every group
  if (lowlim > 1 / max(nj)) {
    stop("Lower threshold must be lower than 1 / size of largest group")
  }
  if (uplim < 1 / min(nj)) {
    stop("Upper threshold must be higher than 1 / size of smallest group")
  }
  invisible(NULL)
}
#' Re-weight populations to group targets
#' @param X n x d matrix of covariates
#' @param Z Vector of group indicators with J levels
#' @param lambda Regularization hyper parameter, default 0
#' @param lowlim Lower limit on weights, default 0
#' @param uplim Upper limit on weights, default 1
#' @param scale_sample_size Whether to scale the dispersion penalty by the sample size of each group, default FALSE
#' @param verbose Whether to show messages, default TRUE
#' @param n_cores Number of cores to find weights in parallel
#' @param eps_abs Absolute error tolerance for solver
#' @param eps_rel Relative error tolerance for solver
#' @param ... Extra arguments for osqp solver
#'
#' @return \itemize{
#'          \item{weights }{Estimated weights as an n x J matrix}
#'          \item{imbalance }{Imbalance in covariates as a d X J matrix}
#' }
#' @export
standardize_indirect <- function(X, Z, lambda = 0, lowlim = 0, uplim = 1,
                                 scale_sample_size = FALSE, verbose = TRUE, n_cores = 1,
                                 eps_abs = 1e-5, eps_rel = 1e-5, ...) {
  # get distinct values of Z
  uni_z <- sort(unique(Z))
  # for each group, re-weight the rest of the sample to that group's mean
  # BUG FIX: forward ... so extra osqp solver arguments actually reach the solver
  standz <- function(z) {
    standardize_indirect_z(z, X, Z, lambda, lowlim, uplim, scale_sample_size,
                           verbose, eps_abs, eps_rel, ...)
  }
  out <- parallel::mclapply(uni_z, standz, mc.cores = n_cores)
  # combine the per-group results column-wise into one list
  out <- Reduce(function(x, y) {
    list(weights = cbind(x$weights, y$weights),
         imbalance = cbind(x$imbalance, y$imbalance)
    )}, out)
  return(out)
}
#' Re-weight population to group z's target
#' @param focal_z Group to use as target
#' @param X n x d matrix of covariates
#' @param Z Vector of group indicators with J levels
#' @param lambda Regularization hyper parameter, default 0
#' @param lowlim Lower limit on weights, default 0
#' @param uplim Upper limit on weights, default 1
#' @param scale_sample_size Whether to scale the dispersion penalty by the sample size of each group, default FALSE
#' @param verbose Whether to show messages, default TRUE
#' @param eps_abs Absolute error tolerance for solver
#' @param eps_rel Relative error tolerance for solver
#' @param ... Extra arguments for osqp solver
#'
#' @return \itemize{
#'          \item{weights }{Estimated weights as a length-n vector (zero for units in the focal group)}
#'          \item{imbalance }{Imbalance in covariates}
#' }
#' @export
standardize_indirect_z <- function(focal_z, X, Z, lambda = 0,
                                   lowlim = 0, uplim = 1,
                                   scale_sample_size = FALSE, verbose = TRUE,
                                   eps_abs = 1e-5, eps_rel = 1e-5,
                                   ...) {
  # the focal group's covariate means define the target
  in_focal <- which(Z == focal_z)
  n_focal <- length(in_focal)
  n_units <- nrow(X)
  target_means <- colMeans(X[in_focal, , drop = FALSE])

  # re-weight all units outside the focal group (treated as one group)
  # to the focal group's means
  fit <- standardize(X = X[-in_focal, , drop = FALSE],
                     target = target_means,
                     Z = rep(1, n_units - n_focal),
                     lambda = lambda, lowlim = lowlim, uplim = uplim,
                     scale_sample_size = scale_sample_size,
                     data_in = NULL, verbose = verbose,
                     return_data = FALSE, exact_global = FALSE,
                     init_uniform = FALSE,
                     eps_abs = eps_abs, eps_rel = eps_rel, ...)

  # focal-group units get weight zero
  out_weights <- numeric(n_units)
  out_weights[-in_focal] <- fit$weights
  list(weights = out_weights, imbalance = fit$imbalance)
}
|
24a805baf8d15b24990a259b703c76e409aec384
|
93ebef2e3663445bb5dc2de07569dadc31ef0325
|
/R/plos_records.R
|
2c90d386e5ce369808ab529b6bdef386eb1ed1a0
|
[] |
no_license
|
poldham/oldhammisc
|
6817f9d3036583dea5a48a4b65e8d03c9bf8c343
|
63eb6fbc9a358525e17d444d8522ba0997bea265
|
refs/heads/master
| 2020-05-30T07:16:06.917686
| 2018-02-09T13:03:09
| 2018-02-09T13:03:09
| 59,103,337
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 966
|
r
|
plos_records.R
|
#' @title Retrieve count of results for a query with PLOS
#' @description Use this function to work out how many results a query of PLOS using rplos will return. Useful for deciding on data to download.
#' @param query A search term or vector of search terms. For multiple terms use double quotes (see examples).
#' @return prints the maximum value of results
#' @export
#' @importFrom plyr ldply
#' @importFrom dplyr %>%
#' @importFrom dplyr select
#' @importFrom dplyr filter
#' @examples \dontrun{drones <- plos_records(drones)}
#' @examples \dontrun{synbio <- c('"synthetic biology"', '"synthetic genomics"', '"synthetic genome"', '"synthetic genomes"')
#' synbio <- plos_records(synbio)}
plos_records <- function(query) {
  # query PLOS once per term, asking for zero records (metadata only)
  hits <- lapply(query, function(term) rplos::searchplos(term, limit = 0))
  # first element of each response is the "meta" block; stack into one frame
  meta <- plyr::ldply(hits, "[[", 1)
  meta %>%
    dplyr::select(numFound) %>%                    # keep only the counts
    dplyr::filter(numFound == max(numFound)) %>%   # keep the largest count
    print()
}
|
90d35149b0734a111702a49d1d74418de5469889
|
d8f1ba7075531ef75a1f139a8da0bea02ab9c9fc
|
/R/data_script.R
|
fbe20342a4dcbc7728be39fe472854e22282260c
|
[] |
no_license
|
jasonhilton/viz_weekly
|
7b81d6c7df575ee993bf89969c022cf78035c004
|
80b1bda28445fc5e858dc4ab8015c5ad3675936c
|
refs/heads/master
| 2021-06-27T15:39:12.378321
| 2019-06-14T13:00:39
| 2019-06-14T13:00:39
| 135,202,788
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,195
|
r
|
data_script.R
|
library(curl)
library(dplyr)
library(tidyr)
library(ggplot2)
library(magrittr)
library(readxl)
library(purrr)
library(ggfan)
library(httr)
library(HMDHFDplus)
# ------------------------------------------------------------------------------
# Download script: assembles ONS and HMD mortality/population data into ./data.
# Side effects only: creates directories and downloads/saves files; nothing is
# returned. Requires curl, readxl, purrr, httr and HMDHFDplus (loaded above).
# ------------------------------------------------------------------------------

# 21st century mortality files.
# deaths by age, sex, year, and cause of death.
deaths_file <- paste0("https://www.ons.gov.uk/file?uri=",
                      "/peoplepopulationandcommunity/birthsdeathsandmarriages/deaths/",
                      "datasets/the21stcenturymortalityfilesdeathsdataset/",
                      "current/regdeaths2001to2015.xls")
dir.create("data")
curl_download(deaths_file,destfile = "data/c21deaths.xls")
dataset <- readxl::read_excel("data/c21deaths.xls",sheet=3, skip=1)

# Read one year's worth of deaths from a given sheet of the workbook.
get_year <- function(sheet, file, skip){
  data <- readxl::read_excel(file,sheet=sheet, skip=skip)
  return(data)
}

# Sheets 3-15 hold the yearly cause-of-death tables; stack them into one frame.
sheets <- 3:15
COD_data <- map_df(sheets, get_year, file="data/c21deaths.xls", skip=1)

# 20th Century Mortality Files ------
base <- paste0("http://webarchive.nationalarchives.gov.uk/",
               "20160105160709/http://www.ons.gov.uk/ons/",
               "rel/subnational-health1/the-20th-century-mortality-files/")
url_popC20 <- paste0(base,"20th-century-deaths/populations-1901-2000.xls")
curl_download(url_popC20,destfile = "data/c20pop.xls")
# NOTE(review): this re-reads the 21st-century file right after downloading
# c20pop.xls — presumably it should read "data/c20pop.xls"; confirm intent.
dataset <- readxl::read_excel("data/c21deaths.xls",sheet=3, skip=1)
# URLs defined but not downloaded here (deaths by ICD9 period, zipped).
url_D79_84 <- paste0(base,"20th-century-deaths/1979-1984-icd9a.zip")
url_D85_93 <- paste0(base,"20th-century-deaths/1985-1993-icd9b.zip")
url_D94_00 <- paste0(base,"20th-century-deaths/1994-2000-icd9c.zip")

# Weekly provisional death registrations (England and Wales).
base <- paste0("https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/",
               "birthsdeathsandmarriages/deaths/datasets/",
               "weeklyprovisionalfiguresondeathsregisteredinenglandandwales/")
dir.create("data/weekly")

# Download the annual weekly-deaths workbook for a single year.
# NOTE(review): assumes the file is named publishedweek<year>.xls for
# 2010-2015; later years use publishedweekNN<year>.xls (handled below).
get_weekly_year<-function(year, base){
  curl_download(paste0(base, year,"/publishedweek", year,".xls"),
                destfile = paste0("data/weekly/weekly_",year,".xls"))
}
years <- 2010:2015
map(years, get_weekly_year, base)

# latest data for 2016, 2017 and 2018 (full-year files, week 52)
curl_download(paste0(base,"2016/publishedweek522016.xls"),
              destfile = "data/weekly/weekly_2016.xls")
curl_download(paste0(base,"2017/publishedweek522017.xls"),
              destfile = "data/weekly/weekly_2017.xls")
curl_download(paste0(base,"2018/publishedweek522018.xls"),
              destfile = "data/weekly/weekly_2018.xls")

# For the current year (2019) the last published week is unknown, so probe
# every candidate URL with an HTTP HEAD request and take the latest that exists.
weeks <- 1:52
get_url <- function(week,year,base){
  url <- paste0(base, year,"/publishedweek", sprintf("%02d", week),year, ".xls")
  return(url)
}
data_available <- map_lgl(weeks, function(week, year,base){
  url <- get_url(week, year,base)
  identical(status_code(HEAD(url)), 200L)
}, year=2019, base=base)
last_week_avail <- tail(which(data_available),1)
url <- get_url(last_week_avail, 2019, base)
curl_download(url,
              destfile = "data/weekly/weekly_2019.xls")

# HMD (Human Mortality Database) exposures, deaths and population for
# England & Wales; credentials come from environment variables.
user <- Sys.getenv("HFD_user")
pass <- Sys.getenv("HFD_pass")
exp_hmd <- readHMDweb(CNTRY = "GBRTENW", item = "Exposures_1x1", fixup = TRUE,
                      username = user, password = pass)
deaths_hmd <- readHMDweb(CNTRY = "GBRTENW", item = "Deaths_1x1", fixup = TRUE,
                         username = user, password = pass)
dir.create(file.path("data", "HMD"))
saveRDS(exp_hmd, "data/HMD/exposures_hmd.Rdata")
saveRDS(deaths_hmd, "data/HMD/deaths_hmd.Rdata")
pop_hmd <- readHMDweb(CNTRY = "GBRTENW", item = "Population", fixup = TRUE,
                      username = user, password = pass)
saveRDS(pop_hmd, file="data/HMD/pop_hmd.Rdata")

# Exposures
# Using mid-year population 2015
url <- paste0("https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/",
              "populationandmigration/populationestimates/datasets/",
              "populationestimatesforukenglandandwalesscotlandandnorthernireland/",
              "mid2016detailedtimeseries/ukandregionalpopulationestimates1838to2016.zip")
path <- file.path("data","midyear")
dir.create(path)
filename <- "EW_midyear_2016"
curl_download(url, destfile=file.path(path,paste0(filename,".zip")))
unzip(file.path(path,paste0(filename, ".zip")), exdir=path)
# url <- paste0("https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/",
#               "populationandmigration/populationestimates/datasets/",
#               "populationestimatesforukenglandandwalesscotlandandnorthernireland/",
#               "mid2016/ukmidyearestimates2016.xls")

# old age: mid-year estimates of the very old (incl. centenarians)
url <- paste0("https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/",
              "birthsdeathsandmarriages/ageing/datasets/",
              "midyearpopulationestimatesoftheveryoldincludingcentenariansengland",
              "/current/e2016.xls")
filename <- "england_old"
curl_download(url, destfile=file.path(path,paste0(filename,".xls")))
url <- paste0("https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/",
              "birthsdeathsandmarriages/ageing/datasets/",
              "midyearpopulationestimatesoftheveryoldincludingcentenarianswales",
              "/current/w2016.xls")
filename <- "wales_old"
curl_download(url, destfile=file.path(path,paste0(filename,".xls")))

# forecasts: 2016-based national population projections (zipped open data)
path <- "data/forecast/"
dir.create(path)
url <- paste0("https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/",
              "populationandmigration/populationprojections/datasets/",
              "z2zippedpopulationprojectionsdatafilesgbandenglandandwales/2016based/",
              "tablez2opendata16ewgb.zip")
curl_download(url,destfile = paste0(path, "forecast.zip"))
unzip(paste0(path, "forecast.zip"), exdir = path)
list.files(path)
# zip(zipfile = file.path(path,"ew_ppp_opendata2016.xlsx"),
#     files = file.path(path,"ew_ppp_opendata2016.xml"))
# unfortunately I couldn't work out an easy way of reading these forecasts directly
# as an xml file.
# xls files are just basically xml files, but I couldn't open these with the
# read_xls functions.
# Unfortunately, I needed to manually open with excel and save out as xlsx before I could
# open from R
# It should be possible to parse these from xml, but it looked like it needed
# more time to figure out than I had available... Hit me up if you know how.
|
dc17e812ef8309c86c87a9a1da282d565e35b7f0
|
a12c2a31361ec8b6fdc5ac9e73130801aec43e40
|
/resmatch/man/create_text_corpus.Rd
|
5667dd999966e29a3ea1773c0a35a131790c0718
|
[] |
no_license
|
wjburton/resume-matching
|
143728174b7e0146e405b3a6f0c6d53dee45d04c
|
2e0062a5f4ac0b490843f64cb1b0eedf31b24560
|
refs/heads/master
| 2021-01-13T08:07:33.688581
| 2018-01-16T22:06:43
| 2018-01-16T22:06:43
| 71,744,820
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 499
|
rd
|
create_text_corpus.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{create_text_corpus}
\alias{create_text_corpus}
\title{Uses the tm package to clean a resume or vector of documents.}
\usage{
create_text_corpus(text)
}
\arguments{
\item{text}{= a resume or vector of documents}
}
\value{
a Vcorpus/Corpus ready to turn into a document term matrix
}
\description{
Creates a corpus of the text/vector. Removes punctuation, numbers, stopwords,
whitespace, and stems the document
}
|
3d37fdd7d7abf70fb51f6471a7e61b6b3710e49f
|
f77d4ae139d960f6138e29f4e5e9e39fcba528fb
|
/R_CODES/masters_project/Parallelize_code_in_R.R
|
44e2834e6a8e62495464eab5e310ee517d7ea6b5
|
[] |
no_license
|
zenabu-suboi/masters_project
|
fc80eb077af8e92bf81cda94952d4dec196bb263
|
d865eb68e66d35c52229023d7aa83b78fd7518f4
|
refs/heads/master
| 2022-04-22T15:01:15.063667
| 2020-04-28T10:50:17
| 2020-04-28T10:50:17
| 179,095,292
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,433
|
r
|
Parallelize_code_in_R.R
|
#################################################################################
# Run rejection-sampling ABC calibration in parallel with doParallel/foreach.
# NOTE(review): the hard-coded setwd() makes this script machine-specific.
setwd("C:/Users/ZENABU/Documents/GitHub/masters_project/R_CODES/masters_project")
source("my_functions.R")
#################################################################################
##########################################################################
## running rejection ABC in parallel
library(doParallel)
library(foreach)
##########################################################################
# Calculate the number of cores, leaving three free for the rest of the system
no_cores <- detectCores() - 3
cl <- makeCluster(no_cores)  # explicit PSOCK cluster with that many workers
registerDoParallel(cl)       # register the cluster with foreach

# Five independent rejection-ABC runs, one farmed out to each worker; each run
# draws 2e5 parameter combinations from the priors and (tol = 1) keeps them all.
ABC_rejref <- foreach(simulations = 1:5,
                      .combine = c,
                      .packages = c("SimInf", "EasyABC")) %dopar%
  ABC_rejection(model = modelforABC,
                prior = list(c("unif",0.1,0.4),
                             c("unif",0.01,0.03)),
                summary_stat_target = targets(c(0.2, 0.02)),
                nb_simul = 200000,
                tol = 1,
                progress_bar = TRUE,
                use_seed = TRUE)

# BUG FIX: stopImplicitCluster() only stops clusters doParallel created
# implicitly via registerDoParallel(cores = ...); a cluster made with
# makeCluster() must be shut down explicitly or its workers keep running.
stopCluster(cl)
################################################################################
################################################################################
# check compute time (first run's timing; names repeat across the 5 runs)
computime <- ABC_rejref$computime
##################################################################################
# gather desired output and save: every 7th element of the combined list is
# one run's retained parameter matrix
gather_params <- ABC_rejref[seq(1, length(ABC_rejref), 7)]
posterior <- data.frame(do.call(rbind, gather_params))
#gather_params[[1]]
saveRDS(posterior, "ref_posterior.rds")
###################################################################################
# class(posterior)
# class(gather_params[[1]])
|
85019376f9b462357314ccdf28d3a6a126786c3b
|
5db34fe55462f237703358e5ead7c80299de3d02
|
/R/powerUntransform.R
|
0165dbaa8b9ad4dd27a1beb5add1e6e832c3005a
|
[] |
no_license
|
cran/tlm
|
687fe4cb6d25a1086f46e61afb5faa898037f9e2
|
4a399dc84a6b38f8681ef4709c14115d89505f27
|
refs/heads/master
| 2021-01-17T07:11:00.175043
| 2017-04-10T12:15:19
| 2017-04-10T12:15:19
| 23,803,445
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 104
|
r
|
powerUntransform.R
|
# Invert a power transform: recover x from xt = x^power, with power == 0
# treated as the log transform (x = exp(xt)). Vectorized over xt.
powerUntransform <- function(xt, power) {
  if (power == 0) {
    exp(xt)
  } else {
    xt^(1 / power)
  }
}
|
5092b6a3a6e0236f86ff49cd951461ca79bd1d32
|
fd0622e97276bba2c04d3c2fcba902cdfb65e214
|
/packages/nimble/inst/classic-bugs/vol1/bones/bones-init.R
|
36c66ae5b4da45a59b8c6e008a439a9b2f7c730a
|
[
"GPL-2.0-only",
"BSD-3-Clause",
"CC-BY-4.0",
"GPL-1.0-or-later",
"MPL-2.0",
"GPL-2.0-or-later"
] |
permissive
|
nimble-dev/nimble
|
7942cccd73815611e348d4c674a73b2bc113967d
|
29f46eb3e7c7091f49b104277502d5c40ce98bf1
|
refs/heads/devel
| 2023-09-01T06:54:39.252714
| 2023-08-21T00:51:40
| 2023-08-21T00:51:40
| 20,771,527
| 147
| 31
|
BSD-3-Clause
| 2023-08-12T13:04:54
| 2014-06-12T14:58:42
|
C++
|
UTF-8
|
R
| false
| false
| 1,888
|
r
|
bones-init.R
|
"theta" <-
c(0.5, 1, 2, 3, 5, 6, 7, 8, 9, 12, 13, 16, 18)
"grade" <-
structure(c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, 1,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, 1, NA, NA, 1,
NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA,
NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, 1, 1, NA, NA, 1, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, 1, 1, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), .Dim = as.integer(c(13,
34)))
|
9bc48189d052d5adfc92f1fe4e268e574af7a8f7
|
790e5f064a41aac88f3465591158a73e6faaec77
|
/R/parameters.R
|
4a3442357d805f81c3aa3242e64e45159ca8066a
|
[] |
no_license
|
cristianmejia00/heatmaps3
|
033a08c786d2dc4e3caebdbb0431f756ebf7c33a
|
763850d8f2d640d51f0dea950596330aecc236f8
|
refs/heads/master
| 2020-06-20T07:49:33.950688
| 2017-06-14T10:19:40
| 2017-06-14T10:19:40
| 94,198,532
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 435
|
r
|
parameters.R
|
# Names for the output files
similarity_matrix <- "heatmap_matrix.csv" # similarity matrix based on cosine similarity
edge_list <- "heatmap_list.csv" # top pairs of topics - clusters
heatmapTC <- "heatmap.png" # heatmap image of the similarity matrix
#######################################################
# Other parameters
# Number of top connections to retrieve as a summary
# Not in use!!
top_pairs <- 100
25d749ce3920ba47f79cdd9d16b28e658296fb26
|
84e7b589d3d8b05e52e927dc7ce77b79515e71fa
|
/ch11 - 회귀분석/04..로지시틱 회귀분석.R
|
521b9f1b4a3069c6ba5edaa8a4564fc676b9c691
|
[
"MIT"
] |
permissive
|
Lee-changyul/Rstudy_Lee
|
d1e0f28190de74643d5c0a14f178b41250db7860
|
837a88d6cb4c0e223b42ca18dc5a469051b48533
|
refs/heads/main
| 2023-06-29T20:21:10.968106
| 2021-08-02T01:48:00
| 2021-08-02T01:48:00
| 325,493,003
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,114
|
r
|
04..로지시틱 회귀분석.R
|
#### 04. Logistic regression #####

# 01. Load the data (CSV path kept as-is; "." marks missing values)
lreg.df <- read.csv("Ch1104.로지스틱회귀분석(LREG).csv",
                    header=TRUE,
                    na.strings = ".")
# Recode the 0/1 indicators as labelled factors
lreg.df$exp <- factor(lreg.df$exp,
                      levels=c(0:1),
                      labels=c("No","Yes"))
lreg.df$chun <- factor(lreg.df$chun,
                       levels=c(0:1),
                       labels=c("No","Yes"))
str(lreg.df)

# 02. Check basic descriptive statistics
library(psych)
describe(lreg.df)
pairs.panels(lreg.df)

# 03. Fit the logistic regression (turnover intention ~ predictors)
lreg.model <- glm(chun ~ phy+psy+cmmt+exp,
                  family = binomial,
                  data=lreg.df)
options(scipen=10) # show plain decimals rather than scientific notation
summary(lreg.model)

# Compute odds ratios alongside the coefficients
odds <- data.frame(summary(lreg.model)$coefficients,
                   odds = exp(coef(lreg.model)))
round(odds, 5)

# Odds-ratio interpretation: relative to 1, a one-unit increase in
# organizational commitment multiplies the odds of turnover intention by 0.547
# => equivalently, the odds of turnover intention decrease by 45.3% (1 - 0.547)
|
ef88f9812340076ceb2df0323f5416c99081bb9b
|
a3541fa9afdcbc4bd1360afda4b6f8d170244889
|
/data-raw/vars-ejscreen-acs.R
|
d56fc7ddb3bc59b3cf6e1e11790ed8103066436e
|
[] |
no_license
|
ejanalysis/ejscreen
|
4349236260c94dd9a9d0cfdcae237adebcec2d8a
|
6af10b7d3b47c683cb512fd4792c2eef0e1d695a
|
refs/heads/master
| 2023-05-27T11:59:13.144072
| 2023-05-25T23:40:52
| 2023-05-25T23:40:52
| 40,103,218
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,137
|
r
|
vars-ejscreen-acs.R
|
## code to prepare `vars.ejscreen.acs` dataset
# mytables <- c("B01001", "B03002", "B15002", 'B23025', "B25034", "C16002", "C17002")
# get.table.info(mytables)
# ID title
# 1 B01001 SEX BY AGE
# 2 B03002 HISPANIC OR LATINO ORIGIN BY RACE
# 3 B15002 SEX BY EDUCATIONAL ATTAINMENT FOR THE POPULATION 25 YEARS AND OVER
# 4 B23025 EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER
# 5 B25034 YEAR STRUCTURE BUILT
# 6 C16002 HOUSEHOLD LANGUAGE BY HOUSEHOLD LIMITED ENGLISH SPEAKING STATUS
# 7 C17002 RATIO OF INCOME TO POVERTY LEVEL IN THE PAST 12 MONTHS
# get.field.info(mytables[4])[,1:4] # get.field.info('b23025')[,1:4]
# B23025.004 Employed unemployed
# B23025.003 Civilian labor force unemployedbase
# Essential ACS field names for EJScreen 2.1: every non-missing acsfieldname
# referenced by the package's ejscreenformulas table.
vars.ejscreen.acs <- ejscreenformulas$acsfieldname[!is.na(ejscreenformulas$acsfieldname)]
# former, more extensive list, but not really needed:
# vars.ejscreen.acs <- c(
# "B01001.001", "B01001.003", "B01001.004", "B01001.005", "B01001.006",
# "B01001.020", "B01001.021", "B01001.022", "B01001.023", "B01001.024",
# "B01001.025", "B01001.027", "B01001.028", "B01001.029", "B01001.030",
# "B01001.044", "B01001.045", "B01001.046", "B01001.047", "B01001.048",
# "B01001.049",
#
# "B03002.001", "B03002.002", "B03002.003", "B03002.004",
# "B03002.005", "B03002.006", "B03002.007", "B03002.008", "B03002.009",
# "B03002.010", "B03002.011", "B03002.012", "B03002.013", "B03002.014",
# "B03002.015", "B03002.016", "B03002.017", "B03002.018", "B03002.019",
# "B03002.020", "B03002.021",
#
# "B15002.001", "B15002.003", "B15002.004",
# "B15002.005", "B15002.006", "B15002.007", "B15002.008", "B15002.009",
# "B15002.010", "B15002.020", "B15002.021", "B15002.022", "B15002.023",
# "B15002.024", "B15002.025", "B15002.026", "B15002.027",
#
# "B16001.001",
# "B16001.002", "B16001.003", "B16001.005", "B16001.006", "B16001.008",
# "B16001.009", "B16001.011", "B16001.012", "B16001.014", "B16001.015",
# "B16001.017", "B16001.018", "B16001.020", "B16001.021", "B16001.023",
# "B16001.024", "B16001.026", "B16001.027", "B16001.029", "B16001.030",
# "B16001.032", "B16001.033", "B16001.035", "B16001.036", "B16001.038",
# "B16001.039", "B16001.041", "B16001.042", "B16001.044", "B16001.045",
# "B16001.047", "B16001.048", "B16001.050", "B16001.051", "B16001.053",
# "B16001.054", "B16001.056", "B16001.057", "B16001.059", "B16001.060",
# "B16001.062", "B16001.063", "B16001.065", "B16001.066", "B16001.068",
# "B16001.069", "B16001.071", "B16001.072", "B16001.074", "B16001.075",
# "B16001.077", "B16001.078", "B16001.080", "B16001.081", "B16001.083",
# "B16001.084", "B16001.086", "B16001.087", "B16001.089", "B16001.090",
# "B16001.092", "B16001.093", "B16001.095", "B16001.096", "B16001.098",
# "B16001.099", "B16001.101", "B16001.102", "B16001.104", "B16001.105",
# "B16001.107", "B16001.108", "B16001.110", "B16001.111", "B16001.113",
# "B16001.114", "B16001.116", "B16001.117", "B16001.119",
#
# "C16002.001",
# "C16002.003", "C16002.004", "C16002.006", "C16002.007", "C16002.009",
# "C16002.010", "C16002.012", "C16002.013",
#
# "C17002.001", "C17002.002",
# "C17002.003", "C17002.004", "C17002.005", "C17002.006", "C17002.007",
# "C17002.008",
#
# "B25034.001", "B25034.002", "B25034.003", "B25034.004",
# "B25034.005", "B25034.006", "B25034.007", "B25034.008", "B25034.009",
# "B25034.010",
#
# "B23025.001", "B23025.002", "B23025.003", "B23025.004",
# "B23025.005", "B23025.006", "B23025.007"
# )
# get.field.info(mytables[4])[,1:4]
# metadata <- list(ejscreen_releasedate = 'October 2022', ejscreen_version = '2.1', ACS_version = '2016-2020', ACS_releasedate = '3/17/2022')
# Attach release/version metadata attributes (see add_metadata) ...
vars.ejscreen.acs <- add_metadata(vars.ejscreen.acs)
# attributes(geoformat2020) <- c(attributes(geoformat2020), metadata)
# ... then save the vector as package data, overwriting the previous version.
usethis::use_data(vars.ejscreen.acs, overwrite = TRUE)
|
6ff071a7b8359d436d6c5b98b2f810c950b767ae
|
a44839546bb036ae0a8c086595cc8babe9f16ace
|
/man/getFinalDispersions.Rd
|
8de171efc21ba1495ce3c89e92acaf5c027d422d
|
[
"MIT"
] |
permissive
|
eachanjohnson/concensusGLM
|
6ba2219890ea93e39a15e4be16c07140ec63787e
|
6d310a4bd15116306c9c2395ac73dd48672b4356
|
refs/heads/master
| 2021-06-11T19:02:50.535116
| 2019-06-24T18:12:11
| 2019-06-24T18:12:11
| 45,924,291
| 2
| 0
|
MIT
| 2019-06-24T18:12:12
| 2015-11-10T16:21:30
|
R
|
UTF-8
|
R
| false
| true
| 1,783
|
rd
|
getFinalDispersions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/concensus-statistical-methods.R
\name{getFinalDispersions}
\alias{getFinalDispersions}
\alias{getFinalDispersions.concensusDataSet}
\alias{getFinalDispersions.concensusWorkflow}
\alias{getFinalDispersions.default}
\title{Estimate Negative Binomial dispersion paramteter taking into account batch effects}
\usage{
getFinalDispersions(x, ...)
\method{getFinalDispersions}{default}(x, ...)
\method{getFinalDispersions}{concensusWorkflow}(x, ...)
\method{getFinalDispersions}{concensusDataSet}(x, max_rows = 10000, ...)
}
\arguments{
\item{x}{concensusWorkflow or concensusDataSet.}
\item{...}{Other arguments.}
\item{max_rows}{Numeric. Maximum number of observations to use for MLE.}
}
\value{
concensusWorkflow or concensusDataSet with a new \code{small_model_dispersion} and a new
\code{full_model_dispersion} column in the \code{dispersion} attribute.
}
\description{
Estimate Negative Binomial dispersion paramteter taking into account experimental batch effects.
}
\details{
Uses the CR penalized maximum profile likelihood method, holding the \eqn{\mu} of a GLM fixed and finding the
optimal dispersion \eqn{\alpha} using a Newton-type algorithm as implemented in \code{nlm}.
If the \code{predicted_null_count} column is present in the \code{data} attribute of \code{concensusDataSet}, it is added to
the GLM as an \code{offset}. If the batch effects are real, this should raise the likelihood of and shrink the size of
the final dispersion parameter.
This method will find a dispersion value with and without taking into account \code{predicted_null_count}, saving both results to
the columns of the \code{dispersion} attribute of \code{concensusDataSet}.
}
\seealso{
\link{nlm}, \link{glm}
}
|
8901f09ffc7d93249fd97b1ab5ad8105b2f47c0c
|
34ba13f4b49f2abbd059f2de1bccecd5564e4708
|
/fungicide.app.R
|
544b05ebd6ae662067867f278d2f54d4480b78ee
|
[] |
no_license
|
sithjaisong/SKEP2DB
|
e8ce62275a7256934802db6571f2e39dd010ef8c
|
9a0bd831661f81e06b33613888ab9f24032f9304
|
refs/heads/master
| 2021-01-21T13:17:37.000088
| 2016-04-25T02:34:57
| 2016-04-25T02:34:57
| 43,069,491
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,738
|
r
|
fungicide.app.R
|
data <- all.pesticide %>%
filter(location == "IDN" & season == "DS") %>%
select(location, year, season, fno, fung, fung.dvs) %>%
group_by(location, year, season, fno, fung, fung.dvs)%>%
filter(!fung == "0" ) %>%
summarise(n.fung.app = n()) %>%
ungroup() %>%
arrange(fno)
data$fung.dvs <- as.factor(data$fung.dvs)
levels(data$fung.dvs)[levels(data$fung.dvs) == "Tillering"] <- "TR"
levels(data$fung.dvs)[levels(data$fung.dvs) == "pi"] <- "PI"
levels(data$fung.dvs)[levels(data$fung.dvs) == "Milk"] <- "FL"
levels(data$fung.dvs)[levels(data$fung.dvs) == "TL"] <- "TR"
levels(data$fung.dvs)[levels(data$fung.dvs) == "HD"] <- "HE"
levels(data$fung.dvs)[levels(data$fung.dvs) == "0"] <- "AT"
levels(data$fung.dvs)[levels(data$fung.dvs) == "PF"] <- "SD"
data$fung.dvs <- factor(data$fung.dvs, levels = c("SO","TR","ET", "AT", "MT", "PI", "SD", "ME", "BT","EB","MB","HE","FL","ER","AR", "LR", "HA"))
levels(data$fung.dvs)[levels(data$fung.dvs) == "Tillering"] <- "TR"
levels(data$fung.dvs)[levels(data$fung.dvs) == "pi"] <- "PI"
levels(data$fung.dvs)[levels(data$fung.dvs) == "Milk"] <- "FL"
data$level <- ifelse(data$fno %in% y_in_box$fn, "median",
ifelse(data$fno %in% y_low_box$fn,"low",
ifelse(data$fno %in% y_high_box$fn,"high", NA
)))
data$level <- factor(data$level, level = c("high", "median", "low"))
data$nofarmers <- ifelse(data$level == "high", length(y_high_box$fn),
ifelse(data$level == "median", length(y_in_box$fno),
ifelse(data$level == "low", length(y_low_box), 0)))
levels(data$fung)[levels(data$fung) == "Carbendaxim"] <- "Carbendazim"
levels(data$fung)[levels(data$fung) == "Dinenoconazole"] <- "Difenoconazole"
#=====================================#
##### select farmer ###
#=====================================#
data %>%
filter(!fung == "0", !level == "NA" ) %>%
group_by(location, year, season,fung, fung.dvs, level, nofarmers) %>%
summarise(n.fung.app = n()) %>%
mutate(freq = n.fung.app/nofarmers) %>%
ggplot(., aes(x=fung, y = freq, fill =fung)) + geom_bar(stat = "identity") + facet_grid(level ~fung.dvs, scale = "free" , space ="free") + ylim(0,1) +ggtitle("Fungicide Application in Red River Delta, Vietnam from Survey Data \nin Dry Season from 2013 to 2014") + mytheme + xlab("Fungicide") + ylab("No. Applications Normalized by No. Farmers/Group\n (applications/season)") + scale_fill_brewer(palette= "Set3", name = "Active ingredient") + theme(legend.position = "right")
ggsave("pic/idn.ds.fungicide.png", height = 10, width = 14, dpi = 300)
|
72f46c41e38b65ba4a21e10932ceab43269a096c
|
f9010a6b4f7d042b27e0952a04f1c76e23243e8f
|
/src/003_validation.R
|
0435428dc9f8025bc56abe978bc250132dfc4dbe
|
[] |
no_license
|
envima/ForestModellingRLP
|
0690223c0c7f81043122e5eb9922bb61904244b1
|
944dd07d1ae4245537bf67862e9ebb6a1f2e9034
|
refs/heads/master
| 2022-12-08T01:32:14.843785
| 2022-11-29T15:18:07
| 2022-11-29T15:18:07
| 244,846,593
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,030
|
r
|
003_validation.R
|
#' @name 003_validation.R
#' sub control script for data preparation
#'
#'
#' @description Use this script for controlling the processing.
#'
#' @author [name], [email@com]
#'
# 0 - set up ####
#---------------#
library(envimaR)
library(rprojroot)
root_folder = find_rstudio_root_file()
source(file.path(root_folder, "src/functions/000_setup.R"))
# 1 - validation ####
#-------------------#
# extraction set with relevant class information attatched
extr = readRDS(file.path(envrmt$model_training_data, "extract_merge.RDS"))
for (i in c("main", "diverse")) {
validation_aoa(extr,
model = i,
idCol = "FAT__ID",
responseCol = "BAGRu",
FID = sf::read_sf(file.path(envrmt$FID, "Trainingsgebiete.gpkg"))
)
print(paste("Finished validation for model: ", i))
} # end for loop
# 1.1 - Validation for successional stages ####
#---------------------------------------------#
for (i in c("Bu", "Fi", "Ei", "Dou", "Lä", "Ki", "Lbk", "Lbl")) {
validation_aoa(extr = extr,
model = paste0("quality_", i),
idCol = "FAT__ID",
responseCol = "Quality",
FID = sf::read_sf(file.path(envrmt$FID, "Trainingsgebiete.gpkg"))
)
print(paste("Finished confusion matrix for model: ", i))
} # end for loop
# 2 - confusion matrices ####
#---------------------------#
models = c("main", "diverse")
for (m in models) {
df = readRDS(file.path(envrmt$confusionmatrix, paste0(m, "_confusionmatrix.RDS")))
cm = confusionMatrix_ggplot(caretConfMatr = df)
ggsave(plot = cm, path = file.path(envrmt$illustrations),
filename = paste0(m, "_confusionmatrix.png"),
width = 10,
height = 7,
dpi = 400)
}
# 3 - successional stages confusion matrices ####
#-----------------------------------------------#
lstFiles = list.files("E:/Waldmodellierung/ForestModellingRLP/data/validation/", full.names = TRUE, pattern = glob2rx("quality*confusionmatrix.RDS"))
lstFiles = list.files(file.path(envrmt$confusionmatrix), pattern = glob2rx("quality*confusionmatrix.RDS"))
for (i in 1:length(lstFiles)) {
cm <- readRDS(file.path(envrmt$confusionmatrix,lstFiles[[i]]))
cm <- as.data.frame(cm$table)
if (nlevels(cm$Observed) == 3) {
cm$Observed <- as.character(cm$Observed)
cm[grepl("Qua", cm$Observed), "Observed"] <- "Q"
cm[grepl("Dim", cm$Observed), "Observed"] <- "D"
cm[grepl("Rei", cm$Observed), "Observed"] <- "M"
cm$Predicted <- as.character(cm$Predicted)
cm[grepl("Qua", cm$Predicted), "Predicted"] <- "Q"
cm[grepl("Dim", cm$Predicted), "Predicted"] <- "D"
cm[grepl("Rei", cm$Predicted), "Predicted"] <- "M"
} else {
cm$Observed <- as.character(cm$Observed)
cm[grepl("Dim", cm$Observed), "Observed"] <- "D"
cm[grepl("Rei", cm$Observed), "Observed"] <- "M"
cm$Predicted <- as.character(cm$Predicted)
cm[grepl("Dim", cm$Predicted), "Predicted"] <- "D"
cm[grepl("Rei", cm$Predicted), "Predicted"] <- "M"
}
cm$Observed <- as.factor(cm$Observed)
cm$Predicted <- as.factor(cm$Predicted)
if (nlevels(cm$Observed) == 3) {
cm$Observed <- factor(cm$Observed,levels = c("Q", "D", "M"))
cm$Predicted <- factor(cm$Predicted,levels = c("M", "D","Q"))
} else {
cm$Observed <- factor(cm$Observed,levels = c("D", "R"))
cm$Predicted <- factor(cm$Predicted,levels = c( "R", "D"))
}
# Name of plot
modelName = gsub("quality_", "", lstFiles[[i]])
modelName= gsub("_confusionmatrix.RDS", "", modelName)
plot_succession = successional_stages_cm(cm)
ggsave(plot = plot_succession, path = file.path(envrmt$illustrations),
filename = paste0(modelName, "_confusionmatrix.png"),
width = 10,
height = 7,
dpi = 400)
} # end for loop
# 3 - table metadata ####
#-----------------------#
lstYaml = list.files("E:/Waldmodellierung/ForestModellingRLP/data/validation/", pattern = ".yaml", full.names = TRUE)
meta_table = table_metadata(lstYaml)
gtsave(meta_table,
filename = "no_training_pixel_table.png",
path = file.path(envrmt$illustrations))
# 4 - table selected variables ####
#---------------------------------#
# 5 - selected variables plots ####
#---------------------------------#
# 5.1 successional stages ####
#----------------------------#
var_imp = variable_importance(modelList = list.files(file.path(envrmt$models), pattern = "quality", full.names = TRUE),
plotNames = c("Beech", "Douglas fir", "Oak", "Spruce", "Pine",
"Larch", "short-lived DT", "long-lived DT")
)
# save image
successional = grid.arrange(Beech, `Douglas fir`, Larch, `long-lived DT`, Oak, Pine, `short-lived DT`, Spruce, nrow = 4, ncol =2)
ggsave(filename = file.path(envrmt$illustrations, "variable_importance_successional.png"),
plot = successional,
width = 8,
height = 12,
limitsize = FALSE,
device = png())
|
bf50e15a34c8856d4d139562e63f9d7823ecaef3
|
d2e738d6d32a9f5ffbd14f2bada4dc734a8340f2
|
/case_studies/VMJG2018/data/OrigLevyKellerData/prediction_experiment_data/experiment2/lmr/scripts/analyze.R
|
7698b6abc2447494b8c957e1aed0db0e17a1458c
|
[
"MIT"
] |
permissive
|
vasishth/IntroductionBayes
|
703766c08385c7751c98214ca70b28415b3885d7
|
739bcabc527b18c050d22d62eebeace88bbca1b5
|
refs/heads/master
| 2020-04-29T11:47:52.796613
| 2020-03-10T15:40:53
| 2020-03-10T15:40:53
| 176,113,078
| 44
| 14
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,719
|
r
|
analyze.R
|
#condition dat adj
#a sub sub
#b sub main
#c main sub
#d main main
library(lme4)
cat('########## EXPERIMENT 2 ##########\n\n')
cat('########## REGION 8 ##########\n\n')
cat('# First fixation\n\n')
reading_time <- read.table('exp3_1fx_r.res', header=TRUE)
reading_time$dat <- as.numeric(reading_time$dat)
reading_time$dat <- reading_time$dat - mean(reading_time$dat)
reading_time$adj <- as.numeric(reading_time$adj)
reading_time$adj <- reading_time$adj - mean(reading_time$adj)
reading_time_nozeros <- reading_time[reading_time$region8 != 0,]
interact <- lmer(region8 ~ dat*adj + (dat*adj|subj) + (dat*adj|item), data=reading_time_nozeros)
show(interact)
cat('\n\n# First pass\n\n')
reading_time <- read.table('exp3_1ps_r.res', header=TRUE)
reading_time$dat <- as.numeric(reading_time$dat)
reading_time$dat <- reading_time$dat - mean(reading_time$dat)
reading_time$adj <- as.numeric(reading_time$adj)
reading_time$adj <- reading_time$adj - mean(reading_time$adj)
reading_time_nozeros <- reading_time[reading_time$region8 != 0,]
interact <- lmer(region8 ~ dat*adj + (dat*adj|subj) + (dat*adj|item), data=reading_time_nozeros)
show(interact)
cat('\n\n# Regression path time\n\n')
reading_time <- read.table('exp3_rpt_r.res', header=TRUE)
reading_time$dat <- as.numeric(reading_time$dat)
reading_time$dat <- reading_time$dat - mean(reading_time$dat)
reading_time$adj <- as.numeric(reading_time$adj)
reading_time$adj <- reading_time$adj - mean(reading_time$adj)
reading_time_nozeros <- reading_time[reading_time$region8 != 0,]
interact <- lmer(region8 ~ dat*adj + (dat*adj|subj) + (dat*adj|item), data=reading_time_nozeros)
show(interact)
cat('\n\n# Total time\n\n')
reading_time <- read.table('../results/exp3_tt_r.res', header=TRUE)
head(reading_time)
condition<-ifelse(reading_time$dat=="sub" & reading_time$adj=="sub","a",ifelse(reading_time$dat=="sub" & reading_time$adj=="main","b",ifelse(reading_time$dat=="main" & reading_time$adj=="sub","c",
ifelse(reading_time$dat=="main" & reading_time$adj=="main","d","NA"))))
summary(factor(condition))
reading_time$condition<-factor(condition)
reading_time$dat <- as.numeric(reading_time$dat)
reading_time$dat <- reading_time$dat - mean(reading_time$dat)
reading_time$adj <- as.numeric(reading_time$adj)
reading_time$adj <- reading_time$adj - mean(reading_time$adj)
reading_time_nozeros <- reading_time[reading_time$region8 != 0,]
interact <- lmer(region8 ~ dat*adj + (dat*adj|subj) + (dat*adj|item), data=reading_time_nozeros)
summary(interact)
dat<-reading_time
## sliding contrasts:
dat$adj_ba<-ifelse(dat$condition%in%c("a"),-1/2,ifelse(dat$condition%in%c("b"),1/2,0))
dat$datvsadj_cb<-ifelse(dat$condition%in%c("b"),-1/2,ifelse(dat$condition%in%c("c"),1/2,0))
dat$adjdatvsdat_dc<-ifelse(dat$condition%in%c("c"),-1/2,ifelse(dat$condition%in%c("d"),1/2,0))
sliding<-lmer(region7~adj_ba + datvsadj_cb + adjdatvsdat_dc + (1+adj_ba + datvsadj_cb + adjdatvsdat_dc||subj)+(1+adj_ba + datvsadj_cb + adjdatvsdat_dc||item),subset(dat,region8>0))
summary(sliding)
cat('\n\n# Second pass\n\n')
reading_time <- read.table('../results/exp3_2ps_r.res', header=TRUE)
reading_time$dat <- as.numeric(reading_time$dat)
reading_time$dat <- reading_time$dat - mean(reading_time$dat)
reading_time$adj <- as.numeric(reading_time$adj)
reading_time$adj <- reading_time$adj - mean(reading_time$adj)
interact <- lmer(region8 ~ dat*adj + (dat*adj||subj) + (dat*adj||item), data=reading_time)
summary(interact)
op<-par(mfrow=c(1,2),pty="s")
hist(reading_time$region8,freq=FALSE,main="zero RRT included")
summary(reading_time$region8)
hist(subset(reading_time,region8>0)$region8,freq=FALSE,main="zero RRT excluded")
summary(subset(reading_time,region8>0)$region8)
qqPlot(residuals(interact))
165.536-2*67.809;165.536+2*67.809
#retrodesign(165.536,67.809)
cat('\n\n# First pass regressions\n\n')
reading_time <- read.table('exp3_fpr_r.res', header=TRUE)
reading_time$dat <- as.numeric(reading_time$dat)
reading_time$dat <- reading_time$dat - mean(reading_time$dat)
reading_time$adj <- as.numeric(reading_time$adj)
reading_time$adj <- reading_time$adj - mean(reading_time$adj)
interact <- lmer(region8 ~ dat*adj + (dat*adj|subj) + (dat*adj|item), data=reading_time, family=binomial)
show(interact)
cat('\n\n# Skipping probability\n\n')
reading_time <- read.table('exp3_skp_r.res', header=TRUE)
reading_time$dat <- as.numeric(reading_time$dat)
reading_time$dat <- reading_time$dat - mean(reading_time$dat)
reading_time$adj <- as.numeric(reading_time$adj)
reading_time$adj <- reading_time$adj - mean(reading_time$adj)
interact <- lmer(region8 ~ dat*adj + (dat*adj|subj) + (dat*adj|item), data=reading_time, family=binomial)
show(interact)
cat('\n\n########## REGION 9 ##########\n\n')
cat('# First fixation\n\n')
reading_time <- read.table('exp3_1fx_r.res', header=TRUE)
reading_time$dat <- as.numeric(reading_time$dat)
reading_time$dat <- reading_time$dat - mean(reading_time$dat)
reading_time$adj <- as.numeric(reading_time$adj)
reading_time$adj <- reading_time$adj - mean(reading_time$adj)
reading_time_nozeros <- reading_time[reading_time$region9 != 0,]
interact <- lmer(region9 ~ dat*adj + (dat*adj|subj) + (dat*adj|item), data=reading_time_nozeros)
show(interact)
cat('\n\n# First pass\n\n')
reading_time <- read.table('exp3_1ps_r.res', header=TRUE)
reading_time$dat <- as.numeric(reading_time$dat)
reading_time$dat <- reading_time$dat - mean(reading_time$dat)
reading_time$adj <- as.numeric(reading_time$adj)
reading_time$adj <- reading_time$adj - mean(reading_time$adj)
reading_time_nozeros <- reading_time[reading_time$region9 != 0,]
interact <- lmer(region9 ~ dat*adj + (dat*adj|subj) + (dat*adj|item), data=reading_time_nozeros)
show(interact)
cat('\n\n# Regression path time\n\n')
reading_time <- read.table('exp3_rpt_r.res', header=TRUE)
reading_time$dat <- as.numeric(reading_time$dat)
reading_time$dat <- reading_time$dat - mean(reading_time$dat)
reading_time$adj <- as.numeric(reading_time$adj)
reading_time$adj <- reading_time$adj - mean(reading_time$adj)
reading_time_nozeros <- reading_time[reading_time$region9 != 0,]
interact <- lmer(region9 ~ dat*adj + (dat*adj|subj) + (dat*adj|item), data=reading_time_nozeros)
show(interact)
cat('\n\n# Total time\n\n')
reading_time <- read.table('exp3_tt_r.res', header=TRUE)
reading_time$dat <- as.numeric(reading_time$dat)
reading_time$dat <- reading_time$dat - mean(reading_time$dat)
reading_time$adj <- as.numeric(reading_time$adj)
reading_time$adj <- reading_time$adj - mean(reading_time$adj)
reading_time_nozeros <- reading_time[reading_time$region9 != 0,]
interact <- lmer(region9 ~ dat*adj + (dat*adj|subj) + (dat*adj|item), data=reading_time_nozeros)
show(interact)
cat('\n\n# Second pass\n\n')
reading_time <- read.table('exp3_2ps_r.res', header=TRUE)
reading_time$dat <- as.numeric(reading_time$dat)
reading_time$dat <- reading_time$dat - mean(reading_time$dat)
reading_time$adj <- as.numeric(reading_time$adj)
reading_time$adj <- reading_time$adj - mean(reading_time$adj)
interact <- lmer(region9 ~ dat*adj + (dat*adj|subj) + (dat*adj|item), data=reading_time)
show(interact)
cat('\n\n# First pass regressions\n\n')
reading_time <- read.table('exp3_fpr_r.res', header=TRUE)
reading_time$dat <- as.numeric(reading_time$dat)
reading_time$dat <- reading_time$dat - mean(reading_time$dat)
reading_time$adj <- as.numeric(reading_time$adj)
reading_time$adj <- reading_time$adj - mean(reading_time$adj)
interact <- lmer(region9 ~ dat*adj + (dat*adj|subj) + (dat*adj|item), data=reading_time, family=binomial)
show(interact)
cat('\n\n# Skipping probability\n\n')
reading_time <- read.table('exp3_skp_r.res', header=TRUE)
reading_time$dat <- as.numeric(reading_time$dat)
reading_time$dat <- reading_time$dat - mean(reading_time$dat)
reading_time$adj <- as.numeric(reading_time$adj)
reading_time$adj <- reading_time$adj - mean(reading_time$adj)
interact <- lmer(region9 ~ dat*adj + (dat*adj|subj) + (dat*adj|item), data=reading_time, family=binomial)
show(interact)
############################################################
# for computing confidence intervals around model predicted values
# (curtesy of Roger Levy)
# fit the model as "interact";
# we could then do (for the dat=main,adj=sub condition):
X <- c(-0.5,0.5,-0.25)
Sigma <- vcov(interact)[2:4,2:4]
SE <- sqrt(t(X) %*% Sigma %*% X)
# and then the size of the confidence interval would be
( qnorm(0.975) - qnorm(0.025) ) * SE
# Then we use the other three values of X
X <- c(0.5,0.5,0.25)
X <- c(0.5,-0.5,-0.25)
X <- c(-0.5,-0.5,0.25)
|
1a65936d4031e428f3e31d637885580a4409286b
|
e23426737bf92cb62b1d0136b410b8dc4e5c213c
|
/scripts/00.readData.R
|
5e83967540a57e816e35d2cbfc55160b4bd27517
|
[] |
no_license
|
andrew-hipp/white-oak-syngameon
|
9018fe0477743e631d77621ca552501dfa82719d
|
d5f85b96be2ca2957bb3cfd9fb1ec9641666d42b
|
refs/heads/master
| 2020-04-11T08:49:01.518742
| 2019-08-09T21:08:16
| 2019-08-09T21:08:16
| 161,656,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,990
|
r
|
00.readData.R
|
## merge 2017 OAKCODING data with 2018 Sequenom data
## format for analysis
library(openxlsx)
library(Biostrings)
library(magrittr)
library(adegenet)
if(!exists('haversine')) source('https://raw.githubusercontent.com/andrew-hipp/morton/master/R/haversine.R')
indsThreshold <- 0.9 # proportion of individuals required for each locus
locThreshold <- 0.7 # proportion of loci required for each individual
includeJacorns <- FALSE
cleanNamesAgain <- FALSE
string.oaks <- c('alba',
'macrocarpa',
'muehlenbergii',
'stellata',
'bicolor',
'michauxii',
'montana',
'prinoides')
string.keep <- paste(string.oaks, collapse = '|')
dat.seq <- read.csv('../data/sequenom.dat.cleaned_2018-11-27.csv',
as.is = T, row.names = 1)
dat.seq.mapping <- read.xlsx('../data/dat.table.2018-11-25_MH_AH.xlsx', 1)
if(!includeJacorns) dat.seq <- dat.seq[grep('CC', row.names(dat.seq), fixed = T, invert = T), ]
dat.seq.stats <- c(
samples.orig = dim(dat.seq)[1] - sum(dat.seq.mapping$dropDupe),
meanMissingLoci.orig = apply(dat.seq, 1, function(x) sum(x == '') / dim(dat.seq)[1]) %>% mean,
sdMissingLoci.orig = apply(dat.seq, 1, function(x) sum(x == '') / dim(dat.seq)[1]) %>% sd,
meanMissingInds.orig = apply(dat.seq, 2, function(x) sum(x == '') / dim(dat.seq)[2]) %>% mean,
sdMissingInds.oSpeciesrig = apply(dat.seq, 2, function(x) sum(x == '') / dim(dat.seq)[2]) %>% sd
) # close dat.seq.stats
dat.seq.mapping$reps[is.na(dat.seq.mapping$reps)] <- ''
## prune to sufficiently inclusive data
dat.seq <- dat.seq[ , which(apply(dat.seq, 2, function(x) sum(x != '')) / dim(dat.seq)[1] >= locThreshold)]
dat.seq <- dat.seq[which(apply(dat.seq, 1, function(x) sum(x != '')) / dim(dat.seq)[2] >= indsThreshold), ]
dat.seq <- dat.seq[dat.seq.mapping$codeOrig, ]
#dat.seq.mapping$Species <- factor(dat.seq.mapping$Species)
#levels(dat.seq.mapping$Species) <-
# c('Quercus macrocarpa',
# sort(grep('macrocarpa', levels(dat.seq.mapping$Species), value = T, invert = T))
# )
row.names(dat.seq) <- row.names(dat.seq.mapping) <-
apply(dat.seq.mapping[c('Species', 'state', 'county', 'specimenCodeUnique')], 1, paste, collapse = '|') %>%
as.character
checkReps <- lapply(unique(dat.seq.mapping$reps), function(w) {
a <- dat.seq[dat.seq.mapping$rep == w, ]
out <- a[, which(!apply(a, 2, function(x) x[1] == x[2])) %>% as.numeric]
out
}
) # close lapply
names(checkReps) <- unique(dat.seq.mapping$reps)
checkReps <- checkReps[!names(checkReps) == '']
dat.seq <- dat.seq[!dat.seq.mapping$dropDupe,]
dat.seq.mapping <- dat.seq.mapping[!dat.seq.mapping$dropDupe,]
dat.seq.stats <- c(dat.seq.stats,
samples.pruned = dim(dat.seq)[1],
meanMissingLoci.pruned = apply(dat.seq, 1, function(x) sum(x == '') / dim(dat.seq)[1]) %>% mean,
sdMissingLoci.pruned = apply(dat.seq, 1, function(x) sum(x == '') / dim(dat.seq)[1]) %>% sd,
meanMissingInds.pruned = apply(dat.seq, 2, function(x) sum(x == '') / dim(dat.seq)[2]) %>% mean,
sdMissingInds.pruned = apply(dat.seq, 2, function(x) sum(x == '') / dim(dat.seq)[2]) %>% sd
)
dat.seq.mat <- matrix(dat.seq.stats, 5)
row.names(dat.seq.mat) <- gsub('.orig', '', names(dat.seq.stats)[1:5], fixed = T)
colnames(dat.seq.mat) <- c('orig', 'pruned')
## and make a table of genotypes
dat.gen <- df2genind(dat.seq, ncode=1, ploidy = 2)
## Basis of table 2
dat.dists <- sapply(unique(dat.seq.mapping$Species), function(x) {
haversine(dat.seq.mapping[dat.seq.mapping$Species == x, ],
lat.long.labels=c('lat', 'long')) %>%
quantile(probs = c(0.0, 0.5, 1.0), na.rm = T)
}) %>%
t
row.names(dat.dists) <- unique(dat.seq.mapping$Species)
## add a function here to find the closest population of each species to
## some Q. macrocarpa.
dat.mac.minD <- sapply(unique(dat.seq.mapping$Species), function(x) {
})
|
2d514dcccbd1ae9570d3f7a8faf68fe440a4eac6
|
d34bd74c59358c0eb0cf927bbee8551d1e7eed97
|
/src/2020/day1_solution.R
|
1c2455bea6fd8eac35519d96945d262650796912
|
[] |
no_license
|
bradisbrad/advent_of_code_2020
|
7dc50174cc047945c4119df83f176e240bc7bd25
|
5cc0ee78ee6c44e517420e65d9f234135de30b5d
|
refs/heads/main
| 2023-01-24T18:54:52.928004
| 2020-12-04T04:27:11
| 2020-12-04T04:27:11
| 318,323,908
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 638
|
r
|
day1_solution.R
|
# Q1: Find the two entries that sum to 2020 and return the product
# Q2: Find the three entries that sum to 2020 and return the product
library(tidyverse)
library(here)
data <- read_tsv(here('data/2020/day1_input.txt'), col_names = F)
a1 <-
expand_grid(data,
rename(data, X2 = X1)) %>%
mutate(sm = X1 + X2,
pd = X1 * X2) %>%
filter(sm == 2020) %>%
distinct(pd) %>%
pull()
a2 <-
expand_grid(data,
rename(data, X2 = X1),
rename(data, X3 = X1)) %>%
mutate(sm = X1 + X2 + X3,
pd = X1 * X2 * X3) %>%
filter(sm == 2020) %>%
distinct(pd) %>%
pull()
|
c0a6ccd8b23f33c726d70a59d06a878f9120f531
|
f97987c497e5d0aade2b4057f54915af25e25090
|
/run_analysis.R
|
85d5d775a19ed2d3e09ae4ee22f640d10c7400d8
|
[] |
no_license
|
jptodd/Getdata-031
|
e14f1083d2858a0462613eb09524f2838608f502
|
cf46b2d7711305474c5ff1176349e4780ba4785f
|
refs/heads/master
| 2016-09-05T10:07:40.223163
| 2015-08-23T23:13:18
| 2015-08-23T23:13:18
| 41,269,495
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,085
|
r
|
run_analysis.R
|
# getdata-031 project
# Uses dataset published in
# [1] Davide Anguita, Alessandro Ghio, Luca Oneto, Xavier Parra and Jorge L. Reyes-Ortiz.
# Human Activity Recognition on Smartphones using a Multiclass Hardware-Friendly Support Vector Machine.
# International Workshop of Ambient Assisted Living (IWAAL 2012). Vitoria-Gasteiz, Spain. Dec 2012
# set environment specifics as necessary
# .libPaths(new = "C:/Program Files/R/R-3.2.1/library")
# library("dplyr", lib.loc="C:/Program Files/R/R-3.2.1/library")
# setwd("C:/Coursera/Data Science/getdata-031")
# requires dplyr
library("dplyr")
# Read in the Activity descriptions and column names (features)
activities <- read.table("./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/activity_labels.txt")
features <- read.table("./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/features.txt")
# Get the test data
subject_test <- tbl_df(read.table("./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/test/subject_test.txt"))
X_test <- tbl_df(read.table("./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/test/X_test.txt"))
Y_test <- tbl_df(read.table("./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/test/Y_test.txt"))
# Get the training data
subject_train <- tbl_df(read.table("./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/train/subject_train.txt"))
X_train <- tbl_df(read.table("./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/train/X_train.txt"))
Y_train <- tbl_df(read.table("./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/train/Y_train.txt"))
# Append the measurement description to the column names for the test and train data.
# Note we are not just replacing the column names with the descriptive name as the descriptive names are not unique
# and later operations (select) require unique column names.
# We can remove the prefix (ofiginal column name and dot (e.g. V1., V318.) later once the data set is tidy as the
# non-unique columns will no longer be present.
names(X_test) <- paste(names(X_test), features$V2, sep = ".")
names(X_train) <- paste(names(X_train), features$V2, sep = ".")
# add the subject and activity codes to the measurements for both data sets
test <- cbind(select(subject_test, subject = 1), X_test, select(Y_test, activity_code = 1))
train <- cbind(select(subject_train, subject = 1), X_train, select(Y_train, activity_code = 1))
# combine the datasets
combined <- rbind(test, train)
# identify the mean and standard deviation measurements.
# Create a boolean array to represent any columns with "mean" or "std" in the column name
tf <- grepl("mean", names(combined)) | grepl("std", names(combined))
# We need the column position to do our select, so create an array with position of the TRUE values
tfx <- c(1:length(tf))[tf]
smaller <- select(combined, subject, activity_code, tfx)
# remove the Vx. prefix
names(smaller) <- sub("^V\\d+[.]", "", names(smaller))
# set the activiity description activity_desc by associating the activity code with the description from
# activity_labels.txt
smaller <- mutate(smaller, activity_desc = activities[activity_code, 2])
#remove the activity_code now that we have the activity_desc
smaller <- select(smaller, subject, activity_desc, everything(), -activity_code)
nurow <- smaller[1,] # used to create a summary line of the mean of each measurement for each subject/activity
tidy <- NULL # initial new summary data set
for(s in unique(smaller$subject)) # for each subject
{
nurow$subject = s
for(a in unique(smaller$activity_desc)) #for each activity
{
nurow$activity_desc <- a
for(c in 3:81) # for each measurement column
{
# take the average of the readings for that column for that subject / activity
nurow[,c] <- mean(filter(smaller, subject == s, activity_desc == a)[,c])
}
tidy <- rbind(tidy, nurow) # add the summarized row to the new tidy dataset
}
}
# write the tidy data set to a file
write.table(tidy, file="tidy.txt", row.names = FALSE)
|
e42a5670ce4622bf16ce406a4b1e147a4b105b54
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/stochprofML/examples/toycluster.LNLN.Rd.R
|
bb27e4b24d8ee4c8819f6eac48e1da1bafb888dd
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 422
|
r
|
toycluster.LNLN.Rd.R
|
library(stochprofML)
### Name: toycluster.LNLN
### Title: Synthetic data from the LN-LN model
### Aliases: toycluster.LNLN
### Keywords: datasets synthetic data stochastic profiling
### ** Examples
data(toycluster.LNLN)
par(mfrow=c(3,4))
for (i in 1:ncol(toycluster.LNLN)) {
hist(toycluster.LNLN[,i],xlab="synthetic data from LN-LN model",
main=colnames(toycluster.LNLN)[i],col="grey")
}
par(mfrow=c(1,1))
|
1b4541ced1d21953faf1795f2d9ef43c9ab49ba5
|
7a9b8f6512e497bab53e579ede4814c18d92d562
|
/helper.R
|
3e8db049ffaa6c3458f079c24ad783533463f7a2
|
[] |
no_license
|
slowbro1/thomson_reuters_paint
|
4f203d5cd71dfd9912a338d4207cde9f5f022db0
|
f608c0640976b69f4bca56dd48554f7feb0dcb39
|
refs/heads/master
| 2020-12-31T04:29:13.771534
| 2016-04-01T18:39:09
| 2016-04-01T18:39:09
| 55,251,912
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,446
|
r
|
helper.R
|
#Helper
# Endpoint of the Thomson Reuters Eikon time-series API.
# NOTE(review): the API key is hard-coded into the URL -- consider moving it
# to an environment variable before sharing this script.
url<-"https://dev.api.thomsonreuters.com/eikon/v1/timeseries?X-TR-API-APP-ID=r5LjbEgTGh3ZBYumNIhN8qvut7r9p2oW"
# JSON request template; writedf() substitutes the '"IBM.N"' placeholder with
# the caller's ticker list via gsub().
jsquery <-
'{
"rics": ["IBM.N"],
"interval": "Daily",
"startdate": "2015-10-03T00:00:00Z",
"enddate": "2015-12-07T23:59:59Z",
"fields":
["TIMESTAMP","OPEN","HIGH","LOW","CLOSE","VOLUME"]
}'
# Fetch daily time series for `ticker` (a rics string covering `n` instruments)
# from the Eikon API and reshape the flat JSON payload into a list of `n` data
# frames, one per instrument, each with OHLCV columns plus a 20-day rolling
# standard deviation labelled VOLATILITY.
#
# @param ticker rics selection string, substituted into the JSON template
# @param n number of instruments contained in `ticker`
# @return list of n data frames with columns TIMESTAMP, OPEN, HIGH, LOW,
#   CLOSE, VOLUME, VOLATILITY (first 19 rows dropped to align the window)
# NOTE(review): depends on httr (POST, content) and jsonlite; the index
# arithmetic below assumes the API returns exactly 6 fields per instrument
# plus 13 trailing metadata entries per instrument -- confirm against a live
# response before modifying.
writedf = function(ticker, n){
newquery=gsub('"IBM.N"', ticker, jsquery)
post = POST(url=url, body=newquery, encode='json')
content=content(post,'text')
content=substring(content,1)
content = jsonlite::fromJSON(content)
# Flatten the nested JSON into one long single-column data frame.
df = data.frame(matrix(unlist(content)))
l = length(df[,1])
# Drop the trailing 13*n metadata entries, then n header-ish entries.
df = df[-(l-13*n+1):-l,]
df = df[(-1*(n-1)):(-1*n)]
l2 = length(df)
ldf=l2/(n*6)
dfl = list()
l3=l2/n
for(j in 1:n){
# Slice the flat vector into the 6 field columns for instrument j.
dfl[[j]]=data.frame(df[(1+l3*(j-1)):(ldf+l3*(j-1))],df[(ldf+1+l3*(j-1)):(2*ldf+l3*(j-1))],df[(2*ldf+1+l3*(j-1)):(3*ldf+l3*(j-1))],df[(3*ldf+1+l3*(j-1)):(4*ldf+l3*(j-1))],df[(4*ldf+1+l3*(j-1)):(5*ldf+l3*(j-1))],df[(5*ldf+1+l3*(j-1)):(6*ldf+l3*(j-1))])
names(dfl[[j]]) = c('TIMESTAMP','OPEN','HIGH','LOW','CLOSE','VOLUME')
wdf = length(dfl[[j]][1,])
# Columns arrive as character; coerce the numeric ones (all but TIMESTAMP).
for(i in 2:wdf){
dfl[[j]][,i] = as.numeric(as.character(dfl[[j]][,i]))
}
}
# 20-day rolling standard deviation of column 5 (CLOSE) per instrument.
VolMatrix = matrix(nrow = ldf-19, ncol = n)
for(i in 1:n){
for(j in 1:(ldf-19)){
VolMatrix[j,i]=sd(dfl[[i]][j:(j+19),5])
}
# NOTE(review): the data.frame() coercion sits inside the loop and runs every
# pass; moving it after the loop would be equivalent and cheaper.
VolMatrix = data.frame(VolMatrix)
}
# Trim the first 19 rows so each row lines up with its rolling-window value.
for(i in 1:n){
dfl[[i]]=dfl[[i]][20:ldf,]
dfl[[i]] = data.frame(dfl[[i]],VolMatrix[,i])
}
for(i in 1:n){
names(dfl[[i]]) = c('TIMESTAMP','OPEN','HIGH','LOW','CLOSE','VOLUME','VOLATILITY')
}
return(dfl)
}
# Map a rics selection string to the number of instruments it names.
#
# @param text one of '"AAPL.O","IBM.N"' or '"IBM.N"'.
# @return 2 for the two-instrument selection, 1 for the single-instrument one.
# Fix: the original left `num` unassigned for any other input, so the final
# return(num) failed with a cryptic "object 'num' not found"; fail fast with
# an informative error instead.
getnumber <- function(text) {
  if (text == '"AAPL.O","IBM.N"') {
    num <- 2
  } else if (text == '"IBM.N"') {
    num <- 1
  } else {
    stop("getnumber(): unrecognised rics string: ", text, call. = FALSE)
  }
  return(num)
}
# Compute shared axis limits across a list of instrument data frames.
#
# @param datalist list of data frames whose columns 2:7 are the numeric
#   series OPEN, HIGH, LOW, CLOSE, VOLUME, VOLATILITY (column 1 is ignored).
# @param n number of data frames in `datalist` to use.
# @return data.frame with columns `minvec`/`maxvec` (overall min/max per
#   series) and row names OPEN, HIGH, LOW, CLOSE, VOLUME, VOLATILITY.
#
# Fix: the original combined per-frame results with `for (i in 2:n)`, which
# indexes out of bounds when n == 1 (2:1 yields c(2, 1)); seq_len() handles
# n == 1 correctly and leaves the n >= 2 result unchanged.
getlim <- function(datalist, n) {
  # Per-data-frame minima and maxima of columns 2:7.
  per_df <- vector("list", n)
  for (i in seq_len(n)) {
    mins <- sapply(datalist[[i]][, 2:7], min)
    maxs <- sapply(datalist[[i]][, 2:7], max)
    per_df[[i]] <- data.frame(mins, maxs)
  }
  # Overall extremes across all data frames, one entry per series.
  minvec <- numeric(6)
  maxvec <- numeric(6)
  for (j in 1:6) {
    minvec[j] <- min(vapply(per_df, function(d) d[j, 1], numeric(1)))
    maxvec[j] <- max(vapply(per_df, function(d) d[j, 2], numeric(1)))
  }
  df <- data.frame(minvec, maxvec)
  row.names(df) <- c('OPEN', 'HIGH', 'LOW', 'CLOSE', 'VOLUME', 'VOLATILITY')
  return(df)
}
|
bffed66e146e82f9b3c4f03365b733c48fe4cc8c
|
d28aac84af4b538137205c9afe8764c6c7dd0911
|
/extract-microarray-data.R
|
6aacb6f5817d0bf27ff1fdad460a23e170dcfb36
|
[] |
no_license
|
ClaireLevy/HVE-microarray
|
9c67a92b8168ca9fd4aa34d77137d817f38013d1
|
ca21449cea883e1daf12083a987f63bef84f6b65
|
refs/heads/master
| 2021-01-19T08:10:18.544493
| 2015-11-19T23:29:25
| 2015-11-19T23:29:25
| 30,942,256
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,388
|
r
|
extract-microarray-data.R
|
# Extract the control-gene, excluded/imputed and sample sections from an
# Illumina FinalReport text file by skipping to their hard-coded line offsets
# (documented in the file-structure map below).
# NOTE(review): `file` is only needed by the commented-out export block; the
# readLines() call can be removed if that export stays disabled.
folder <- "J:\\MacLabUsers\\Claire\\Projects\\HVE-microarray\\microarrayData\\"
file <- readLines(paste0(folder, "FinalReport_exvivo_TNF_HVE.txt"))
# File structure
# Line 1: [Header]
# 2-7: Header details
# 8: [Sample Probe Profile]
# 9: Column names
# 10-47332: Microarray data
# 47333: [Control Gene Profile]
# 47334: Column names
# 47335-432: Control gene data
# 47433: [Excluded and Imputed Probes]
# 47434: Column names
# 47435-45: Excluded and imputed data
# 47446: [Samples Table]
# 47447: Column names
# 47448-47483: Sample table data
# # save just microarray data as separate file
# fileConn <- file(paste0(folder, "MicroarrayDataExtracted.txt"))
# writeLines(file[1:47332], fileConn)
# close(fileConn)
# get control gene profiles
ctrl <- read.table(paste0(folder, "FinalReport_exvivo_TNF_HVE.txt"),
sep = "\t", skip = 47333, nrows = 98, header = TRUE)
# get excluded and imputed probes
# the following skips over several lines between header and the first gene in
# the table for some reason????
excludedImputed <- read.table(paste0(folder, "FinalReport_exvivo_TNF_HVE.txt"),
sep = "\t", skip = 47433, nrows = 7, header = TRUE)
# get sample table
samples <- read.table(paste0(folder, "FinalReport_exvivo_TNF_HVE.txt"),
sep = "\t", skip = 47446, nrows = 36, header = TRUE)
|
9f11c4e5b746f4f3d99bb2a5b810f18da9f00a3e
|
7b99e0516455a5e61f010dd7015da2461117263e
|
/inst/tinytest/test-ergmito-checkers.R
|
f9f07d7ed2e2d6e140c85e2864ae5deba6faabe8
|
[
"MIT"
] |
permissive
|
muriteams/ergmito
|
75ec8830de7bcf47250c2038f418123eb9fc9c9e
|
f3a2ede1ed3a97eaed71987ec5b555a853cbd11d
|
refs/heads/master
| 2023-06-25T08:57:37.368032
| 2023-06-13T19:46:18
| 2023-06-13T19:46:18
| 157,758,250
| 9
| 1
|
NOASSERTION
| 2020-07-06T05:17:20
| 2018-11-15T18:56:47
|
R
|
UTF-8
|
R
| false
| false
| 1,297
|
r
|
test-ergmito-checkers.R
|
# Degenerate-network checks for ergmito(): fully connected, near-full, empty
# and near-empty 4-node networks should drive the edges/ttriad coefficients to
# +/- Inf, with the reported p-values approaching zero.
# Fully connected network
x <- rbernoulli(c(4,4,4), 1)
ans0 <- ergmito(x ~ edges + ttriad)
# Very high density
x <- lapply(x, function(x.) {
x.[2] <- 0L
x.
})
ans0b <- ergmito(x ~ edges + ttriad)
ans0b$formulae$loglik(c(1e3, coef(ans0b)[2]))
# Empty graph
x <- rbernoulli(c(4,4,4), 0)
ans1 <- ergmito(x ~ edges + ttriad)
# Very low density
x <- lapply(x, function(x.) {
x.[2:3] <- 1L
x.
})
ans2 <- ergmito(x ~ edges + ttriad)
# Coefficients should diverge in the expected direction for each regime.
expect_true(all(is.infinite(coef(ans0))))
expect_true(all(coef(ans1) < 0) & all(is.infinite(coef(ans1))))
expect_true(coef(ans2)[2] < 0 & is.infinite(coef(ans2)[2]))
expect_equal(summary(ans0)$coefs[, "Pr(>|z|)"], c(0, 0), tol = 1e-2)
expect_equal(summary(ans1)$coefs[, "Pr(>|z|)"], c(0, 0), tol = 1e-2)
expect_equal(summary(ans2)$coefs[, "Pr(>|z|)"], c(0, 0), tol = 1e-2)
# Capture the printed representation of `x` and pad it with leading/trailing
# newlines (joined with the default single-space paste separator) so it embeds
# cleanly inside a message() call.
pretty_printer <- function(x) {
  printed_lines <- capture.output(eval(x))
  body_text <- paste(printed_lines, collapse = "\n")
  paste("\n", body_text, "\n")
}
# Emit the expected coefficient patterns for manual inspection, since the
# automated expectations above can be unreliable on some operating systems.
message(
"\nOn some OSs these are not easy to test. So here is the expected results:\n",
pretty_printer(summary(ans0)), "\n----- should be c(Inf, Inf) \n----- with pval 0.\n",
pretty_printer(summary(ans1)), "\n----- should be -c(Inf, Inf), \n----- with pval 0.\n",
pretty_printer(summary(ans2)), "\n----- should be -Inf (2nd) \n----- with pval 0."
)
|
44be8a58df4d3ce27fbbccad3ad232d93fef8bf5
|
b0e6d906265d8eec88232193b424e2eca3a9f4bc
|
/functions/get_lift_and_leverage.r
|
1792f0427a013e2f3ba9bcc7a2cb4e25e2a1eb67
|
[] |
no_license
|
EL-SHREIF/Apriori-Algorithm
|
0bc6cbe8c715b09e3fc12c655f69485e88cdd0fa
|
85603a87eb3b653590d1bdbe6d2395639760d6fe
|
refs/heads/master
| 2022-10-28T20:32:54.933716
| 2020-06-14T16:50:47
| 2020-06-14T16:50:47
| 257,570,820
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 968
|
r
|
get_lift_and_leverage.r
|
# Compute lift and leverage for a set of association rules.
#
# @param rules character vector; each rule is "LHS/RHS" where LHS and RHS are
#   '-'-separated item lists (e.g. "a-b/c").
# @param supports named numeric vector of itemset supports, keyed by the
#   sorted '-'-joined item names (e.g. supports["a-b"]).
# @return for 2+ rules, a matrix with columns `lift`/`leverage` and row names
#   "LHS \u2192 RHS"; for a single rule, a named numeric vector (matching the
#   original implementation's dimension drop).
#
# Fix: the original seeded a 1-row NA matrix and grew it with rbind() on
# every rule (O(n^2) copying), then sliced the NA row back off; preallocate
# the full matrix instead. Per-rule values are unchanged.
get_lift_and_leverage <- function(rules, supports){
  n_rules <- length(rules)
  lifts_leverages <- matrix(NA_real_, nrow = n_rules, ncol = 2,
                            dimnames = list(rep("", n_rules),
                                            c('lift', 'leverage')))
  for (k in seq_len(n_rules)) {
    left_right <- strsplit(rules[k], split = '/')
    left <- strsplit(left_right[[1]][1], split = '-')[[1]]
    right <- strsplit(left_right[[1]][2], split = '-')[[1]]
    # Joint support of LHS+RHS, and the marginal supports of each side.
    first <- supports[paste(sort(unlist(c(left, right))), collapse = '-')]
    sup_left <- supports[paste(sort(unlist(left)), collapse = '-')]
    sup_right <- supports[paste(sort(unlist(right)), collapse = '-')]
    second <- sup_left * sup_right
    # lift = P(LHS,RHS) / (P(LHS)P(RHS)); leverage = the difference.
    lifts_leverages[k, ] <- c(first / second, first - second)
    rownames(lifts_leverages)[k] <- paste(
      c(paste(left, collapse = ', '), paste(right, collapse = ', ')),
      collapse = ' \u2192 ')
  }
  # The original returned `result[-1, ]`, which collapses to a named vector
  # when only one rule was supplied; preserve that behaviour for callers.
  if (n_rules == 1) {
    return(lifts_leverages[1, ])
  }
  return(lifts_leverages)
}
|
fdee2fd996b261a14c05471582443769c7537d77
|
358842d28556f07557977e228a82b9014ddd08ab
|
/R/write_stat_results_omics.R
|
d7e711558ab9f7dd74ce10ff141100972c19a74b
|
[
"BSD-2-Clause"
] |
permissive
|
rarichardson92/pmartR
|
519ccc42936e1735850c2ce9a8cbbe34fe2d83e0
|
cbae3a82fd923359f11c7ba6cb726a88d6855301
|
refs/heads/master
| 2021-07-17T11:24:54.068489
| 2020-07-08T21:19:25
| 2020-07-08T21:19:25
| 185,486,874
| 0
| 0
|
BSD-2-Clause
| 2019-10-11T15:47:41
| 2019-05-07T22:28:41
|
R
|
UTF-8
|
R
| false
| false
| 4,019
|
r
|
write_stat_results_omics.R
|
#' Creates a list of three sheets, Normalized Data, DA Test Results, and Only DA biomolecules - for OMICS project
#'
#' @param omicsData an object of one of the classes "pepData", "proData", "metabData", or "lipidData", usually created by \code{\link{as.pepData}}, \code{\link{as.proData}}, \code{\link{as.metabData}}, or \code{\link{lipidData}}, respectively.
#' @param statResData an object of class statRes, created by \code{\link{imd_anova}}
#' @param refCondition character string, what the reference condition looks like (e.g. "MOCK", "Mock", "mock", etc.)
#' @param filePath default is NULL, if NULL then workbook will not be written out, if it is a character string specifying the file path, file name and .xlsx extension
#'
#' @return a list of three data frames Normalized_Data, DA_Test_Results, and DA_"data type"_Only
#'
#' @author Natalie Heller
#'
#'
write_stat_results_omics <- function(omicsData, statResData, refCondition, filePath = NULL){
  if(!require(openxlsx)){
    stop("openxlsx package is required")
  }
  ## INITIAL CHECKS ##
  ## Make sure both objects are present
  if(is.null(omicsData)){
    stop("'omicsData' must be provided")
  }
  if(is.null(statResData)){
    stop("'statResData' must be provided")
  }
  ## Make sure both are the correct class.
  # Fix: inherits() is robust when an object carries more than one S3 class,
  # unlike class(x) == / %in% comparisons (which error or misfire on
  # length > 1 class vectors in R >= 4.2).
  if(!inherits(omicsData, c("pepData", "proData", "metabData", "lipidData"))){
    stop("omicsData is not of the appopriate class")
  }
  if(!inherits(statResData, "statRes")){
    stop("statResData is not of the appropriate class")
  }
  ## END OF CHECKS ##
  ## First get the Normalized Data tab with metadata (join e_meta when present).
  # NOTE(review): left_join() comes from dplyr, assumed attached by the caller.
  if("e_meta" %in% attributes(omicsData)$names){
    Normalized_Data <- left_join(omicsData$e_meta, omicsData$e_data, by = intersect(names(omicsData$e_meta), names(omicsData$e_data)))
  }else{
    Normalized_Data <- omicsData$e_data
  }
  ## Get the query condition(s): every group level other than the reference.
  # NOTE(review): the grouping column name "VIRUS" is hard-coded -- confirm it
  # matches the group_DF of the statRes object in other projects.
  ii <- unique(as.character(attributes(statResData)$group_DF$VIRUS))
  q_cond <- ii[which(ii != refCondition)]
  pval <- statResData$P_values
  flag <- statResData$Flags
  fc <- statResData$Full_results[, c(which(attributes(omicsData)$cname$edata_cname == colnames(statResData$Full_results)), grep("fold_change", tolower(colnames(statResData$Full_results))))]
  # Rename columns of pval: strip "pvals_" prefix, then tag with condition.
  a1 <- which(attributes(omicsData)$cname$edata_cname == colnames(pval))
  colnames(pval)[-a1] <- gsub(pattern = "pvals_", replacement = "", x = tolower(colnames(pval)[-a1]))
  colnames(pval)[-a1] <- paste(q_cond, colnames(pval)[-a1], "Pval", sep = "_")
  # Rename the columns of flag.
  # Fix: the original indexed `flag` columns with -a1 (the id-column position
  # of `pval`) in two places below; use -a2, the id-column position of `flag`,
  # so the rename stays correct even if the two data frames order their
  # columns differently.
  a2 <- which(attributes(omicsData)$cname$edata_cname == colnames(flag))
  colnames(flag)[-a2] <- gsub(pattern = "flags_", replacement = "", x = tolower(colnames(flag)[-a2]))
  colnames(flag)[-a2] <- paste(q_cond, colnames(flag)[-a2], "Flag", sep = "_")
  a3 <- which(attributes(omicsData)$cname$edata_cname == colnames(fc))
  colnames(fc)[-a3] <- gsub(pattern = "Fold_change_", replacement = "", x = colnames(fc)[-a3])
  colnames(fc)[-a3] <- paste(colnames(fc)[-a3], "Log2FC", sep = "_")
  DA_Test_Results <- merge(x = pval, y = fc, by = intersect(colnames(pval), colnames(fc)))
  DA_Test_Results <- merge(x = DA_Test_Results, y = flag, by = intersect(colnames(DA_Test_Results), colnames(flag)))
  ## Now get the DA_Molecule_Only tab: rows where any flag is non-zero.
  # NOTE(review): `indx` is computed in `flag` row order but applied to the
  # merged DA_Test_Results, whose rows merge() may reorder -- verify the row
  # correspondence, or filter DA_Test_Results on its own Flag columns.
  tmp <- flag
  tmp$TMP <- apply(flag[, -a2], 1, function(x){abs(sum(x))})
  indx <- which(tmp$TMP != 0)
  DA_Only <- DA_Test_Results[indx, ]
  outputData <- list()
  outputData$Normalized_Data <- Normalized_Data
  outputData$DA_Test_Results <- DA_Test_Results
  # Name the DA-only sheet after the biomolecule type.
  if(inherits(omicsData, "metabData")){
    outputData$DA_Metabolites_Only <- DA_Only
  }else if(inherits(omicsData, "pepData")){
    outputData$DA_Peptides_Only <- DA_Only
  }else if(inherits(omicsData, "proData")){
    outputData$DA_Proteins_Only <- DA_Only
  }else{
    outputData$DA_Lipids_Only <- DA_Only
  }
  if(!is.null(filePath)){
    write.xlsx(x = outputData, file = filePath)
  }
  # Fix: the roxygen block documents a returned list, but the original body
  # ended on the write.xlsx if-block and returned NULL; return the list so
  # the function matches its documentation (callers ignoring the value are
  # unaffected).
  return(outputData)
}
|
d90a67f0d1ecb25bb0442aee428e5d7305265eb8
|
863aa7e71911423a9096c82a03ef755d1cf34654
|
/man/get_metadata.Rd
|
af9088780d7c87781f98cac549c7f61c4036e99c
|
[] |
no_license
|
BioSystemsUM/specmine
|
8bd2d2b0ee1b1db9133251b80724966a5ee71040
|
13b5cbb73989e1f84e726dab90ff4ff34fed68df
|
refs/heads/master
| 2023-08-18T05:51:53.650469
| 2021-09-21T13:35:11
| 2021-09-21T13:35:11
| 313,974,923
| 1
| 1
| null | 2021-09-21T13:35:12
| 2020-11-18T15:22:49
|
R
|
UTF-8
|
R
| false
| false
| 723
|
rd
|
get_metadata.Rd
|
\name{get_metadata}
\alias{get_metadata}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Get metadata
}
\description{
Get the metadata from the dataset
}
\usage{
get_metadata(dataset)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dataset}{
list representing the dataset from a metabolomics experiment.
}
}
\value{
returns a data frame with the metadata.
}
\examples{
## Example of getting the metadata
library(specmine.datasets)
data(cachexia)
cachexia.mt = get_metadata(cachexia)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ metadata }
\keyword{ dataset }% __ONLY ONE__ keyword per line
|
3c9113409df80c1aaff7adbfaa58db1400061ab4
|
7adb1775672fb7b320ad9da1bb3f1edeb8d33f0d
|
/archive/regardvapanalysis_iv ver 2.0 10 August (with group sequential).R
|
037341a37e6221744ffbdf3a7df72169f90a099e
|
[
"CC0-1.0"
] |
permissive
|
vitallish/NItrialsimulation
|
0a60b60bd053a8c2e1e92954c69562ff4c15bff4
|
ef7f1e6e934440e8c7bc5e3a8293988051dd4b3f
|
refs/heads/master
| 2022-04-12T04:41:17.880021
| 2020-04-06T11:45:39
| 2020-04-06T11:45:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 42,322
|
r
|
regardvapanalysis_iv ver 2.0 10 August (with group sequential).R
|
########################################################################
###################Simulation for REGARD-VAP analysis###################
########################################################################
# NOTE(review): a hard-coded setwd() and rm(list=ls()) clobber the caller's
# session and are discouraged in shared scripts; consider removing both and
# using project-relative paths instead.
setwd("/Users/moyin/Desktop/VAP studd/Causal inference simulation") #set working directory
rm(list=ls()) # Clean working environment
library(Hmisc); library(rms); library(gsDesign);library(ivpack); library(ggplot2);
library(data.table); library(dplyr); library(plotly); library(ggpubr) # Required libraries
set.seed(1234)
##########BIAS##########
########################
# Simulate a two-arm non-inferiority trial under partial compliance and plot
# the bias of four estimators (IV two-stage, intention-to-treat, per-protocol,
# as-treated) against the true complier effect pc1-pc0, over a grid of
# complier proportions. Uses dplyr::filter(), ivpack::ivreg() and ggplot2.
simdata.bias<- function(n, wna, pc0, pc1, pa, pn, nIterations){
# n: number of participants per group;
# wna: proportion of never takers over proportion of always takers;
# pc0: mortality and recurrence rate in compliers in intervention 0;
# pc1: mortality and recurrence rate in compliers in intervention 1;
# pa: mortality and recurrence rate in always takers;
# pn: mortality and recurrence rate in never takers;
# nIterations: number of iterations
# NOTE(review): `alpha` is assigned here but never used in this function.
alpha=0.025
#define proportions of patients
wc=seq(0.1,1, by=0.05) # proportion of compliers
wa= (1-wc)/(wna+1) # proportion of always takers
wn= wa*wna # proportion of never takers
w00=wc+wn # Randomised to 0, intervention 0 (C+NT), proportion
w01=rep(wn, length (wc)) # Randomised to 0, intervention 1 (NT), proportion
w10= wa # Randomised to 1, intervention 0 (AT), proportion
w11=wc+wa # Randomised to 1, intervention 1 (C+AT), proportion
#define proportions of events
p00=(wc*pc0+wn*pn)/(wc+wn) # Randomised to 0, intervention 0 (C+NT), outcome
p01=rep(pn, length(wc)) # Randomised to 0, intervention 1 (NT), outcome
p10=rep(pa, length(wc)) # Randomised to 1, intervention 0 (AT), outcome
p11=(wc*pc1+wa*pa)/(wc+wa) # Randomised to 1, intervention 1 (C+AT), outcome
#make up vectors for simulations
eff.iv<-eff.itt<-eff.pp<-eff.at<-c()
.eff.iv<-.eff.itt<-.eff.pp<-.eff.at<-c()
#simulate and derive treatment effect
sim<- function() {
for(i in 1:length(wc)) {
for(l in 1:nIterations) {
#simulate data frame
simdata<- data.frame (
"randomisation" = c(rep(1,n), rep(0,n))) #half randomised to 1, half randomised to 0
simdata$intervention <- c(rbinom(n,1,w11[i]), # for those randomised to 1, intervention proportion w11
rbinom(n,1,w10[i])) # for those randomised to 0, intervention proportion w10
simdata$outcome <- rep(NA,2*n)
for(j in 1:(2*n)){
if(simdata$randomisation[j]==1 & simdata$intervention[j]==1){
simdata$outcome[j] <- rbinom(1,1,prob=p11[i])# for those randomised to 1, intervention 1, outcome proportion p11
}
else if(simdata$randomisation[j]==1 & simdata$intervention[j]==0){
simdata$outcome[j] <-rbinom(1,1,prob=p01[i]) # for those randomised to 1, intervention 0, outcome proportion p01
}
else if(simdata$randomisation[j]==0 & simdata$intervention[j]==1){
simdata$outcome[j] <-rbinom(1,1,prob=p10[i]) # for those randomised to 0, intervention 1, outcome proportion p10
}
else if(simdata$randomisation[j]==0 & simdata$intervention[j]==0){
simdata$outcome[j] <-rbinom(1,1,prob=p00[i]) # for those randomised to 0, intervention 0, outcome proportion p00
}
}
#generate proportions from simulated data
p00.value<- mean((filter(simdata, intervention==0, randomisation==0))$outcome)
p11.value<- mean((filter(simdata, intervention==1, randomisation==1))$outcome)
w1.value<- w11.value<- mean((filter(simdata, randomisation==1))$intervention)
w0.value<- w10.value<- mean((filter(simdata, randomisation==0))$intervention)
wc.value<- w1.value-w0.value
if (wc.value==0) {wc.value=wc[i]}
pz1.value<- mean((filter(simdata, randomisation==1))$outcome)
pz0.value<- mean((filter(simdata, randomisation==0))$outcome)
pd1.value<- mean((filter(simdata, intervention==1))$outcome)
pd0.value<- mean((filter(simdata, intervention==0))$outcome)
#treatment effect
.eff.itt[l]<- pz1.value-pz0.value #intention to treat
.eff.pp[l] <- p11.value-p00.value #per protocol
.eff.at[l] <- pd1.value-pd0.value # as treated
ivmodel<-ivreg(outcome ~ intervention, ~ randomisation, x=TRUE, data=simdata) #iv with 2 stage regression
.eff.iv[l]<-ivmodel$coef[2]
#mean of iterated data
# NOTE(review): these running means are recomputed on every iteration; the
# final value per i is the mean over all l, so moving them after the inner
# loop would give the same result with less work. The `.eff.*` accumulators
# are also reused across i (entries 1:nIterations are overwritten each time).
eff.itt[i]<- mean(.eff.itt, na.rm=TRUE)
eff.pp[i] <- mean(.eff.pp, na.rm=TRUE)
eff.at[i] <- mean(.eff.at, na.rm=TRUE)
eff.iv[i]<-mean(.eff.iv, na.rm = TRUE)
}
}
return(data.frame(wc, eff.iv, eff.itt, eff.pp, eff.at))
}
sim.df<-sim()
# Smooth each estimator's bias curve with a log(wc) linear fit for plotting.
sim.df$pred.eff.iv<- predict(lm(eff.iv~log(wc), data=sim.df))
sim.df$pred.eff.itt<- predict(lm(eff.itt~log(wc), data=sim.df))
sim.df$pred.eff.at<- predict(lm(eff.at~log(wc), data=sim.df))
sim.df$pred.eff.pp<- predict(lm(eff.pp~log(wc), data=sim.df))
bias.plot <- ggplot(sim.df, aes(sim.df[1]))+
geom_point(aes(y=sim.df[2], colour="IV effect")) +
geom_point(aes(y=sim.df[3], colour="ITT effect")) +
geom_point(aes(y=sim.df[4], colour="PP effect")) +
geom_point(aes(y=sim.df[5], colour="AT effect")) +
geom_line(aes(y = pred.eff.iv, colour="IV effect" ))+
geom_line(aes(y = pred.eff.itt, colour="ITT effect"))+
geom_line(aes(y = pred.eff.pp, colour="PP effect"))+
geom_line(aes(y = pred.eff.at, colour="AT effect"))+
geom_line(aes(y=pc1-pc0, colour='True effect'),linetype="dotted") +
xlab("Proportion of compliers")+
ylab("Effect")
return(bias.plot)
}
# Bias scenarios: varying never-taker:always-taker ratio (bias1-3), then
# varying the event rates of compliers / always-takers / never-takers.
bias1<-simdata.bias(n=230,wna=1,pc1=0.4, pc0=0.4, pa=0.4, pn=0.4, nIterations=1000)
bias2<-simdata.bias(n=230,wna=2,pc1=0.4, pc0=0.4, pa=0.4, pn=0.4, nIterations=1000)
bias3<-simdata.bias(n=230,wna=3,pc1=0.4, pc0=0.4, pa=0.4, pn=0.4, nIterations=1000)
ggarrange(bias1, bias2, bias3, ncol = 3, nrow = 1)
bias4<-bias1 #0
bias5<-simdata.bias(n=230,wna=1,pc1=0.4, pc0=0.2, pa=0.4, pn=0.4, nIterations=1000) #-0.2
bias6<-simdata.bias(n=230,wna=1,pc1=0.4, pc0=0.6, pa=0.4, pn=0.4, nIterations=1000) # 0.2
bias7<-simdata.bias(n=230,wna=1,pc1=0.3, pc0=0.4, pa=0.4, pn=0.4, nIterations=1000) # 0.1
bias8<-simdata.bias(n=230,wna=1,pc1=0.5, pc0=0.4, pa=0.4, pn=0.4, nIterations=1000) # -0.1
bias9<-simdata.bias(n=230,wna=1,pc1=0.4, pc0=0.4, pa=0.2, pn=0.4, nIterations=1000) # -0.2
bias10<-simdata.bias(n=230,wna=1,pc1=0.4, pc0=0.4, pa=0.6, pn=0.4, nIterations=1000) # 0.2
bias11<-simdata.bias(n=230,wna=1,pc1=0.4, pc0=0.4, pa=0.4, pn=0.3, nIterations=1000) # 0.1
ggarrange(bias4, bias5, bias6, bias7,
bias8, bias9, bias10, bias11,
ncol = 4, nrow = 2)
######TYPE 1 ERROR######
########################
# Simulate the same partial-compliance trial under the null hypothesis
# (pc1 - pc0 = NImargin) and plot the estimated one-sided type 1 error of the
# IV, ITT, PP and AT analyses against the proportion of compliers.
simdata.type1<- function(n, wna, pc1, pa, pn, NImargin, nIterations){
# n: number of participants per group;
# wna: proportion of never takers over proportion of always takers;
# pc0: mortality and recurrence rate in compliers in intervention 0;
# pc1: mortality and recurrence rate in compliers in intervention 1;
# pa: mortality and recurrence rate in always takers;
# pn: mortality and recurrence rate in never takers;
# NImargin: non inferiority margin
# nIterations: number of iterations
alpha=0.025
#define proportions of patients
wc=seq(0.1,0.99, by=0.05) # proportion of compliers
wa= (1-wc)/(wna+1) # proportion of always takers
wn= wa*wna # proportion of never takers
w00=wc+wn # Randomised to 0, intervention 0 (C+NT), proportion
w01=rep(wn, length (wc)) # Randomised to 0, intervention 1 (NT), proportion
w10= wa # Randomised to 1, intervention 0 (AT), proportion
w11=wc+wa # Randomised to 1, intervention 1 (C+AT), proportion
#define proportions of events
pc0=pc1-NImargin # data simulated with assumption of null hypothesis pc1-pc0=NImargin
p00=(wc*pc0+wn*pn)/(wc+wn) # Randomised to 0, intervention 0 (C+NT), outcome
p01=rep(pn, length(wc)) # Randomised to 0, intervention 1 (NT), outcome
p10=rep(pa, length(wc)) # Randomised to 1, intervention 0 (AT), outcome
p11=(wc*pc1+wa*pa)/(wc+wa) # Randomised to 1, intervention 1 (C+AT), outcome
#make up vectors for simulations
type1.error.iv<-type1.error.itt<-type1.error.pp<-type1.error.at<- c()
.type1.error.iv<-.type1.error.itt<- .type1.error.pp<-.type1.error.at<-c()
#simulate and derive type 1 error
sim<- function() {
for(i in 1:length(wc)) { print(i)
for(l in 1:nIterations) {
#simulate data frame
simdata<- data.frame (
"randomisation" = c(rep(1,n), rep(0,n))) #half randomised to 1, half randomised to 0
simdata$intervention <- c(rbinom(n,1,w11[i]), # for those randomised to 1, intervention proportion w11
rbinom(n,1,w10[i])) # for those randomised to 0, intervention proportion w10
simdata$outcome <- rep(NA,2*n)
for(j in 1:(2*n)){
if(simdata$randomisation[j]==1 & simdata$intervention[j]==1){
simdata$outcome[j] <- rbinom(1,1,prob=p11[i])# for those randomised to 1, intervention 1, outcome proportion p11
}
else if(simdata$randomisation[j]==1 & simdata$intervention[j]==0){
simdata$outcome[j] <-rbinom(1,1,prob=p01[i]) # for those randomised to 1, intervention 0, outcome proportion p01
}
else if(simdata$randomisation[j]==0 & simdata$intervention[j]==1){
simdata$outcome[j] <-rbinom(1,1,prob=p10[i]) # for those randomised to 0, intervention 1, outcome proportion p10
}
else if(simdata$randomisation[j]==0 & simdata$intervention[j]==0){
simdata$outcome[j] <-rbinom(1,1,prob=p00[i]) # for those randomised to 0, intervention 0, outcome proportion p00
}
}
#generate proportions in simulated data
w1.vector<- w11.vector<-(filter(simdata,simdata$randomisation==1))$intervention; w1.value<-mean(w1.vector)
w0.vector<- w10.vector<-(filter(simdata,simdata$randomisation==0))$intervention; w0.value<-mean(w0.vector)
wc.value<- w1.value-w0.value
if (wc.value==0) {wc.value=wc[i]}
p11.vector<- (filter(simdata,intervention==1, simdata$randomisation==1))$outcome; p11.value<-mean(p11.vector)
p01.vector<- (filter(simdata,intervention==0, simdata$randomisation==1))$outcome; p01.value<-mean(p01.vector)
p10.vector<- (filter(simdata,intervention==1, simdata$randomisation==0))$outcome; p10.value<-mean(p10.vector)
p00.vector<- (filter(simdata,intervention==0, simdata$randomisation==0))$outcome; p00.value<-mean(p00.vector)
pz1.vector<- (filter(simdata, randomisation==1))$outcome; pz1.value<-mean(pz1.vector)
pz0.vector<- (filter(simdata, randomisation==0))$outcome; pz0.value<-mean(pz0.vector)
pd1.vector<- (filter(simdata, intervention==1))$outcome;pd1.value<- mean(pd1.vector)
pd0.vector<- (filter(simdata, intervention==0))$outcome;pd0.value<- mean(pd0.vector)
#estimate treatment effects
eff.itt<- pz1.value-pz0.value #intention to treat
eff.pp <- p11.value-p00.value #per protocol
eff.at <- pd1.value-pd0.value #as treated
ivmodel=ivreg(simdata$outcome ~ simdata$intervention, ~ simdata$randomisation, x=TRUE, data=simdata)
eff.iv<-ivmodel$coef[2] #iv with 2 stage regression
#variances of proportions and effects
# NOTE(review): vw1/vw0/v11/v01/v10/v00 and the composite v.1/v.0 are
# computed but never used below -- candidates for removal or for a
# delta-method IV variance that was left unfinished.
vw1<-var(w1.vector)
vw0<-var(w0.vector)
v11<-var(p11.vector)
v01<-var(p01.vector)
v10<-var(p10.vector)
v00<-var(p00.vector)
v.1<-vw1*(v11+v01+(p11.value-p01.value)^2)+w1.value^2*v11+(1-w1.value)^2*v01
v.0<-vw0*(v10+v00+(p10.value-p00.value)^2)+w0.value^2*v10+(1-w0.value)^2*v00
var.eff.itt<- pz1.value*(1-pz1.value)/length(pz1.vector) + pz0.value*(1-pz0.value)/length(pz0.vector)
var.eff.pp<- p11.value*(1-p11.value)/length(p11.vector) + p00.value*(1-p00.value)/length(p00.vector)
var.eff.at<- pd1.value*(1-pd1.value)/length(pd1.vector) + pd0.value*(1-pd0.value)/length(pd0.vector)
if (is.na(eff.iv)) {var.eff.iv<-var.eff.itt} else {var.eff.iv<-robust.se(ivmodel)[2,2]^2}
#type 1 error
.type1.error.itt[l]<-pnorm(qnorm(alpha)+(NImargin-eff.itt)/(var.eff.itt^0.5), lower.tail=TRUE)
.type1.error.pp[l]<- pnorm(qnorm(alpha)+(NImargin-eff.pp)/(var.eff.pp^0.5), lower.tail=TRUE)
.type1.error.at[l]<- pnorm(qnorm(alpha)+(NImargin-eff.at)/(var.eff.at^0.5), lower.tail=TRUE)
.type1.error.iv[l]<- pnorm(qnorm(alpha)+(NImargin-eff.iv)/(var.eff.iv^0.5), lower.tail=TRUE)
#mean of Type 1 error
# NOTE(review): recomputed every iteration; the final value per i is the
# mean over all l, so this could sit after the inner loop unchanged.
type1.error.iv[i]<-mean(.type1.error.iv, na.rm=TRUE)
type1.error.itt[i]<-mean(.type1.error.itt, na.rm=TRUE)
type1.error.pp[i]<-mean(.type1.error.pp, na.rm=TRUE)
type1.error.at[i]<-mean(.type1.error.at, na.rm=TRUE)
}
}
return(data.frame(wc, type1.error.iv, type1.error.itt, type1.error.pp, type1.error.at))
}
sim.df<-sim()
# Smooth each type 1 error curve with a log(wc) linear fit for plotting.
sim.df$pred.t1.iv<- predict(lm(type1.error.iv~log(wc), data=sim.df))
sim.df$pred.t1.itt<- predict(lm(type1.error.itt~log(wc), data=sim.df))
sim.df$pred.t1.at<- predict(lm(type1.error.at~log(wc), data=sim.df))
sim.df$pred.t1.pp<- predict(lm(type1.error.pp~log(wc), data=sim.df))
bias.plot <- ggplot(sim.df, aes(sim.df[1]))+
geom_point(aes(y=sim.df[2], colour="IV Type 1 error")) +
geom_point(aes(y=sim.df[3], colour="ITT Type 1 error")) +
geom_point(aes(y=sim.df[4], colour="PP Type 1 error")) +
geom_point(aes(y=sim.df[5], colour="AT Type 1 error")) +
geom_line(aes(y = pred.t1.iv, colour="IV Type 1 error" ))+
geom_line(aes(y = pred.t1.itt, colour="ITT Type 1 error"))+
geom_line(aes(y = pred.t1.pp, colour="PP Type 1 error"))+
geom_line(aes(y = pred.t1.at, colour="AT Type 1 error"))+
geom_line(aes(y=alpha, colour='Alpha'),linetype="dotted") +
xlab("Proportion of compliers")+
ylab("Type 1 error")
return(bias.plot)
}
# Type 1 error scenarios: varying never-taker:always-taker ratio (t1-t3),
# then varying the complier / always-taker / never-taker event rates.
t1<-simdata.type1(n=230,wna=1,pc1=0.4, pa=0.4, pn=0.4, NImargin=0.12, nIterations=1000)
t2<-simdata.type1(n=230,wna=2,pc1=0.4, pa=0.4, pn=0.4, NImargin=0.12, nIterations=1000)
t3<-simdata.type1(n=230,wna=3,pc1=0.4, pa=0.4, pn=0.4, NImargin=0.12, nIterations=1000)
ggarrange(t1, t2, t3, ncol = 3, nrow = 1)
simdata.type1(n=500,wna=1,pc1=0.4, pa=0.4, pn=0.4, NImargin=0.12, nIterations=10)
t4<-t1
t5<-simdata.type1(n=230,wna=1,pc1=0.2,pa=0.4,pn=0.4,NImargin=0.12,nIterations=1000)#-0.12
t6<-simdata.type1(n=230,wna=1,pc1=0.6,pa=0.4,pn=0.4,NImargin=0.12,nIterations=1000)#-0.12
t7<-simdata.type1(n=230,wna=1,pc1=0.4,pa=0.2,pn=0.4,NImargin=0.12,nIterations=1000)#-0.32
t8<-simdata.type1(n=230,wna=1,pc1=0.4,pa=0.6,pn=0.4,NImargin=0.12,nIterations=1000)#0.08
t9<-simdata.type1(n=230,wna=1,pc1=0.4,pa=0.7,pn=0.4,NImargin=0.12,nIterations=1000)#0.18
t10<-simdata.type1(n=230,wna=1,pc1=0.4,pa=0.4,pn=0.2,NImargin=0.12,nIterations=1000)#0.08
t11<-simdata.type1(n=230,wna=1,pc1=0.4,pa=0.4,pn=0.5,NImargin=0.12,nIterations=1000)#-0.22
ggarrange(t4, t5, t6, t7,
t8, t9, t10,t11,
ncol = 4, nrow = 2)
##########POWER#########
########################
simdata.power<- function(n, wna, pc1, pc0, pa, pn, NImargin, nIterations){
# n: number of participants per group;
# wna: proportion of never takers;
# pc0: survival rate in compliers in intervention 0;
# pc1: survival rate in compliers in intervention 1;
# pa: survival rate in always takers;
# pn: survival rate in never takers;
# NImargin: non inferiority margin (negative);
# nIterations: number of iterations
alpha=0.025
#define proportions of patients
wc=seq(0.1,1, by=0.05) # proportion of compliers
wa= (1-wc)/(wna+1) # proportion of always takers
wn= wa*wna # proportion of never takers
w00=wc+wn # Randomised to 0, intervention 0 (C+NT), proportion
w01=rep(wn, length (wc)) # Randomised to 0, intervention 1 (NT), proportion
w10= wa # Randomised to 1, intervention 0 (AT), proportion
w11=wc+wa # Randomised to 1, intervention 1 (C+AT), proportion
#define proportions of events # simulation based on alternative hypothesis that pc1-pc0>NImargin (negative)
p00=(wc*pc0+wn*pn)/(wc+wn) # Randomised to 0, intervention 0 (C+NT), outcome
p01=rep(pn, length(wc)) # Randomised to 0, intervention 1 (NT), outcome
p10=rep(pa, length(wc)) # Randomised to 1, intervention 0 (AT), outcome
p11=(wc*pc1+wa*pa)/(wc+wa) # Randomised to 1, intervention 1 (C+AT), outcome
#make up vectors for simulations
power.iv<-power.itt<- power.at<-power.pp<-c()
.power.iv<-.power.itt<- .power.at<-.power.pp<-c()
#simulate and derive treatment effect
sim<- function() {
for(i in 1:length(wc)) { print(i)
for(l in 1:nIterations) {
#simulate data frame
simdata<- data.frame (
"randomisation" = c(rep(1,n), rep(0,n))) #half randomised to 1, half randomised to 0
simdata$intervention <- c(rbinom(n,1,w11[i]), # for those randomised to 1, intervention proportion w11
rbinom(n,1,w10[i])) # for those randomised to 0, intervention proportion w10
simdata$outcome <- rep(NA,2*n)
for(j in 1:(2*n)){
if(simdata$randomisation[j]==1 & simdata$intervention[j]==1){
simdata$outcome[j] <- rbinom(1,1,prob=p11[i])# for those randomised to 1, intervention 1, outcome proportion p11
}
else if(simdata$randomisation[j]==1 & simdata$intervention[j]==0){
simdata$outcome[j] <-rbinom(1,1,prob=p01[i]) # for those randomised to 1, intervention 0, outcome proportion p01
}
else if(simdata$randomisation[j]==0 & simdata$intervention[j]==1){
simdata$outcome[j] <-rbinom(1,1,prob=p10[i]) # for those randomised to 0, intervention 1, outcome proportion p10
}
else if(simdata$randomisation[j]==0 & simdata$intervention[j]==0){
simdata$outcome[j] <-rbinom(1,1,prob=p00[i]) # for those randomised to 0, intervention 0, outcome proportion p00
}
}
#generate proportions in simulated data
w1.vector<-(filter(simdata,simdata$randomisation==1))$intervention; w1.value<-mean(w1.vector)
w0.vector<-(filter(simdata,simdata$randomisation==0))$intervention; w0.value<-mean(w0.vector)
wc.value<- w1.value-w0.value
if (wc.value==0) {wc.value=wc[i]}
p11.vector<- (filter(simdata,intervention==1, simdata$randomisation==1))$outcome; p11.value<-mean(p11.vector)
p00.vector<- (filter(simdata,intervention==0, simdata$randomisation==0))$outcome; p00.value<-mean(p00.vector)
pz1.vector<- (filter(simdata, randomisation==1))$outcome; pz1.value<-mean(pz1.vector)
pz0.vector<- (filter(simdata, randomisation==0))$outcome; pz0.value<-mean(pz0.vector)
pd1.vector<- (filter(simdata, intervention==1))$outcome;pd1.value<- mean(pd1.vector)
pd0.vector<- (filter(simdata, intervention==0))$outcome;pd0.value<- mean(pd0.vector)
#estimate treatment effects
eff.itt<- pz1.value-pz0.value #intention to treat
eff.pp <- p11.value-p00.value #per protocol
eff.at <- pd1.value-pd0.value #as treated
ivmodel=ivreg(simdata$outcome ~ simdata$intervention, ~ simdata$randomisation, x=TRUE, data=simdata)
eff.iv<-ivmodel$coef[2] #iv with 2 stage regression
#variances of proportions and effects
var.eff.itt<- pz1.value*(1-pz1.value)/length(pz1.vector) + pz0.value*(1-pz0.value)/length(pz0.vector)
var.eff.pp<- p11.value*(1-p11.value)/length(p11.vector) + p00.value*(1-p00.value)/length(p00.vector)
var.eff.at<- pd1.value*(1-pd1.value)/length(pd1.vector) + pd0.value*(1-pd0.value)/length(pd0.vector)
if (is.na(eff.iv)) {var.eff.iv<-var.eff.itt} else {var.eff.iv<-robust.se(ivmodel)[2,2]^2}
#z values
z.itt<-(eff.itt-NImargin)/sqrt(var.eff.itt)
z.iv<-(eff.iv-NImargin)/sqrt(var.eff.iv)
z.pp<-(eff.pp-NImargin)/sqrt(var.eff.pp)
z.at<-(eff.at-NImargin)/sqrt(var.eff.at)
#power calculations
.power.iv[l]<- pnorm(z.iv-qnorm(1-alpha))+pnorm(-z.iv-qnorm(1-alpha), lower.tail=TRUE)
.power.itt[l]<- pnorm(z.itt-qnorm(1-alpha))+pnorm(-z.itt-qnorm(1-alpha), lower.tail=TRUE)
.power.at[l]<- pnorm(z.at-qnorm(1-alpha))+pnorm(-z.at-qnorm(1-alpha), lower.tail=TRUE)
.power.pp[l]<-pnorm(z.pp-qnorm(1-alpha))+pnorm(-z.pp-qnorm(1-alpha), lower.tail=TRUE)
power.iv[i]<-mean(.power.iv, na.rm=TRUE)
power.itt[i]<-mean(.power.itt,na.rm=TRUE)
power.pp[i]<-mean(.power.pp,na.rm=TRUE)
power.at[i]<-mean(.power.at,na.rm=TRUE)
}
}
return(data.frame(wc, power.iv, power.itt, power.pp, power.at))
}
sim.df<-sim()
sim.df$pred.p.iv<- predict(lm(power.iv~log(wc), data=sim.df))
sim.df$pred.p.itt<- predict(lm(power.itt~log(wc), data=sim.df))
sim.df$pred.p.at<- predict(lm(power.at~log(wc), data=sim.df))
sim.df$pred.p.pp<- predict(lm(power.pp~log(wc), data=sim.df))
plot <- ggplot(sim.df, aes(sim.df[1]))+
geom_point(aes(y=sim.df[2], colour="IV Power")) +
geom_point(aes(y=sim.df[3], colour="ITT Power")) +
geom_point(aes(y=sim.df[4], colour="PP Power")) +
geom_point(aes(y=sim.df[5], colour="AT Power")) +
geom_line(aes(y = pred.p.iv, colour="IV Power" ))+
geom_line(aes(y = pred.p.itt, colour="ITT Power"))+
geom_line(aes(y = pred.p.pp, colour="PP Power"))+
geom_line(aes(y = pred.p.at, colour="AT Power"))+
geom_line(aes(y=.8, colour='Target power'),linetype="dotted") +
xlab("Proportion of compliers")+
ylab("Power")
return(plot)
}
# Scenario runs for the power simulation: vary the never/always-taker ratio
# (wna), the complier/non-complier outcome rates, and the sample size, then
# arrange the resulting power-vs-compliance plots in grids.
p1<-simdata.power(n=230,wna=1, pc1=0.4, pa=0.4,pc0=0.4, pn=0.4, NImargin=-0.12, nIterations=1000) #pc1-pc0=0
p2<-simdata.power(n=230,wna=2,pc1=0.4, pa=0.4, pc0=0.4,pn=0.4, NImargin=-0.12, nIterations=1000)
p3<-simdata.power(n=230,wna=3,pc1=0.4, pa=0.4, pc0=0.4,pn=0.4, NImargin=-0.12, nIterations=1000)
ggarrange(p1, p2, p3, ncol = 3, nrow = 1)
p4<- p1
p5<-simdata.power(n=230,wna=1,pc1=0.4,pa=0.4,pc0=0.3,pn=0.4,NImargin=-0.12, nIterations=1000) #pc1-pc0=0.1
p6<-simdata.power(n=230,wna=1,pc1=0.4,pa=0.4,pc0=0.5,pn=0.4,NImargin=-0.12, nIterations=1000) #pc1-pc0= -0.1
p7<-simdata.power(n=230,wna=1,pc1=0.4,pa=0.5,pc0=0.4,pn=0.3,NImargin=-0.12, nIterations=1000) #pc1-pc0=0
p8<-simdata.power(n=230,wna=1,pc1=0.5,pa=0.3,pc0=0.5,pn=0.5,NImargin=-0.12, nIterations=1000) #pc1-pc0=0
p9<- simdata.power(n=10000,wna=1,pc1=0.4,pa=0.4,pc0=0.5,pn=0.4,NImargin=-0.12, nIterations=10) #pc1-pc0=-0.1, can be overcome with larger sample size
ggarrange(p4, p5, p6, p7, p8, p9,
ncol = 3, nrow = 2)
##########GROUP SEQUENTIAL#########
###################################
# Group-sequential simulation for a two-arm non-inferiority trial with
# non-compliance.  For each assumed proportion of compliers (wc), simulates
# nIterations trials, splits each into four cumulative looks (25/50/75/100%
# of participants) and computes the non-inferiority Z statistic at each look
# for four analysis sets: intention-to-treat (ITT), as-treated (AT),
# per-protocol (PP) and instrumental-variable (IV, two-stage least squares).
# Returns a 2x2 grid (ggarrange) plotting the mean Z values against wc, with
# gsDesign efficacy boundaries overlaid as dotted lines.
# NOTE(review): relies on dplyr::filter, ivreg + robust.se (AER/ivpack),
# gsDesign, ggplot2 and ggpubr::ggarrange being attached earlier in the file.
simdata.gs<- function(n, wna, pc1, pc0, pa, pn, NImargin, nIterations){
# n: number of participants per group:
# wna: proportion of never takers;
# pc0: survival rate in compliers in intervention 0;
# pc1: survival rate in compliers in intervention 1;
# pa: survival rate in always takers;
# pn: survival rate in never takers;
# NImargin: non inferiority margin (negative);
# nIterations: number of iterations
alpha=0.025 # one-sided significance level used for the boundary calculations
#define proportions of patients
wc=seq(0.1,1, by=0.05) # proportion of compliers
wa= (1-wc)/(wna+1) # proportion of always takers
wn= wa*wna # proportion of never takers
w00=wc+wn # Randomised to 0, intervention 0 (C+NT), proportion
w01=rep(wn, length (wc)) # Randomised to 0, intervention 1 (NT), proportion
w10= wa # Randomised to 1, intervention 0 (AT), proportion
w11=wc+wa # Randomised to 1, intervention 1 (C+AT), proportion
#define proportions of events # simulation based on alternative hypothesis that pc1-pc0>NImargin (negative)
p00=(wc*pc0+wn*pn)/(wc+wn) # Randomised to 0, intervention 0 (C+NT), outcome
p01=rep(pn, length(wc)) # Randomised to 0, intervention 1 (NT), outcome
p10=rep(pa, length(wc)) # Randomised to 1, intervention 0 (AT), outcome
p11=(wc*pc1+wa*pa)/(wc+wa) # Randomised to 1, intervention 1 (C+AT), outcome
#make empty vectors for iterations
z.itt1<-z.pp1<-z.at1<-z.iv1<-z.itt2<-z.pp2<-z.at2<-z.iv2<-c()
z.itt3<-z.pp3<-z.at3<-z.iv3<-z.itt4<-z.pp4<-z.at4<-z.iv4<-c()
.z.itt<-.z.pp<-.z.at<-.z.iv<-c()
#simulate and derive treatment effect
sim<- function() {
for(i in 1:length(wc)) { print (i)
for(l in 1:nIterations) { print(l)
#simulate data frame
simdata<- data.frame (
"randomisation" = c(rep(1,n), rep(0,n))) #half randomised to 1, half randomised to 0
simdata$intervention <- c(rbinom(n,1,w11[i]), # for those randomised to 1, intervention proportion w11
rbinom(n,1,w10[i])) # for those randomised to 0, intervention proportion w10
simdata$outcome <- rep(NA,2*n)
for(j in 1:(2*n)){
if(simdata$randomisation[j]==1 & simdata$intervention[j]==1){
simdata$outcome[j] <- rbinom(1,1,prob=p11[i])# for those randomised to 1, intervention 1, outcome proportion p11
}
else if(simdata$randomisation[j]==1 & simdata$intervention[j]==0){
simdata$outcome[j] <-rbinom(1,1,prob=p01[i]) # for those randomised to 1, intervention 0, outcome proportion p01
}
else if(simdata$randomisation[j]==0 & simdata$intervention[j]==1){
simdata$outcome[j] <-rbinom(1,1,prob=p10[i]) # for those randomised to 0, intervention 1, outcome proportion p10
}
else if(simdata$randomisation[j]==0 & simdata$intervention[j]==0){
simdata$outcome[j] <-rbinom(1,1,prob=p00[i]) # for those randomised to 0, intervention 0, outcome proportion p00
}
}
#Shuffle rows
simdata <- simdata[sample(nrow(simdata)),]
#data for each interim analysis
s1<-simdata[1:(2*n/4),]
s2<-simdata[1:(2*n/4*2),]
s3<-simdata[1:(2*n/4*3),]
s4<-simdata[1:(2*n),]
#Compute z values at each interim analysis (3 interim and 1 final)
# The four blocks below repeat the same estimation for looks s1..s4; they
# differ only in the data subset and the target z vectors (candidate for
# extraction into a helper function).
#generate proportions in simulated data s1
w1.vector<-(filter(s1,randomisation==1))$intervention; w1.value<-mean(w1.vector)
w0.vector<-(filter(s1,randomisation==0))$intervention; w0.value<-mean(w0.vector)
wc.value<- w1.value-w0.value
if (wc.value==0) {wc.value=wc[i]}
p11.vector<- (filter(s1,intervention==1, randomisation==1))$outcome; p11.value<-mean(p11.vector)
p00.vector<- (filter(s1,intervention==0, randomisation==0))$outcome; p00.value<-mean(p00.vector)
pz1.vector<- (filter(s1,randomisation==1))$outcome; pz1.value<-mean(pz1.vector)
pz0.vector<- (filter(s1,randomisation==0))$outcome; pz0.value<-mean(pz0.vector)
pd1.vector<- (filter(s1,intervention==1))$outcome;pd1.value<- mean(pd1.vector)
pd0.vector<- (filter(s1,intervention==0))$outcome;pd0.value<- mean(pd0.vector)
###estimate treatment effects
eff.itt<- pz1.value-pz0.value #intention to treat
eff.pp <- p11.value-p00.value #per protocol
eff.at <- pd1.value-pd0.value #as treated
ivmodel=ivreg(outcome ~ intervention, ~ randomisation, x=TRUE, data=s1)
eff.iv<-ivmodel$coef[2] #iv with 2 stage regression
###variances of proportions and effects
var.eff.itt<- pz1.value*(1-pz1.value)/length(pz1.vector) + pz0.value*(1-pz0.value)/length(pz0.vector)
var.eff.pp<- p11.value*(1-p11.value)/length(p11.vector) + p00.value*(1-p00.value)/length(p00.vector)
var.eff.at<- pd1.value*(1-pd1.value)/length(pd1.vector) + pd0.value*(1-pd0.value)/length(pd0.vector)
if (is.na(eff.iv)) {var.eff.iv<-var.eff.itt} else {var.eff.iv<-robust.se(ivmodel)[2,2]^2}
###z values
z.itt1[l]<-(eff.itt-NImargin)/sqrt(var.eff.itt)
z.iv1[l]<-(eff.iv-NImargin)/sqrt(var.eff.iv)
z.pp1[l]<-(eff.pp-NImargin)/sqrt(var.eff.pp)
z.at1[l]<-(eff.at-NImargin)/sqrt(var.eff.at)
#generate proportions in simulated data s2
w1.vector<-(filter(s2,randomisation==1))$intervention; w1.value<-mean(w1.vector)
w0.vector<-(filter(s2,randomisation==0))$intervention; w0.value<-mean(w0.vector)
wc.value<- w1.value-w0.value
if (wc.value==0) {wc.value=wc[i]}
p11.vector<- (filter(s2,intervention==1, randomisation==1))$outcome; p11.value<-mean(p11.vector)
p00.vector<- (filter(s2,intervention==0, randomisation==0))$outcome; p00.value<-mean(p00.vector)
pz1.vector<- (filter(s2, randomisation==1))$outcome; pz1.value<-mean(pz1.vector)
pz0.vector<- (filter(s2, randomisation==0))$outcome; pz0.value<-mean(pz0.vector)
pd1.vector<- (filter(s2, intervention==1))$outcome;pd1.value<- mean(pd1.vector)
pd0.vector<- (filter(s2, intervention==0))$outcome;pd0.value<- mean(pd0.vector)
###estimate treatment effects
eff.itt<- pz1.value-pz0.value #intention to treat
eff.pp <- p11.value-p00.value #per protocol
eff.at <- pd1.value-pd0.value #as treated
ivmodel=ivreg(outcome ~ intervention, ~ randomisation, x=TRUE, data=s2)
eff.iv<-ivmodel$coef[2] #iv with 2 stage regression
###variances of proportions and effects
var.eff.itt<- pz1.value*(1-pz1.value)/length(pz1.vector) + pz0.value*(1-pz0.value)/length(pz0.vector)
var.eff.pp<- p11.value*(1-p11.value)/length(p11.vector) + p00.value*(1-p00.value)/length(p00.vector)
var.eff.at<- pd1.value*(1-pd1.value)/length(pd1.vector) + pd0.value*(1-pd0.value)/length(pd0.vector)
if (is.na(eff.iv)) {var.eff.iv<-var.eff.itt} else {var.eff.iv<-robust.se(ivmodel)[2,2]^2}
###z values
z.itt2[l]<-(eff.itt-NImargin)/sqrt(var.eff.itt)
z.iv2[l]<-(eff.iv-NImargin)/sqrt(var.eff.iv)
z.pp2[l]<-(eff.pp-NImargin)/sqrt(var.eff.pp)
z.at2[l]<-(eff.at-NImargin)/sqrt(var.eff.at)
#generate proportions in simulated data s3
w1.vector<-(filter(s3,randomisation==1))$intervention; w1.value<-mean(w1.vector)
w0.vector<-(filter(s3,randomisation==0))$intervention; w0.value<-mean(w0.vector)
wc.value<- w1.value-w0.value
if (wc.value==0) {wc.value=wc[i]}
p11.vector<- (filter(s3,intervention==1, randomisation==1))$outcome; p11.value<-mean(p11.vector)
p00.vector<- (filter(s3,intervention==0, randomisation==0))$outcome; p00.value<-mean(p00.vector)
pz1.vector<- (filter(s3, randomisation==1))$outcome; pz1.value<-mean(pz1.vector)
pz0.vector<- (filter(s3, randomisation==0))$outcome; pz0.value<-mean(pz0.vector)
pd1.vector<- (filter(s3, intervention==1))$outcome;pd1.value<- mean(pd1.vector)
pd0.vector<- (filter(s3, intervention==0))$outcome;pd0.value<- mean(pd0.vector)
###estimate treatment effects
eff.itt<- pz1.value-pz0.value #intention to treat
eff.pp <- p11.value-p00.value #per protocol
eff.at <- pd1.value-pd0.value #as treated
ivmodel=ivreg(outcome ~ intervention, ~ randomisation, x=TRUE, data=s3)
eff.iv<-ivmodel$coef[2] #iv with 2 stage regression
###variances of proportions and effects
var.eff.itt<- pz1.value*(1-pz1.value)/length(pz1.vector) + pz0.value*(1-pz0.value)/length(pz0.vector)
var.eff.pp<- p11.value*(1-p11.value)/length(p11.vector) + p00.value*(1-p00.value)/length(p00.vector)
var.eff.at<- pd1.value*(1-pd1.value)/length(pd1.vector) + pd0.value*(1-pd0.value)/length(pd0.vector)
if (is.na(eff.iv)) {var.eff.iv<-var.eff.itt} else {var.eff.iv<-robust.se(ivmodel)[2,2]^2}
###z values
z.itt3[l]<-(eff.itt-NImargin)/sqrt(var.eff.itt)
z.iv3[l]<-(eff.iv-NImargin)/sqrt(var.eff.iv)
z.pp3[l]<-(eff.pp-NImargin)/sqrt(var.eff.pp)
z.at3[l]<-(eff.at-NImargin)/sqrt(var.eff.at)
#generate proportions in simulated data s4
w1.vector<-(filter(s4,randomisation==1))$intervention; w1.value<-mean(w1.vector)
w0.vector<-(filter(s4,randomisation==0))$intervention; w0.value<-mean(w0.vector)
wc.value<- w1.value-w0.value
if (wc.value==0) {wc.value=wc[i]}
p11.vector<- (filter(s4,intervention==1, randomisation==1))$outcome; p11.value<-mean(p11.vector)
p00.vector<- (filter(s4,intervention==0, randomisation==0))$outcome; p00.value<-mean(p00.vector)
pz1.vector<- (filter(s4, randomisation==1))$outcome; pz1.value<-mean(pz1.vector)
pz0.vector<- (filter(s4, randomisation==0))$outcome; pz0.value<-mean(pz0.vector)
pd1.vector<- (filter(s4, intervention==1))$outcome;pd1.value<- mean(pd1.vector)
pd0.vector<- (filter(s4, intervention==0))$outcome;pd0.value<- mean(pd0.vector)
###estimate treatment effects
eff.itt<- pz1.value-pz0.value #intention to treat
eff.pp <- p11.value-p00.value #per protocol
eff.at <- pd1.value-pd0.value #as treated
ivmodel=ivreg(outcome ~ intervention, ~ randomisation, x=TRUE, data=s4)
eff.iv<-ivmodel$coef[2] #iv with 2 stage regression
###variances of proportions and effects
var.eff.itt<- pz1.value*(1-pz1.value)/length(pz1.vector) + pz0.value*(1-pz0.value)/length(pz0.vector)
var.eff.pp<- p11.value*(1-p11.value)/length(p11.vector) + p00.value*(1-p00.value)/length(p00.vector)
var.eff.at<- pd1.value*(1-pd1.value)/length(pd1.vector) + pd0.value*(1-pd0.value)/length(pd0.vector)
if (is.na(eff.iv)) {var.eff.iv<-var.eff.itt} else {var.eff.iv<-robust.se(ivmodel)[2,2]^2}
###z values
z.itt4[l]<-(eff.itt-NImargin)/sqrt(var.eff.itt)
z.iv4[l]<-(eff.iv-NImargin)/sqrt(var.eff.iv)
z.pp4[l]<-(eff.pp-NImargin)/sqrt(var.eff.pp)
z.at4[l]<-(eff.at-NImargin)/sqrt(var.eff.at)
#average the simulations
z.itt<-c(mean(z.itt1,na.rm = TRUE), mean(z.itt2,na.rm = TRUE),mean(z.itt3,na.rm = TRUE),mean(z.itt4,na.rm = TRUE))
z.at<-c(mean(z.at1,na.rm = TRUE), mean(z.at2,na.rm = TRUE),mean(z.at3,na.rm = TRUE),mean(z.at4,na.rm = TRUE))
z.pp<-c(mean(z.pp1,na.rm = TRUE), mean(z.pp2,na.rm = TRUE),mean(z.pp3,na.rm = TRUE),mean(z.pp4,na.rm = TRUE))
z.iv<-c(mean(z.iv1,na.rm = TRUE), mean(z.iv2,na.rm = TRUE),mean(z.iv3,na.rm = TRUE),mean(z.iv4,na.rm = TRUE))
}
.z.itt[[i]]<-z.itt
.z.at[[i]]<-z.at
.z.pp[[i]]<-z.pp
.z.iv[[i]]<-z.iv
}
# NOTE(review): s1..s4 persist beyond the for-loops (R loops do not create
# scope), so the gsDesign boundary below uses the look sizes from the LAST
# simulated data set; fine here because the look sizes are the same every
# iteration, but fragile if the split ever becomes data-dependent.
z<-gsDesign(k=4, n.fix=2*n, delta0 = NImargin, n.I=c(nrow(s1),nrow(s2) ,nrow(s3),nrow(s4)), maxn.IPlan = 2*n, beta=0.2)
z<-z$upper$bound
z.itt<-data.frame(t(data.frame(.z.itt))); print(z.itt)
z.at<-data.frame(t(data.frame(.z.at))); print(z.at)
z.pp<-data.frame(t(data.frame(.z.pp))); print(z.pp)
z.iv<-data.frame(t(data.frame(.z.iv))); print(z.iv)
z.c<-data.frame(cbind(wc, z.itt, z.at, z.pp, z.iv,
rep(z[1],length(wc)),rep(z[2],length(wc)),
rep(z[3],length(wc)),rep(z[4],length(wc))))
names(z.c)<-c('wc','itt1','itt2','itt3','itt4',
'at1','at2','at3','at4',
'pp1','pp2','pp3','pp4',
'iv1','iv2','iv3','iv4',
'z1','z2','z3','z4')
return(z.c)
}
z.c<- sim()
#plot Z values and boundaries against wc
# Smooth each look's Z values with a log-linear fit in wc for plotting.
z.c$pred.z.itt1<- predict(lm(itt1~log(wc), data=z.c))
z.c$pred.z.itt2<- predict(lm(itt2~log(wc), data=z.c))
z.c$pred.z.itt3<- predict(lm(itt3~log(wc), data=z.c))
z.c$pred.z.itt4<- predict(lm(itt4~log(wc), data=z.c))
z.c$pred.z.at1<- predict(lm(at1~log(wc), data=z.c))
z.c$pred.z.at2<- predict(lm(at2~log(wc), data=z.c))
z.c$pred.z.at3<- predict(lm(at3~log(wc), data=z.c))
z.c$pred.z.at4<- predict(lm(at4~log(wc), data=z.c))
z.c$pred.z.pp1<- predict(lm(pp1~log(wc), data=z.c))
z.c$pred.z.pp2<- predict(lm(pp2~log(wc), data=z.c))
z.c$pred.z.pp3<- predict(lm(pp3~log(wc), data=z.c))
z.c$pred.z.pp4<- predict(lm(pp4~log(wc), data=z.c))
z.c$pred.z.iv1<- predict(lm(iv1~log(wc), data=z.c))
z.c$pred.z.iv2<- predict(lm(iv2~log(wc), data=z.c))
z.c$pred.z.iv3<- predict(lm(iv3~log(wc), data=z.c))
z.c$pred.z.iv4<- predict(lm(iv4~log(wc), data=z.c))
# One panel per analysis set: points = simulated Z per look, solid lines =
# log-linear smooths, dotted lines = gsDesign efficacy boundaries z1..z4.
plot.z.itt<- ggplot(z.c, aes(z.c[1]))+
geom_point(aes(y = z.c[2], colour="Z value ITT 1" ))+
geom_point(aes(y = z.c[3], colour="Z value ITT 2"))+
geom_point(aes(y = z.c[4], colour="Z value ITT 3"))+
geom_point(aes(y = z.c[5], colour="Z value ITT 4"))+
geom_line(aes(y = pred.z.itt1, colour="Z value ITT 1" ))+
geom_line(aes(y = pred.z.itt2, colour="Z value ITT 2"))+
geom_line(aes(y = pred.z.itt3, colour="Z value ITT 3" ))+
geom_line(aes(y = pred.z.itt4, colour="Z value ITT 4"))+
geom_line(aes(y = z1, colour="Z value ITT 1"),linetype="dotted")+
geom_line(aes(y = z2, colour="Z value ITT 2"),linetype="dotted")+
geom_line(aes(y = z3, colour="Z value ITT 3"),linetype="dotted")+
geom_line(aes(y = z4, colour="Z value ITT 4"),linetype="dotted")+
xlab("Proportion of compliers")+
ylab("Z value")
plot.z.at<- ggplot(z.c, aes(z.c[1]))+
geom_point(aes(y = z.c[6], colour="Z value AT 1" ))+
geom_point(aes(y = z.c[7], colour="Z value AT 2"))+
geom_point(aes(y = z.c[8], colour="Z value AT 3"))+
geom_point(aes(y = z.c[9], colour="Z value AT 4"))+
geom_line(aes(y = pred.z.at1, colour="Z value AT 1" ))+
geom_line(aes(y = pred.z.at2, colour="Z value AT 2"))+
geom_line(aes(y = pred.z.at3, colour="Z value AT 3" ))+
geom_line(aes(y = pred.z.at4, colour="Z value AT 4"))+
geom_line(aes(y = z1, colour="Z value AT 1"),linetype="dotted")+
geom_line(aes(y = z2, colour="Z value AT 2"),linetype="dotted")+
geom_line(aes(y = z3, colour="Z value AT 3"),linetype="dotted")+
geom_line(aes(y = z4, colour="Z value AT 4"),linetype="dotted")+
xlab("Proportion of compliers")+
ylab("Z value")
plot.z.pp<- ggplot(z.c, aes(z.c[1]))+
geom_point(aes(y = z.c[10], colour="Z value PP 1" ))+
geom_point(aes(y = z.c[11], colour="Z value PP 2"))+
geom_point(aes(y = z.c[12], colour="Z value PP 3"))+
geom_point(aes(y = z.c[13], colour="Z value PP 4"))+
geom_line(aes(y = pred.z.pp1, colour="Z value PP 1" ))+
geom_line(aes(y = pred.z.pp2, colour="Z value PP 2"))+
geom_line(aes(y = pred.z.pp3, colour="Z value PP 3" ))+
geom_line(aes(y = pred.z.pp4, colour="Z value PP 4"))+
geom_line(aes(y = z1, colour="Z value PP 1"),linetype="dotted")+
geom_line(aes(y = z2, colour="Z value PP 2"),linetype="dotted")+
geom_line(aes(y = z3, colour="Z value PP 3"),linetype="dotted")+
geom_line(aes(y = z4, colour="Z value PP 4"),linetype="dotted")+
xlab("Proportion of compliers")+
ylab("Z value")
plot.z.iv<- ggplot(z.c, aes(z.c[1]))+
geom_point(aes(y = z.c[14], colour="Z value IV 1" ))+
geom_point(aes(y = z.c[15], colour="Z value IV 2"))+
geom_point(aes(y = z.c[16], colour="Z value IV 3"))+
geom_point(aes(y = z.c[17], colour="Z value IV 4"))+
geom_line(aes(y = pred.z.iv1, colour="Z value IV 1" ))+
geom_line(aes(y = pred.z.iv2, colour="Z value IV 2"))+
geom_line(aes(y = pred.z.iv3, colour="Z value IV 3" ))+
geom_line(aes(y = pred.z.iv4, colour="Z value IV 4"))+
geom_line(aes(y = z1, colour="Z value IV 1"),linetype="dotted")+
geom_line(aes(y = z2, colour="Z value IV 2"),linetype="dotted")+
geom_line(aes(y = z3, colour="Z value IV 3"),linetype="dotted")+
geom_line(aes(y = z4, colour="Z value IV 4"),linetype="dotted")+
xlab("Proportion of compliers")+
ylab("Z value")
plot<- ggarrange(plot.z.itt, plot.z.at, plot.z.pp, plot.z.iv, ncol = 2, nrow = 2)
return(plot)
}
# Scenario runs: how the group-sequential Z trajectories respond to sample
# size, treatment effect, the never/always-taker mix and their outcome rates.
gs1<-simdata.gs(n=230,wna=1,pc0=0.6, pc1=0.6,pa=0.6,pn=0.6,nIterations=100,NImargin=-.12)
gs2<-simdata.gs(n=300,wna=1,pc0=0.6, pc1=0.6,pa=0.6,pn=0.6,nIterations=100,NImargin=-.12) # increased sample size
gs3<-simdata.gs(n=230,wna=1,pc0=0.6, pc1=0.7,pa=0.6,pn=0.6,nIterations=100,NImargin=-.12) # increased effect of intervention
gs4<-simdata.gs(n=230,wna=3,pc0=0.6, pc1=0.6,pa=0.6,pn=0.6,nIterations=100,NImargin=-.12) # increased effect wna ratio
gs5<-simdata.gs(n=230,wna=1,pc0=0.6, pc1=0.6,pa=0.8,pn=0.6,nIterations=100,NImargin=-.12) # increased pa
gs6<-simdata.gs(n=230,wna=1,pc0=0.6, pc1=0.6,pa=0.6,pn=0.8,nIterations=100,NImargin=-.12) # increased pn
#using gsDesign package to determine sample size for REGARD-VAP
n.fix<- nBinomial (p1=0.6, p2=0.6, delta0=-0.12) # fixed-design sample size for the binomial NI comparison
x<-gsDesign(n.fix=n.fix, k=4, beta=0.2, delta0=-0.12) # inflate n.fix for a 4-look group-sequential design
ceiling(x$n.I) # cumulative sample size at each look
y<-gsProbability(theta=x$delta*seq(0,2,0.25),d=x) # boundary-crossing probabilities over a grid of effects
plot(y, plottype=6, lty=2, lwd=3)
|
6d770c9845e4e240329e91e07b6c19585ccf13b6
|
b229db462a31f45d0ea7258fd53e988f5f334fd2
|
/man/circle_points.Rd
|
e118b3647f7855c1259d89074b835d958917eca2
|
[] |
no_license
|
fdzul/hotspotr
|
08bc4384393a0f2042ff75d79628436632f59cfe
|
e370aa52c92b2e4b0d54d7825ef3865da3d14154
|
refs/heads/master
| 2020-07-15T11:44:50.753458
| 2014-07-18T14:31:04
| 2014-07-18T14:31:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 571
|
rd
|
circle_points.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{circle_points}
\alias{circle_points}
\title{Surround a set of points with a circle of points at a pre-specified
radius.}
\usage{
circle_points(x, y, np = 6, r = 0.5)
}
\arguments{
\item{x}{vector of x coordinates for points to surround}
\item{y}{vector of y coordinates for points to surround}
\item{np}{number of external points}
\item{r}{radius of the circle, measured as the distance from the
center of the set of points.}
}
\description{
Surround a set of points with a circle of points at a pre-specified
radius.
}
|
58ae017c62131e1451758b87dbef0f6b37ca233e
|
d943eb5047ad9fa312170bb644129aa8d8e420c1
|
/inst/AOV2R/AOV2R.R
|
f9955a5a5a367b1e0f5b03bdb63204d2ccd85635
|
[] |
no_license
|
stla/gfilmm
|
3e85e1da4dc6127aec16fd910ddcc4ae0be668b5
|
865f3d5854b35db3fc956e98d38883ef4681e745
|
refs/heads/master
| 2022-07-28T10:14:50.453119
| 2022-07-11T17:43:58
| 2022-07-11T17:43:58
| 150,234,446
| 0
| 1
| null | 2021-06-25T09:45:57
| 2018-09-25T08:47:25
|
C++
|
UTF-8
|
R
| false
| false
| 4,328
|
r
|
AOV2R.R
|
library(rgr)
# Simulate data from a two-factor crossed random-effects (gauge R&R style)
# model: y_ijk = mu + P_i + O_j + PO_ij + E_ijk, with I parts, J operators
# and Kij replicates per (part, operator) cell.
#
# Arguments:
#   I, J              number of levels of the two factors.
#   Kij               replicates per cell; a scalar is recycled to all I*J cells.
#   mu                grand mean.
#   sigmaP/O/PO/E     standard deviations of the random effects and error.
#   factor.names      column names for the two factors in the output.
#   resp.name         column name for the response.
#   keep.intermediate if TRUE, also return the simulated effect columns
#                     (Pi, Oj, POij, Eijk) used to build the response.
# Returns a data.frame with sum(Kij) rows.
SimAOV2R <- function(I, J, Kij, mu = 0, sigmaP = 1, sigmaO = 1, sigmaPO = 1,
                     sigmaE = 1, factor.names = c("Part", "Operator"),
                     resp.name = "y", keep.intermediate = FALSE) {
  # Recycle a scalar replication count to one entry per cell.
  if (length(Kij) == 1L) {
    Kij <- rep(Kij, I * J)
  }
  # Zero-padded labels (A1..A9, A01..A99, ...) so factor levels sort naturally.
  op_fmt <- paste0("B%0", floor(log10(J)) + 1, "d")
  part_fmt <- paste0("A%0", floor(log10(I)) + 1, "d")
  Operator <- factor(rep(sprintf(op_fmt, seq_len(J)), each = I))
  Part <- factor(rep(sprintf(part_fmt, seq_len(I)), times = J))
  # Draw the random effects on the I*J cell grid (parts vary fastest).
  Oj <- rep(rnorm(J, 0, sigmaO), each = I)
  Pi <- rep(rnorm(I, 0, sigmaP), times = J)
  POij <- rnorm(I * J, 0, sigmaPO)
  cells <- data.frame(Part, Operator, Pi, Oj, POij)
  # Expand each cell to its Kij replicates and add the residual error.
  simdata <- cells[rep(seq_len(nrow(cells)), times = Kij), ]
  Eijk <- rnorm(sum(Kij), 0, sigmaE)
  simdata <- cbind(simdata, Eijk)
  simdata[[resp.name]] <- mu + with(simdata, Oj + Pi + POij + Eijk)
  names(simdata)[1:2] <- factor.names
  if (!keep.intermediate) {
    simdata <- simdata[, c(factor.names, resp.name)]
  }
  simdata
}
# Closed-form confidence intervals for variance components of the two-factor
# crossed random-effects model simulated by SimAOV2R.
# NOTE(review): appears to implement modified-large-sample (MLS,
# Burdick & Graybill style) intervals using the unweighted sums of squares
# and the harmonic-mean cell size for unbalanced data; gammaP = part
# variance, gammaM = measurement-system variance (O + PO + E contribution),
# gammaT = total variance.  Verify the G/H/F coefficients against the
# reference derivation.
# Requires the `rgr` package for gx.sort.df, and a data frame with columns
# Part, Operator (factors) and y (numeric), e.g. the output of SimAOV2R.
confintAOV2R <- function(dat, alpha = 0.05) {
dat <- gx.sort.df(~ Operator + Part, dat)
I <- length(levels(dat$Part))
J <- length(levels(dat$Operator))
Kij <- aggregate(y ~ Part:Operator, FUN = length, data = dat)$y
Khmean <- I * J / (sum(1 / Kij))
ag <- aggregate(y ~ Part:Operator, FUN = mean, data = dat)
Ybarij <- ag$y
Ybari <- aggregate(y ~ Part, FUN = mean, data = ag)$y
Ybarj <- aggregate(y ~ Operator, FUN = mean, data = ag)$y
Ybar <- mean(Ybarij)
S2.P <- J * Khmean * crossprod(Ybari - Ybar) / (I - 1) # I=p J=o
S2.O <- I * Khmean * crossprod(Ybarj - Ybar) / (J - 1)
Ybari <- rep(Ybari, times = J)
Ybarj <- rep(Ybarj, each = I)
S2.PO <- Khmean * crossprod(Ybarij - Ybari - Ybarj + Ybar) / (I - 1) / (J - 1) #
S2.E <- crossprod(dat$y - rep(Ybarij, times = Kij)) / (sum(Kij) - I * J) # I think anova() could also be used for S2.PO and S2.E
USS <- c(S2.P, S2.O, S2.PO, S2.E)
gammaP <- (S2.P - S2.PO) / J / Khmean
gammaM <- (S2.O + (I - 1) * S2.PO + I * (Khmean - 1) * S2.E) / I / Khmean
gammaT <- (I * S2.P + J * S2.O + (I * J - I - J) * S2.PO + I * J * (Khmean - 1) * S2.E) / I / J / Khmean
# G/H: chi-square based lower/upper MLS coefficients per degrees of freedom.
G <- 1 - sapply(
c(I - 1, J - 1, (I - 1) * (J - 1), sum(Kij) - I * J),
function(d) {
d / qchisq(1 - alpha / 2, d)
}
) # n/qchisq(alpha,n) = qf(1-alpha,Inf,n)
H <- sapply(
c(I - 1, J - 1, (I - 1) * (J - 1), sum(Kij) - I * J),
function(d) {
d / qchisq(alpha / 2, d)
}
) - 1
# NOTE(review): `F` shadows the base alias for FALSE inside this function;
# harmless here but a rename (e.g. Fq) would be safer.
F <- rep(NA, 4)
F[c(1, 3)] <- qf(1 - alpha / 2, df1 = I - 1, df2 = c((I - 1) * (J - 1), J - 1))
F[c(2, 4)] <- qf(alpha / 2, df1 = I - 1, df2 = c((I - 1) * (J - 1), J - 1))
G13 <- ((F[1] - 1)^2 - G[1]^2 * F[1]^2 - H[3]^2) / F[1]
H13 <- ((1 - F[2])^2 - H[1]^2 * F[2]^2 - G[3]^2) / F[2]
VLP <- G[1]^2 * S2.P^2 + H[3]^2 * S2.PO^2 + G13 * S2.P * S2.PO
VUP <- H[1]^2 * S2.P^2 + G[3]^2 * S2.PO^2 + H13 * S2.P * S2.PO
wUSS2 <- (c(1, I - 1, I * (Khmean - 1)) * USS[2:4])^2
VLM <- crossprod(G[2:4]^2, wUSS2)
VUM <- crossprod(H[2:4]^2, wUSS2)
wUSS2 <- (c(I, J, I * J - I - J, I * J * (Khmean - 1)) * USS)^2
VLT <- crossprod(G^2, wUSS2)
VUT <- crossprod(H^2, wUSS2)
# Point estimates with lower/upper MLS bounds for each variance parameter.
out <- data.frame(
Parameter = c("gammaP", "gammaM", "gammaT"),
Estimate = c(gammaP, gammaM, gammaT),
low.bound = c(gammaP - sqrt(VLP) / J / Khmean, gammaM - sqrt(VLM) / I / Khmean, gammaT - sqrt(VLT) / I / J / Khmean),
up.bound = c(gammaP + sqrt(VUP) / J / Khmean, gammaM + sqrt(VUM) / I / Khmean, gammaT + sqrt(VUT) / I / J / Khmean)
)
attributes(out) <- c(attributes(out), alpha = alpha)
return(out)
}
# Compare three approaches on one simulated data set: closed-form MLS
# intervals, a Bayesian mixed model (rstanarm) and generalized fiducial
# inference (gfilmm).
set.seed(3141)
dat <- SimAOV2R(4, 3, 2)
confintAOV2R(dat)
library(rstanarm)
options(mc.cores = parallel::detectCores())
# Bayesian fit with weakly-informative priors on the variance components.
rsa <- stan_lmer(y ~ (1|Part) + (1|Operator) + (1|Part:Operator), data = dat,
iter = 10000,
prior_aux = cauchy(0, 5),
prior_covariance = decov(shape = 1/15, scale = 15))
tail(posterior_interval(rsa, prob = 0.95))
library(gfilmm)
library(doParallel)
cl <- makePSOCKcluster(3L)
registerDoParallel(cl)
# dat <- gx.sort.df(~ Part + Operator, dat)
# Fiducial samples at three simulation sizes; y is interval-censored by +/-0.01.
gfs <- foreach(i = c(3L,4L,5L), .combine=list, .multicombine = TRUE, .export = "gfilmm") %dopar%
gfilmm(~ cbind(y-0.01, y+0.01), ~ 1, ~ Part+Operator, data = dat[sample.int(24L),], N = 10000*i, long = FALSE)
stopCluster(cl)
lapply(gfs, gfiSummary)
|
843cfb90252c57c6cc7ebeebf77af89bb6ada036
|
ae163aa2bd49c395807ef273f0d4f6dcce2956d6
|
/man/my_csv_reader.Rd
|
c7655e501487caa2dc6dc6539bb0485ea1c2f43a
|
[] |
no_license
|
samimhirech/finalpackage
|
6361d0a24846b87e21c867a9d10f3a0a7a50b086
|
2470e0a2231397ab0cf6cfb6e405e960cea19698
|
refs/heads/master
| 2021-05-08T09:09:55.207313
| 2017-10-16T08:50:11
| 2017-10-16T08:50:11
| 107,099,665
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 283
|
rd
|
my_csv_reader.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/my_csv_reader.R
\name{my_csv_reader}
\alias{my_csv_reader}
\title{My csv reader}
\usage{
my_csv_reader(x)
}
\arguments{
\item{x}{a path}
}
\value{
this returns the csv file
}
\description{
My csv reader
}
|
f0ad83b84e1cd12bc518b55342321cab1cd76c5e
|
a3ea56f1ab54e0c3d5b794b9ab358870c3e03444
|
/man/ringo.Rd
|
665cb40ef8b951a56d43674f408a2868c909ce79
|
[] |
no_license
|
jameswagstaff/ringo
|
511acf849fac21705aa4775a10038c0d3672da64
|
f54b682eca422a47a6a97e29db53831740237200
|
refs/heads/master
| 2021-08-16T01:04:28.769229
| 2020-04-29T16:53:35
| 2020-04-29T16:53:35
| 170,023,663
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 355
|
rd
|
ringo.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ringo.R
\docType{package}
\name{ringo}
\alias{ringo}
\title{ringo: Read STAR files into R}
\description{
The ringo package reads STAR files, such as those generated by
Relion (\url{https://www2.mrc-lmb.cam.ac.uk/relion/index.php?title=Main_Page}),
into R and provides tools for manipulating them.
}
|
dd6364b2f3a7b64debc60bbd0897226dcd155521
|
808279a6a210ccf9bdc76eafbb2938b83c88aafc
|
/R/subscripts/palaeorotation_sensitivity_test.R
|
4e321cceadb9e0c36feb04db7c29063fa93aa6f1
|
[] |
no_license
|
LewisAJones/Coral_Reef_Distribution
|
a4a87261b126e0350a2a897d464e672aa34708b4
|
a746fe7ca15d22b0d385e6e200b3c6d0689ca7e9
|
refs/heads/main
| 2023-04-10T02:00:48.567855
| 2022-06-15T18:26:33
| 2022-06-15T18:26:33
| 351,208,167
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 905
|
r
|
palaeorotation_sensitivity_test.R
|
#load libraries
#sensitivity between models
library(chronosphere)
library(dplyr)
#collection_no 34647 coordinates
xy <-cbind(long=c(74.6667), lat=c(37.3333))
#approx. mid age of collection
age <- 218
#try different rotation models
models <- c("PALEOMAP", "MULLER2016", "SETON2012", "GOLONKA", "MATTHEWS2016")
#run across different models
# Palaeorotate the same point under each model and stack the results,
# tagging each row with the model name.
df <- lapply(models, function(x){
tmp <- as.data.frame(reconstruct(x = xy, age = age, model = x))
tmp$model <- as.character(x)
tmp
})
names(df) <- models
df <- bind_rows(df)
#sensitivity between plate boundaries
#load libraries
# NOTE(review): this second section reuses and OVERWRITES xy, models and df
# from the section above (and repeats the library() calls, which is
# harmless) — intended to be run interactively, section by section.
library(chronosphere)
library(dplyr)
#collection_no 34647 coordinates
# A transect of 11 points at 0.1-degree latitude steps around the collection.
xy <-cbind(long=c(rep(74.6667, 11)), lat=c(seq(37, 38, 0.1)))
#approx. mid age of collection
age <- 218
models <- c("PALEOMAP")
#run across different coordinates
df <- as.data.frame(reconstruct(x = xy, age = age, model = models))
df <- cbind.data.frame(xy, df)
|
ade7bcc5cbf1dc15f104b21afdaa215c34ea2c26
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Devore7/examples/ex13.52.Rd.R
|
15e6cbd23d531ba43472aa7911c24d07b96d190a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 160
|
r
|
ex13.52.Rd.R
|
# Minimal demo: load the Devore7 ex13.52 data set and show its structure.
library(Devore7)
### Name: ex13.52
### Title: R Data set: ex13.52
### Aliases: ex13.52
### Keywords: datasets
### ** Examples
data(ex13.52)
str(ex13.52)
|
f6e6538c708f4b71bad3d0e40ba38fd2262683a9
|
3f97fd801b7c8bdd69fb7f86373b7338b32e5d1a
|
/man/validate_naomi_population.Rd
|
3cfb9857187b76606a31d3fc77a0d445902efa29
|
[
"MIT"
] |
permissive
|
kklot/naomi.utils
|
083390022ed66194f7b93cf6daea68d3c74a650d
|
2b14594b08191625a338173284976c39d6aa7606
|
refs/heads/master
| 2023-04-04T17:55:42.064764
| 2021-03-01T09:08:47
| 2021-03-01T09:08:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 614
|
rd
|
validate_naomi_population.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_population.R
\name{validate_naomi_population}
\alias{validate_naomi_population}
\title{Validate naomi population dataset}
\usage{
validate_naomi_population(population, areas, area_level)
}
\arguments{
\item{area_level}{area level(s) at which population is supplied}
}
\value{
Invisibly TRUE or raises error.
}
\description{
Validate naomi population dataset
}
\details{
Check that:
\itemize{
\item Column names match schema
\item Population stratification has exactly area_id / sex / age_group for
each year data are supplied
}
}
|
355c6a75ee73ebb4d9c28699297d4c32776f2abb
|
b6fdf49a8a59a59e29b3ef3bd4288ec1ae30f764
|
/math.R
|
afc5390ee335e2d2cd789a91bd90833715e0053e
|
[] |
no_license
|
meenapandey500/R_Programming-
|
879d1b7fd93aecd8da93bafc4805a813b6fc5de6
|
b0ec5745c4cdccd745ccbcba8f8f0360018484f1
|
refs/heads/main
| 2023-03-25T03:21:02.679405
| 2021-03-19T09:42:41
| 2021-03-19T09:42:41
| 349,368,745
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 387
|
r
|
math.R
|
# Interactive rounding demo: read a number, offer four rounding operations,
# and print the result of the user's choice.
a <- as.numeric(readline("Enter Number a"))
cat("\n 1. round() \n2. ceiling() \n 3. floor \n 4. truncate\n")
ch <- as.integer(readline("enter your choice"))
# switch() with an integer EXPR selects the ch-th alternative, silently
# returns NULL for any other value, and errors on NA — so the original
# "Invalid choice" branch was only reachable when ch was exactly 5.
# Explicit comparisons make every out-of-range or non-numeric entry report
# "Invalid choice" instead.
if (is.na(ch)) {
  cat("\n Invalid choice")
} else if (ch == 1L) {
  # `digits` spelled out in full (the original relied on partial matching
  # of `digit=`).
  cat("\n Value of a after decimal 2 digit :", round(a, digits = 2))
} else if (ch == 2L) {
  cat("\n ceil : ", ceiling(a))
} else if (ch == 3L) {
  cat("\n floor : ", floor(a))
} else if (ch == 4L) {
  cat("\n Truncate :", trunc(a))
} else {
  cat("\n Invalid choice")
}
|
1b0e3c963d7b982a8b007481b183bcaf66a08a96
|
bd45db9efaa8770661ddb06a66ca2cfeab408a34
|
/R/api-hc-hc_add_series.R
|
56f4d7a4faf4c42ace8766cb78d01292868fac31
|
[] |
no_license
|
APKBridget/highcharter
|
a6c14524d937e731103f322cc71df261bf586c8f
|
03e947d546a126c97051ed6db7ddd427914152ad
|
refs/heads/master
| 2020-07-01T04:57:32.074899
| 2016-11-17T02:20:50
| 2016-11-17T02:20:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,070
|
r
|
api-hc-hc_add_series.R
|
#' Adding and removing series from highchart objects
#'
#' @param hc A \code{highchart} \code{htmlwidget} object.
#' @param data An R object like numeric, list, ts, xts, etc.
#' @param ... Arguments defined in \url{http://api.highcharts.com/highcharts#chart}.
#' @examples
#'
#' highchart() %>%
#' hc_add_series(data = abs(rnorm(5)), type = "column") %>%
#' hc_add_series(data = purrr::map(0:4, function(x) list(x, x)), type = "scatter", color = "blue")
#'
#' @export
hc_add_series <- function(hc, data, ...){
assertthat::assert_that(is.highchart(hc))
# S3 generic: dispatch is on `data` (the second argument), not on `hc`.
UseMethod("hc_add_series", data)
}
#' @export
hc_add_series.default <- function(hc, ...) {
  # Fallback method: append the supplied arguments as one new series entry.
  assertthat::assert_that(is.highchart(hc))
  if (getOption("highcharter.verbose"))
    message("hc_add_series.default")
  # Validate the (unevaluated) argument names against the Highcharts API.
  validate_args("add_series", eval(substitute(alist(...))))
  new_series <- list(...)
  hc$x$hc_opts$series <- c(hc$x$hc_opts$series, list(new_series))
  hc
}
#' `hc_add_series` for numeric objects
#' @param hc A \code{highchart} \code{htmlwidget} object.
#' @param data A numeric object.
#' @param ... Arguments defined in \url{http://api.highcharts.com/highcharts#chart}.
#' @export
hc_add_series.numeric <- function(hc, data, ...) {
  if (getOption("highcharter.verbose"))
    message("hc_add_series.numeric")
  # fix_1_length_data is a package helper; presumably it wraps length-one
  # input so it serializes as a JSON array — TODO confirm.
  prepared <- fix_1_length_data(data)
  hc_add_series.default(hc, data = prepared, ...)
}
#' hc_add_series for time series objects
#' @param hc A \code{highchart} \code{htmlwidget} object.
#' @param data A time series \code{ts} object.
#' @param ... Arguments defined in \url{http://api.highcharts.com/highcharts#chart}.
#' @importFrom zoo as.Date
#' @importFrom stats time
#' @export
hc_add_series.ts <- function(hc, data, ...) {
  if (getOption("highcharter.verbose"))
    message("hc_add_series.ts")
  # Convert the ts time index to Date and then to the numeric timestamps
  # the chart expects (http://stackoverflow.com/questions/29202021/).
  timestamps <- datetime_to_timestamp(zoo::as.Date(stats::time(data)))
  series <- list_parse2(data.frame(timestamps, as.vector(data)))
  hc_add_series(hc, data = series, ...)
}
#' hc_add_series for xts objects
#' @param hc A \code{highchart} \code{htmlwidget} object.
#' @param data A \code{xts} object.
#' @param ... Arguments defined in \url{http://api.highcharts.com/highcharts#chart}.
#' @importFrom xts is.xts
#' @importFrom quantmod is.OHLC
#' @export
hc_add_series.xts <- function(hc, data, ...) {
  if (getOption("highcharter.verbose"))
    message("hc_add_series.xts")
  # Open/high/low/close series are delegated to the candlestick handler.
  if (is.OHLC(data))
    return(hc_add_series.ohlc(hc, data, ...))
  timestamps <- datetime_to_timestamp(time(data))
  point_list <- list_parse2(data.frame(timestamps, as.vector(data)))
  hc_add_series(hc, data = point_list, ...)
}
#' @rdname hc_add_series.xts
#' @param type The type of way to show the \code{xts} object. Can be 'candlestick' or 'ohlc'.
#' @importFrom stringr str_extract
#' @export
hc_add_series.ohlc <- function(hc, data, type = "candlestick", ...){
  if (getOption("highcharter.verbose"))
    message("hc_add_series.xts.ohlc")
  time <- datetime_to_timestamp(time(data)) # local `time` shadows stats::time from here on
  xdf <- cbind(time, as.data.frame(data))
  xds <- list_parse2(xdf)
  # Series name: honour an explicit `name` in `...`, otherwise use the
  # instrument prefix of the first column (e.g. "AAPL" from "AAPL.Open").
  dots <- list(...)
  if (is.null(dots[["name"]])) {
    nm <- str_extract(names(data)[1], "^[A-Za-z]+")
  } else {
    nm <- dots[["name"]]
  }
  # NOTE(review): when `name` arrives via `...` it is forwarded twice
  # (explicitly as name = nm and again inside `...`) — this matches the
  # original behavior; confirm downstream tolerates the duplicate.
  hc_add_series(hc, data = xds, name = nm, type = type, ...)
}
#' hc_add_series for forecast objects
#' @param hc A \code{highchart} \code{htmlwidget} object.
#' @param data A \code{forecast} object.
#' @param addOriginal Logical value to add the original series or not.
#' @param addLevels Logical value to show predicctions bands.
#' @param fillOpacity The opacity of bands
#' @param ... Arguments defined in \url{http://api.highcharts.com/highcharts#chart}.
#' @export
hc_add_series.forecast <- function(hc, data, addOriginal = FALSE, addLevels = TRUE,
fillOpacity = 0.1, ...) {
if(getOption("highcharter.verbose"))
message("hc_add_series.forecast")
# Random id ties the prediction bands to the point-forecast series below.
rid <- random_id()
method <- data$method
# hc <- highchart() %>% hc_title(text = "LALALA")
# ... <- NULL
# Optionally plot the observed series on top (zIndex 3 > forecast > bands).
if(addOriginal)
hc <- hc_add_series(hc, data$x, name = "Series", zIndex = 3, ...)
hc <- hc_add_series(hc, data$mean, name = method, zIndex = 2, id = rid, ...)
if(addLevels){
tmf <- datetime_to_timestamp(zoo::as.Date(time(data$mean)))
nmf <- paste(method, "level", data$level)
# One arearange band per prediction level; linkedTo = rid makes the bands
# toggle together with the point-forecast series in the legend.
for (m in seq(ncol(data$upper))) { # m <- 1
# NOTE(review): data_frame() is the long-deprecated tibble constructor;
# tibble() or data.frame() would be the modern equivalent.
dfbands <- data_frame(
t = tmf,
u = as.vector(data$upper[, m]),
l = as.vector(data$lower[, m])
)
hc <- hc %>%
hc_add_series(
data = list_parse2(dfbands),
name = nmf[m],
type = "arearange",
fillOpacity = fillOpacity,
zIndex = 1,
lineWidth = 0,
linkedTo = rid,
...)
}
}
hc
}
#' hc_add_series for density objects
#' @param hc A \code{highchart} \code{htmlwidget} object.
#' @param data A \code{density} object.
#' @param ... Arguments defined in \url{http://api.highcharts.com/highcharts#chart}.
#' @export
hc_add_series.density <- function(hc, data, ...) {
  if(getOption("highcharter.verbose"))
    message("hc_add_series.density")
  # Turn the density grid into a list of {x, y} points for Highcharts.
  xy <- data.frame(x = data$x, y = data$y)
  hc_add_series(hc, data = list_parse(xy), ...)
}
#' hc_add_series for character and factor objects
#' @param hc A \code{highchart} \code{htmlwidget} object.
#' @param data A \code{character} or \code{factor} object.
#' @param ... Arguments defined in \url{http://api.highcharts.com/highcharts#chart}.
#' @export
hc_add_series.character <- function(hc, data, ...) {
  if(getOption("highcharter.verbose"))
    message("hc_add_series.character")
  # Tabulate the values and chart the counts as {name, y} pairs.
  counts <- as.data.frame(table(data), stringsAsFactors = FALSE)
  names(counts) <- c("name", "y")
  hc_add_series(hc, data = list_parse(counts), ...)
}
#' @rdname hc_add_series.character
#' @export
hc_add_series.factor <- hc_add_series.character
#' hc_add_series for data frames objects
#' @param hc A \code{highchart} \code{htmlwidget} object.
#' @param data A \code{data.frame} object.
#' @param mappings Mappings, same idea as \code{ggplot2}.
#' @param ... Arguments defined in \url{http://api.highcharts.com/highcharts#chart}.
#' @export
hc_add_series.data.frame <- function(hc, data, mappings = list(), ...) {
  if(getOption("highcharter.verbose"))
    message("hc_add_series.data.frame")
  # NOTE(review): when mappings are supplied the chart is returned
  # unchanged — mappings support appears unimplemented in this version.
  if (length(mappings) > 0) {
    return(hc)
  }
  hc_add_series(hc, data = list_parse(data), ...)
}
#' Remove series from highchart objects
#'
#' @param hc A \code{highchart} \code{htmlwidget} object.
#' @param names The names of the series to delete.
#'
#' @export
hc_rm_series <- function(hc, names = NULL) {
  stopifnot(!is.null(names))
  # Extract each series' name keeping exactly one slot per series, so the
  # logical mask stays aligned with the series list. The previous
  # map("name") %>% unlist() silently dropped series without a "name",
  # shifting the indices and deleting the wrong series.
  series_names <- vapply(
    hc$x$hc_opts$series,
    function(s) if (is.null(s[["name"]])) NA_character_ else s[["name"]],
    character(1)
  )
  # NA (unnamed series) never matches, so those series are preserved.
  hc$x$hc_opts$series[series_names %in% names] <- NULL
  hc
}
|
17d69f1bbda162a2bbdeabf28dac745af78153bc
|
f301db0962617354de8afa835e3de27c5bc6d51e
|
/man/flyhelp.Rd
|
c982bfa377f84685033b1822970f27d0c8a5f1b8
|
[] |
no_license
|
Dasonk/flydoc
|
286abb4288694b719e4daa994fdf2caadd15f359
|
26a7a467bbfecfd44884df678fbd7139cd4f4afd
|
refs/heads/master
| 2021-06-29T20:15:47.335683
| 2017-03-16T16:30:07
| 2017-03-16T16:30:07
| 6,071,319
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 691
|
rd
|
flyhelp.Rd
|
\name{flyhelp}
\alias{flyhelp}
\title{Show the documentation for a flydoc function}
\usage{
flyhelp(fun)
}
\arguments{
\item{fun}{The function to show the flydoc for}
}
\description{
This builds and shows the documentation for a function
that has been documented using flydoc
}
\examples{
myfun <- function(x, y){
x + y
}
Title(myfun) <- "My crazy function"
Description(myfun) <- "This function is a crazy function"
Arguments(myfun) <- c(x = "Value 1 to add", y = "Value 2 to add")
Return(myfun) <- "The sum of x and y"
Details(myfun) <- "This uses some pretty advanced math. You might need to read up on arithmetic"
Examples(myfun) <- "myfun(1, 2)"
\dontrun{
flyhelp(myfun)
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.