content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Model-fitting libraries: caret for train(), e1071 (backend for some caret
# models), randomForest for the stacking model.
library(caret)
library(e1071)
library(randomForest)

# Load the normalized train/validation/test CSVs for every taxonomic level
# from the two feature-set directories (PCA features and "wide" features).
# For each level/split/feature-set combination this assigns a variable such
# as sptrainPCA, spvalidWide, ..., exactly matching the names the modeling
# code below expects. Replaces 48 copy-pasted read.csv() lines.
taxon_levels <- c("sp", "group", "subgenus", "genus",
                  "subtrib", "trib", "suptrib", "subfam")
data_splits <- c("train", "valid", "test")

# NOTE(review): the setwd() side effects of the original script are kept;
# the working directory is left at the CarabidWide folder afterwards.
setwd("C:/Carabid_Data/Carabid_Data/CarabidPCA")
for (lvl in taxon_levels) {
  for (spl in data_splits) {
    assign(paste0(lvl, spl, "PCA"),
           read.csv(paste0("norm", lvl, spl, ".csv")))
  }
}

setwd("C:/Carabid_Data/Carabid_Data/CarabidWide")
for (lvl in taxon_levels) {
  for (spl in data_splits) {
    assign(paste0(lvl, spl, "Wide"),
           read.csv(paste0("norm", lvl, spl, ".csv")))
  }
}
# Reproducible tuning and model fits.
set.seed(27)
# k-nearest neighbours on the PCA feature set, tuning k over 1..25.
knn = train(SpeciesName ~.,
method = "knn",
tuneGrid = expand.grid(k = 1:25),
metric = "Accuracy",
data = sptrainPCA)
pred.knn = predict.train(knn, newdata = spvalidPCA)
knnresults = knn$results
# Row i of knn$results corresponds to k = i because tuneGrid was k = 1:25,
# so indexing by bestTune$k selects the winning row.
knnbestresults = knnresults[knn$bestTune$k,]
# Linear discriminant analysis on the PCA feature set (no tuning grid).
lda = train(SpeciesName ~.,
method = "lda",
metric = "Accuracy",
data = sptrainPCA)
pred.lda = predict.train(lda, newdata = spvalidPCA)
ldaresults = lda$results
# Naive Bayes on the PCA feature set. Fitted for comparison only; it is
# excluded from the stacked ensemble below (see commented-out pred.nbayes).
nbayes = train(SpeciesName ~.,
method = "naive_bayes",
metric = "Accuracy",
data = sptrainPCA)
pred.nbayes = predict.train(nbayes, newdata = spvalidPCA)
nbayesresults = nbayes$results
# Random forest on the "wide" feature set, tuning mtry over 1..13.
rf = train(SpeciesName ~.,
method = "rf",
tuneGrid = expand.grid(.mtry = c(1:13)),
metric = "Accuracy",
data = sptrainWide)
pred.rf = predict.train(rf, newdata = spvalidWide)
rfresults = rf$results
# NOTE(review): confirm rf$bestTune actually carries a `.mtry` column;
# caret normally names it `mtry`, in which case `$.mtry` returns NULL and
# this indexing yields zero rows.
rfbestresults = rfresults[rf$bestTune$.mtry,]
# Stacking data: validation-set predictions from each base model plus the
# true labels. Assumes the PCA and Wide validation files hold the same
# specimens in the same row order -- TODO confirm.
predDF = data.frame(pred.knn,
pred.lda,
pred.rf,
#pred.nbayes,
Actual = spvalidWide$SpeciesName,
stringsAsFactors = F)
#tuneRF(predDF[,1:3], predDF[,4], ntreeTry = 500)
# Level-2 model: random forest over the base-model predictions.
modelStack = randomForest(Actual ~.,
data = predDF,
ntree = 500,
mtry = 2)
# Test-set predictions from each base model.
test.knn = predict(knn, sptestPCA)
test.lda = predict(lda, sptestPCA)
#test.nbayes = predict(nbayes, sptestPCA)
test.rf = predict(rf, sptestWide)
testDF = data.frame(test.knn,
test.lda,
#test.nbayes,
test.rf,
Actual = sptestPCA$SpeciesName,
stringsAsFactors = F)
# Align column names with the stacking frame so modelStack can predict.
names(testDF) = names(predDF)
combPred = predict(modelStack, testDF)
# Holdout accuracies: stacked ensemble, then the KNN and RF base models.
ensembleaccuracy = combPred == testDF$Actual
length(ensembleaccuracy[ensembleaccuracy == TRUE])/nrow(testDF)
knnaccuracy = test.knn == sptestPCA$SpeciesName
length(knnaccuracy[knnaccuracy == TRUE])/nrow(sptestPCA)
# NOTE(review): test.rf was predicted from sptestWide but is compared to
# sptestPCA labels; assumes both test files share row order -- confirm.
rfaccuracy = test.rf == sptestPCA$SpeciesName
length(rfaccuracy[rfaccuracy == TRUE])/nrow(sptestPCA)
| /Ensemble_Holdout.R | no_license | Jarrett-Blair/Carabid-Learn | R | false | false | 4,793 | r | library(caret)
library(e1071)
library(randomForest)
setwd("C:/Carabid_Data/Carabid_Data/CarabidPCA")
sptrainPCA = read.csv("normsptrain.csv")
spvalidPCA = read.csv("normspvalid.csv")
sptestPCA = read.csv("normsptest.csv")
grouptrainPCA = read.csv("normgrouptrain.csv")
groupvalidPCA = read.csv("normgroupvalid.csv")
grouptestPCA = read.csv("normgrouptest.csv")
subgenustrainPCA = read.csv("normsubgenustrain.csv")
subgenusvalidPCA = read.csv("normsubgenusvalid.csv")
subgenustestPCA = read.csv("normsubgenustest.csv")
genustrainPCA = read.csv("normgenustrain.csv")
genusvalidPCA = read.csv("normgenusvalid.csv")
genustestPCA = read.csv("normgenustest.csv")
subtribtrainPCA = read.csv("normsubtribtrain.csv")
subtribvalidPCA = read.csv("normsubtribvalid.csv")
subtribtestPCA = read.csv("normsubtribtest.csv")
tribtrainPCA = read.csv("normtribtrain.csv")
tribvalidPCA = read.csv("normtribvalid.csv")
tribtestPCA = read.csv("normtribtest.csv")
suptribtrainPCA = read.csv("normsuptribtrain.csv")
suptribvalidPCA = read.csv("normsuptribvalid.csv")
suptribtestPCA = read.csv("normsuptribtest.csv")
subfamtrainPCA = read.csv("normsubfamtrain.csv")
subfamvalidPCA = read.csv("normsubfamvalid.csv")
subfamtestPCA = read.csv("normsubfamtest.csv")
setwd("C:/Carabid_Data/Carabid_Data/CarabidWide")
sptrainWide = read.csv("normsptrain.csv")
spvalidWide = read.csv("normspvalid.csv")
sptestWide = read.csv("normsptest.csv")
grouptrainWide = read.csv("normgrouptrain.csv")
groupvalidWide = read.csv("normgroupvalid.csv")
grouptestWide = read.csv("normgrouptest.csv")
subgenustrainWide = read.csv("normsubgenustrain.csv")
subgenusvalidWide = read.csv("normsubgenusvalid.csv")
subgenustestWide = read.csv("normsubgenustest.csv")
genustrainWide = read.csv("normgenustrain.csv")
genusvalidWide = read.csv("normgenusvalid.csv")
genustestWide = read.csv("normgenustest.csv")
subtribtrainWide = read.csv("normsubtribtrain.csv")
subtribvalidWide = read.csv("normsubtribvalid.csv")
subtribtestWide = read.csv("normsubtribtest.csv")
tribtrainWide = read.csv("normtribtrain.csv")
tribvalidWide = read.csv("normtribvalid.csv")
tribtestWide = read.csv("normtribtest.csv")
suptribtrainWide = read.csv("normsuptribtrain.csv")
suptribvalidWide = read.csv("normsuptribvalid.csv")
suptribtestWide = read.csv("normsuptribtest.csv")
subfamtrainWide = read.csv("normsubfamtrain.csv")
subfamvalidWide = read.csv("normsubfamvalid.csv")
subfamtestWide = read.csv("normsubfamtest.csv")
set.seed(27)
knn = train(SpeciesName ~.,
method = "knn",
tuneGrid = expand.grid(k = 1:25),
metric = "Accuracy",
data = sptrainPCA)
pred.knn = predict.train(knn, newdata = spvalidPCA)
knnresults = knn$results
knnbestresults = knnresults[knn$bestTune$k,]
lda = train(SpeciesName ~.,
method = "lda",
metric = "Accuracy",
data = sptrainPCA)
pred.lda = predict.train(lda, newdata = spvalidPCA)
ldaresults = lda$results
nbayes = train(SpeciesName ~.,
method = "naive_bayes",
metric = "Accuracy",
data = sptrainPCA)
pred.nbayes = predict.train(nbayes, newdata = spvalidPCA)
nbayesresults = nbayes$results
rf = train(SpeciesName ~.,
method = "rf",
tuneGrid = expand.grid(.mtry = c(1:13)),
metric = "Accuracy",
data = sptrainWide)
pred.rf = predict.train(rf, newdata = spvalidWide)
rfresults = rf$results
rfbestresults = rfresults[rf$bestTune$.mtry,]
predDF = data.frame(pred.knn,
pred.lda,
pred.rf,
#pred.nbayes,
Actual = spvalidWide$SpeciesName,
stringsAsFactors = F)
#tuneRF(predDF[,1:3], predDF[,4], ntreeTry = 500)
modelStack = randomForest(Actual ~.,
data = predDF,
ntree = 500,
mtry = 2)
test.knn = predict(knn, sptestPCA)
test.lda = predict(lda, sptestPCA)
#test.nbayes = predict(nbayes, sptestPCA)
test.rf = predict(rf, sptestWide)
testDF = data.frame(test.knn,
test.lda,
#test.nbayes,
test.rf,
Actual = sptestPCA$SpeciesName,
stringsAsFactors = F)
names(testDF) = names(predDF)
combPred = predict(modelStack, testDF)
ensembleaccuracy = combPred == testDF$Actual
length(ensembleaccuracy[ensembleaccuracy == TRUE])/nrow(testDF)
knnaccuracy = test.knn == sptestPCA$SpeciesName
length(knnaccuracy[knnaccuracy == TRUE])/nrow(sptestPCA)
rfaccuracy = test.rf == sptestPCA$SpeciesName
length(rfaccuracy[rfaccuracy == TRUE])/nrow(sptestPCA)
|
# Convert gap groups (row indices into a time series) into a data frame of
# timestamps.
#
# rawIndices: gap-position indices, in the form accepted by groupIndices().
# TimSer:     time series object/list with a `time` component.
# Returns a data frame with columns "Initial" and "Final" (timestamps) and
# "Number of NAs".
groupDates <- function(rawIndices, TimSer) {
  groups <- data.frame(groupIndices(rawIndices))
  names(groups) <- c("Initial", "Final", "Number of NAs")
  # Replace the start/end row indices with the corresponding timestamps.
  for (col in c("Initial", "Final")) {
    groups[[col]] <- TimSer$time[groups[[col]]]
  }
  groups
}
| /R/groupDates.R | no_license | cran/KarsTS | R | false | false | 324 | r | groupDates <-
function(rawIndices, TimSer) {
groupsMatrix <- groupIndices(rawIndices)
groupsDF <- data.frame(groupsMatrix)
colnames(groupsDF) <- c("Initial", "Final", "Number of NAs")
groupsDF$Initial <- TimSer$time[groupsDF$Initial]
groupsDF$Final <- TimSer$time[groupsDF$Final]
groupsDF
}
|
#' Add a vega axis specification to a ggvis plot
#'
#' Axis specifications allow you to either override the default axes,
#' or additional axes.
#'
#' More information about axes can be found in the "axes and legends" vignettes.
#'
#' @section Compared to ggplot2:
#'
#' In ggplot2, axis (and legend) properties are part of the scales
#' specification. In vega, they are separate, which allows the specification
#' of multiple axes, and more flexible linkage between scales and axes.
#'
#' @param vis A ggvis object.
#' @param type The type of axis. Either x or y.
#' @param scale The name of the scale backing the axis component. Defaults to
#' the scale type - you will need to specify if you want (e.g.) a scale
#' for a secondary y-axis.
#' @param orient The orientation of the axis. One of top, bottom, left or right.
#' The orientation can be used to further specialize the axis type (e.g., a y
#' axis oriented for the right edge of the chart) - defaults to bottom for
#' x axes, and left for y axes.
#' @param title A title for the axis. By default, it uses the name of the field
#' in the first data set used by the scale. Use \code{""} to suppress the
#' title.
#' @param title_offset The offset (in pixels) from the axis at which to place
#' the title.
#' @param format The formatting pattern for axis labels. Vega uses D3's format
#' pattern: \url{https://github.com/mbostock/d3/wiki/Formatting}
#' @param ticks A desired number of ticks. The resulting number may be different
#' so that values are "nice" (multiples of 2, 5, 10) and lie within the
#' underlying scale's range.
#' @param values Explicitly set the visible axis tick values.
#' @param subdivide If provided, sets the number of minor ticks between major
#' ticks (the value 9 results in decimal subdivision).
#' @param tick_padding The padding, in pixels, between ticks and text labels.
#' @param tick_size_major,tick_size_minor,tick_size_end
#' The size, in pixels, of major, minor and end ticks.
#' @param offset The offset, in pixels, by which to displace the axis from the
#' edge of the enclosing group or data rectangle.
#' @param layer A string indicating if the axis (and any gridlines) should be
#' placed above or below the data marks. One of "front" or "back" (default).
#' @param grid A flag indicating if gridlines should be created in addition to
#' ticks.
#' @param properties Optional mark property definitions for custom axis styling.
#' Should be an object created by \code{\link{axis_props}}, with properties
#' for ticks, majorTicks, minorTicks, grid, labels, title, and axis.
#' @seealso Vega axis documentation:
#' \url{https://github.com/trifacta/vega/wiki/Axes}
#' @export
#' @examples
#' mtcars %>% ggvis(x = ~wt, y = ~mpg, fill = ~cyl) %>%
#' layer_points() %>%
#' add_axis("x", title = "Weight", orient = "top")
#'
#' # Suppress axis with hide_axis
#' mtcars %>% ggvis(x = ~wt, y = ~mpg, fill = ~cyl) %>%
#' layer_points() %>%
#' hide_axis("x") %>% hide_axis("y")
#'
#' mtcars %>% ggvis(x = ~wt, y = ~mpg) %>% layer_points() %>%
#' add_axis("x", title = "Weight", ticks = 40,
#' properties = axis_props(
#' ticks = list(stroke = "red"),
#' majorTicks = list(strokeWidth = 2),
#' grid = list(stroke = "red"),
#' labels = list(
#' fill = "steelblue",
#' angle = 50,
#' fontSize = 14,
#' align = "left",
#' baseline = "middle",
#' dx = 3
#' ),
#' title = list(fontSize = 16),
#' axis = list(stroke = "#333", strokeWidth = 1.5)
#' )
#' )
add_axis <- function(vis, type, scale = type, orient = NULL, title = NULL,
                     title_offset = NULL, format = NULL, ticks = NULL,
                     values = NULL, subdivide = NULL, tick_padding = NULL,
                     tick_size_major = NULL, tick_size_minor = tick_size_major,
                     tick_size_end = tick_size_major, offset = NULL,
                     layer = "back", grid = TRUE, properties = NULL) {
  # Build the axis definition, then attach it to the plot. Arguments are
  # forwarded by name so the pairing with create_axis() is explicit.
  axis_spec <- create_axis(
    type = type, scale = scale, orient = orient, title = title,
    title_offset = title_offset, format = format, ticks = ticks,
    values = values, subdivide = subdivide, tick_padding = tick_padding,
    tick_size_major = tick_size_major, tick_size_minor = tick_size_minor,
    tick_size_end = tick_size_end, offset = offset, layer = layer,
    grid = grid, properties = properties
  )
  register_axis(vis, axis_spec)
}
#' @rdname add_axis
#' @export
hide_axis <- function(vis, scale) {
  # A hidden axis is just a minimal spec flagged with hide = TRUE; the
  # renderer skips it entirely.
  spec <- list(scale = scale, hide = TRUE)
  class(spec) <- "ggvis_axis"
  register_axis(vis, spec)
}
#' Defunct function for adding an axis
#'
#' This function has been replaced with \code{\link{add_axis}}.
#' @param ... Other arguments (ignored; calling this function always errors).
#' @export
add_guide_axis <- function(...) {
  # Deliberately errors: this API was removed in favor of add_axis().
  stop("add_guide_axis() has been replaced by add_axis().")
}
# Create an axis object.
# Internal constructor used by add_axis(): validates the constrained
# arguments and packages everything into a classed list.
create_axis <- function(type, scale = type, orient = NULL, title = NULL,
                        title_offset = NULL, format = NULL, ticks = NULL,
                        values = NULL, subdivide = NULL, tick_padding = NULL,
                        tick_size_major = NULL, tick_size_minor = tick_size_major,
                        tick_size_end = tick_size_major, offset = NULL,
                        layer = "back", grid = TRUE, properties = NULL) {
  assert_that(type %in% c("x", "y"))
  assert_that(is.string(scale))
  # x axes default to the bottom edge, y axes to the left edge.
  if (is.null(orient)) {
    orient <- switch(type, x = "bottom", y = "left")
  }
  orient <- match.arg(orient, c("top", "right", "bottom", "left"))
  assert_that(is.null(title) || is.string(title))
  # assert_that(is.number(title_offset))
  layer <- match.arg(layer, c("front", "back"))
  assert_that(is.flag(grid))
  assert_that(is.null(properties) || is.axis_props(properties))

  spec <- list(
    type = type, scale = scale, orient = orient, title = title,
    titleOffset = title_offset, format = format, ticks = ticks,
    values = values, subdivide = subdivide, tickPadding = tick_padding,
    tickSizeMajor = tick_size_major, tickSizeMinor = tick_size_minor,
    tickSizeEnd = tick_size_end, offset = offset, layer = layer,
    grid = grid, properties = properties
  )
  # compact() drops NULL entries so the spec only carries options that
  # were actually set.
  structure(compact(spec), class = "ggvis_axis")
}
# Ensure every positional scale ("x"/"y") present in the plot has an axis,
# adding default axes for any that are missing.
add_missing_axes <- function(vis) {
  covered <- vapply(vis$axes, function(ax) ax$scale, character(1))
  positional <- intersect(names(vis$scales), c("x", "y"))
  for (sc in setdiff(positional, covered)) {
    vis <- add_axis(vis, sc)
  }
  vis
}
# Some axis settings require examining the scale
# Fill in axis settings that can only be resolved by consulting the backing
# scale: currently just the default title (taken from the scale's label).
apply_axes_defaults <- function(vis) {
  vis$axes <- lapply(vis$axes, function(ax) {
    # Only supply a title when the user did not set one explicitly.
    if (is.null(ax$title)) {
      ax$title <- vis$scales[[ax$scale]]$label
    }
    ax
  })
  vis
}
#' @export
#' @export
format.ggvis_axis <- function(x, ...) {
  # Render as a "<class>" header plus one aligned "name: value" line per
  # parameter.
  params <- param_string(x, collapse = FALSE)
  labels <- format(paste0(names(params), ":"))
  body <- paste0(" ", labels, " ", format(params), collapse = "\n")
  paste0("<", class(x)[1], ">\n", body)
}
#' @export
# print method: emit the formatted representation followed by a newline.
print.ggvis_axis <- function(x, ...) {
  cat(format(x, ...), "\n", sep = "")
}
| /R/guide_axis.R | no_license | trinen/ggvis | R | false | false | 7,126 | r | #' Add a vega axis specification to a ggvis plot
#'
#' Axis specifications allow you to either override the default axes,
#' or additional axes.
#'
#' More information about axes can be found in the "axes and legends" vignettes.
#'
#' @section Compared to ggplot2:
#'
#' In ggplot2, axis (and legend) properties are part of the scales
#' specification. In vega, they are separate, which allows the specification
#' of multiple axes, and more flexible linkage between scales and axes.
#'
#' @param vis A ggvis object.
#' @param type The type of axis. Either x or y.
#' @param scale The name of the scale backing the axis component. Defaults to
#' the scale type - you will need to specify if you want (e.g.) a scale
#' for a secondary y-axis.
#' @param orient The orientation of the axis. One of top, bottom, left or right.
#' The orientation can be used to further specialize the axis type (e.g., a y
#' axis oriented for the right edge of the chart) - defaults to bottom for
#' x axes, and left for y axes.
#' @param title A title for the axis. By default, it uses the name of the field
#' in the first data set used by the scale. Use \code{""} to suppress the
#' title.
#' @param title_offset The offset (in pixels) from the axis at which to place
#' the title.
#' @param format The formatting pattern for axis labels. Vega uses D3's format
#' pattern: \url{https://github.com/mbostock/d3/wiki/Formatting}
#' @param ticks A desired number of ticks. The resulting number may be different
#' so that values are "nice" (multiples of 2, 5, 10) and lie within the
#' underlying scale's range.
#' @param values Explicitly set the visible axis tick values.
#' @param subdivide If provided, sets the number of minor ticks between major
#' ticks (the value 9 results in decimal subdivision).
#' @param tick_padding The padding, in pixels, between ticks and text labels.
#' @param tick_size_major,tick_size_minor,tick_size_end
#' The size, in pixels, of major, minor and end ticks.
#' @param offset The offset, in pixels, by which to displace the axis from the
#' edge of the enclosing group or data rectangle.
#' @param layer A string indicating if the axis (and any gridlines) should be
#' placed above or below the data marks. One of "front" or "back" (default).
#' @param grid A flag indicating if gridlines should be created in addition to
#' ticks.
#' @param properties Optional mark property definitions for custom axis styling.
#' Should be an object created by \code{\link{axis_props}}, with properties
#' for ticks, majorTicks, minorTicks, grid, labels, title, and axis.
#' @seealso Vega axis documentation:
#' \url{https://github.com/trifacta/vega/wiki/Axes}
#' @export
#' @examples
#' mtcars %>% ggvis(x = ~wt, y = ~mpg, fill = ~cyl) %>%
#' layer_points() %>%
#' add_axis("x", title = "Weight", orient = "top")
#'
#' # Suppress axis with hide_axis
#' mtcars %>% ggvis(x = ~wt, y = ~mpg, fill = ~cyl) %>%
#' layer_points() %>%
#' hide_axis("x") %>% hide_axis("y")
#'
#' mtcars %>% ggvis(x = ~wt, y = ~mpg) %>% layer_points() %>%
#' add_axis("x", title = "Weight", ticks = 40,
#' properties = axis_props(
#' ticks = list(stroke = "red"),
#' majorTicks = list(strokeWidth = 2),
#' grid = list(stroke = "red"),
#' labels = list(
#' fill = "steelblue",
#' angle = 50,
#' fontSize = 14,
#' align = "left",
#' baseline = "middle",
#' dx = 3
#' ),
#' title = list(fontSize = 16),
#' axis = list(stroke = "#333", strokeWidth = 1.5)
#' )
#' )
add_axis <- function(vis, type, scale = type, orient = NULL, title = NULL,
title_offset = NULL, format = NULL, ticks = NULL,
values = NULL, subdivide = NULL, tick_padding = NULL,
tick_size_major = NULL, tick_size_minor = tick_size_major,
tick_size_end = tick_size_major, offset = NULL,
layer = "back", grid = TRUE, properties = NULL) {
axis <- create_axis(type, scale, orient, title, title_offset, format,
ticks, values, subdivide, tick_padding,
tick_size_major, tick_size_minor, tick_size_end,
offset, layer, grid, properties)
register_axis(vis, axis)
}
#' @rdname add_axis
#' @export
hide_axis <- function(vis, scale) {
axis <- structure(list(scale = scale, hide = TRUE), class = "ggvis_axis")
register_axis(vis, axis)
}
#' Defunct function for adding an axis
#'
#' This function has been replaced with \code{\link{add_axis}}.
#' @param ... Other arguments.
#' @export
add_guide_axis <- function(...) {
stop("add_guide_axis() has been replaced by add_axis().")
}
# Create an axis object.
create_axis <- function(type, scale = type, orient = NULL, title = NULL,
title_offset = NULL, format = NULL, ticks = NULL,
values = NULL, subdivide = NULL, tick_padding = NULL,
tick_size_major = NULL, tick_size_minor = tick_size_major,
tick_size_end = tick_size_major, offset = NULL,
layer = "back", grid = TRUE, properties = NULL) {
assert_that(type %in% c("x", "y"))
assert_that(is.string(scale))
if (is.null(orient)) orient <- c(x = "bottom", y = "left")[type]
orient <- match.arg(orient, c("top", "right", "bottom", "left"))
assert_that(is.null(title) || is.string(title))
# assert_that(is.number(title_offset))
layer <- match.arg(layer, c("front", "back"))
assert_that(is.flag(grid))
assert_that(is.null(properties) || is.axis_props(properties))
structure(compact(list(
type = type, scale = scale, orient = orient, title = title,
titleOffset = title_offset, format = format, ticks = ticks,
values = values, subdivide = subdivide, tickPadding = tick_padding,
tickSizeMajor = tick_size_major, tickSizeMinor = tick_size_minor,
tickSizeEnd = tick_size_end, offset = offset, layer = layer,
grid = grid, properties = properties
)), class = "ggvis_axis")
}
add_missing_axes <- function(vis) {
axes <- vis$axes
scales <- vis$scales
present <- vapply(axes, "[[", "scale", FUN.VALUE = character(1))
missing <- setdiff(intersect(names(scales), c("x", "y")), present)
for (scale in missing) {
vis <- add_axis(vis, scale)
}
vis
}
# Some axis settings require examining the scale
apply_axes_defaults <- function(vis) {
axes <- vis$axes
scales <- vis$scales
axes <- lapply(axes, function(axis) {
scale <- scales[[axis$scale]]
# If we don't have a title, try to get it from the scale.
if (is.null(axis$title)) {
axis$title <- scale$label
}
axis
})
# Replace the original axes with the new ones
vis$axes <- axes
vis
}
#' @export
format.ggvis_axis <- function(x, ...) {
params <- param_string(x, collapse = FALSE)
param_s <- paste0(" ", format(paste0(names(params), ":")), " ", format(params),
collapse = "\n")
paste0("<", class(x)[1], ">\n", param_s)
}
#' @export
print.ggvis_axis <- function(x, ...) cat(format(x, ...), "\n", sep = "")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/part_lic.R
\docType{data}
\name{part_lic}
\alias{part_lic}
\title{Dados relativos aos participantes de licitacoes realizadas por orgaos de um ente federativo brasileiro
no periodo de 2011 a 2015}
\format{Um data frame contendo 427242 registros de 13 campos.}
\usage{
data(part_lic)
}
\description{
IMPORTANTE: Todos os campos que pudessem identificar o ente federativo, orgaos, empresas
e certames tiveram seus valores alterados e embaralhados.
}
\details{
Um data frame contendo 427242 registros de 13 campos.
part_lic - Dados relativos aos participantes das licitacoes.
Os campos cujos valores foram alterados estao identificados por um asterisco (*):
\itemize{
\item{\code{COD_LICITACAO [*]}}{ - Codigo que identifica a licitacao na base de dados.}
\item{\code{RESUMO_OBJETO}}{ - Descricao resumida do objeto da licitacao.}
\item{\code{DESC_MODALIDADE}}{ - Modalidade da licitacao.}
\item{\code{STATUS_LICITACAO}}{ - Status relativo a situacao do certame.}
\item{\code{DATA_ABERTURA_SESSAO}}{ - Data de abertura da sessão.}
\item{\code{DESC_UGR [*]}}{ - Descricao da unidade gestora responsavel pelo certame.}
\item{\code{ID_ITEM}}{ - Identificador do item do objeto a que o participante concorre para fornecer.}
\item{\code{CNPJCPF_FORNECEDORES [*]}}{ - CNPJ ou CPF do participante do certame.}
\item{\code{NOME_FORNECEDORES [*]}}{ - Nome do participante do certame.}
\item{\code{TIPO_PESSOA}}{ - Tipo de pessoa em que o participante pode ser classificado (Fisica/Juridica).}
\item{\code{VALOR_FINAL}}{ - Valor final homologado para o item do objeto a que o participante concorre para fornecer.}
\item{\code{VENCEDOR}}{ - Flag que identifica se o participante foi ou nao vencedor no certame (S/N).}
\item{\code{MICRO_PEQUENA_EMPRESA}}{ - Flag que identifica se o participante e ou nao micro empresa (S/N).}
}
}
\keyword{datasets}
| /man/part_lic.Rd | no_license | brunomssmelo/RcextTools | R | false | true | 1,942 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/part_lic.R
\docType{data}
\name{part_lic}
\alias{part_lic}
\title{Dados relativos aos participantes de licitacoes realizadas por orgaos de um ente federativo brasileiro
no periodo de 2011 a 2015}
\format{Um data frame contendo 427242 registros de 13 campos.}
\usage{
data(part_lic)
}
\description{
IMPORTANTE: Todos os campos que pudessem identificar o ente federativo, orgaos, empresas
e certames tiveram seus valores alterados e embaralhados.
}
\details{
Um data frame contendo 427242 registros de 13 campos.
part_lic - Dados relativos aos participantes das licitacoes.
Os campos cujos valores foram alterados estao identificados por um asterisco (*):
\itemize{
\item{\code{COD_LICITACAO [*]}}{ - Codigo que identifica a licitacao na base de dados.}
\item{\code{RESUMO_OBJETO}}{ - Descricao resumida do objeto da licitacao.}
\item{\code{DESC_MODALIDADE}}{ - Modalidade da licitacao.}
\item{\code{STATUS_LICITACAO}}{ - Status relativo a situacao do certame.}
\item{\code{DATA_ABERTURA_SESSAO}}{ - Data de abertura da sessão.}
\item{\code{DESC_UGR [*]}}{ - Descricao da unidade gestora responsavel pelo certame.}
\item{\code{ID_ITEM}}{ - Identificador do item do objeto a que o participante concorre para fornecer.}
\item{\code{CNPJCPF_FORNECEDORES [*]}}{ - CNPJ ou CPF do participante do certame.}
\item{\code{NOME_FORNECEDORES [*]}}{ - Nome do participante do certame.}
\item{\code{TIPO_PESSOA}}{ - Tipo de pessoa em que o participante pode ser classificado (Fisica/Juridica).}
\item{\code{VALOR_FINAL}}{ - Valor final homologado para o item do objeto a que o participante concorre para fornecer.}
\item{\code{VENCEDOR}}{ - Flag que identifica se o participante foi ou nao vencedor no certame (S/N).}
\item{\code{MICRO_PEQUENA_EMPRESA}}{ - Flag que identifica se o participante e ou nao micro empresa (S/N).}
}
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.iotdataplane_package.R
\docType{package}
\name{paws.iotdataplane-package}
\alias{paws.iotdataplane}
\alias{paws.iotdataplane-package}
\title{paws.iotdataplane: AWS IoT Data Plane}
\description{
AWS IoT-Data enables secure, bi-directional
communication between Internet-connected things (such as sensors,
actuators, embedded devices, or smart appliances) and the AWS cloud.
It implements a broker for applications and things to publish messages
over HTTP (Publish) and retrieve, update, and delete thing shadows. A
thing shadow is a persistent representation of your things and their
state in the AWS cloud.
}
\author{
\strong{Maintainer}: David Kretch \email{david.kretch@gmail.com}
Authors:
\itemize{
\item Adam Banker \email{adam.banker39@gmail.com}
}
Other contributors:
\itemize{
\item Amazon.com, Inc. [copyright holder]
}
}
\keyword{internal}
| /service/paws.iotdataplane/man/paws.iotdataplane-package.Rd | permissive | CR-Mercado/paws | R | false | true | 963 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.iotdataplane_package.R
\docType{package}
\name{paws.iotdataplane-package}
\alias{paws.iotdataplane}
\alias{paws.iotdataplane-package}
\title{paws.iotdataplane: AWS IoT Data Plane}
\description{
AWS IoT-Data enables secure, bi-directional
communication between Internet-connected things (such as sensors,
actuators, embedded devices, or smart appliances) and the AWS cloud.
It implements a broker for applications and things to publish messages
over HTTP (Publish) and retrieve, update, and delete thing shadows. A
thing shadow is a persistent representation of your things and their
state in the AWS cloud.
}
\author{
\strong{Maintainer}: David Kretch \email{david.kretch@gmail.com}
Authors:
\itemize{
\item Adam Banker \email{adam.banker39@gmail.com}
}
Other contributors:
\itemize{
\item Amazon.com, Inc. [copyright holder]
}
}
\keyword{internal}
|
# Assignment 1, part 1
# pollutantmean(): mean of one pollutant pooled across a set of monitor CSVs.
#   directory: folder containing files named 001.csv, 002.csv, ...
#   pollutant: column name to average (e.g. "sulfate" or "nitrate")
#   id:        integer vector of monitor ids to include
pollutantmean <- function(directory, pollutant, id=1:332) {
# Read in all the data: one CSV per monitor id.
basenames <- sprintf("%03d.csv", id)
filenames <- paste(directory, basenames, sep="/")
data <- lapply(filenames, read.csv)
# We want the mean over the pooled individual measurements, NOT the mean of
# per-station means. Averaging each station first and then averaging those
# would weight stations equally even though they can hold different numbers
# of measurements, giving a different result.
#
# Extract the pollutant column, drop the NAs, and repack as a plain vector.
f <- function(x, pollutant) { as.vector(na.omit(x[[pollutant]])) }
# Apply the extractor to every monitor's data frame.
vals <- lapply(data, f, pollutant)
# Combine everything into one vector via stack(), which requires the list
# to be named, so name each element after its source file first.
# NOTE(review): `mean(unlist(vals))` would be a simpler equivalent.
names(vals) <- filenames
alldata <- stack(vals)$values
mean(alldata)
} | /ProgrammingInR/ProgrammingAssignment1/pollutantmean.R | no_license | pascal226/datasciencecoursera-1 | R | false | false | 1,222 | r | # Assignment 1, part 1
# Mean of `pollutant` pooled across the monitor CSVs selected by `id`.
#
# directory: folder containing files named 001.csv, 002.csv, ...
# pollutant: name of the column to average (e.g. "sulfate", "nitrate")
# id:        integer vector of monitor ids to read
#
# Returns the mean of all individual non-NA measurements. The observations
# are pooled before averaging (this is NOT the mean of per-station means,
# which would differ when stations have unequal record counts).
pollutantmean <- function(directory, pollutant, id = 1:332) {
  files <- file.path(directory, sprintf("%03d.csv", id))
  # Extract the non-NA pollutant values from each file and pool them into a
  # single numeric vector (replaces the original names()/stack() detour).
  values <- unlist(lapply(files, function(path) {
    as.vector(na.omit(read.csv(path)[[pollutant]]))
  }))
  mean(values)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlotVsLTime.R
\name{PlotVsLTime}
\alias{PlotVsLTime}
\title{Plots A Score Along The Forecast Time With Its Confidence Interval}
\usage{
PlotVsLTime(
var,
toptitle = "",
ytitle = "",
monini = 1,
freq = 12,
nticks = NULL,
limits = NULL,
listexp = c("exp1", "exp2", "exp3"),
listobs = c("obs1", "obs2", "obs3"),
biglab = FALSE,
hlines = NULL,
leg = TRUE,
siglev = FALSE,
sizetit = 1,
show_conf = TRUE,
fileout = "output_plotvsltime.eps",
width = 8,
height = 5,
size_units = "in",
res = 100,
...
)
}
\arguments{
\item{var}{Matrix containing any Prediction Score with dimensions:\cr
(nexp/nmod, 3/4 ,nltime)\cr
or (nexp/nmod, nobs, 3/4 ,nltime).}
\item{toptitle}{Main title, optional.}
\item{ytitle}{Title of Y-axis, optional.}
\item{monini}{Starting month between 1 and 12. Default = 1.}
\item{freq}{1 = yearly, 12 = monthly, 4 = seasonal, ... Default = 12.}
\item{nticks}{Number of ticks and labels on the x-axis, optional.}
\item{limits}{c(lower limit, upper limit): limits of the Y-axis, optional.}
\item{listexp}{List of experiment names, optional.}
\item{listobs}{List of observation names, optional.}
\item{biglab}{TRUE/FALSE for presentation/paper plot. Default = FALSE.}
\item{hlines}{c(a,b, ..) Add horizontal black lines at Y-positions a,b, ...\cr
Default = NULL.}
\item{leg}{TRUE/FALSE if legend should be added or not to the plot.
Default = TRUE.}
\item{siglev}{TRUE/FALSE if significance level should replace confidence
interval.\cr
Default = FALSE.}
\item{sizetit}{Multiplicative factor to change title size, optional.}
\item{show_conf}{TRUE/FALSE to show/not confidence intervals for input
variables.}
\item{fileout}{Name of output file. Extensions allowed: eps/ps, jpeg, png,
pdf, bmp and tiff.\cr
Default = 'output_plotvsltime.eps'}
\item{width}{File width, in the units specified in the parameter size_units
(inches by default). Takes 8 by default.}
\item{height}{File height, in the units specified in the parameter
size_units (inches by default). Takes 5 by default.}
\item{size_units}{Units of the size of the device (file or window) to plot
in. Inches ('in') by default. See ?Devices and the creator function of the
corresponding device.}
\item{res}{Resolution of the device (file or window) to plot in. See
?Devices and the creator function of the corresponding device.}
\item{...}{Arguments to be passed to the method. Only accepts the following
graphical parameters:\cr
adj ann ask bg bty cex.sub cin col.axis col.lab col.main col.sub cra crt
csi cxy err family fg fig font font.axis font.lab font.main font.sub
lheight ljoin lmitre mar mex mfcol mfrow mfg mkh oma omd omi page pch plt
smo srt tck tcl usr xaxp xaxs xaxt xlog xpd yaxp yaxs yaxt ylbias ylog \cr
For more information about the parameters see `par`.}
}
\description{
Plots The Correlation (\code{Corr()}) or the Root Mean Square Error
(\code{RMS()}) between the forecasted values and their observational
counterpart or the slopes of their trends (\code{Trend()}) or the
InterQuartile Range, Maximum-Minimum, Standard Deviation or Median Absolute
Deviation of the Ensemble Members (\code{Spread()}), or the ratio between
the Ensemble Spread and the RMSE of the Ensemble Mean (\code{RatioSDRMS()})
along the forecast time for all the input experiments on the same figure
with their confidence intervals.
}
\details{
Examples of input:\cr
Model and observed output from \code{Load()} then \code{Clim()} then
\code{Ano()} then \code{Smoothing()}:\cr
(nmod, nmemb, nsdate, nltime) and (nobs, nmemb, nsdate, nltime)\cr
then averaged over the members\cr
\code{Mean1Dim(var_exp/var_obs, posdim = 2)}:\cr
(nmod, nsdate, nltime) and (nobs, nsdate, nltime)\cr
then passed through\cr
\code{Corr(exp, obs, posloop = 1, poscor = 2)} or\cr
\code{RMS(exp, obs, posloop = 1, posRMS = 2)}:\cr
(nmod, nobs, 3, nltime)\cr
would plot the correlations or RMS between each exp & each obs as a function
of the forecast time.
}
\examples{
# Load sample data as in Load() example:
example(Load)
clim <- Clim(sampleData$mod, sampleData$obs)
ano_exp <- Ano(sampleData$mod, clim$clim_exp)
ano_obs <- Ano(sampleData$obs, clim$clim_obs)
runmean_months <- 12
dim_to_smooth <- 4 # Smooth along lead-times
smooth_ano_exp <- Smoothing(ano_exp, runmean_months, dim_to_smooth)
smooth_ano_obs <- Smoothing(ano_obs, runmean_months, dim_to_smooth)
dim_to_mean <- 2 # Mean along members
required_complete_row <- 3 # Discard startdates for which there are NA leadtimes
leadtimes_per_startdate <- 60
corr <- Corr(Mean1Dim(smooth_ano_exp, dim_to_mean),
Mean1Dim(smooth_ano_obs, dim_to_mean),
compROW = required_complete_row,
limits = c(ceiling((runmean_months + 1) / 2),
leadtimes_per_startdate - floor(runmean_months / 2)))
\donttest{
PlotVsLTime(corr, toptitle = "correlations", ytitle = "correlation",
monini = 11, limits = c(-1, 2), listexp = c('CMIP5 IC3'),
listobs = c('ERSST'), biglab = FALSE, hlines = c(-1, 0, 1),
fileout = 'tos_cor.eps')
}
}
\author{
History:\cr
0.1 - 2011-03 (V. Guemas) - Original code\cr
0.2 - 2013-03 (I. Andreu-Burillo) - Introduced parameter sizetit\cr
0.3 - 2013-10 (I. Andreu-Burillo) - Introduced parameter show_conf\cr
1.0 - 2013-11 (N. Manubens) - Formatting to CRAN
}
\keyword{dynamic}
| /man/PlotVsLTime.Rd | no_license | cran/s2dverification | R | false | true | 5,455 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlotVsLTime.R
\name{PlotVsLTime}
\alias{PlotVsLTime}
\title{Plots A Score Along The Forecast Time With Its Confidence Interval}
\usage{
PlotVsLTime(
var,
toptitle = "",
ytitle = "",
monini = 1,
freq = 12,
nticks = NULL,
limits = NULL,
listexp = c("exp1", "exp2", "exp3"),
listobs = c("obs1", "obs2", "obs3"),
biglab = FALSE,
hlines = NULL,
leg = TRUE,
siglev = FALSE,
sizetit = 1,
show_conf = TRUE,
fileout = "output_plotvsltime.eps",
width = 8,
height = 5,
size_units = "in",
res = 100,
...
)
}
\arguments{
\item{var}{Matrix containing any Prediction Score with dimensions:\cr
(nexp/nmod, 3/4 ,nltime)\cr
or (nexp/nmod, nobs, 3/4 ,nltime).}
\item{toptitle}{Main title, optional.}
\item{ytitle}{Title of Y-axis, optional.}
\item{monini}{Starting month between 1 and 12. Default = 1.}
\item{freq}{1 = yearly, 12 = monthly, 4 = seasonal, ... Default = 12.}
\item{nticks}{Number of ticks and labels on the x-axis, optional.}
\item{limits}{c(lower limit, upper limit): limits of the Y-axis, optional.}
\item{listexp}{List of experiment names, optional.}
\item{listobs}{List of observation names, optional.}
\item{biglab}{TRUE/FALSE for presentation/paper plot. Default = FALSE.}
\item{hlines}{c(a,b, ..) Add horizontal black lines at Y-positions a,b, ...\cr
Default = NULL.}
\item{leg}{TRUE/FALSE if legend should be added or not to the plot.
Default = TRUE.}
\item{siglev}{TRUE/FALSE if significance level should replace confidence
interval.\cr
Default = FALSE.}
\item{sizetit}{Multiplicative factor to change title size, optional.}
\item{show_conf}{TRUE/FALSE to show/not confidence intervals for input
variables.}
\item{fileout}{Name of output file. Extensions allowed: eps/ps, jpeg, png,
pdf, bmp and tiff.\cr
Default = 'output_plotvsltime.eps'}
\item{width}{File width, in the units specified in the parameter size_units
(inches by default). Takes 8 by default.}
\item{height}{File height, in the units specified in the parameter
size_units (inches by default). Takes 5 by default.}
\item{size_units}{Units of the size of the device (file or window) to plot
in. Inches ('in') by default. See ?Devices and the creator function of the
corresponding device.}
\item{res}{Resolution of the device (file or window) to plot in. See
?Devices and the creator function of the corresponding device.}
\item{...}{Arguments to be passed to the method. Only accepts the following
graphical parameters:\cr
adj ann ask bg bty cex.sub cin col.axis col.lab col.main col.sub cra crt
csi cxy err family fg fig font font.axis font.lab font.main font.sub
lheight ljoin lmitre mar mex mfcol mfrow mfg mkh oma omd omi page pch plt
smo srt tck tcl usr xaxp xaxs xaxt xlog xpd yaxp yaxs yaxt ylbias ylog \cr
For more information about the parameters see `par`.}
}
\description{
Plots The Correlation (\code{Corr()}) or the Root Mean Square Error
(\code{RMS()}) between the forecasted values and their observational
counterpart or the slopes of their trends (\code{Trend()}) or the
InterQuartile Range, Maximum-Minimum, Standard Deviation or Median Absolute
Deviation of the Ensemble Members (\code{Spread()}), or the ratio between
the Ensemble Spread and the RMSE of the Ensemble Mean (\code{RatioSDRMS()})
along the forecast time for all the input experiments on the same figure
with their confidence intervals.
}
\details{
Examples of input:\cr
Model and observed output from \code{Load()} then \code{Clim()} then
\code{Ano()} then \code{Smoothing()}:\cr
(nmod, nmemb, nsdate, nltime) and (nobs, nmemb, nsdate, nltime)\cr
then averaged over the members\cr
\code{Mean1Dim(var_exp/var_obs, posdim = 2)}:\cr
(nmod, nsdate, nltime) and (nobs, nsdate, nltime)\cr
then passed through\cr
\code{Corr(exp, obs, posloop = 1, poscor = 2)} or\cr
\code{RMS(exp, obs, posloop = 1, posRMS = 2)}:\cr
(nmod, nobs, 3, nltime)\cr
would plot the correlations or RMS between each exp & each obs as a function
of the forecast time.
}
\examples{
# Load sample data as in Load() example:
example(Load)
clim <- Clim(sampleData$mod, sampleData$obs)
ano_exp <- Ano(sampleData$mod, clim$clim_exp)
ano_obs <- Ano(sampleData$obs, clim$clim_obs)
runmean_months <- 12
dim_to_smooth <- 4 # Smooth along lead-times
smooth_ano_exp <- Smoothing(ano_exp, runmean_months, dim_to_smooth)
smooth_ano_obs <- Smoothing(ano_obs, runmean_months, dim_to_smooth)
dim_to_mean <- 2 # Mean along members
required_complete_row <- 3 # Discard startdates for which there are NA leadtimes
leadtimes_per_startdate <- 60
corr <- Corr(Mean1Dim(smooth_ano_exp, dim_to_mean),
Mean1Dim(smooth_ano_obs, dim_to_mean),
compROW = required_complete_row,
limits = c(ceiling((runmean_months + 1) / 2),
leadtimes_per_startdate - floor(runmean_months / 2)))
\donttest{
PlotVsLTime(corr, toptitle = "correlations", ytitle = "correlation",
monini = 11, limits = c(-1, 2), listexp = c('CMIP5 IC3'),
listobs = c('ERSST'), biglab = FALSE, hlines = c(-1, 0, 1),
fileout = 'tos_cor.eps')
}
}
\author{
History:\cr
0.1 - 2011-03 (V. Guemas) - Original code\cr
0.2 - 2013-03 (I. Andreu-Burillo) - Introduced parameter sizetit\cr
0.3 - 2013-10 (I. Andreu-Burillo) - Introduced parameter show_conf\cr
1.0 - 2013-11 (N. Manubens) - Formatting to CRAN
}
\keyword{dynamic}
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width=12,
fig.height=8,
out.width="100%")
library(foster)
library(ggplot2)
library(raster)
library(knitr)
## ---- echo=FALSE, out.width="80%", fig.cap="FOSTER workflow"------------------
knitr::include_graphics("https://raw.githubusercontent.com/mqueinnec/storage/main/FOSTER/FOSTER_workflow.png")
## -----------------------------------------------------------------------------
library(foster)
library(raster)
## ---- eval = FALSE------------------------------------------------------------
# #Read single layer raster
# raster_layer <- raster("path/to/raster_layer.tif")
# #Read multi-layer raster
# raster_stack <- stack("path/to/raster_stack.tif")
## ---- eval = FALSE------------------------------------------------------------
# dem_samples <- rgdal::readOGR(dsn = "path/to/directory",
# layer = "layer_name")
## ---- eval=FALSE--------------------------------------------------------------
# #Example to write output of calcIndices to disk using different options
# ind <- calcIndices(x, indices = c("NDVI","TCG","TCW"),red = 3,nir=4,filename = "full/path/to/filename.tif")
# ind <- calcIndices(x, indices = c("NDVI","TCG","TCW"),red = 3,nir=4,filename = "full/path/to/filename", format="GTiff", overwrite=TRUE, byLayer=TRUE)
## ---- eval=FALSE--------------------------------------------------------------
# rasterOptions(tmpdir) <- "path/to/tempdir"
## -----------------------------------------------------------------------------
elev_p95 <- raster(system.file("extdata/vignette/ALS_metrics/ALS_metrics_p95.tif",package="foster"))
cover <- raster(system.file("extdata/vignette/ALS_metrics/ALS_metrics_cov_mean.tif",package="foster"))
Y_vars <- stack(elev_p95,cover)
#Set layers names
names(Y_vars) <- c("p95","cover")
Y_vars
plot(Y_vars)
## -----------------------------------------------------------------------------
spectral_2006 <- stack(system.file("extdata/vignette/Landsat_BAP/Landsat_BAP_2006.tif",package="foster"))
names(spectral_2006) <- c("blue","green","red","nir","swir1","swir2")
spectral_2006
plot(spectral_2006, col = grey.colors(255))
## -----------------------------------------------------------------------------
dem <- raster(system.file("extdata/vignette/topo/DEM.tif",package="foster"))
dem_slope <- raster(system.file("extdata/vignette/topo/DEM_slope.tif",package="foster"))
plot(dem)
plot(dem_slope)
## -----------------------------------------------------------------------------
mask_forest <- raster(system.file("extdata/vignette/landcover/VLCE_forest_2008.tif",package="foster"))
plot(mask_forest)
## -----------------------------------------------------------------------------
Y_vars_resampled <- matchResolution(x = Y_vars,
ref = spectral_2006,
method='bilinear')
Y_vars_resampled
## -----------------------------------------------------------------------------
filt <- matrix(1,nrow=3,ncol=3)
Y_vars_smooth <- focalMultiBand(Y_vars_resampled,
w=filt,
fun=mean,
pad=TRUE,
padValue=NA,
na.rm=TRUE,
keepNA = TRUE)
plot(Y_vars_smooth)
## ---- out.width="100%"--------------------------------------------------------
Y_vars_mask <- matchExtent(Y_vars_smooth,
mask_forest,
mask=TRUE,
maskValue = NA)
plot(Y_vars_mask)
# We do the same with the DEM and slope
dem_mask <- matchExtent(dem,
mask_forest,
mask = TRUE)
dem_slope_mask <- matchExtent(dem_slope,
mask_forest,
mask = TRUE)
## -----------------------------------------------------------------------------
indices_list <- c("NDVI","TCB","TCW","TCG")
# Example for one year
VI_2006 <- calcIndices(spectral_2006,
indices = indices_list,
sat="Landsat5TM",
red=3,
nir=4)
plot(VI_2006[["TCG"]])
## -----------------------------------------------------------------------------
# List all filenames in time-series order: 2006 to 2008
spectral_ts_files <- list(system.file("extdata/vignette/Landsat_BAP/Landsat_BAP_2006.tif",package = "foster"),
system.file("extdata/vignette/Landsat_BAP/Landsat_BAP_2007.tif",package = "foster"),
system.file("extdata/vignette/Landsat_BAP/Landsat_BAP_2008.tif",package = "foster"))
# Open all Landsat images and place them in a list
spectral_ts <- lapply(spectral_ts_files, stack)
spectral_ts
## -----------------------------------------------------------------------------
VI_ts <- calcIndices(spectral_ts,
indices = indices_list,
sat="Landsat5TM",
red=3,
nir=4)
# The output is a list with VI for each year
VI_ts
## -----------------------------------------------------------------------------
plot(VI_ts[[1]])
## -----------------------------------------------------------------------------
VI_ts_smooth <- focalMultiBand(VI_ts,
w=filt,
fun=mean,
na.rm=TRUE,
pad = TRUE,
keepNA = TRUE)
## -----------------------------------------------------------------------------
plot(VI_ts_smooth[[1]])
## -----------------------------------------------------------------------------
funSummary <- function(x){
  # Summary metrics for a time series, passed by name to temporalMetrics():
  # the median and interquartile range (both ignoring NA values) plus the
  # Theil-Sen slope of the series (foster's theilSen()).
  med_val   <- median(x, na.rm = TRUE)
  iqr_val   <- IQR(x, na.rm = TRUE)
  slope_val <- theilSen(x)
  c(median = med_val, IQR = iqr_val, slope = slope_val)
}
## -----------------------------------------------------------------------------
VI_ts_metrics <- temporalMetrics(VI_ts,
metrics="funSummary")
## -----------------------------------------------------------------------------
VI_ts_metrics
plot(VI_ts_metrics[["NDVI_median"]])
## -----------------------------------------------------------------------------
Y_vars_edges <- edges(Y_vars_mask,
w=3)
plot(Y_vars_edges)
## -----------------------------------------------------------------------------
nSamples = 230
nClasses = 5
mindist = 75
set.seed(1234) #For example reproducibility
sample_strata <- getSample(Y_vars_edges,
layers = c("p95","cover"),
n = nSamples,
strata = nClasses,
mindist = mindist,
norm = TRUE,
xy = TRUE)
## -----------------------------------------------------------------------------
# Sampled points
sampleLoc <- sample_strata$sample
# Map of strata
strata_map <- sample_strata$clusterMap
# k-NN model
kmeans_model <- sample_strata$model
## -----------------------------------------------------------------------------
plot(strata_map)
plot(sampleLoc,add=TRUE)
## -----------------------------------------------------------------------------
# Predictor variables
X_vars <- stack(VI_ts_metrics,
dem_mask,
dem_slope_mask)
# Response variables
Y_vars <- Y_vars_mask
## -----------------------------------------------------------------------------
# Extract values at sample
X_vars_sample <- getSampleValues(X_vars, sampleLoc)
Y_vars_sample <- getSampleValues(Y_vars, sampleLoc)
X_vars_sample
Y_vars_sample
## -----------------------------------------------------------------------------
#Create data partition
set.seed(1234) #for example reproducibility
train_idx <- partition(sampleLoc$cluster,
type="kfold",
kfold = 5,
returnTrain = TRUE)
train_idx
## -----------------------------------------------------------------------------
set.seed(1234) #for example reproducibility
kNN <- trainNN(x = X_vars_sample,
y=Y_vars_sample,
inTrain = train_idx,
k = 1,
method = "randomForest",
ntree = 200)
## -----------------------------------------------------------------------------
kNN_model <- kNN$model
kNN_preds <- kNN$preds
head(kNN_preds, 10)
## -----------------------------------------------------------------------------
accuracy(obs = kNN_preds$obs,
preds = kNN_preds$preds,
vars = kNN_preds$variable,
folds = kNN_preds$Fold)
## -----------------------------------------------------------------------------
plots_scatter <- scatter(obs = kNN_preds$obs,
preds = kNN_preds$preds,
vars = kNN_preds$variable)
plots_scatter$cov
plots_scatter$p95
## -----------------------------------------------------------------------------
imp <- varImp(kNN_model,scaled=FALSE,plot=TRUE,plotType="boxplot")
imp$plot
imp <- varImp(kNN$model,scaled=TRUE,plot=TRUE,plotType="grid")
imp$plot
## -----------------------------------------------------------------------------
Y_imputed <- predictTrgs(model=kNN_model,
x = X_vars,
nnID = TRUE,
nnDist = TRUE)
Y_imputed
## -----------------------------------------------------------------------------
plot(Y_imputed$p95)
plot(Y_imputed$cover)
plot(Y_imputed$nnID1,col=rainbow(length(unique(Y_imputed$nnID1))))
plot(Y_imputed$nnDist1)
| /foster/inst/doc/foster-example.R | no_license | albrizre/spatstat.revdep | R | false | false | 9,642 | r | ## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width=12,
fig.height=8,
out.width="100%")
library(foster)
library(ggplot2)
library(raster)
library(knitr)
## ---- echo=FALSE, out.width="80%", fig.cap="FOSTER workflow"------------------
knitr::include_graphics("https://raw.githubusercontent.com/mqueinnec/storage/main/FOSTER/FOSTER_workflow.png")
## -----------------------------------------------------------------------------
library(foster)
library(raster)
## ---- eval = FALSE------------------------------------------------------------
# #Read single layer raster
# raster_layer <- raster("path/to/raster_layer.tif")
# #Read multi-layer raster
# raster_stack <- stack("path/to/raster_stack.tif")
## ---- eval = FALSE------------------------------------------------------------
# dem_samples <- rgdal::readOGR(dsn = "path/to/directory",
# layer = "layer_name")
## ---- eval=FALSE--------------------------------------------------------------
# #Example to write output of calcIndices to disk using different options
# ind <- calcIndices(x, indices = c("NDVI","TCG","TCW"),red = 3,nir=4,filename = "full/path/to/filename.tif")
# ind <- calcIndices(x, indices = c("NDVI","TCG","TCW"),red = 3,nir=4,filename = "full/path/to/filename", format="GTiff", overwrite=TRUE, byLayer=TRUE)
## ---- eval=FALSE--------------------------------------------------------------
# rasterOptions(tmpdir) <- "path/to/tempdir"
## -----------------------------------------------------------------------------
elev_p95 <- raster(system.file("extdata/vignette/ALS_metrics/ALS_metrics_p95.tif",package="foster"))
cover <- raster(system.file("extdata/vignette/ALS_metrics/ALS_metrics_cov_mean.tif",package="foster"))
Y_vars <- stack(elev_p95,cover)
#Set layers names
names(Y_vars) <- c("p95","cover")
Y_vars
plot(Y_vars)
## -----------------------------------------------------------------------------
spectral_2006 <- stack(system.file("extdata/vignette/Landsat_BAP/Landsat_BAP_2006.tif",package="foster"))
names(spectral_2006) <- c("blue","green","red","nir","swir1","swir2")
spectral_2006
plot(spectral_2006, col = grey.colors(255))
## -----------------------------------------------------------------------------
dem <- raster(system.file("extdata/vignette/topo/DEM.tif",package="foster"))
dem_slope <- raster(system.file("extdata/vignette/topo/DEM_slope.tif",package="foster"))
plot(dem)
plot(dem_slope)
## -----------------------------------------------------------------------------
mask_forest <- raster(system.file("extdata/vignette/landcover/VLCE_forest_2008.tif",package="foster"))
plot(mask_forest)
## -----------------------------------------------------------------------------
Y_vars_resampled <- matchResolution(x = Y_vars,
ref = spectral_2006,
method='bilinear')
Y_vars_resampled
## -----------------------------------------------------------------------------
filt <- matrix(1,nrow=3,ncol=3)
Y_vars_smooth <- focalMultiBand(Y_vars_resampled,
w=filt,
fun=mean,
pad=TRUE,
padValue=NA,
na.rm=TRUE,
keepNA = TRUE)
plot(Y_vars_smooth)
## ---- out.width="100%"--------------------------------------------------------
Y_vars_mask <- matchExtent(Y_vars_smooth,
mask_forest,
mask=TRUE,
maskValue = NA)
plot(Y_vars_mask)
# We do the same with the DEM and slope
dem_mask <- matchExtent(dem,
mask_forest,
mask = TRUE)
dem_slope_mask <- matchExtent(dem_slope,
mask_forest,
mask = TRUE)
## -----------------------------------------------------------------------------
indices_list <- c("NDVI","TCB","TCW","TCG")
# Example for one year
VI_2006 <- calcIndices(spectral_2006,
indices = indices_list,
sat="Landsat5TM",
red=3,
nir=4)
plot(VI_2006[["TCG"]])
## -----------------------------------------------------------------------------
# List all filenames in time-series order: 2006 to 2008
spectral_ts_files <- list(system.file("extdata/vignette/Landsat_BAP/Landsat_BAP_2006.tif",package = "foster"),
system.file("extdata/vignette/Landsat_BAP/Landsat_BAP_2007.tif",package = "foster"),
system.file("extdata/vignette/Landsat_BAP/Landsat_BAP_2008.tif",package = "foster"))
# Open all Landsat images and place them in a list
spectral_ts <- lapply(spectral_ts_files, stack)
spectral_ts
## -----------------------------------------------------------------------------
VI_ts <- calcIndices(spectral_ts,
indices = indices_list,
sat="Landsat5TM",
red=3,
nir=4)
# The output is a list with VI for each year
VI_ts
## -----------------------------------------------------------------------------
plot(VI_ts[[1]])
## -----------------------------------------------------------------------------
VI_ts_smooth <- focalMultiBand(VI_ts,
w=filt,
fun=mean,
na.rm=TRUE,
pad = TRUE,
keepNA = TRUE)
## -----------------------------------------------------------------------------
plot(VI_ts_smooth[[1]])
## -----------------------------------------------------------------------------
funSummary <- function(x){
  # Summary metrics for a time series, passed by name to temporalMetrics():
  # the median and interquartile range (both ignoring NA values) plus the
  # Theil-Sen slope of the series (foster's theilSen()).
  med_val   <- median(x, na.rm = TRUE)
  iqr_val   <- IQR(x, na.rm = TRUE)
  slope_val <- theilSen(x)
  c(median = med_val, IQR = iqr_val, slope = slope_val)
}
## -----------------------------------------------------------------------------
VI_ts_metrics <- temporalMetrics(VI_ts,
metrics="funSummary")
## -----------------------------------------------------------------------------
VI_ts_metrics
plot(VI_ts_metrics[["NDVI_median"]])
## -----------------------------------------------------------------------------
Y_vars_edges <- edges(Y_vars_mask,
w=3)
plot(Y_vars_edges)
## -----------------------------------------------------------------------------
nSamples = 230
nClasses = 5
mindist = 75
set.seed(1234) #For example reproducibility
sample_strata <- getSample(Y_vars_edges,
layers = c("p95","cover"),
n = nSamples,
strata = nClasses,
mindist = mindist,
norm = TRUE,
xy = TRUE)
## -----------------------------------------------------------------------------
# Sampled points
sampleLoc <- sample_strata$sample
# Map of strata
strata_map <- sample_strata$clusterMap
# k-NN model
kmeans_model <- sample_strata$model
## -----------------------------------------------------------------------------
plot(strata_map)
plot(sampleLoc,add=TRUE)
## -----------------------------------------------------------------------------
# Predictor variables
X_vars <- stack(VI_ts_metrics,
dem_mask,
dem_slope_mask)
# Response variables
Y_vars <- Y_vars_mask
## -----------------------------------------------------------------------------
# Extract values at sample
X_vars_sample <- getSampleValues(X_vars, sampleLoc)
Y_vars_sample <- getSampleValues(Y_vars, sampleLoc)
X_vars_sample
Y_vars_sample
## -----------------------------------------------------------------------------
#Create data partition
set.seed(1234) #for example reproducibility
train_idx <- partition(sampleLoc$cluster,
type="kfold",
kfold = 5,
returnTrain = TRUE)
train_idx
## -----------------------------------------------------------------------------
set.seed(1234) #for example reproducibility
kNN <- trainNN(x = X_vars_sample,
y=Y_vars_sample,
inTrain = train_idx,
k = 1,
method = "randomForest",
ntree = 200)
## -----------------------------------------------------------------------------
kNN_model <- kNN$model
kNN_preds <- kNN$preds
head(kNN_preds, 10)
## -----------------------------------------------------------------------------
accuracy(obs = kNN_preds$obs,
preds = kNN_preds$preds,
vars = kNN_preds$variable,
folds = kNN_preds$Fold)
## -----------------------------------------------------------------------------
plots_scatter <- scatter(obs = kNN_preds$obs,
preds = kNN_preds$preds,
vars = kNN_preds$variable)
plots_scatter$cov
plots_scatter$p95
## -----------------------------------------------------------------------------
imp <- varImp(kNN_model,scaled=FALSE,plot=TRUE,plotType="boxplot")
imp$plot
imp <- varImp(kNN$model,scaled=TRUE,plot=TRUE,plotType="grid")
imp$plot
## -----------------------------------------------------------------------------
Y_imputed <- predictTrgs(model=kNN_model,
x = X_vars,
nnID = TRUE,
nnDist = TRUE)
Y_imputed
## -----------------------------------------------------------------------------
plot(Y_imputed$p95)
plot(Y_imputed$cover)
plot(Y_imputed$nnID1,col=rainbow(length(unique(Y_imputed$nnID1))))
plot(Y_imputed$nnDist1)
|
# Exercise 4: external data sets: Gates Foundation Educational Grants

# Use the `read.csv()` function to read the data from the `data/gates_money.csv`
# file into a variable called `grants`.
# Be sure to set your working directory in RStudio, and do NOT treat strings as
# factors!
grants <- read.csv("data/gates_money.csv", stringsAsFactors = FALSE)

# Use the View function to look at the loaded data
View(grants)

# Create a variable `organization` that contains the `organization` column of
# the dataset
organization <- grants$organization

# Confirm that the "organization" column is a vector using the `is.vector()`
# function.
# This is a useful debugging tip if you hit errors later!
is.vector(organization)

## Now you can ask some interesting questions about the dataset

# What was the mean grant value?
mean(grants$total_amount)

# What was the dollar amount of the largest grant?
max(grants$total_amount)

# What was the dollar amount of the smallest grant?
min(grants$total_amount)

# Which organization received the largest grant?
grants[grants$total_amount == max(grants$total_amount), "organization"]

# Which organization received the smallest grant?
grants[grants$total_amount == min(grants$total_amount), "organization"]

# How many grants were awarded in 2010?
# Count the matching rows; summing `total_amount` (as before) would give the
# dollar total disbursed in 2010, not the number of grants.
sum(grants$start_year == 2010)
| /chapter-10-exercises/exercise-4/exercise.R | permissive | zpuiy/book-exercises | R | false | false | 1,368 | r | # Exercise 4: external data sets: Gates Foundation Educational Grants
# Use the `read.csv()` function to read the data from the `data/gates_money.csv`
# file into a variable called `grants`.
# Be sure to set your working directory in RStudio, and do NOT treat strings as
# factors!
grants <- read.csv("data/gates_money.csv", stringsAsFactors = FALSE)

# Use the View function to look at the loaded data
View(grants)

# Create a variable `organization` that contains the `organization` column of
# the dataset
organization <- grants$organization

# Confirm that the "organization" column is a vector using the `is.vector()`
# function.
# This is a useful debugging tip if you hit errors later!
is.vector(organization)

## Now you can ask some interesting questions about the dataset

# What was the mean grant value?
mean(grants$total_amount)

# What was the dollar amount of the largest grant?
max(grants$total_amount)

# What was the dollar amount of the smallest grant?
min(grants$total_amount)

# Which organization received the largest grant?
grants[grants$total_amount == max(grants$total_amount), "organization"]

# Which organization received the smallest grant?
grants[grants$total_amount == min(grants$total_amount), "organization"]

# How many grants were awarded in 2010?
# Count the matching rows; summing `total_amount` (as before) would give the
# dollar total disbursed in 2010, not the number of grants.
sum(grants$start_year == 2010)
|
\name{enaR-package}
\alias{enaR-package}
\alias{enaR}
\docType{package}
\title{
Tools for ecological network analysis (ena) in R.
}
\description{
This package compiles functions for the analysis of ecological networks,
building on tools previously developed in the MatLab language (Fath and
Borrett 2006) with multiple additions of functionality.
}
\details{
\tabular{ll}{
Package: \tab enaR \cr
Type: \tab Package\cr
Version: \tab 1.00\cr
Date: \tab 2012-10-03\cr
License: \tab GPL-2\cr
}
}
\author{
Authors: Stuart R. Borrett (borretts@uncw.edu) and Matthew K. Lau (mkl48@nau.edu)
Maintainer: Matthew K. Lau <mkl48@nau.edu>
}
\references{
Fath BD, Borrett SR. 2006. A Matlab Function for Network Environ
Analysis. Environmental Modelling and Software 21, 375-405.
}
\keyword{ package }
\seealso{
% ~~ Optional links to other man pages, e.g. ~~
% ~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
% \code{\link[network:statnet-package]{statnet}}
%\code{\link[igraph:igraph-package]{igraph}}
\code{\link[network:network-package]{network}}
}
\examples{
}
| /src/debugging/diff/enaR_100/man/enaR-package.Rd | no_license | SEELab/enaR_development | R | false | false | 1,050 | rd | \name{enaR-package}
\alias{enaR-package}
\alias{enaR}
\docType{package}
\title{
Tools for ecological network analysis (ena) in R.
}
\description{
This package compiles functions for the analysis of ecological networks,
building on tools previously developed in the MatLab language (Fath and
Borrett 2006) with multiple additions of functionality.
}
\details{
\tabular{ll}{
Package: \tab enaR \cr
Type: \tab Package\cr
Version: \tab 1.00\cr
Date: \tab 2012-10-03\cr
License: \tab GPL-2\cr
}
}
\author{
Authors: Stuart R. Borrett (borretts@uncw.edu) and Matthew K. Lau (mkl48@nau.edu)
Maintainer: Matthew K. Lau <mkl48@nau.edu>
}
\references{
Fath BD, Borrett SR. 2006. A Matlab Function for Network Environ
Analysis. Environmental Modelling and Software 21, 375-405.
}
\keyword{ package }
\seealso{
% ~~ Optional links to other man pages, e.g. ~~
% ~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
% \code{\link[network:statnet-package]{statnet}}
%\code{\link[igraph:igraph-package]{igraph}}
\code{\link[network:network-package]{network}}
}
\examples{
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/engine_docs.R
\name{update_model_info_file}
\alias{update_model_info_file}
\title{Save information about models}
\usage{
update_model_info_file(path = "inst/models.tsv")
}
\arguments{
\item{path}{A character string for the location of the tab delimited file.}
}
\description{
This function writes a tab delimited file to the package to capture
information about the known models. This information includes packages in
the tidymodels GitHub repository as well as packages that are known to work
well with tidymodels packages (e.g. not only \pkg{parsnip} but also
\pkg{tune}, etc.). There may be more model definitions in other extension
packages that are not included here.
These data are used to document engines for each model function man page.
}
\details{
See our
\href{https://tidymodels.github.io/model-implementation-principles/}{model implementation guidelines}
on best practices for modeling and modeling packages.
It is highly recommended that the known parsnip extension packages are loaded.
The unexported \pkg{parsnip} function \code{extensions()} will list these.
}
\keyword{internal}
| /man/update_model_info_file.Rd | permissive | tidymodels/parsnip | R | false | true | 1,178 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/engine_docs.R
\name{update_model_info_file}
\alias{update_model_info_file}
\title{Save information about models}
\usage{
update_model_info_file(path = "inst/models.tsv")
}
\arguments{
\item{path}{A character string for the location of the tab delimited file.}
}
\description{
This function writes a tab delimited file to the package to capture
information about the known models. This information includes packages in
the tidymodels GitHub repository as well as packages that are known to work
well with tidymodels packages (e.g. not only \pkg{parsnip} but also
\pkg{tune}, etc.). There may be more model definitions in other extension
packages that are not included here.
These data are used to document engines for each model function man page.
}
\details{
See our
\href{https://tidymodels.github.io/model-implementation-principles/}{model implementation guidelines}
on best practices for modeling and modeling packages.
It is highly recommended that the known parsnip extension packages are loaded.
The unexported \pkg{parsnip} function \code{extensions()} will list these.
}
\keyword{internal}
|
stmv_interpolate_function_lookup = function( modelengine ) {
  # Map the name of an interpolation model engine to the corresponding
  # stmv__* modelling function (a wrapper to copy the interpolating
  # function as a generic script).
  #   modelengine : character scalar naming the engine, e.g. "gam", "fft"
  # Returns the matching stmv__* function; stops with an informative error
  # when the engine name is not recognised.  Note switch() evaluates only
  # the selected alternative and returns NULL (invisibly) on no match.
  local_fn = switch( modelengine,
    akima = stmv__akima,
    bayesx = stmv__bayesx,
    constant = stmv__constant,
    fft = stmv__fft,
    gaussianprocess2Dt = stmv__gaussianprocess2Dt,
    gam = stmv__gam,
    glm = stmv__glm,
    gstat = stmv__gstat,
    krige = stmv__krige,
    kernel = stmv__kernel,
    linear = stmv__linear,
    tps = stmv__tps,
    carstm = stmv__carstm,
    twostep = stmv__twostep
  )
  if ( is.null(local_fn) ) {
    # Raise a single error carrying the message (previously a message()
    # followed by an empty stop(), which produced an uninformative error).
    stop( "Interpolation module: ", modelengine, " not found.", call. = FALSE )
  }
  return( local_fn )
}
| /R/stmv_interpolate_function_lookup.R | permissive | jae0/stmv | R | false | false | 695 | r |
stmv_interpolate_function_lookup = function( modelengine ) {
# wrapper to copy interpolating function as a generic script
local_fn = NULL
local_fn = switch( modelengine,
akima = stmv__akima,
bayesx = stmv__bayesx,
constant = stmv__constant,
fft = stmv__fft,
gaussianprocess2Dt = stmv__gaussianprocess2Dt,
gam = stmv__gam,
glm = stmv__glm,
gstat = stmv__gstat,
krige = stmv__krige,
kernel = stmv__kernel,
linear = stmv__linear,
tps = stmv__tps,
carstm = stmv__carstm,
twostep = stmv__twostep
)
if ( is.null(local_fn) ) {
message( "Interpolation module: ", modelengine, " not found." )
stop ()
}
return( local_fn )
}
|
# Unsupervised learning: k-means clustering
# Generate some example data for clustering: two groups centred at -3 and +3
tmp <- c(rnorm(30, mean = -3), rnorm(30, mean = 3))
x <- cbind(x = tmp, y = rev(tmp))
plot(x)
# Use the kmeans() function setting k to 2 and nstart=20
# Inspect/print the results
# Q. How many points are in each cluster?
# Q. What 'component' of your result object details
#    - cluster size?
#    - cluster assignment/membership?
#    - cluster center?
# Plot x colored by the kmeans cluster assignment and
# add cluster centers as blue points
# Q. Repeat for k=3, which one has the better total SS?
| /Class 08/Class 08.R | no_license | mtomaneng/Bioinformatics-Lab-143 | R | false | false | 560 | r | #Unstructured learning: K clustering
# Generate some example data for clustering: 30 points around -3 and 30 around
# +3; rev() pairs them so x and y are anti-correlated, giving two diagonal clusters
tmp <- c(rnorm(30,-3), rnorm(30,3))
x <- cbind(x=tmp, y=rev(tmp))
plot(x)
# Use the kmeans() function setting k to 2 and nstart=20
# Inspect/print the results
# Q. How many points are in each cluster?
# Q. What 'component' of your result object details
# - cluster size?
# - cluster assignment/membership?
# - cluster center?
# Plot x colored by the kmeans cluster assignment and
# add cluster centers as blue points
# Q. Repeat for k=3, which one has the better total SS?
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{transit_ridership}
\alias{transit_ridership}
\title{Transit ridership in the Chicago region, 1980-2019}
\format{
A tibble. 200 rows and 3 variables
\describe{
\item{year}{Double. Year of data}
\item{system}{Char. Name of system (includes CTA bus, CTA rail, Metra, Pace, and Pace ADA)}
\item{ridership}{Double. Annual unlinked passenger trips in millions}
}
}
\source{
Regional Transportation Authority \url{http://www.rtams.org/rtams/systemRidership.jsp}
}
\usage{
transit_ridership
}
\description{
A test dataset containing 1980-2019 transit ridership for the three service
boards that provide transit in Northeastern Illinois.
}
\examples{
# A line graph
ggplot(transit_ridership,aes(x = year,y=ridership,group=system,color=system)) +
geom_line(na.rm=TRUE)
}
\keyword{datasets}
| /man/transit_ridership.Rd | permissive | CMAP-REPOS/cmapplot | R | false | true | 899 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{transit_ridership}
\alias{transit_ridership}
\title{Transit ridership in the Chicago region, 1980-2019}
\format{
A tibble. 200 rows and 3 variables
\describe{
\item{year}{Double. Year of data}
\item{system}{Char. Name of system (includes CTA bus, CTA rail, Metra, Pace, and Pace ADA)}
\item{ridership}{Double. Annual unlinked passenger trips in millions}
}
}
\source{
Regional Transportation Authority \url{http://www.rtams.org/rtams/systemRidership.jsp}
}
\usage{
transit_ridership
}
\description{
A test dataset containing 1980-2019 transit ridership for the three service
boards that provide transit in Northeastern Illinois.
}
\examples{
# A line graph
ggplot(transit_ridership,aes(x = year,y=ridership,group=system,color=system)) +
geom_line(na.rm=TRUE)
}
\keyword{datasets}
|
#Datatypes in R
#Atomic Datatypes: assign a value, then inspect its storage mode with typeof()
#Integer Datatype (the L suffix forces integer rather than double)
x <- 2L
typeof(x)
#Double Datatype (default numeric type)
y <- 2.5
typeof(y)
#Complex (imaginary part written with i)
z <- 3+2i
typeof(z)
#Character (quoted text)
a <- "h"
typeof(a)
#Logical (note: T is a reassignable shortcut for TRUE; spelling out TRUE is safer)
q1 <- T
typeof(q1)
q2 <- FALSE
typeof(q2) | /Datatypes.R | no_license | nitinakash2017/R-Programming | R | false | false | 221 | r |
# Atomic data types in R: one example per typeof() result.
# integer: whole numbers need the L suffix
x <- 2L
typeof(x)
# double: the default numeric type
y <- 2.5
typeof(y)
# complex: written with an imaginary part
z <- 3 + 2i
typeof(z)
# character: quoted text
a <- "h"
typeof(a)
# logical: spelled out as TRUE/FALSE (clearer than the T/F shortcuts)
q1 <- TRUE
typeof(q1)
q2 <- FALSE
typeof(q2)
## Plot 1: red histogram of household global active power for 1-2 Feb 2007
library(sqldf)
# Pull just the two target dates straight from the raw file with a SQL filter
power <- read.csv.sql(
  "household_power_consumption.txt",
  sql = "select * from file where Date == '1/2/2007'OR Date == '2/2/2007' ",
  header = TRUE,
  sep = ";"
)
# Draw the histogram on the active device, then copy it out as a PNG
hist(
  power$Global_active_power,
  main = "Global Active Power",
  col = "red",
  xlab = "Global Active Power (kilowatts)"
)
dev.copy(png, file = "plot1.png")
dev.off()
| /plot1.R | no_license | FA78DWA/ExData_Plotting1 | R | false | false | 406 | r | ## Read the dataset
library(sqldf)
# Filter the two target dates at read time via read.csv.sql.
# NOTE(review): there is no space between '1/2/2007' and OR in the SQL;
# SQLite tokenizes this correctly, but adding a space would be clearer.
power <- read.csv.sql("household_power_consumption.txt",
sql = "select * from file where Date == '1/2/2007'OR Date == '2/2/2007' ",
header = TRUE, sep = ";")
# Red histogram of global active power, then copy the screen device to a PNG.
hist(power$Global_active_power, main = "Global Active Power",
col = "red", xlab = "Global Active Power (kilowatts)")
dev.copy(png, file = "plot1.png")
dev.off()
|
\name{gts.listRegion}
\alias{gts.listRegion}
\title{
Function querying all the geological time concepts of a region, or the international geological time concepts.
}
\description{Function querying all the geological time concepts of a region, or the international geological time concepts.
}
\usage{
gts.listRegion(prefix = NULL, graph = NULL)
}
\arguments{
\item{prefix}{
[character] prefix for SPARQL querying. [Optional]
}
\item{graph}{
[character] GRAPH for SPARQL querying. [Optional]
}
}
\references{
%% ~put references to the literature/web site here ~
}
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
gts.listRegion()
} | /R_Functions/man/gts.listRegion.Rd | permissive | xgmachina/DeepTimeKB | R | false | false | 643 | rd | \name{gts.listRegion}
\alias{gts.listRegion}
\title{
Function querying all the geological time concepts of a region, or the international geological time concepts.
}
\description{Function querying all the geological time concepts of a region, or the international geological time concepts.
}
\usage{
gts.listRegion(prefix = NULL, graph = NULL)
}
\arguments{
\item{prefix}{
[character] prefix for SPARQL querying. [Optional]
}
\item{graph}{
[character] GRAPH for SPARQL querying. [Optional]
}
}
\references{
%% ~put references to the literature/web site here ~
}
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
gts.listRegion()
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/score_ibd.R
\name{ibd.segment}
\alias{ibd.segment}
\title{Score IBD sharing by segment.}
\usage{
ibd.segment(inheritance, ind1index, ind2index = NULL, relatedness = TRUE)
}
\arguments{
\item{inheritance}{list of numeric matrices.}
\item{ind1index, ind2index}{positive integer, represents index of individual in pedigree.}
\item{relatedness}{logical, determines coding of IBD information.}
}
\value{
A dataframe of three variables. \code{ibd} represents IBD sharing status of a segment, and \code{startpos}/\code{endpos} represents starting/ending genetic position of the segment.
}
\description{
\code{ibd.segment} determines the starting and ending genetic positions of segments with different amounts of pairwise IBD sharing.
}
\details{
When only index of one individual is supplied, IBD sharing status for each segment is coded as 0 (not IBD) or 1 (IBD) between the two haplotypes of the individual.
When indices of two individuals are supplied, IBD sharing status for each segment is either in relatedness (default) or lexicographical order of IBD state, where recoding can be done using \code{\link{recode.ibd}}.
}
\examples{
# a simple pedigree with sibling marriage
pedigree = as.character(rep(1, 5))
member = as.character(c(11, 12, 21, 22, 31))
sex = as.numeric(c(1, 2, 1, 2, 1))
father = as.character(c(NA, NA, 11, 11, 21))
mother = as.character(c(NA, NA, 12, 12, 22))
pedinfo = data.frame(pedigree, member, sex, father, mother, stringsAsFactors = FALSE)
inheritance = sim.recomb(pedinfo, 100)
# IBD segments between the two haplotypes of the inbred individual
ibd.segment(inheritance, 5)
# IBD segments between the two full sibs
ibd.segment(inheritance, 3, 4) # relatedness
ibd.segment(inheritance, 3, 4, relatedness = FALSE) # lexicographical order of IBD state
}
| /rres/man/ibd.segment.Rd | no_license | akhikolla/InformationHouse | R | false | true | 1,859 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/score_ibd.R
\name{ibd.segment}
\alias{ibd.segment}
\title{Score IBD sharing by segment.}
\usage{
ibd.segment(inheritance, ind1index, ind2index = NULL, relatedness = TRUE)
}
\arguments{
\item{inheritance}{list of numeric matrices.}
\item{ind1index, ind2index}{positive integer, represents index of individual in pedigree.}
\item{relatedness}{logical, determines coding of IBD information.}
}
\value{
A dataframe of three variables. \code{ibd} represents IBD sharing status of a segment, and \code{startpos}/\code{endpos} represents starting/ending genetic position of the segment.
}
\description{
\code{ibd.segment} determines the starting and ending genetic positions of segments with different amounts of pairwise IBD sharing.
}
\details{
When only index of one individual is supplied, IBD sharing status for each segment is coded as 0 (not IBD) or 1 (IBD) between the two haplotypes of the individual.
When indices of two individuals are supplied, IBD sharing status for each segment is either in relatedness (default) or lexicographical order of IBD state, where recoding can be done using \code{\link{recode.ibd}}.
}
\examples{
# a simple pedigree with sibling marriage
pedigree = as.character(rep(1, 5))
member = as.character(c(11, 12, 21, 22, 31))
sex = as.numeric(c(1, 2, 1, 2, 1))
father = as.character(c(NA, NA, 11, 11, 21))
mother = as.character(c(NA, NA, 12, 12, 22))
pedinfo = data.frame(pedigree, member, sex, father, mother, stringsAsFactors = FALSE)
inheritance = sim.recomb(pedinfo, 100)
# IBD segments between the two haplotypes of the inbred individual
ibd.segment(inheritance, 5)
# IBD segments between the two full sibs
ibd.segment(inheritance, 3, 4) # relatedness
ibd.segment(inheritance, 3, 4, relatedness = FALSE) # lexicographical order of IBD state
}
|
# Plot 4: 2x2 panel of household power-consumption plots for 1-2 Feb 2007.
# Read a single row first to capture the header names separately.
dfhead <- read.table("household_power_consumption.txt",header=FALSE, sep=";",nrows=1,stringsAsFactors = FALSE)
# Skip to the first line containing "1/2/2007" and read two days of minute data.
# NOTE(review): grep() returns ALL matching line numbers here (and "1/2/2007"
# also matches e.g. "11/2/2007" / "21/2/2007") - confirm how read.table handles
# a vector-valued skip and that the 2878-row window captures the intended dates.
df <- read.table("household_power_consumption.txt",header=FALSE, sep=";",skip= grep("1/2/2007",readLines("household_power_consumption.txt")),nrows=2878)
colnames(df) <- unlist(dfhead)
df2 <- df
# Build proper Date and POSIXct datetime columns from the text fields.
df2$Date <- as.Date(strptime(df2[,1],"%d/%m/%Y"))
df2$datetime <- as.POSIXct(paste(df2$Date, df2$Time), format="%Y-%m-%d %H:%M:%S")
# 2x2 grid of panels with tightened margins.
par(mfrow=c(2,2),mar=c(4,4,4,1),cex.lab=0.75,cex.axis=1)
with(df2,{
plot(datetime,Global_active_power,type = "l",xlab="",ylab="Global Active Power(kilowatts)",cex.lab=0.75,cex.axis=1)
plot(datetime,Voltage,type = "l",xlab="datetime",ylab="Voltage")
# Third panel overlays the three sub-metering series in black/red/blue.
plot(datetime,Sub_metering_1, type="l",xlab="",ylab="Energy sub metering")
points(datetime,Sub_metering_2, type="l",col="red")
points(datetime,Sub_metering_3, type="l",col="blue")
legend("topright",col=c("black","red","blue"),lty=1,cex=0.5,legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
plot(datetime,Global_reactive_power,type = "l",xlab="datetime",ylab="Global_reactive_power")
})
# Copy the screen device to a PNG file.
dev.copy(png,"plot4.png")
dev.off() | /plot4.R | no_license | gaayatri/ExData_Plotting1 | R | false | false | 1,133 | r | dfhead <- read.table("household_power_consumption.txt",header=FALSE, sep=";",nrows=1,stringsAsFactors = FALSE)
# Skip to the first line containing "1/2/2007" and read two days of minute data
# (dfhead, holding the header row, is read just above this block).
# NOTE(review): grep() returns ALL matching line numbers here (and "1/2/2007"
# also matches e.g. "11/2/2007") - confirm how read.table handles a
# vector-valued skip and that the 2878-row window captures the intended dates.
df <- read.table("household_power_consumption.txt",header=FALSE, sep=";",skip= grep("1/2/2007",readLines("household_power_consumption.txt")),nrows=2878)
colnames(df) <- unlist(dfhead)
df2 <- df
# Build proper Date and POSIXct datetime columns from the text fields.
df2$Date <- as.Date(strptime(df2[,1],"%d/%m/%Y"))
df2$datetime <- as.POSIXct(paste(df2$Date, df2$Time), format="%Y-%m-%d %H:%M:%S")
# 2x2 grid of panels with tightened margins.
par(mfrow=c(2,2),mar=c(4,4,4,1),cex.lab=0.75,cex.axis=1)
with(df2,{
plot(datetime,Global_active_power,type = "l",xlab="",ylab="Global Active Power(kilowatts)",cex.lab=0.75,cex.axis=1)
plot(datetime,Voltage,type = "l",xlab="datetime",ylab="Voltage")
# Third panel overlays the three sub-metering series in black/red/blue.
plot(datetime,Sub_metering_1, type="l",xlab="",ylab="Energy sub metering")
points(datetime,Sub_metering_2, type="l",col="red")
points(datetime,Sub_metering_3, type="l",col="blue")
legend("topright",col=c("black","red","blue"),lty=1,cex=0.5,legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
plot(datetime,Global_reactive_power,type = "l",xlab="datetime",ylab="Global_reactive_power")
})
# Copy the screen device to a PNG file, then close the PNG device.
dev.copy(png,"plot4.png")
dev.off()
# ---- Packages (classifiers, imputation, PLS-DA/SIMCA) ---------------------
library("readxl")
library(fastDummies)
library(rpart)
library(rpart.plot)
library(DMwR2)
library(class)
library (e1071)
library("kernlab")
library(randomForest)
library(ipred)
library(neuralnet)
library(adabag)
library(mdatools)
# ---- Data loading and cleaning --------------------------------------------
# Read the spreadsheet and drop id/unused columns; the class label column
# (Posc) stays in position 1 after this selection.
my_data <- read_excel("Dataset_Mineria.xlsx")
my_data<-my_data[,-c(1,2,3,5,6,8)]
# Remove two specific rows (presumably problematic cases - confirm with data owner)
my_data<-my_data[-c(1017,1166),]
# Impute missing predictor values with 10 nearest neighbours (class column excluded)
my_dataImp<-knnImputation(my_data[,-c(1)],k=10)
summary(my_data)
# Accuracy-rate placeholders, one per classifier fitted below
tasa.aciertos.tree<-NA
tasa.aciertos.knn<-NA
tasa.aciertos.nbayes<-NA
tasa.aciertos.nbayes.laplace<-NA
tasa.aciertos.svm<-NA
tasa.aciertos.svp<-NA
tasa.aciertos.rf<-NA
tasa.aciertos.nn<-NA
tasa.aciertos.pls<-NA
tasa.aciertos.simca<-NA
tasa.aciertos.bag<-NA
tasa.aciertos.bot<-NA
# ---- Normalisation and PCA ------------------------------------------------
# Centre and scale the imputed predictors, then keep the first 10 principal
# component scores as the modelling features.
my_dataCE<- scale(my_dataImp, center = TRUE, scale = TRUE)
PCA_Data <- princomp(my_dataCE, cor= T, scores=T)
scores <- PCA_Data$scores[,1:10]
# Re-attach the class column: my_dataCE keeps all normalised variables,
# matriz keeps only the 10 PCA scores.
my_dataCE<-cbind(my_data[,c(1)],my_dataCE)
matriz<-cbind(my_data[,c(1)],scores)
# ---- Holdout split --------------------------------------------------------
cjo<-sample(1:1437,1079, FALSE) #select 75% of the 1437 rows (1079) for training; the rest for validation
datos.entrenamiento<-matriz[cjo,]
datos.validacion<-matriz[-cjo,]
########################
#CLASSIFICATION TREES
########################
# Cost-complexity pruned tree (rpartXse) on the PCA scores; plot it, then
# score accuracy on the held-out validation rows.
tree3<-rpartXse(factor(Posc)~ . ,data=datos.entrenamiento, se = 0.5)
X11()
prp(tree3,extra=101)
pred <- predict(tree3, datos.validacion[,-c(1)],type = "class")
# Confusion matrix: predicted class vs. true class
tab <- table(pred, datos.validacion$Posc)
tab
# Accuracy = sum of the diagonal / total
tasa.aciertos.tree<-sum(tab[row(tab)==col(tab)])/sum(tab)
########################
#RANDOM FOREST
########################
rf<-randomForest(factor(Posc) ~ ., data = datos.entrenamiento, mtry=3,
method="class",importance=TRUE)
pred<- predict(rf, datos.validacion[,-c(1)])
# confusion matrix
tab <- table(pred, datos.validacion$Posc)
tab
# accuracy rate
tasa.aciertos.rf<-sum(tab[row(tab)==col(tab)])/sum(tab)
importance(rf)
# Plot variable importance
varImpPlot(rf, main="",col="dark blue")
varImpPlot(rf, main="",col="dark blue", type=1)
varImpPlot(rf, main="",col="dark blue", type=2)
varImpPlot(rf, main="",col="dark blue", class=TRUE)
#######################
#k-nearest neighbour
#####################
datos.entrenamiento$Posc<-factor(datos.entrenamiento$Posc)
vecino<-knn(datos.entrenamiento[,-c(1)], datos.validacion[,-c(1)], datos.entrenamiento$Posc, k = 3, prob = TRUE)
tab<-table(factor(datos.validacion$Posc),vecino)
tab
tasa.aciertos.knn<-sum(tab[row(tab)==col(tab)])/sum(tab)
#################
#Naive Bayes
###########
nbayes <- naiveBayes(factor(Posc)~ ., data=datos.entrenamiento)
pred <- predict(nbayes, datos.validacion[,-c(1)])
tab <- table(pred, datos.validacion$Posc)
tab
tasa.aciertos.nbayes<-sum(tab[row(tab)==col(tab)])/sum(tab)
## using Laplace smoothing:
model <- naiveBayes(factor(Posc)~ ., data=datos.entrenamiento, laplace = 3)
pred <- predict(model, datos.validacion[,-c(1)])
tab <- table(pred, datos.validacion$Posc)
tab
tasa.aciertos.nbayes.laplace<-sum(tab[row(tab)==col(tab)])/sum(tab)
#############
#SVM
#############
# Two SVM fits: kernlab::ksvm and e1071::svm, both with RBF kernels
svp <- ksvm(factor(Posc)~ ., data=datos.entrenamiento, type = "C-svc", kernel = "rbfdot",kpar = "automatic")
pred <- predict(svp, datos.validacion[,-c(1)])
tab <- table(pred, datos.validacion$Posc)
tab
tasa.aciertos.svp<-sum(tab[row(tab)==col(tab)])/sum(tab)
model.svm <- svm(factor(Posc)~ ., data = datos.entrenamiento, method = "C-classification", kernel = "radial",cost = 10, gamma = 0.1)
pred <- predict(model.svm, datos.validacion[,-c(1)])
tab <- table(pred, datos.validacion$Posc)
tab
tasa.aciertos.svm<-sum(tab[row(tab)==col(tab)])/sum(tab)
#############
#Neural networks
#############
modelo.nn<-neuralnet(Posc~.,
data = datos.entrenamiento,
threshold = 0.01,
hidden = c(60),
linear.output = FALSE )
# Network predictions: apply(..., 1, which.max) picks the highest-scoring
# output column for each validation row
pred<-predict(modelo.nn,datos.validacion[,-c(1)])
tab <- table(apply(pred, 1, which.max), datos.validacion$Posc)
tab
tasa.aciertos.nn<-sum(tab[row(tab)==col(tab)])/sum(tab)
#############
#PLS
#############
# PLS-DA is fitted on the normalised variables (my_dataCE), not the PCA scores
datos.entrenamientoPLS<-my_dataCE[cjo,]
datos.validacionPLS<-my_dataCE[-cjo,]
cc.all<-datos.entrenamiento[,c(1)]
modelo.pls<-plsda(datos.entrenamientoPLS[,-c(1)], cc.all, ncomp = 13,cv=1)
res<-predict(modelo.pls, datos.validacionPLS[,-c(1)], datos.validacionPLS$Posc,type="class")
tab <- getConfusionMatrix(res)
tab
tasa.aciertos.pls<-sum(tab[row(tab)==col(tab)])/sum(tab)
summary(modelo.pls)
#############
#Simca (one PCA model per class, combined with simcam)
#############
datos.entrenamientoPCA1<-my_dataCE[cjo,]
datos.validacionPCA<-my_dataCE[-cjo,]
# One training matrix per class (the class column Posc in position 1 is dropped).
# The unused clases.* copies of the class column were removed.
datos.entrenamientoCC<-datos.entrenamientoPCA1[datos.entrenamientoPCA1$Posc=="CC",-c(1)]
datos.entrenamientoDF<-datos.entrenamientoPCA1[datos.entrenamientoPCA1$Posc=="DF",-c(1)]
datos.entrenamientoDL<-datos.entrenamientoPCA1[datos.entrenamientoPCA1$Posc=="DL",-c(1)]
# Fit one SIMCA model per class.
modelo.simca.CC <- simca(datos.entrenamientoCC, "CC", ncomp = 10, cv = 1)
modelo.simca.DF <- simca(datos.entrenamientoDF, "DF", ncomp = 10, cv = 1)
# BUG FIX: the DL model was previously fitted on the CC training rows
# (datos.entrenamientoCC); it must use the DL rows.
modelo.simca.DL <- simca(datos.entrenamientoDL, "DL", ncomp = 10, cv = 1)
modelo.simca<-simcam(list(modelo.simca.CC, modelo.simca.DF, modelo.simca.DL))
res<-predict(modelo.simca, datos.validacionPCA[,-c(1)], datos.validacionPCA$Posc)
tab <- getConfusionMatrix(res)
tab
# Accuracy = sum of the diagonal / total
tasa.aciertos.simca<-sum(tab[row(tab)==col(tab)])/sum(tab)
#############
#Bagging
#############
datos.entrenamiento$Posc<-factor(datos.entrenamiento$Posc)
bag.Posc<- bagging(Posc ~., data=datos.entrenamiento, coob=TRUE)
pred <- predict(bag.Posc, datos.validacion[,-c(1)]) #to predict, drop the class label column first
# confusion matrix
tab <- table(pred$class, datos.validacion$Posc)
tab
# accuracy rate
tasa.aciertos.bag<-sum(tab[row(tab)==col(tab)])/sum(tab)
#############
#Boosting
#############
datos.entrenamiento$Posc<-factor(datos.entrenamiento$Posc)
bot.Posc<- boosting(Posc ~., data=datos.entrenamiento, coob=TRUE)
pred <- predict(bot.Posc, datos.validacion[,-c(1)]) #pred is a list; the predicted labels live in pred$class
# confusion matrix
tab <- table(pred$class, datos.validacion$Posc) #cross-table of predictions vs. true classes
tab
# accuracy rate
tasa.aciertos.bot<-sum(tab[row(tab)==col(tab)])/sum(tab)
# Collect every holdout accuracy into a single row for side-by-side comparison
tasa<-cbind(tasa.aciertos.tree,tasa.aciertos.knn,tasa.aciertos.nbayes,
tasa.aciertos.nbayes.laplace,tasa.aciertos.svm,
tasa.aciertos.svp,tasa.aciertos.rf,tasa.aciertos.nn,
tasa.aciertos.pls,tasa.aciertos.simca,
tasa.aciertos.bag,tasa.aciertos.bot)
| /Data_Mining_Project/Holdout Trabajo Final Minería.R | no_license | javiporcel21/Academic-Projects | R | false | false | 6,835 | r | library("readxl")
# ---- Packages (classifiers, imputation, PLS-DA/SIMCA) ---------------------
library(fastDummies)
library(rpart)
library(rpart.plot)
library(DMwR2)
library(class)
library (e1071)
library("kernlab")
library(randomForest)
library(ipred)
library(neuralnet)
library(adabag)
library(mdatools)
# ---- Data loading and cleaning --------------------------------------------
# Read the spreadsheet and drop id/unused columns; the class label column
# (Posc) stays in position 1 after this selection.
my_data <- read_excel("Dataset_Mineria.xlsx")
my_data<-my_data[,-c(1,2,3,5,6,8)]
# Remove two specific rows (presumably problematic cases - confirm with data owner)
my_data<-my_data[-c(1017,1166),]
# Impute missing predictor values with 10 nearest neighbours (class column excluded)
my_dataImp<-knnImputation(my_data[,-c(1)],k=10)
summary(my_data)
# Accuracy-rate placeholders, one per classifier fitted below
tasa.aciertos.tree<-NA
tasa.aciertos.knn<-NA
tasa.aciertos.nbayes<-NA
tasa.aciertos.nbayes.laplace<-NA
tasa.aciertos.svm<-NA
tasa.aciertos.svp<-NA
tasa.aciertos.rf<-NA
tasa.aciertos.nn<-NA
tasa.aciertos.pls<-NA
tasa.aciertos.simca<-NA
tasa.aciertos.bag<-NA
tasa.aciertos.bot<-NA
# ---- Normalisation and PCA ------------------------------------------------
# Centre and scale the imputed predictors, then keep the first 10 principal
# component scores as the modelling features.
my_dataCE<- scale(my_dataImp, center = TRUE, scale = TRUE)
PCA_Data <- princomp(my_dataCE, cor= T, scores=T)
scores <- PCA_Data$scores[,1:10]
# Re-attach the class column: my_dataCE keeps all normalised variables,
# matriz keeps only the 10 PCA scores.
my_dataCE<-cbind(my_data[,c(1)],my_dataCE)
matriz<-cbind(my_data[,c(1)],scores)
# ---- Holdout split --------------------------------------------------------
cjo<-sample(1:1437,1079, FALSE) #select 75% of the 1437 rows (1079) for training; the rest for validation
datos.entrenamiento<-matriz[cjo,]
datos.validacion<-matriz[-cjo,]
########################
#CLASSIFICATION TREES
########################
# Cost-complexity pruned tree (rpartXse) on the PCA scores; plot it, then
# score accuracy on the held-out validation rows.
tree3<-rpartXse(factor(Posc)~ . ,data=datos.entrenamiento, se = 0.5)
X11()
prp(tree3,extra=101)
pred <- predict(tree3, datos.validacion[,-c(1)],type = "class")
# Confusion matrix: predicted class vs. true class
tab <- table(pred, datos.validacion$Posc)
tab
# Accuracy = sum of the diagonal / total
tasa.aciertos.tree<-sum(tab[row(tab)==col(tab)])/sum(tab)
########################
#RANDOM FOREST
########################
rf<-randomForest(factor(Posc) ~ ., data = datos.entrenamiento, mtry=3,
method="class",importance=TRUE)
pred<- predict(rf, datos.validacion[,-c(1)])
# confusion matrix
tab <- table(pred, datos.validacion$Posc)
tab
# accuracy rate
tasa.aciertos.rf<-sum(tab[row(tab)==col(tab)])/sum(tab)
importance(rf)
# Plot variable importance
varImpPlot(rf, main="",col="dark blue")
varImpPlot(rf, main="",col="dark blue", type=1)
varImpPlot(rf, main="",col="dark blue", type=2)
varImpPlot(rf, main="",col="dark blue", class=TRUE)
#######################
#k-nearest neighbour
#####################
datos.entrenamiento$Posc<-factor(datos.entrenamiento$Posc)
vecino<-knn(datos.entrenamiento[,-c(1)], datos.validacion[,-c(1)], datos.entrenamiento$Posc, k = 3, prob = TRUE)
tab<-table(factor(datos.validacion$Posc),vecino)
tab
tasa.aciertos.knn<-sum(tab[row(tab)==col(tab)])/sum(tab)
#################
#Naive Bayes
###########
nbayes <- naiveBayes(factor(Posc)~ ., data=datos.entrenamiento)
pred <- predict(nbayes, datos.validacion[,-c(1)])
tab <- table(pred, datos.validacion$Posc)
tab
tasa.aciertos.nbayes<-sum(tab[row(tab)==col(tab)])/sum(tab)
## using Laplace smoothing:
model <- naiveBayes(factor(Posc)~ ., data=datos.entrenamiento, laplace = 3)
pred <- predict(model, datos.validacion[,-c(1)])
tab <- table(pred, datos.validacion$Posc)
tab
tasa.aciertos.nbayes.laplace<-sum(tab[row(tab)==col(tab)])/sum(tab)
#############
#SVM
#############
# Two SVM fits: kernlab::ksvm and e1071::svm, both with RBF kernels
svp <- ksvm(factor(Posc)~ ., data=datos.entrenamiento, type = "C-svc", kernel = "rbfdot",kpar = "automatic")
pred <- predict(svp, datos.validacion[,-c(1)])
tab <- table(pred, datos.validacion$Posc)
tab
tasa.aciertos.svp<-sum(tab[row(tab)==col(tab)])/sum(tab)
model.svm <- svm(factor(Posc)~ ., data = datos.entrenamiento, method = "C-classification", kernel = "radial",cost = 10, gamma = 0.1)
pred <- predict(model.svm, datos.validacion[,-c(1)])
tab <- table(pred, datos.validacion$Posc)
tab
tasa.aciertos.svm<-sum(tab[row(tab)==col(tab)])/sum(tab)
#############
#Neural networks
#############
modelo.nn<-neuralnet(Posc~.,
data = datos.entrenamiento,
threshold = 0.01,
hidden = c(60),
linear.output = FALSE )
# Network predictions: apply(..., 1, which.max) picks the highest-scoring
# output column for each validation row
pred<-predict(modelo.nn,datos.validacion[,-c(1)])
tab <- table(apply(pred, 1, which.max), datos.validacion$Posc)
tab
tasa.aciertos.nn<-sum(tab[row(tab)==col(tab)])/sum(tab)
#############
#PLS
#############
# PLS-DA is fitted on the normalised variables (my_dataCE), not the PCA scores
datos.entrenamientoPLS<-my_dataCE[cjo,]
datos.validacionPLS<-my_dataCE[-cjo,]
cc.all<-datos.entrenamiento[,c(1)]
modelo.pls<-plsda(datos.entrenamientoPLS[,-c(1)], cc.all, ncomp = 13,cv=1)
res<-predict(modelo.pls, datos.validacionPLS[,-c(1)], datos.validacionPLS$Posc,type="class")
tab <- getConfusionMatrix(res)
tab
tasa.aciertos.pls<-sum(tab[row(tab)==col(tab)])/sum(tab)
summary(modelo.pls)
#############
#Simca (one PCA model per class, combined with simcam)
#############
datos.entrenamientoPCA1<-my_dataCE[cjo,]
datos.validacionPCA<-my_dataCE[-cjo,]
# One training matrix per class (the class column Posc in position 1 is dropped).
# The unused clases.* copies of the class column were removed.
datos.entrenamientoCC<-datos.entrenamientoPCA1[datos.entrenamientoPCA1$Posc=="CC",-c(1)]
datos.entrenamientoDF<-datos.entrenamientoPCA1[datos.entrenamientoPCA1$Posc=="DF",-c(1)]
datos.entrenamientoDL<-datos.entrenamientoPCA1[datos.entrenamientoPCA1$Posc=="DL",-c(1)]
# Fit one SIMCA model per class.
modelo.simca.CC <- simca(datos.entrenamientoCC, "CC", ncomp = 10, cv = 1)
modelo.simca.DF <- simca(datos.entrenamientoDF, "DF", ncomp = 10, cv = 1)
# BUG FIX: the DL model was previously fitted on the CC training rows
# (datos.entrenamientoCC); it must use the DL rows.
modelo.simca.DL <- simca(datos.entrenamientoDL, "DL", ncomp = 10, cv = 1)
modelo.simca<-simcam(list(modelo.simca.CC, modelo.simca.DF, modelo.simca.DL))
res<-predict(modelo.simca, datos.validacionPCA[,-c(1)], datos.validacionPCA$Posc)
tab <- getConfusionMatrix(res)
tab
# Accuracy = sum of the diagonal / total
tasa.aciertos.simca<-sum(tab[row(tab)==col(tab)])/sum(tab)
#############
#Bagging
#############
datos.entrenamiento$Posc<-factor(datos.entrenamiento$Posc)
bag.Posc<- bagging(Posc ~., data=datos.entrenamiento, coob=TRUE)
pred <- predict(bag.Posc, datos.validacion[,-c(1)]) #to predict, drop the class label column first
# confusion matrix
tab <- table(pred$class, datos.validacion$Posc)
tab
# accuracy rate
tasa.aciertos.bag<-sum(tab[row(tab)==col(tab)])/sum(tab)
#############
#Boosting
#############
datos.entrenamiento$Posc<-factor(datos.entrenamiento$Posc)
bot.Posc<- boosting(Posc ~., data=datos.entrenamiento, coob=TRUE)
pred <- predict(bot.Posc, datos.validacion[,-c(1)]) #pred is a list; the predicted labels live in pred$class
# confusion matrix
tab <- table(pred$class, datos.validacion$Posc) #cross-table of predictions vs. true classes
tab
# accuracy rate
tasa.aciertos.bot<-sum(tab[row(tab)==col(tab)])/sum(tab)
# Collect every holdout accuracy into a single row for side-by-side comparison
tasa<-cbind(tasa.aciertos.tree,tasa.aciertos.knn,tasa.aciertos.nbayes,
tasa.aciertos.nbayes.laplace,tasa.aciertos.svm,
tasa.aciertos.svp,tasa.aciertos.rf,tasa.aciertos.nn,
tasa.aciertos.pls,tasa.aciertos.simca,
tasa.aciertos.bag,tasa.aciertos.bot)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 28866
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 28866
c
c Input Parameter (command line, file):
c input filename QBFLIB/Tentrup/mult-matrix/mult_bool_matrix_10_7_9.unsat.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 9835
c no.of clauses 28866
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 28866
c
c QBFLIB/Tentrup/mult-matrix/mult_bool_matrix_10_7_9.unsat.qdimacs 9835 28866 E1 [] 0 90 9745 28866 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Tentrup/mult-matrix/mult_bool_matrix_10_7_9.unsat/mult_bool_matrix_10_7_9.unsat.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 665 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 28866
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 28866
c
c Input Parameter (command line, file):
c input filename QBFLIB/Tentrup/mult-matrix/mult_bool_matrix_10_7_9.unsat.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 9835
c no.of clauses 28866
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 28866
c
c QBFLIB/Tentrup/mult-matrix/mult_bool_matrix_10_7_9.unsat.qdimacs 9835 28866 E1 [] 0 90 9745 28866 NONE
|
analysis <- function() {
  # Builds the tidy UCI HAR dataset:
  # download -> merge -> extract mean/std -> label activities -> rename -> aggregate.
  # Side effect: writes "result.txt" in the working directory.

  # Gets the dataset files (download/unzip only when the zip is absent)
  dataUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  zipFile <- "dataset.zip"
  if (!file.exists(zipFile)) {
    download.file(dataUrl, zipFile)
    unzip(zipFile)
  }

  ###############################################################
  ## STEP 1: Merges the training and the test sets to create one data set.
  trainData <- getData("train")
  testData <- getData("test")
  mergedData <- rbind(trainData, testData)

  ###############################################################
  ## STEP 2: Extracts only the measurements on the mean and standard
  ## deviation (plus the Activity/Volunteer/type bookkeeping columns).
  extraction <- getExtraction()
  colNames <- names(mergedData)
  activityId <- which(colNames == "Activity")
  volunteerId <- which(colNames == "Volunteer")
  typeId <- which(colNames == "type")
  mergedData <- mergedData[, c(extraction$feature_id, activityId, volunteerId, typeId)]

  ###############################################################
  ## STEP 3: Uses descriptive activity names to name the activities.
  activity <- read.table("./UCI HAR Dataset/activity_labels.txt", stringsAsFactors = FALSE)
  names(activity) <- c("activity", "activity_label")
  # Vectorized id -> label lookup (replaces a fragile per-row sapply scan)
  mergedData$Activity <- activity$activity_label[match(mergedData$Activity, activity$activity)]

  ###############################################################
  ## STEP 4: Appropriately labels the data set with descriptive variable
  ## names: drop "()" and replace "-" with "_" in the feature names.
  featureNames <- gsub("-", "_", sub("\\(\\)", "", extraction$feature))
  colnames(mergedData)[seq_along(featureNames)] <- featureNames

  ###############################################################
  ## STEP 5: Creates a second, independent tidy data set with the average
  ## of each variable for each activity and each subject.
  # Direct column references instead of attach()/detach() (same grouping).
  result <- aggregate(mergedData,
                      by = list(mergedData$Activity, mergedData$Volunteer),
                      FUN = mean)
  # Trim the result
  # 1. Remove the Activity/Volunteer/type columns: their column means are meaningless.
  colNames <- colnames(result)
  activityId <- which(colNames == "Activity")
  volunteerId <- which(colNames == "Volunteer")
  typeId <- which(colNames == "type")
  result <- result[, -c(activityId, volunteerId, typeId)]
  # 2. Set the content of the 1st column as "$Activity by volunteer $Volunteer"
  result$Group.1 <- paste(result$Group.1, "by volunteer", result$Group.2)
  # 3. Rename column Group.1 and remove column Group.2
  result <- result[, -2]
  colnames(result)[1] <- "Activity by Volunteer"

  ## Output the result
  write.table(result, file = "result.txt", row.names = FALSE)
}
## Reads the data in the "train" or "test" subfolder of the UCI HAR dataset.
## The returned data consists of: measurement set, activity id, volunteer, type.
## Input: "train" for the training data, "test" for the test data.
## Output: a combined data.frame for "train"/"test"; NULL for any other input.
getData <- function(category) {
  # Parameter check: only the two known dataset categories are valid
  # (&& because category is a scalar and we want short-circuiting).
  if (category != "train" && category != "test") {
    return(NULL)
  }
  # Files live in "./UCI HAR Dataset/train" or ".../test"
  directory <- paste("./UCI HAR Dataset/", category, sep = "")
  dataX <- read.table(paste(directory, "/X_", category, ".txt", sep = ""))           # measurement set
  dataY <- read.table(paste(directory, "/y_", category, ".txt", sep = ""))           # activity ids
  volunteer <- read.table(paste(directory, "/subject_", category, ".txt", sep = "")) # volunteer ids
  names(dataY) <- "Activity"
  names(volunteer) <- "Volunteer"
  # Tag each row with its origin ("train"/"test") so merged data can be traced back.
  typeDF <- data.frame(type = category)
  cbind(dataX, dataY, volunteer, typeDF)
}
## This function extracts the id-featureName pairs of the mean/standard deviation
## for each measurement.
## Output: Id and name of the mean/standard deviation related features
## Extracts the id/name pairs of all mean- and standard-deviation-related
## features listed in features.txt.
## Output: a data.frame with columns feature_id and feature, restricted to
##         the rows whose feature name mentions "mean" or "std"
getExtraction <- function() {
  # The id-to-name table shipped with the dataset.
  feature_table <- read.table("./UCI HAR Dataset/features.txt", stringsAsFactors = FALSE)
  names(feature_table) <- c("feature_id", "feature")
  # Keep only the rows whose name refers to a mean or a standard deviation.
  wanted_rows <- grep("mean|std", feature_table$feature)
  feature_table[wanted_rows, ]
}
| /run_analysis.R | no_license | Hightechnician/CleaningDataCourseProject | R | false | false | 5,278 | r | analysis <- function() {
## Runs the whole course-project pipeline: download/unzip the UCI HAR
## dataset, merge the train and test partitions, keep the mean/std
## measurements, attach descriptive activity and variable names, and write
## the per-activity/per-volunteer averages to "result.txt".
# Gets the dataset files
dataUrl = "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zipFile = "dataset.zip"
if(!file.exists(zipFile)) {
download.file(dataUrl, zipFile)
unzip(zipFile)
}
###############################################################
## STEP 1: Merges the training and the test sets to create
## one data set.
# Get data for train and test
trainData = getData("train")
testData = getData("test")
# Bind the train and test data
mergedData = rbind(trainData, testData)
###############################################################
## STEP 2: Extracts only the measurements on the mean and
## standard deviation for each measurement.
# Get mean/standard deviation related feature ids and names
extraction <- getExtraction()
# Get the columnId for "activity", "volunteer" "type"
colNames = names(mergedData)
activityId = which(colNames == "Activity")
volunteerId = which(colNames == "Volunteer")
typeId = which(colNames == "type")
# extract data from mergedData by column id
mergedData = mergedData[, c(extraction$feature_id, activityId, volunteerId, typeId)]
###############################################################
## STEP3: Uses descriptive activity names to name the activities in the data set
# Get the id-feature name table from activity_labels.txt
activity<-read.table("./UCI HAR Dataset/activity_labels.txt", stringsAsFactors=FALSE)
names(activity) <- c("activity", "activity_label")
# change the list of "activity" from ID to descriptive name by sapply
mergedData$Activity<-sapply(mergedData$Activity,function(id) activity[activity==id,2])
###############################################################
## STEP4: Appropriately labels the data set with descriptive variable names.
# Reuse the extraction generated in STEP 2 to rename the column names.
# Direct query of the feature name in extraction by column_seq is ok because the column seq is not changed
featureNames<-extraction[,2]
# Remove "()" in the feature name
# Replace "-" with "_"
featureNames<-lapply(featureNames,
function(feature) {
gsub("-", "_", sub("\\(\\)", "", feature))
})
# NOTE(review): featureNames is a list, so each colname is assigned from a
# one-element list; unlist(featureNames) would be cleaner, same behavior.
for (i in 1:length(featureNames)) {
colnames(mergedData)[i] <- featureNames[i]
}
###############################################################
## STEP5: Creates a second, independent tidy data set
## with the average of each variable for each
## activity and each subject.
# Calculate the result by function aggregate()
# NOTE(review): attach()/detach() is discouraged; passing
# by = list(mergedData$Activity, mergedData$Volunteer) would avoid it.
attach(mergedData)
result <- aggregate(mergedData, by=list(Activity,Volunteer), FUN=mean)
detach(mergedData)
# Trim the result
# 1. remove last 3 columns, because the means of Activity, Volunteer, type column are meaningless
colNames<-colnames(result)
activityId <- which(colNames == "Activity")
volunteerId <- which(colNames == "Volunteer")
typeId <- which(colNames == "type")
result <- result[,-c(activityId, volunteerId, typeId)]
# 2. Set the content of 1st column as "$Activity by volunteer $Volunteer"
result$Group.1 = paste(result$Group.1, "by volunteer", result$Group.2)
# 3. Rename Column Group.1 and remove Column Group.2
result<- result[,-2]
colnames(result)[1] <- "Activity by Volunteer"
## Output the result
write.table(result, file = "result.txt", row.names=FALSE)
}
## This function reads the data for the requested %category subfolder.
## The data consists of: measurement set, activity label, volunteer, type
## Input: "train" for getting train data and "test" for getting test data
## Output: Train data for input "train"
## Test data for input "test"
## NULL for any other input
## Reads one partition ("train" or "test") of the UCI HAR dataset.
## The returned data.frame contains: the measurement set, the activity
## label, the volunteer id, and a `type` column tagging the partition.
## Input:  category - "train" or "test"
## Output: a data.frame for a valid category, NULL for any other input
getData <- function(category) {
  # Validate the requested partition before touching the file system.
  # %in% also handles NA/zero-length input gracefully, unlike `!=` chains.
  if (!category %in% c("train", "test")) {
    return(NULL)
  }
  # All files for a partition live under "./UCI HAR Dataset/<category>".
  directory <- paste0("./UCI HAR Dataset/", category)
  trainX <- read.table(paste0(directory, "/X_", category, ".txt"))          # measurements
  trainY <- read.table(paste0(directory, "/y_", category, ".txt"))          # activity ids
  volunteer <- read.table(paste0(directory, "/subject_", category, ".txt")) # volunteer ids
  names(trainY) <- "Activity"
  names(volunteer) <- "Volunteer"
  # Tag every row with the partition it came from, then bind the columns.
  cbind(trainX, trainY, volunteer, data.frame(type = category))
}
## This function extracts the id-featureName pairs of the mean/standard deviation
## for each measurement.
## Output: Id and name of the mean/standard deviation related features
## Extracts the id/name pairs of all mean- and standard-deviation-related
## features listed in features.txt.
## Output: a data.frame with columns feature_id and feature, restricted to
##         the rows whose feature name mentions "mean" or "std"
getExtraction <- function() {
  # The id-to-name table shipped with the dataset.
  feature_table <- read.table("./UCI HAR Dataset/features.txt", stringsAsFactors = FALSE)
  names(feature_table) <- c("feature_id", "feature")
  # Keep only the rows whose name refers to a mean or a standard deviation.
  wanted_rows <- grep("mean|std", feature_table$feature)
  feature_table[wanted_rows, ]
}
|
library(reticulate)
#py_install("scipy")
#use_python("/usr/local/bin/python")
#use_virtualenv("myenv")
source_python("DBCV.py")
#' Clustering by fast search and find of density peaks
#'
#' This package implement the clustering algorithm described by Alex Rodriguez
#' and Alessandro Laio (2014). It provides the user with tools for generating
#' the initial rho and delta values for each observation as well as using these
#' to assign observations to clusters. This is done in two passes so the user is
#' free to reassign observations to clusters using a new set of rho and delta
#' thresholds, without needing to recalculate everything.
#'
#' @section Plotting:
#' Two types of plots are supported by this package, and both mimics the types of
#' plots used in the publication for the algorithm. The standard plot function
#' produces a decision plot, with optional colouring of cluster peaks if these
#' are assigned. Furthermore [plotMDS()] performs a multidimensional
#' scaling of the distance matrix and plots this as a scatterplot. If clusters
#' are assigned observations are coloured according to their assignment.
#'
#' @section Cluster detection:
#' The two main functions for this package are [densityClust()] and
#' [findClusters()]. The former takes a distance matrix and optionally
#' a distance cutoff and calculates rho and delta for each observation. The
#' latter takes the output of [densityClust()] and make cluster
#' assignment for each observation based on a user defined rho and delta
#' threshold. If the thresholds are not specified the user is able to supply
#' them interactively by clicking on a decision plot.
#'
#' @examples
#' irisDist <- dist(iris[,1:4])
#' irisClust <- densityClust(irisDist, gaussian=TRUE)
#' plot(irisClust) # Inspect clustering attributes to define thresholds
#'
#' irisClust <- findClusters(irisClust, rho=2, delta=2)
#' plotMDS(irisClust)
#' split(iris[,5], irisClust$clusters)
#'
#' @seealso [densityClust()], [findClusters()], [plotMDS()]
#' @aliases NULL
#'
#' @references Rodriguez, A., & Laio, A. (2014). *Clustering by fast search and find of density peaks.* Science, **344**(6191), 1492-1496. doi:10.1126/science.1242072
#'
#' @useDynLib densityClust
#' @importFrom Rcpp sourceCpp
#'
'_PACKAGE'
#' Computes the local density of points in a distance matrix
#'
#' This function takes a distance matrix and a distance cutoff and calculate the
#' local density for each point in the matrix. The computation can either be
#' done using a simple summation of the points with the distance cutoff for each
#' observation, or by applying a gaussian kernel scaled by the distance cutoff
#' (more robust for low-density data)
#'
#' @param distance A distance matrix
#'
#' @param dc A numeric value specifying the distance cutoff
#'
#' @param gaussian Logical. Should a gaussian kernel be used to estimate the
#' density (defaults to `FALSE`)
#'
#' @return A vector of local density values, the index matching row and column
#' indexes in the distance matrix
#'
#' @noRd
#'
localDensity <- function(weights, distance, dc, gaussian = FALSE) {
  # Both density kernels are implemented in C++ for speed and operate on the
  # lower-triangle 'dist' representation directly (no matrix conversion).
  n_points <- attr(distance, "Size")
  if (gaussian) {
    res <- gaussianLocalDensity(weights, distance, n_points, dc)
  } else {
    res <- nonGaussianLocalDensity(weights, n_points * sum(weights), distance, n_points, dc)
  }
  # Carry the observation labels over from the dist object; assigning NULL
  # when no labels exist simply leaves the result unnamed.
  names(res) <- attr(distance, 'Labels')
  res
}
#' Calculate distance to closest observation of higher density
#'
#' This function finds, for each observation, the minimum distance to an
#' observation of higher local density.
#'
#' @param distance A distance matrix
#'
#' @param rho A vector of local density values as outputted by [localDensity()]
#'
#' @return A vector of distances with index matching the index in rho
#'
#' @noRd
#'
distanceToPeak <- function(distance, rho) {
  # Delegates to the C++ implementation, which works on the lower-triangle
  # 'dist' representation directly instead of a full matrix.
  peak_dist <- distanceToPeakCpp(distance, rho);
  # Results are indexed like rho, so they share its names.
  names(peak_dist) <- names(rho)
  peak_dist
}
## turn 1 distance matrix into i,j coordinates
## Maps linear indices of a 'dist' object to (i, j) matrix coordinates.
## For an n-point dist, element k corresponds to the pair (i, j) with i > j.
## Out-of-range indices yield NA coordinates.
get_ij <- function (k, dist_obj) {
  if (!inherits(dist_obj, "dist")) stop("please provide a 'dist' object")
  n <- attr(dist_obj, "Size")
  # Only indices inside the lower triangle (1 .. n*(n-1)/2) are meaningful.
  n_pairs <- n * (n - 1) / 2
  in_range <- (k >= 1) & (k <= n_pairs)
  j <- rep.int(NA_real_, length(k))
  # Invert the column-major packing of the lower triangle: solve the
  # quadratic for the column j, then recover row i from the offset left.
  discriminant <- (2 * n - 1) ^ 2 - 8 * (k[in_range] - 1)
  j[in_range] <- floor((2 * n + 1 - sqrt(discriminant)) / 2)
  i <- j + k - (2 * n - j) * (j - 1) / 2
  cbind(i, j)
}
#' Estimate the distance cutoff for a specified neighbor rate
#'
#' This function calculates a distance cutoff value for a specific distance
#' matrix that makes the average neighbor rate (number of points within the
#' distance cutoff value) fall between the provided range. The authors of the
#' algorithm suggests aiming for a neighbor rate between 1 and 2 percent, but
#' also states that the algorithm is quite robust with regards to more extreme
#' cases.
#'
#' @note If the number of points is larger than 448 (resulting in 100,128
#' pairwise distances), 100,128 distance pairs will be randomly selected to
#' speed up computation time. Use [set.seed()] prior to calling
#' `estimateDc` in order to ensure reproducible results.
#'
#' @param distance A distance matrix
#'
#' @param neighborRateLow The lower bound of the neighbor rate
#'
#' @param neighborRateHigh The upper bound of the neighbor rate
#'
#' @return A numeric value giving the estimated distance cutoff value
#'
#' @examples
#' irisDist <- dist(iris[,1:4])
#' estimateDc(irisDist)
#'
#' @references Rodriguez, A., & Laio, A. (2014). *Clustering by fast search and find of density peaks.* Science, **344**(6191), 1492-1496. doi:10.1126/science.1242072
#'
#' @export
#'
## Weighted variant of estimateDc: binary-searches for a distance cutoff dc
## whose (weighted) average neighbor rate lies within
## [neighborRateLow, neighborRateHigh].
## NOTE(review): `weights` appears to hold the multiplicity of each unique
## observation in `distance` -- confirm against the callers.
estimateDc <- function(weights, distance, neighborRateLow = 0.01, neighborRateHigh = 0.02) {
# This implementation uses binary search instead of linear search.
size <- attr(distance, 'Size')
# If size is greater than 448, there will be >100000 elements in the distance
# object. Subsampling to 100000 elements will speed performance for very
# large dist objects while retaining good accuracy in estimating the cutoff.
# (100128 = 448 * 447 / 2, the number of pairs among 448 points.)
if (size > 448) {
distance <- distance[sample.int(length(distance), 100128)]
size <- 448
}
low <- min(distance)
high <- max(distance)
dc <- 0
# From here on `size` holds the weighted (true) number of observations;
# `uniquesize` keeps the unique count but is only referenced by the
# commented-out reference loop below.
newsize <- sum(weights)
uniquesize <- size
size <- newsize
while (TRUE) {
# Bisect the distance range until the neighbor rate falls in bounds.
dc <- (low + high) / 2
# neighborRate = average of number of elements of comb per row that are
# less than dc minus 1 divided by size.
# This implementation avoids converting `distance` to a matrix. The matrix is
# symmetrical, so doubling the result from `distance` (half of the matrix) is
# equivalent. The diagonal of the matrix will always be 0, so as long as dc
# is greater than 0, we add 1 for every element of the diagonal, which is
# the same as size.
# SumCutOff (C++) computes the weighted count of pairs closer than dc; the
# commented-out loop below documents the semantics it replaces.
sum_distance_below_dc <- SumCutOff(weights, distance, attr(distance, "Size"), dc)
#for (k in 1:uniquesize){
# if (distance[k] < dc){
# vals <- get_ij(k, distance)
# sum_distance_below_dc <- sum_distance_below_dc + (weights[vals[1]]*weights[vals[2]])
# }
#}
# NOTE(review): `(if (0 <= dc) size)` yields NULL when dc < 0, which would
# crash the arithmetic -- this relies on distances being non-negative.
neighborRate <- (((sum_distance_below_dc * 2 + (if (0 <= dc) size)) / size - 1)) / size
if (neighborRate >= neighborRateLow && neighborRate <= neighborRateHigh) break
if (neighborRate < neighborRateLow) {
low <- dc
} else {
high <- dc
}
}
cat('Distance cutoff calculated to', dc, '\n')
dc
}
#' Calculate clustering attributes based on the densityClust algorithm
#'
#' This function takes a distance matrix and optionally a distance cutoff and
#' calculates the values necessary for clustering based on the algorithm
#' proposed by Alex Rodrigues and Alessandro Laio (see references). The actual
#' assignment to clusters are done in a later step, based on user defined
#' threshold values. If a distance matrix is passed into `distance` the
#' original algorithm described in the paper is used. If a matrix or data.frame
#' is passed instead it is interpreted as point coordinates and rho will be
#' estimated based on k-nearest neighbors of each point (rho is estimated as
#' `exp(-mean(x))` where `x` is the distance to the nearest
#' neighbors). This can be useful when data is so large that calculating the
#' full distance matrix can be prohibitive.
#'
#' @details
#' The function calculates rho and delta for the observations in the provided
#' distance matrix. If a distance cutoff is not provided this is first estimated
#' using [estimateDc()] with default values.
#'
#' The information kept in the densityCluster object is:
#' \describe{
#' \item{`rho`}{A vector of local density values}
#' \item{`delta`}{A vector of minimum distances to observations of higher density}
#' \item{`distance`}{The initial distance matrix}
#' \item{`dc`}{The distance cutoff used to calculate rho}
#' \item{`threshold`}{A named vector specifying the threshold values for rho and delta used for cluster detection}
#' \item{`peaks`}{A vector of indexes specifying the cluster center for each cluster}
#' \item{`clusters`}{A vector of cluster affiliations for each observation. The clusters are referenced as indexes in the peaks vector}
#' \item{`halo`}{A logical vector specifying for each observation if it is considered part of the halo}
#' \item{`knn_graph`}{kNN graph constructed. It is only applicable to the case where coordinates are used as input. Currently it is set as NA.}
#' \item{`nearest_higher_density_neighbor`}{index for the nearest sample with higher density. It is only applicable to the case where coordinates are used as input.}
#' \item{`nn.index`}{indices for each cell's k-nearest neighbors. It is only applicable for the case where coordinates are used as input.}
#' \item{`nn.dist`}{distance to each cell's k-nearest neighbors. It is only applicable for the case where coordinates are used as input.}
#' }
#' Before running findClusters the threshold, peaks, clusters and halo data is
#' `NA`.
#'
#' @param distance A distance matrix or a matrix (or data.frame) for the
#' coordinates of the data. If a matrix or data.frame is used the distances and
#' local density will be estimated using a fast k-nearest neighbor approach.
#'
#' @param dc A distance cutoff for calculating the local density. If missing it
#' will be estimated with `estimateDc(distance)`
#'
#' @param gaussian Logical. Should a gaussian kernel be used to estimate the
#' density (defaults to FALSE)
#'
#' @param verbose Logical. Should the running details be reported
#'
#' @param ... Additional parameters passed on to [get.knn][FNN::get.knn]
#'
#' @return A densityCluster object. See details for a description.
#'
#' @examples
#' irisDist <- dist(iris[,1:4])
#' irisClust <- densityClust(irisDist, gaussian=TRUE)
#' plot(irisClust) # Inspect clustering attributes to define thresholds
#'
#' irisClust <- findClusters(irisClust, rho=2, delta=2)
#' plotMDS(irisClust)
#' split(iris[,5], irisClust$clusters)
#'
#' @seealso [estimateDc()], [findClusters()]
#'
#' @references Rodriguez, A., & Laio, A. (2014). *Clustering by fast search and find of density peaks.* Science, **344**(6191), 1492-1496. doi:10.1126/science.1242072
#'
#' @export
#'
## Weighted variant of densityClust. Beyond the package's usual fields, the
## result stores the observation weights, the weighted total ('truesize')
## and the path of a temp file holding the original data ('fpath'), which
## findClusters later feeds to the Python DBCV scorer.
## NOTE(review): side effect - writes `orig` to "<cwd>/temp.txt" on every
## call, even when the kNN branch is taken.
densityClust <- function(orig, weights, distance, dc, gaussian=FALSE, verbose = FALSE, ...) {
#orig = unclass(orig)
# Persist the raw data so DBCV() can re-read it later (see findClusters).
path = paste(getwd(), "/temp.txt", sep = "")
write.table(orig, file = path, col.names = F, row.names =F, sep = ",")
# Coordinate input: delegate to the kNN-based estimator.
# NOTE(review): this branch ignores `weights`; also `class(...) %in% ...`
# errors for matrix input in R >= 4.2 (class(matrix) has length 2) --
# inherits() would be safer.
if (class(distance) %in% c('data.frame', 'matrix')) {
dp_knn_args <- list(mat = distance, verbose = verbose, ...)
res <- do.call(densityClust.knn, dp_knn_args)
} else {
# dist input: estimate dc if not supplied, then compute rho and delta.
if (missing(dc)) {
if (verbose) message('Calculating the distance cutoff')
dc <- estimateDc(weights, distance)
}
if (verbose) message('Calculating the local density for each sample based on distance cutoff')
rho <- localDensity(weights, distance, dc, gaussian = gaussian)
if (verbose) message('Calculating the minimal distance of a sample to another sample with higher density')
delta <- distanceToPeak(distance, rho)
if (verbose) message('Returning result...')
res <- list(
size = attr(distance, 'Size'),       # number of unique observations
truesize = sum(weights),             # weighted number of observations
weights = weights,
fpath = path,                        # temp file consumed by DBCV()
rho = rho,
delta = delta,
distance = distance,
dc = dc,
threshold = c(rho = NA, delta = NA), # filled in by findClusters()
peaks = NA,
clusters = NA,
clusters2 = NA,                      # peak index per observation (set by findClusters)
halo = NA,
knn_graph = NA,
nearest_higher_density_neighbor = NA,
nn.index = NA,
nn.dist = NA
)
class(res) <- 'densityCluster'
}
res
}
#' Decision plot for a densityCluster object: every observation is drawn in
#' (rho, delta) space, and cluster peaks (once assigned) are highlighted
#' with one filled colour per peak.
#'
#' @export
#' @importFrom graphics plot points
#'
plot.densityCluster <- function(x, ...) {
  plot(x$rho, x$delta, main = 'Decision graph', xlab = expression(rho),
       ylab = expression(delta))
  # Peaks are only defined after findClusters() has been run.
  peaks_defined <- !is.na(x$peaks[1])
  if (peaks_defined) {
    peak_cols <- seq_along(x$peaks) + 1
    points(x$rho[x$peaks], x$delta[x$peaks], col = peak_cols, pch = 19)
  }
}
#' Plot observations using multidimensional scaling and colour by cluster
#'
#' This function produces an MDS scatterplot based on the distance matrix of the
#' densityCluster object (if there is only the coordinates information, a distance
#' matrix will be calculate first), and, if clusters are defined, colours each
#' observation according to cluster affiliation. Observations belonging to a cluster
#' core is plotted with filled circles and observations belonging to the halo with
#' hollow circles. This plotting is not suitable for running large datasets (for example
#' datasets with > 1000 samples). Users are suggested to use other methods, for example
#' tSNE, etc. to visualize their clustering results too.
#'
#' @param x A densityCluster object as produced by [densityClust()]
#'
#' @param ... Additional parameters. Currently ignored
#'
#' @examples
#' irisDist <- dist(iris[,1:4])
#' irisClust <- densityClust(irisDist, gaussian=TRUE)
#' plot(irisClust) # Inspect clustering attributes to define thresholds
#'
#' irisClust <- findClusters(irisClust, rho=2, delta=2)
#' plotMDS(irisClust)
#' split(iris[,5], irisClust$clusters)
#'
#' @seealso [densityClust()] for creating `densityCluster`
#' objects, and [plotTSNE()] for an alternative plotting approach.
#'
#' @export
#'
plotMDS <- function(x, ...) {
# S3 generic: dispatches on class(x); see plotMDS.densityCluster.
UseMethod('plotMDS')
}
#' @export
#' @importFrom stats cmdscale
#' @importFrom graphics plot points legend
#' @importFrom stats dist
plotMDS.densityCluster <- function(x, ...) {
  # `distance` may hold raw coordinates (data.frame/matrix) or a dist
  # object; in the former case distances are computed first.
  # inherits() replaces `class(...) %in% ...`, which errors for matrices in
  # R >= 4.2 because class(matrix) has length 2.
  if (inherits(x$distance, c('data.frame', 'matrix'))) {
    mds <- cmdscale(dist(x$distance))
  } else {
    mds <- cmdscale(x$distance)
  }
  # With several peaks the observations are redrawn per-cluster below, so
  # the initial scatter only sets up an (invisible, white) canvas.
  if (length(x$peaks) == 1) {
    plot(mds[, 1], mds[, 2], xlab = '', ylab = '', main = 'MDS plot of observations')
  } else {
    plot(mds[, 1], mds[, 2], xlab = '', ylab = '', main = 'MDS plot of observations', cex = 0.5, col = "white")
  }
  # Scale point sizes by observation weight (range mapped to [0.5, 2.5]).
  if (max(x$weights) != min(x$weights)) {
    cex_weights <- 2 * ((x$weights - min(x$weights)) / (max(x$weights) - min(x$weights))) + 0.5
  } else {
    # All weights equal: default size for every point.
    # (Fixed: the original referenced an undefined `weights` object here,
    # which silently resolved to the stats::weights function.)
    cex_weights <- rep(1, length(x$weights))
  }
  if (!is.na(x$peaks[1])) {
    for (i in seq_along(x$peaks)) {
      ind <- which(x$clusters == i)
      for (index in ind) {
        if (index == x$peaks[i]) {
          # Cluster peak: cross plus triangle (filled when in the core).
          points(mds[index, 1], mds[index, 2], col = i + 1, pch = 4, cex = cex_weights[index])
          points(mds[index, 1], mds[index, 2], col = i + 1, pch = ifelse(x$halo[index], 2, 17), cex = cex_weights[index])
        } else {
          # Regular observation: filled circle for core, hollow for halo.
          points(mds[index, 1], mds[index, 2], col = i + 1, pch = ifelse(x$halo[index], 1, 19), cex = cex_weights[index])
        }
      }
    }
    legend('topright', legend = c('core', 'halo'), pch = c(19, 1), horiz = TRUE)
  }
}
#' Plot observations using t-distributed neighbor embedding and colour by cluster
#'
#' This function produces an t-SNE scatterplot based on the distance matrix of the
#' densityCluster object (if there is only the coordinates information, a distance
#' matrix will be calculate first), and, if clusters are defined, colours each
#' observation according to cluster affiliation. Observations belonging to a cluster
#' core is plotted with filled circles and observations belonging to the halo with
#' hollow circles.
#'
#' @param x A densityCluster object as produced by [densityClust()]
#'
#' @param ... Additional parameters. Currently ignored
#'
#' @examples
#' irisDist <- dist(iris[,1:4])
#' irisClust <- densityClust(irisDist, gaussian=TRUE)
#' plot(irisClust) # Inspect clustering attributes to define thresholds
#'
#' irisClust <- findClusters(irisClust, rho=2, delta=2)
#' plotTSNE(irisClust)
#' split(iris[,5], irisClust$clusters)
#'
#' @seealso [densityClust()] for creating `densityCluster`
#' objects, and [plotMDS()] for an alternative plotting approach.
#'
#' @export
#'
plotTSNE <- function(x, ...) {
# S3 generic: dispatches on class(x); see plotTSNE.densityCluster.
UseMethod('plotTSNE')
}
#' @export
#' @importFrom graphics plot points legend
#' @importFrom stats dist
#' @importFrom stats rnorm
#' @importFrom Rtsne Rtsne
plotTSNE.densityCluster <- function(x, max_components = 2, ...) {
  # Accept either raw coordinates or a precomputed dist object.
  # inherits() replaces `class(...) %in% ...`, which errors for matrices in
  # R >= 4.2 because class(matrix) has length 2.
  if (inherits(x$distance, c('data.frame', 'matrix'))) {
    data <- as.matrix(dist(x$distance))
  } else {
    data <- as.matrix(x$distance)
  }
  # Rtsne rejects duplicated rows; jitter duplicates by a negligible amount.
  dup_id <- which(duplicated(data))
  if (length(dup_id) > 0) {
    data[dup_id, ] <- data[dup_id, ] + rnorm(length(dup_id) * ncol(data), sd = 1e-10)
  }
  tsne_res <- Rtsne::Rtsne(as.matrix(data), dims = max_components,
                           pca = TRUE)
  tsne_data <- tsne_res$Y[, 1:max_components]
  plot(tsne_data[, 1], tsne_data[, 2], xlab = '', ylab = '', main = 'tSNE plot of observations')
  if (!is.na(x$peaks[1])) {
    # Colour observations by cluster; filled circle = core, hollow = halo.
    for (i in 1:length(x$peaks)) {
      ind <- which(x$clusters == i)
      points(tsne_data[ind, 1], tsne_data[ind, 2], col = i + 1, pch = ifelse(x$halo[ind], 1, 19))
    }
    legend('topright', legend = c('core', 'halo'), pch = c(19, 1), horiz = TRUE)
  }
}
#' Print a concise summary of a densityCluster object: the number of
#' observations and, once clusters have been assigned, the cluster count,
#' core size, and the parameters used for detection.
#'
#' @export
#'
print.densityCluster <- function(x, ...) {
  n_obs <- length(x$rho)
  clusters_assigned <- !is.na(x$peaks[1])
  if (!clusters_assigned) {
    cat('A densityCluster object with no clusters defined\n\n')
    cat('Number of observations:', n_obs, '\n')
  } else {
    cat('A densityCluster object with', length(x$peaks), 'clusters defined\n\n')
    cat('Number of observations:', n_obs, '\n')
    cat('Observations in core: ', sum(!x$halo), '\n\n')
    cat('Parameters:\n')
    cat('dc (distance cutoff) rho threshold delta threshold\n')
    cat(formatC(x$dc, width = -22), formatC(x$threshold[1], width = -22), x$threshold[2])
  }
}
#' Detect clusters in a densityCluster object
#'
#' This function uses the supplied rho and delta thresholds to detect cluster
#' peaks and assign the rest of the observations to one of these clusters.
#' Furthermore core/halo status is calculated. If either rho or delta threshold
#' is missing the user is presented with a decision plot where they are able to
#' click on the plot area to set the threshold. If either rho or delta is set,
#' this takes precedence over the value found by clicking.
#'
#' @param x A densityCluster object as produced by [densityClust()]
#'
#' @param ... Additional parameters passed on
#'
#' @return A densityCluster object with clusters assigned to all observations
#'
#' @examples
#' irisDist <- dist(iris[,1:4])
#' irisClust <- densityClust(irisDist, gaussian=TRUE)
#' plot(irisClust) # Inspect clustering attributes to define thresholds
#'
#' irisClust <- findClusters(irisClust, rho=2, delta=2)
#' plotMDS(irisClust)
#' split(iris[,5], irisClust$clusters)
#'
#' @references Rodriguez, A., & Laio, A. (2014). *Clustering by fast search and find of density peaks.* Science, **344**(6191), 1492-1496. doi:10.1126/science.1242072
#'
#' @export
#'
findClusters <- function(x, ...) {
# S3 generic for cluster detection; dispatches on class(x)
# (see findClusters.densityCluster).
UseMethod("findClusters")
}
findCluster_validationChart <- function(x, ...) {
# S3 generic: sweeps rho/delta thresholds and tabulates the resulting
# clusterings (see findCluster_validationChart.densityCluster).
# NOTE(review): no roxygen @export tag here, unlike findClusters.
UseMethod("findCluster_validationChart")
}
#' @rdname findClusters
#'
#' @param rho The threshold for local density when detecting cluster peaks
#'
#' @param delta The threshold for minimum distance to higher density when detecting cluster peaks
#'
#' @param plot Logical. Should a decision plot be shown after cluster detection
#'
#' @param peaks A numeric vector indicates the index of density peaks used for clustering. This vector should be retrieved from the decision plot with caution. No checking involved.
#'
#' @param verbose Logical. Should the running details be reported
#'
#' @export
#' @importFrom graphics plot locator
## S3 method for densityCluster: detect peaks with the given rho/delta
## thresholds, assign every observation to the cluster of its nearest
## higher-density neighbor, flag halo points, reorder clusters by
## gamma = rho * delta, and (when 2-19 clusters are found) score the
## result with DBCV via the sourced Python script.
## NOTE(review): side effects - writes "<cwd>/temp_cluster.txt" and prints
## the DBCV score.
findClusters.densityCluster <- function(x, rho, delta, plot = FALSE, peaks = NULL, verbose = FALSE, ...) {
# Coordinate/kNN branch: relies on fields produced by densityClust.knn.
# NOTE(review): `class(...) %in% ...` errors for matrix input in R >= 4.2
# (class(matrix) has length 2); inherits() would be safer.
if (class(x$distance) %in% c('data.frame', 'matrix')) {
peak_ind <- which(x$rho > rho & x$delta > delta)
x$peaks <- peak_ind
# Assign observations to clusters
runOrder <- order(x$rho, decreasing = TRUE)
cluster <- rep(NA, length(x$rho))
# Seed the peaks with their own cluster ids.
for (i in x$peaks) {
cluster[i] <- match(i, x$peaks)
}
# Every non-peak inherits the cluster of its nearest higher-density
# neighbor, processed in decreasing-rho order.
for (ind in setdiff(runOrder, x$peaks)) {
target_lower_density_samples <- which(x$nearest_higher_density_neighbor == ind) #all the target cells should have the same cluster id as current higher density cell
cluster[ind] <- cluster[x$nearest_higher_density_neighbor[ind]]
}
# Any still-unassigned sample takes the majority cluster among its kNN.
potential_duplicates <- which(is.na(cluster))
for (ind in potential_duplicates) {
res <- as.integer(names(which.max(table(cluster[x$nn.index[ind, ]]))))
if (length(res) > 0) {
cluster[ind] <- res #assign NA samples to the majority of its clusters
} else {
message('try to increase the number of kNN (through argument k) at step of densityClust.')
cluster[ind] <- NA
}
}
x$clusters <- factor(cluster)
# Calculate core/halo status of observation
border <- rep(0, length(x$peaks))
if (verbose) message('Identifying core and halo for each cluster')
for (i in 1:length(x$peaks)) {
if (verbose) message('the current index of the peak is ', i)
# Border density: largest average rho among kNN pairs connecting cluster i
# to samples outside it.
connect_samples_ind <- intersect(unique(x$nn.index[cluster == i, ]), which(cluster != i))
averageRho <- outer(x$rho[cluster == i], x$rho[connect_samples_ind], '+') / 2
if (any(connect_samples_ind)) border[i] <- max(averageRho[connect_samples_ind])
}
x$halo <- x$rho < border[cluster]
x$threshold['rho'] <- rho
x$threshold['delta'] <- delta
}
else {
# dist-matrix branch (the original Rodriguez & Laio algorithm).
# Detect cluster peaks
if (!is.null(peaks)) {
if (verbose) message('peaks are provided, clustering will be performed based on them')
x$peaks <- peaks
} else {
# Interactive fallback: let the user click the thresholds on the plot.
if (missing(rho) || missing(delta)) {
x$peaks <- NA
plot(x)
cat('Click on plot to select thresholds\n')
threshold <- locator(1)
if (missing(rho)) rho <- threshold$x
if (missing(delta)) delta <- threshold$y
plot = TRUE
}
x$peaks <- which(x$rho > rho & x$delta > delta)
x$threshold['rho'] <- rho
x$threshold['delta'] <- delta
}
if (plot) {
plot(x)
}
# Assign observations to clusters
runOrder <- order(x$rho, decreasing = TRUE)
cluster <- rep(NA, length(x$rho))
if (verbose) message('Assigning each sample to a cluster based on its nearest density peak')
for (i in runOrder) {
# Occasional progress logging (sample indices divisible by ~n/25).
if ((i %% round(length(runOrder) / 25)) == 0) {
if (verbose) message(paste('the runOrder index is', i))
}
if (i %in% x$peaks) {
cluster[i] <- match(i, x$peaks)
} else {
# Inherit the cluster of the closest higher-density sample (already
# assigned thanks to the decreasing-rho processing order).
higherDensity <- which(x$rho > x$rho[i])
cluster[i] <- cluster[higherDensity[which.min(findDistValueByRowColInd(x$distance, attr(x$distance, 'Size'), i, higherDensity))]]
}
}
x$clusters <- cluster
# Calculate core/halo status of observation
border <- rep(0, length(x$peaks))
if (verbose) message('Identifying core and halo for each cluster')
for (i in 1:length(x$peaks)) {
if (verbose) message('the current index of the peak is ', i)
# Border density: largest average rho over cross-cluster pairs within dc.
averageRho <- outer(x$rho[cluster == i], x$rho[cluster != i], '+')/2
index <- findDistValueByRowColInd(x$distance, attr(x$distance, 'Size'), which(cluster == i), which(cluster != i)) <= x$dc
if (any(index)) border[i] <- max(averageRho[index])
}
x$halo <- x$rho < border[cluster]
}
# NOTE(review): redundant - halo was already set in both branches above.
x$halo <- x$rho < border[cluster]
# Sort cluster designations by gamma (= rho * delta)
gamma <- x$rho * x$delta
pk.ordr <- order(gamma[x$peaks], decreasing = TRUE)
x$peaks <- x$peaks[pk.ordr]
x$clusters <- match(x$clusters, pk.ordr)
# Score the clustering with DBCV (Python, via reticulate) when the number
# of clusters is plausible; clusters2 maps each observation to the index
# of its cluster's peak.
if (length(x$peaks) > 1 && (length(x$peaks) < 20) ){
for (z in 1:x$size){
x$clusters2[z] = x$peaks[x$clusters[z]]
}
cpath = paste(getwd(), "/temp_cluster.txt", sep = "")
write.table(x$clusters2, file = cpath, col.names = F, row.names =F, sep = ",")
tempDBCV <- DBCV(x$fpath,cpath)
print("DBCV is: ")
print(tempDBCV)
}
x
}
findCluster_validationChart.densityCluster <- function(x, rho_step = 0, delta_step = 0, plot = FALSE, peaks = NULL, verbose = FALSE, ...) {
#obtain max, min rho
rho_max = max(x$rho) - 0.01
rho_min = min(x$rho) + 0.01
#default rho step size
if (rho_step == 0){
rho_step = (rho_max - rho_min)/10
}
#obtain max, min delta
delta_max = max(x$delta) - 0.01
delta_min = min(x$delta) + 0.01
#default delta step size
if (delta_step == 0){
delta_step = (delta_max - delta_min)/10
}
#create data frame
Rho_Vals <- seq(from = rho_min , to = rho_max , by = rho_step)
Delta_Vals <- seq(from = delta_min , to = delta_max , by = delta_step)
testClusters <- data.frame(Rho = double(), Delta = double(), Gamma = double(), ClusterCenters = integer(), Unclassified = integer(), NumOutliers = integer(), DBCV = double())
#implement for loop
for (rho_temp in Rho_Vals){
for (delta_temp in Delta_Vals){
rho = rho_temp
delta = delta_temp
if (class(x$distance) %in% c('data.frame', 'matrix')) {
peak_ind <- which(x$rho > rho & x$delta > delta)
x$peaks <- peak_ind
# Assign observations to clusters
runOrder <- order(x$rho, decreasing = TRUE)
cluster <- rep(NA, length(x$rho))
#replace certain values in cluster matrix with the cluster centers
for (i in x$peaks) {
cluster[i] <- match(i, x$peaks)
}
#for all indexs that arent in the orginal cluster centers
for (ind in setdiff(runOrder, x$peaks)) {
#set target_* to the index where the nearest higher density neighbors of each point are equal to the non cluster center
target_lower_density_samples <- which(x$nearest_higher_density_neighbor == ind) #all the target cells should have the same cluster id as current higher density cell
cluster[ind] <- cluster[x$nearest_higher_density_neighbor[ind]]
}
#now the cluster matrix consists of cluster centers [ind] = point and other points of highest near density
potential_duplicates <- which(is.na(cluster))
for (ind in potential_duplicates) {
res <- as.integer(names(which.max(table(cluster[x$nn.index[ind, ]]))))
if (length(res) > 0) {
cluster[ind] <- res #assign NA samples to the majority of its clusters
} else {
message('try to increase the number of kNN (through argument k) at step of densityClust.')
cluster[ind] <- NA
}
}
x$clusters <- factor(cluster)
# Calculate core/halo status of observation
border <- rep(0, length(x$peaks))
if (verbose) message('Identifying core and halo for each cluster')
for (i in 1:length(x$peaks)) {
if (verbose) message('the current index of the peak is ', i)
#intersection of
connect_samples_ind <- intersect(unique(x$nn.index[cluster == i, ]), which(cluster != i))
averageRho <- outer(x$rho[cluster == i], x$rho[connect_samples_ind], '+') / 2
if (any(connect_samples_ind)) border[i] <- max(averageRho[connect_samples_ind])
}
x$halo <- x$rho < border[cluster]
x$threshold['rho'] <- rho
x$threshold['delta'] <- delta
}
else {
# Detect cluster peaks
if (!is.null(peaks)) {
if (verbose) message('peaks are provided, clustering will be performed based on them')
x$peaks <- peaks
} else {
if (missing(rho) || missing(delta)) {
x$peaks <- NA
plot(x)
cat('Click on plot to select thresholds\n')
threshold <- locator(1)
if (missing(rho)) rho <- threshold$x
if (missing(delta)) delta <- threshold$y
plot = TRUE
}
x$peaks <- which(x$rho > rho & x$delta > delta)
x$threshold['rho'] <- rho
x$threshold['delta'] <- delta
}
if (plot) {
plot(x)
}
# Assign observations to clusters
runOrder <- order(x$rho, decreasing = TRUE)
cluster <- rep(NA, length(x$rho))
if (verbose) message('Assigning each sample to a cluster based on its nearest density peak')
for (i in runOrder) {
if ((i %% round(length(runOrder) / 25)) == 0) {
if (verbose) message(paste('the runOrder index is', i))
}
if (i %in% x$peaks) {
cluster[i] <- match(i, x$peaks)
} else {
higherDensity <- which(x$rho > x$rho[i])
cluster[i] <- cluster[higherDensity[which.min(findDistValueByRowColInd(x$distance, attr(x$distance, 'Size'), i, higherDensity))]]
}
}
x$clusters <- cluster
# Calculate core/halo status of observation
border <- rep(0, length(x$peaks))
if (verbose) message('Identifying core and halo for each cluster')
for (i in 1:length(x$peaks)) {
if (verbose) message('the current index of the peak is ', i)
averageRho <- outer(x$rho[cluster == i], x$rho[cluster != i], '+')/2
index <- findDistValueByRowColInd(x$distance, attr(x$distance, 'Size'), which(cluster == i), which(cluster != i)) <= x$dc
if (any(index)) border[i] <- max(averageRho[index])
}
x$halo <- x$rho < border[cluster]
}
x$halo <- x$rho < border[cluster]
# Sort cluster designations by gamma (= rho * delta)
gamma <- x$rho * x$delta
pk.ordr <- order(gamma[x$peaks], decreasing = TRUE)
x$peaks <- x$peaks[pk.ordr]
x$clusters <- match(x$clusters, pk.ordr)
#assign cluster matrix of raw indices to cluster2
if (length(x$peaks) > 1 && (length(x$peaks) < 20) ){
for (z in 1:x$size){
x$clusters2[z] = x$peaks[x$clusters[z]]
}
cpath = paste(getwd(), "/temp_cluster.txt", sep = "")
write.table(x$clusters2, file = cpath, col.names = F, row.names =F, sep = ",")
#print(x$clusters)
#message(paste("Running DBCV for cluster number:", length(x$peaks)))
tempDBCV <- DBCV(x$fpath,cpath)
#message(paste("DBCV was: ", tempDBCV))
testClusters[nrow(testClusters) + 1, ] = c(rho, delta, (rho*delta), length(x$peaks), length(x$halo[x$halo == TRUE]), 0, tempDBCV )
}
}
}
testClusters
}
#' Extract cluster membership from a densityCluster object
#'
#' This function allows the user to extract the cluster membership of all the
#' observations in the given densityCluster object. The output can be formatted
#' in two ways as described below. Halo observations can be chosen to be removed
#' from the output.
#'
#' @details
#' Two formats for the output are available. Either a vector of integers
#' denoting for each observation, which cluster the observation belongs to. If
#' halo observations are removed, these are set to NA. The second format is a
#' list with a vector for each group containing the index for the member
#' observations in the group. If halo observations are removed their indexes are
#' omitted. The list format corresponds to the following transform of the vector
#' format `split(1:length(clusters), clusters)`, where `clusters` are
#' the cluster information in vector format.
#'
#' @param x The densityCluster object. [findClusters()] must have
#' been performed prior to this call to avoid throwing an error.
#'
#' @param ... Currently ignored
#'
#' @return A vector or list with cluster memberships for the observations in the
#' initial distance matrix
#'
#' @export
#'
# S3 generic: dispatch cluster-membership extraction on the object's class.
clusters <- function(x, ...) UseMethod("clusters")
#' @rdname clusters
#'
#' @param as.list Should the output be in the list format. Defaults to FALSE
#'
#' @param halo.rm Logical. should halo observations be removed. Defaults to TRUE
#'
#' @export
#'
clusters.densityCluster <- function(x, as.list = FALSE, halo.rm = TRUE, ...) {
  # Extract cluster membership; see the roxygen block above for the contract.
  # Guard: cluster assignment must exist before extraction makes sense.
  if (!clustered(x)) stop('x must be clustered prior to cluster extraction')
  res <- x$clusters
  if (halo.rm) {
    # Halo (low-confidence) observations are masked out rather than dropped,
    # keeping the vector aligned with the original observations.
    res[x$halo] <- NA
  }
  if (as.list) {
    # seq_along() is safe for zero-length input, unlike 1:length(res).
    res <- split(seq_along(res), res)
  }
  res
}
#' Check whether a densityCluster object have been clustered
#'
#' This function checks whether [findClusters()] has been performed on
#' the given object and returns a boolean depending on the outcome
#'
#' @param x A densityCluster object
#'
#' @return `TRUE` if [findClusters()] have been performed, otherwise
#' `FALSE`
#'
#' @export
#'
# S3 generic: has findClusters() already been run on this object?
clustered <- function(x) UseMethod("clustered")
#' @rdname clustered
#'
#' @export
#'
clustered.densityCluster <- function(x) {
  # Clustering has been run iff peaks, cluster assignments, and halo flags
  # are all populated (each is initialised to NA by densityClust()).
  has_peaks <- !is.na(x$peaks[1])
  has_clusters <- !is.na(x$clusters[1])
  has_halo <- !is.na(x$halo[1])
  has_peaks && has_clusters && has_halo
}
#' Extract labels
#'
#' @noRd
#'
#' @export
#'
labels.densityCluster <- function(object, ...) {
  # Observation names live on the underlying distance object
  # (its 'Labels' attribute); simply delegate.
  obs_labels <- labels(object$distance)
  obs_labels
}
#' Fast knn version of densityClust
#'
#' This function will be called by densityClust if a matrix or data.frame is
#' passed in rather than a distance object
#'
#' @noRd
#'
#' @importFrom FNN get.knn
densityClust.knn <- function(mat, k = NULL, verbose = FALSE, ...) {
  # kNN-based variant of densityClust: estimates the local density (rho) and
  # the distance-to-higher-density (delta) from raw coordinates, avoiding the
  # full pairwise distance matrix.
  #
  # mat: numeric matrix/data.frame of coordinates (rows = observations).
  # k:   number of neighbours; defaults to max(10, round(sqrt(n) / 2)).
  # ...: forwarded to FNN::get.knn().
  # Returns a 'densityCluster' object with rho/delta filled in and the
  # clustering fields (threshold/peaks/clusters/halo) still unset.
  if (is.null(k)) {
    k <- round(sqrt(nrow(mat)) / 2) # empirical way to select the number of neighbor points
    k <- max(10, k)                 # ensure k is at least 10
  }
  if (verbose) message('Finding kNN using FNN with ', k, ' neighbors')
  dx <- get.knn(mat, k = k, ...)
  nn.index <- dx$nn.index
  nn.dist <- dx$nn.dist
  N <- nrow(nn.index)
  knn_graph <- NULL
  if (verbose) message('Calculating the local density for each sample based on kNNs ...')
  # rho = exp(-mean distance to the k nearest neighbours); rowMeans() is the
  # vectorised equivalent of the original apply(nn.dist, 1, ...) loop.
  rho <- exp(-rowMeans(nn.dist))
  if (verbose) message('Calculating the minimal distance of a sample to another sample with higher density ...')
  rho_order <- order(rho)
  # delta holds distances (doubles); the original allocated an integer vector
  # and relied on implicit coercion at the assignment below.
  delta <- vector(mode = 'numeric', length = N)
  nearest_higher_density_neighbor <- vector(mode = 'integer', length = N)
  delta_neighbor_tmp <- smallest_dist_rho_order_coords(rho[rho_order], as.matrix(mat[rho_order, ]))
  delta[rho_order] <- delta_neighbor_tmp$smallest_dist
  # "+ 1" converts the helper's 0-based indices to R's 1-based indexing
  # (assumed from usage; the helper is compiled code defined elsewhere -- TODO confirm).
  nearest_higher_density_neighbor[rho_order] <- rho_order[delta_neighbor_tmp$nearest_higher_density_sample + 1]
  if (verbose) message('Returning result...')
  res <- list(
    rho = rho,
    delta = delta,
    distance = mat,
    dc = NULL,
    threshold = c(rho = NA, delta = NA),
    peaks = NA,
    clusters = NA,
    halo = NA,
    knn_graph = knn_graph,
    nearest_higher_density_neighbor = nearest_higher_density_neighbor,
    nn.index = nn.index,
    nn.dist = nn.dist
  )
  class(res) <- 'densityCluster'
  res
}
| /R_Python_C/New_Eco_Data_and_Validation_Suite/densityClust.R | no_license | DhanujG/Unsupervised-Clustering-Analysis-for-Competitive-Species-Traits | R | false | false | 36,639 | r | library(reticulate)
#py_install("scipy")
#use_python("/usr/local/bin/python")
#use_virtualenv("myenv")
source_python("DBCV.py")
#' Clustering by fast search and find of density peaks
#'
#' This package implement the clustering algorithm described by Alex Rodriguez
#' and Alessandro Laio (2014). It provides the user with tools for generating
#' the initial rho and delta values for each observation as well as using these
#' to assign observations to clusters. This is done in two passes so the user is
#' free to reassign observations to clusters using a new set of rho and delta
#' thresholds, without needing to recalculate everything.
#'
#' @section Plotting:
#' Two types of plots are supported by this package, and both mimics the types of
#' plots used in the publication for the algorithm. The standard plot function
#' produces a decision plot, with optional colouring of cluster peaks if these
#' are assigned. Furthermore [plotMDS()] performs a multidimensional
#' scaling of the distance matrix and plots this as a scatterplot. If clusters
#' are assigned observations are coloured according to their assignment.
#'
#' @section Cluster detection:
#' The two main functions for this package are [densityClust()] and
#' [findClusters()]. The former takes a distance matrix and optionally
#' a distance cutoff and calculates rho and delta for each observation. The
#' latter takes the output of [densityClust()] and make cluster
#' assignment for each observation based on a user defined rho and delta
#' threshold. If the thresholds are not specified the user is able to supply
#' them interactively by clicking on a decision plot.
#'
#' @examples
#' irisDist <- dist(iris[,1:4])
#' irisClust <- densityClust(irisDist, gaussian=TRUE)
#' plot(irisClust) # Inspect clustering attributes to define thresholds
#'
#' irisClust <- findClusters(irisClust, rho=2, delta=2)
#' plotMDS(irisClust)
#' split(iris[,5], irisClust$clusters)
#'
#' @seealso [densityClust()], [findClusters()], [plotMDS()]
#' @aliases NULL
#'
#' @references Rodriguez, A., & Laio, A. (2014). *Clustering by fast search and find of density peaks.* Science, **344**(6191), 1492-1496. doi:10.1126/science.1242072
#'
#' @useDynLib densityClust
#' @importFrom Rcpp sourceCpp
#'
'_PACKAGE'
#' Computes the local density of points in a distance matrix
#'
#' This function takes a distance matrix and a distance cutoff and calculate the
#' local density for each point in the matrix. The computation can either be
#' done using a simple summation of the points with the distance cutoff for each
#' observation, or by applying a gaussian kernel scaled by the distance cutoff
#' (more robust for low-density data)
#'
#' @param distance A distance matrix
#'
#' @param dc A numeric value specifying the distance cutoff
#'
#' @param gaussian Logical. Should a gaussian kernel be used to estimate the
#' density (defaults to `FALSE`)
#'
#' @return A vector of local density values, the index matching row and column
#' indexes in the distance matrix
#'
#' @noRd
#'
localDensity <- function(weights, distance, dc, gaussian = FALSE) {
  # Dispatch to the compiled kernels, which work directly on the 'dist'
  # representation without densifying it into a full matrix.
  size <- attr(distance, "Size")
  if (gaussian) {
    res <- gaussianLocalDensity(weights, distance, size, dc)
  } else {
    res <- nonGaussianLocalDensity(weights, size * sum(weights), distance, size, dc)
  }
  # Carry over observation labels; assigning NULL simply leaves the result
  # unnamed, so no explicit branch is needed.
  names(res) <- attr(distance, "Labels")
  res
}
#' Calculate distance to closest observation of higher density
#'
#' This function finds, for each observation, the minimum distance to an
#' observation of higher local density.
#'
#' @param distance A distance matrix
#'
#' @param rho A vector of local density values as outputted by [localDensity()]
#'
#' @return A vector of distances with index matching the index in rho
#'
#' @noRd
#'
distanceToPeak <- function(distance, rho) {
  # Delegate to the C++ implementation (operates on the raw 'dist' vector),
  # then carry over the observation names from rho.
  peak_dist <- distanceToPeakCpp(distance, rho)
  names(peak_dist) <- names(rho)
  peak_dist
}
## turn 1 distance matrix into i,j coordinates
get_ij <- function (k, dist_obj) {
  # A 'dist' object stores the lower triangle column-by-column; invert that
  # layout to recover (row, column) coordinates of the full distance matrix.
  if (!inherits(dist_obj, "dist")) stop("please provide a 'dist' object")
  n <- attr(dist_obj, "Size")
  # Indices outside 1..n*(n-1)/2 map to NA coordinates.
  in_range <- (k >= 1) & (k <= n * (n - 1) / 2)
  col_idx <- rep.int(NA_real_, length(k))
  # Closed-form inverse of the triangular index formula, evaluated only for
  # indices that actually fall inside the lower triangle.
  col_idx[in_range] <- floor(((2 * n + 1) - sqrt((2 * n - 1) ^ 2 - 8 * (k[in_range] - 1))) / 2)
  row_idx <- col_idx + k - (2 * n - col_idx) * (col_idx - 1) / 2
  cbind(i = row_idx, j = col_idx)
}
#' Estimate the distance cutoff for a specified neighbor rate
#'
#' This function calculates a distance cutoff value for a specific distance
#' matrix that makes the average neighbor rate (number of points within the
#' distance cutoff value) fall between the provided range. The authors of the
#' algorithm suggests aiming for a neighbor rate between 1 and 2 percent, but
#' also states that the algorithm is quite robust with regards to more extreme
#' cases.
#'
#' @note If the number of points is larger than 448 (resulting in 100,128
#' pairwise distances), 100,128 distance pairs will be randomly selected to
#' speed up computation time. Use [set.seed()] prior to calling
#' `estimateDc` in order to ensure reproducible results.
#'
#' @param distance A distance matrix
#'
#' @param neighborRateLow The lower bound of the neighbor rate
#'
#' @param neighborRateHigh The upper bound of the neighbor rate
#'
#' @return A numeric value giving the estimated distance cutoff value
#'
#' @examples
#' irisDist <- dist(iris[,1:4])
#' estimateDc(irisDist)
#'
#' @references Rodriguez, A., & Laio, A. (2014). *Clustering by fast search and find of density peaks.* Science, **344**(6191), 1492-1496. doi:10.1126/science.1242072
#'
#' @export
#'
estimateDc <- function(weights, distance, neighborRateLow = 0.01, neighborRateHigh = 0.02) {
# This implementation uses binary search instead of linear search.
# `weights` gives a multiplicity for each observation, so the effective
# number of points is sum(weights) rather than attr(distance, 'Size').
size <- attr(distance, 'Size')
# If size is greater than 448, there will be >100000 elements in the distance
# object. Subsampling to 100000 elements will speed performance for very
# large dist objects while retaining good accuracy in estimating the cutoff
if (size > 448) {
# NOTE(review): subsetting with `[` drops the 'dist' attributes, so after
# this branch attr(distance, 'Size') is NULL while `size` is forced to 448.
# The SumCutOff call below receives that NULL Size argument -- confirm the
# compiled helper tolerates this for large inputs.
distance <- distance[sample.int(length(distance), 100128)]
size <- 448
}
# Binary-search bounds: dc must lie within the observed distance range.
low <- min(distance)
high <- max(distance)
dc <- 0
# Weighted point count replaces the raw number of unique observations.
newsize <- sum(weights)
uniquesize <- size
size <- newsize
while (TRUE) {
# Midpoint of the current search interval.
dc <- (low + high) / 2
# neighborRate = average of number of elements of comb per row that are
# less than dc minus 1 divided by size.
# This implementation avoids converting `distance` to a matrix. The matrix is
# symmetrical, so doubling the result from `distance` (half of the matrix) is
# equivalent. The diagonal of the matrix will always be 0, so as long as dc
# is greater than 0, we add 1 for every element of the diagonal, which is
# the same as size
# SumCutOff is a compiled helper defined elsewhere; it presumably sums
# weight products for pairs closer than dc (see the commented-out R
# equivalent below) -- TODO confirm against its C++ source.
sum_distance_below_dc <- SumCutOff(weights, distance, attr(distance, "Size"), dc)
#for (k in 1:uniquesize){
# if (distance[k] < dc){
# vals <- get_ij(k, distance)
# sum_distance_below_dc <- sum_distance_below_dc + (weights[vals[1]]*weights[vals[2]])
# }
#}
# NOTE(review): `(if (0 <= dc) size)` evaluates to `size` for non-negative
# dc and to NULL otherwise (which would break the arithmetic); dc >= 0
# holds here because distances are non-negative.
neighborRate <- (((sum_distance_below_dc * 2 + (if (0 <= dc) size)) / size - 1)) / size
if (neighborRate >= neighborRateLow && neighborRate <= neighborRateHigh) break
# Standard bisection update: too few neighbours -> raise dc, else lower it.
if (neighborRate < neighborRateLow) {
low <- dc
} else {
high <- dc
}
}
cat('Distance cutoff calculated to', dc, '\n')
dc
}
#' Calculate clustering attributes based on the densityClust algorithm
#'
#' This function takes a distance matrix and optionally a distance cutoff and
#' calculates the values necessary for clustering based on the algorithm
#' proposed by Alex Rodrigues and Alessandro Laio (see references). The actual
#' assignment to clusters are done in a later step, based on user defined
#' threshold values. If a distance matrix is passed into `distance` the
#' original algorithm described in the paper is used. If a matrix or data.frame
#' is passed instead it is interpreted as point coordinates and rho will be
#' estimated based on k-nearest neighbors of each point (rho is estimated as
#' `exp(-mean(x))` where `x` is the distance to the nearest
#' neighbors). This can be useful when data is so large that calculating the
#' full distance matrix can be prohibitive.
#'
#' @details
#' The function calculates rho and delta for the observations in the provided
#' distance matrix. If a distance cutoff is not provided this is first estimated
#' using [estimateDc()] with default values.
#'
#' The information kept in the densityCluster object is:
#' \describe{
#' \item{`rho`}{A vector of local density values}
#' \item{`delta`}{A vector of minimum distances to observations of higher density}
#' \item{`distance`}{The initial distance matrix}
#' \item{`dc`}{The distance cutoff used to calculate rho}
#' \item{`threshold`}{A named vector specifying the threshold values for rho and delta used for cluster detection}
#' \item{`peaks`}{A vector of indexes specifying the cluster center for each cluster}
#' \item{`clusters`}{A vector of cluster affiliations for each observation. The clusters are referenced as indexes in the peaks vector}
#' \item{`halo`}{A logical vector specifying for each observation if it is considered part of the halo}
#' \item{`knn_graph`}{kNN graph constructed. It is only applicable to the case where coordinates are used as input. Currently it is set as NA.}
#' \item{`nearest_higher_density_neighbor`}{index for the nearest sample with higher density. It is only applicable to the case where coordinates are used as input.}
#' \item{`nn.index`}{indices for each cell's k-nearest neighbors. It is only applicable for the case where coordinates are used as input.}
#' \item{`nn.dist`}{distance to each cell's k-nearest neighbors. It is only applicable for the case where coordinates are used as input.}
#' }
#' Before running findClusters the threshold, peaks, clusters and halo data is
#' `NA`.
#'
#' @param distance A distance matrix or a matrix (or data.frame) for the
#' coordinates of the data. If a matrix or data.frame is used the distances and
#' local density will be estimated using a fast k-nearest neighbor approach.
#'
#' @param dc A distance cutoff for calculating the local density. If missing it
#' will be estimated with `estimateDc(distance)`
#'
#' @param gaussian Logical. Should a gaussian kernel be used to estimate the
#' density (defaults to FALSE)
#'
#' @param verbose Logical. Should the running details be reported
#'
#' @param ... Additional parameters passed on to [get.knn][FNN::get.knn]
#'
#' @return A densityCluster object. See details for a description.
#'
#' @examples
#' irisDist <- dist(iris[,1:4])
#' irisClust <- densityClust(irisDist, gaussian=TRUE)
#' plot(irisClust) # Inspect clustering attributes to define thresholds
#'
#' irisClust <- findClusters(irisClust, rho=2, delta=2)
#' plotMDS(irisClust)
#' split(iris[,5], irisClust$clusters)
#'
#' @seealso [estimateDc()], [findClusters()]
#'
#' @references Rodriguez, A., & Laio, A. (2014). *Clustering by fast search and find of density peaks.* Science, **344**(6191), 1492-1496. doi:10.1126/science.1242072
#'
#' @export
#'
densityClust <- function(orig, weights, distance, dc, gaussian = FALSE, verbose = FALSE, ...) {
  # Compute rho (local density) and delta (distance to the nearest
  # higher-density observation) for each observation; actual cluster
  # assignment happens later in findClusters().
  #
  # orig:     raw observations; written to a temp file for the external DBCV scorer.
  # weights:  per-observation multiplicities used by estimateDc()/localDensity().
  # distance: a 'dist' object, or a matrix/data.frame of coordinates (kNN path).
  # dc:       distance cutoff; estimated via estimateDc() when missing.
  # gaussian: use the Gaussian kernel density estimate?
  # Returns a 'densityCluster' object with threshold/peaks/clusters/halo unset.
  #orig = unclass(orig)
  path <- paste0(getwd(), "/temp.txt")
  write.table(orig, file = path, col.names = FALSE, row.names = FALSE, sep = ",")
  # Bug fix: class(matrix) has length 2 in R >= 4.0, so the original
  # `class(distance) %in% ...` produced a length-2 logical that errors as an
  # `if` condition in R >= 4.2; inherits() handles multiple classes correctly.
  if (inherits(distance, c("data.frame", "matrix"))) {
    # Coordinate input: estimate rho/delta via fast k-nearest neighbours.
    dp_knn_args <- list(mat = distance, verbose = verbose, ...)
    res <- do.call(densityClust.knn, dp_knn_args)
  } else {
    if (missing(dc)) {
      if (verbose) message('Calculating the distance cutoff')
      dc <- estimateDc(weights, distance)
    }
    if (verbose) message('Calculating the local density for each sample based on distance cutoff')
    rho <- localDensity(weights, distance, dc, gaussian = gaussian)
    if (verbose) message('Calculating the minimal distance of a sample to another sample with higher density')
    delta <- distanceToPeak(distance, rho)
    if (verbose) message('Returning result...')
    res <- list(
      size = attr(distance, 'Size'),
      truesize = sum(weights),
      weights = weights,
      fpath = path,
      rho = rho,
      delta = delta,
      distance = distance,
      dc = dc,
      threshold = c(rho = NA, delta = NA),
      peaks = NA,
      clusters = NA,
      clusters2 = NA,
      halo = NA,
      knn_graph = NA,
      nearest_higher_density_neighbor = NA,
      nn.index = NA,
      nn.dist = NA
    )
    class(res) <- 'densityCluster'
  }
  res
}
#' @export
#' @importFrom graphics plot points
#'
plot.densityCluster <- function(x, ...) {
  # Decision graph: local density (rho) vs distance to higher density (delta).
  plot(x$rho, x$delta, main = 'Decision graph', xlab = expression(rho),
       ylab = expression(delta))
  # Once peaks have been chosen, highlight them in per-cluster colours
  # (colour 1 is reserved for the plain points).
  peak_idx <- x$peaks
  if (!is.na(peak_idx[1])) {
    points(x$rho[peak_idx], x$delta[peak_idx],
           col = seq_along(peak_idx) + 1, pch = 19)
  }
}
#' Plot observations using multidimensional scaling and colour by cluster
#'
#' This function produces an MDS scatterplot based on the distance matrix of the
#' densityCluster object (if there is only the coordinates information, a distance
#' matrix will be calculate first), and, if clusters are defined, colours each
#' observation according to cluster affiliation. Observations belonging to a cluster
#' core is plotted with filled circles and observations belonging to the halo with
#' hollow circles. This plotting is not suitable for running large datasets (for example
#' datasets with > 1000 samples). Users are suggested to use other methods, for example
#' tSNE, etc. to visualize their clustering results too.
#'
#' @param x A densityCluster object as produced by [densityClust()]
#'
#' @param ... Additional parameters. Currently ignored
#'
#' @examples
#' irisDist <- dist(iris[,1:4])
#' irisClust <- densityClust(irisDist, gaussian=TRUE)
#' plot(irisClust) # Inspect clustering attributes to define thresholds
#'
#' irisClust <- findClusters(irisClust, rho=2, delta=2)
#' plotMDS(irisClust)
#' split(iris[,5], irisClust$clusters)
#'
#' @seealso [densityClust()] for creating `densityCluster`
#' objects, and [plotTSNE()] for an alternative plotting approach.
#'
#' @export
#'
# S3 generic: multidimensional-scaling plot of a clustering result.
plotMDS <- function(x, ...) UseMethod('plotMDS')
#' @export
#' @importFrom stats cmdscale
#' @importFrom graphics plot points legend
#' @importFrom stats dist
plotMDS.densityCluster <- function(x, ...) {
  # Project the observations to 2-D via classical MDS and colour by cluster.
  # Bug fix: inherits() replaces `class(...) %in% ...`, whose length-2 result
  # for matrices errors as an `if` condition in R >= 4.2.
  if (inherits(x$distance, c("data.frame", "matrix"))) {
    mds <- cmdscale(dist(x$distance))
  } else {
    mds <- cmdscale(x$distance)
  }
  if (length(x$peaks) == 1) {
    plot(mds[, 1], mds[, 2], xlab = '', ylab = '', main = 'MDS plot of observations')
  } else {
    # Draw an invisible scatter first; the per-cluster loop below overlays
    # the actual (coloured, weight-scaled) points.
    plot(mds[, 1], mds[, 2], xlab = '', ylab = '', main = 'MDS plot of observations', cex = 0.5, col = "white")
  }
  # Scale per-point weights into the [0.5, 2.5] cex range so marker size
  # reflects observation weight.
  if (max(x$weights) != min(x$weights)) {
    cex_weights <- 2 * ((x$weights - min(x$weights)) / (max(x$weights) - min(x$weights))) + 0.5
  } else {
    # Bug fix: the original referenced the undefined global `weights` here,
    # which would error whenever all weights are equal.
    cex_weights <- rep(1, length(x$weights))
  }
  if (!is.na(x$peaks[1])) {
    for (i in seq_along(x$peaks)) {
      ind <- which(x$clusters == i)
      for (index in ind) {
        if (index == x$peaks[i]) {
          # Cluster centre: draw a cross plus a triangle (filled when core).
          points(mds[index, 1], mds[index, 2], col = i + 1, pch = 4, cex = cex_weights[index])
          points(mds[index, 1], mds[index, 2], col = i + 1, pch = ifelse(x$halo[index], 2, 17), cex = cex_weights[index])
        } else {
          # Regular member: hollow circle for halo, filled for core.
          points(mds[index, 1], mds[index, 2], col = i + 1, pch = ifelse(x$halo[index], 1, 19), cex = cex_weights[index])
        }
      }
    }
    legend('topright', legend = c('core', 'halo'), pch = c(19, 1), horiz = TRUE)
  }
}
#' Plot observations using t-distributed neighbor embedding and colour by cluster
#'
#' This function produces an t-SNE scatterplot based on the distance matrix of the
#' densityCluster object (if there is only the coordinates information, a distance
#' matrix will be calculate first), and, if clusters are defined, colours each
#' observation according to cluster affiliation. Observations belonging to a cluster
#' core is plotted with filled circles and observations belonging to the halo with
#' hollow circles.
#'
#' @param x A densityCluster object as produced by [densityClust()]
#'
#' @param ... Additional parameters. Currently ignored
#'
#' @examples
#' irisDist <- dist(iris[,1:4])
#' irisClust <- densityClust(irisDist, gaussian=TRUE)
#' plot(irisClust) # Inspect clustering attributes to define thresholds
#'
#' irisClust <- findClusters(irisClust, rho=2, delta=2)
#' plotTSNE(irisClust)
#' split(iris[,5], irisClust$clusters)
#'
#' @seealso [densityClust()] for creating `densityCluster`
#' objects, and [plotMDS()] for an alternative plotting approach.
#'
#' @export
#'
# S3 generic: t-SNE plot of a clustering result.
plotTSNE <- function(x, ...) UseMethod('plotTSNE')
#' @export
#' @importFrom graphics plot points legend
#' @importFrom stats dist
#' @importFrom stats rnorm
#' @importFrom Rtsne Rtsne
plotTSNE.densityCluster <- function(x, max_components = 2, ...) {
  # Embed the observations with t-SNE and colour by cluster assignment.
  # Bug fix: inherits() replaces `class(...) %in% ...`, whose length-2 result
  # for matrices errors as an `if` condition in R >= 4.2.
  if (inherits(x$distance, c("data.frame", "matrix"))) {
    data <- as.matrix(dist(x$distance))
  } else {
    data <- as.matrix(x$distance)
  }
  # Rtsne rejects duplicated rows; perturb them imperceptibly instead.
  dup_id <- which(duplicated(data))
  if (length(dup_id) > 0) {
    data[dup_id, ] <- data[dup_id, ] + rnorm(length(dup_id) * ncol(data), sd = 1e-10)
  }
  # `pca = TRUE` (was the discouraged shorthand `T`).
  tsne_res <- Rtsne::Rtsne(as.matrix(data), dims = max_components,
                           pca = TRUE)
  tsne_data <- tsne_res$Y[, 1:max_components]
  plot(tsne_data[, 1], tsne_data[, 2], xlab = '', ylab = '', main = 'tSNE plot of observations')
  if (!is.na(x$peaks[1])) {
    for (i in seq_along(x$peaks)) {
      ind <- which(x$clusters == i)
      # Hollow circle for halo members, filled for core members.
      points(tsne_data[ind, 1], tsne_data[ind, 2], col = i + 1, pch = ifelse(x$halo[ind], 1, 19))
    }
    legend('topright', legend = c('core', 'halo'), pch = c(19, 1), horiz = TRUE)
  }
}
#' @export
#'
print.densityCluster <- function(x, ...) {
  # Human-readable summary; content depends on whether findClusters() has run.
  clusters_defined <- !is.na(x$peaks[1])
  if (clusters_defined) {
    cat('A densityCluster object with', length(x$peaks), 'clusters defined\n\n')
    cat('Number of observations:', length(x$rho), '\n')
    cat('Observations in core: ', sum(!x$halo), '\n\n')
    cat('Parameters:\n')
    cat('dc (distance cutoff) rho threshold delta threshold\n')
    cat(formatC(x$dc, width = -22), formatC(x$threshold[1], width = -22), x$threshold[2])
  } else {
    cat('A densityCluster object with no clusters defined\n\n')
    cat('Number of observations:', length(x$rho), '\n')
  }
}
#' Detect clusters in a densityCluster object
#'
#' This function uses the supplied rho and delta thresholds to detect cluster
#' peaks and assign the rest of the observations to one of these clusters.
#' Furthermore core/halo status is calculated. If either rho or delta threshold
#' is missing the user is presented with a decision plot where they are able to
#' click on the plot area to set the treshold. If either rho or delta is set,
#' this takes presedence over the value found by clicking.
#'
#' @param x A densityCluster object as produced by [densityClust()]
#'
#' @param ... Additional parameters passed on
#'
#' @return A densityCluster object with clusters assigned to all observations
#'
#' @examples
#' irisDist <- dist(iris[,1:4])
#' irisClust <- densityClust(irisDist, gaussian=TRUE)
#' plot(irisClust) # Inspect clustering attributes to define thresholds
#'
#' irisClust <- findClusters(irisClust, rho=2, delta=2)
#' plotMDS(irisClust)
#' split(iris[,5], irisClust$clusters)
#'
#' @references Rodriguez, A., & Laio, A. (2014). *Clustering by fast search and find of density peaks.* Science, **344**(6191), 1492-1496. doi:10.1126/science.1242072
#'
#' @export
#'
# S3 generic: assign observations to clusters using rho/delta thresholds.
findClusters <- function(x, ...) UseMethod("findClusters")
# S3 generic: sweep rho/delta thresholds and tabulate DBCV validation scores.
findCluster_validationChart <- function(x, ...) UseMethod("findCluster_validationChart")
#' @rdname findClusters
#'
#' @param rho The threshold for local density when detecting cluster peaks
#'
#' @param delta The threshold for minimum distance to higher density when detecting cluster peaks
#'
#' @param plot Logical. Should a decision plot be shown after cluster detection
#'
#' @param peaks A numeric vector indicates the index of density peaks used for clustering. This vector should be retrieved from the decision plot with caution. No checking involved.
#'
#' @param verbose Logical. Should the running details be reported
#'
#' @export
#' @importFrom graphics plot locator
findClusters.densityCluster <- function(x, rho, delta, plot = FALSE, peaks = NULL, verbose = FALSE, ...) {
# Assigns every observation to a cluster given the rho/delta thresholds (or an
# explicit `peaks` vector), then derives core/halo status and finally scores
# the partition with the external Python DBCV metric. Two code paths: the
# first branch handles the kNN-based result (distance kept as raw
# coordinates), the second the classic 'dist'-object result.
# NOTE(review): `class(...) %in% ...` as an `if` condition yields a length-2
# logical for matrices in R >= 4.0 and errors in R >= 4.2; inherits() would
# be safer -- left unchanged here.
if (class(x$distance) %in% c('data.frame', 'matrix')) {
# Peaks are observations exceeding both thresholds.
peak_ind <- which(x$rho > rho & x$delta > delta)
x$peaks <- peak_ind
# Assign observations to clusters
runOrder <- order(x$rho, decreasing = TRUE)
cluster <- rep(NA, length(x$rho))
# Each peak seeds its own cluster, numbered by its position in x$peaks.
for (i in x$peaks) {
cluster[i] <- match(i, x$peaks)
}
# Non-peak observations inherit the cluster of their nearest higher-density
# neighbour; visiting in decreasing density order ensures that neighbour is
# already labelled.
for (ind in setdiff(runOrder, x$peaks)) {
# NOTE(review): target_lower_density_samples is computed but never used.
target_lower_density_samples <- which(x$nearest_higher_density_neighbor == ind) #all the target cells should have the same cluster id as current higher density cell
cluster[ind] <- cluster[x$nearest_higher_density_neighbor[ind]]
}
# Any still-unlabelled observation takes the majority label among its kNNs.
potential_duplicates <- which(is.na(cluster))
for (ind in potential_duplicates) {
res <- as.integer(names(which.max(table(cluster[x$nn.index[ind, ]]))))
if (length(res) > 0) {
cluster[ind] <- res #assign NA samples to the majority of its clusters
} else {
message('try to increase the number of kNN (through argument k) at step of densityClust.')
cluster[ind] <- NA
}
}
x$clusters <- factor(cluster)
# Calculate core/halo status of observation
border <- rep(0, length(x$peaks))
if (verbose) message('Identifying core and halo for each cluster')
for (i in 1:length(x$peaks)) {
if (verbose) message('the current index of the peak is ', i)
# Samples outside cluster i that appear among the kNNs of members of i,
# i.e. the cluster's boundary region.
connect_samples_ind <- intersect(unique(x$nn.index[cluster == i, ]), which(cluster != i))
averageRho <- outer(x$rho[cluster == i], x$rho[connect_samples_ind], '+') / 2
# NOTE(review): averageRho's columns are positions 1..length(connect_samples_ind),
# yet it is indexed here by the raw sample ids held in connect_samples_ind --
# confirm this indexing is intended.
if (any(connect_samples_ind)) border[i] <- max(averageRho[connect_samples_ind])
}
# Halo = member whose density falls below its cluster's border density.
x$halo <- x$rho < border[cluster]
x$threshold['rho'] <- rho
x$threshold['delta'] <- delta
}
else {
# Detect cluster peaks
if (!is.null(peaks)) {
if (verbose) message('peaks are provided, clustering will be performed based on them')
x$peaks <- peaks
} else {
# With thresholds missing, let the user pick them interactively off the
# decision plot via locator().
if (missing(rho) || missing(delta)) {
x$peaks <- NA
plot(x)
cat('Click on plot to select thresholds\n')
threshold <- locator(1)
if (missing(rho)) rho <- threshold$x
if (missing(delta)) delta <- threshold$y
plot = TRUE
}
x$peaks <- which(x$rho > rho & x$delta > delta)
x$threshold['rho'] <- rho
x$threshold['delta'] <- delta
}
if (plot) {
plot(x)
}
# Assign observations to clusters
runOrder <- order(x$rho, decreasing = TRUE)
cluster <- rep(NA, length(x$rho))
if (verbose) message('Assigning each sample to a cluster based on its nearest density peak')
for (i in runOrder) {
# Progress reporting roughly every 4% of samples.
# NOTE(review): round(length(runOrder) / 25) is 0 for fewer than 13
# samples, making `i %% 0` NaN and this `if` fail -- latent bug.
if ((i %% round(length(runOrder) / 25)) == 0) {
if (verbose) message(paste('the runOrder index is', i))
}
if (i %in% x$peaks) {
cluster[i] <- match(i, x$peaks)
} else {
# Inherit the label of the closest observation with strictly higher
# density (already labelled thanks to the density-ordered sweep).
higherDensity <- which(x$rho > x$rho[i])
cluster[i] <- cluster[higherDensity[which.min(findDistValueByRowColInd(x$distance, attr(x$distance, 'Size'), i, higherDensity))]]
}
}
x$clusters <- cluster
# Calculate core/halo status of observation
border <- rep(0, length(x$peaks))
if (verbose) message('Identifying core and halo for each cluster')
for (i in 1:length(x$peaks)) {
if (verbose) message('the current index of the peak is ', i)
# Border density of cluster i: highest average rho over cross-cluster
# pairs that lie within the distance cutoff dc of each other.
averageRho <- outer(x$rho[cluster == i], x$rho[cluster != i], '+')/2
index <- findDistValueByRowColInd(x$distance, attr(x$distance, 'Size'), which(cluster == i), which(cluster != i)) <= x$dc
if (any(index)) border[i] <- max(averageRho[index])
}
x$halo <- x$rho < border[cluster]
}
# NOTE(review): halo is recomputed here with the same expression already run
# inside both branches; redundant but harmless.
x$halo <- x$rho < border[cluster]
# Sort cluster designations by gamma (= rho * delta)
gamma <- x$rho * x$delta
pk.ordr <- order(gamma[x$peaks], decreasing = TRUE)
x$peaks <- x$peaks[pk.ordr]
x$clusters <- match(x$clusters, pk.ordr)
# For 2..19 clusters, translate assignments back to raw peak indices, dump
# them to a temp file, and score the partition with the Python DBCV metric.
if (length(x$peaks) > 1 && (length(x$peaks) < 20) ){
for (z in 1:x$size){
x$clusters2[z] = x$peaks[x$clusters[z]]
}
cpath = paste(getwd(), "/temp_cluster.txt", sep = "")
write.table(x$clusters2, file = cpath, col.names = F, row.names =F, sep = ",")
tempDBCV <- DBCV(x$fpath,cpath)
print("DBCV is: ")
print(tempDBCV)
}
x
}
# Grid-search companion to findClusters(): sweeps a grid of (rho, delta)
# threshold pairs, redoes peak detection and cluster assignment for every
# pair, and returns a "validation chart" data.frame with one row per
# combination: the thresholds, Gamma (= rho * delta), the number of cluster
# centers, a halo count (stored in the `Unclassified` column), a
# `NumOutliers` column that is always written as 0, and the DBCV score.
# DBCV() and findDistValueByRowColInd() are defined elsewhere in this
# package, as are the x$size / x$fpath fields this code reads -- confirm
# they are set by the object constructor.
#
# Args:
#   x          - a densityCluster object (kNN-based or distance-matrix-based)
#   rho_step   - rho grid step; 0 means 1/10 of the rho range
#   delta_step - delta grid step; 0 means 1/10 of the delta range
#   plot       - plot the decision graph (distance-matrix branch only)
#   peaks      - optional user-supplied peak indices (distance-matrix branch)
#   verbose    - emit progress messages
# Returns: the data.frame of tested combinations (testClusters).
findCluster_validationChart.densityCluster <- function(x, rho_step = 0, delta_step = 0, plot = FALSE, peaks = NULL, verbose = FALSE, ...) {
  #obtain max, min rho (pulled in by 0.01 so grid points stay strictly inside the range)
  rho_max = max(x$rho) - 0.01
  rho_min = min(x$rho) + 0.01
  #default rho step size: a ~10-point grid across the rho range
  if (rho_step == 0){
    rho_step = (rho_max - rho_min)/10
  }
  #obtain max, min delta
  delta_max = max(x$delta) - 0.01
  delta_min = min(x$delta) + 0.01
  #default delta step size
  if (delta_step == 0){
    delta_step = (delta_max - delta_min)/10
  }
  #create data frame: the grid values and an (initially empty) result table
  Rho_Vals <- seq(from = rho_min , to = rho_max , by = rho_step)
  Delta_Vals <- seq(from = delta_min , to = delta_max , by = delta_step)
  testClusters <- data.frame(Rho = double(), Delta = double(), Gamma = double(), ClusterCenters = integer(), Unclassified = integer(), NumOutliers = integer(), DBCV = double())
  #implement for loop over every (rho, delta) combination on the grid
  for (rho_temp in Rho_Vals){
    for (delta_temp in Delta_Vals){
      rho = rho_temp
      delta = delta_temp
      # kNN branch: for the kNN variant, x$distance holds the raw data matrix.
      # NOTE(review): class() comparison is fragile (class() can be length > 1);
      # inherits() would be the idiomatic check. Kept as-is.
      if (class(x$distance) %in% c('data.frame', 'matrix')) {
        peak_ind <- which(x$rho > rho & x$delta > delta)
        x$peaks <- peak_ind
        # Assign observations to clusters
        runOrder <- order(x$rho, decreasing = TRUE)
        cluster <- rep(NA, length(x$rho))
        #replace certain values in cluster matrix with the cluster centers
        for (i in x$peaks) {
          cluster[i] <- match(i, x$peaks)
        }
        #for all indexs that arent in the orginal cluster centers
        for (ind in setdiff(runOrder, x$peaks)) {
          #set target_* to the index where the nearest higher density neighbors of each point are equal to the non cluster center
          # NOTE(review): target_lower_density_samples is computed but never used.
          target_lower_density_samples <- which(x$nearest_higher_density_neighbor == ind) #all the target cells should have the same cluster id as current higher density cell
          cluster[ind] <- cluster[x$nearest_higher_density_neighbor[ind]]
        }
        #now the cluster matrix consists of cluster centers [ind] = point and other points of highest near density
        potential_duplicates <- which(is.na(cluster))
        for (ind in potential_duplicates) {
          # majority vote over the kNN neighbourhood for still-unassigned samples
          res <- as.integer(names(which.max(table(cluster[x$nn.index[ind, ]]))))
          if (length(res) > 0) {
            cluster[ind] <- res #assign NA samples to the majority of its clusters
          } else {
            message('try to increase the number of kNN (through argument k) at step of densityClust.')
            cluster[ind] <- NA
          }
        }
        x$clusters <- factor(cluster)
        # Calculate core/halo status of observation
        border <- rep(0, length(x$peaks))
        if (verbose) message('Identifying core and halo for each cluster')
        for (i in 1:length(x$peaks)) {
          if (verbose) message('the current index of the peak is ', i)
          #intersection of the cluster's kNN neighbourhood with points outside the cluster
          connect_samples_ind <- intersect(unique(x$nn.index[cluster == i, ]), which(cluster != i))
          averageRho <- outer(x$rho[cluster == i], x$rho[connect_samples_ind], '+') / 2
          if (any(connect_samples_ind)) border[i] <- max(averageRho[connect_samples_ind])
        }
        x$halo <- x$rho < border[cluster]
        x$threshold['rho'] <- rho
        x$threshold['delta'] <- delta
      }
      else {
        # Distance-matrix branch (x$distance is a dist object with a 'Size' attr)
        # Detect cluster peaks
        if (!is.null(peaks)) {
          if (verbose) message('peaks are provided, clustering will be performed based on them')
          x$peaks <- peaks
        } else {
          # NOTE(review): rho/delta are plain locals assigned at the top of the
          # loop, not formal arguments, so missing() can never be TRUE here --
          # this interactive locator() branch is dead code in this function.
          if (missing(rho) || missing(delta)) {
            x$peaks <- NA
            plot(x)
            cat('Click on plot to select thresholds\n')
            threshold <- locator(1)
            if (missing(rho)) rho <- threshold$x
            if (missing(delta)) delta <- threshold$y
            plot = TRUE
          }
          x$peaks <- which(x$rho > rho & x$delta > delta)
          x$threshold['rho'] <- rho
          x$threshold['delta'] <- delta
        }
        if (plot) {
          plot(x)
        }
        # Assign observations to clusters
        runOrder <- order(x$rho, decreasing = TRUE)
        cluster <- rep(NA, length(x$rho))
        if (verbose) message('Assigning each sample to a cluster based on its nearest density peak')
        for (i in runOrder) {
          # progress message roughly every 4% of the samples
          if ((i %% round(length(runOrder) / 25)) == 0) {
            if (verbose) message(paste('the runOrder index is', i))
          }
          if (i %in% x$peaks) {
            cluster[i] <- match(i, x$peaks)
          } else {
            # inherit the cluster of the nearest point with strictly higher density
            higherDensity <- which(x$rho > x$rho[i])
            cluster[i] <- cluster[higherDensity[which.min(findDistValueByRowColInd(x$distance, attr(x$distance, 'Size'), i, higherDensity))]]
          }
        }
        x$clusters <- cluster
        # Calculate core/halo status of observation
        border <- rep(0, length(x$peaks))
        if (verbose) message('Identifying core and halo for each cluster')
        for (i in 1:length(x$peaks)) {
          if (verbose) message('the current index of the peak is ', i)
          averageRho <- outer(x$rho[cluster == i], x$rho[cluster != i], '+')/2
          # border density: pairs closer than the cutoff distance dc
          index <- findDistValueByRowColInd(x$distance, attr(x$distance, 'Size'), which(cluster == i), which(cluster != i)) <= x$dc
          if (any(index)) border[i] <- max(averageRho[index])
        }
        x$halo <- x$rho < border[cluster]
      }
      # NOTE(review): this repeats the halo assignment already done in both
      # branches above; it is redundant (border/cluster are unchanged here).
      x$halo <- x$rho < border[cluster]
      # Sort cluster designations by gamma (= rho * delta)
      gamma <- x$rho * x$delta
      pk.ordr <- order(gamma[x$peaks], decreasing = TRUE)
      x$peaks <- x$peaks[pk.ordr]
      x$clusters <- match(x$clusters, pk.ordr)
      #assign cluster matrix of raw indices to cluster2
      # only score "reasonable" solutions: more than one and fewer than 20 centers
      if (length(x$peaks) > 1 && (length(x$peaks) < 20) ){
        for (z in 1:x$size){
          x$clusters2[z] = x$peaks[x$clusters[z]]
        }
        # DBCV() is driven through temp files: cluster labels are written next
        # to the data file x$fpath recorded on the object.
        cpath = paste(getwd(), "/temp_cluster.txt", sep = "")
        write.table(x$clusters2, file = cpath, col.names = F, row.names =F, sep = ",")
        #print(x$clusters)
        #message(paste("Running DBCV for cluster number:", length(x$peaks)))
        tempDBCV <- DBCV(x$fpath,cpath)
        #message(paste("DBCV was: ", tempDBCV))
        # NOTE(review): the 5th value is the halo count but lands in the column
        # named "Unclassified"; "NumOutliers" is hard-coded to 0.
        testClusters[nrow(testClusters) + 1, ] = c(rho, delta, (rho*delta), length(x$peaks), length(x$halo[x$halo == TRUE]), 0, tempDBCV )
      }
    }
  }
  testClusters
}
#' Extract cluster membership from a densityCluster object
#'
#' This function allows the user to extract the cluster membership of all the
#' observations in the given densityCluster object. The output can be formatted
#' in two ways as described below. Halo observations can be chosen to be removed
#' from the output.
#'
#' @details
#' Two formats for the output are available. Either a vector of integers
#' denoting for each observation, which cluster the observation belongs to. If
#' halo observations are removed, these are set to NA. The second format is a
#' list with a vector for each group containing the index for the member
#' observations in the group. If halo observations are removed their indexes are
#' omitted. The list format correspond to the following transform of the vector
#' format `split(1:length(clusters), clusters)`, where `clusters` are
#' the cluster information in vector format.
#'
#' @param x The densityCluster object. [findClusters()] must have
#' been performed prior to this call to avoid throwing an error.
#'
#' @param ... Currently ignored
#'
#' @return A vector or list with cluster memberships for the observations in the
#' initial distance matrix
#'
#' @export
#'
clusters <- function(x, ...) {
  # S3 generic: dispatches to clusters.<class>(), e.g. clusters.densityCluster()
  UseMethod("clusters")
}
#' @rdname clusters
#'
#' @param as.list Should the output be in the list format. Defaults to FALSE
#'
#' @param halo.rm Logical. should halo observations be removed. Defaults to TRUE
#'
#' @export
#'
clusters.densityCluster <- function(x, as.list = FALSE, halo.rm = TRUE, ...) {
  # Clustering must have been performed (findClusters) before extraction.
  if (!clustered(x)) {
    stop('x must be clustered prior to cluster extraction')
  }
  membership <- x$clusters
  # Optionally blank out halo (border) observations.
  if (halo.rm) membership[x$halo] <- NA
  # Default vector format unless the list-of-indices format was requested.
  if (!as.list) {
    return(membership)
  }
  split(1:length(membership), membership)
}
#' Check whether a densityCluster object have been clustered
#'
#' This function checks whether [findClusters()] has been performed on
#' the given object and returns a boolean depending on the outcome
#'
#' @param x A densityCluster object
#'
#' @return `TRUE` if [findClusters()] have been performed, otherwise
#' `FALSE`
#'
#' @export
#'
clustered <- function(x) {
  # S3 generic: dispatches to clustered.<class>(), e.g. clustered.densityCluster()
  UseMethod("clustered")
}
#' @rdname clustered
#'
#' @export
#'
clustered.densityCluster <- function(x) {
  # The object is clustered iff none of peaks/clusters/halo still hold the
  # initial NA placeholders set by the constructor.
  first_entries <- c(x$peaks[1], x$clusters[1], x$halo[1])
  !any(is.na(first_entries))
}
#' Extract labels
#'
#' @noRd
#'
#' @export
#'
labels.densityCluster <- function(object, ...) {
  # Observation labels come straight from the underlying distance slot.
  dist_rep <- object$distance
  labels(dist_rep)
}
#' Fast knn version of densityClust
#'
#' This function will be called by densityClust if a matrix or data.frame is
#' passed in rather than a distance object
#'
#' @noRd
#'
#' @importFrom FNN get.knn
densityClust.knn <- function(mat, k = NULL, verbose = F, ...) {
  # kNN-based construction of a densityCluster object from a raw data
  # matrix/data.frame (dispatched to when densityClust() is not given a dist).
  # Computes a local density rho per sample and the delta distance to the
  # nearest higher-density sample, then returns them in a 'densityCluster' list.
  if (is.null(k)) {
    k <- round(sqrt(nrow(mat)) / 2) # empirical way to select the number of neighbor points
    k <- max(10, k) # ensure k is at least 10
  }
  if (verbose) message('Finding kNN using FNN with ', k, ' neighbors')
  # FNN::get.knn returns per-row neighbour indices (nn.index) and distances (nn.dist)
  dx <- get.knn(mat, k = k, ...)
  nn.index <- dx$nn.index
  nn.dist <- dx$nn.dist
  N <- nrow(nn.index)
  # placeholder only -- never populated here, stored as NULL in the result
  knn_graph <- NULL
  if (verbose) message('Calculating the local density for each sample based on kNNs ...')
  # local density proxy: exp(-mean distance to the k nearest neighbours)
  rho <- apply(nn.dist, 1, function(x) {
    exp(-mean(x))
  })
  if (verbose) message('Calculating the minimal distance of a sample to another sample with higher density ...')
  rho_order <- order(rho)
  delta <- vector(mode = 'integer', length = N)
  nearest_higher_density_neighbor <- vector(mode = 'integer', length = N)
  # smallest_dist_rho_order_coords() is an external helper defined elsewhere in
  # the package; the `+ 1` below suggests it returns 0-based indices -- confirm
  # against its implementation.
  delta_neighbor_tmp <- smallest_dist_rho_order_coords(rho[rho_order], as.matrix(mat[rho_order, ]))
  delta[rho_order] <- delta_neighbor_tmp$smallest_dist
  nearest_higher_density_neighbor[rho_order] <- rho_order[delta_neighbor_tmp$nearest_higher_density_sample + 1]
  if (verbose) message('Returning result...')
  # threshold/peaks/clusters/halo start as NA; findClusters() fills them in later
  res <- list(
    rho = rho,
    delta = delta,
    distance = mat,   # for the kNN variant this slot holds the raw data matrix
    dc = NULL,
    threshold = c(rho = NA, delta = NA),
    peaks = NA,
    clusters = NA,
    halo = NA,
    knn_graph = knn_graph,
    nearest_higher_density_neighbor = nearest_higher_density_neighbor,
    nn.index = nn.index,
    nn.dist = nn.dist
  )
  class(res) <- 'densityCluster'
  res
}
|
## Week 8 class notes: dates & datetimes with base R and lubridate, plus a
## dplyr/ggplot challenge on the Mauna Loa minute-level met data.
library(lubridate)
library(tidyverse)
load("data/mauna_loa_met_2001_minute.rda")
# base R needs an explicit format string; lubridate infers it from the function name
as.Date("02-01-1998", format = "%m-%d-%Y")
mdy("02-01-1998")
# datetime data - POSIXct wants "year-month-day hour:min:sec"; use format if it is not this way
tm1 <- as.POSIXct("2016-07-24 23:55:26 PDT")
tm2 <- as.POSIXct("25072016 08:32:07", format = "%d%m%Y %H:%M:%S")
# can tell the time zone
tm3 <- as.POSIXct("2010-12-01 11:42:03", tz = "GMT")
# strptime is for specifying time zone and date format in the same call
# FIX: the original passed the base function `C` (the contrast-setting helper)
# to strptime instead of a datetime string, which is an error; use a literal
# string that matches the supplied format.
tm4 <- as.POSIXct(strptime("2016/07/24 23:55", format = "%Y/%m/%d %H:%M"), tz = "America/Los_Angeles")
tz(tm4)
Sys.timezone()
# you can do the same thing with lubridate
ymd_hm("2010-12-01 11:43", tz = "America/Los_Angeles")
nfy <- read_csv("data/2015_NFY_solinst.csv", skip = 12)
nfy2 <- read_csv("data/2015_NFY_solinst.csv", skip = 12, col_types = "ccidd") # col types: character, character, integer, double, double
nfy3 <- read_csv("data/2015_NFY_solinst.csv", skip = 12, col_types = cols(Date = col_date()))
glimpse(nfy3) # cols() lets you override just one column; everything else keeps the default guess
# create new col datetime; it will be added on to the end of the data frame
nfy$datetime <- paste(nfy$Date, " ", nfy$Time, sep = "")
# the datetime col must be parsed into a real datetime, not left as a character,
# otherwise R doesn't know what to do with it
# NOTE(review): this parses nfy$datetime but assigns into nfy2 -- both come from
# the same file so the lengths match, but confirm the nfy/nfy2 mix is intended.
nfy2$datetime <- ymd_hms(nfy$datetime, tz = "America/Los_Angeles")
summary(mloa_2001)
mloa_2001$datetime <- paste0(mloa_2001$year, "-", mloa_2001$month, "-", mloa_2001$day, "-", mloa_2001$hour24, ":", mloa_2001$min)
glimpse(mloa_2001)
mloa_2001$datetime <- ymd_hm(mloa_2001$datetime)
#### Challenge ####
# Challenge with dplyr & ggplot
# Remove the NA's (-99 and -999) in rel_humid, temp_C_2m, windSpeed_m_s
# Use dplyr to calculate the mean monthly temperature (temp_C_2m) using the datetime column (HINT: look at lubridate functions like month())
# Make a ggplot of the avg monthly temperature
# Make a ggplot of the daily average temperature for July (HINT: try yday() function with some summarize() in dplyr)
mloa_2001_sm <- mloa_2001 %>%
  filter(rel_humid != -99, rel_humid != -999) %>%
  filter(temp_C_2m != -99, temp_C_2m != -999) %>%
  filter(windSpeed_m_s != -99, windSpeed_m_s != -999)
glimpse(mloa_2001_sm)
mloa3 <- mloa_2001_sm %>%
  mutate(which_month = month(datetime, lab = T)) %>% # month() pulls the month out of datetime; lab = T returns the month name rather than its number
  group_by(which_month) %>%
  summarize(avg_temp = mean(temp_C_2m))
mloa3 %>% ggplot() +
  geom_point(aes(x = which_month, y = avg_temp), size = 3, color = "blue") +
  geom_line(aes(x = which_month, y = avg_temp))
#### Functions ####
# any operation that you want to perform more than once can become a function
log(5) # 5 is the argument of the function log
# Adds two numbers: takes `a` and `b` and returns their sum.
my_sum <- function(a, b) {
  total <- a + b
  total
}
my_sum(3, 7)
# Default values can be supplied for the arguments (a = 1, b = 2).
my_sum <- function(a = 1, b = 2) {
  a + b
}
my_sum()
# Convert a temperature in Kelvin (`a`) to Celsius by subtracting `b` = 273.15.
conv_temp <- function(a, b = 273.15) {
  a - b
}
conv_temp(100)
### Iterations ####
x <- 1:10
log(x)  # log() is vectorized: one call transforms every element of x
# for loops- will repeat code with a new starting value
for(i in 1:10) {
  print(i)
} # for each value i in the vector 1:10, do whatever is in the {}
# the loop body can contain any number of statements
for(i in 1:10){
  print(i)
  print(i^2)
}
# we can use the "i" value as an index into other vectors
for(i in 1:10){
  print(letters[i])    # letters is the built-in vector "a".."z"
  print(mtcars$wt[i])  # i-th car weight from the built-in mtcars data set
}
# make a results vector ahead of time (preallocating beats growing it in the loop)
results <- rep(NA, 10)
for(i in 1:10){
  results[i] <- letters[i]
}
| /scripts/week_8_class_code.R | no_license | gge-ucd/r-davis-in-class-mhogaz | R | false | false | 3,945 | r | library(lubridate)
## Week 8 class notes (lubridate is loaded just above): dates & datetimes with
## base R and lubridate, plus a dplyr/ggplot challenge on the Mauna Loa data.
library(tidyverse)
load("data/mauna_loa_met_2001_minute.rda")
# base R needs an explicit format string; lubridate infers it from the function name
as.Date("02-01-1998", format = "%m-%d-%Y")
mdy("02-01-1998")
# datetime data - POSIXct wants "year-month-day hour:min:sec"; use format if it is not this way
tm1 <- as.POSIXct("2016-07-24 23:55:26 PDT")
tm2 <- as.POSIXct("25072016 08:32:07", format = "%d%m%Y %H:%M:%S")
# can tell the time zone
tm3 <- as.POSIXct("2010-12-01 11:42:03", tz = "GMT")
# strptime is for specifying time zone and date format in the same call
# FIX: the original passed the base function `C` (the contrast-setting helper)
# to strptime instead of a datetime string, which is an error; use a literal
# string that matches the supplied format.
tm4 <- as.POSIXct(strptime("2016/07/24 23:55", format = "%Y/%m/%d %H:%M"), tz = "America/Los_Angeles")
tz(tm4)
Sys.timezone()
# you can do the same thing with lubridate
ymd_hm("2010-12-01 11:43", tz = "America/Los_Angeles")
nfy <- read_csv("data/2015_NFY_solinst.csv", skip = 12)
nfy2 <- read_csv("data/2015_NFY_solinst.csv", skip = 12, col_types = "ccidd") # col types: character, character, integer, double, double
nfy3 <- read_csv("data/2015_NFY_solinst.csv", skip = 12, col_types = cols(Date = col_date()))
glimpse(nfy3) # cols() lets you override just one column; everything else keeps the default guess
# create new col datetime; it will be added on to the end of the data frame
nfy$datetime <- paste(nfy$Date, " ", nfy$Time, sep = "")
# the datetime col must be parsed into a real datetime, not left as a character,
# otherwise R doesn't know what to do with it
# NOTE(review): this parses nfy$datetime but assigns into nfy2 -- both come from
# the same file so the lengths match, but confirm the nfy/nfy2 mix is intended.
nfy2$datetime <- ymd_hms(nfy$datetime, tz = "America/Los_Angeles")
summary(mloa_2001)
mloa_2001$datetime <- paste0(mloa_2001$year, "-", mloa_2001$month, "-", mloa_2001$day, "-", mloa_2001$hour24, ":", mloa_2001$min)
glimpse(mloa_2001)
mloa_2001$datetime <- ymd_hm(mloa_2001$datetime)
#### Challenge ####
# Challenge with dplyr & ggplot
# Remove the NA's (-99 and -999) in rel_humid, temp_C_2m, windSpeed_m_s
# Use dplyr to calculate the mean monthly temperature (temp_C_2m) using the datetime column (HINT: look at lubridate functions like month())
# Make a ggplot of the avg monthly temperature
# Make a ggplot of the daily average temperature for July (HINT: try yday() function with some summarize() in dplyr)
mloa_2001_sm <- mloa_2001 %>%
  filter(rel_humid != -99, rel_humid != -999) %>%
  filter(temp_C_2m != -99, temp_C_2m != -999) %>%
  filter(windSpeed_m_s != -99, windSpeed_m_s != -999)
glimpse(mloa_2001_sm)
mloa3 <- mloa_2001_sm %>%
  mutate(which_month = month(datetime, lab = T)) %>% # month() pulls the month out of datetime; lab = T returns the month name rather than its number
  group_by(which_month) %>%
  summarize(avg_temp = mean(temp_C_2m))
mloa3 %>% ggplot() +
  geom_point(aes(x = which_month, y = avg_temp), size = 3, color = "blue") +
  geom_line(aes(x = which_month, y = avg_temp))
#### Functions ####
# any operation that you want to perform more than once can become a function
log(5) # 5 is the argument of the function log
# Adds two numbers: takes `a` and `b` and returns their sum.
my_sum <- function(a, b) {
  total <- a + b
  total
}
my_sum(3, 7)
# Default values can be supplied for the arguments (a = 1, b = 2).
my_sum <- function(a = 1, b = 2) {
  a + b
}
my_sum()
# Convert a temperature in Kelvin (`a`) to Celsius by subtracting `b` = 273.15.
conv_temp <- function(a, b = 273.15) {
  a - b
}
conv_temp(100)
### Iterations ####
x <- 1:10
log(x)  # log() is vectorized: one call transforms every element of x
# for loops- will repeat code with a new starting value
for(i in 1:10) {
  print(i)
} # for each value i in the vector 1:10, do whatever is in the {}
# the loop body can contain any number of statements
for(i in 1:10){
  print(i)
  print(i^2)
}
# we can use the "i" value as an index into other vectors
for(i in 1:10){
  print(letters[i])    # letters is the built-in vector "a".."z"
  print(mtcars$wt[i])  # i-th car weight from the built-in mtcars data set
}
# make a results vector ahead of time (preallocating beats growing it in the loop)
results <- rep(NA, 10)
for(i in 1:10){
  results[i] <- letters[i]
}
|
##---------------------------------------------------------------------------
## Exploratory Analysis - Course Project
## Plot 5
##
## mmorales 10.12.2016
##---------------------------------------------------------------------------
## Answers question 5: how have emissions from motor vehicle sources changed
## from 1999-2008 in Baltimore City?  Reads the NEI RDS file, sums ON-ROAD
## PM2.5 emissions per year for fips 24510, and saves a labelled bar chart.
# NOTE(review): clearing the workspace/console and a hard-coded setwd() are
# machine-specific side effects that make the script non-portable.
rm(list = ls()); cat("\014")
setwd("C:/Users/mmora/OneDrive/061 Coursera/spec_DataScience/datascienceCoursera_4EDA/week4/CourseProject") #for Surface PC
url_data <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
# download.file(url_data, destfile = "./010_data/NEI_data.zip",method = "auto") # not needed to be done every time
# unzipp file
# unzip(zipfile="./010_data/NEI_data.zip",exdir="./010_data")
# Read data files
# read national emissions data
NEI <- readRDS("./010_data/summarySCC_PM25.rds")
# str(NEI)
# dim(NEI)
# head(NEI)
SCC <- readRDS("./010_data/Source_Classification_Code.rds")
# str(SCC)
# dim(SCC)
# head(SCC)
# NOTE(review): require() is meant for conditional loading; library() would
# fail loudly if either package were missing.
require(dplyr)
require(ggplot2)
##---------------------------------------------------------------------------
## Question 5
##
## How have emissions from motor vehicle sources changed from 1999-2008
## in Baltimore City?
##---------------------------------------------------------------------------
# Road related subset: ON-ROAD records for fips 24510 (Baltimore City per the title)
Mary.onroad <- subset(NEI, fips == 24510 & type == 'ON-ROAD')
# total emissions per year
Mary.em <- summarise(group_by(Mary.onroad, year), sumEmissions=sum(Emissions))
# factor with explicit levels so the x axis shows the four survey years in order
Mary.em$year <- factor(Mary.em$year, levels=c('1999', '2002', '2005', '2008'))
plot5 <- ggplot(data = Mary.em, aes(x = year, y = sumEmissions)) +
  geom_bar(stat="identity") +
  guides(fill = FALSE) +
  ggtitle('Total Emissions of Motor Vehicle Sources in Baltimore City, Maryland') +
  ylab(expression('PM'[2.5])) +
  xlab('Year') +
  theme(legend.position = 'none') +
  geom_text(aes(label = round(sumEmissions, 0), size = 1, hjust = 0.5, vjust = 2))
print(plot5)
# write the figure to disk; explicit size/dpi override the device defaults
ggsave("./020_figures/plot5.png", width=12.5*1.5, height=8.25*1.5, dpi=64)
| /plot5.r | no_license | moralmar/EDA_Assignment_Course_Project2 | R | false | false | 2,067 | r | ##---------------------------------------------------------------------------
## Exploratory Analysis - Course Project
## Plot 5
##
## mmorales 10.12.2016
##---------------------------------------------------------------------------
## Answers question 5: how have emissions from motor vehicle sources changed
## from 1999-2008 in Baltimore City?  Reads the NEI RDS file, sums ON-ROAD
## PM2.5 emissions per year for fips 24510, and saves a labelled bar chart.
# NOTE(review): clearing the workspace/console and a hard-coded setwd() are
# machine-specific side effects that make the script non-portable.
rm(list = ls()); cat("\014")
setwd("C:/Users/mmora/OneDrive/061 Coursera/spec_DataScience/datascienceCoursera_4EDA/week4/CourseProject") #for Surface PC
url_data <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
# download.file(url_data, destfile = "./010_data/NEI_data.zip",method = "auto") # not needed to be done every time
# unzipp file
# unzip(zipfile="./010_data/NEI_data.zip",exdir="./010_data")
# Read data files
# read national emissions data
NEI <- readRDS("./010_data/summarySCC_PM25.rds")
# str(NEI)
# dim(NEI)
# head(NEI)
SCC <- readRDS("./010_data/Source_Classification_Code.rds")
# str(SCC)
# dim(SCC)
# head(SCC)
# NOTE(review): require() is meant for conditional loading; library() would
# fail loudly if either package were missing.
require(dplyr)
require(ggplot2)
##---------------------------------------------------------------------------
## Question 5
##
## How have emissions from motor vehicle sources changed from 1999-2008
## in Baltimore City?
##---------------------------------------------------------------------------
# Road related subset: ON-ROAD records for fips 24510 (Baltimore City per the title)
Mary.onroad <- subset(NEI, fips == 24510 & type == 'ON-ROAD')
# total emissions per year
Mary.em <- summarise(group_by(Mary.onroad, year), sumEmissions=sum(Emissions))
# factor with explicit levels so the x axis shows the four survey years in order
Mary.em$year <- factor(Mary.em$year, levels=c('1999', '2002', '2005', '2008'))
plot5 <- ggplot(data = Mary.em, aes(x = year, y = sumEmissions)) +
  geom_bar(stat="identity") +
  guides(fill = FALSE) +
  ggtitle('Total Emissions of Motor Vehicle Sources in Baltimore City, Maryland') +
  ylab(expression('PM'[2.5])) +
  xlab('Year') +
  theme(legend.position = 'none') +
  geom_text(aes(label = round(sumEmissions, 0), size = 1, hjust = 0.5, vjust = 2))
print(plot5)
# write the figure to disk; explicit size/dpi override the device defaults
ggsave("./020_figures/plot5.png", width=12.5*1.5, height=8.25*1.5, dpi=64)
|
# Plot 1: histogram of Global Active Power for 1-2 Feb 2007 from the UCI
# household power consumption data.  Missing values are coded "?" in the file.
data<-read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
#Formating the date (stored as day/month/year in the raw file)
data$Date <- as.Date(data$Date, "%d/%m/%Y")
#Fetching data of two days
subsetdata<-subset(data, Date>=as.Date("1/2/2007","%d/%m/%Y") & Date<=as.Date("2/2/2007","%d/%m/%Y"))
#Plotting graph on device sceen
with(subsetdata, hist(Global_active_power, xlab = "Global Active Power (kilowatts)", col = "red", main = "Global Active Power"))
#Copy graph on png file
# NOTE(review): dev.copy() duplicates the screen device; opening png() before
# drawing gives more predictable sizing, but the copy approach also works.
dev.copy(png,"plot1.png", width = 480, height = 480)
dev.off()
| /Plot1.R | no_license | Syed-Shaheryar-Tirmizi/ExData_Plotting1 | R | false | false | 670 | r | data<-read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
#Formating the date (the data frame `data` is read in above; dates are day/month/year)
data$Date <- as.Date(data$Date, "%d/%m/%Y")
#Fetching data of two days (1-2 Feb 2007)
subsetdata<-subset(data, Date>=as.Date("1/2/2007","%d/%m/%Y") & Date<=as.Date("2/2/2007","%d/%m/%Y"))
#Plotting graph on device sceen
with(subsetdata, hist(Global_active_power, xlab = "Global Active Power (kilowatts)", col = "red", main = "Global Active Power"))
#Copy graph on png file
# NOTE(review): dev.copy() duplicates the screen device; opening png() before
# drawing gives more predictable sizing, but the copy approach also works.
dev.copy(png,"plot1.png", width = 480, height = 480)
dev.off()
|
# New Assignment for Coursera - Plotting - Plot 3
# Draws the three energy sub-metering series for 1-2 Feb 2007 into plot3.png.
setwd("~/Documents/Summer Rotation 2017/Coursera")
#Upload the data ("?" marks missing values in the raw file)
data <- read.table("~/Documents/Summer Rotation 2017/Coursera/household_power_consumption.txt", sep = ";", header = TRUE, na = "?")
# keep only the two target days, then stack them back together
subdata <- data[which(data$Date == "1/2/2007"), ]
subdata2 <- data[which(data$Date == "2/2/2007"), ]
subdata <- rbind(subdata, subdata2)
#Change the date and time variables to Date/Time classes
subdata$Date_Time <- paste(subdata$Date, subdata$Time)
subdata$Date_Time <- strptime(subdata$Date_Time, "%d/%m/%Y %H:%M:%S")
subdata$Date <- as.Date(subdata$Date, "%d/%m/%Y")
##Plot all three sub-metering channels on one set of axes, straight to a PNG device
png("plot3.png", width=400, height=400)
plot(subdata$Date_Time, subdata$Sub_metering_1, type="l", col="black", xlab="", ylab="Energy sub metering")
lines(subdata$Date_Time, subdata$Sub_metering_2, col="red")
lines(subdata$Date_Time, subdata$Sub_metering_3, col="blue")
legend("topright", col=c("black", "red", "blue"), c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, cex = 0.8)
dev.off()
| /plot3.R | no_license | bethanysump/ExData_Plotting1 | R | false | false | 1,038 | r | # New Assignment for Coursera - Plotting - Plot 3
# Plot 3: three energy sub-metering series for 1-2 Feb 2007, saved to plot3.png.
setwd("~/Documents/Summer Rotation 2017/Coursera")
#Upload the data ("?" marks missing values in the raw file)
data <- read.table("~/Documents/Summer Rotation 2017/Coursera/household_power_consumption.txt", sep = ";", header = TRUE, na = "?")
# keep only the two target days, then stack them back together
subdata <- data[which(data$Date == "1/2/2007"), ]
subdata2 <- data[which(data$Date == "2/2/2007"), ]
subdata <- rbind(subdata, subdata2)
#Change the date and time variables to Date/Time classes
subdata$Date_Time <- paste(subdata$Date, subdata$Time)
subdata$Date_Time <- strptime(subdata$Date_Time, "%d/%m/%Y %H:%M:%S")
subdata$Date <- as.Date(subdata$Date, "%d/%m/%Y")
##Plot all three sub-metering channels on one set of axes, straight to a PNG device
png("plot3.png", width=400, height=400)
plot(subdata$Date_Time, subdata$Sub_metering_1, type="l", col="black", xlab="", ylab="Energy sub metering")
lines(subdata$Date_Time, subdata$Sub_metering_2, col="red")
lines(subdata$Date_Time, subdata$Sub_metering_3, col="blue")
legend("topright", col=c("black", "red", "blue"), c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, cex = 0.8)
dev.off()
|
# ------------------------------- #
# Locally Weighted & MQE #
# ------------------------------- #
# LW.MQE: locally weighted minimum quantile-distance estimation for a
# log-linear model with (possibly) right-censored responses.
#
# Args:
#   Y0      - observed responses; sorted internally
#   Delta0  - censoring indicators (1 = event observed, 0 = censored)
#   Z0      - covariate matrix; an intercept column is prepended
#   maxiter - iteration cap for the uncensored permutation-refit loop
#   ep      - convergence tolerance on the change in mean squared error
# Returns: list(beta_est = coefficient estimate,
#               F_est    = ecdf of exp(Z %*% beta_est)).
# NOTE(review): the censored branch calls rq() (from the quantreg package)
# inside LCRQ(); quantreg must be attached by the caller.
LW.MQE = function(Y0, Delta0, Z0, maxiter=1000, ep=1e-4)
{
  # sort everything by the response so later index logic can assume order
  Y = sort(Y0);
  n = length(Y);
  Delta = Delta0[order(Y0)];
  Z = cbind( rep(1, n), matrix(Z0[order(Y0),], nrow=n) );   # prepend intercept column
  p = ncol(Z);
  beta_ols = lm(log(Y)~Z-1)$coefficients;   # OLS start values on the log scale
  if( sum(1-Delta) < 1 )
  {
    # No censored observations: iterate OLS refits on covariate rows permuted
    # by the current linear predictor until the MSE change drops below ep.
    #print("Censoring Rates equal to 0.");
    beta_0 = beta_ols;
    Converge=0; iter=1; error_0 = 0;  # NOTE(review): Converge is set but never returned
    while( iter <= maxiter )
    {
      bx = c( Z %*% beta_0 );
      Z.perm = matrix(Z[order(bx),], nrow=n);
      beta_new = lm(log(Y) ~ Z.perm-1)$coefficients;
      error = mean((log(Y) - Z.perm %*% beta_new )^2);
      if( abs(error_0 - error) < ep )
      {
        Converge = 1;
        break;
      }else{
        beta_0 = beta_new;
        error_0 = error;
        iter = iter + 1;
      }
    }
  } else {
    # number of censored observations (only referenced by the commented-out code below)
    n.c = sum(1 - Delta);
    # Y.e = Y[Delta == 1];
    # Y.c = Y[Delta == 0];
    # Y.r = Y; Y.r[Delta == 0] = max(Y)^{100};
    # -- Biquadratic Kernal Fun. -- #
    # bandwidth; the constant 80 is a hard-coded tuning choice
    h = 80* n^{-1/3}; # sd( log(Y) ) ; 50; 100;
    B_nk.f = function(x0, x, h, kernel.type="4th")
    {
      # the kernel weight function Bnk(x0, x), where x0 is a scalar, and x is a vector
      # returns a vector of weights normalised to sum to 1
      # h is the bandwidth
      if(!is.vector(x0)) x0 = as.vector(x0);
      xx = (x-x0)/h;
      if(kernel.type == "4th"){
        xx[abs(xx) >= 1] = 1;
        w = 15/16 * (1 - xx^{2})^{2}; #biquadratic kernel
      }
      w = w/sum(w);
      return(w);
    }
    Kernel.f = function(U0, U, h, kernel.type="4th")
    {
      # product kernel for multivariate covariates
      # U: n*k matrix
      # U0: 1*k matrix
      # return: K((U-U0)/h), normalised to sum to 1
      if(!is.vector(U0)) U0 = as.vector(U0);
      n = nrow(U);
      if(kernel.type=="4th"){
        tt = rbind(U, U0);
        tmp = apply(tt, 2, function(x) {
          B_nk.f(x0 = x[n+1], x = x[1:n], h = h, kernel.type = kernel.type)
        });
        tmp = apply(tmp, 1, prod);
        tmp = tmp/sum(tmp);
      }
      return(tmp);
    }
    F.T = function(y0, x0, y, x, delta, h, kernel.type = "4th")
    {
      # local (kernel-weighted) estimate of the conditional CDF
      # tau0(y0, x0) = F(T<y0|x0); so y0 is the C_i, and x0 is the xi in the paper
      # x0: k-dimensional covariate vector
      # y: n-vector of observed survival time = T^C
      # x: k-dimensional covariate matrix
      # delta: the censoring indicator function
      # h: bandwidth parameter
      n = length(y);
      # -- kernel weights -- ##
      p = qr(x)$rank;
      if(p >1) Bn = Kernel.f(U0=x0, U=x, h=h, kernel.type)
      else Bn = B_nk.f(x0=x0, x=x, h=h, kernel.type);
      if (y0 < max(y)) {
        # sort the data y, and the delta, Bn correspondingly to the order of sorted y
        y2 = sort(y);
        Order = order(y); # so z[Order] = z2
        Bn2 = Bn[Order];
        delta2 = delta[Order];
        eta = which(delta2==1 & y2<=y0); # the index of those observations satisfying delta2==1 & z2<=y0
        Bn3 = Bn2[n:1]; # change the order of Bn2, make the first obs of Bn2 to be the last of Bn3
        tmp = 1 - Bn2 /cumsum(Bn3)[n:1];
        out = 1 - prod(tmp[eta], na.rm=T); # na.rm=T, as some of those tmp=NA as the denom =0
      }
      else out = 1;
      return(out)
    }
    # ---------------------------------- #
    # conditional CDF of log(Y) evaluated at each observation's own covariates
    F_logY = rep(0, n);
    for(i in 1:n)
    {
      F_logY[i] = F.T(y0=log(Y[i]), x0=Z[i,, drop=FALSE], y=log(Y), x=Z, delta=Delta, h=h, kernel.type = "4th");
    }
    LCRQ <- function(y, delta, tau, kernel.type = "4th"){
      # Locally weighted censored quantile regression method
      # (intercept-only rq() fit; closes over F_logY computed above)
      # y is the observed survival time = min(T, C)
      # delta is the censoring indicator function with 1 standing for uncensored, and 0 censored
      # tau is the quantile level of interest
      n = length(y);
      ind = which(delta == 0);
      w = rep(1, n); # the weight vector
      if(length(ind) >= 1){
        for(i in 1:length(ind)){
          tau.star = F_logY[ind[i]];
          if (tau > tau.star) w[ind[i]] = (tau - tau.star) / (1-tau.star)
          else w[ind[i]] = 1;
        }
        # pseudo observations: redistribute the remaining mass of down-weighted
        # censored points to a value far beyond the data
        ind2 = which(w != 1);
        y.pse = rep(max(y)+100, length(ind2));
        yy = c(y, y.pse);
        ww = c(w, 1-w[ind2]);
      }
      else{
        yy = y;
        ww = w;
      }
      rq1 = rq(yy~1, weights=ww, tau=tau);   # quantreg::rq
      return(rq1$coeff);
    }
    # ---------------------------------- #
    #logY.pse = rep( max(log(Y))+100, n.c );
    #logYY = c( log(Y), logY.pse );
    # quantile levels at the uncensored observations, and the corresponding
    # censored-data quantile estimates of log(Y)
    tau_i = F_logY[Delta==1];
    Q_logY = rep(0, length(tau_i) );
    for( ti in 1:length(tau_i) )
    {
      Q_logY[ti] = LCRQ(y=log(Y), delta=Delta, tau=tau_i[ti], kernel.type = "4th");
    }
    # ---------------------------------- #
    # minimise the squared distance between the censored-data quantiles and the
    # quantiles of the fitted linear predictor (Nelder-Mead from the OLS start)
    beta_0 = beta_ols;
    Obj_fun <- function(x)
    {
      zx = Z %*% x;
      Q_zx = as.numeric( quantile(x=zx, probs=tau_i) );
      Obj = sum( (Q_logY - Q_zx)^2 );
      return(Obj)
    }
    beta_new = optim( par=beta_0, fn=Obj_fun, method="Nelder-Mead")$par; ##
  }
  # F_est = ecdf( c(Z %*% beta_New) );
  F_est = ecdf( exp(Z %*% beta_new) );
  return( list( beta_est = beta_new, F_est=F_est) );
}
| /R/LW_MQE.R | no_license | jiangqing123/MDSD | R | false | false | 5,321 | r |
# ------------------------------- #
# Locally Weighted & MQE #
# ------------------------------- #
# LW.MQE: locally weighted minimum quantile-distance estimation for a
# log-linear model with (possibly) right-censored responses.
#
# Args:
#   Y0      - observed responses; sorted internally
#   Delta0  - censoring indicators (1 = event observed, 0 = censored)
#   Z0      - covariate matrix; an intercept column is prepended
#   maxiter - iteration cap for the uncensored permutation-refit loop
#   ep      - convergence tolerance on the change in mean squared error
# Returns: list(beta_est = coefficient estimate,
#               F_est    = ecdf of exp(Z %*% beta_est)).
# NOTE(review): the censored branch calls rq() (from the quantreg package)
# inside LCRQ(); quantreg must be attached by the caller.
LW.MQE = function(Y0, Delta0, Z0, maxiter=1000, ep=1e-4)
{
  # sort everything by the response so later index logic can assume order
  Y = sort(Y0);
  n = length(Y);
  Delta = Delta0[order(Y0)];
  Z = cbind( rep(1, n), matrix(Z0[order(Y0),], nrow=n) );   # prepend intercept column
  p = ncol(Z);
  beta_ols = lm(log(Y)~Z-1)$coefficients;   # OLS start values on the log scale
  if( sum(1-Delta) < 1 )
  {
    # No censored observations: iterate OLS refits on covariate rows permuted
    # by the current linear predictor until the MSE change drops below ep.
    #print("Censoring Rates equal to 0.");
    beta_0 = beta_ols;
    Converge=0; iter=1; error_0 = 0;  # NOTE(review): Converge is set but never returned
    while( iter <= maxiter )
    {
      bx = c( Z %*% beta_0 );
      Z.perm = matrix(Z[order(bx),], nrow=n);
      beta_new = lm(log(Y) ~ Z.perm-1)$coefficients;
      error = mean((log(Y) - Z.perm %*% beta_new )^2);
      if( abs(error_0 - error) < ep )
      {
        Converge = 1;
        break;
      }else{
        beta_0 = beta_new;
        error_0 = error;
        iter = iter + 1;
      }
    }
  } else {
    # number of censored observations (only referenced by the commented-out code below)
    n.c = sum(1 - Delta);
    # Y.e = Y[Delta == 1];
    # Y.c = Y[Delta == 0];
    # Y.r = Y; Y.r[Delta == 0] = max(Y)^{100};
    # -- Biquadratic Kernal Fun. -- #
    # bandwidth; the constant 80 is a hard-coded tuning choice
    h = 80* n^{-1/3}; # sd( log(Y) ) ; 50; 100;
    B_nk.f = function(x0, x, h, kernel.type="4th")
    {
      # the kernel weight function Bnk(x0, x), where x0 is a scalar, and x is a vector
      # returns a vector of weights normalised to sum to 1
      # h is the bandwidth
      if(!is.vector(x0)) x0 = as.vector(x0);
      xx = (x-x0)/h;
      if(kernel.type == "4th"){
        xx[abs(xx) >= 1] = 1;
        w = 15/16 * (1 - xx^{2})^{2}; #biquadratic kernel
      }
      w = w/sum(w);
      return(w);
    }
    Kernel.f = function(U0, U, h, kernel.type="4th")
    {
      # product kernel for multivariate covariates
      # U: n*k matrix
      # U0: 1*k matrix
      # return: K((U-U0)/h), normalised to sum to 1
      if(!is.vector(U0)) U0 = as.vector(U0);
      n = nrow(U);
      if(kernel.type=="4th"){
        tt = rbind(U, U0);
        tmp = apply(tt, 2, function(x) {
          B_nk.f(x0 = x[n+1], x = x[1:n], h = h, kernel.type = kernel.type)
        });
        tmp = apply(tmp, 1, prod);
        tmp = tmp/sum(tmp);
      }
      return(tmp);
    }
    F.T = function(y0, x0, y, x, delta, h, kernel.type = "4th")
    {
      # local (kernel-weighted) estimate of the conditional CDF
      # tau0(y0, x0) = F(T<y0|x0); so y0 is the C_i, and x0 is the xi in the paper
      # x0: k-dimensional covariate vector
      # y: n-vector of observed survival time = T^C
      # x: k-dimensional covariate matrix
      # delta: the censoring indicator function
      # h: bandwidth parameter
      n = length(y);
      # -- kernel weights -- ##
      p = qr(x)$rank;
      if(p >1) Bn = Kernel.f(U0=x0, U=x, h=h, kernel.type)
      else Bn = B_nk.f(x0=x0, x=x, h=h, kernel.type);
      if (y0 < max(y)) {
        # sort the data y, and the delta, Bn correspondingly to the order of sorted y
        y2 = sort(y);
        Order = order(y); # so z[Order] = z2
        Bn2 = Bn[Order];
        delta2 = delta[Order];
        eta = which(delta2==1 & y2<=y0); # the index of those observations satisfying delta2==1 & z2<=y0
        Bn3 = Bn2[n:1]; # change the order of Bn2, make the first obs of Bn2 to be the last of Bn3
        tmp = 1 - Bn2 /cumsum(Bn3)[n:1];
        out = 1 - prod(tmp[eta], na.rm=T); # na.rm=T, as some of those tmp=NA as the denom =0
      }
      else out = 1;
      return(out)
    }
    # ---------------------------------- #
    # conditional CDF of log(Y) evaluated at each observation's own covariates
    F_logY = rep(0, n);
    for(i in 1:n)
    {
      F_logY[i] = F.T(y0=log(Y[i]), x0=Z[i,, drop=FALSE], y=log(Y), x=Z, delta=Delta, h=h, kernel.type = "4th");
    }
    LCRQ <- function(y, delta, tau, kernel.type = "4th"){
      # Locally weighted censored quantile regression method
      # (intercept-only rq() fit; closes over F_logY computed above)
      # y is the observed survival time = min(T, C)
      # delta is the censoring indicator function with 1 standing for uncensored, and 0 censored
      # tau is the quantile level of interest
      n = length(y);
      ind = which(delta == 0);
      w = rep(1, n); # the weight vector
      if(length(ind) >= 1){
        for(i in 1:length(ind)){
          tau.star = F_logY[ind[i]];
          if (tau > tau.star) w[ind[i]] = (tau - tau.star) / (1-tau.star)
          else w[ind[i]] = 1;
        }
        # pseudo observations: redistribute the remaining mass of down-weighted
        # censored points to a value far beyond the data
        ind2 = which(w != 1);
        y.pse = rep(max(y)+100, length(ind2));
        yy = c(y, y.pse);
        ww = c(w, 1-w[ind2]);
      }
      else{
        yy = y;
        ww = w;
      }
      rq1 = rq(yy~1, weights=ww, tau=tau);   # quantreg::rq
      return(rq1$coeff);
    }
    # ---------------------------------- #
    #logY.pse = rep( max(log(Y))+100, n.c );
    #logYY = c( log(Y), logY.pse );
    # quantile levels at the uncensored observations, and the corresponding
    # censored-data quantile estimates of log(Y)
    tau_i = F_logY[Delta==1];
    Q_logY = rep(0, length(tau_i) );
    for( ti in 1:length(tau_i) )
    {
      Q_logY[ti] = LCRQ(y=log(Y), delta=Delta, tau=tau_i[ti], kernel.type = "4th");
    }
    # ---------------------------------- #
    # minimise the squared distance between the censored-data quantiles and the
    # quantiles of the fitted linear predictor (Nelder-Mead from the OLS start)
    beta_0 = beta_ols;
    Obj_fun <- function(x)
    {
      zx = Z %*% x;
      Q_zx = as.numeric( quantile(x=zx, probs=tau_i) );
      Obj = sum( (Q_logY - Q_zx)^2 );
      return(Obj)
    }
    beta_new = optim( par=beta_0, fn=Obj_fun, method="Nelder-Mead")$par; ##
  }
  # F_est = ecdf( c(Z %*% beta_New) );
  F_est = ecdf( exp(Z %*% beta_new) );
  return( list( beta_est = beta_new, F_est=F_est) );
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## makeCacheMatrix: build a special "matrix" object that can cache its inverse.
## Returns a list of four closures sharing the matrix `x` and its cached
## inverse through the enclosing environment:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Write a short comment describing this function
## cacheSolve: return the inverse of the special "matrix" created by
## makeCacheMatrix. If the inverse is already cached it is returned directly;
## otherwise it is computed with solve(), stored in the cache, and returned.
##
## x   : object returned by makeCacheMatrix (must hold an invertible matrix)
## ... : further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  ## return the cached inverse when one exists
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("returning cached inverse")
    return(inv)
  }
  ## cache miss: compute the inverse and store it for next time
  data <- x$get()
  ## bug fix: forward ... to solve() -- it was accepted but silently dropped
  inv <- solve(data, ...)
  x$setinverse(inv)
  ## Return a matrix that is the inverse of 'x'
  inv
}
| /cachematrix.R | no_license | bobbyzhenova/ProgrammingAssignment2 | R | false | false | 1,109 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## makeCacheMatrix: wrap a matrix in a list of closures that cache its inverse
## in the enclosing environment (used together with cacheSolve below).
makeCacheMatrix <- function(x = matrix()) {
## initialize the cached inverse to NULL (nothing computed yet)
inv <- NULL
## replace the stored matrix and invalidate any previously cached inverse
set <- function(y) {
x <<- y
inv <<- NULL
}
## return the stored matrix
get <- function() x
## store a computed inverse in the cache
setinverse <- function(inverse) inv <<- inverse
## return the cached inverse (NULL until setinverse has been called)
getinverse <- function() inv
## return the list of accessor functions
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Write a short comment describing this function
## cacheSolve: return the inverse of the special "matrix" created by
## makeCacheMatrix above. Uses the cached inverse when available; otherwise
## computes it with solve(), stores it in the cache, and returns it.
##
## x   : object returned by makeCacheMatrix (must hold an invertible matrix)
## ... : further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  ## return the cached inverse when one exists
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("returning cached inverse")
    return(inv)
  }
  ## cache miss: compute the inverse and store it for next time
  data <- x$get()
  ## bug fix: forward ... to solve() -- it was accepted but silently dropped
  inv <- solve(data, ...)
  x$setinverse(inv)
  ## Return a matrix that is the inverse of 'x'
  inv
}
|
## ---- Data load, log-transform of target, train/validation split ------------
rm(list=ls())
dataset<-read.csv("datifinali.csv",sep=",")
# drop the first column (presumably a row id -- verify against the csv)
dataset<-dataset[,-1]
# rows with Target_cost_euro == 0 are the unlabeled test set to predict
dati<-dataset[dataset$Target_cost_euro>0,]
test<-dataset[dataset$Target_cost_euro==0,]
n<-nrow(dati)
###Log-trasformation of the output variable.
set.seed(123)
dati$Target_cost_euro<-log(dati$Target_cost_euro)
###division into train and validation set 90% and 10%
sel<-sample(1:n,size=round(n*0.1,0),replace=F)
train <-dati[-sel,]
validation<-dati[sel,]
set.seed(123)
# further 70/30 split of the training portion
index<-sample(1:dim(train)[1],0.7*dim(train)[1],replace=F)
new_train<-train[index,]
new_test<-train[-index,]
###First model: Random Forest
# NOTE(review): randomForest() is used below but library(randomForest) is
# never loaded in this script -- confirm it is attached elsewhere.
library(e1071)
model <- randomForest(Target_cost_euro ~ . ,data=new_train )
predictedY <- predict(model, new_test[,-1])
# NOTE(review): points() is called without a preceding plot() -- this fails
# unless a plotting device with an existing plot is already open.
points(new_test$Target_cost_euro, predictedY, col = "red", pch=4)
# MAE on the original (exponentiated) euro scale
error <- mean(abs(exp(new_test$Target_cost_euro) - exp(predictedY)))
error
summary(model)
##prediction from the first model.
predictedY2<- predict(model, test)
prevfin=exp(predictedY2)
# NOTE(review): Condition and TrainSet are not defined in this script --
# this looks like leftover example code; verify before running.
model1 <- randomForest(Condition ~ ., data = TrainSet, importance = TRUE)
model1
#####################
##second model: training set all dataset!
set.seed(123)
model <- randomForest(Target_cost_euro ~ . ,data=dati
) #default 500 tree
predictedY <- predict(model, new_test[,-1])
points(new_test$Target_cost_euro, predictedY, col = "red", pch=4)
#our error metric is MAE
error <- mean(abs(exp(new_test$Target_cost_euro) - exp(predictedY)))
error
summary(model)
predictedY2<- predict(model, test)
prevfin=exp(predictedY2)
# NOTE(review): `pred` is not defined anywhere above -- this line will error;
# presumably predictions left over from an earlier session. Verify.
previsioniensemble=(pred+prevfin)/2
write.csv(prevfin,"previsioniforest2.csv",row.names = F)
# boosting ----------------------------------------------------------------
### PREprocessing for XGB -----------------
# stack labeled and unlabeled rows (minus the target column) so the same
# transformations are applied to both
all=rbind(dati[,-1],test[,-1])
numericVars <- which(sapply(all, is.numeric))
numericVarNames <- names(numericVars)
DFnumeric <- all[, names(all) %in% numericVarNames]
DFfactors <- all[, !(names(all) %in% numericVarNames)]
DFfactors <- DFfactors[, names(DFfactors) ]
cat('There are', length(DFnumeric), 'numeric variables, and', length(DFfactors), 'factor variables')
# log1p-transform skewed numeric predictors (|skewness| > 0.8)
# NOTE(review): skew() is not base R -- presumably psych::skew or a similar
# helper; confirm which package provides it here.
for(i in 1:ncol(DFnumeric)){
if (abs(skew(DFnumeric[,i]))>0.8){
DFnumeric[,i] <- log(DFnumeric[,i] +1)
}
}
# center and scale numeric predictors (caret::preProcess)
PreNum <- preProcess(DFnumeric, method=c("center", "scale"))
print(PreNum)
DFnorm <- predict(PreNum, DFnumeric)
dim(DFnorm)
# one-hot encode the factor variables (no intercept column)
DFdummies <- as.data.frame(model.matrix(~.-1, DFfactors))
dim(DFdummies)
## Drop one-hot columns that carry no information:
##  - levels absent from the unlabeled test rows,
##  - levels absent from the labeled training rows,
##  - near-zero-variance dummies (fewer than 10 ones among training rows).
## bug fix: the original indexed rows with nrow(all[,]) (== nrow(all)), so the
## "test rows" range (N+1):N was a reversed 2-element range and the "train
## rows" range covered everything. Labeled rows are 1:nrow(dati), test rows
## (nrow(dati)+1):nrow(all).
## Also guard empty selections: DFdummies[,-integer(0)] would drop all columns.
ZerocolTest <- which(colSums(DFdummies[(nrow(dati)+1):nrow(all),])==0)
colnames(DFdummies[ZerocolTest])
if (length(ZerocolTest) > 0) DFdummies <- DFdummies[,-ZerocolTest]
ZerocolTrain <- which(colSums(DFdummies[1:nrow(dati),])==0)
colnames(DFdummies[ZerocolTrain])
if (length(ZerocolTrain) > 0) DFdummies <- DFdummies[,-ZerocolTrain]
fewOnes <- which(colSums(DFdummies[1:nrow(dati),])<10)
colnames(DFdummies[fewOnes])
if (length(fewOnes) > 0) DFdummies <- DFdummies[,-fewOnes] #removing predictors
dim(DFdummies)
combined <- cbind(DFnorm, DFdummies)
# NOTE(review): index was drawn on `train` (90% of dati), but here it splits
# `combined`, which stacks dati AND test rows -- the resulting train1/test1
# rows very likely do not line up with the labels used further below; verify.
train1 <- combined[-index,]
test1 <- combined[index,]
### Grid search of the hyperparameters
### that grid is the result of multiple tuning iteration. Searching for the optimal
### combination of hyperparameters.
set.seed(123)
xgb_grid = expand.grid(
nrounds = 500,
eta = c(0.1, 0.01),
max_depth = c(2, 3, 4),
gamma = 0,
colsample_bytree=1,
min_child_weight=c(3 ,5),
subsample=1
)
# caret-driven grid search over xgb_grid
# NOTE(review): my_control is only defined further down (lasso section);
# running this script top-to-bottom would fail here. Define it before use.
xgb_caret <- train(x=combined, y=dati$Target_cost_euro, method='xgbTree', trControl= my_control, tuneGrid=xgb_grid)
xgb_caret$bestTune
# NOTE(review): this random-forest re-fit appears unrelated to the XGB flow.
model <- randomForest(Target_cost_euro ~ . ,data=dati)
label_train <-dati$Target_cost_euro
# put our testing & training data into two separate DMatrix objects
# NOTE(review): hard-coded row counts (1:6773 labeled, 6774:9677 test) --
# keep these in sync with nrow(dati) and nrow(all).
dtrain <- xgb.DMatrix(data = as.matrix(combined[c(1:6773),]), label= label_train)
dtest <- xgb.DMatrix(data = as.matrix(combined[c(6774:9677),]))
default_param<-list(
objective = "reg:linear",
booster = "gbtree",
eta=0.01, #default = 0.3
gamma=0,
max_depth=3, #default=6
min_child_weight=1, #default=1
subsample=1,
colsample_bytree=1,
ntree=500)
##tuning of the hyperparameters by 5-fold cross-validation with early stopping
xgbcv <- xgb.cv( params = default_param, data = dtrain, nrounds = 2500, nfold = 5, showsd = T, stratified = T, print_every_n = 40, early_stopping_rounds = 10, maximize = F)
##best iteration found at 897
#train the model using the best iteration found by cross validation
xgb_mod <- xgb.train(data = dtrain, params=default_param, nrounds = 897)
XGBpred <- predict(xgb_mod, dtest)
predictions_XGB <- exp(XGBpred) #need to reverse the log to the real values
head(predictions_XGB)
## [1] 116386.8 162307.3 186494.0 187440.4 187258.3 166241.4
##Writing Prevision
write.table(predictions_XGB,file = "previsionixgb2.csv" ,row.names = FALSE, col.names = FALSE)
# bug fix: the original call was missing the comma between ntree=1000 and
# data=..., which is a syntax error.
# NOTE(review): Condition and TrainSet are not defined in this script -- this
# looks like leftover example code; verify before running.
model1 <- randomForest(Condition ~ ., ntree = 1000, data = TrainSet, importance = TRUE)
model1
#### Lasso prevision (glmnet via caret, alpha = 1, lambda grid)
set.seed(27042018)
# 3-fold CV control reused by the caret model fits in this script
my_control <-trainControl(method="cv", number=3)
lassoGrid <- expand.grid(alpha = 1, lambda = seq(0.001,0.1,by = 0.0005))
# NOTE(review): train1 = combined[-index,] mixes labeled and unlabeled rows
# while the response is dati$Target_cost_euro[index] -- the x/y rows very
# likely do not correspond; verify the intended split.
lasso_mod <- train(x=train1, y=dati$Target_cost_euro[index], method='glmnet', trControl= my_control, tuneGrid=lassoGrid)
lasso_mod$bestTune
min(lasso_mod$results$MAE)
lassoVarImp <- varImp(lasso_mod,scale=F)
lassoImportance <- lassoVarImp$importance
varsSelected <- length(which(lassoImportance$Overall!=0))
varsNotSelected <- length(which(lassoImportance$Overall==0))
cat('Lasso uses', varsSelected, 'variables in its model, and did not select', varsNotSelected, 'variables.')
LassoPred <- predict(lasso_mod, test1)
predictions_lasso <- exp(LassoPred) #need to reverse the log to the real values
head(predictions_lasso)
pred_lasso=as.integer(predictions_lasso)
# NOTE(review): this compares exponentiated predictions against log-scale
# targets -- the MAE printed here is not meaningful as written.
mean(abs(predictions_lasso-dati$Target_cost_euro[index]))
write.table(pred_lasso,file = "previsionilasso.csv" ,row.names = FALSE, col.names = FALSE)
### SVM prediction (linear kernel, cost C tuned over a grid)
grid <- expand.grid(C = c(0,0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 1, 1.25, 1.5, 1.75, 2,5))
set.seed(3233)
# NOTE(review): the response refers to all$SalePrice, which does not exist in
# this dataset (apparently copied from a house-prices script); it should
# presumably be the Target_cost_euro labels -- verify.
svm_Linear_Grid <- train(x=train1, y=all$SalePrice[!is.na(all$SalePrice)], method = "svmLinear",
trControl=my_control,
tuneGrid = grid,
tuneLength = 10)
svm_Linear_Grid$bestTune
# tuning results recorded from a previous run (best: C = 0.01):
#C=0.01
# C RMSE Rsquared MAE
# 0.00 NaN NaN NaN
# 0.01 0.1299583 0.8971963 0.08474574
# 0.05 0.1304852 0.8958769 0.08447978
# 0.10 0.1306073 0.8956993 0.08477223
# 0.25 0.1308595 0.8953571 0.08495476
# 0.50 0.1308635 0.8953413 0.08493712
# 0.75 0.1307871 0.8954637 0.08494924
# 1.00 0.1307713 0.8954820 0.08488513
# 1.25 0.1308028 0.8954352 0.08491083
# 1.50 0.1307469 0.8955460 0.08491518
# 1.75 0.1308088 0.8954248 0.08490545
# 2.00 0.1308118 0.8954155 0.08494978
# 5.00 0.1307613 0.8955063 0.08492209
plot(svm_Linear_Grid)
SVMpred <- predict(svm_Linear_Grid, test1)
predictions_svm <- exp(SVMpred)
write.table(predictions_svm,file = "previsionisvm.csv" ,row.names = FALSE, col.names = FALSE)
##weighted average of the three model predictions (65% XGB, 30% SVM, 5% lasso)
prevmedie=0.65*predictions_XGB+0.30*predictions_svm+0.05*predictions_lasso
write.table(prevmedie,file = "previsionimedie.csv" ,row.names = FALSE, col.names = FALSE)
#### Final Prediction --------------------------
# read back previously written predictions and blend 40% forest / 60% XGB
# NOTE(review): absolute Windows paths -- not portable; consider file.path().
previsionixgb2 <- read.table("C:/Users/federico/Desktop/Competizione/previsionixgb2.csv", quote="\"", comment.char="")
previsioniforest1200 <- read.csv("C:/Users/federico/Desktop/Competizione/previsioniforest1200.csv", sep="")
prevensemble1200<-previsioniforest1200*0.4+previsionixgb2*0.6
write.csv(prevensemble1200,"previsioniensemble1200.csv",row.names = F)
| /Prediction_file.R | no_license | FedericoMelograna/Hackaton_SUS5 | R | false | false | 7,691 | r | rm(list=ls())
dataset<-read.csv("datifinali.csv",sep=",")
dataset<-dataset[,-1]
dati<-dataset[dataset$Target_cost_euro>0,]
test<-dataset[dataset$Target_cost_euro==0,]
n<-nrow(dati)
###Log-trasformation of the output variable.
set.seed(123)
dati$Target_cost_euro<-log(dati$Target_cost_euro)
###division into train and validation set 90% and 10%
sel<-sample(1:n,size=round(n*0.1,0),replace=F)
train <-dati[-sel,]
validation<-dati[sel,]
set.seed(123)
index<-sample(1:dim(train)[1],0.7*dim(train)[1],replace=F)
new_train<-train[index,]
new_test<-train[-index,]
###First model: Random Forest
library(e1071)
model <- randomForest(Target_cost_euro ~ . ,data=new_train )
predictedY <- predict(model, new_test[,-1])
points(new_test$Target_cost_euro, predictedY, col = "red", pch=4)
error <- mean(abs(exp(new_test$Target_cost_euro) - exp(predictedY)))
error
summary(model)
##prediction from the first model.
predictedY2<- predict(model, test)
prevfin=exp(predictedY2)
model1 <- randomForest(Condition ~ ., data = TrainSet, importance = TRUE)
model1
#####################
##second model: training set all dataset!
set.seed(123)
model <- randomForest(Target_cost_euro ~ . ,data=dati
) #default 500 tree
predictedY <- predict(model, new_test[,-1])
points(new_test$Target_cost_euro, predictedY, col = "red", pch=4)
#our error metric is MAE
error <- mean(abs(exp(new_test$Target_cost_euro) - exp(predictedY)))
error
summary(model)
predictedY2<- predict(model, test)
prevfin=exp(predictedY2)
previsioniensemble=(pred+prevfin)/2
write.csv(prevfin,"previsioniforest2.csv",row.names = F)
# boosting ----------------------------------------------------------------
### PREprocessing for XGB -----------------
all=rbind(dati[,-1],test[,-1])
numericVars <- which(sapply(all, is.numeric))
numericVarNames <- names(numericVars)
DFnumeric <- all[, names(all) %in% numericVarNames]
DFfactors <- all[, !(names(all) %in% numericVarNames)]
DFfactors <- DFfactors[, names(DFfactors) ]
cat('There are', length(DFnumeric), 'numeric variables, and', length(DFfactors), 'factor variables')
for(i in 1:ncol(DFnumeric)){
if (abs(skew(DFnumeric[,i]))>0.8){
DFnumeric[,i] <- log(DFnumeric[,i] +1)
}
}
PreNum <- preProcess(DFnumeric, method=c("center", "scale"))
print(PreNum)
DFnorm <- predict(PreNum, DFnumeric)
dim(DFnorm)
DFdummies <- as.data.frame(model.matrix(~.-1, DFfactors))
dim(DFdummies)
ZerocolTest <- which(colSums(DFdummies[(nrow(all[,])+1):nrow(all),])==0)
colnames(DFdummies[ZerocolTest])
DFdummies <- DFdummies[,-ZerocolTest]
ZerocolTrain <- which(colSums(DFdummies[1:nrow(all[,]),])==0)
colnames(DFdummies[ZerocolTrain])
DFdummies <- DFdummies[,-ZerocolTrain]
fewOnes <- which(colSums(DFdummies[1:nrow(all[,]),])<10)
colnames(DFdummies[fewOnes])
DFdummies <- DFdummies[,-fewOnes] #removing predictors
dim(DFdummies)
combined <- cbind(DFnorm, DFdummies)
train1 <- combined[-index,]
test1 <- combined[index,]
### Grid search of the hyperparameters
### that grid is the result of multiple tuning iteration. Searching for the optimal
### combination of hyperparameters.
set.seed(123)
xgb_grid = expand.grid(
nrounds = 500,
eta = c(0.1, 0.01),
max_depth = c(2, 3, 4),
gamma = 0,
colsample_bytree=1,
min_child_weight=c(3 ,5),
subsample=1
)
xgb_caret <- train(x=combined, y=dati$Target_cost_euro, method='xgbTree', trControl= my_control, tuneGrid=xgb_grid)
xgb_caret$bestTune
model <- randomForest(Target_cost_euro ~ . ,data=dati)
label_train <-dati$Target_cost_euro
# put our testing & training data into two seperates Dmatrixs objects
dtrain <- xgb.DMatrix(data = as.matrix(combined[c(1:6773),]), label= label_train)
dtest <- xgb.DMatrix(data = as.matrix(combined[c(6774:9677),]))
default_param<-list(
objective = "reg:linear",
booster = "gbtree",
eta=0.01, #default = 0.3
gamma=0,
max_depth=3, #default=6
min_child_weight=1, #default=1
subsample=1,
colsample_bytree=1,
ntree=500)
##tuning ot the hyperaprameters
xgbcv <- xgb.cv( params = default_param, data = dtrain, nrounds = 2500, nfold = 5, showsd = T, stratified = T, print_every_n = 40, early_stopping_rounds = 10, maximize = F)
##best iteration found at 897
#train the model using the best iteration found by cross validation
xgb_mod <- xgb.train(data = dtrain, params=default_param, nrounds = 897)
XGBpred <- predict(xgb_mod, dtest)
predictions_XGB <- exp(XGBpred) #need to reverse the log to the real values
head(predictions_XGB)
## [1] 116386.8 162307.3 186494.0 187440.4 187258.3 166241.4
##Writing Prevision
write.table(predictions_XGB,file = "previsionixgb2.csv" ,row.names = FALSE, col.names = FALSE)
# bug fix: the original call was missing the comma between ntree=1000 and
# data=..., which is a syntax error.
# NOTE(review): Condition and TrainSet are not defined in this script -- this
# looks like leftover example code; verify before running.
model1 <- randomForest(Condition ~ ., ntree = 1000, data = TrainSet, importance = TRUE)
model1
#### Lasso prevision
set.seed(27042018)
my_control <-trainControl(method="cv", number=3)
lassoGrid <- expand.grid(alpha = 1, lambda = seq(0.001,0.1,by = 0.0005))
lasso_mod <- train(x=train1, y=dati$Target_cost_euro[index], method='glmnet', trControl= my_control, tuneGrid=lassoGrid)
lasso_mod$bestTune
min(lasso_mod$results$MAE)
lassoVarImp <- varImp(lasso_mod,scale=F)
lassoImportance <- lassoVarImp$importance
varsSelected <- length(which(lassoImportance$Overall!=0))
varsNotSelected <- length(which(lassoImportance$Overall==0))
cat('Lasso uses', varsSelected, 'variables in its model, and did not select', varsNotSelected, 'variables.')
LassoPred <- predict(lasso_mod, test1)
predictions_lasso <- exp(LassoPred) #need to reverse the log to the real values
head(predictions_lasso)
pred_lasso=as.integer(predictions_lasso)
mean(abs(predictions_lasso-dati$Target_cost_euro[index]))
write.table(pred_lasso,file = "previsionilasso.csv" ,row.names = FALSE, col.names = FALSE)
### SVM prediction
grid <- expand.grid(C = c(0,0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 1, 1.25, 1.5, 1.75, 2,5))
set.seed(3233)
svm_Linear_Grid <- train(x=train1, y=all$SalePrice[!is.na(all$SalePrice)], method = "svmLinear",
trControl=my_control,
tuneGrid = grid,
tuneLength = 10)
svm_Linear_Grid$bestTune
#C=0.01
# C RMSE Rsquared MAE
# 0.00 NaN NaN NaN
# 0.01 0.1299583 0.8971963 0.08474574
# 0.05 0.1304852 0.8958769 0.08447978
# 0.10 0.1306073 0.8956993 0.08477223
# 0.25 0.1308595 0.8953571 0.08495476
# 0.50 0.1308635 0.8953413 0.08493712
# 0.75 0.1307871 0.8954637 0.08494924
# 1.00 0.1307713 0.8954820 0.08488513
# 1.25 0.1308028 0.8954352 0.08491083
# 1.50 0.1307469 0.8955460 0.08491518
# 1.75 0.1308088 0.8954248 0.08490545
# 2.00 0.1308118 0.8954155 0.08494978
# 5.00 0.1307613 0.8955063 0.08492209
plot(svm_Linear_Grid)
SVMpred <- predict(svm_Linear_Grid, test1)
predictions_svm <- exp(SVMpred)
write.table(predictions_svm,file = "previsionisvm.csv" ,row.names = FALSE, col.names = FALSE)
##prev medie
prevmedie=0.65*predictions_XGB+0.30*predictions_svm+0.05*predictions_lasso
write.table(prevmedie,file = "previsionimedie.csv" ,row.names = FALSE, col.names = FALSE)
#### Final Prediction --------------------------
previsionixgb2 <- read.table("C:/Users/federico/Desktop/Competizione/previsionixgb2.csv", quote="\"", comment.char="")
previsioniforest1200 <- read.csv("C:/Users/federico/Desktop/Competizione/previsioniforest1200.csv", sep="")
prevensemble1200<-previsioniforest1200*0.4+previsionixgb2*0.6
write.csv(prevensemble1200,"previsioniensemble1200.csv",row.names = F)
|
# Auto-extracted example script for the `english` package: converting numbers
# to English words via english() / as.english().
library(english)
### Name: as.english
### Title: Generic functions and methods for S3 class english
### Aliases: as.english english english.numeric english.default
### english.english [.english rep.english format.english
### Keywords: arith
### ** Examples
english(1010, UK = FALSE) # US-style wording
english(1010, UK = TRUE) # UK-style wording
## The default UK setting will depend on the locale:
cat("\n", ifelse(grepl("^(en_us|english_united)",
tolower(Sys.getlocale("LC_CTYPE"))), "USA", "UK"),
"English is your default\n")
english(101) ## UK not given: deduced from locale
# arithmetic on english objects works elementwise and re-renders as words
as.english(10001001) + (-5):5
| /data/genthat_extracted_code/english/examples/as.english.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 594 | r | library(english)
### Name: as.english
### Title: Generic functions and methods for S3 class english
### Aliases: as.english english english.numeric english.default
### english.english [.english rep.english format.english
### Keywords: arith
### ** Examples
english(1010, UK = FALSE)
english(1010, UK = TRUE)
## The default UK setting will depend on the locale:
cat("\n", ifelse(grepl("^(en_us|english_united)",
tolower(Sys.getlocale("LC_CTYPE"))), "USA", "UK"),
"English is your default\n")
english(101) ## UK not given: deduced from locale
as.english(10001001) + (-5):5
|
# UI for the "Changes" tab: a data table of all claims whose paid or reported
# amounts changed since a selected prior valuation date, plus a filter box.
# NOTE(review): depends on `trans` and `ay_choices` defined elsewhere in the app.
tabItem(
tabName = "changes",
fluidRow(
# main panel: centered title + claims table
# (server side fills outputs "changes_title" and "changes_tbl")
box(
width = 9,
style = "overflow-x: auto;",
div(
style = "text-align: center;",
h2("All Claims with Changes in Paid or Reported"),
h3(textOutput("changes_title"))
),
dataTableOutput("changes_tbl")
),
# sidebar: filter controls
box(
width = 3,
title = "Filters",
# prior valuation date; defaults to one year before today
dateInput(
"val_date_prior",
"Prior Valuation Date",
value = Sys.Date() - lubridate::years(1),
min = min(trans$accident_date),
max = Sys.Date(),
startview = "decade"
),
# toggle between newly reported and existing claims (both on by default)
checkboxGroupButtons(
"changes_new",
"Claim Type",
choices = c("New", "Existing"),
selected = c("New", "Existing"),
justified = TRUE,
status = "primary",
checkIcon = list(
yes = icon("ok", lib = "glyphicon"),
no = icon("remove", lib = "glyphicon")
)
),
br(),
# accident-year multi-select with select-all/deselect-all actions
shinyWidgets::pickerInput(
inputId = "changes_ay",
label = "Accident Year",
choices = ay_choices,
options = list(`actions-box` = TRUE),
multiple = TRUE,
selected = ay_choices
)
)
)
)
| /basic-insurer-dashboard/ui/02-changes-ui.R | permissive | manniealfaro/shiny-insurance-examples | R | false | false | 1,203 | r | tabItem(
tabName = "changes",
fluidRow(
box(
width = 9,
style = "overflow-x: auto;",
div(
style = "text-align: center;",
h2("All Claims with Changes in Paid or Reported"),
h3(textOutput("changes_title"))
),
dataTableOutput("changes_tbl")
),
box(
width = 3,
title = "Filters",
dateInput(
"val_date_prior",
"Prior Valuation Date",
value = Sys.Date() - lubridate::years(1),
min = min(trans$accident_date),
max = Sys.Date(),
startview = "decade"
),
checkboxGroupButtons(
"changes_new",
"Claim Type",
choices = c("New", "Existing"),
selected = c("New", "Existing"),
justified = TRUE,
status = "primary",
checkIcon = list(
yes = icon("ok", lib = "glyphicon"),
no = icon("remove", lib = "glyphicon")
)
),
br(),
shinyWidgets::pickerInput(
inputId = "changes_ay",
label = "Accident Year",
choices = ay_choices,
options = list(`actions-box` = TRUE),
multiple = TRUE,
selected = ay_choices
)
)
)
)
|
## plot2: second plot of the EDA course assignment -- Global Active Power
## over 2007-02-01/02 as a line chart, written to plot2.png (480x480 px).
## Side effects only: downloads/unzips the UCI archive if needed, writes a PNG.
plot2 <- function() {
## download the raw data archive once
if (!file.exists('household.zip')) {
url <- "http://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip"
download.file(url, destfile = "household.zip")
}
## bug fix: the archive name was misspelled ("houseold.zip"), so unzip()
## could never find the file downloaded above
unzip("household.zip")
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")
data$DateTime <- strptime(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")
## keep only the two target days (2007-02-01 and 2007-02-02)
start <- which(data$DateTime == strptime("2007-02-01", "%Y-%m-%d"))
end <- which(data$DateTime == strptime("2007-02-02 23:59:00", "%Y-%m-%d %H:%M:%S"))
data2 <- data[start:end, ]
png(filename = "plot2.png", width = 480, height = 480)
## the column may be read as factor ("?" marks missing values), hence the
## as.numeric(as.character(...)) round-trip
plot(data2$DateTime, as.numeric(as.character(data2$Global_active_power)), type = 'l', ylab = "Global Active Power (Kilowatts)", xlab = "")
dev.off()
} | /plot2.R | no_license | ugomos/ExData_Plotting1 | R | false | false | 877 | r | plot2 <- function() {
## body of plot2(): download the UCI archive once, subset the two target days,
## and render Global Active Power over time to plot2.png (side effects only)
if (!file.exists('household.zip')) {
url <- "http://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip"
download.file(url, destfile = "household.zip")
}
## bug fix: the archive name was misspelled ("houseold.zip"), so unzip()
## could never find the file downloaded above
unzip("household.zip")
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")
data$DateTime <- strptime(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")
## keep only 2007-02-01 and 2007-02-02
start <- which(data$DateTime == strptime("2007-02-01", "%Y-%m-%d"))
end <- which(data$DateTime == strptime("2007-02-02 23:59:00", "%Y-%m-%d %H:%M:%S"))
data2 <- data[start:end, ]
png(filename = "plot2.png", width = 480, height = 480)
## the column may parse as factor ("?" marks NA), hence as.numeric(as.character())
plot(data2$DateTime, as.numeric(as.character(data2$Global_active_power)), type = 'l', ylab = "Global Active Power (Kilowatts)", xlab = "")
dev.off()
} |
getKullbackLeibler <- function(gen.distr, test.distr) {
  # Kullback-Leibler divergence D(test || gen), in bits, between two discrete
  # distributions supplied as (unnormalized) count/weight vectors over the
  # same support.
  #
  # gen.distr:  the distribution generated from the fitted model (reference Q)
  # test.distr: the empirical distribution of the test set (P)
  #
  # Returns a single non-negative number (Inf if the test distribution puts
  # mass on a category where the model puts none).
  # NOTE: This is used to test the divergence between two DISCRETE
  # distributions only.
  if (length(gen.distr) != length(test.distr)) {
    stop("Input distributions are not of equal length!")
  }
  gen.probDistr <- gen.distr / sum(gen.distr)
  test.probDistr <- test.distr / sum(test.distr)
  # bug fix: categories with test.probDistr == 0 contribute 0 by the standard
  # 0 * log(0) = 0 convention; the original produced NaN (0 * -Inf) for them,
  # which poisoned the whole sum.
  nonzero <- test.probDistr > 0
  KL <- sum(test.probDistr[nonzero] *
              log2(test.probDistr[nonzero] / gen.probDistr[nonzero]))
  return(KL)
}
| /code/util/getKullbackLeibler.R | no_license | nazhir/Project-2-Homophily | R | false | false | 627 | r | getKullbackLeibler <- function(gen.distr, test.distr) {
# Computes the KL divergence D(test || gen), in bits, between the two input
# discrete distributions, given as unnormalized count vectors over the same
# support.
# gen.distr: the distribution generated from the fitted model
# test.distr: the empirical distribution of the test set
# NOTE: This is used to test the divergence between two DISCRETE distributions only.
# NOTE(review): entries with test.probDistr == 0 yield 0 * -Inf = NaN here;
# by the usual convention such terms should contribute 0 -- confirm inputs
# are strictly positive before relying on this result.
if(length(gen.distr) != length(test.distr)) {
stop("Input distributions are not of equal length!")
}
gen.probDistr <- gen.distr / sum(gen.distr)
test.probDistr <- test.distr / sum(test.distr)
KL <- sum(test.probDistr * log2(test.probDistr / gen.probDistr))
return(KL)
|
# triangular_parameters_U: fit a triangular distribution to `data` (percentage
# rates) by minimum Cramer-von Mises distance and return the implied
# accumulation factors 1 + r/100 as c(min, mode, max).
# NOTE(review): depends on fitdistrplus and on a "triang" density/CDF family
# (dtriang/ptriang) being available in the session -- confirm which package
# supplies it (e.g. mc2d).
triangular_parameters_U=function(data){
# find the mode (most frequent value) of x
Mode <- function(x, na.rm = FALSE) {
if (na.rm) {
x = x[!is.na(x)]
}
ux <- unique(x)
return(ux[which.max(tabulate(match(x, ux)))])
}
# fit the triangular distribution by minimum goodness-of-fit (CvM statistic)
fCvM <-
fitdistrplus::fitdist(
data,
"triang",
method = "mge",
start = list(
min = min(data),
mode = Mode(data),
max = max(data)
),
gof = "CvM"
)
# estimated parameters reordered to (min, max, mode)
param=c(fCvM$estimate[1], fCvM$estimate[3], fCvM$estimate[2])
# convert percentage rates into accumulation factors 1 + r/100:
# a from min, b from mode (param[3]), c from max (param[2])
a=1+param[1]/100
b=1+param[3]/100
c=1+param[2]/100
return(c(a,b,c))
}
| /R/triangular_parameters_U.R | no_license | fabriziomaturo/AnnuityRIR | R | false | false | 710 | r | triangular_parameters_U=function(data){
# trovo la moda
Mode <- function(x, na.rm = FALSE) {
if (na.rm) {
x = x[!is.na(x)]
}
ux <- unique(x)
return(ux[which.max(tabulate(match(x, ux)))])
}
# fit della vc triangolare
fCvM <-
fitdistrplus::fitdist(
data,
"triang",
method = "mge",
start = list(
min = min(data),
mode = Mode(data),
max = max(data)
),
gof = "CvM"
)
# parametri della triangolare di U
param=c(fCvM$estimate[1], fCvM$estimate[3], fCvM$estimate[2])
a=1+param[1]/100
b=1+param[3]/100
c=1+param[2]/100
return(c(a,b,c))
}
|
#' @title Convert categorical numerics to factors
#'
#' @description Convert the specified columns of a dataframe to factors,
#' skipping columns that are missing from the dataframe or already factors.
#'
#' @param data Dataframe
#' @param categoricals Vector of column names that are categoricals.
#' @param verbose If TRUE display extra output during execution.
#'
#' @return Updated dataframe.
#'
#' @export
categoricals_to_factors = function(data, categoricals,
                                   verbose = FALSE) {
  for (var_name in categoricals) {
    # Skip names that are not columns of the dataframe.
    if (!var_name %in% colnames(data)) {
      if (verbose) {
        cat("Skipping", var_name, "- was not in the data frame.\n")
      }
      next
    }
    # Use [[]] to support tibbles.
    # Bug fix: use is.factor() instead of class(x) == "factor" -- the latter
    # yields a length-2 logical for ordered factors (class c("ordered",
    # "factor")), which if() rejects in modern R.
    if (is.factor(data[[var_name]])) {
      if (verbose) {
        cat("Skipping", var_name, "- already a factor.\n")
      }
      next
    }
    if (verbose) {
      cat("Converting", var_name, "from", class(data[[var_name]]), "to factor.")
      cat(" Unique vals:", length(unique(data[[var_name]])), "\n")
    }
    # Use [[]] to support tibbles.
    data[[var_name]] = as.factor(data[[var_name]])
  }
  return(data)
}
| /R/categoricals_to_factors.R | no_license | ck37/ck37r | R | false | false | 1,081 | r | #' @title Convert categorical numerics to factors
#'
#' @description Convert categorical numerics to factors
#'
#' @param data Dataframe
#' @param categoricals Vector of column names that are categoricals.
#' @param verbose If T display extra output during execution.
#'
#' @return Updated dataframe.
#'
#' @export
# Convert each named column of `data` to a factor, skipping absent columns
# and columns that are already factors. Returns the updated dataframe.
categoricals_to_factors = function(data, categoricals,
                                   verbose = FALSE) {
  for (var_name in categoricals) {
    # Skip names that are not columns of the dataframe.
    if (!var_name %in% colnames(data)) {
      if (verbose) {
        cat("Skipping", var_name, "- was not in the data frame.\n")
      }
      next
    }
    # Use [[]] to support tibbles.
    # Bug fix: is.factor() instead of class(x) == "factor" -- the latter is a
    # length-2 logical for ordered factors, which if() rejects in modern R.
    if (is.factor(data[[var_name]])) {
      if (verbose) {
        cat("Skipping", var_name, "- already a factor.\n")
      }
      next
    }
    if (verbose) {
      cat("Converting", var_name, "from", class(data[[var_name]]), "to factor.")
      cat(" Unique vals:", length(unique(data[[var_name]])), "\n")
    }
    # Use [[]] to support tibbles.
    data[[var_name]] = as.factor(data[[var_name]])
  }
  return(data)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/group_MBLT.R
\name{asynchronic_sky}
\alias{asynchronic_sky}
\alias{asynchronic_sky,data.frame-method}
\title{asynchronic sky}
\usage{
asynchronic_sky(block_info, path2sky_chunks, path4output, m, z, a)
\S4method{asynchronic_sky}{data.frame}(block_info, path2sky_chunks, path4output, m, z, a)
}
\arguments{
\item{block_info}{data.frame}
\item{path2sky_chunks}{character}
\item{path4output}{character}
\item{m}{BinImage}
\item{z}{ZenithImage}
\item{a}{AzimuthImage}
}
\description{
asynchronic sky.
}
\examples{
#TODO
}
| /man/asynchronic_sky.Rd | no_license | GastonMauroDiaz/caiman | R | false | true | 601 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/group_MBLT.R
\name{asynchronic_sky}
\alias{asynchronic_sky}
\alias{asynchronic_sky,data.frame-method}
\title{asynchronic sky}
\usage{
asynchronic_sky(block_info, path2sky_chunks, path4output, m, z, a)
\S4method{asynchronic_sky}{data.frame}(block_info, path2sky_chunks, path4output, m, z, a)
}
\arguments{
\item{block_info}{data.frame}
\item{path2sky_chunks}{character}
\item{path4output}{character}
\item{m}{BinImage}
\item{z}{ZenithImage}
\item{a}{AzimuthImage}
}
\description{
asynchronic sky.
}
\examples{
#TODO
}
|
# ---- DOGMA-seq DIG vs LLL: TSS enrichment plot + ADT ridge plots ------------
# Loads the two preprocessed Seurat objects, builds a combined TSS enrichment
# curve, then writes ridge plots of all ADT features (raw and log10 scale).
library(Seurat)
library(BuenColors)
library(data.table)
library(dplyr)
library(viridis)
setwd('~/RWorkSpace/CITE-seq/Duerr/DOGMA-seq/DIG_LLL/code/')
# each .RData file loads an object named `data`; capture then remove it
load("../data/DIG_data.RData")
dig <- data
load("../data/LLL_data.RData")
lll <- data
rm(data)
dig$orig.ident <- 'DIG'
lll$orig.ident <- 'LLL'
# use Signac/Seurat TSSPlot only to extract the per-position enrichment data
p1 <- TSSPlot(dig, group.by = 'orig.ident')
p2 <- TSSPlot(lll, group.by = 'orig.ident')
TSS <- p1$data
TSS <- rbind(TSS, p2$data)
# custom combined TSS enrichment figure (both conditions, no legend)
p1 <- ggplot(data = TSS, mapping = aes(x = position, y = norm.value, color = group))+
geom_line(stat = "identity", size = 0.2)+
labs(x = "Distance from TSS (bp)", y = "Mean TSS enrichment score", color = "") +
pretty_plot(fontsize = 7) + L_border() +
scale_color_manual(values = c("dodgerblue3", "firebrick")) +
ggtitle("TSS enrichment") +
theme(plot.title = element_text(hjust = 0.5), legend.position = 'none')
cowplot::ggsave2(p1, file = "../plots/TSS.pdf", width = 2, height = 2)
# merge the two conditions and plot ADT features as ridge plots,
# 25 features per page (163 features total -> 6 pages of 25 + one of 13)
data <- merge(dig, lll, add.cell.ids = c('DIG', 'LLL'))
Idents(data) <- 'orig.ident'
DefaultAssay(data) <- 'ADT'
pdf('../plots/ridge.pdf', width = 20, height = 20)
for (i in 1:6){
print(RidgePlot(data, features = rownames(data)[(i-1)*25 + 1:25], ncol = 5))
}
print(RidgePlot(data, features = rownames(data)[151:163], ncol = 5))
dev.off()
# repeat on log10(1 + counts), stored in the scale.data slot
data@assays$ADT@scale.data <- as.matrix(log10(1 + data@assays$ADT@counts))
pdf('../plots/ridge_log10.pdf', width = 20, height = 20)
for (i in 1:6){
print(RidgePlot(data, features = rownames(data)[(i-1)*25 + 1:25], ncol = 5, slot = 'scale.data'))
}
print(RidgePlot(data, features = rownames(data)[151:163], ncol = 5, slot = 'scale.data'))
dev.off() | /DIG_LLL/06_ridge.R | no_license | xzlandy/Benchmark_CITEseq_DOGMAseq | R | false | false | 1,617 | r | library(Seurat)
library(BuenColors)
library(data.table)
library(dplyr)
library(viridis)
setwd('~/RWorkSpace/CITE-seq/Duerr/DOGMA-seq/DIG_LLL/code/')
load("../data/DIG_data.RData")
dig <- data
load("../data/LLL_data.RData")
lll <- data
rm(data)
dig$orig.ident <- 'DIG'
lll$orig.ident <- 'LLL'
p1 <- TSSPlot(dig, group.by = 'orig.ident')
p2 <- TSSPlot(lll, group.by = 'orig.ident')
TSS <- p1$data
TSS <- rbind(TSS, p2$data)
p1 <- ggplot(data = TSS, mapping = aes(x = position, y = norm.value, color = group))+
geom_line(stat = "identity", size = 0.2)+
labs(x = "Distance from TSS (bp)", y = "Mean TSS enrichment score", color = "") +
pretty_plot(fontsize = 7) + L_border() +
scale_color_manual(values = c("dodgerblue3", "firebrick")) +
ggtitle("TSS enrichment") +
theme(plot.title = element_text(hjust = 0.5), legend.position = 'none')
cowplot::ggsave2(p1, file = "../plots/TSS.pdf", width = 2, height = 2)
data <- merge(dig, lll, add.cell.ids = c('DIG', 'LLL'))
Idents(data) <- 'orig.ident'
DefaultAssay(data) <- 'ADT'
pdf('../plots/ridge.pdf', width = 20, height = 20)
for (i in 1:6){
print(RidgePlot(data, features = rownames(data)[(i-1)*25 + 1:25], ncol = 5))
}
print(RidgePlot(data, features = rownames(data)[151:163], ncol = 5))
dev.off()
data@assays$ADT@scale.data <- as.matrix(log10(1 + data@assays$ADT@counts))
pdf('../plots/ridge_log10.pdf', width = 20, height = 20)
for (i in 1:6){
print(RidgePlot(data, features = rownames(data)[(i-1)*25 + 1:25], ncol = 5, slot = 'scale.data'))
}
print(RidgePlot(data, features = rownames(data)[151:163], ncol = 5, slot = 'scale.data'))
dev.off() |
# testthat suite for layer_hub(): loading a TF-Hub feature-vector module into
# keras models built three different ways. Requires network access to
# tfhub.dev and an installed keras/tensorflow backend; test_succeeds() comes
# from utils.R.
source("utils.R")
test_succeeds("layer_hub works with sequential models", {
library(keras)
# hub layer as the first layer of a sequential model (input_shape required)
model <- keras_model_sequential() %>%
layer_hub(
handle = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4",
input_shape = c(224, 224, 3)
) %>%
layer_dense(1)
# a single all-zero fake image batch of shape (1, 224, 224, 3)
a <- tf$constant(array(0, dim = c(1, 224, 224, 3)), dtype = "float32")
res <- as.numeric(model(a))
expect_is(res, "numeric")
})
test_succeeds("layer_hub works with functional API", {
# same module wired through the functional API (shape inferred from input)
input <- layer_input(shape = c(224, 224, 3))
output <- input %>%
layer_hub(
handle = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"
) %>%
layer_dense(1)
model <- keras_model(input, output)
a <- tf$constant(array(0, dim = c(1, 224, 224, 3)), dtype = "float32")
res <- as.numeric(model(a))
expect_is(res, "numeric")
})
# NOTE(review): "initialiaze" is a typo in the description string; left as-is
# here because it is a runtime string, but worth fixing upstream.
test_succeeds("can initialiaze the layer_hub", {
# the layer can also be created standalone and applied to a tensor later
features <- layer_hub(
handle = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"
)
input <- layer_input(shape = c(224, 224, 3))
output <- input %>%
features() %>%
layer_dense(1)
model <- keras_model(input, output)
a <- tf$constant(array(0, dim = c(1, 224, 224, 3)), dtype = "float32")
res <- as.numeric(model(a))
expect_is(res, "numeric")
})
| /tests/testthat/test-layer-hub.R | no_license | terrytangyuan/tfhub | R | false | false | 1,319 | r | source("utils.R")
# Exercises layer_hub() three ways: inside a sequential model, through the
# functional API, and as a stand-alone callable layer. Each test downloads a
# MobileNetV2 feature-vector module from TF Hub and checks that a forward
# pass on a zero tensor returns a plain numeric vector.
test_succeeds("layer_hub works with sequential models", {
library(keras)
model <- keras_model_sequential() %>%
layer_hub(
handle = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4",
input_shape = c(224, 224, 3)
) %>%
layer_dense(1)
a <- tf$constant(array(0, dim = c(1, 224, 224, 3)), dtype = "float32")
res <- as.numeric(model(a))
expect_is(res, "numeric")
})
# Functional API variant: the hub layer is applied to a symbolic input.
test_succeeds("layer_hub works with functional API", {
input <- layer_input(shape = c(224, 224, 3))
output <- input %>%
layer_hub(
handle = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"
) %>%
layer_dense(1)
model <- keras_model(input, output)
a <- tf$constant(array(0, dim = c(1, 224, 224, 3)), dtype = "float32")
res <- as.numeric(model(a))
expect_is(res, "numeric")
})
# Stand-alone variant: the layer object is created first, then called.
# NOTE(review): "initialiaze" is a typo for "initialize" in the test label.
test_succeeds("can initialiaze the layer_hub", {
features <- layer_hub(
handle = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"
)
input <- layer_input(shape = c(224, 224, 3))
output <- input %>%
features() %>%
layer_dense(1)
model <- keras_model(input, output)
a <- tf$constant(array(0, dim = c(1, 224, 224, 3)), dtype = "float32")
res <- as.numeric(model(a))
expect_is(res, "numeric")
})
|
library(stats)
library(tidyquant)
library(magrittr)
library(tidyverse)
library(corrplot)
library(corrr)
library(TTR)
library(ggjoy)
library(keras)
library(stringi)
options(scipen = 999)
# Log return of a series: log(x_t) - log(x_{t-1}).
# NOTE(review): `lag()` resolves to dplyr::lag when the tidyverse is
# attached (as in this profile); with only stats attached, stats::lag is
# used instead -- confirm which is intended before reusing elsewhere.
log_retorno <- function(x) {
  previous <- log(lag(x))
  log(x) - previous
}
# Append lagged copies of selected columns to `data`.
#
# `lags` is an integer vector of lag offsets and `...` selects the columns
# (tidyselect style). For each selected column `v` and each offset `k`, a
# new column `v_lag_<k zero-padded to 3 digits>` is bound to the right of
# `data`; the original columns are kept.
# NOTE(review): `funs()` is deprecated in dplyr >= 0.8 -- passing the named
# list of functions directly (or using across()) is the modern form; confirm
# the dplyr version pinned here before changing.
add_lags <- function(data, lags, ...) {
# Capture the selected column names as symbols so they can be spliced below.
vars <- map(names(data %>% select(...)), as.name)
# Suffixes like "lag_001", "lag_012" for the new column names.
fun_names <- stri_c("lag_", stri_pad_left(lags, 3, "0"))
# One partially-applied lag() per offset, named by its suffix.
lag_funs <- map(lags, ~ partial(lag, n = .x, .lazy = FALSE)) %>% set_names(fun_names)
# For each variable: compute all lags, rename to <var>_<suffix>, drop the
# original column, then bind everything back onto the input data.
vars %>%
map(~ data %>%
select(!!!.x) %>%
mutate_all(.funs = funs(!!!lag_funs)) %>%
set_names(c(first(names(.)), sprintf("%s_%s", first(names(.)), fun_names))) %>%
select(-!!!.x)) %>%
reduce(bind_cols, .init = data)
}
# Append leading (future-value) copies of selected columns to `data`.
#
# Mirror image of add_lags: `leads` gives the forward offsets and `...`
# selects the columns. New columns are named `v_lead_<offset zero-padded to
# 3 digits>`; positions past the end of the series are filled with 0
# (default = 0 in the partial lead() below), not NA.
# NOTE(review): `funs()` is deprecated in dplyr >= 0.8 -- see add_lags.
add_leads <- function(data, leads, ...) {
# Capture the selected column names as symbols for splicing.
vars <- map(names(data %>% select(...)), as.name)
fun_names <- stri_c("lead_", stri_pad_left(leads, 3, "0"))
# One partially-applied lead() per offset; missing tail values become 0.
lead_funs <- map(leads, ~ partial(lead, n = .x, default = 0, .lazy = FALSE)) %>% set_names(fun_names)
# Compute every lead per variable, rename, drop the original column, and
# bind all new columns onto the input data.
vars %>%
map(~ data %>%
select(!!!.x) %>%
mutate_all(.funs = funs(!!!lead_funs)) %>%
set_names(c(first(names(.)), sprintf("%s_%s", first(names(.)), fun_names))) %>%
select(-!!!.x)) %>%
reduce(bind_cols, .init = data)
}
| /.Rprofile | permissive | LazaroNacif/bitfinex | R | false | false | 1,274 | rprofile | library(stats)
library(tidyquant)
library(magrittr)
library(tidyverse)
library(corrplot)
library(corrr)
library(TTR)
library(ggjoy)
library(keras)
library(stringi)
options(scipen = 999)
# Log return: log(x_t) - log(x_{t-1}); relies on dplyr::lag via tidyverse.
log_retorno <- function(x) log(x) - log(lag(x))
# Append lagged copies (offsets in `lags`) of the columns selected by `...`;
# new columns are named <var>_lag_<offset zero-padded to 3 digits>.
# NOTE(review): funs() is deprecated in dplyr >= 0.8.
add_lags <- function(data, lags, ...) {
vars <- map(names(data %>% select(...)), as.name)
fun_names <- stri_c("lag_", stri_pad_left(lags, 3, "0"))
lag_funs <- map(lags, ~ partial(lag, n = .x, .lazy = FALSE)) %>% set_names(fun_names)
vars %>%
map(~ data %>%
select(!!!.x) %>%
mutate_all(.funs = funs(!!!lag_funs)) %>%
set_names(c(first(names(.)), sprintf("%s_%s", first(names(.)), fun_names))) %>%
select(-!!!.x)) %>%
reduce(bind_cols, .init = data)
}
# Same as add_lags but with leads (future values); positions past the end of
# the series are filled with 0 rather than NA.
add_leads <- function(data, leads, ...) {
vars <- map(names(data %>% select(...)), as.name)
fun_names <- stri_c("lead_", stri_pad_left(leads, 3, "0"))
lead_funs <- map(leads, ~ partial(lead, n = .x, default = 0, .lazy = FALSE)) %>% set_names(fun_names)
vars %>%
map(~ data %>%
select(!!!.x) %>%
mutate_all(.funs = funs(!!!lead_funs)) %>%
set_names(c(first(names(.)), sprintf("%s_%s", first(names(.)), fun_names))) %>%
select(-!!!.x)) %>%
reduce(bind_cols, .init = data)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hclate.R
\name{hclate}
\alias{hclate}
\title{Tests for Homogeneous Conditional Local Average Treatment Effects}
\usage{
hclate(out, delta, treat, inst, xvector, xpscore, b, cores = 1)
}
\arguments{
\item{out}{vector containing the outcome of interest}
\item{delta}{vector containing the censoring indicator (1 if observed, 0 if censored)}
\item{treat}{vector containing the treatment indicator (1 if treated, 0 if control)}
\item{inst}{vector containing the binary instrument}
\item{xvector}{matrix (or data frame) containing the conditioning covariates}
\item{xpscore}{matrix (or data frame) containing the covariates (and their
transformations) to be included in the propensity score estimation}
\item{b}{number of bootstrap draws}
\item{cores}{number of cores to use during the bootstrap (default is 1).
If cores>1, the bootstrap is conducted using parLapply, instead
of lapply type call.}
}
\value{
a list containing the Kolmogorov-Smirnov test statistic (kstest),
the Cramer-von Mises test statistic (cvmtest), and their associated
bootstrapped p-values, pvks and pvcvm, respectively.
}
\description{
\emph{hclate} computes Kolmogorov-Smirnov and Cramer-von Mises type tests
for the null hypothesis of homogeneous conditional local average treatment effects.
The test is suitable for both censored and uncensored outcomes, and relies on
the availability of a binary instrumental variable that satisfies additional assumptions.
For details of the testing procedure, see Sant'Anna (2016b), 'Nonparametric Tests for
Treatment Effect Heterogeneity with Duration Outcomes'.
}
| /man/hclate.Rd | no_license | pedrohcgs/kmte | R | false | true | 1,675 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hclate.R
\name{hclate}
\alias{hclate}
\title{Tests for Homogeneous Conditional Local Average Treatment Effects}
\usage{
hclate(out, delta, treat, inst, xvector, xpscore, b, cores = 1)
}
\arguments{
\item{out}{vector containing the outcome of interest}
\item{delta}{vector containing the censoring indicator (1 if observed, 0 if censored)}
\item{treat}{vector containing the treatment indicator (1 if treated, 0 if control)}
\item{inst}{vector containing the binary instrument}
\item{xvector}{matrix (or data frame) containing the conditioning covariates}
\item{xpscore}{matrix (or data frame) containing the covariates (and their
transformations) to be included in the propensity score estimation}
\item{b}{number of bootstrap draws}
\item{cores}{number of cores to use during the bootstrap (default is 1).
If cores>1, the bootstrap is conducted using parLapply, instead
of lapply type call.}
}
\value{
a list containing the Kolmogorov-Smirnov test statistic (kstest),
the Cramer-von Mises test statistic (cvmtest), and their associated
bootstrapped p-values, pvks and pvcvm, respectively.
}
\description{
\emph{hclate} computes Kolmogorov-Smirnov and Cramer-von Mises type tests
for the null hypothesis of homogeneous conditional local average treatment effects.
The test is suitable for both censored and uncensored outcomes, and relies on
the availability of a binary instrumental variable that satisfies additional assumptions.
For details of the testing procedure, see Sant'Anna (2016b), 'Nonparametric Tests for
Treatment Effect Heterogeneity with Duration Outcomes'.
}
|
#' Add a Big Figure to an RTF Document
#'
#' Add a big figure to an rtf (rich text format) document.
#' @param ...
#'   One or more character scalars (separated by commas) of text to use for
#'   the figure caption.
#' @param FIG
#'   A function to create a figure which will be added to the document,
#'   default \code{fig}.
#' @param rtf
#'   An rtf object, default \code{doc}.
#' @param figc
#'   Numeric scalar figure number to use in caption, default
#'   \code{GLFCenv$figcount}.
#' @param boldt
#'   Logical scalar indicating if figure number should use bold font,
#'   default TRUE.
#' @param w
#'   Numeric scalar width of figure in inches; if NULL (the default) the
#'   width is 9 for portrait pages and 15 for landscape pages.
#' @param h
#'   Numeric scalar height of figure in inches; if NULL (the default) the
#'   height is 14 for portrait pages and 8 for landscape pages.
#' @param rf
#'   Numeric scalar resolution of figure, default 300.
#' @param newpage
#'   Character scalar indicating if the figure should start on a new page in
#'   the document: "port" for a new portrait page,
#'   "land" for a new landscape page, and "none" for no new page (the default).
#' @param omi
#'   Numeric vector, length 4, width of document page margins in inches
#'   (bottom, left, top, right), default c(1, 1, 1, 1).
#' @return
#'   A 1 is added to the numeric vector of length 1, \code{GLFCenv$figcount},
#'   stored in the working directory to keep track of the number of
#'   figures written to the rtf document, and label the captions accordingly.
#' @details
#'   The figure and caption are written to the rtf file.
#'   The size of a new page is assumed to be 11 by 17 inches.
#' @seealso
#'   \code{\link{startrtf}} for an example, \code{\link{heading}},
#'   \code{\link{para}}, \code{\link{tabl}},
#'   \code{\link{endrtf}}, \code{\link[rtf]{RTF}}.
#' @import
#'   rtf
#' @export
figbig <- function(..., FIG=fig, rtf=doc, figc=GLFCenv$figcount, boldt=TRUE,
  w=NULL, h=NULL, rf=300, newpage="none", omi=c(1, 1, 1, 1)) {
  # Choose figure dimensions and insert any requested page break.
  # A landscape page gets a 15 x 8 in figure on a 17 x 11 in page; all other
  # cases (portrait break or no break) get 9 x 14 in. Explicit w/h override
  # the defaults in either orientation.
  if (newpage == "land") {
    wf <- if (is.null(w)) 15 else w
    hf <- if (is.null(h)) 8 else h
    addPageBreak(this=rtf, width=17, height=11, omi=omi)
  } else {
    wf <- if (is.null(w)) 9 else w
    hf <- if (is.null(h)) 14 else h
    if (newpage == "none") {
      # Stay on the current page; just add vertical space before the plot.
      addNewLine(this=rtf)
    }
    if (newpage == "port") {
      addPageBreak(this=rtf, width=11, height=17, omi=omi)
    }
  }
  # Write the plot, then a "Figure N." prefix (bold if requested) followed
  # by the caption text supplied in ...
  addPlot(this=rtf, plot.fun=FIG, width=wf, height=hf, res=rf)
  addNewLine(this=rtf)
  addNewLine(this=rtf)
  startParagraph(this=rtf)
  addText(this=rtf, paste0("Figure ", figc, ". "), bold=boldt)
  addText(this=rtf, ...)
  endParagraph(this=rtf)
  addNewLine(this=rtf)
  addNewLine(this=rtf)
  # Advance the shared figure counter for the next caption.
  GLFCenv$figcount <- figc + 1
}
| /R/figbig.R | no_license | JVAdams/GLFC | R | false | false | 2,673 | r | #' Add a Big Figure to an RTF Document
#'
#' Add a big figure to an rtf (rich text format) document.
#' @param ...
#'   One or more character scalars (separated by commas) of text to use for
#'   the figure caption.
#' @param FIG
#'   A function to create a figure which will be added to the document,
#'   default \code{fig}.
#' @param rtf
#'   An rtf object, default \code{doc}.
#' @param figc
#'   Numeric scalar figure number to use in caption, default
#'   \code{GLFCenv$figcount}.
#' @param boldt
#'   Logical scalar indicating if figure number should use bold font,
#'   default TRUE.
#' @param w
#'   Numeric scalar width of figure in inches; if NULL (the default) the
#'   width is 9 for portrait pages and 15 for landscape pages.
#' @param h
#'   Numeric scalar height of figure in inches; if NULL (the default) the
#'   height is 14 for portrait pages and 8 for landscape pages.
#' @param rf
#'   Numeric scalar resolution of figure, default 300.
#' @param newpage
#'   Character scalar indicating if the figure should start on a new page in
#'   the document: "port" for a new portrait page,
#'   "land" for a new landscape page, and "none" for no new page (the default).
#' @param omi
#'   Numeric vector, length 4, width of document page margins in inches
#'   (bottom, left, top, right), default c(1, 1, 1, 1).
#' @return
#'   A 1 is added to the numeric vector of length 1, \code{GLFCenv$figcount},
#'   stored in the working directory to keep track of the number of
#'   figures written to the rtf document, and label the captions accordingly.
#' @details
#'   The figure and caption are written to the rtf file.
#'   The size of a new page is assumed to be 11 by 17 inches.
#' @seealso
#'   \code{\link{startrtf}} for an example, \code{\link{heading}},
#'   \code{\link{para}}, \code{\link{tabl}},
#'   \code{\link{endrtf}}, \code{\link[rtf]{RTF}}.
#' @import
#'   rtf
#' @export
figbig <- function(..., FIG=fig, rtf=doc, figc=GLFCenv$figcount, boldt=TRUE,
  w=NULL, h=NULL, rf=300, newpage="none", omi=c(1, 1, 1, 1)) {
  # Choose figure dimensions and insert any requested page break.
  # A landscape page gets a 15 x 8 in figure on a 17 x 11 in page; all other
  # cases (portrait break or no break) get 9 x 14 in. Explicit w/h override
  # the defaults in either orientation.
  if (newpage == "land") {
    wf <- if (is.null(w)) 15 else w
    hf <- if (is.null(h)) 8 else h
    addPageBreak(this=rtf, width=17, height=11, omi=omi)
  } else {
    wf <- if (is.null(w)) 9 else w
    hf <- if (is.null(h)) 14 else h
    if (newpage == "none") {
      # Stay on the current page; just add vertical space before the plot.
      addNewLine(this=rtf)
    }
    if (newpage == "port") {
      addPageBreak(this=rtf, width=11, height=17, omi=omi)
    }
  }
  # Write the plot, then a "Figure N." prefix (bold if requested) followed
  # by the caption text supplied in ...
  addPlot(this=rtf, plot.fun=FIG, width=wf, height=hf, res=rf)
  addNewLine(this=rtf)
  addNewLine(this=rtf)
  startParagraph(this=rtf)
  addText(this=rtf, paste0("Figure ", figc, ". "), bold=boldt)
  addText(this=rtf, ...)
  endParagraph(this=rtf)
  addNewLine(this=rtf)
  addNewLine(this=rtf)
  # Advance the shared figure counter for the next caption.
  GLFCenv$figcount <- figc + 1
}
|
testlist <- list(barrier = 0, ben = numeric(0), fee = 0, penalty = numeric(0), spot = c(-3.30527801797248e+192, 6.42940184035092e-275, 9.62209200590935e-306, -1.55863471486258e-19, -2.46006311447769e+260, NaN, -9.95392084152739e+274, 1.24989158946124e-256, 8.31913223101273e-275, 6.98580480160483e-308, 8.59543569086148e-275, 1.06639293972827e-42, 4.34034435299089e-241, 8.30987209248078e-246, 7.2911223740412e-304, -1.77581797082783e-178, -1.70118003745273e+71, 8.31380547349016e-275, 8.3138050000614e-275, -1.06955045158314e+189, 9.77287341786447e-309, 3.0138004396316e-322, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(valuer::calc_account,testlist)
str(result) | /valuer/inst/testfiles/calc_account/libFuzzer_calc_account/calc_account_valgrind_files/1616985934-test.R | no_license | akhikolla/updatedatatype-list4 | R | false | false | 776 | r | testlist <- list(barrier = 0, ben = numeric(0), fee = 0, penalty = numeric(0), spot = c(-3.30527801797248e+192, 6.42940184035092e-275, 9.62209200590935e-306, -1.55863471486258e-19, -2.46006311447769e+260, NaN, -9.95392084152739e+274, 1.24989158946124e-256, 8.31913223101273e-275, 6.98580480160483e-308, 8.59543569086148e-275, 1.06639293972827e-42, 4.34034435299089e-241, 8.30987209248078e-246, 7.2911223740412e-304, -1.77581797082783e-178, -1.70118003745273e+71, 8.31380547349016e-275, 8.3138050000614e-275, -1.06955045158314e+189, 9.77287341786447e-309, 3.0138004396316e-322, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(valuer::calc_account,testlist)
str(result) |
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/query.allom.data.R
\name{AllomUnitCoef}
\alias{AllomUnitCoef}
\title{AllomUnitCoef}
\usage{
AllomUnitCoef(x, tp = NULL)
}
\arguments{
\item{x}{units: mm, cm, cm2, m, in, g, kg, lb, Mg}
\item{tp}{diameter type, leave NULL if DBH. Options: 'd.b.h.^2','cbh','crc'}
}
\description{
converts length units FROM cm TO specified units
converts mass units TO kg FROM specificed units
}
| /modules/allometry/man/AllomUnitCoef.Rd | permissive | davidjpmoore/pecan | R | false | false | 466 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/query.allom.data.R
\name{AllomUnitCoef}
\alias{AllomUnitCoef}
\title{AllomUnitCoef}
\usage{
AllomUnitCoef(x, tp = NULL)
}
\arguments{
\item{x}{units: mm, cm, cm2, m, in, g, kg, lb, Mg}
\item{tp}{diameter type, leave NULL if DBH. Options: 'd.b.h.^2','cbh','crc'}
}
\description{
converts length units FROM cm TO specified units
converts mass units TO kg FROM specified units
}
|
#------------------------------------------------------------------------------#
# TITLE: Create HTML files to review point ids within each observed pattern
# (N->N, T->N, etc)
# DATE: 20190511
# PROG: B Saul
# DESC:
#------------------------------------------------------------------------------#
library(leaflet)
library(mapview)
library(htmlwidgets)
# Load the validation points and assign a stable presentation order
# (1..n after sorting by point id); the order prefixes each HTML filename.
validation_points <- readRDS(here::here("data", "validation_study_20190511.rds")) %>%
ungroup() %>%
arrange(point) %>%
mutate(
validation_order = 1:n()
)
# Render one HTML review page per point; each page carries a link to the
# next point's page via the `next_link` template parameter.
# NOTE(review): `next_link` is only reassigned while i < n, so on the last
# iteration it still holds the value computed for i = n - 1 -- i.e. the
# final page links to itself. If a terminal page is wanted, add an else
# branch setting next_link to NULL/NA; confirm the template handles that.
for(i in 1:nrow(validation_points)){
if(i < nrow(validation_points)){
next_link <- sprintf("%s_%s.html",
validation_points[i + 1, "validation_order"],
validation_points[i + 1, "point"])
}
rmarkdown::render(
input = "R/validation_study_html_template.Rmd",
output_file = sprintf("%s_%s.html",
validation_points[i, "validation_order"],
validation_points[i, "point"]),
output_dir = here::here("study_data", "validation_study"),
params = list(point_data = validation_points[i, ],
next_link = next_link)
)
}
| /R/validation_study.R | no_license | bsaul/ocForestCover | R | false | false | 1,216 | r | #------------------------------------------------------------------------------#
# TITLE: Create HTML files to review point ids within each observed pattern
# (N->N, T->N, etc)
# DATE: 20190511
# PROG: B Saul
# DESC:
#------------------------------------------------------------------------------#
library(leaflet)
library(mapview)
library(htmlwidgets)
validation_points <- readRDS(here::here("data", "validation_study_20190511.rds")) %>%
ungroup() %>%
arrange(point) %>%
mutate(
validation_order = 1:n()
)
# Render one HTML review page per point; each page links to the next one.
# NOTE(review): `next_link` is only reassigned while i < n, so on the last
# iteration it retains the previous value and the final page links to
# itself -- confirm intended, or set next_link to NULL/NA in an else branch.
for(i in 1:nrow(validation_points)){
if(i < nrow(validation_points)){
next_link <- sprintf("%s_%s.html",
validation_points[i + 1, "validation_order"],
validation_points[i + 1, "point"])
}
rmarkdown::render(
input = "R/validation_study_html_template.Rmd",
output_file = sprintf("%s_%s.html",
validation_points[i, "validation_order"],
validation_points[i, "point"]),
output_dir = here::here("study_data", "validation_study"),
params = list(point_data = validation_points[i, ],
next_link = next_link)
)
}
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(markdown)
# UI definition: the sidebar collects trip details and car preferences;
# the main panel shows the table of matching cars rendered by the server.
shinyUI(navbarPage("Choose your next Mercedes-Benz",
  tabPanel("Home",
    sidebarLayout(
      sidebarPanel(
        helpText("Provide your trip detail and preferences on cars:"),
        numericInput('dis', 'Distance (in miles):', 50, min = 1, max = 1000),
        numericInput('cost', 'Gasoline Price (per gallon):', 2.41, min = 2, max = 4, step = 0.01),
        numericInput('gas', 'Maximum expenditure on gasoline:', 50, min = 1, max = 1000),
        checkboxGroupInput('cyl', 'Number of cylinders:', c("Four" = 4, "Six" = 6, "Eight" = 8), selected = c(4, 6, 8)),
        sliderInput('disp', 'Displacement', min = 70, max = 480, value = c(70, 480), step = 10),
        sliderInput('hp', 'Gross horsepower', min = 50, max = 340, value = c(50, 340), step = 10),
        checkboxGroupInput('am', 'Transmission:', c("Automatic" = 0, "Manual" = 1), selected = c(0, 1))
      ),
      mainPanel(
        dataTableOutput('table')
      )
    )
  ),
  tabPanel("About",
    # Fix: use a path relative to the app directory instead of the original
    # hard-coded absolute Windows user path, so the app runs when deployed.
    mainPanel(includeMarkdown("Readme.md"))
  )
))
| /ui.R | no_license | Aysegulzemnos/DataProductsShiny | R | false | false | 1,872 | r | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(markdown)
# UI definition: the sidebar collects trip details and car preferences;
# the main panel shows the table of matching cars rendered by the server.
shinyUI(navbarPage("Choose your next Mercedes-Benz",
  tabPanel("Home",
    sidebarLayout(
      sidebarPanel(
        helpText("Provide your trip detail and preferences on cars:"),
        numericInput('dis', 'Distance (in miles):', 50, min = 1, max = 1000),
        numericInput('cost', 'Gasoline Price (per gallon):', 2.41, min = 2, max = 4, step = 0.01),
        numericInput('gas', 'Maximum expenditure on gasoline:', 50, min = 1, max = 1000),
        checkboxGroupInput('cyl', 'Number of cylinders:', c("Four" = 4, "Six" = 6, "Eight" = 8), selected = c(4, 6, 8)),
        sliderInput('disp', 'Displacement', min = 70, max = 480, value = c(70, 480), step = 10),
        sliderInput('hp', 'Gross horsepower', min = 50, max = 340, value = c(50, 340), step = 10),
        checkboxGroupInput('am', 'Transmission:', c("Automatic" = 0, "Manual" = 1), selected = c(0, 1))
      ),
      mainPanel(
        dataTableOutput('table')
      )
    )
  ),
  tabPanel("About",
    # Fix: use a path relative to the app directory instead of the original
    # hard-coded absolute Windows user path, so the app runs when deployed.
    mainPanel(includeMarkdown("Readme.md"))
  )
))
|
\name{ GSE18520_eset }
\alias{ GSE18520_eset }
\docType{data}
\title{ A gene signature predictive for outcome in advanced ovarian cancer identifies a survival factor: microfibril-associated glycoprotein 2. }
\description{ Advanced stage papillary serous tumors of the ovary are responsible for the majority of ovarian cancer deaths, yet the molecular determinants modulating patient survival are poorly characterized. Here, we identify and validate a prognostic gene expression signature correlating with survival in a series of microdissected serous ovarian tumors. Independent evaluation confirmed the association of a prognostic gene microfibril-associated glycoprotein 2 (MAGP2) with poor prognosis, whereas in vitro mechanistic analyses demonstrated its ability to prolong tumor cell survival and stimulate endothelial cell motility and survival via the alpha(V)beta(3) integrin receptor. Increased MAGP2 expression correlated with microvessel density suggesting a proangiogenic role in vivo. Thus, MAGP2 may serve as a survival-associated target. }
\usage{data( GSE18520_eset )}
\format{
\preformatted{
experimentData(eset):
Experiment data
Experimenter name: Mok SC, Bonome T, Vathipadiekal V, Bell A, Johnson ME, Wong KK, Park DC, Hao K, Yip DK, Donninger H, Ozbun L, Samimi G, Brady J, Randonovich M, Pise-Masison CA, Barrett JC, Wong WH, Welch WR, Berkowitz RS, Birrer MJ.A gene signature predictive for outcome in advanced ovarian cancer identifies a survival factor: microfibril-associated glycoprotein 2. Cancer Cell. 2009 Dec 8; 16(6):521-32.
Laboratory: Mok, Birrer 2009
Contact information:
Title: A gene signature predictive for outcome in advanced ovarian cancer identifies a survival factor: microfibril-associated glycoprotein 2.
URL:
PMIDs: 19962670
Abstract: A 110 word abstract is available. Use 'abstract' method.
Information is available on: preprocessing
notes:
platform_title:
[HG-U133_Plus_2] Affymetrix Human Genome U133 Plus 2.0 Array
platform_shorttitle:
Affymetrix HG-U133Plus2
platform_summary:
hgu133plus2
platform_manufacturer:
Affymetrix|Operon
platform_distribution:
commercial|non-commercial
platform_accession:
GPL570|GPL9216
version:
2015-09-22 19:21:25
featureData(eset):
An object of class 'AnnotatedDataFrame'
featureNames: 1007_s_at 1053_at ... AFFX-HUMISGF3A/M97935_MB_at
(42447 total)
varLabels: probeset gene EntrezGene.ID best_probe
varMetadata: labelDescription
}}
\details{
\preformatted{
assayData: 42447 features, 63 samples
Platform type:
Overall survival time-to-event summary (in years):
Call: survfit(formula = Surv(time, cens) ~ -1)
10 observations deleted due to missingness
n events median 0.95LCL 0.95UCL
53.00 41.00 2.05 1.48 3.70
---------------------------
Available sample meta-data:
---------------------------
alt_sample_name:
Min. 1st Qu. Median Mean 3rd Qu. Max.
312.0 395.0 694.0 893.3 1040.0 2237.0
sample_type:
healthy tumor
10 53
histological_type:
ser NA's
53 10
primarysite:
ov
63
summarygrade:
high NA's
53 10
summarystage:
late NA's
53 10
tumorstage:
3 NA's
53 10
grade:
3 NA's
53 10
days_to_death:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
150 450 630 1212 1440 4500 10
vital_status:
deceased living NA's
41 12 10
debulking:
optimal
63
percent_normal_cells:
0
63
percent_stromal_cells:
0
63
percent_tumor_cells:
100
63
batch:
2004-03-12 2004-04-08 2004-04-09 2004-07-20 2004-08-12 2004-08-13 2004-09-30
20 6 9 11 10 1 6
uncurated_author_metadata:
title: Normal Ovary, 2008///geo_accession: GSM462643///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2008///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462643/GSM462643.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2061///geo_accession: GSM462644///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2061///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462644/GSM462644.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2064///geo_accession: GSM462645///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2064///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462645/GSM462645.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2085///geo_accession: GSM462646///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2085///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462646/GSM462646.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2225///geo_accession: GSM462647///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2225///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462647/GSM462647.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2226///geo_accession: GSM462648///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2226///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462648/GSM462648.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2228///geo_accession: GSM462649///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2228///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462649/GSM462649.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2230///geo_accession: GSM462650///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2230///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462650/GSM462650.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2234///geo_accession: GSM462651///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2234///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462651/GSM462651.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2237///geo_accession: GSM462652///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2237///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462652/GSM462652.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 1109///geo_accession: GSM461390///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 34 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 1109///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461390/GSM461390.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 1214///geo_accession: GSM461391///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 17///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 1214///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461391/GSM461391.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 1231///geo_accession: GSM461367///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 15///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 1231///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461367/GSM461367.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 1562///geo_accession: GSM461368///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 19 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 1562///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461368/GSM461368.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 1660///geo_accession: GSM461369///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 15 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 1660///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461369/GSM461369.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 1993///geo_accession: GSM461400///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 11 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 1993///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461400/GSM461400.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 312///geo_accession: GSM461379///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 48///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 312///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461379/GSM461379.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 317///geo_accession: GSM461348///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 150 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 317///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461348/GSM461348.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 321///geo_accession: GSM461380///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 45///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 321///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461380/GSM461380.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 324///geo_accession: GSM461373///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 59///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 324///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461373/GSM461373.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 332///geo_accession: GSM461349///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 7///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 332///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461349/GSM461349.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 345///geo_accession: GSM461392///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 18///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 345///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461392/GSM461392.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 349///geo_accession: GSM461350///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 144 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 349///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461350/GSM461350.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 351///geo_accession: GSM461351///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 142 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 351///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461351/GSM461351.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 358///geo_accession: GSM461393///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 21///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 358///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461393/GSM461393.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 367///geo_accession: GSM461381///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 16///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 367///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461381/GSM461381.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 377///geo_accession: GSM461374///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 12///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 377///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461374/GSM461374.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 380///geo_accession: GSM461375///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 57///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 380///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461375/GSM461375.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 386///geo_accession: GSM461352///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 95///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 386///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461352/GSM461352.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 388///geo_accession: GSM461353///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 132 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 388///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461353/GSM461353.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 389///geo_accession: GSM461354///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 13///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 389///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461354/GSM461354.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 394///geo_accession: GSM461382///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 16///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 394///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461382/GSM461382.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 396///geo_accession: GSM461376///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 21///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 396///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461376/GSM461376.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 402///geo_accession: GSM461355///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 8///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 402///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461355/GSM461355.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 410///geo_accession: GSM461356///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 150 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 410///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461356/GSM461356.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 412///geo_accession: GSM461357///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 113///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 412///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461357/GSM461357.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 434///geo_accession: GSM461358///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 72///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 434///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461358/GSM461358.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 443///geo_accession: GSM461377///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 111///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 443///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461377/GSM461377.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 461///geo_accession: GSM461394///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 32///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 461///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461394/GSM461394.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 467///geo_accession: GSM461359///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 11///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 467///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461359/GSM461359.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 477///geo_accession: GSM461383///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 21///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 477///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461383/GSM461383.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 486///geo_accession: GSM461395///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 21///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 486///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461395/GSM461395.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 629///geo_accession: GSM461360///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 50 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 629///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461360/GSM461360.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 631///geo_accession: GSM461396///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 30///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 631///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461396/GSM461396.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 656///geo_accession: GSM461384///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 25///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 656///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461384/GSM461384.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 662///geo_accession: GSM461370///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 23///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 662///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461370/GSM461370.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 692///geo_accession: GSM461397///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 35///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 692///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461397/GSM461397.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 694///geo_accession: GSM461385///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 33///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 694///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461385/GSM461385.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 702///geo_accession: GSM461361///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 13///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 702///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461361/GSM461361.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 714///geo_accession: GSM461362///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 8///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 714///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461362/GSM461362.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 715///geo_accession: GSM461386///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 33///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 715///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461386/GSM461386.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 718///geo_accession: GSM461398///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 26///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 718///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461398/GSM461398.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 744///geo_accession: GSM461378///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 14///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 744///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461378/GSM461378.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 765///geo_accession: GSM461363///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 12///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 765///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461363/GSM461363.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 778///geo_accession: GSM461399///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 16///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 778///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461399/GSM461399.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 780///geo_accession: GSM461364///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 11///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 780///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461364/GSM461364.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 786///geo_accession: GSM461387///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 48 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 786///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461387/GSM461387.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 794///geo_accession: GSM461388///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 15///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 794///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461388/GSM461388.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 799///geo_accession: GSM461365///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 5///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 799///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461365/GSM461365.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 800///geo_accession: GSM461371///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 36///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 800///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461371/GSM461371.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 872///geo_accession: GSM461366///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 9///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 872///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461366/GSM461366.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 934///geo_accession: GSM461372///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 36 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 934///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461372/GSM461372.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 970///geo_accession: GSM461389///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 18///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 970///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461389/GSM461389.CEL.gz///data_row_count: 54675
1
duplicates:
GSE18520.GSE18520_GSM462649
1
GSE18520.GSE18520_GSM462649///GSE18520.GSE18520_GSM462650
1
GSE18520.GSE18520_GSM462650
1
NA's
60
}}
\keyword{datasets}
| /man/GSE18520.Rd | no_license | bhklab/MetaGxData | R | false | false | 279,100 | rd | \name{ GSE18520_eset }
\alias{ GSE18520_eset }
\docType{data}
\title{ A gene signature predictive for outcome in advanced ovarian cancer identifies a survival factor: microfibril-associated glycoprotein 2. }
\description{ Advanced stage papillary serous tumors of the ovary are responsible for the majority of ovarian cancer deaths, yet the molecular determinants modulating patient survival are poorly characterized. Here, we identify and validate a prognostic gene expression signature correlating with survival in a series of microdissected serous ovarian tumors. Independent evaluation confirmed the association of a prognostic gene microfibril-associated glycoprotein 2 (MAGP2) with poor prognosis, whereas in vitro mechanistic analyses demonstrated its ability to prolong tumor cell survival and stimulate endothelial cell motility and survival via the alpha(V)beta(3) integrin receptor. Increased MAGP2 expression correlated with microvessel density suggesting a proangiogenic role in vivo. Thus, MAGP2 may serve as a survival-associated target. }
\usage{data( GSE18520_eset )}
\format{
\preformatted{
experimentData(eset):
Experiment data
Experimenter name: Mok SC, Bonome T, Vathipadiekal V, Bell A, Johnson ME, Wong KK, Park DC, Hao K, Yip DK, Donninger H, Ozbun L, Samimi G, Brady J, Randonovich M, Pise-Masison CA, Barrett JC, Wong WH, Welch WR, Berkowitz RS, Birrer MJ.A gene signature predictive for outcome in advanced ovarian cancer identifies a survival factor: microfibril-associated glycoprotein 2. Cancer Cell. 2009 Dec 8; 16(6):521-32.
Laboratory: Mok, Birrer 2009
Contact information:
Title: A gene signature predictive for outcome in advanced ovarian cancer identifies a survival factor: microfibril-associated glycoprotein 2.
URL:
PMIDs: 19962670
Abstract: A 110 word abstract is available. Use 'abstract' method.
Information is available on: preprocessing
notes:
platform_title:
[HG-U133_Plus_2] Affymetrix Human Genome U133 Plus 2.0 Array
platform_shorttitle:
Affymetrix HG-U133Plus2
platform_summary:
hgu133plus2
platform_manufacturer:
Affymetrix|Operon
platform_distribution:
commercial|non-commercial
platform_accession:
GPL570|GPL9216
version:
2015-09-22 19:21:25
featureData(eset):
An object of class 'AnnotatedDataFrame'
featureNames: 1007_s_at 1053_at ... AFFX-HUMISGF3A/M97935_MB_at
(42447 total)
varLabels: probeset gene EntrezGene.ID best_probe
varMetadata: labelDescription
}}
\details{
\preformatted{
assayData: 42447 features, 63 samples
Platform type:
Overall survival time-to-event summary (in years):
Call: survfit(formula = Surv(time, cens) ~ -1)
10 observations deleted due to missingness
n events median 0.95LCL 0.95UCL
53.00 41.00 2.05 1.48 3.70
---------------------------
Available sample meta-data:
---------------------------
alt_sample_name:
Min. 1st Qu. Median Mean 3rd Qu. Max.
312.0 395.0 694.0 893.3 1040.0 2237.0
sample_type:
healthy tumor
10 53
histological_type:
ser NA's
53 10
primarysite:
ov
63
summarygrade:
high NA's
53 10
summarystage:
late NA's
53 10
tumorstage:
3 NA's
53 10
grade:
3 NA's
53 10
days_to_death:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
150 450 630 1212 1440 4500 10
vital_status:
deceased living NA's
41 12 10
debulking:
optimal
63
percent_normal_cells:
0
63
percent_stromal_cells:
0
63
percent_tumor_cells:
100
63
batch:
2004-03-12 2004-04-08 2004-04-09 2004-07-20 2004-08-12 2004-08-13 2004-09-30
20 6 9 11 10 1 6
uncurated_author_metadata:
title: Normal Ovary, 2008///geo_accession: GSM462643///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2008///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462643/GSM462643.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2061///geo_accession: GSM462644///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2061///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462644/GSM462644.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2064///geo_accession: GSM462645///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2064///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462645/GSM462645.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2085///geo_accession: GSM462646///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2085///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462646/GSM462646.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2225///geo_accession: GSM462647///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2225///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462647/GSM462647.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2226///geo_accession: GSM462648///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2226///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462648/GSM462648.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2228///geo_accession: GSM462649///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2228///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462649/GSM462649.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2230///geo_accession: GSM462650///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2230///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462650/GSM462650.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2234///geo_accession: GSM462651///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2234///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462651/GSM462651.CEL.gz///data_row_count: 54675
1
title: Normal Ovary, 2237///geo_accession: GSM462652///status: Public on Oct 17 2009///submission_date: Oct 16 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: normal ovarian surface epithelium (OSE)///organism_ch1: Homo sapiens///characteristics_ch1: tissue: ovarian surface epithelium (OSE)///characteristics_ch1.1: ///characteristics_ch1.2: ///characteristics_ch1.3: ///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from normal ovary.///description.1: HOSE2237///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM462nnn/GSM462652/GSM462652.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 1109///geo_accession: GSM461390///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 34 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 1109///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461390/GSM461390.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 1214///geo_accession: GSM461391///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 17///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 1214///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461391/GSM461391.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 1231///geo_accession: GSM461367///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 15///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 1231///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461367/GSM461367.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 1562///geo_accession: GSM461368///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 19 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 1562///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461368/GSM461368.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 1660///geo_accession: GSM461369///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 15 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 1660///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461369/GSM461369.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 1993///geo_accession: GSM461400///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 11 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 1993///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461400/GSM461400.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 312///geo_accession: GSM461379///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 48///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 312///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461379/GSM461379.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 317///geo_accession: GSM461348///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 150 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 317///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461348/GSM461348.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 321///geo_accession: GSM461380///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 45///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 321///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461380/GSM461380.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 324///geo_accession: GSM461373///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 59///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 324///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461373/GSM461373.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 332///geo_accession: GSM461349///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 7///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 332///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461349/GSM461349.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 345///geo_accession: GSM461392///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 18///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 345///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461392/GSM461392.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 349///geo_accession: GSM461350///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 144 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 349///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461350/GSM461350.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 351///geo_accession: GSM461351///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 142 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 351///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461351/GSM461351.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 358///geo_accession: GSM461393///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 21///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 358///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461393/GSM461393.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 367///geo_accession: GSM461381///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 16///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 367///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461381/GSM461381.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 377///geo_accession: GSM461374///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 12///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 377///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461374/GSM461374.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 380///geo_accession: GSM461375///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 57///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 380///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461375/GSM461375.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 386///geo_accession: GSM461352///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 95///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 386///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461352/GSM461352.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 388///geo_accession: GSM461353///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 132 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 388///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461353/GSM461353.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 389///geo_accession: GSM461354///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 13///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 389///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461354/GSM461354.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 394///geo_accession: GSM461382///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 16///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 394///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461382/GSM461382.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 396///geo_accession: GSM461376///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 21///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 396///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461376/GSM461376.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 402///geo_accession: GSM461355///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 8///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 402///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461355/GSM461355.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 410///geo_accession: GSM461356///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 150 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 410///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461356/GSM461356.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 412///geo_accession: GSM461357///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 113///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 412///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461357/GSM461357.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 434///geo_accession: GSM461358///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 72///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 434///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461358/GSM461358.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 443///geo_accession: GSM461377///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 111///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 443///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461377/GSM461377.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 461///geo_accession: GSM461394///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 32///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 461///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461394/GSM461394.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 467///geo_accession: GSM461359///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 11///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 467///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461359/GSM461359.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 477///geo_accession: GSM461383///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 21///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 477///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461383/GSM461383.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 486///geo_accession: GSM461395///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 21///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 486///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461395/GSM461395.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 629///geo_accession: GSM461360///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 50 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 629///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461360/GSM461360.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 631///geo_accession: GSM461396///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 30///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 631///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461396/GSM461396.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 656///geo_accession: GSM461384///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 25///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 656///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461384/GSM461384.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 662///geo_accession: GSM461370///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 23///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 662///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461370/GSM461370.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 692///geo_accession: GSM461397///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 35///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 692///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461397/GSM461397.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 694///geo_accession: GSM461385///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 33///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 694///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461385/GSM461385.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 702///geo_accession: GSM461361///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 13///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 702///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461361/GSM461361.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 714///geo_accession: GSM461362///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 8///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 714///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461362/GSM461362.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 715///geo_accession: GSM461386///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 33///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 715///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461386/GSM461386.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 718///geo_accession: GSM461398///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 26///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 718///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461398/GSM461398.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 744///geo_accession: GSM461378///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 14///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 744///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461378/GSM461378.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 765///geo_accession: GSM461363///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 12///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 765///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461363/GSM461363.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 778///geo_accession: GSM461399///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 16///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 778///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461399/GSM461399.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 780///geo_accession: GSM461364///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 11///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 780///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461364/GSM461364.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 786///geo_accession: GSM461387///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 48 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 786///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461387/GSM461387.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 794///geo_accession: GSM461388///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 15///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 794///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461388/GSM461388.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 799///geo_accession: GSM461365///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 5///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 799///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461365/GSM461365.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 800///geo_accession: GSM461371///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 36///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 800///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461371/GSM461371.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 872///geo_accession: GSM461366///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 9///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 872///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461366/GSM461366.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 934///geo_accession: GSM461372///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 36 (A)///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 934///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. 
Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461372/GSM461372.CEL.gz///data_row_count: 54675
1
title: Ovarian Tumor, 970///geo_accession: GSM461389///status: Public on Oct 17 2009///submission_date: Oct 11 2009///last_update_date: Jan 19 2011///type: RNA///channel_count: 1///source_name_ch1: papillary serous ovarian adenocarcinoma///organism_ch1: Homo sapiens///characteristics_ch1: tissue: papillary serous ovarian adenocarcinoma///characteristics_ch1.1: tumor stage: late///characteristics_ch1.2: tumor grade: high///characteristics_ch1.3: surv data: 18///treatment_protocol_ch1: All specimens were subjected to laser-based microdissection and analyzed as pure, microdissected epithelial cell populations.///molecule_ch1: total RNA///extract_protocol_ch1: RNeasy Micro Kit according to the manufacturers protocol (Qiagen Inc., Valencia, CA).///label_ch1: biotin///label_protocol_ch1: Two rounds of amplification were completed according to standard Affymetrix Two-Cycle Amplification protocol using 25ng of total RNA.///taxid_ch1: 9606///hyb_protocol: A 15 microgram aliquot of amplified biotinylated RNA was hybridized to a Human U133 Plus 2.0 GeneChip array (Affymetrix, Santa Clara, CA).///scan_protocol: Arrays were scanned using the laser confocal GeneChip Scanner 3000 (Affymetrix).///description: Gene expression data from tumor.///description.1: 970///data_processing: Low-level analysis included array normalization and estimation of expression level using an invariant set of probe sets to adjust the overall signal level of the arrays to the same level. Next, a model-based PM-only approach established gene expression levels using dChip software. A modified semi-supervised method was applied in two stages: (1) supervised dimension reduction by fitting a univariate Cox model that included a jackknifing step to each gene, so only those genes with a consistently large Cox score were included in the signature. Among 200, 100, or 300 probe sets, 200 yielded an optimally sized predictor; (2) In stage 2, the dimensions of the data set were reduced from 200 to 5 by PC analysis. 
The number of PCs was set at 5 capturing 90% of the expression data. 4 or 6 PCs were investigated to see whether this parameter affected the results. A prediction model was then built using multivariate Cox regression, where independent variables included the first 5 PCs and debulking status.///data_processing.1: Standard leave-one-out validation evaluated the prediction model reserving one sample for testing, and using the remaining 52 patients to establish the prediction model following the method described above. The reserved patient had no contribution to the prediction model yielding 53 separate predicted hazards. Patients were equally divided into low and high-risk groups according to whether their hazard was less or greater than the sample median and were compared by Kaplan Meier analysis with a non-parametric log-rank test.///platform_id: GPL570///contact_name: Michael,,Birrer///contact_email: mbirrer@partners.org///contact_phone: 6177244800///contact_laboratory: Surgical Oncology Research Labs///contact_department: Medicine///contact_institute: MGH///contact_address: 70 Blossom Street///contact_city: Boston///contact_state: MA///contact_zip.postal_code: 2114///contact_country: USA///supplementary_file: ftp://ftp.ncbi.nih.gov/pub/geo/DATA/supplementary/samples/GSM461nnn/GSM461389/GSM461389.CEL.gz///data_row_count: 54675
1
duplicates:
GSE18520.GSE18520_GSM462649
1
GSE18520.GSE18520_GSM462649///GSE18520.GSE18520_GSM462650
1
GSE18520.GSE18520_GSM462650
1
NA's
60
}}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/display.R
\name{display}
\alias{display}
\alias{print_md}
\alias{print_html}
\alias{display.data.frame}
\alias{print_md.data.frame}
\alias{print_html.data.frame}
\title{Generic export of data frames into formatted tables}
\usage{
display(object, ...)
print_md(x, ...)
print_html(x, ...)
\method{display}{data.frame}(object, format = "markdown", ...)
\method{print_md}{data.frame}(x, ...)
\method{print_html}{data.frame}(x, ...)
}
\arguments{
\item{object, x}{A data frame.}
\item{...}{Arguments passed to other methods.}
\item{format}{String, indicating the output format. Can be \code{"markdown"} or \code{"html"}.}
}
\value{
Depending on \code{format}, either an object of class \code{gt_tbl}
or a character vector of class \code{knitr_kable}.
}
\description{
\code{display()} is a generic function to export data frames
into various table formats (like plain text, markdown, ...). \code{print_md()}
usually is a convenient wrapper for \code{display(format = "markdown")}.
Similarly, \code{print_html()} is a shortcut for \code{display(format = "html")}.
See the documentation for the specific objects' classes.
}
\examples{
display(iris[1:5, ])
}
| /man/display.Rd | no_license | cran/insight | R | false | true | 1,277 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/display.R
\name{display}
\alias{display}
\alias{print_md}
\alias{print_html}
\alias{display.data.frame}
\alias{print_md.data.frame}
\alias{print_html.data.frame}
\title{Generic export of data frames into formatted tables}
\usage{
display(object, ...)
print_md(x, ...)
print_html(x, ...)
\method{display}{data.frame}(object, format = "markdown", ...)
\method{print_md}{data.frame}(x, ...)
\method{print_html}{data.frame}(x, ...)
}
\arguments{
\item{object, x}{A data frame.}
\item{...}{Arguments passed to other methods.}
\item{format}{String, indicating the output format. Can be \code{"markdown"} or \code{"html"}.}
}
\value{
Depending on \code{format}, either an object of class \code{gt_tbl}
or a character vector of class \code{knitr_kable}.
}
\description{
\code{display()} is a generic function to export data frames
into various table formats (like plain text, markdown, ...). \code{print_md()}
usually is a convenient wrapper for \code{display(format = "markdown")}.
Similarly, \code{print_html()} is a shortcut for \code{display(format = "html")}.
See the documentation for the specific objects' classes.
}
\examples{
display(iris[1:5, ])
}
|
# Install any missing packages, then attach all of them.
list.of.pkgs <- c("readr","dplyr","zoo","ggplot2", "reshape2", "data.table",
"future","listenv","readxl","purrr","e1071" ,"rcompanion","tictoc")
new.pkgs <- list.of.pkgs[!(list.of.pkgs %in% installed.packages()[, "Package"])]
# Bug fix: install.pkgs() is not a real function; the base installer is
# install.packages(). The original line would error whenever a package
# was actually missing.
if (length(new.pkgs) > 0) { install.packages(new.pkgs) }
for (pkg in list.of.pkgs){ library(pkg, character.only = TRUE) }
#-------------------
#
# Read preprocessed P80 data sets
#
#-------------------
workdir= "Y:/proj/CFWI_WetlandStress/Update2018"
setwd(workdir)
# One P80 table per water management district (SFWMD, SWFWMD, SJRWMD).
SFWMD_P80b <- read_csv("./SFWMD/SFWMD_P80.csv")
SWFWMD_P80b <- read_csv("./SWFWMD/SWFWMD_P80.csv")
SJRWMD_P80b <- read_csv("./SJRWMD/SJRWMD_P80.csv")
# Stack the three districts and attach EMT identifiers, then persist the
# combined table for later use.
AllP80 <-bind_rows(SFWMD_P80b,SWFWMD_P80b,SJRWMD_P80b)
EMT_ID <- read_csv("EMT_ID.csv")
AllP80 <-merge(EMT_ID,AllP80)
write.csv(AllP80,file='AllP80.csv',row.names=FALSE)
# Class 1 wetland attributes (stress status, region, reference elevation).
Class1Wetlands <- read_excel("Class 1 Wetland Info for Analysis ALLv1.xlsx", na = "NA")
Class1P80 <-merge(Class1Wetlands,AllP80, by.x='CFCA/EMT ID', by.y='EMT_ID')
# Remove redundant 2006-2017_P80
# (the merge produced .x/.y duplicates of that column; keep the .x copy)
Class1P80$`2006-2017_P80.y`<-NULL
names(Class1P80)[names(Class1P80)=='2006-2017_P80.x']<-"2006-2017_P80"
names(Class1P80)
#-------------------
#
# Calculate thetas
#
#-------------------
# theta = wetland hydrologic index: edge reference elevation minus the P80
# water level for the chosen period-of-record rank.
thetas = data.frame()
strStr <- "Stress Status in 2018"
physStr <- "Physiographic Region"
physVec <-c("Plain","Ridge")
stressVec <- c("Stressed","Not Stressed")
ranks = "2009-2017_P80"
rankVec <- c( "2009-2017_P80" )
# One theta row per wetland per rank column (only one rank here).
for (ranks in rankVec) {
theta = Class1P80$"Edge Reference Elevation (ft NAVD 88)" - Class1P80[,ranks]
thetas = rbind(thetas,cbind.data.frame(EMT_ID=Class1P80$`CFCA/EMT ID`, rank=ranks,theta=as.numeric(theta)))
}
# Attach ID, stress status, and region columns; shorten the long names.
thetas <- merge(thetas,Class1P80[,c(1,3,4,5)], by.x='EMT_ID', by.y = "CFCA/EMT ID")
names(thetas)[names(thetas) == strStr] <-"Stress"
names(thetas)[names(thetas) == physStr] <-"phys"
#----------------------------------------------------------------------------
# Fs and Fu are fraction of stressed wetlands and unstressed wetlands Equations: 10 & 11
#----------------------------------------------------------------------------
thetas$Fu = NA
thetas$Fs = NA
thetas$mean = NA
thetas$sd = NA
# Current Observed Percentages
# (hard-coded observed fractions per region; earlier values in trailing comments)
thetas[thetas$phys=='Ridge',]$Fu <-0.606 # .65517
thetas[thetas$phys=='Ridge',]$Fs <-0.394 # .34483
thetas[thetas$phys=='Plain',]$Fu <-0.824 # .7742
thetas[thetas$phys=='Plain',]$Fs <-0.176 # .2258
# Reference table of stress-fraction scale factors by wetland class
# (kept for documentation; not used directly by the code below).
#Class 1
# SFsu = 1.0 SFus = 1.0
#Class 2
# SFsu = 1.0 SFus = 1.0
#Class 3
# phys Urban DisSim SHA sf_us sf_su SFus SFsu
#------ ---------- ------ ----- ----- ----- ----- -----
# Plain low 0.694 0.82 0.824 0.176 0.469 0.100
# Plain Mod & High 0.616 0.581 0.824 0.176 0.295 0.063
# Ridge All 0.671 1 0.581 0.419 0.390 0.281
# Placeholder for the (possibly log-) transformed theta filled in below.
thetas$theta.logN <-NA
#----------------------------------------------------------------------------
# transform data by subsets using:
# phys- Physiographic Region (Ridge or Plain)
# stress- Wetland Stress Status in 2018,
#----------------------------------------------------------------------------
# Fill theta.logN per region and compute per-(region, stress) mean/sd of the
# transformed values in place.
for (phys in physVec) {
#----------------------------------------------------------------------------
# "Not Stressed Plains" thetas are transformed with a log function
# to provide a more normal distribution
# "Ridge" thetas are copied in to fill the column
#----------------------------------------------------------------------------
# The +15 offset shifts thetas positive before log(); thetaSeq below runs
# from -15, so theta + 15 >= 0 on the tabulated grid.
if (phys == "Plain") {
thetas[thetas$phys ==phys & thetas$Stress == 'Not Stressed',]$theta.logN <-
log(thetas[thetas$phys == phys & thetas$Stress == 'Not Stressed',]$theta+15)
thetas[thetas$phys ==phys & thetas$Stress == 'Stressed',]$theta.logN <-
log(thetas[thetas$phys == phys & thetas$Stress == 'Stressed',]$theta+15)
# thetas[thetas$phys ==phys,]$theta.logN <-thetas[thetas$phys == phys,]$theta
}
else
{
# Ridge thetas are used untransformed.
thetas[thetas$phys ==phys ,]$theta.logN <- thetas[thetas$phys == phys ,]$theta
}
for (stress in stressVec) {
#----------------------------------------------------------------------------
# mean and sd are provided for probability density function for the selected
# physiographic region type and initial Stress Status in 2018
#----------------------------------------------------------------------------
# Every row of the subset gets the same group mean/sd (broadcast assignment).
thetas[thetas$Stress == stress & thetas$phys == phys, ]$mean <-
mean(thetas[thetas$Stress == stress & thetas$phys == phys, ]$theta.logN)
thetas[thetas$Stress == stress & thetas$phys == phys, ]$sd <-
sd(thetas[thetas$Stress == stress & thetas$phys == phys, ]$theta.logN)
}
}
# Tabulated hydrologic-index grid and candidate drawdown/recovery deltas.
thetaSeq <- seq(-15.0, 20, 0.1)
deltas <- seq(-10, 10, 0.1)
# Probability columns to be filled in later; phys is overwritten per region.
newColumns <- c('phys','Ppu','Ps','Pu','Pps','PpAll','PsiU','PsiS')
# Build an all-zero probability table for one physiographic region.
blank_region <- function(region) {
  df <- data.frame(theta = thetaSeq)
  df[newColumns] <- 0.0
  df$phys <- region
  df
}
Plain <- blank_region("Plain")
Ridge <- blank_region("Ridge")
# One row per (region, theta) combination.
Wetlands <- rbind(Plain, Ridge)
#----------------------------------------------------------------------------
# dnorm function returns probability from density function at each theta value Equations: 12 & 13
#
# The original code had four near-identical branches differing only in
# hard-coded (mean, sd) constants plus dead commented-out code; the constants
# are consolidated into one lookup table and the assignments done generically.
# NOTE(review): these fitted constants intentionally override the sample
# statistics stored in thetas$mean / thetas$sd above (see the commented-out
# max(...) lines in the original) -- confirm they are the intended values.
#----------------------------------------------------------------------------
distParams <- list(
  Plain = list("Stressed" = c(5.18, 1.75), "Not Stressed" = c(2.73, 0.95)),
  Ridge = list("Stressed" = c(7.86, 2.55), "Not Stressed" = c(3.42, 1.57))
)
for (phys in physVec) {
  # Regions without fitted parameters are skipped (matches the original
  # if/else-if structure, which assigned nothing for unknown regions).
  if (!phys %in% names(distParams)) next
  rows <- Wetlands$phys == phys
  for (stress in c("Stressed", "Not Stressed")) {
    Mean <- distParams[[phys]][[stress]][1]
    SD <- distParams[[phys]][[stress]][2]
    cat(paste(stress, phys, 'Mean=', round(Mean, 2), 'StdDev=', round(SD, 4)))
    cat('\n')
    # Stressed wetlands populate Ps; unstressed populate Pu.
    densCol <- if (stress == "Stressed") "Ps" else "Pu"
    Wetlands[[densCol]][rows] <- dnorm(Wetlands$theta[rows], Mean, SD)
  }
  #----------------------------------------------------------------------------
  # Pps and Ppu are Population-weighted contributions of stress and unstress
  # wetlands to the total population probability density of all wetlands at
  # each wetland hydrologic index (theta) Equations: 14 & 15
  #----------------------------------------------------------------------------
  # max() collapses the per-row constant Fu/Fs columns to their scalar value.
  Wetlands$Ppu[rows] <- Wetlands$Pu[rows] * max(thetas[thetas$phys == phys, ]$Fu)
  Wetlands$Pps[rows] <- Wetlands$Ps[rows] * max(thetas[thetas$phys == phys, ]$Fs)
  Wetlands$PpAll[rows] <- Wetlands$Ppu[rows] + Wetlands$Pps[rows]
  #----------------------------------------------------------------------------
  # PsiU and PsiS Probability weighted Cumulative Probability Equation 17 & 18
  #----------------------------------------------------------------------------
  Wetlands$PsiU[rows] <- Wetlands$Ppu[rows] / Wetlands$PpAll[rows]
  Wetlands$PsiS[rows] <- Wetlands$Pps[rows] / Wetlands$PpAll[rows]
}
write.csv(file = 'h:/Wetlands.csv', Wetlands)
#----------------------------------------------------------------------------
# Returns the stress-appropriate Psi value looked up from the global
# Wetlands table, keyed by physiographic region and hydrologic index.
#
# type - physiographic region ("Plain" or "Ridge"); also narrows the
# lookup so parallel workers touch disjoint subsets
# status - stress status ("Stressed" / "Not Stressed")
# hydIndex - hydrologic index theta (or a shifted theta + delta)
#
# Returns PsiU for unstressed, PsiS for stressed, NA when hydIndex is NA,
# and numeric(0) when hydIndex matches no tabulated theta.
#
# Fix: the original built the row mask as `phys mask (full length) & val ==
# round(subset theta)`, mixing vectors of different lengths and relying on
# silent recycling; it only worked because both regions share an identical
# theta grid. The mask is now computed over the full table. The result is
# also returned directly instead of invisibly via a trailing assignment.
#----------------------------------------------------------------------------
PsiVals <- function(type, status, hydIndex) {
  val <- round(hydIndex, 2)
  if (is.na(val)) {
    return(NA)
  }
  hit <- Wetlands$phys == type & round(Wetlands$theta, 2) == val
  if (status == 'Not Stressed') {
    Wetlands$PsiU[hit]
  } else if (status == 'Stressed') {
    Wetlands$PsiS[hit]
  } else {
    NA
  }
}
#----------------------------------------------------------------------------
# Vectorize function to work with dataframes input
# NOTE(review): Vectorize() returns a list when element results have
# differing lengths (e.g. a mix of scalars and numeric(0) lookups), which is
# why makeZetas() unlists one of its results -- verify both call sites.
#----------------------------------------------------------------------------
vPsiVals <- Vectorize(PsiVals)
#----------------------------------------------------------------------------
# Function used to calculate zetas
#
# Builds one data frame of zeta = 1 - Psi(theta + delta) / Psi(theta) for a
# single (region, stress) combination, over every tabulated theta (rows) and
# every candidate delta (columns). Reads globals: vdf (theta / theta+delta
# matrix), vPsiVals, Wetlands.
#----------------------------------------------------------------------------
makeZetas <- function(phys,stress,deltas,thetaSeq) {
# Column 1 holds theta itself; columns 2..(1+length(deltas)) hold zetas.
z = matrix(NA,length(thetaSeq),1+length(deltas))
z[,1] <- vdf[,1]
for (i in seq(2,1+length(deltas))){
# Psi at the shifted index (theta + delta) and at the base theta.
psiTheta2 <-unname(unlist(vPsiVals(phys,stress,vdf[,i])))
psiTheta1 <-unname(vPsiVals(phys,stress,vdf[,1]))
z[,i] = 1 - ( psiTheta2/psiTheta1)
# Clean up: NaN -> NA, negative zetas clamped to 0, zetas > 1 dropped.
z[is.nan(z[,i]) ,i] <- NA
# NOTE(review): after the NA substitution above, z[,i] < 0 can contain NA;
# logical-subscript assignment with NA normally errors in R ("NAs are not
# allowed in subscripted assignments") -- confirm this path has been
# exercised with out-of-range (NA) vdf entries.
z[z[,i]<0,i] <- 0
z[z[,i]>1,i] <- NA
}
# cbind() with the character phys/stress coerces every column to character;
# downstream consumers must re-convert numerics as needed.
StressZetas<- as.data.frame(cbind(phys,stress,z,
Wetlands[Wetlands$phys==phys,]$Ps,
Wetlands[Wetlands$phys==phys,]$Pu))
#deltaNames <- sprintf("delta_%s",deltas)
# Delta columns are named by their numeric delta values.
names(StressZetas) <- c("phys","stress","theta",deltas,"Ps","Pu")
cat(paste('Zetas Calculated for',stress, phys,'\n'))
return(StressZetas)
}
# Define matrix/dataframe for initial and examples of possible thetas:
# row = base theta from thetaSeq, columns = theta + each delta, with a
# leading column holding the base theta itself. Shifted values that fall
# outside the tabulated theta range are set to NA.
vdf = c()
for (x in thetaSeq) {
possibleThetas<- deltas+x
vdf<-c(vdf,possibleThetas)
}
# Reshape the flat vector (filled delta-fastest) and transpose so rows
# correspond to thetaSeq and columns to deltas.
dim(vdf)<-c(length(deltas),length(thetaSeq))
vdf <- t(vdf)
vdf[vdf< min(thetaSeq)]<-NA
vdf[vdf> max(thetaSeq)]<-NA
# Add theta column to beginning
vdf <-cbind(Wetlands[1:length(thetaSeq),]$theta,vdf)
# Loop controls and parallel backend for the zeta calculation below.
physVec <- c('Ridge','Plain')
stressVec <- c('Not Stressed','Stressed')
ix <- 0
# Fix: plan(multiprocess) is deprecated (and later removed) in the future
# package. On Windows -- which this script targets, given its drive-letter
# paths -- multiprocess always resolved to multisession, so this keeps the
# same behavior under the supported name.
plan(multisession)
# listenv collects the future results (%<-% assignments) by index.
data <- listenv()
#----------------------------------------------------------------------------
# Create zetas using multiprocessing functions
#----------------------------------------------------------------------------
tic("Calculate Zetas")
# One future per (region, stress) combination; %<-% launches them in
# parallel and as.list() below blocks until all four resolve.
for (phys in physVec){
for (stress in stressVec){
cat(paste(phys, stress, '\n'))
ix = ix + 1
data[[ix]] %<-% makeZetas(phys,stress,deltas,thetaSeq)
}
}
xdata <- as.list(data)
zetas<- do.call(rbind,xdata)
# Long format: one row per (region, stress, theta, delta) with NA zetas dropped.
zetaMelt <- melt(zetas,id=c("phys","stress","theta","Ps","Pu"),na.rm=T)
toc()
# Persist wide and long zeta tables plus the probability table.
write.csv(file='h:/ZetasHistorical.csv',zetas,row.names=FALSE)
write.csv(file='h:/ZetasMeltHistorical.csv',zetaMelt)
write.csv(file='h:/WetlandsHistorical.csv',Wetlands)
# Export transformed and raw thetas joined back to the wetland attributes.
wideTheta <- dcast(thetas,EMT_ID~rank,value.var='theta.logN',mean)
thetaEval <- merge(wideTheta,Class1P80[,c(1,3,4,5,11)], by.x='EMT_ID', by.y = "CFCA/EMT ID")
write.csv(file='h:/thetas4EvalHistorical.csv',thetaEval)
write.csv(file='h:/thetasTransformedHistorical.csv',thetas)
OrigwideTheta<- dcast(thetas,EMT_ID~rank,value.var='theta',mean)
OrigTheta <-merge(OrigwideTheta,Class1P80[,c(1,3,4,5,11)], by.x='EMT_ID', by.y = "CFCA/EMT ID")
workdir= "Y:/proj/CFWI_WetlandStress/Update2018"
| /ZetaCalcFor HistoricalData.R | no_license | KevinRodberg/CFWI-Wetlands-Stress-Update2018 | R | false | false | 12,137 | r | list.of.pkgs <- c("readr","dplyr","zoo","ggplot2", "reshape2", "data.table",
"future","listenv","readxl","purrr","e1071" ,"rcompanion","tictoc")
new.pkgs <- list.of.pkgs[!(list.of.pkgs %in% installed.packages()[, "Package"])]
if (length(new.pkgs)){ install.pkgs(new.pkgs) }
for (pkg in list.of.pkgs){ library(pkg,character.only = TRUE) }
#-------------------
#
# Read preprocessed P80 data sets
#
#-------------------
workdir= "Y:/proj/CFWI_WetlandStress/Update2018"
setwd(workdir)
SFWMD_P80b <- read_csv("./SFWMD/SFWMD_P80.csv")
SWFWMD_P80b <- read_csv("./SWFWMD/SWFWMD_P80.csv")
SJRWMD_P80b <- read_csv("./SJRWMD/SJRWMD_P80.csv")
AllP80 <-bind_rows(SFWMD_P80b,SWFWMD_P80b,SJRWMD_P80b)
EMT_ID <- read_csv("EMT_ID.csv")
AllP80 <-merge(EMT_ID,AllP80)
write.csv(AllP80,file='AllP80.csv',row.names=FALSE)
Class1Wetlands <- read_excel("Class 1 Wetland Info for Analysis ALLv1.xlsx", na = "NA")
Class1P80 <-merge(Class1Wetlands,AllP80, by.x='CFCA/EMT ID', by.y='EMT_ID')
# Remove redundant 2006-2017_P80
Class1P80$`2006-2017_P80.y`<-NULL
names(Class1P80)[names(Class1P80)=='2006-2017_P80.x']<-"2006-2017_P80"
names(Class1P80)
#-------------------
#
# Calculate thetas
#
#-------------------
thetas = data.frame()
strStr <- "Stress Status in 2018"
physStr <- "Physiographic Region"
physVec <-c("Plain","Ridge")
stressVec <- c("Stressed","Not Stressed")
ranks = "2009-2017_P80"
rankVec <- c( "2009-2017_P80" )
for (ranks in rankVec) {
theta = Class1P80$"Edge Reference Elevation (ft NAVD 88)" - Class1P80[,ranks]
thetas = rbind(thetas,cbind.data.frame(EMT_ID=Class1P80$`CFCA/EMT ID`, rank=ranks,theta=as.numeric(theta)))
}
thetas <- merge(thetas,Class1P80[,c(1,3,4,5)], by.x='EMT_ID', by.y = "CFCA/EMT ID")
names(thetas)[names(thetas) == strStr] <-"Stress"
names(thetas)[names(thetas) == physStr] <-"phys"
#----------------------------------------------------------------------------
# Fs and Fu are fraction of stressed wetlands and unstressed wetlands Equations: 10 & 11
#----------------------------------------------------------------------------
thetas$Fu = NA
thetas$Fs = NA
thetas$mean = NA
thetas$sd = NA
# Current Observed Percentages
thetas[thetas$phys=='Ridge',]$Fu <-0.606 # .65517
thetas[thetas$phys=='Ridge',]$Fs <-0.394 # .34483
thetas[thetas$phys=='Plain',]$Fu <-0.824 # .7742
thetas[thetas$phys=='Plain',]$Fs <-0.176 # .2258
#Class 1
# SFsu = 1.0 SFus = 1.0
#Class 2
# SFsu = 1.0 SFus = 1.0
#Class 3
# phys Urban DisSim SHA sf_us sf_su SFus SFsu
#------ ---------- ------ ----- ----- ----- ----- -----
# Plain low 0.694 0.82 0.824 0.176 0.469 0.100
# Plain Mod & High 0.616 0.581 0.824 0.176 0.295 0.063
# Ridge All 0.671 1 0.581 0.419 0.390 0.281
thetas$theta.logN <-NA
#----------------------------------------------------------------------------
# transform data by subsets using:
# phys- Physiographic Region (Ridge or Plain)
# stress- Wetland Stress Status in 2018,
#----------------------------------------------------------------------------
for (phys in physVec) {
#----------------------------------------------------------------------------
# "Not Stressed Plains" thetas are transformed with a log function
# to provide a more normal distribution
# "Ridge" thetas are copied in to fill the column
#----------------------------------------------------------------------------
if (phys == "Plain") {
# The +15 offset shifts Plain thetas positive before taking the log;
# assumes every Plain theta exceeds -15 -- TODO confirm against data
thetas[thetas$phys ==phys & thetas$Stress == 'Not Stressed',]$theta.logN <-
log(thetas[thetas$phys == phys & thetas$Stress == 'Not Stressed',]$theta+15)
thetas[thetas$phys ==phys & thetas$Stress == 'Stressed',]$theta.logN <-
log(thetas[thetas$phys == phys & thetas$Stress == 'Stressed',]$theta+15)
# thetas[thetas$phys ==phys,]$theta.logN <-thetas[thetas$phys == phys,]$theta
}
else
{
# Ridge thetas are used untransformed
thetas[thetas$phys ==phys ,]$theta.logN <- thetas[thetas$phys == phys ,]$theta
}
for (stress in stressVec) {
#----------------------------------------------------------------------------
# mean and sd are provided for probability density function for the selected
# physiographic region type and initial Stress Status in 2018
#----------------------------------------------------------------------------
# Every row in the (stress, phys) subset receives the same summary value
thetas[thetas$Stress == stress & thetas$phys == phys, ]$mean <-
mean(thetas[thetas$Stress == stress & thetas$phys == phys, ]$theta.logN)
thetas[thetas$Stress == stress & thetas$phys == phys, ]$sd <-
sd(thetas[thetas$Stress == stress & thetas$phys == phys, ]$theta.logN)
}
}
# Lookup grid: candidate theta values (0.1 steps) and candidate shifts
thetaSeq<-seq(-15.0,20,.1)
deltas <- seq(-10, 10, .1)
# One row per theta value per region; probability columns start at 0 and
# are filled in the loop below
Plain<- as.data.frame(thetaSeq)
names(Plain) <-c('theta')
newColumns <-c('phys','Ppu','Ps','Pu','Pps','PpAll','PsiU','PsiS')
Plain[newColumns]<-0.0
Plain$phys <- "Plain"
Ridge<- as.data.frame(thetaSeq)
names(Ridge) <-c('theta')
Ridge[newColumns]<-0.0
Ridge$phys <- "Ridge"
Wetlands <-rbind(Plain,Ridge)
#----------------------------------------------------------------------------
# dnorm function returns probability from density function at each theta value Equations: 12 & 13
#----------------------------------------------------------------------------
for (phys in physVec) {
if (phys == 'Plain') {
# Hard-coded Mean/SD calibration constants; the commented lines show they
# were once derived from the per-subset mean/sd stored in `thetas`
# Mean <- max(thetas[thetas$Stress =="Stressed" & thetas$phys==phys,]$mean)
# SD <- max(thetas[thetas$Stress =="Stressed" & thetas$phys==phys,]$sd)
Mean <- 5.18
SD <- 1.75
cat(paste("Stressed",phys,'Mean=',round(Mean,2),'StdDev=',round(SD,4)))
cat('\n')
# Wetlands[Wetlands$phys == phys,]$Ps <- dnorm(log(Wetlands[Wetlands$phys == phys,]$theta+16), Mean, SD)
Wetlands[Wetlands$phys == phys,]$Ps <- dnorm(Wetlands[Wetlands$phys == phys,]$theta, Mean, SD)
# Mean <- max(thetas[thetas$Stress =="Not Stressed" & thetas$phys==phys,]$mean)
# SD <- max(thetas[thetas$Stress =="Not Stressed" & thetas$phys==phys,]$sd)
Mean <- 2.73
SD <- 0.95
cat(paste("Not Stressed",phys,'Mean=',round(Mean,2),'StdDev=',round(SD,4)))
cat('\n')
# Wetlands[Wetlands$phys == phys,]$Pu <- dnorm(log(Wetlands[Wetlands$phys == phys,]$theta+16), Mean, SD)
Wetlands[Wetlands$phys == phys,]$Pu <- dnorm((Wetlands[Wetlands$phys == phys,]$theta), Mean, SD)
}
else if (phys == 'Ridge')
{
# Mean <- max(thetas[thetas$Stress =="Stressed" & thetas$phys==phys,]$mean)
# SD <- max(thetas[thetas$Stress =="Stressed" & thetas$phys==phys,]$sd)
Mean <- 7.86
SD <- 2.55
cat(paste("Stressed",phys,'Mean=',round(Mean,2),'StdDev=',round(SD,4)))
cat('\n')
Wetlands[Wetlands$phys == phys,]$Ps <- dnorm(Wetlands[Wetlands$phys == phys,]$theta,Mean, SD)
# Mean <- max(thetas[thetas$Stress =="Not Stressed" & thetas$phys==phys,]$mean)
# SD <- max(thetas[thetas$Stress =="Not Stressed" & thetas$phys==phys,]$sd)
Mean <- 3.42
SD <- 1.57
cat(paste("Not Stressed",phys,'Mean=',round(Mean,2),'StdDev=',round(SD,4)))
cat('\n')
Wetlands[Wetlands$phys == phys,]$Pu <- dnorm(Wetlands[Wetlands$phys == phys,]$theta,Mean, SD)
}
#----------------------------------------------------------------------------
# Pps and Ppu are Population-weighted contributions of stress and unstress
# wetlands to the total population probability density of all wetlands at
# each wetland hydrologic index (theta) Equations: 14 & 15
#----------------------------------------------------------------------------
# max() collapses the per-row constant Fu/Fs to a single scalar weight
Wetlands[Wetlands$phys == phys,]$Ppu <-Wetlands[Wetlands$phys == phys,]$Pu*max(thetas[thetas$phys==phys,]$Fu)
Wetlands[Wetlands$phys == phys,]$Pps <-Wetlands[Wetlands$phys == phys,]$Ps*max(thetas[thetas$phys==phys,]$Fs)
Wetlands[Wetlands$phys == phys,]$PpAll <-Wetlands[Wetlands$phys == phys,]$Ppu + Wetlands[Wetlands$phys == phys,]$Pps
#----------------------------------------------------------------------------
# PsiU and PsiS Probability weighted Cumulative Probability Equation 17 & 18
#----------------------------------------------------------------------------
Wetlands[Wetlands$phys == phys,]$PsiU <- Wetlands[Wetlands$phys == phys,]$Ppu /Wetlands[Wetlands$phys == phys,]$PpAll
Wetlands[Wetlands$phys == phys,]$PsiS <- Wetlands[Wetlands$phys == phys,]$Pps /Wetlands[Wetlands$phys == phys,]$PpAll
}
write.csv(file='h:/Wetlands.csv',Wetlands)
#----------------------------------------------------------------------------
# Returns stress appropriate PsiValue lookup from Wetlands Table
# using theta and final theta (or theta+delta)
#
# type is not key, but used to subset data enable better performance
# with multiple processors
#
# type     - physiographic region ("Plain" or "Ridge")
# status   - "Stressed" or "Not Stressed"; any other value yields NA
# hydIndex - theta value to look up (rounded to 2 decimals to match the
#            0.1-step grid in the global `Wetlands` table)
#
# Returns the matching PsiU/PsiS value(s), or NA when hydIndex is NA or
# status is unrecognised. Relies on the global `Wetlands` data frame.
#
# Fixes vs. the original: the result is now returned visibly (the original
# ended on an assignment, so the value was returned invisibly), the scalar
# status checks use && instead of vectorized &, and the region subset is
# computed once instead of twice per call.
#----------------------------------------------------------------------------
PsiVals <- function(type, status, hydIndex) {
  val <- round(hydIndex, 2)
  if (is.na(val)) {
    return(NA)
  }
  # Subset the region once; both branches match on the same rounded theta key
  region <- Wetlands[Wetlands$phys == type, ]
  hit <- round(region$theta, 2) == val
  if (status == 'Not Stressed') {
    region[hit, ]$PsiU
  } else if (status == 'Stressed') {
    region[hit, ]$PsiS
  } else {
    NA
  }
}
#----------------------------------------------------------------------------
# Vectorize function to work with dataframes input
#----------------------------------------------------------------------------
# Vectorize() wraps PsiVals with mapply so whole columns of theta values
# can be looked up in a single call
vPsiVals <- Vectorize(PsiVals)
#----------------------------------------------------------------------------
# Function used to calculate zetas
#
# Computes zeta = 1 - Psi(theta + delta) / Psi(theta) for every starting
# theta (rows of the global `vdf`, column 1) and every candidate delta
# (columns 2..k of `vdf`), for one physiographic region / stress status
# combination. Relies on the globals `vdf`, `vPsiVals` and `Wetlands`
# defined earlier in this script.
#
# phys     - "Plain" or "Ridge"
# stress   - "Stressed" or "Not Stressed"
# deltas   - vector of candidate theta shifts (sets the column count)
# thetaSeq - vector of starting thetas (sets the row count)
#
# Returns a data frame with phys, stress, theta, one column per delta,
# and the region's Ps/Pu densities.
#
# Fixes vs. the original: the negative/greater-than-one clamps now index
# with which() -- the original logical subscripts contained NA after the
# NaN replacement, and NA logical subscripts are an error in a subscripted
# assignment. The loop-invariant Psi(theta) lookup is also hoisted out of
# the per-delta loop.
#----------------------------------------------------------------------------
makeZetas <- function(phys, stress, deltas, thetaSeq) {
  z <- matrix(NA, length(thetaSeq), 1 + length(deltas))
  z[, 1] <- vdf[, 1]
  # Psi at the starting theta is the same for every delta column
  psiTheta1 <- unname(vPsiVals(phys, stress, vdf[, 1]))
  for (i in seq(2, 1 + length(deltas))) {
    psiTheta2 <- unname(unlist(vPsiVals(phys, stress, vdf[, i])))
    z[, i] <- 1 - (psiTheta2 / psiTheta1)
    z[is.nan(z[, i]), i] <- NA
    # which() drops NA positions, avoiding an NA-subscript assignment error
    z[which(z[, i] < 0), i] <- 0
    z[which(z[, i] > 1), i] <- NA
  }
  StressZetas <- as.data.frame(cbind(phys, stress, z,
                                     Wetlands[Wetlands$phys == phys, ]$Ps,
                                     Wetlands[Wetlands$phys == phys, ]$Pu))
  names(StressZetas) <- c("phys", "stress", "theta", deltas, "Ps", "Pu")
  cat(paste('Zetas Calculated for', stress, phys, '\n'))
  return(StressZetas)
}
# Define matrix/dataframe for intial and examples of possible thetas
# vdf row r corresponds to thetaSeq[r]; columns 2..k hold thetaSeq[r] + delta
# for every candidate delta, with shifted values outside the tabulated
# theta range set to NA so the Psi lookup skips them.
vdf = c()
for (x in thetaSeq) {
# all candidate final thetas reachable from starting theta x
possibleThetas<- deltas+x
vdf<-c(vdf,possibleThetas)
}
dim(vdf)<-c(length(deltas),length(thetaSeq))
vdf <- t(vdf)
vdf[vdf< min(thetaSeq)]<-NA
vdf[vdf> max(thetaSeq)]<-NA
# Add theta column to beginning
vdf <-cbind(Wetlands[1:length(thetaSeq),]$theta,vdf)
physVec = c('Ridge','Plain')
stressVec = c('Not Stressed','Stressed')
ix = 0
# NOTE(review): plan(multiprocess) is deprecated in current releases of the
# 'future' package; 'multisession' is the usual replacement -- confirm the
# package version in use
plan(multiprocess)
data <- listenv()
#----------------------------------------------------------------------------
# Create zetas using multiprocessing functions
#----------------------------------------------------------------------------
tic("Calculate Zetas")
for (phys in physVec){
for (stress in stressVec){
cat(paste(phys, stress, '\n'))
ix = ix + 1
# %<-% evaluates makeZetas asynchronously (future/listenv promise)
data[[ix]] %<-% makeZetas(phys,stress,deltas,thetaSeq)
}
}
# Collecting the listenv blocks until all four futures resolve
xdata <- as.list(data)
zetas<- do.call(rbind,xdata)
# Long format for export (melt/dcast presumably from reshape2 -- confirm)
zetaMelt <- melt(zetas,id=c("phys","stress","theta","Ps","Pu"),na.rm=T)
toc()
write.csv(file='h:/ZetasHistorical.csv',zetas,row.names=FALSE)
write.csv(file='h:/ZetasMeltHistorical.csv',zetaMelt)
write.csv(file='h:/WetlandsHistorical.csv',Wetlands)
# Wide tables of transformed and original thetas for external evaluation
wideTheta <- dcast(thetas,EMT_ID~rank,value.var='theta.logN',mean)
thetaEval <- merge(wideTheta,Class1P80[,c(1,3,4,5,11)], by.x='EMT_ID', by.y = "CFCA/EMT ID")
write.csv(file='h:/thetas4EvalHistorical.csv',thetaEval)
write.csv(file='h:/thetasTransformedHistorical.csv',thetas)
OrigwideTheta<- dcast(thetas,EMT_ID~rank,value.var='theta',mean)
OrigTheta <-merge(OrigwideTheta,Class1P80[,c(1,3,4,5,11)], by.x='EMT_ID', by.y = "CFCA/EMT ID")
workdir= "Y:/proj/CFWI_WetlandStress/Update2018"
|
# ---------------------------------------------------------------------------- #
# Author - Anupama Rajaram
# Program - Linear and multiple Regression
# ---------------------------------------------------------------------------- #
# prepping the environment - clean up memory, set number formatting
rm(list=ls(all=TRUE))
options(digits=2)
library(car)
# Kaggle Titanic training data (comma-separated)
titanic = read.delim(file = 'train.csv', header = TRUE, sep = ',', dec = '.')
# NOTE(review): attach() is discouraged -- it masks names and makes scoping
# fragile; the explicit data= arguments below already make it unnecessary
attach(titanic)
# basic linear regression
# NOTE(review): `Boston` comes from the MASS package, which is never loaded
# here; this call will fail unless MASS is attached -- confirm intent
regr <- lm(crim ~. , data = Boston)
summary(regr)
# multiple linear regression
# Survived regressed on all columns except the free-text identifiers
lr1 <- lm(Survived ~.-Name -Cabin -Ticket, data = titanic)
summary(lr1) # this will clearly show the most significant explanatory variables
plot(lr1)
anova(lr1)
AIC(lr1) # Akaike’s Information Criterion
coefficients(lr1) # gives you the coeff of the regression equation
confint(lr1) # to compare if the significance is actually true.
# visualization
scatterplotMatrix(titanic, spread=FALSE, lty.smooth=2,
                  main="Scatter Plot Matrix")
qqPlot(lr1, labels=row.names(titanic), id.method="identify",
       simulate=TRUE, main="Q-Q Plot") # generates interactive probability plot
outlierTest(lr1) # check for outliers
# Plot the hat (leverage) values of a fitted model with the conventional
# 2p/n and 3p/n cut-off lines, then let the user click points to label
# high-leverage observations interactively.
hat.plot <- function(lr1) {
  hats <- hatvalues(lr1)
  n.par <- length(coefficients(lr1))
  n.obs <- length(fitted(lr1))
  plot(hats, main="Index Plot of Hat Values")
  # reference lines at two and three times the average leverage p/n
  abline(h=c(2,3)*n.par/n.obs, col="red", lty=2)
  identify(seq_len(n.obs), hats, names(hats))
}
hat.plot(lr1) # to check for influential observations
| /regression_basic.R | no_license | anurajaram/R_projects | R | false | false | 1,545 | r | # ---------------------------------------------------------------------------- #
# Author - Anupama Rajaram
# Program - Linear and multiple Regression
# ---------------------------------------------------------------------------- #
# prepping the environment - clean up memory, set number formatting
rm(list=ls(all=TRUE))
options(digits=2)
library(car)
titanic = read.delim(file = 'train.csv', header = TRUE, sep = ',', dec = '.')
attach(titanic)
# basic linear regression
regr <- lm(crim ~. , data = Boston)
summary(regr)
# multiple linear regression
lr1 <- lm(Survived ~.-Name -Cabin -Ticket, data = titanic)
summary(lr1) # this will clearly show the most significant explanatory variables
plot(lr1)
anova(lr1)
AIC(lr1) # Akaike’s Information Criterion
coefficients(lr1) # gives you the coeff of the regression equation
confint(lr1) # to compare if the significance is actually true.
# visulaization
scatterplotMatrix(titanic, spread=FALSE, lty.smooth=2,
main="Scatter Plot Matrix")
qqPlot(lr1, labels=row.names(titanic), id.method="identify",
simulate=TRUE, main="Q-Q Plot") # generates interactive probability plot
outlierTest(lr1) # check for outliers
hat.plot <- function(lr1) {
p <- length(coefficients(lr1))
n <- length(fitted(lr1))
plot(hatvalues(lr1), main="Index Plot of Hat Values")
abline(h=c(2,3)*p/n, col="red", lty=2)
identify(1:n, hatvalues(lr1), names(hatvalues(lr1)))
}
hat.plot(lr1) # to check for influential observations
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions_implementations.R
\name{Inverse-Gamma}
\alias{Inverse-Gamma}
\alias{dinvgamma}
\alias{rinvgamma}
\alias{pinvgamma}
\alias{qinvgamma}
\title{The Inverse Gamma Distribution}
\usage{
dinvgamma(x, shape, scale = 1, rate = 1/scale, log = FALSE)
rinvgamma(n = 1, shape, scale = 1, rate = 1/scale)
pinvgamma(q, shape, scale = 1, rate = 1/scale, lower.tail = TRUE,
log.p = FALSE)
qinvgamma(p, shape, scale = 1, rate = 1/scale, lower.tail = TRUE,
log.p = FALSE)
}
\arguments{
\item{x}{vector of values.}
\item{shape}{vector of shape values, must be positive.}
\item{scale}{vector of scale values, must be positive.}
\item{rate}{vector of rate values, must be positive.}
\item{log}{logical; if TRUE, probability density is returned on the log scale.}
\item{n}{number of observations.}
\item{q}{vector of quantiles.}
\item{lower.tail}{logical; if TRUE (default) probabilities are \eqn{P[X \le x]}; otherwise, \eqn{P[X > x]}.}
\item{log.p}{logical; if TRUE, probabilities p are given by user as log(p).}
\item{p}{vector of probabilities.}
}
\value{
\code{dinvgamma} gives the density, \code{pinvgamma} gives the distribution
function, \code{qinvgamma} gives the quantile function, and \code{rinvgamma}
generates random deviates.
}
\description{
Density, distribution function, quantile function and random
generation for the inverse gamma distribution with rate
or scale (mean = scale / (shape - 1)) parameterizations.
}
\details{
The inverse gamma distribution with parameters \code{shape} \eqn{=\alpha}{= a} and
\code{scale} \eqn{=\sigma}{= s} has density
\deqn{
f(x)= \frac{\sigma^{\alpha}}{\Gamma(\alpha)} {x}^{-(\alpha+1)} e^{-\sigma/x}%
}{f(x)= (s^a / Gamma(a)) x^-(a+1) e^-(s/x)}
for \eqn{x \ge 0}, \eqn{\alpha > 0}{a > 0} and \eqn{\sigma > 0}{s > 0}.
(Here \eqn{\Gamma(\alpha)}{Gamma(a)} is the function implemented by \R's
\code{\link{gamma}()} and defined in its help.)
The mean and variance are
\eqn{E(X) = \frac{\sigma}{\alpha-1}}{E(X) = s/(a-1)} and
\eqn{Var(X) = \frac{\sigma^2}{(\alpha-1)^2 (\alpha-2)}}{Var(X) = s^2 / ((a-1)^2 * (a-2))},
with the mean defined only
for \eqn{\alpha > 1}{a > 1} and the variance only for \eqn{\alpha > 2}{a > 2}.
See Gelman et al., Appendix A or
the BUGS manual for mathematical details.
}
\examples{
x <- rinvgamma(50, shape = 1, scale = 3)
dinvgamma(x, shape = 1, scale = 3)
}
\references{
Gelman, A., Carlin, J.B., Stern, H.S., and Rubin, D.B. (2004) \emph{Bayesian Data Analysis}, 2nd ed. Chapman and Hall/CRC.
}
\seealso{
\link{Distributions} for other standard distributions
}
\author{
Christopher Paciorek
}
| /nCompiler/man/Inverse-Gamma.Rd | permissive | joe-nano/nCompiler | R | false | true | 2,659 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions_implementations.R
\name{Inverse-Gamma}
\alias{Inverse-Gamma}
\alias{dinvgamma}
\alias{rinvgamma}
\alias{pinvgamma}
\alias{qinvgamma}
\title{The Inverse Gamma Distribution}
\usage{
dinvgamma(x, shape, scale = 1, rate = 1/scale, log = FALSE)
rinvgamma(n = 1, shape, scale = 1, rate = 1/scale)
pinvgamma(q, shape, scale = 1, rate = 1/scale, lower.tail = TRUE,
log.p = FALSE)
qinvgamma(p, shape, scale = 1, rate = 1/scale, lower.tail = TRUE,
log.p = FALSE)
}
\arguments{
\item{x}{vector of values.}
\item{shape}{vector of shape values, must be positive.}
\item{scale}{vector of scale values, must be positive.}
\item{rate}{vector of rate values, must be positive.}
\item{log}{logical; if TRUE, probability density is returned on the log scale.}
\item{n}{number of observations.}
\item{q}{vector of quantiles.}
\item{lower.tail}{logical; if TRUE (default) probabilities are \eqn{P[X \le x]}; otherwise, \eqn{P[X > x]}.}
\item{log.p}{logical; if TRUE, probabilities p are given by user as log(p).}
\item{p}{vector of probabilities.}
}
\value{
\code{dinvgamma} gives the density, \code{pinvgamma} gives the distribution
function, \code{qinvgamma} gives the quantile function, and \code{rinvgamma}
generates random deviates.
}
\description{
Density, distribution function, quantile function and random
generation for the inverse gamma distribution with rate
or scale (mean = scale / (shape - 1)) parameterizations.
}
\details{
The inverse gamma distribution with parameters \code{shape} \eqn{=\alpha}{= a} and
\code{scale} \eqn{=\sigma}{= s} has density
\deqn{
f(x)= \frac{s^a}{\Gamma(\alpha)} {x}^{-(\alpha+1)} e^{-\sigma/x}%
}{f(x)= (s^a / Gamma(a)) x^-(a+1) e^-(s/x)}
for \eqn{x \ge 0}, \eqn{\alpha > 0}{a > 0} and \eqn{\sigma > 0}{s > 0}.
(Here \eqn{\Gamma(\alpha)}{Gamma(a)} is the function implemented by \R's
\code{\link{gamma}()} and defined in its help.
The mean and variance are
\eqn{E(X) = \frac{\sigma}{\alpha}-1}{E(X) = s/(a-1)} and
\eqn{Var(X) = \frac{\sigma^2}{(\alpha-1)^2 (\alpha-2)}}{Var(X) = s^2 / ((a-1)^2 * (a-2))},
with the mean defined only
for \eqn{\alpha > 1}{a > 1} and the variance only for \eqn{\alpha > 2}{a > 2}.
See Gelman et al., Appendix A or
the BUGS manual for mathematical details.
}
\examples{
x <- rinvgamma(50, shape = 1, scale = 3)
dinvgamma(x, shape = 1, scale = 3)
}
\references{
Gelman, A., Carlin, J.B., Stern, H.S., and Rubin, D.B. (2004) \emph{Bayesian Data Analysis}, 2nd ed. Chapman and Hall/CRC.
}
\seealso{
\link{Distributions} for other standard distributions
}
\author{
Christopher Paciorek
}
|
library("caret")
library(corrplot)
library(C50)
library(dummies)
library(gmodels)
library(Metrics)
library(neuralnet)
library(plyr)
library(rpart)
library(tree)
library(e1071)
library(rpart.plot)
library(fastDummies)
################################## Load Files #############################################
# Bundesliga 2017-18 match data with pre-match ELO ratings and bookmaker odds
x <-
  read.csv(
    "C:\\Users\\User\\Documents\\Thesis\\Data\\Models\\LeagueELO\\Bundesliga\\ELO17-18.csv",
    stringsAsFactors = FALSE
  )
################################# Clean Data ##############################################
# Bookmaker odds columns: home/draw/away prices for Bet365, Bet&Win,
# Interwetten, Ladbrokes, Pinnacle, William Hill and VC Bet. They may be
# read as character when the raw export contains blanks, so coerce them all
# to numeric in a single pass instead of 21 hand-written assignments
# (unparseable entries become NA, warning as as.numeric normally does).
odds_cols <- c("B365H", "B365D", "B365A",
               "BWH",   "BWD",   "BWA",
               "IWH",   "IWD",   "IWA",
               "LBH",   "LBD",   "LBA",
               "PSH",   "PSD",   "PSA",
               "WHH",   "WHD",   "WHA",
               "VCH",   "VCD",   "VCA")
x[odds_cols] <- lapply(x[odds_cols], as.numeric)
# Drop rows with any NA (including odds that failed to parse)
x <- na.exclude(x)
################################## Rename Columns #########################################
colnames(x)[1] <- "Season"   # first column of the raw export is the season label
################################ Create Dummy Vars ########################################
# One-hot encode the home and away team names (dummy() is from the
# 'dummies' package loaded at the top of the script)
x <- cbind.data.frame(x, dummy(x$Home))
x <- cbind.data.frame(x, dummy(x$Away))
########################### Remove Cols After Dummy Vars ##################################
# Drop the original categorical columns now that they are dummy-encoded
x$Home <- NULL
x$Away <- NULL
x$Season <- NULL
##################################### All Bookies #########################################
#----------------------------------------------------------------------------
# Fit and evaluate one neural network on a 60/40 train/test split.
#
# data       full modelling frame (FTR in column 1, predictors after it)
# drop_cols  column indices to remove before modelling (NULL keeps all);
#            used to compare individual-bookmaker subsets with the full set
# seed       RNG seed for the split (same 123 the original used everywhere)
# train_frac fraction of rows used for training
# hidden     hidden-layer size passed to neuralnet()
#
# Side effects: plots the fitted network and prints a CrossTable of
# predicted-class index vs observed FTR. Returns the fitted model invisibly.
#
# This helper replaces eight near-identical copy-pasted sections and fixes
# two bugs in them:
#   1. every section evaluated on the *training* rows
#      (NN.test <- NNM[NNM.sample, ]), so each CrossTable measured
#      resubstitution accuracy; the held-out 40% is now used instead.
#   2. the NN4 section trained on NN.train (the all-bookies training set)
#      rather than its own column subset.
#----------------------------------------------------------------------------
evaluate_nn <- function(data, drop_cols = NULL, seed = 123,
                        train_frac = 0.6, hidden = 3) {
  model_data <- if (is.null(drop_cols)) data else data[-drop_cols]
  set.seed(seed)
  n_rows <- nrow(model_data)
  train_idx <- sample(n_rows, n_rows * train_frac)
  nn_train <- model_data[train_idx, ]
  nn_test <- model_data[-train_idx, ]  # BUG FIX: hold out unseen rows
  fit <- neuralnet(FTR ~ ., nn_train, hidden = hidden, linear.output = TRUE)
  plot(fit)
  comp <- compute(fit, nn_test[-1])
  pred.weights <- comp$net.result
  idx <- apply(pred.weights, 1, which.max)
  pred <- c('A', 'D', 'H')[idx]
  CrossTable(
    idx,
    nn_test$FTR,
    prop.c = FALSE,
    prop.r = FALSE,
    prop.chisq = FALSE
  )
  invisible(fit)
}
# One model per feature subset; the dropped column ranges mirror the
# original NNM..NNM8 frames, and the fitted models keep their old names.
NN  <- evaluate_nn(x)                    # all bookmakers
NN2 <- evaluate_nn(x, c(7:24))
NN3 <- evaluate_nn(x, c(4:6, 10:24))
NN4 <- evaluate_nn(x, c(4:9, 13:24))     # BUG FIX: now trains on its own subset
NN5 <- evaluate_nn(x, c(4:12, 16:24))
NN6 <- evaluate_nn(x, c(4:15, 19:24))
NN7 <- evaluate_nn(x, c(4:18, 20:24))
NN8 <- evaluate_nn(x, c(4:21))
library(corrplot)
library(C50)
library(dummies)
library(gmodels)
library(Metrics)
library(neuralnet)
library(plyr)
library(rpart)
library(tree)
library(e1071)
library(rpart.plot)
library(fastDummies)
################################## Load Files #############################################
x <-
read.csv(
"C:\\Users\\User\\Documents\\Thesis\\Data\\Models\\LeagueELO\\Bundesliga\\ELO17-18.csv",
stringsAsFactors = FALSE
)
################################# Clean Data ##############################################
x$B365H <- as.numeric(x$B365H)
x$B365D <- as.numeric(x$B365D)
x$B365A <- as.numeric(x$B365A)
x$BWH <- as.numeric(x$BWH)
x$BWD <- as.numeric(x$BWD)
x$BWA <- as.numeric(x$BWA)
x$IWH <- as.numeric(x$IWH)
x$IWD <- as.numeric(x$IWD)
x$IWA <- as.numeric(x$IWA)
x$LBH <- as.numeric(x$LBH)
x$LBD <- as.numeric(x$LBD)
x$LBA <- as.numeric(x$LBA)
x$PSH <- as.numeric(x$PSH)
x$PSD <- as.numeric(x$PSD)
x$PSA <- as.numeric(x$PSA)
x$WHH <- as.numeric(x$WHH)
x$WHD <- as.numeric(x$WHD)
x$WHA <- as.numeric(x$WHA)
x$VCH <- as.numeric(x$VCH)
x$VCD <- as.numeric(x$VCD)
x$VCA <- as.numeric(x$VCA)
x <- na.exclude(x)
################################## Rename Columns #########################################
colnames(x)[1] <- "Season"
################################ Create Dummy Vars ########################################
x <- cbind.data.frame(x, dummy(x$Home))
x <- cbind.data.frame(x, dummy(x$Away))
########################### Remove Cols After Dummy Vars ##################################
x$Home <- NULL
x$Away <- NULL
x$Season <- NULL
##################################### All Bookies #########################################
NNM <- x
set.seed(123)
NNM.rows <- nrow(NNM)
NNM.sample <- sample(NNM.rows, NNM.rows * 0.6)
NN.train <- NNM[NNM.sample, ]
NN.test <- NNM[NNM.sample, ]
NN = neuralnet(FTR ~ ., NN.train, hidden = 3, linear.output = T)
plot(NN)
comp <- compute(NN, NN.test[-1])
pred.weights <- comp$net.result
idx <- apply(pred.weights, 1, which.max)
pred <- c('A', 'D', 'H')[idx]
CrossTable(
idx,
NN.test$FTR,
prop.c = FALSE,
prop.r = FALSE,
prop.chisq = FALSE
)
##########################################################################################################
NNM2 <- x[-c(7:24)]
set.seed(123)
NNM2.rows <- nrow(NNM2)
NNM2.sample <- sample(NNM2.rows, NNM2.rows * 0.6)
NN2.train <- NNM2[NNM2.sample, ]
NN2.test <- NNM2[NNM2.sample, ]
NN2 = neuralnet(FTR ~ ., NN2.train, hidden = 3, linear.output = T)
plot(NN2)
comp <- compute(NN2, NN2.test[-1])
pred.weights <- comp$net.result
idx <- apply(pred.weights, 1, which.max)
pred <- c('A', 'D', 'H')[idx]
CrossTable(
idx,
NN2.test$FTR,
prop.c = FALSE,
prop.r = FALSE,
prop.chisq = FALSE
)
##########################################################################################################
NNM3 <- x[-c(4:6, 10:24)]
set.seed(123)
NNM3.rows <- nrow(NNM3)
NNM3.sample <- sample(NNM3.rows, NNM3.rows * 0.6)
NN3.train <- NNM3[NNM3.sample, ]
NN3.test <- NNM3[NNM3.sample, ]
NN3 = neuralnet(FTR ~ ., NN3.train, hidden = 3, linear.output = T)
plot(NN3)
comp <- compute(NN3, NN3.test[-1])
pred.weights <- comp$net.result
idx <- apply(pred.weights, 1, which.max)
pred <- c('A', 'D', 'H')[idx]
CrossTable(
idx,
NN3.test$FTR,
prop.c = FALSE,
prop.r = FALSE,
prop.chisq = FALSE
)
##########################################################################################################
NNM4 <- x[-c(4:9, 13:24)]
set.seed(123)
NNM4.rows <- nrow(NNM4)
NNM4.sample <- sample(NNM4.rows, NNM4.rows * 0.6)
NN4.train <- NNM4[NNM4.sample, ]
NN4.test <- NNM4[NNM4.sample, ]
NN4 = neuralnet(FTR ~ ., NN.train, hidden = 3, linear.output = T)
plot(NN4)
comp <- compute(NN4, NN4.test[-1])
pred.weights <- comp$net.result
idx <- apply(pred.weights, 1, which.max)
pred <- c('A', 'D', 'H')[idx]
CrossTable(
idx,
NN4.test$FTR,
prop.c = FALSE,
prop.r = FALSE,
prop.chisq = FALSE
)
##########################################################################################################
NNM5 <- x[-c(4:12, 16:24)]
set.seed(123)
NNM5.rows <- nrow(NNM5)
NNM5.sample <- sample(NNM5.rows, NNM5.rows * 0.6)
NN5.train <- NNM5[NNM5.sample, ]
NN5.test <- NNM5[NNM5.sample, ]
NN5 = neuralnet(FTR ~ ., NN5.train, hidden = 3, linear.output = T)
plot(NN5)
comp <- compute(NN5, NN5.test[-1])
pred.weights <- comp$net.result
idx <- apply(pred.weights, 1, which.max)
pred <- c('A', 'D', 'H')[idx]
CrossTable(
idx,
NN5.test$FTR,
prop.c = FALSE,
prop.r = FALSE,
prop.chisq = FALSE
)
##########################################################################################################
NNM6 <- x[-c(4:15, 19:24)]
set.seed(123)
NNM6.rows <- nrow(NNM6)
NNM6.sample <- sample(NNM6.rows, NNM6.rows * 0.6)
NN6.train <- NNM6[NNM6.sample, ]
NN6.test <- NNM6[NNM6.sample, ]
NN6 = neuralnet(FTR ~ ., NN6.train, hidden = 3, linear.output = T)
plot(NN6)
comp <- compute(NN6, NN6.test[-1])
pred.weights <- comp$net.result
idx <- apply(pred.weights, 1, which.max)
pred <- c('A', 'D', 'H')[idx]
CrossTable(
idx,
NN6.test$FTR,
prop.c = FALSE,
prop.r = FALSE,
prop.chisq = FALSE
)
##########################################################################################################
NNM7 <- x[-c(4:18, 20:24)]
set.seed(123)
NNM7.rows <- nrow(NNM7)
NNM7.sample <- sample(NNM7.rows, NNM7.rows * 0.6)
NN7.train <- NNM7[NNM7.sample, ]
NN7.test <- NNM7[NNM7.sample, ]
NN7 = neuralnet(FTR ~ ., NN7.train, hidden = 3, linear.output = T)
plot(NN7)
comp <- compute(NN7, NN7.test[-1])
pred.weights <- comp$net.result
idx <- apply(pred.weights, 1, which.max)
pred <- c('A', 'D', 'H')[idx]
CrossTable(
idx,
NN7.test$FTR,
prop.c = FALSE,
prop.r = FALSE,
prop.chisq = FALSE
)
##############################################################################################################
NNM8 <- x[-c(4:21)]
set.seed(123)
NNM8.rows <- nrow(NNM8)
NNM8.sample <- sample(NNM8.rows, NNM8.rows * 0.6)
NN8.train <- NNM8[NNM8.sample, ]
NN8.test <- NNM8[NNM8.sample, ]
NN8 = neuralnet(FTR ~ ., NN8.train, hidden = 3, linear.output = T)
plot(NN8)
comp <- compute(NN8, NN8.test[-1])
pred.weights <- comp$net.result
idx <- apply(pred.weights, 1, which.max)
pred <- c('A', 'D', 'H')[idx]
CrossTable(
idx,
NN8.test$FTR,
prop.c = FALSE,
prop.r = FALSE,
prop.chisq = FALSE
) |
# Shared fixture for the morbidity tests: discharge records for the
# 2010-2011 window, fetched once via the package's GetMorbiData().
data_test <- GetMorbiData(y1 = 2010,y2 = 2011)
| /tests/testthat/helper_morbidity.R | no_license | rOpenSpain/MorbiditySpainR | R | false | false | 47 | r | data_test <- GetMorbiData(y1 = 2010,y2 = 2011)
|
\name{opvdat}
\docType{data}
\alias{opvdat}
\title{Data on OPV and intussusception}
\description{
The data comprise ages in days at oral polio vaccine (OPV) and first hospital admission for intussusception in children between the ages of 27 and 365 days. There are 207 cases.
}
\usage{opvdat}
\format{A data frame containing 207 rows and 8 columns. The column names are 'case' (individual identifier), 'sta' (age on first day of the observation period), 'end' (age on last day of the observation period), 'intus' (age at admission for intussusception), 'opv' (age at first dose of OPV), 'opvd2' (age at second dose of OPV), 'opvd3' (age at third dose of OPV), 'sex' (1 for males, 2 for females).}
\source{
Whitaker, H. J., Farrington, C. P., Spiessens, B., and Musonda, P. (2006). Tutorial in biostatistics: The self-controlled case series method. Statistics in Medicine 25, 1768--1797.
}
\references{
Andrews N., Miller, E., Waight, P., Farrington, P., Crowcroft, N., Stowe, J., and Taylor B. (2002). Does oral polio vaccine cause intussusception in infants? Evidence from a sequence of three self-controlled case series studies in the United Kingdom. European Journal of Epidemiology 17, 701--706.
}
\keyword{datasets}
| /man/opvdat.Rd | no_license | cran/SCCS | R | false | false | 1,237 | rd | \name{opvdat}
\docType{data}
\alias{opvdat}
\title{Data on OPV and intussusception}
\description{
The data comprise ages in days at oral polio vaccine (OPV) and first hospital admission for intussusception in children between the ages of 27 and 365 days. There are 207 cases.
}
\usage{opvdat}
\format{A data frame containing 207 rows and 8 columns. The column names are 'case' (individual identifier), 'sta' (age on first day of the observation period), 'end' (age on last day of the observation period), 'intus' (age at admission for intussusception), 'opv' (age at first dose of OPV), 'opvd2' (age at second dose of OPV), 'opvd3' (age at third dose of OPV), 'sex' (1 for males, 2 for females).}
\source{
Whitaker, H. J., Farrington, C. P., Spiessens, B., and Musonda, P. (2006). Tutorial in biostatistics: The self-controlled case series method. Statistics in Medicine 25, 1768--1797.
}
\references{
Andrews N., Miller, E., Waight, P., Farrington, P., Crowcroft, N., Stowe, J., and Taylor B. (2002). Does oral polio vaccine cause intussusception in infants? Evidence from a sequence of three self-controlled case series studies in the United Kingdom. European Journal of Epidemiology 17, 701--706.
}
\keyword{datasets}
|
# match_style.whitespace() should lint only when a node's leading
# whitespace (space count / newline count) differs from the declared style.
test_that("match_style.whitespace: styling matching on whitespace", {
  # two leading spaces on the same line match a 2-space / 1-newline style
  style <- whitespace(spaces = 2L, newlines = 1L)
  expect_no_lint(match_style.whitespace(style, first_parsed_node("  1L")))
  # style expects one space but the node has two -> lint
  style <- whitespace(spaces = 1L, newlines = 1L)
  expect_lint(match_style.whitespace(style, first_parsed_node("  1L")))
  # node preceded by a newline and 2-space indent matches a 2-newline style
  style <- whitespace(spaces = 2L, newlines = 2L)
  expect_no_lint(match_style.whitespace(style, first_parsed_node("  \n  1L")))
  # style expects a newline before the node but there is none -> lint
  style <- whitespace(spaces = 2L, newlines = 2L)
  expect_lint(match_style.whitespace(style, first_parsed_node("  1L")))
})
| /tests/testthat/test_match_style_whitespace.R | no_license | dgkf/reflow | R | false | false | 574 | r | test_that("match_style.whitespace: styling matching on whitespace", {
style <- whitespace(spaces = 2L, newlines = 1L)
expect_no_lint(match_style.whitespace(style, first_parsed_node(" 1L")))
style <- whitespace(spaces = 1L, newlines = 1L)
expect_lint(match_style.whitespace(style, first_parsed_node(" 1L")))
style <- whitespace(spaces = 2L, newlines = 2L)
expect_no_lint(match_style.whitespace(style, first_parsed_node(" \n 1L")))
style <- whitespace(spaces = 2L, newlines = 2L)
expect_lint(match_style.whitespace(style, first_parsed_node(" 1L")))
})
|
# descriptive statistics --------------------------------------------------
library(tidyverse)
library(gapminder)
library(ggplot2)
library(GGally)
library(lme4)
#library(sjPlot)
summary(gapminder)
# create the decade variable: integer division by 10 maps e.g. 1957 -> 1950,
# then convert to a factor for grouping/plotting
gapminder <- gapminder %>% mutate(decada = 10*(year%/%10)) %>%
  mutate(decada = as.factor(decada))
# sanity check: each country should have exactly two readings per decade
n_distinct(gapminder$country)*2*n_distinct(gapminder$decada) == nrow(gapminder)
# summary
gapminder %>% select(lifeExp, gdpPercap, pop) %>%
summary() %>% xtable::xtable()
#summary lifeExp por continente
gapminder %>% group_by(continent) %>%
summarise(n_country = n_distinct(country),
min = min(lifeExp),
Q1 = quantile(lifeExp, 0.25),
median = median(lifeExp),
Q3 = quantile(lifeExp, 0.75),
max = max(lifeExp)) %>% xtable::xtable()
# faceted boxplots of life expectancy by decade, one panel per continent
gapminder %>%
  ggplot(aes(x = factor(decada), y = lifeExp, color = continent)) +
  geom_jitter(aes(alpha = 0.1), show.legend = FALSE) +
  geom_boxplot() + #scale_y_log10() +
  facet_wrap(~continent)

# scatterplot correlation matrix
ggplt <- gapminder %>%
  select(continent, gdpPercap, pop, lifeExp, decada) %>%
  ggpairs(
    columns = 2:ncol(.),
    mapping = ggplot2::aes(color = continent),
    diag = list(continuous = 'barDiag'),
    legend = c(2, 2),
    upper = list(continuous = wrap("cor", size = 3, alignPercent = 1)),
    lower = list(mapping = ggplot2::aes(alpha = 0.9))
  )

# put the gdpPercap (column 1) and pop (column 2) panels on a log10 x scale;
# the [2, 1] panel (pop vs gdpPercap) is logged on both axes
for (panel in list(c(1, 1), c(2, 2), c(3, 1), c(3, 2), c(4, 1), c(4, 2))) {
  ggplt[panel[1], panel[2]] <- ggplt[panel[1], panel[2]] + scale_x_log10()
}
ggplt[2, 1] <- ggplt[2, 1] + scale_x_log10() + scale_y_log10()

ggplt +
  theme(panel.grid.major = element_blank()) +
  ggtitle('Correlation Scatterplot matrix') +
  theme(plot.title = element_text(size = 10, hjust = 0.5),
        axis.text.x = element_text(size = 8, angle = 45),
        axis.text.y = element_text(size = 8, angle = 45))
# anovas ------------------------------------------------------------------
# nested ANOVA: country nested within continent, decade as a factor
modelo_life2 <- lm(lifeExp ~ factor(decada) + continent + continent/country, data = gapminder)
anova(modelo_life2) %>% xtable::xtable()
# residual diagnostics (2x2 grid of the standard lm plots)
par(mfrow = c(2, 2))
plot(modelo_life2)
# Tukey HSD pairwise comparisons between continents
tuckey_modelo_life2 <- TukeyHSD(aov(modelo_life2), "continent", ordered = TRUE)
tuckey_modelo_life2$continent %>% xtable::xtable()
broom::tidy(modelo_life2) %>% xtable::xtable()
# codigo nao usado --------------------------------------------------------
## com componente aleatorio
## BUG FIX: the random-effect term must be parenthesised -- `|` has the
## lowest precedence in formulas, so `... + 1|continent/country` parsed the
## whole fixed part as the LHS of `|` and lmer() failed/misread the model.
lmer(lifeExp ~ decada + continent + (1 | continent/country), data = gapminder)
## nested anova
gapminder$CC <- factor(gapminder$continent:gapminder$country)
xtabs(lifeExp ~ continent + CC + decada, data = gapminder)
fm1 <- aov(lifeExp ~ decada + continent + continent/country, data = gapminder)
summary(fm1)
modelo_gdp <- lm(gdpPercap ~ decada + pop + continent/country, data = gapminder)
summary(modelo_gdp)
anova(modelo_gdp)
drop1(modelo_gdp)
| /descritivas.R | no_license | andradecarolina/ME623 | R | false | false | 3,070 | r | # descritivas -------------------------------------------------------------
library(tidyverse)
library(gapminder)
library(ggplot2)
library(GGally)
library(lme4)
#library(sjPlot)

summary(gapminder)

# derive the decade each observation belongs to, stored as a factor
gapminder <- gapminder %>%
  mutate(decada = as.factor(10 * (year %/% 10)))

# sanity check: every country has exactly two readings per decade
n_distinct(gapminder$country) * 2 * n_distinct(gapminder$decada) == nrow(gapminder)

# overall numeric summaries, formatted as a LaTeX table
gapminder %>%
  select(lifeExp, gdpPercap, pop) %>%
  summary() %>%
  xtable::xtable()

# five-number summary of life expectancy by continent
gapminder %>%
  group_by(continent) %>%
  summarise(n_country = n_distinct(country),
            min = min(lifeExp),
            Q1 = quantile(lifeExp, 0.25),
            median = median(lifeExp),
            Q3 = quantile(lifeExp, 0.75),
            max = max(lifeExp)) %>%
  xtable::xtable()
# faceted boxplots of life expectancy by decade, one panel per continent
gapminder %>%
  ggplot(aes(x = factor(decada), y = lifeExp, color = continent)) +
  geom_jitter(aes(alpha = 0.1), show.legend = FALSE) +
  geom_boxplot() + #scale_y_log10() +
  facet_wrap(~continent)

# scatterplot correlation matrix
ggplt <- gapminder %>%
  select(continent, gdpPercap, pop, lifeExp, decada) %>%
  ggpairs(
    columns = 2:ncol(.),
    mapping = ggplot2::aes(color = continent),
    diag = list(continuous = 'barDiag'),
    legend = c(2, 2),
    upper = list(continuous = wrap("cor", size = 3, alignPercent = 1)),
    lower = list(mapping = ggplot2::aes(alpha = 0.9))
  )

# put the gdpPercap (column 1) and pop (column 2) panels on a log10 x scale;
# the [2, 1] panel (pop vs gdpPercap) is logged on both axes
for (panel in list(c(1, 1), c(2, 2), c(3, 1), c(3, 2), c(4, 1), c(4, 2))) {
  ggplt[panel[1], panel[2]] <- ggplt[panel[1], panel[2]] + scale_x_log10()
}
ggplt[2, 1] <- ggplt[2, 1] + scale_x_log10() + scale_y_log10()

ggplt +
  theme(panel.grid.major = element_blank()) +
  ggtitle('Correlation Scatterplot matrix') +
  theme(plot.title = element_text(size = 10, hjust = 0.5),
        axis.text.x = element_text(size = 8, angle = 45),
        axis.text.y = element_text(size = 8, angle = 45))
# anovas ------------------------------------------------------------------
# nested ANOVA: country nested within continent, decade as a factor
modelo_life2 <- lm(lifeExp ~ factor(decada) + continent + continent/country, data = gapminder)
anova(modelo_life2) %>% xtable::xtable()
# residual diagnostics (2x2 grid of the standard lm plots)
par(mfrow = c(2, 2))
plot(modelo_life2)
# Tukey HSD pairwise comparisons between continents
tuckey_modelo_life2 <- TukeyHSD(aov(modelo_life2), "continent", ordered = TRUE)
tuckey_modelo_life2$continent %>% xtable::xtable()
broom::tidy(modelo_life2) %>% xtable::xtable()
# codigo nao usado --------------------------------------------------------
## com componente aleatorio
## BUG FIX: the random-effect term must be parenthesised -- `|` has the
## lowest precedence in formulas, so `... + 1|continent/country` parsed the
## whole fixed part as the LHS of `|` and lmer() failed/misread the model.
lmer(lifeExp ~ decada + continent + (1 | continent/country), data = gapminder)
## nested anova
gapminder$CC <- factor(gapminder$continent:gapminder$country)
xtabs(lifeExp ~ continent + CC + decada, data = gapminder)
fm1 <- aov(lifeExp ~ decada + continent + continent/country, data = gapminder)
summary(fm1)
modelo_gdp <- lm(gdpPercap ~ decada + pop + continent/country, data = gapminder)
summary(modelo_gdp)
anova(modelo_gdp)
drop1(modelo_gdp)
|
# this code finds the minimum reporting limit (MRL) from field blanks
# and corrects absorbances for MRLs
# source Steve's code to summarize blanks
source('scripts/2_process/fxn_absMRL.R')
# source Steve's code to correct absorbance values
source('scripts/2_process/fxn_absMRLAdjust.R')
# read in raw absorbance data
abs.raw <- read.csv('raw_data/rawCompiledAbs.csv')
# calculate MRL based on all blank samples
# blank columns are those named "Q" followed by a letter (e.g. QOUT..., QLK...)
blankGRnums.all <- grep('^Q[[:alpha:]]', names(abs.raw), value = TRUE)
MRL.all <- absMRL(abs.raw, "Wavelength", blankGRnums.all)
# read in cleaned absorbance data
abs.cleaned <- read.csv("cached_data/cleanedAbsData.csv")
GRnums <- as.character(abs.cleaned$GRnumber)
# wavelength columns are named like "A254"; strip the "A" for numeric labels
# NOTE(review): grep("A", ...) matches ANY name containing a capital A, not
# only wavelength columns -- confirm no metadata column name contains "A"
Wavelength <- grep("A", names(abs.cleaned), value = TRUE)
Wavelength.num <- gsub("A", "", Wavelength)
storms <- abs.cleaned[,c('GRnumber', 'date', 'datetime', 'ProjectID')]
# transpose so rows = wavelengths, columns = samples
# NOTE(review): assumes columns 2..(ncol-4) are exactly the wavelength
# columns (GRnumber first, 4 metadata columns last) -- confirm CSV layout
abs.t.cleaned <- as.data.frame(t(abs.cleaned[,2:(ncol(abs.cleaned)-4)]))
names(abs.t.cleaned) <- GRnums
abs.t.cleaned$Wavelength <- Wavelength.num
# move the Wavelength column to the front
wl.column <- grep('Wavelength', names(abs.t.cleaned))
abs.t.cleaned <- abs.t.cleaned[,c(wl.column, 1:(wl.column-1))]
# adjust values from MRL - here setting to 1/2 MRL
# function outputs two dataframes - one with adjusted values, the other with "<" for all corrected values
abs.corrected <- absMRLAdjust(dfabs = abs.t.cleaned, dfMRLs = MRL.all, Wavelength = 'Wavelength', sampleGRnums = GRnums, multiplier = 0.5)
abs.censored <- abs.corrected[[2]]
# find out how many censored values we have for each wavelength to determine
# which wavelengths we should use: coerce "<"-flagged entries to NA
# (as.numeric() on the "<..." strings yields NA; real values survive)
# maybe add this to function in the future
for (i in seq_along(abs.censored)[-1]) {
  abs.censored[, i] <- as.numeric(as.character(abs.censored[, i]))
}
# number of samples = all columns except the leading Wavelength column;
# captured *before* the summary columns are appended further down
n.samples <- length(abs.censored) - 1
# now create dataframe of 'censored' values to release with data:
# TRUE where the value was below the MRL, FALSE otherwise
censored.flags <- abs.censored[, -1]
censored.flags[!is.na(censored.flags)] <- 'FALSE'
censored.flags[is.na(censored.flags)] <- 'TRUE'
censored.flags <- as.data.frame(t(censored.flags))
names(censored.flags) <- paste('A', abs.censored[, 1], sep = "")
censored.flags$GRnumber <- row.names(censored.flags)
names(censored.flags)
# count NAs per row (# of samples censored per wavelength)
count_na <- function(x) sum(is.na(x))
abs.censored$n_censored <- apply(abs.censored, 1, count_na)
# BUG FIX: the denominator must be the sample count; previously it was
# length(abs.censored) - 1 evaluated *after* n_censored was appended,
# i.e. (n.samples + 1), which understated the censored proportion.
abs.censored$prop_censored <- abs.censored$n_censored / n.samples
# transpose corrected values back to samples-in-rows and attach storm metadata
abs.corrected.t <- as.data.frame(t(abs.corrected[[1]][, -grep('Wavelength', names(abs.corrected[[1]]))]))
names(abs.corrected.t) <- Wavelength
abs.corrected.t$GRnumber <- row.names(abs.corrected.t)
abs.corrected.t <- merge(abs.corrected.t, storms, by = 'GRnumber', all.x = TRUE)
write.csv(abs.corrected[[1]], 'cached_data/correctedAbsData.csv', row.names = FALSE)
write.csv(abs.corrected.t, 'cached_data/tcorrectedAbsData.csv', row.names = FALSE)
write.csv(censored.flags, 'cached_data/censoredAbs.csv', row.names = FALSE)
png('figures/Abs_prop_censored.png')
# axis label fixed: the threshold used throughout this script is the MRL
plot(prop_censored ~ Wavelength, data = abs.censored, ylab = "Proportion of Samples < MRL", cex.lab = 1.3)
dev.off()
#######################################################
# test if blanks by site makes a difference
# that is, is there contamination at any sites?
blankGRnums.OUT <- grep('QOUT', names(abs.raw), value = TRUE)
blankGRnums.LK <- grep('QLK', names(abs.raw), value = TRUE)
blankGRnums.CG <- grep('QCG', names(abs.raw), value = TRUE)
blankGRnums.US <- grep('QUS', names(abs.raw), value = TRUE)
# MRL summaries: all sites pooled first, then each site on its own
data.list <- list()
data.list[[1]] <- absMRL(abs.raw, "Wavelength", blankGRnums.all)
data.list[[2]] <- absMRL(abs.raw, "Wavelength", blankGRnums.OUT)
data.list[[3]] <- absMRL(abs.raw, "Wavelength", blankGRnums.LK)
data.list[[4]] <- absMRL(abs.raw, "Wavelength", blankGRnums.CG)
data.list[[5]] <- absMRL(abs.raw, "Wavelength", blankGRnums.US)
# visualize data for major differences
# var: name of the absMRL summary column to plot (e.g. "mean")
absMRL.viz <- function(data.list, var){
  plot(data.list[[1]][, var] ~ data.list[[1]][, 'Wavelength'], type = "l")
  points(data.list[[2]][, var] ~ data.list[[2]][, 'Wavelength'], type = "l", col = "red")
  points(data.list[[3]][, var] ~ data.list[[3]][, 'Wavelength'], type = "l", col = "blue")
  points(data.list[[4]][, var] ~ data.list[[4]][, 'Wavelength'], type = "l", col = "green")
  points(data.list[[5]][, var] ~ data.list[[5]][, 'Wavelength'], type = "l", col = "purple")
  legend("topright", legend = c("All", "OUT", "LK", "CG", "US"),
         col = c('black', 'red', 'blue', 'green', 'purple'), lty = 1, title = paste(var, " of Blank", sep = ""))
}
# BUG FIX: the script previously rebuilt data.list from objects named
# test.all, test.OUT, ... that are never defined anywhere in this script,
# which would error at runtime; data.list is already populated above.
par(mfrow = c(1, 1))
absMRL.viz(data.list = data.list, var = "mean")
| /scripts/2_process/correctAbs_MRL.R | no_license | limnoliver/GMIAAbs | R | false | false | 4,541 | r | # this code finds the minimum reporting limit from field blanks
# and corrects absorbances for MRLs
# source Steve's code to summarize blanks
source('scripts/2_process/fxn_absMRL.R')
# source Steve's code to correct absorbance values
source('scripts/2_process/fxn_absMRLAdjust.R')
# read in raw absorbance data
abs.raw <- read.csv('raw_data/rawCompiledAbs.csv')
# calculate MRL based on all blank samples
# blank columns are those named "Q" followed by a letter (e.g. QOUT..., QLK...)
blankGRnums.all <- grep('^Q[[:alpha:]]', names(abs.raw), value = TRUE)
MRL.all <- absMRL(abs.raw, "Wavelength", blankGRnums.all)
# read in cleaned absorbance data
abs.cleaned <- read.csv("cached_data/cleanedAbsData.csv")
GRnums <- as.character(abs.cleaned$GRnumber)
# wavelength columns are named like "A254"; strip the "A" for numeric labels
# NOTE(review): grep("A", ...) matches ANY name containing a capital A, not
# only wavelength columns -- confirm no metadata column name contains "A"
Wavelength <- grep("A", names(abs.cleaned), value = TRUE)
Wavelength.num <- gsub("A", "", Wavelength)
storms <- abs.cleaned[,c('GRnumber', 'date', 'datetime', 'ProjectID')]
# transpose so rows = wavelengths, columns = samples
# NOTE(review): assumes columns 2..(ncol-4) are exactly the wavelength
# columns (GRnumber first, 4 metadata columns last) -- confirm CSV layout
abs.t.cleaned <- as.data.frame(t(abs.cleaned[,2:(ncol(abs.cleaned)-4)]))
names(abs.t.cleaned) <- GRnums
abs.t.cleaned$Wavelength <- Wavelength.num
# move the Wavelength column to the front
wl.column <- grep('Wavelength', names(abs.t.cleaned))
abs.t.cleaned <- abs.t.cleaned[,c(wl.column, 1:(wl.column-1))]
# adjust values from MRL - here setting to 1/2 MRL
# function outputs two dataframes - one with adjusted values, the other with "<" for all corrected values
abs.corrected <- absMRLAdjust(dfabs = abs.t.cleaned, dfMRLs = MRL.all, Wavelength = 'Wavelength', sampleGRnums = GRnums, multiplier = 0.5)
abs.censored <- abs.corrected[[2]]
# find out how many censored values we have for each wavelength to determine
# which wavelengths we should use: coerce "<"-flagged entries to NA
# (as.numeric() on the "<..." strings yields NA; real values survive)
# maybe add this to function in the future
for (i in seq_along(abs.censored)[-1]) {
  abs.censored[, i] <- as.numeric(as.character(abs.censored[, i]))
}
# number of samples = all columns except the leading Wavelength column;
# captured *before* the summary columns are appended further down
n.samples <- length(abs.censored) - 1
# now create dataframe of 'censored' values to release with data:
# TRUE where the value was below the MRL, FALSE otherwise
censored.flags <- abs.censored[, -1]
censored.flags[!is.na(censored.flags)] <- 'FALSE'
censored.flags[is.na(censored.flags)] <- 'TRUE'
censored.flags <- as.data.frame(t(censored.flags))
names(censored.flags) <- paste('A', abs.censored[, 1], sep = "")
censored.flags$GRnumber <- row.names(censored.flags)
names(censored.flags)
# count NAs per row (# of samples censored per wavelength)
count_na <- function(x) sum(is.na(x))
abs.censored$n_censored <- apply(abs.censored, 1, count_na)
# BUG FIX: the denominator must be the sample count; previously it was
# length(abs.censored) - 1 evaluated *after* n_censored was appended,
# i.e. (n.samples + 1), which understated the censored proportion.
abs.censored$prop_censored <- abs.censored$n_censored / n.samples
# transpose corrected values back to samples-in-rows and attach storm metadata
abs.corrected.t <- as.data.frame(t(abs.corrected[[1]][, -grep('Wavelength', names(abs.corrected[[1]]))]))
names(abs.corrected.t) <- Wavelength
abs.corrected.t$GRnumber <- row.names(abs.corrected.t)
abs.corrected.t <- merge(abs.corrected.t, storms, by = 'GRnumber', all.x = TRUE)
write.csv(abs.corrected[[1]], 'cached_data/correctedAbsData.csv', row.names = FALSE)
write.csv(abs.corrected.t, 'cached_data/tcorrectedAbsData.csv', row.names = FALSE)
write.csv(censored.flags, 'cached_data/censoredAbs.csv', row.names = FALSE)
png('figures/Abs_prop_censored.png')
# axis label fixed: the threshold used throughout this script is the MRL
plot(prop_censored ~ Wavelength, data = abs.censored, ylab = "Proportion of Samples < MRL", cex.lab = 1.3)
dev.off()
#######################################################
# test if blanks by site makes a difference
# that is, is there contamination at any sites?
blankGRnums.OUT <- grep('QOUT', names(abs.raw), value = TRUE)
blankGRnums.LK <- grep('QLK', names(abs.raw), value = TRUE)
blankGRnums.CG <- grep('QCG', names(abs.raw), value = TRUE)
blankGRnums.US <- grep('QUS', names(abs.raw), value = TRUE)
# MRL summaries: all sites pooled first, then each site on its own
data.list <- list()
data.list[[1]] <- absMRL(abs.raw, "Wavelength", blankGRnums.all)
data.list[[2]] <- absMRL(abs.raw, "Wavelength", blankGRnums.OUT)
data.list[[3]] <- absMRL(abs.raw, "Wavelength", blankGRnums.LK)
data.list[[4]] <- absMRL(abs.raw, "Wavelength", blankGRnums.CG)
data.list[[5]] <- absMRL(abs.raw, "Wavelength", blankGRnums.US)
# visualize data for major differences
# var: name of the absMRL summary column to plot (e.g. "mean")
absMRL.viz <- function(data.list, var){
  plot(data.list[[1]][, var] ~ data.list[[1]][, 'Wavelength'], type = "l")
  points(data.list[[2]][, var] ~ data.list[[2]][, 'Wavelength'], type = "l", col = "red")
  points(data.list[[3]][, var] ~ data.list[[3]][, 'Wavelength'], type = "l", col = "blue")
  points(data.list[[4]][, var] ~ data.list[[4]][, 'Wavelength'], type = "l", col = "green")
  points(data.list[[5]][, var] ~ data.list[[5]][, 'Wavelength'], type = "l", col = "purple")
  legend("topright", legend = c("All", "OUT", "LK", "CG", "US"),
         col = c('black', 'red', 'blue', 'green', 'purple'), lty = 1, title = paste(var, " of Blank", sep = ""))
}
# BUG FIX: the script previously rebuilt data.list from objects named
# test.all, test.OUT, ... that are never defined anywhere in this script,
# which would error at runtime; data.list is already populated above.
par(mfrow = c(1, 1))
absMRL.viz(data.list = data.list, var = "mean")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R
\docType{methods}
\name{filterTrajFeaturesByDL}
\alias{filterTrajFeaturesByDL}
\alias{filterTrajFeaturesByDL,SingleCellExperiment-method}
\title{Filter trajectory features by Detection Level (DL)}
\usage{
filterTrajFeaturesByDL(sce, threshold, show_plot = TRUE)
}
\arguments{
\item{sce}{A \code{SingleCellExperiment} object}
\item{threshold}{Minimum number of samples; if value < 1 it is interpreted
as fraction, otherwise as absolute sample count}
\item{show_plot}{Indicates if plot should be shown (default: TRUE)}
}
\value{
A \code{character} vector
}
\description{
Filters trajectory features that are detected in a minimum number of
samples.
}
\details{
The detection level denotes the fraction of samples in which a
feature was detected. For each trajectory feature listed in the
CellTrailsSet object the relative number of samples having a feature
expression value greater than 0 is counted. Features that are expressed in
a fraction of all samples greater than \code{threshold} remain labeled as
trajectory feature as listed in the \code{SingleCellExperiment} object,
otherwise they may be not considered for dimensionality reduction,
clustering, and trajectory reconstruction. If the parameter \code{threshold}
fulfills \code{threshold} \eqn{>= 1}, it is converted to a relative
fraction of the total sample count. Please note that spike-in controls
are ignored and are not listed as trajectory features.
}
\examples{
# Example data
set.seed(1101)
dat <- simulate_exprs(n_features=15000, n_samples=100)
# Create container
alist <- list(logcounts=dat)
sce <- SingleCellExperiment(assays=alist)
# Filter features
tfeat <- filterTrajFeaturesByDL(sce, threshold=2)
head(tfeat)
# Set trajectory features to object
trajFeatureNames(sce) <- tfeat
# Number of features
length(trajFeatureNames(sce)) #filtered
nrow(sce) #total
}
\seealso{
\code{trajFeatureNames} \code{isSpike}
}
\author{
Daniel C. Ellwanger
}
| /man/filterTrajFeaturesByDL.Rd | no_license | rcannood/CellTrails | R | false | true | 2,015 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R
\docType{methods}
\name{filterTrajFeaturesByDL}
\alias{filterTrajFeaturesByDL}
\alias{filterTrajFeaturesByDL,SingleCellExperiment-method}
\title{Filter trajectory features by Detection Level (DL)}
\usage{
filterTrajFeaturesByDL(sce, threshold, show_plot = TRUE)
}
\arguments{
\item{sce}{A \code{SingleCellExperiment} object}
\item{threshold}{Minimum number of samples; if value < 1 it is interpreted
as fraction, otherwise as absolute sample count}
\item{show_plot}{Indicates if plot should be shown (default: TRUE)}
}
\value{
A \code{character} vector
}
\description{
Filters trajectory features that are detected in a minimum number of
samples.
}
\details{
The detection level denotes the fraction of samples in which a
feature was detected. For each trajectory feature listed in the
CellTrailsSet object the relative number of samples having a feature
expression value greater than 0 is counted. Features that are expressed in
a fraction of all samples greater than \code{threshold} remain labeled as
trajectory feature as listed in the \code{SingleCellExperiment} object,
otherwise they may be not considered for dimensionality reduction,
clustering, and trajectory reconstruction. If the parameter \code{threshold}
fulfills \code{threshold} \eqn{>= 1}, it is converted to a relative
fraction of the total sample count. Please note that spike-in controls
are ignored and are not listed as trajectory features.
}
\examples{
# Example data
set.seed(1101)
dat <- simulate_exprs(n_features=15000, n_samples=100)
# Create container
alist <- list(logcounts=dat)
sce <- SingleCellExperiment(assays=alist)
# Filter features
tfeat <- filterTrajFeaturesByDL(sce, threshold=2)
head(tfeat)
# Set trajectory features to object
trajFeatureNames(sce) <- tfeat
# Number of features
length(trajFeatureNames(sce)) #filtered
nrow(sce) #total
}
\seealso{
\code{trajFeatureNames} \code{isSpike}
}
\author{
Daniel C. Ellwanger
}
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Layout: a sidebar of input controls and a main panel holding the histogram
# plot followed by inline app documentation.
#
library(shiny)
# Define UI for application that draws a histogram
shinyUI(bootstrapPage(
sidebarLayout(
# NOTE(review): sidebarPanel() has no `height` parameter; "100%" is passed
# through `...` and emitted as a tag attribute -- confirm it has any effect.
sidebarPanel(height = "100%",
# which airquality column to histogram
selectInput(inputId = "aq_indicator",
label = "Air Quality Indicator Options:",
choices = c("Ozone", "Solar.R", "Wind", "Temp"),
selected = "Solar.R"),
# number of histogram bins
selectInput(inputId = "n_breaks",
label = "Number of columns in histogram:",
choices = c(10, 20, 30, 40, 50),
selected = 20),
# toggle display of individual data points on the x-axis
checkboxInput(inputId = "individual_obs",
label = strong("Display individual observations"),
value = FALSE)
),
mainPanel(
plotOutput(outputId = "main_plot", height = "300px"),
# Display this only if the density is shown
# NOTE(review): no input with id "density" is defined in this UI (the only
# checkbox is "individual_obs"), so this panel can never become visible;
# it looks like a leftover from the stock single-file Shiny demo -- confirm
# against server.R before changing the condition.
conditionalPanel(condition = "input.density == true",
sliderInput(inputId = "bw_adjust",
label = "Bandwidth adjustment:",
min = 0.2, max = 2, value = 1, step = 0.2)
),
# Static in-app documentation rendered below the plot.
h3("App Documentation"),
h4("App Description"),
h5("This section of this document accompanies the Week 4 Project for the Shiny App.
This simple app displays histograms of the four different air quality indicators in the airquality dataset that comes with the r installation."),
h5("The user can interact with the app by selecting from any of the four air quality options from the dropdown list. S/he can select the number of columns to display in each histogram, can use a checkbox to toggle between displaying individual data points on the x-axis."),
h4("Inputs"),
h5("* Air quality options dropdown list with four options (Ozone, Solar.R, Wind and Temp"),
h5("* Number of columns to display in the histogram; a dropdown list to select from."),
h5("* A checkbox for user to chose whether to display individual data points or not."),
h4("Outputs"),
h5("The app's output consists of:"),
h5("* Histograms for each of the four air quality options (Ozone, Solar.R, Wind and Temp) - depending on the option selected."),
h5("* Histograms with varying number of columns, ddepending on the number seleccted from the dropdown list."),
h5("* Histograms with the individual data points indicated on the x-axis.")
)
)
))
| /ui.R | no_license | ennismDiplo/Data-Products | R | false | false | 2,562 | r | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Layout: a sidebar of input controls and a main panel holding the histogram
# plot followed by inline app documentation.
#
library(shiny)
# Define UI for application that draws a histogram
shinyUI(bootstrapPage(
sidebarLayout(
# NOTE(review): sidebarPanel() has no `height` parameter; "100%" is passed
# through `...` and emitted as a tag attribute -- confirm it has any effect.
sidebarPanel(height = "100%",
# which airquality column to histogram
selectInput(inputId = "aq_indicator",
label = "Air Quality Indicator Options:",
choices = c("Ozone", "Solar.R", "Wind", "Temp"),
selected = "Solar.R"),
# number of histogram bins
selectInput(inputId = "n_breaks",
label = "Number of columns in histogram:",
choices = c(10, 20, 30, 40, 50),
selected = 20),
# toggle display of individual data points on the x-axis
checkboxInput(inputId = "individual_obs",
label = strong("Display individual observations"),
value = FALSE)
),
mainPanel(
plotOutput(outputId = "main_plot", height = "300px"),
# Display this only if the density is shown
# NOTE(review): no input with id "density" is defined in this UI (the only
# checkbox is "individual_obs"), so this panel can never become visible;
# it looks like a leftover from the stock single-file Shiny demo -- confirm
# against server.R before changing the condition.
conditionalPanel(condition = "input.density == true",
sliderInput(inputId = "bw_adjust",
label = "Bandwidth adjustment:",
min = 0.2, max = 2, value = 1, step = 0.2)
),
# Static in-app documentation rendered below the plot.
h3("App Documentation"),
h4("App Description"),
h5("This section of this document accompanies the Week 4 Project for the Shiny App.
This simple app displays histograms of the four different air quality indicators in the airquality dataset that comes with the r installation."),
h5("The user can interact with the app by selecting from any of the four air quality options from the dropdown list. S/he can select the number of columns to display in each histogram, can use a checkbox to toggle between displaying individual data points on the x-axis."),
h4("Inputs"),
h5("* Air quality options dropdown list with four options (Ozone, Solar.R, Wind and Temp"),
h5("* Number of columns to display in the histogram; a dropdown list to select from."),
h5("* A checkbox for user to chose whether to display individual data points or not."),
h4("Outputs"),
h5("The app's output consists of:"),
h5("* Histograms for each of the four air quality options (Ozone, Solar.R, Wind and Temp) - depending on the option selected."),
h5("* Histograms with varying number of columns, ddepending on the number seleccted from the dropdown list."),
h5("* Histograms with the individual data points indicated on the x-axis.")
)
)
))
|
\name{GAabbreviation-internal}
\alias{GAabbreviation-internal}
\alias{alphas.GAabbreviate}
\alias{convCorr.GAabbreviate}
\alias{fitness.GAabbreviate}
\alias{GAabbreviation-package}
\alias{is.missing}
\alias{impute}
\alias{makeKey.GAabbreviate}
\alias{measure.GAabbreviate}
\alias{monitor.GAabbreviate}
\alias{updateObject.GAabbreviate}
\alias{plot.GAabbreviate}
\alias{print.GAabbreviate}
\alias{summary.GAabbreviate}
\title{Internal GAabbreviation functions}
\description{Internal functions not intended to be called directly by users or functions for which no documentation has been written so far.}
%\usage{}
\keyword{internal} | /man/GAabbreviate-internal.Rd | no_license | cran/GAabbreviate | R | false | false | 633 | rd | \name{GAabbreviation-internal}
\alias{GAabbreviation-internal}
\alias{alphas.GAabbreviate}
\alias{convCorr.GAabbreviate}
\alias{fitness.GAabbreviate}
\alias{GAabbreviation-package}
\alias{is.missing}
\alias{impute}
\alias{makeKey.GAabbreviate}
\alias{measure.GAabbreviate}
\alias{monitor.GAabbreviate}
\alias{updateObject.GAabbreviate}
\alias{plot.GAabbreviate}
\alias{print.GAabbreviate}
\alias{summary.GAabbreviate}
\title{Internal GAabbreviation functions}
\description{Internal functions not intended to be called directly by users or functions for which no documentation has bee written so far.}
%\usage{}
\keyword{internal} |
## Two functions below that cache the inverse of a matrix,
## and solve it only when it is not already cached.
## Creates a special "matrix" object that can cache its inverse: returns a
## list of accessor functions closing over the matrix `x` and its cached
## inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(mtrx) {
    # replace the stored matrix and drop any stale cached inverse
    x <<- mtrx
    inv <<- NULL
  }
  get <- function() x
  setInverse <- function(solve) inv <<- solve
  getInverse <- function() inv
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and
## the matrix has not changed), the cached value is returned instead of
## being recomputed.
cacheSolve <- function(x, ...) {
  inv <- x$getInverse()
  if (is.null(inv)) {
    # cache miss: compute the inverse and remember it for subsequent calls
    inv <- solve(x$get(), ...)
    x$setInverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
| /cachematrix.R | no_license | rb787/ProgrammingAssignment2 | R | false | false | 1,252 | r | ## Two functions below that cache the inverse of a matrix,
## and solves it only when it is not already cached.
## Creates a special "matrix" object that can cache its inverse: returns a
## list of accessor functions closing over the matrix `x` and its cached
## inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(mtrx) {
    # replace the stored matrix and drop any stale cached inverse
    x <<- mtrx
    inv <<- NULL
  }
  get <- function() x
  setInverse <- function(solve) inv <<- solve
  getInverse <- function() inv
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and
## the matrix has not changed), the cached value is returned instead of
## being recomputed.
cacheSolve <- function(x, ...) {
  inv <- x$getInverse()
  if (is.null(inv)) {
    # cache miss: compute the inverse and remember it for subsequent calls
    inv <- solve(x$get(), ...)
    x$setInverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
|
# Builds a synthetic data set of random nucleotide sequences: n.seqs rows,
# each with n.fields string fields of seq.len bases; written out by the
# write.table() call that follows this block.
#df <- data.frame()
#
#for (i in 0:length(strings)/5000) {
#  df <- cbind(df,strings[c((5000*i):(5000*i)+5000)])
#  }
library(dplyr)
n.seqs <- 10^5    # rows (sequences) in the output
seq.len <- 50     # characters per string field
n.fields <- 4     # string fields per row
# the four nucleotide letters, upper-cased: "A", "C", "G", "T"
df <- mutate_all(as.data.frame(letters), .funs = toupper)
nucleotides <- df[c(1, 3, 7, 20), ]
# one long random draw of bases (TRUE spelled out; T is a reassignable alias)
strings <- as.character(sample(nucleotides, n.fields * seq.len * n.seqs, replace = TRUE))
test.mat <- matrix(data = strings, nrow = n.seqs)
test.df <- as.data.frame(test.mat)
#sapply(test.mat[1,],)
# collapse each consecutive block of seq.len columns into one string per row
test_4.df <-
  data.frame('1' = apply(test.mat[, 1:seq.len], FUN = paste, MARGIN = 1, collapse = ""),
             '2' = apply(test.mat[, (seq.len + 1):(2 * seq.len)], FUN = paste, MARGIN = 1, collapse = ""),
             '3' = apply(test.mat[, (2 * seq.len + 1):(3 * seq.len)], FUN = paste, MARGIN = 1, collapse = ""),
             '4' = apply(test.mat[, (3 * seq.len + 1):(4 * seq.len)], FUN = paste, MARGIN = 1, collapse = ""))
write.table(test_4.df,file = "pete_test_data",row.names = F,col.names = F) | /make_genetic_data_random.R | no_license | MattSkiff/first_repo | R | false | false | 836 | r | #df <- data.frame()
# Builds a synthetic data set of random nucleotide sequences: n.seqs rows,
# each with n.fields string fields of seq.len bases each.
#for (i in 0:length(strings)/5000) {
#  df <- cbind(df,strings[c((5000*i):(5000*i)+5000)])
#  }
library(dplyr)
n.seqs <- 10^5    # rows (sequences) in the output
seq.len <- 50     # characters per string field
n.fields <- 4     # string fields per row
# the four nucleotide letters, upper-cased: "A", "C", "G", "T"
df <- mutate_all(as.data.frame(letters), .funs = toupper)
nucleotides <- df[c(1, 3, 7, 20), ]
# one long random draw of bases (TRUE spelled out; T is a reassignable alias)
strings <- as.character(sample(nucleotides, n.fields * seq.len * n.seqs, replace = TRUE))
test.mat <- matrix(data = strings, nrow = n.seqs)
test.df <- as.data.frame(test.mat)
#sapply(test.mat[1,],)
# collapse each consecutive block of seq.len columns into one string per row
test_4.df <-
  data.frame('1' = apply(test.mat[, 1:seq.len], FUN = paste, MARGIN = 1, collapse = ""),
             '2' = apply(test.mat[, (seq.len + 1):(2 * seq.len)], FUN = paste, MARGIN = 1, collapse = ""),
             '3' = apply(test.mat[, (2 * seq.len + 1):(3 * seq.len)], FUN = paste, MARGIN = 1, collapse = ""),
             '4' = apply(test.mat[, (3 * seq.len + 1):(4 * seq.len)], FUN = paste, MARGIN = 1, collapse = ""))
write.table(test_4.df, file = "pete_test_data", row.names = FALSE, col.names = FALSE)
# Minimal-support sampling design (splitting method).
# pik: vector of inclusion probabilities (must contain no NA; its sum is the
#      sample size). Returns a 0/1 selection vector obtained by repeatedly
#      splitting pik until every entry is integral.
UPminimalsupport <- function(pik) {
  if (any(is.na(pik))) {
    stop("there are missing values in the pik vector")
  }
  # one splitting step: express p as a mixture of a candidate sample s1v
  # (weight alpha) and a residual probability vector pikb (weight 1 - alpha),
  # then randomly keep one of the two
  split_step <- function(p) {
    N <- length(p)
    n <- sum(p)
    certain_out <- (1:N)[p == 0]
    certain_in <- (1:N)[p == 1]
    undecided <- setdiff(setdiff(1:N, certain_out), certain_in)
    drawn <- undecided[sample.int(length(undecided), round(n - length(certain_in)))]
    s1v <- rep(0, times = N)
    s1v[c(certain_in, drawn)] <- 1
    # largest mixing weight keeping the residual probabilities inside [0, 1]
    alpha <- min(1 - max(p[setdiff(undecided, drawn)]), min(p[drawn]))
    pikb <- (p - alpha * s1v) / (1 - alpha)
    if (runif(1, 0, 1) < alpha) s1v else pikb
  }
  # a vector is a realized sample once every entry is numerically integral
  is_sample <- function(s, EPS = sqrt(.Machine$double.eps)) {
    sum(abs(s - round(s))) < EPS
  }
  while (!is_sample(pik)) {
    pik <- split_step(pik)
  }
  round(pik)
}
| /R/UPminimalsupport.R | no_license | cran/sampling | R | false | false | 610 | r | "UPminimalsupport" <-
function(pik)
{
# Minimal-support sampling design via the splitting method.
# pik: inclusion probabilities; returns a 0/1 sample vector (rounded).
if(any(is.na(pik))) stop("there are missing values in the pik vector")
# One splitting step: decompose pik into a candidate sample s1v (weight
# alpha) and a residual probability vector pikb (weight 1 - alpha), then
# randomly keep one of the two.
basicsplit<-function(pik)
{
N=length(pik)
n=sum(pik)
# A: units with pik == 0 (never selected); B: pik == 1 (always selected);
# C: the undecided units; D: a random draw from C completing the sample.
A=(1:N)[pik==0]
B=(1:N)[pik==1]
C=setdiff(setdiff(1:N,A),B)
D=C[sample.int(length(C), round(n-length(B)))]
s1v=rep(0,times=N)
s1v[c(B,D)]=1
# Largest mixing weight keeping the residual probabilities inside [0, 1].
alpha=min(1-max(pik[setdiff(C,D)]),min(pik[D]))
pikb= (pik-alpha*s1v)/(1-alpha)
if(runif(1,0,1)<alpha) s=s1v else s=pikb
s
}
# A vector is a realized sample once every entry is numerically integral.
is.a.sample<-function(s,EPS=sqrt(.Machine$double.eps)) if(sum(abs(s-round(s)))<EPS) TRUE else FALSE
while(!is.a.sample(pik))pik=basicsplit(pik)
round(pik)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biasvar.R
\name{BiasVar}
\alias{BiasVar}
\title{Demo of bias/variance tradeoff}
\usage{
BiasVar(N = 25, n = 10, R = 200, seed = 1)
}
\arguments{
\item{N:}{number of observations}
\item{n:}{max degree of polynomial fitting}
\item{R:}{number of repetitions for bias and variance estimation}
\item{seed:}{seed random generator}
}
\value{
: list with
\itemize{
\item{Remp}: training MSE
\item{B}: estimated squared bias
\item{V}: estimated squared variance
}
}
\description{
Demo of bias/variance tradeoff
}
\examples{
BiasVar()
}
\references{
\url{mlg.ulb.ac.be}
}
\author{
Gianluca Bontempi \email{gbonte@ulb.ac.be}
}
| /man/BiasVar.Rd | no_license | niuneo/gbcode | R | false | true | 702 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biasvar.R
\name{BiasVar}
\alias{BiasVar}
\title{Demo of bias/variance tradeoff}
\usage{
BiasVar(N = 25, n = 10, R = 200, seed = 1)
}
\arguments{
\item{N:}{number of observations}
\item{n:}{max degree of polynomial fitting}
\item{R:}{number of repetitions for bias and variance estimation}
\item{seed:}{seed random generator}
}
\value{
: list with
\itemize{
\item{Remp}: training MSE
\item{B}: estimated squared bias
\item{V}: estimated squared variance
}
}
\description{
Demo of bias/variance tradeoff
}
\examples{
BiasVar()
}
\references{
\url{mlg.ulb.ac.be}
}
\author{
Gianluca Bontempi \email{gbonte@ulb.ac.be}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.ec2_operations.R
\name{terminate_client_vpn_connections}
\alias{terminate_client_vpn_connections}
\title{Terminates active Client VPN endpoint connections}
\usage{
terminate_client_vpn_connections(ClientVpnEndpointId,
ConnectionId = NULL, Username = NULL, DryRun = NULL)
}
\arguments{
\item{ClientVpnEndpointId}{[required] The ID of the Client VPN endpoint to which the client is connected.}
\item{ConnectionId}{The ID of the client connection to be terminated.}
\item{Username}{The name of the user who initiated the connection. Use this option to terminate all active connections for the specified user. This option can only be used if the user has established up to five connections.}
\item{DryRun}{Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is \code{DryRunOperation}. Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Terminates active Client VPN endpoint connections. This action can be used to terminate a specific client connection, or up to five connections established by a specific user.
}
\section{Accepted Parameters}{
\preformatted{terminate_client_vpn_connections(
ClientVpnEndpointId = "string",
ConnectionId = "string",
Username = "string",
DryRun = TRUE|FALSE
)
}
}
| /service/paws.ec2/man/terminate_client_vpn_connections.Rd | permissive | CR-Mercado/paws | R | false | true | 1,442 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.ec2_operations.R
\name{terminate_client_vpn_connections}
\alias{terminate_client_vpn_connections}
\title{Terminates active Client VPN endpoint connections}
\usage{
terminate_client_vpn_connections(ClientVpnEndpointId,
ConnectionId = NULL, Username = NULL, DryRun = NULL)
}
\arguments{
\item{ClientVpnEndpointId}{[required] The ID of the Client VPN endpoint to which the client is connected.}
\item{ConnectionId}{The ID of the client connection to be terminated.}
\item{Username}{The name of the user who initiated the connection. Use this option to terminate all active connections for the specified user. This option can only be used if the user has established up to five connections.}
\item{DryRun}{Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is \code{DryRunOperation}. Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Terminates active Client VPN endpoint connections. This action can be used to terminate a specific client connection, or up to five connections established by a specific user.
}
\section{Accepted Parameters}{
\preformatted{terminate_client_vpn_connections(
ClientVpnEndpointId = "string",
ConnectionId = "string",
Username = "string",
DryRun = TRUE|FALSE
)
}
}
|
library(bcp)
### Name: fitted.bcp
### Title: Extract model fitted values
### Aliases: fitted.bcp
### Keywords: datasets
### ** Examples
##### A random sample from a few normal distributions #####
# three segments: N(0, 1), N(5, 1), N(0, 1) -- change points at 50 and 100
testdata <- c(rnorm(50), rnorm(50, 5, 1), rnorm(50))
# fit the Bayesian change-point model
bcp.0 <- bcp(testdata)
# NOTE(review): this extracted example page documents fitted.bcp yet calls
# residuals(); presumably fitted(bcp.0) was intended -- confirm upstream.
residuals(bcp.0)
| /data/genthat_extracted_code/bcp/examples/fitted.bcp.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 297 | r | library(bcp)
### Name: fitted.bcp
### Title: Extract model fitted values
### Aliases: fitted.bcp
### Keywords: datasets
### ** Examples
##### A random sample from a few normal distributions #####
# three segments: N(0, 1), N(5, 1), N(0, 1) -- change points at 50 and 100
testdata <- c(rnorm(50), rnorm(50, 5, 1), rnorm(50))
# fit the Bayesian change-point model
bcp.0 <- bcp(testdata)
# NOTE(review): this extracted example page documents fitted.bcp yet calls
# residuals(); presumably fitted(bcp.0) was intended -- confirm upstream.
residuals(bcp.0)
|
# Library imports.
library(lubridate)
library(stringr)
library(dplyr)
library(ggplot2)
library(sqldf)

# Read the raw history file: each line holds an epoch timestamp
# followed by the command text.
historyDF <- readLines('history_database')
historyDF <- as.data.frame(historyDF)
colnames(historyDF) <- 'whole_val'

# Column separations: the first 10 characters are the epoch seconds,
# the command text starts at position 15.
historyDF$date_ts <- as.numeric(str_sub(historyDF$whole_val, 1, 10))
historyDF$code <- str_trim(str_sub(historyDF$whole_val, 15))
historyDF <- historyDF %>% select(-c(1))
historyDF$code <- as.character(historyDF$code)

# Convert the epoch value to a GMT date-time.
historyDF$date_ts <- as_datetime(as.POSIXct(historyDF$date_ts, origin="1970-01-01"), tz='GMT')

# Derive year, month, hour and weekday from the timestamp.
historyDF_added <- mutate(historyDF,
                          year = year(date_ts),
                          month = month(date_ts),
                          hour = hour(date_ts),
                          day = weekdays(date_ts))

# Keep only entries from the month this assignment started (November on).
historyDF_added <- filter(historyDF_added, month >= 11)

# Flag commented lines. str_detect() already returns a logical, so the
# original ifelse(... == TRUE, TRUE, FALSE) wrapper was redundant.
historyDF_added <- mutate(historyDF_added, commented = str_detect(code, '#'))

# Flag package-loading lines. The original additionally applied
# str_replace(..., 'library', '') to the "TRUE"/"FALSE" strings, which
# was a no-op; the flag is kept directly (same factor levels result).
historyDF_added <- mutate(historyDF_added,
                          library = str_detect(code, 'library') | str_detect(code, 'package'))

# Factor conversion for plotting.
historyDF_added$year <- as.factor(historyDF_added$year)
historyDF_added$month <- as.factor(historyDF_added$month)
historyDF_added$hour <- as.factor(historyDF_added$hour)
historyDF_added$day <- as.factor(historyDF_added$day)
historyDF_added$library <- as.factor(historyDF_added$library)

# Command counts per hour of day.
hourLine <- sqldf("select hour,count(*) as total_count from historyDF_added group by hour")

# Bar chart of command counts across hours.
ggplot(hourLine, aes(factor(hour), total_count)) +
  geom_bar(stat="identity", position = "dodge") +
  scale_fill_brewer(palette = "Set1")

hourcommentLine <- sqldf("select hour,commented,count(*) as total_count from historyDF_added group by hour,commented")

# Frequency of commented vs uncommented commands by hour.
qplot(data = historyDF_added, x=hour, fill=commented, main='frequency of hour-commented/uncommented')

# Frequency of commands by day of week.
qplot(data = historyDF_added, day, main='Frequency of Days')

# The frequency plot for the 'library' flag follows this block.
qplot(data = historyDF_added,library,main='library codes Presence') | /R core programming/Q5.R | no_license | praveenM417/Data-Engineering-Data-Science | R | false | false | 2,346 | r | #library import
# Library imports.
library(lubridate)
library(stringr)
library(dplyr)
library(ggplot2)
library(sqldf)

# Read the raw history file: each line holds an epoch timestamp
# followed by the command text.
historyDF <- readLines('history_database')
historyDF <- as.data.frame(historyDF)
colnames(historyDF) <- 'whole_val'

# Column separations: the first 10 characters are the epoch seconds,
# the command text starts at position 15.
historyDF$date_ts <- as.numeric(str_sub(historyDF$whole_val, 1, 10))
historyDF$code <- str_trim(str_sub(historyDF$whole_val, 15))
historyDF <- historyDF %>% select(-c(1))
historyDF$code <- as.character(historyDF$code)

# Convert the epoch value to a GMT date-time.
historyDF$date_ts <- as_datetime(as.POSIXct(historyDF$date_ts, origin="1970-01-01"), tz='GMT')

# Derive year, month, hour and weekday from the timestamp.
historyDF_added <- mutate(historyDF,
                          year = year(date_ts),
                          month = month(date_ts),
                          hour = hour(date_ts),
                          day = weekdays(date_ts))

# Keep only entries from the month this assignment started (November on).
historyDF_added <- filter(historyDF_added, month >= 11)

# Flag commented lines. str_detect() already returns a logical, so the
# original ifelse(... == TRUE, TRUE, FALSE) wrapper was redundant.
historyDF_added <- mutate(historyDF_added, commented = str_detect(code, '#'))

# Flag package-loading lines. The original additionally applied
# str_replace(..., 'library', '') to the "TRUE"/"FALSE" strings, which
# was a no-op; the flag is kept directly (same factor levels result).
historyDF_added <- mutate(historyDF_added,
                          library = str_detect(code, 'library') | str_detect(code, 'package'))

# Factor conversion for plotting.
historyDF_added$year <- as.factor(historyDF_added$year)
historyDF_added$month <- as.factor(historyDF_added$month)
historyDF_added$hour <- as.factor(historyDF_added$hour)
historyDF_added$day <- as.factor(historyDF_added$day)
historyDF_added$library <- as.factor(historyDF_added$library)

# Command counts per hour of day.
hourLine <- sqldf("select hour,count(*) as total_count from historyDF_added group by hour")

# Bar chart of command counts across hours.
ggplot(hourLine, aes(factor(hour), total_count)) +
  geom_bar(stat="identity", position = "dodge") +
  scale_fill_brewer(palette = "Set1")

hourcommentLine <- sqldf("select hour,commented,count(*) as total_count from historyDF_added group by hour,commented")

# Frequency of commented vs uncommented commands by hour.
qplot(data = historyDF_added, x=hour, fill=commented, main='frequency of hour-commented/uncommented')

# Frequency of commands by day of week.
qplot(data = historyDF_added, day, main='Frequency of Days')

# Frequency of package-loading commands.
qplot(data = historyDF_added, library, main='library codes Presence')
library(MASS)

## Zelig 5 interface (wrapper functions).
# NOTE(review): zeligw()/setxw()/simw() are called before library(Zelig)
# is attached below; they are assumed to already be in scope -- confirm.
fit5 <- zeligw(dist ~ speed, model = "ls", data = cars)
print(fit5)
low5 <- setxw(fit5, speed = 30)
high5 <- setxw(fit5, speed = 50)
sims5 <- simw(fit5, low5, high5, num = 1000)
summaryw(sims5)

library(Zelig)

## Zelig 4 interface, same model and scenarios.
fit4 <- zelig(dist ~ speed, model = "ls", data = cars)
print(fit4)
low4 <- setx(fit4, speed = 30)
high4 <- setx(fit4, speed = 50)
sims4 <- sim(fit4, low4, high4, num = 1000)
summary(sims4)

# Run the Zelig 5 workflow inside a function scope.
f <- function(form = dist ~ speed, model = "ls", data = cars) {
  fitted_model <- zeligw(formula = form, model = model, data = data)
  print(fitted_model)
  covariates <- setxw2(fitted_model, speed = 30)
  simulations <- simw2(fitted_model, covariates, num = 1000)
  simulations$summarize()
}
f()

# Exercise the zls reference class directly.
FF <- function(form = dist ~ speed, data = cars) {
  model_obj <- zls$new()
  model_obj$zelig(formula = form, data = data)
  print(model_obj)
  model_obj$setx(speed = 3)
  return(model_obj)
}
ZZ <- FF()
| /test/demo-wrappers.R | no_license | wwdxfa/Zelig5 | R | false | false | 873 | r | library(MASS)
## Zelig 5 workflow (wrapper functions).
# NOTE(review): zeligw()/setxw()/simw() are invoked before library(Zelig)
# is attached below; they must already be in scope -- confirm.
m5 <- zeligw(dist ~ speed, model = "ls", data = cars)
print(m5)
sc5a <- setxw(m5, speed = 30)
sc5b <- setxw(m5, speed = 50)
qi5 <- simw(m5, sc5a, sc5b, num = 1000)
summaryw(qi5)

library(Zelig)

## Zelig 4 workflow, same model and scenarios.
m4 <- zelig(dist ~ speed, model = "ls", data = cars)
print(m4)
sc4a <- setx(m4, speed = 30)
sc4b <- setx(m4, speed = 50)
qi4 <- sim(m4, sc4a, sc4b, num = 1000)
summary(qi4)

# Same Zelig 5 steps wrapped in a function.
f <- function(form = dist ~ speed, model = "ls", data = cars) {
  m <- zeligw(formula = form, model = model, data = data)
  print(m)
  sc <- setxw2(m, speed = 30)
  qi <- simw2(m, sc, num = 1000)
  qi$summarize()
}
f()

# Direct use of the zls reference class.
FF <- function(form = dist ~ speed, data = cars) {
  obj <- zls$new()
  obj$zelig(formula = form, data = data)
  print(obj)
  obj$setx(speed = 3)
  return(obj)
}
ZZ <- FF()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/minimal.r
\encoding{UTF-8}
\name{minimal}
\alias{minimal}
\title{Minimal template}
\usage{
minimal(number_sections = FALSE, fig_width = 7, fig_height = 5,
fig_retina = if (!fig_caption) 2, fig_caption = FALSE, dev = "png",
smart = TRUE, self_contained = TRUE, highlight = "default",
mathjax = "default", extra_dependencies = NULL, css = NULL,
includes = NULL, keep_md = FALSE, lib_dir = NULL,
md_extensions = NULL, pandoc_args = NULL, ...)
}
\arguments{
\item{number_sections}{\code{TRUE} to number section headings}
\item{fig_width}{Default width (in inches) for figures}
\item{fig_height}{Default height (in inches) for figures}
\item{fig_retina}{Scaling to perform for retina displays (defaults to 2 when
\code{fig_caption} is \code{FALSE}, which currently works for all widely
used retina displays). Set to \code{NULL} to prevent retina scaling. Note
that this will always be \code{NULL} when \code{keep_md} is specified (this
is because \code{fig_retina} relies on outputting HTML directly into the
markdown document).}
\item{fig_caption}{\code{TRUE} to render figures with captions}
\item{dev}{Graphics device to use for figure output (defaults to png)}
\item{smart}{Produce typographically correct output, converting straight
quotes to curly quotes, --- to em-dashes, -- to en-dashes, and ... to
ellipses.}
\item{self_contained}{Produce a standalone HTML file with no external
dependencies, using data: URIs to incorporate the contents of linked
scripts, stylesheets, images, and videos. Note that even for self contained
documents MathJax is still loaded externally (this is necessary because of
its size).}
\item{highlight}{Syntax highlighting style. Supported styles include
"default", "tango", "pygments", "kate", "monochrome", "espresso",
"zenburn", "haddock", and "textmate". Pass \code{NULL} to prevent syntax
highlighting.}
\item{mathjax}{Include mathjax. The "default" option uses an https URL from
the official MathJax CDN. The "local" option uses a local version of
MathJax (which is copied into the output directory). You can pass an
alternate URL or pass \code{NULL} to exclude MathJax entirely.}
\item{extra_dependencies, ...}{Additional function arguments to pass to the
base R Markdown HTML output formatter}
\item{css}{One or more css files to include}
\item{includes}{Named list of additional content to include within the
document (typically created using the \code{\link{includes}} function).}
\item{keep_md}{Keep the markdown file generated by knitting.}
\item{lib_dir}{Directory to copy dependent HTML libraries (e.g. jquery,
bootstrap, etc.) into. By default this will be the name of the document
with \code{_files} appended to it.}
\item{md_extensions}{Markdown extensions to be added or removed from the
default definition or R Markdown. See the \code{\link{rmarkdown_format}}
for additional details.}
\item{pandoc_args}{Additional command line options to pass to pandoc}
}
\description{
Template for creating an R markdown document with minimal markup
}
\details{
\if{html}{
\figure{minimal.png}{options: width="100\%" alt="Figure: minimal example"}
}
\if{latex}{
\figure{minimal.pdf}{options: width=10cm}
}
}
\section{YAML Frontmatter}{
The following example shows all possible YAML frontmatter options:
\preformatted{---
title: "INSERT_TITLE_HERE"
output: markdowntemplates::minimal
---}
}
\examples{
\dontrun{
rmarkdown::render("source.Rmd", clean=TRUE, quiet=TRUE, output_file="output.html")
}
}
| /man/minimal.Rd | no_license | gragusa/markdowntemplates | R | false | true | 3,540 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/minimal.r
\encoding{UTF-8}
\name{minimal}
\alias{minimal}
\title{Minimal template}
\usage{
minimal(number_sections = FALSE, fig_width = 7, fig_height = 5,
fig_retina = if (!fig_caption) 2, fig_caption = FALSE, dev = "png",
smart = TRUE, self_contained = TRUE, highlight = "default",
mathjax = "default", extra_dependencies = NULL, css = NULL,
includes = NULL, keep_md = FALSE, lib_dir = NULL,
md_extensions = NULL, pandoc_args = NULL, ...)
}
\arguments{
\item{number_sections}{\code{TRUE} to number section headings}
\item{fig_width}{Default width (in inches) for figures}
\item{fig_height}{Default height (in inches) for figures}
\item{fig_retina}{Scaling to perform for retina displays (defaults to 2 when
\code{fig_caption} is \code{FALSE}, which currently works for all widely
used retina displays). Set to \code{NULL} to prevent retina scaling. Note
that this will always be \code{NULL} when \code{keep_md} is specified (this
is because \code{fig_retina} relies on outputting HTML directly into the
markdown document).}
\item{fig_caption}{\code{TRUE} to render figures with captions}
\item{dev}{Graphics device to use for figure output (defaults to png)}
\item{smart}{Produce typographically correct output, converting straight
quotes to curly quotes, --- to em-dashes, -- to en-dashes, and ... to
ellipses.}
\item{self_contained}{Produce a standalone HTML file with no external
dependencies, using data: URIs to incorporate the contents of linked
scripts, stylesheets, images, and videos. Note that even for self contained
documents MathJax is still loaded externally (this is necessary because of
its size).}
\item{highlight}{Syntax highlighting style. Supported styles include
"default", "tango", "pygments", "kate", "monochrome", "espresso",
"zenburn", "haddock", and "textmate". Pass \code{NULL} to prevent syntax
highlighting.}
\item{mathjax}{Include mathjax. The "default" option uses an https URL from
the official MathJax CDN. The "local" option uses a local version of
MathJax (which is copied into the output directory). You can pass an
alternate URL or pass \code{NULL} to exclude MathJax entirely.}
\item{extra_dependencies, ...}{Additional function arguments to pass to the
base R Markdown HTML output formatter}
\item{css}{One or more css files to include}
\item{includes}{Named list of additional content to include within the
document (typically created using the \code{\link{includes}} function).}
\item{keep_md}{Keep the markdown file generated by knitting.}
\item{lib_dir}{Directory to copy dependent HTML libraries (e.g. jquery,
bootstrap, etc.) into. By default this will be the name of the document
with \code{_files} appended to it.}
\item{md_extensions}{Markdown extensions to be added or removed from the
default definition or R Markdown. See the \code{\link{rmarkdown_format}}
for additional details.}
\item{pandoc_args}{Additional command line options to pass to pandoc}
}
\description{
Template for creating an R markdown document with minimal markup
}
\details{
\if{html}{
\figure{minimal.png}{options: width="100\%" alt="Figure: minimal example"}
}
\if{latex}{
\figure{minimal.pdf}{options: width=10cm}
}
}
\section{YAML Frontmatter}{
The following example shows all possible YAML frontmatter options:
\preformatted{---
title: "INSERT_TITLE_HERE"
output: markdowntemplates::minimal
---}
}
\examples{
\dontrun{
rmarkdown::render("source.Rmd", clean=TRUE, quiet=TRUE, output_file="output.html")
}
}
|
install.packages("ROCR")
library(ROCR)

# Each row of the input gives the predicted probability of success
# for an observation (class 0 = failure, 1 = success).
scores1 <- read.csv("../data/tema3/roc-example-1.csv")
scores2 <- read.csv("../data/tema3/roc-example-2.csv")

pred1 <- ROCR::prediction(scores1$prob, scores1$class)
perf1 <- performance(pred1, "tpr", "fpr")  # true vs false positive rate

# First ROC curve, with the chance diagonal drawn across the plot region.
plot(perf1)
lines(par()$usr[1:2], par()$usr[3:4])

# Cutoff table: probability threshold against FPR and TPR.
probs.cut.1 <- data.frame(cut = perf1@alpha.values[[1]],
                          fpr = perf1@x.values[[1]],
                          tpr = perf1@y.values[[1]])
head(probs.cut.1)
tail(probs.cut.1)
probs.cut.1[probs.cut.1$tpr >= 0.8, ]

# Same analysis on dataset 2, where the classes are the categorical
# labels "buyer" / "non-buyer".
pred2 <- prediction(scores2$prob, scores2$class, label.ordering = c("non-buyer", "buyer"))
perf2 <- performance(pred2, "tpr", "fpr")
plot(perf2, col = "green")
lines(par()$usr[1:2], par()$usr[3:4], col = "red")  # chance diagonal
| /scripts/26_CurvasROC.R | no_license | soullest/R | R | false | false | 1,090 | r | install.packages("ROCR")
library(ROCR)

# The input files give, per row, the predicted probability of success
# (class 0 = failure, 1 = success).
roc_df1 <- read.csv("../data/tema3/roc-example-1.csv")
roc_df2 <- read.csv("../data/tema3/roc-example-2.csv")

roc_pred <- ROCR::prediction(roc_df1$prob, roc_df1$class)
roc_perf <- performance(roc_pred, "tpr", "fpr")  # TPR vs FPR

# Draw the ROC curve plus the chance diagonal.
plot(roc_perf)
lines(par()$usr[1:2], par()$usr[3:4])

# Table of cutoffs with their false/true positive rates.
probs.cut.1 <- data.frame(cut = roc_perf@alpha.values[[1]],
                          fpr = roc_perf@x.values[[1]],
                          tpr = roc_perf@y.values[[1]])
head(probs.cut.1)
tail(probs.cut.1)
probs.cut.1[probs.cut.1$tpr >= 0.8, ]

# Repeat for dataset 2, whose classes are the categorical labels
# "buyer" / "non-buyer".
roc_pred2 <- prediction(roc_df2$prob, roc_df2$class, label.ordering = c("non-buyer", "buyer"))
roc_perf2 <- performance(roc_pred2, "tpr", "fpr")
plot(roc_perf2, col = "green")
lines(par()$usr[1:2], par()$usr[3:4], col = "red")  # chance diagonal
|
library(logmult)

### Example for anoas()/anoasL(): analysis of association functions.
### Reproduces Wong (2010), Table 2.6.

### ** Examples

data(gss8590)
# The table used in Wong (2010) is not perfectly consistent with that
# of Wong (2001); patch the single differing cell.
counts <- margin.table(gss8590[, , c(2, 4)], 1:2)
counts[2, 4] <- 49
# Printed results correspond to lines 1, 6 and 11 of the table.
fit <- anoas(counts, nd = 2)
fit
## Don't show:
chk <- summary(fit)
stopifnot(isTRUE(all.equal(chk[[1]], c(12, 6, 2))))
stopifnot(isTRUE(all.equal(round(chk[[2]], 2), c(1373.18, 125.06, 0.60))))
stopifnot(isTRUE(all.equal(round(chk[[4]], 2), c(23.86, 6.44, 0.09))))
stopifnot(isTRUE(all.equal(round(chk[[5]], 2), c(1274.08, 75.51, -15.92))))
stopifnot(isTRUE(all.equal(round(chk[[7]][-1], 2), c(-1248.12, -124.46))))
stopifnot(isTRUE(all.equal(chk[[8]][-1], c(-6, -4))))
## End(Don't show)
| /data/genthat_extracted_code/logmult/examples/anoas.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 952 | r | library(logmult)
### Name: anoas
### Title: Analysis of Association Functions
### Aliases: anoas anoasL
### Keywords: models nonlinear

### ** Examples

## Wong (2010), Table 2.6
data(gss8590)
# The published table is not perfectly consistent with Wong (2001);
# patch the single differing cell before fitting.
xtab <- margin.table(gss8590[, , c(2, 4)], 1:2)
xtab[2, 4] <- 49
# Printed results correspond to lines 1, 6 and 11 of the table.
anoas.fit <- anoas(xtab, nd = 2)
anoas.fit
## Don't show:
smry <- summary(anoas.fit)
stopifnot(isTRUE(all.equal(smry[[1]], c(12, 6, 2))))
stopifnot(isTRUE(all.equal(round(smry[[2]], 2), c(1373.18, 125.06, 0.60))))
stopifnot(isTRUE(all.equal(round(smry[[4]], 2), c(23.86, 6.44, 0.09))))
stopifnot(isTRUE(all.equal(round(smry[[5]], 2), c(1274.08, 75.51, -15.92))))
stopifnot(isTRUE(all.equal(round(smry[[7]][-1], 2), c(-1248.12, -124.46))))
stopifnot(isTRUE(all.equal(smry[[8]][-1], c(-6, -4))))
## End(Don't show)
|
#' @title MAEI Calculation for Individually-Randomized Experiments
#'
#' @description This function calculates the maximum aggregate electoral impact (MAEI) for individually-randomized experiments following Slough (2020). This function returns the MAEIs under the assumption of no interference between voters (SUTVA). The optional argument psi uses the calculation of MAEI_d to implement the decision rule proposed in the paper.
#'
#' @param vr a data frame that contains the variables: district and the corresponding number of registered voters in that district.
#' @param dist a character to specify the column name of the district variable in the data frame vr.
#' @param nvoters a character to specify the column name of the number of voters in the data frame vr.
#' @param s10 a number or vector to denote the individuals exposed to the treatment because it is assigned experimentally. It can be the exact number or proportion (between 0 and 1).
#' @param s01 a number or vector to denote the individuals not exposed to the treatment because it is assigned experimentally. It can be the exact number or proportion (between 0 and 1). The default value is NULL which corresponds to the case in which a researcher designs and implements an intervention that would otherwise not have occurred. If it is not NULL, it corresponds to the case in which some intervention by a third party is modified to include an experimental component.
#' @param exp_ac0 a number or vector to denote the expectation of untreated potential outcome. The default value is one which will return the most conservative bound.
#'
#' @param psi a number or vector specifying “margin to pivotality”, as minimum change in vote share, as a proportion of registered voters, at which a different officeholder would be elected in district. If psi > 2MAEI (Maximal Aggregate Electoral Impact), an experiment could not change the ultimate electoral outcome (the output result will show "PASS" the decision rule); in contrast, if psi < 2MAEI, the experiment could affect the ultimate electoral outcome (the output result will show "FAIL").
#'
#' @examples
#' ### input data
#' data(rv1)
#'
#' ### specify s10 as a constant number across districts
#' get_maei_ind_rand(vr = rv1, dist = "d",
#'                   nvoters = "n_voters", s10 = 200)
#'
#' ### let s10 be different proportion and specify s01 = 10 (thus case 2)
#' set.seed(10)
#' get_maei_ind_rand(vr = rv1, dist = "d",
#'                   nvoters = "n_voters", s10 = runif(10), s01 = 10)
#'
#' ### add margin to pivotality say psi = 0.3
#' get_maei_ind_rand(vr = rv1, dist = "d",
#'                   nvoters = "n_voters", s10 = 0.13, s01 = 10, psi = 0.3)
#'
#' @references Slough, Tara. 2020. "The Ethics of Electoral Experimentation: Design-Based Recommendations." Working paper. Available at www.taraslough.com/assets/pdf/eee.pdf.
#'
#'
#' @import dplyr
#' @import magrittr
#' @import randomizr
#'
#' @export
get_maei_ind_rand <- function(vr,          ## voter rolls (one row per district)
                              dist,        ## column name of the district variable in vr
                              nvoters,     ## column name of the voter-count variable in vr
                              s10,         ## number or proportion
                              s01 = NULL,
                              exp_ac0 = 1, # most conservative
                              psi = NULL,
                              ...
                              ){
  ### check the inputs (messages unchanged; checks use inherits()/is.character()
  ### instead of direct class() comparisons)
  if (!inherits(vr, "data.frame"))
    stop("vr should be the data.frame")
  if (!(length(s10) == 1 | length(s10) == nrow(vr)))
    stop("the length of s10 is not consistent with the vr")
  if (!is.null(s01)){
    if (!(length(s01) == 1 | length(s01) == nrow(vr)))
      stop("the length of s01 is not consistent with the vr")
  }
  if (!(length(exp_ac0) == 1 | length(exp_ac0) == nrow(vr)))
    stop("the length of exp_ac0 is not consistent with the vr")
  if (sum(exp_ac0 > 1) > 0 | sum(exp_ac0 < 0) > 0)
    stop("exp_ac0 should be between 0 and 1")
  if (!is.character(dist))
    stop("dist should be a character variable that is the same as the column name of the district variable in the vr")
  if (!is.character(nvoters))
    stop("nvoters should be a character variable that is the same as the column name of the number of the voters in the vr")
  if (!is.null(psi)){
    if (sum(psi > 1 | psi < 0) >= 1)
      stop("psi denotes the proportion of registered voters; it should be between 0 and 1")
  }

  ### a missing s01 means the intervention would not have occurred
  ### absent the experiment (Case 1); otherwise Case 2
  if (is.null(s01)) {
    s01 <- 0
    case <- "Case 1"
  } else {
    case <- "Case 2"
  }

  ## if s10 contains values in (0, 1), treat it as proportions and
  ## convert to voter counts; mixing proportions and counts is an error
  if (sum(s10 < 1 & s10 > 0) > 0){
    if (sum(s10 > 1) > 0) { stop("s10 is mixed with proportion and the number") }
    s10 <- s10 * vr[, nvoters]
  }
  ## same coercion rule for s01
  if (sum(s01 < 1 & s01 > 0) > 0){
    if (sum(s01 > 1) > 0) { stop("s01 is mixed with proportion and the number") }
    s01 <- s01 * vr[, nvoters]
  }

  # District-level bound: the larger of the two one-sided bounds
  # implied by exp_ac0.
  maei <- data.frame(vr[, dist], vr[, nvoters], s10, s01, exp_ac0)
  colnames(maei) <- c("district", "nvoters", "s10", "s01", "exp_ac0")
  maei1 <- maei %>%
    group_by(district) %>%
    summarise(MAEI_d = max( exp_ac0 * (s10 + s01) / nvoters,
                            (1 - exp_ac0) * (s10 + s01) / nvoters )
              )

  ### output table
  output <- tibble(Districts = vr[, dist],
                   MAEI_d = maei1$MAEI_d)

  ### decision rule: PASS when psi > 2 * MAEI_d (the experiment
  ### cannot change the electoral outcome), FAIL otherwise
  if (!is.null(psi)){
    output <- output %>%
      mutate(psi = psi) %>%
      mutate(result = NA)
    output$result <- ifelse(output$psi > 2 * output$MAEI_d, "PASS", "FAIL")
    output <- output[, -3]
  }

  cat("\n")
  cat("MAEI_ind_randomized: exp_ac0 = ", exp_ac0, "\n")
  cat("Individually randomized: ", case, "\n")
  if (!is.null(psi)){ cat("Margin to pivotality: Psi = ", psi, "\n") }
  print(output)

  # invisibly return the pieces for programmatic extraction
  output2 <- list()
  output2$MAEI_d <- output[, 2]
  output2$district <- output[, 1]
  if (!is.null(psi)){
    output2$psi <- psi
    output2$result <- output[, 3]
  }
  invisible(output2)
}
| /R/get_maei_ind_rand.R | no_license | Jiawei-Fu/maei_tmp | R | false | false | 5,952 | r | #' @title MAEI Calculation for Individually-Randomized Experiments
#'
#' @description This function calculates the maximum aggregate electoral impact (MAEI) for individually-randomized experiments following Slough (2020). This function returns the MAEIs under the assumption of no interference between voters (SUTVA). The optional argument psi uses the calculation of MAEI_d to implement the decision rule proposed in the paper.
#'
#' @param vr a data frame that contains the variables: district and the corresponding number of registered voters in that district.
#' @param dist a character to specify the column name of the district variable in the data frame vr.
#' @param nvoters a character to specify the column name of the number of voters in the data frame vr.
#' @param s10 a number or vector to denote the individuals exposed to the treatment because it is assigned experimentally. It can be the exact number or proportion (between 0 and 1).
#' @param s01 a number or vector to denote the individuals not exposed to the treatment because it is assigned experimentally. It can be the exact number or proportion (between 0 and 1). The default value is NULL which corresponds to the case in which a researcher designs and implements an intervention that would otherwise not have occurred. If it is not NULL, it corresponds to the case in which some intervention by a third party is modified to include an experimental component.
#' @param exp_ac0 a number or vector to denote the expectation of untreated potential outcome. The default value is one which will return the most conservative bound.
#'
#' @param psi a number or vector specifying “margin to pivotality”, as minimum change in vote share, as a proportion of registered voters, at which a different officeholder would be elected in district. If psi > 2MAEI (Maximal Aggregate Electoral Impact), an experiment could not change the ultimate electoral outcome (the output result will show "PASS" the decision rule); in contrast, if psi < 2MAEI, the experiment could affect the ultimate electoral outcome (the output result will show "FAIL").
#'
#' @examples
#' ### input data
#' data(rv1)
#'
#' ### specify s10 as a constant number across districts
#' get_maei_ind_rand(vr = rv1, dist = "d",
#'                   nvoters = "n_voters", s10 = 200)
#'
#' ### let s10 be different proportion and specify s01 = 10 (thus case 2)
#' set.seed(10)
#' get_maei_ind_rand(vr = rv1, dist = "d",
#'                   nvoters = "n_voters", s10 = runif(10), s01 = 10)
#'
#' ### add margin to pivotality say psi = 0.3
#' get_maei_ind_rand(vr = rv1, dist = "d",
#'                   nvoters = "n_voters", s10 = 0.13, s01 = 10, psi = 0.3)
#'
#' @references Slough, Tara. 2020. "The Ethics of Electoral Experimentation: Design-Based Recommendations." Working paper. Available at www.taraslough.com/assets/pdf/eee.pdf.
#'
#'
#' @import dplyr
#' @import magrittr
#' @import randomizr
#'
#' @export
get_maei_ind_rand <- function(vr,          ## voter rolls (one row per district)
                              dist,        ## column name of the district variable in vr
                              nvoters,     ## column name of the voter-count variable in vr
                              s10,         ## number or proportion
                              s01 = NULL,
                              exp_ac0 = 1, # most conservative
                              psi = NULL,
                              ...
                              ){
  ### check the inputs (messages unchanged; checks use inherits()/is.character()
  ### instead of direct class() comparisons)
  if (!inherits(vr, "data.frame"))
    stop("vr should be the data.frame")
  if (!(length(s10) == 1 | length(s10) == nrow(vr)))
    stop("the length of s10 is not consistent with the vr")
  if (!is.null(s01)){
    if (!(length(s01) == 1 | length(s01) == nrow(vr)))
      stop("the length of s01 is not consistent with the vr")
  }
  if (!(length(exp_ac0) == 1 | length(exp_ac0) == nrow(vr)))
    stop("the length of exp_ac0 is not consistent with the vr")
  if (sum(exp_ac0 > 1) > 0 | sum(exp_ac0 < 0) > 0)
    stop("exp_ac0 should be between 0 and 1")
  if (!is.character(dist))
    stop("dist should be a character variable that is the same as the column name of the district variable in the vr")
  if (!is.character(nvoters))
    stop("nvoters should be a character variable that is the same as the column name of the number of the voters in the vr")
  if (!is.null(psi)){
    if (sum(psi > 1 | psi < 0) >= 1)
      stop("psi denotes the proportion of registered voters; it should be between 0 and 1")
  }

  ### a missing s01 means the intervention would not have occurred
  ### absent the experiment (Case 1); otherwise Case 2
  if (is.null(s01)) {
    s01 <- 0
    case <- "Case 1"
  } else {
    case <- "Case 2"
  }

  ## if s10 contains values in (0, 1), treat it as proportions and
  ## convert to voter counts; mixing proportions and counts is an error
  if (sum(s10 < 1 & s10 > 0) > 0){
    if (sum(s10 > 1) > 0) { stop("s10 is mixed with proportion and the number") }
    s10 <- s10 * vr[, nvoters]
  }
  ## same coercion rule for s01
  if (sum(s01 < 1 & s01 > 0) > 0){
    if (sum(s01 > 1) > 0) { stop("s01 is mixed with proportion and the number") }
    s01 <- s01 * vr[, nvoters]
  }

  # District-level bound: the larger of the two one-sided bounds
  # implied by exp_ac0.
  maei <- data.frame(vr[, dist], vr[, nvoters], s10, s01, exp_ac0)
  colnames(maei) <- c("district", "nvoters", "s10", "s01", "exp_ac0")
  maei1 <- maei %>%
    group_by(district) %>%
    summarise(MAEI_d = max( exp_ac0 * (s10 + s01) / nvoters,
                            (1 - exp_ac0) * (s10 + s01) / nvoters )
              )

  ### output table
  output <- tibble(Districts = vr[, dist],
                   MAEI_d = maei1$MAEI_d)

  ### decision rule: PASS when psi > 2 * MAEI_d (the experiment
  ### cannot change the electoral outcome), FAIL otherwise
  if (!is.null(psi)){
    output <- output %>%
      mutate(psi = psi) %>%
      mutate(result = NA)
    output$result <- ifelse(output$psi > 2 * output$MAEI_d, "PASS", "FAIL")
    output <- output[, -3]
  }

  cat("\n")
  cat("MAEI_ind_randomized: exp_ac0 = ", exp_ac0, "\n")
  cat("Individually randomized: ", case, "\n")
  if (!is.null(psi)){ cat("Margin to pivotality: Psi = ", psi, "\n") }
  print(output)

  # invisibly return the pieces for programmatic extraction
  output2 <- list()
  output2$MAEI_d <- output[, 2]
  output2$district <- output[, 1]
  if (!is.null(psi)){
    output2$psi <- psi
    output2$result <- output[, 3]
  }
  invisible(output2)
}
|
# Read the prepared data and drop the first three (index) columns.
data <- read.csv("analysis_data.csv")
data <- data[-c(1, 2, 3)]
# Model log spending (the residual plot of the untransformed fit
# motivated this transformation -- see comment below).
data$SPENDS <- log(data$SPENDS)

# Split into training and test halves.
set.seed(1)
training <- sample(nrow(data), nrow(data) / 2)
train <- data[training, ]
test <- data[-training, ]

# --- Lasso regression ---
library(glmnet)
y.train <- train$SPENDS
x.train <- model.matrix(SPENDS ~ ., train)[, -1]
x.test <- model.matrix(SPENDS ~ ., test)[, -1]
y.test <- test$SPENDS
x <- model.matrix(SPENDS ~ ., data)[, -1]
y <- data$SPENDS
grid <- 10^seq(10, -2, length = 100)

lasso.mod <- glmnet(x.train, y.train, alpha = 1, lambda = grid)
# Choose lambda by 10-fold cross-validation on the training half.
cv.out <- cv.glmnet(x.train, y.train, alpha = 1, lambda = grid, nfolds = 10)
plot(cv.out)
best.lam <- cv.out$lambda.min
lasso.pred <- predict(lasso.mod, s = best.lam, newx = x.test)
print("This is our test error with lasso")
mean((lasso.pred - y.test)^2)

# Refit on the whole data set and inspect the selected coefficients.
out <- glmnet(x, y, alpha = 1, lambda = grid)
lasso.coef <- predict(out, type = "coefficients", s = best.lam)
print(lasso.coef)

# Residual diagnostics. FIX: residuals are y - yhat; the original used
# yhat - y here, the opposite sign of the second section below.
yhat <- predict(out, s = best.lam, newx = x)
residuals <- y - yhat
plot(yhat, residuals)
# The shape of this residual plot suggested the log transform above.

# Best-subset selection for a simple linear model.
library(leaps)
regfit.full <- regsubsets(SPENDS ~ ., data)
reg.summary <- summary(regfit.full)
print(reg.summary)
print(reg.summary$adjr2)
# From the adjusted R^2, this is not a very good model.

# Plain linear regression fit (inspect with summary(lm.fit) if needed).
lm.fit <- lm(SPENDS ~ ., data = data)

##################################
# COUPON takes values 0/1 only; inverse transforms of the other
# predictors were tried after inspecting pairs(data), then left
# commented out.
#data$SPENDSPRV = 1/data$SPENDSPRV
#data$PRIMARY_VISIT = 1/data$PRIMARY_VISIT

# Repeat the split and the lasso fit.
set.seed(1)
training <- sample(nrow(data), nrow(data) / 2)
train <- data[training, ]
test <- data[-training, ]

library(glmnet)
y.train <- train$SPENDS
x.train <- model.matrix(SPENDS ~ ., train)[, -1]
x.test <- model.matrix(SPENDS ~ ., test)[, -1]
y.test <- test$SPENDS
x <- model.matrix(SPENDS ~ ., data)[, -1]
y <- data$SPENDS
grid <- 10^seq(10, -2, length = 100)
lasso.mod <- glmnet(x.train, y.train, alpha = 1, lambda = grid)
cv.out <- cv.glmnet(x.train, y.train, alpha = 1, lambda = grid)
plot(cv.out)
best.lam <- cv.out$lambda.min
lasso.pred <- predict(lasso.mod, s = best.lam, newx = x.test)
print("This is our test error with lasso")
mean((lasso.pred - y.test)^2)

out <- glmnet(x, y, alpha = 1, lambda = grid)
lasso.coef <- predict(out, type = "coefficients", s = best.lam)
print(lasso.coef)

# Residual diagnostics against the fitted values.
# BUG FIX: the original plotted against x[, 'LogSpends'] and
# x[, 'SPENDS'], but neither column exists in the predictor matrix x
# (SPENDS is the response), so both calls failed with a subscript error.
yhat <- predict(out, s = best.lam, newx = x)
residuals <- y - yhat
plot(yhat, residuals)

# Best-subset selection again on the (possibly transformed) data.
library(leaps)
regfit.full <- regsubsets(SPENDS ~ ., data)
reg.summary <- summary(regfit.full)
print(reg.summary)
print(reg.summary$adjr2)
| /TeamProj/analysis for final report.R | no_license | akshoop/DS502-StatisticalMethods | R | false | false | 2,855 | r | data = read.csv("analysis_data.csv")
# Drop the first three (index) columns of the freshly-read data.
data <- data[-c(1, 2, 3)]
# Model log spending (the residual plot of the untransformed fit
# motivated this transformation -- see comment below).
data$SPENDS <- log(data$SPENDS)

# Split into training and test halves.
set.seed(1)
training <- sample(nrow(data), nrow(data) / 2)
train <- data[training, ]
test <- data[-training, ]

# --- Lasso regression ---
library(glmnet)
y.train <- train$SPENDS
x.train <- model.matrix(SPENDS ~ ., train)[, -1]
x.test <- model.matrix(SPENDS ~ ., test)[, -1]
y.test <- test$SPENDS
x <- model.matrix(SPENDS ~ ., data)[, -1]
y <- data$SPENDS
grid <- 10^seq(10, -2, length = 100)

lasso.mod <- glmnet(x.train, y.train, alpha = 1, lambda = grid)
# Choose lambda by 10-fold cross-validation on the training half.
cv.out <- cv.glmnet(x.train, y.train, alpha = 1, lambda = grid, nfolds = 10)
plot(cv.out)
best.lam <- cv.out$lambda.min
lasso.pred <- predict(lasso.mod, s = best.lam, newx = x.test)
print("This is our test error with lasso")
mean((lasso.pred - y.test)^2)

# Refit on the whole data set and inspect the selected coefficients.
out <- glmnet(x, y, alpha = 1, lambda = grid)
lasso.coef <- predict(out, type = "coefficients", s = best.lam)
print(lasso.coef)

# Residual diagnostics. FIX: residuals are y - yhat; the original used
# yhat - y here, the opposite sign of the second section below.
yhat <- predict(out, s = best.lam, newx = x)
residuals <- y - yhat
plot(yhat, residuals)
# The shape of this residual plot suggested the log transform above.

# Best-subset selection for a simple linear model.
library(leaps)
regfit.full <- regsubsets(SPENDS ~ ., data)
reg.summary <- summary(regfit.full)
print(reg.summary)
print(reg.summary$adjr2)
# From the adjusted R^2, this is not a very good model.

# Plain linear regression fit (inspect with summary(lm.fit) if needed).
lm.fit <- lm(SPENDS ~ ., data = data)

##################################
# COUPON takes values 0/1 only; inverse transforms of the other
# predictors were tried after inspecting pairs(data), then left
# commented out.
#data$SPENDSPRV = 1/data$SPENDSPRV
#data$PRIMARY_VISIT = 1/data$PRIMARY_VISIT

# Repeat the split and the lasso fit.
set.seed(1)
training <- sample(nrow(data), nrow(data) / 2)
train <- data[training, ]
test <- data[-training, ]

library(glmnet)
y.train <- train$SPENDS
x.train <- model.matrix(SPENDS ~ ., train)[, -1]
x.test <- model.matrix(SPENDS ~ ., test)[, -1]
y.test <- test$SPENDS
x <- model.matrix(SPENDS ~ ., data)[, -1]
y <- data$SPENDS
grid <- 10^seq(10, -2, length = 100)
lasso.mod <- glmnet(x.train, y.train, alpha = 1, lambda = grid)
cv.out <- cv.glmnet(x.train, y.train, alpha = 1, lambda = grid)
plot(cv.out)
best.lam <- cv.out$lambda.min
lasso.pred <- predict(lasso.mod, s = best.lam, newx = x.test)
print("This is our test error with lasso")
mean((lasso.pred - y.test)^2)

out <- glmnet(x, y, alpha = 1, lambda = grid)
lasso.coef <- predict(out, type = "coefficients", s = best.lam)
print(lasso.coef)

# Residual diagnostics against the fitted values.
# BUG FIX: the original plotted against x[, 'LogSpends'] and
# x[, 'SPENDS'], but neither column exists in the predictor matrix x
# (SPENDS is the response), so both calls failed with a subscript error.
yhat <- predict(out, s = best.lam, newx = x)
residuals <- y - yhat
plot(yhat, residuals)

# Best-subset selection again on the (possibly transformed) data.
library(leaps)
regfit.full <- regsubsets(SPENDS ~ ., data)
reg.summary <- summary(regfit.full)
print(reg.summary)
print(reg.summary$adjr2)
|
#' Combined dataframe of "Per base sequence quality" in 4 fastqc reports
#'
#' @description this is an object for storing a dataframe composed of the
#' "Per base sequence quality" tables from 4 fastqc reports
#'
#' @format this data frame has 204 rows and the following 8 columns
#' \describe{
#' \item{Base}{position of the base within the read}
#' \item{Mean}{the mean quality score at this base position}
#' \item{Median}{the median quality score at this base position}
#' \item{Lower Quartile}{the lower quartile of quality scores at this base position}
#' \item{Upper Quartile}{the upper quartile of quality scores at this base position}
#' \item{10th Percentile}{the 10th percentile of quality scores at this base position}
#' \item{90th Percentile}{the 90th percentile of quality scores at this base position}
#' \item{sampleName}{user-defined sample id}
#' }
"all.reports"
| /R/data.R | permissive | chilampoon/ohmyR | R | false | false | 781 | r | #' Combined dataframe of "Per base sequence quality" in 4 fastqc reports
#'
#' @description this is an object for storing a dataframe composed of
#' "Per base sequence quality" in 4 fastqc reports
#'
#' @format this data frame has 204 rows and the following 8 columns
#' \describe{
#' \item{Base}{number of base}
#' \item{Mean}{the mean of coverage of this base}
#' \item{Median}{the median of coverage of this base}
#' \item{Lower Quartile}{the lower quartile of coverage of this base}
#' \item{Upper Quartile}{the upper quartile of coverage of this base}
#' \item{10th Percentile}{the 10th percentile of coverage of this base}
#' \item{90th Percentile}{the 90th percentile of coverage of this base}
#' \item{sampleName}{user-defined sample id}
#' }
"all.reports"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature_engineering.r
\name{add_sequence_length}
\alias{add_sequence_length}
\title{Add a column to the existing dataframe}
\usage{
add_sequence_length(df_sequence, name_seq_col)
}
\arguments{
\item{df_sequence}{global dataframe}
\item{name_seq_col}{Name of the Sequence Column}
}
\value{
df_updated
}
\description{
Calculate length of the sequence column
}
\examples{
df <- data.frame("id" = c('ID1','ID2'), "Sequence_DNA" = c('AAAGGGCTTCCC','AGGGGGTTTCCC'))
df_new <- add_sequence_length(df, 'Sequence_DNA')
}
| /rnalab.Rcheck/00_pkg_src/rnalab/man/add_sequence_length.Rd | no_license | emilyd5077/rnalab | R | false | true | 613 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature_engineering.r
\name{add_sequence_length}
\alias{add_sequence_length}
\title{Add a column to the existing dataframe}
\usage{
add_sequence_length(df_sequence, name_seq_col)
}
\arguments{
\item{df_sequence}{global dataframe}
\item{name_seq_col}{Name of the Sequence Column}
}
\value{
df_updated
}
\description{
Calculate length of the sequence column
}
\examples{
df <- data.frame("id" = c('ID1','ID2'), "Sequence_DNA" = c('AAAGGGCTTCCC','AGGGGGTTTCCC'))
df_new <- add_sequence_length(df, 'Sequence_DNA')
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elastictranscoder_operations.R
\name{elastictranscoder_update_pipeline_notifications}
\alias{elastictranscoder_update_pipeline_notifications}
\title{With the UpdatePipelineNotifications operation, you can update Amazon
Simple Notification Service (Amazon SNS) notifications for a pipeline}
\usage{
elastictranscoder_update_pipeline_notifications(Id, Notifications)
}
\arguments{
\item{Id}{[required] The identifier of the pipeline for which you want to change notification
settings.}
\item{Notifications}{[required] The topic ARN for the Amazon Simple Notification Service (Amazon SNS)
topic that you want to notify to report job status.
To receive notifications, you must also subscribe to the new topic in
the Amazon SNS console.
\itemize{
\item \strong{Progressing}: The topic ARN for the Amazon Simple Notification
Service (Amazon SNS) topic that you want to notify when Elastic
Transcoder has started to process jobs that are added to this
pipeline. This is the ARN that Amazon SNS returned when you created
the topic.
\item \strong{Complete}: The topic ARN for the Amazon SNS topic that you want
to notify when Elastic Transcoder has finished processing a job.
This is the ARN that Amazon SNS returned when you created the topic.
\item \strong{Warning}: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters a warning condition. This
is the ARN that Amazon SNS returned when you created the topic.
\item \strong{Error}: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters an error condition. This
is the ARN that Amazon SNS returned when you created the topic.
}}
}
\description{
With the UpdatePipelineNotifications operation, you can update Amazon
Simple Notification Service (Amazon SNS) notifications for a pipeline.
}
\details{
When you update notifications for a pipeline, Elastic Transcoder returns
the values that you specified in the request.
}
\section{Request syntax}{
\preformatted{svc$update_pipeline_notifications(
Id = "string",
Notifications = list(
Progressing = "string",
Completed = "string",
Warning = "string",
Error = "string"
)
)
}
}
\keyword{internal}
| /cran/paws.media.services/man/elastictranscoder_update_pipeline_notifications.Rd | permissive | johnnytommy/paws | R | false | true | 2,275 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elastictranscoder_operations.R
\name{elastictranscoder_update_pipeline_notifications}
\alias{elastictranscoder_update_pipeline_notifications}
\title{With the UpdatePipelineNotifications operation, you can update Amazon
Simple Notification Service (Amazon SNS) notifications for a pipeline}
\usage{
elastictranscoder_update_pipeline_notifications(Id, Notifications)
}
\arguments{
\item{Id}{[required] The identifier of the pipeline for which you want to change notification
settings.}
\item{Notifications}{[required] The topic ARN for the Amazon Simple Notification Service (Amazon SNS)
topic that you want to notify to report job status.
To receive notifications, you must also subscribe to the new topic in
the Amazon SNS console.
\itemize{
\item \strong{Progressing}: The topic ARN for the Amazon Simple Notification
Service (Amazon SNS) topic that you want to notify when Elastic
Transcoder has started to process jobs that are added to this
pipeline. This is the ARN that Amazon SNS returned when you created
the topic.
\item \strong{Complete}: The topic ARN for the Amazon SNS topic that you want
to notify when Elastic Transcoder has finished processing a job.
This is the ARN that Amazon SNS returned when you created the topic.
\item \strong{Warning}: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters a warning condition. This
is the ARN that Amazon SNS returned when you created the topic.
\item \strong{Error}: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters an error condition. This
is the ARN that Amazon SNS returned when you created the topic.
}}
}
\description{
With the UpdatePipelineNotifications operation, you can update Amazon
Simple Notification Service (Amazon SNS) notifications for a pipeline.
}
\details{
When you update notifications for a pipeline, Elastic Transcoder returns
the values that you specified in the request.
}
\section{Request syntax}{
\preformatted{svc$update_pipeline_notifications(
Id = "string",
Notifications = list(
Progressing = "string",
Completed = "string",
Warning = "string",
Error = "string"
)
)
}
}
\keyword{internal}
|
# Reference: http://www.systematicportfolio.com
# Evaluate and analyze Trading Strategies
# NOTE(review): rm(list=ls()) wipes the caller's global workspace; acceptable
# for a stand-alone script, but do not source() this file from other code.
rm(list=ls())
#
# Download and source the Systematic Investor Toolbox (SIT); this defines the
# load.packages(), spl(), bt.* and plotbt.* helpers used throughout.
con = gzcon(url('http://www.systematicportfolio.com/sit.gz', 'rb'))
source(con)
close(con)
load.packages('quantmod')
# bt.simple: minimal vectorised backtester.
#   data   - price series (e.g. OHLC xts as returned by quantmod::getSymbols)
#   signal - position indicator aligned with 'data' (1 = long, 0 = flat)
# Returns a list with per-bar strategy returns ($ret) and the compounded
# equity curve ($equity).
bt.simple <- function(data, signal)
{
  # Trade with a one-bar delay so today's signal is applied to tomorrow's
  # return (avoids look-ahead bias).
  signal <- lag(signal, 1)
  # Carry the last known position forward over gaps; the remaining leading
  # NAs (warm-up period) mean "no position".
  signal <- na.locf(signal, na.rm = FALSE)
  signal[is.na(signal)] <- 0
  # Close-to-close discrete returns of the instrument.
  bar.ret <- ROC(Cl(data), type = "discrete")
  bar.ret[1] <- 0
  # Strategy statistics: per-bar returns and compounded equity curve.
  res <- list()
  res$ret <- bar.ret * signal
  res$equity <- cumprod(1 + res$ret)
  res
}
# Test for bt.simple functions
# load historical prices from Yahoo Finance
# (auto.assign = F makes getSymbols() return the xts object directly)
data <- getSymbols('SPY', src = 'yahoo', from = '2000-01-01', to = '2018-12-31', auto.assign = F)
# buy and hold
signal <- rep(1, nrow(data))
buy.hold <- bt.simple(data, signal)
head(buy.hold$equity)
tail(buy.hold$equity)
head(buy.hold$ret)
# MA cross (moving average)
# Cl: get closing price
sma <- SMA(Cl(data), 200)
head(sma, 200)
#
# NOTE(review): ifelse() here presumably yields an xts (it keeps the
# attributes of its test argument), so lag()/na.locf() inside bt.simple
# dispatch to the xts methods for this signal -- verify.
signal <- ifelse(Cl(data) > sma, 1, 0) # if price large than moving mean, buy
head(signal, 201)
sma.cross <- bt.simple(data, signal)
# Create a chart showing the strategies perfromance in 2000:2009
dates <- '2000::2018'
# Normalise both equity curves to 1 at the start of the window.
buy.hold.equity <- buy.hold$equity[dates] / as.double(buy.hold$equity[dates][1])
sma.cross.equity <- sma.cross$equity[dates] / as.double(sma.cross$equity[dates][1])
# chartSeries() : Charting tool to create standard financial charts given a time series like object
chartSeries(buy.hold.equity, TA = c(addTA(sma.cross.equity, on=1, col='red')),
theme ='white', yrange = range(buy.hold.equity, sma.cross.equity) )
#
library(magrittr)
strategy.sma<-merge(buy.hold.equity, sma.cross.equity) %>%
set_colnames(c("BH", "SMA"))
head(strategy.sma,30)
tail(strategy.sma)
# plot using ggplot2
library(ggplot2)
strategy.sma.df<-fortify(strategy.sma, melt=TRUE)
head(strategy.sma.df)
#
p<-ggplot(strategy.sma.df, aes(x = Index, y = Value))+
geom_line(aes(color = Series), size = 0.5) +
scale_x_date(date_labels = "%Y/%m") +
geom_hline(yintercept = c(1.0, 0.6))
p
#===================================================================
# sample code to implement the above strategies using the backtesting
# library in the Systematic Investor Toolbox:
#*****************************************************************
# Load historical data
#******************************************************************
load.packages('quantmod')
tickers <- spl('SPY')
data <- new.env() # data is a environment
# bt.prep function merges and aligns all symbols in the data environment
getSymbols(tickers, src = 'yahoo', from = '2000-01-01', to = '2018-12-31', env = data, auto.assign = T)
# bt.prep(data, align='keep.all')
names(data)
prices<-Ad(data$SPY)
data$prices<-prices
data$weight<-prices * NA
data$execution.price <- prices * NA
head(data$prices)
tail(data$prices)
#*****************************************************************
# Code Strategies
#*****************************************************************
# bt.run computes the equity curve of strategy specified by data$weight matrix.
# The data$weight matrix holds weights (signals) to open/close positions
# Buy & Hold
data$weight[] <- 1
# NOTE(review): the bt.run.share() result is immediately overwritten by the
# bt.run() call on the next line, so only the second backtest is kept.
buy.hold <- bt.run.share(data, clean.signal=F, trade.summary = TRUE)
buy.hold <- bt.run(data)
# MA Cross
# bt.apply function applies user given function to each symbol in the data environment
sma <- bt.apply(data, function(x) { SMA(Cl(x), 200) } )
data$weight[] <- NA # update weights matirx
data$weight[] <- iif(prices >= sma, 1, 0)
sma.cross <- bt.run(data, trade.summary=T)
plotbt.custom.report(sma.cross, buy.hold)
#
# NOTE(review): etf4.all is loaded here but never used below; the file
# "etf4_xts_all" must exist in the working directory or this line fails.
etf4.all<-readRDS("etf4_xts_all")
#
tickers = spl('^GSPC')
data1 <- new.env()
getSymbols(tickers, src = 'yahoo', from = '1896-01-01', env = data1, auto.assign = T)
bt.prep(data1, align='keep.all', dates='1896::2011')
#*****************************************************************
# Code Strategies
#******************************************************************
prices = data1$prices
# Buy & Hold
data1$weight[] = 1
buy.hold = bt.run(data1)
#
# 'b' aliases the data1 environment for the hand-rolled bt.run() below.
b<- data1
# bt.run: compute the equity curve for the strategy encoded in b$weight.
# (Hand-rolled copy of the SIT toolbox function; relies on SIT helpers such
# as dates2index(), ifna(), ifna.prev(), mlag(), iif(), bt.summary(),
# bt.run.trim.helper() and bt.trade.summary().)
#
# Arguments:
#   b        - environment with symbol time series (prices, weight, ...)
#   trade.summary - if TRUE, attach a per-trade summary to the result
#   do.lag   - bars to delay the signal (1 = trade on the next bar)
#   do.CarryLastObservationForwardIfNA - forward-fill missing weights
#   type     - 'weight' (returns-based) or 'share' (price-based) backtest
#   silent   - if FALSE, print a short performance summary to the console
#   capital, commission - passed through to bt.summary()
#   weight, dates - signal matrix and date subset; note these defaults are
#       lazily evaluated from 'b' when the arguments are not supplied
# Returns: the backtest object produced by bt.summary(), augmented with
# dates.index and (optionally) trade.summary.
bt.run <- function
(
b, # environment with symbols time series
trade.summary = F, # flag to create trade summary
do.lag = 1, # lag signal
do.CarryLastObservationForwardIfNA = TRUE,
type = c('weight', 'share'),
silent = F,
capital = 100000,
commission = 0,
weight = b$weight,
dates = 1:nrow(b$prices)
)
{
# convert dates to dates.index
dates.index = dates2index(b$prices, dates)
# setup
type = type[1]
# create signal
weight[] = ifna(weight, NA)
# lag
if(do.lag > 0)
weight = mlag(weight, do.lag) # Note k=1 implies a move *forward*
# backfill
if(do.CarryLastObservationForwardIfNA)
weight[] = apply(coredata(weight), 2, ifna.prev)
weight[is.na(weight)] = 0
# find trades: bars where the position changes (entries and exits)
weight1 = mlag(weight, -1)
tstart = weight != weight1 & weight1 != 0
tend = weight != 0 & weight != weight1
trade = ifna(tstart | tend, FALSE)
# prices
prices = b$prices
# execution.price logic: on trade bars, substitute a user-supplied
# execution price (if any) for the close
if( sum(trade) > 0 ) {
execution.price = coredata(b$execution.price)
prices1 = coredata(b$prices)
prices1[trade] = iif( is.na(execution.price[trade]), prices1[trade], execution.price[trade] )
prices[] = prices1
}
# type of backtest
if( type == 'weight') {
ret = prices / mlag(prices) - 1
ret[] = ifna(ret, NA)
ret[is.na(ret)] = 0
} else { # shares, hence provide prices
ret = prices
}
#weight = make.xts(weight, b$dates)
# Re-wrap the plain weight matrix in the original xts container.
temp = b$weight
temp[] = weight
weight = temp
# prepare output
bt = bt.summary(weight, ret, type, b$prices, capital, commission)
bt$dates.index = dates.index
bt = bt.run.trim.helper(bt, dates.index)
if( trade.summary ) bt$trade.summary = bt.trade.summary(b, bt)
if( !silent ) {
# print last signal / weight observation
cat('Latest weights :\n')
print(round(100*last(bt$weight),2))
cat('\n')
cat('Performance summary :\n')
cat('', spl('CAGR,Best,Worst'), '\n', sep = '\t')
cat('', sapply(cbind(bt$cagr, bt$best, bt$worst), function(x) round(100*x,1)), '\n', sep = '\t')
cat('\n')
}
return(bt)
}
| /MA_strategy.R | no_license | 106035638/finDB | R | false | false | 6,427 | r | # Reference: http://www.systematicportfolio.com
# NOTE(review): everything from here through the second bt.run() definition
# below is a verbatim duplicate of the script above (a repeated copy of
# MA_strategy.R); consider deleting one copy.
# Evaluate and analyze Trading Strategies
rm(list=ls())
#
con = gzcon(url('http://www.systematicportfolio.com/sit.gz', 'rb'))
source(con)
close(con)
load.packages('quantmod')
# data is a time series of price
# signal is a indicator vector for buy and sell
bt.simple <- function(data, signal)
{
# lag serial
signal <- lag(signal,1)
# back fill
signal <- na.locf(signal, na.rm = FALSE)
signal[is.na(signal)] = 0
# calculate close-to-close returns
# ROC() : Calculate the (rate of) change of a series over n periods.
ret <- ROC(Cl(data), type="discrete")
ret[1] = 0
# compute stats
bt <- list()
bt$ret <- ret * signal
bt$equity <- cumprod(1 + bt$ret)
return(bt)
}
# Test for bt.simple functions
# load historical prices from Yahoo Finance
data <- getSymbols('SPY', src = 'yahoo', from = '2000-01-01', to = '2018-12-31', auto.assign = F)
# buy and hold
signal <- rep(1, nrow(data))
buy.hold <- bt.simple(data, signal)
head(buy.hold$equity)
tail(buy.hold$equity)
head(buy.hold$ret)
# MA cross (moving average)
# Cl: get closing price
sma <- SMA(Cl(data), 200)
head(sma, 200)
#
signal <- ifelse(Cl(data) > sma, 1, 0) # if price large than moving mean, buy
head(signal, 201)
sma.cross <- bt.simple(data, signal)
# Create a chart showing the strategies perfromance in 2000:2009
dates <- '2000::2018'
buy.hold.equity <- buy.hold$equity[dates] / as.double(buy.hold$equity[dates][1])
sma.cross.equity <- sma.cross$equity[dates] / as.double(sma.cross$equity[dates][1])
# chartSeries() : Charting tool to create standard financial charts given a time series like object
chartSeries(buy.hold.equity, TA = c(addTA(sma.cross.equity, on=1, col='red')),
theme ='white', yrange = range(buy.hold.equity, sma.cross.equity) )
#
library(magrittr)
strategy.sma<-merge(buy.hold.equity, sma.cross.equity) %>%
set_colnames(c("BH", "SMA"))
head(strategy.sma,30)
tail(strategy.sma)
# plot using ggplot2
library(ggplot2)
strategy.sma.df<-fortify(strategy.sma, melt=TRUE)
head(strategy.sma.df)
#
p<-ggplot(strategy.sma.df, aes(x = Index, y = Value))+
geom_line(aes(color = Series), size = 0.5) +
scale_x_date(date_labels = "%Y/%m") +
geom_hline(yintercept = c(1.0, 0.6))
p
#===================================================================
# sample code to implement the above strategies using the backtesting
# library in the Systematic Investor Toolbox:
#*****************************************************************
# Load historical data
#******************************************************************
load.packages('quantmod')
tickers <- spl('SPY')
data <- new.env() # data is a environment
# bt.prep function merges and aligns all symbols in the data environment
getSymbols(tickers, src = 'yahoo', from = '2000-01-01', to = '2018-12-31', env = data, auto.assign = T)
# bt.prep(data, align='keep.all')
names(data)
prices<-Ad(data$SPY)
data$prices<-prices
data$weight<-prices * NA
data$execution.price <- prices * NA
head(data$prices)
tail(data$prices)
#*****************************************************************
# Code Strategies
#*****************************************************************
# bt.run computes the equity curve of strategy specified by data$weight matrix.
# The data$weight matrix holds weights (signals) to open/close positions
# Buy & Hold
data$weight[] <- 1
buy.hold <- bt.run.share(data, clean.signal=F, trade.summary = TRUE)
buy.hold <- bt.run(data)
# MA Cross
# bt.apply function applies user given function to each symbol in the data environment
sma <- bt.apply(data, function(x) { SMA(Cl(x), 200) } )
data$weight[] <- NA # update weights matirx
data$weight[] <- iif(prices >= sma, 1, 0)
sma.cross <- bt.run(data, trade.summary=T)
plotbt.custom.report(sma.cross, buy.hold)
#
etf4.all<-readRDS("etf4_xts_all")
#
tickers = spl('^GSPC')
data1 <- new.env()
getSymbols(tickers, src = 'yahoo', from = '1896-01-01', env = data1, auto.assign = T)
bt.prep(data1, align='keep.all', dates='1896::2011')
#*****************************************************************
# Code Strategies
#******************************************************************
prices = data1$prices
# Buy & Hold
data1$weight[] = 1
buy.hold = bt.run(data1)
#
b<- data1
# NOTE(review): verbatim duplicate of the bt.run() defined earlier in this
# file; consider deleting one copy.  See the first definition's header for
# the argument/return documentation.
bt.run <- function
(
b, # environment with symbols time series
trade.summary = F, # flag to create trade summary
do.lag = 1, # lag signal
do.CarryLastObservationForwardIfNA = TRUE,
type = c('weight', 'share'),
silent = F,
capital = 100000,
commission = 0,
weight = b$weight,
dates = 1:nrow(b$prices)
)
{
# convert dates to dates.index
dates.index = dates2index(b$prices, dates)
# setup
type = type[1]
# create signal
weight[] = ifna(weight, NA)
# lag
if(do.lag > 0)
weight = mlag(weight, do.lag) # Note k=1 implies a move *forward*
# backfill
if(do.CarryLastObservationForwardIfNA)
weight[] = apply(coredata(weight), 2, ifna.prev)
weight[is.na(weight)] = 0
# find trades
weight1 = mlag(weight, -1)
tstart = weight != weight1 & weight1 != 0
tend = weight != 0 & weight != weight1
trade = ifna(tstart | tend, FALSE)
# prices
prices = b$prices
# execution.price logic
if( sum(trade) > 0 ) {
execution.price = coredata(b$execution.price)
prices1 = coredata(b$prices)
prices1[trade] = iif( is.na(execution.price[trade]), prices1[trade], execution.price[trade] )
prices[] = prices1
}
# type of backtest
if( type == 'weight') {
ret = prices / mlag(prices) - 1
ret[] = ifna(ret, NA)
ret[is.na(ret)] = 0
} else { # shares, hence provide prices
ret = prices
}
#weight = make.xts(weight, b$dates)
temp = b$weight
temp[] = weight
weight = temp
# prepare output
bt = bt.summary(weight, ret, type, b$prices, capital, commission)
bt$dates.index = dates.index
bt = bt.run.trim.helper(bt, dates.index)
if( trade.summary ) bt$trade.summary = bt.trade.summary(b, bt)
if( !silent ) {
# print last signal / weight observation
cat('Latest weights :\n')
print(round(100*last(bt$weight),2))
cat('\n')
cat('Performance summary :\n')
cat('', spl('CAGR,Best,Worst'), '\n', sep = '\t')
cat('', sapply(cbind(bt$cagr, bt$best, bt$worst), function(x) round(100*x,1)), '\n', sep = '\t')
cat('\n')
}
return(bt)
}
|
#' Build a labelled caption for a cross-referenced object.
#'
#' @param key        anchor key identifying the object to label.
#' @param label.link whether the label should be rendered as a link;
#'                   defaults to the package option \code{label.link}.
#' @param label.punc punctuation appended to the label; defaults to the
#'                   package option \code{label.punc}.
#' @param type       object type, used to select the caption environment.
#' @return Rendered markdown combining the cited label and the caption.
label <- function(key, label.link, label.punc, type){
  env = specenv(type)
  if(missing(label.link))
    # BUG FIX: the default was previously assigned to an unused variable
    # 'link', so cite() below failed with "argument 'label.link' is missing".
    label.link <- opts_figr$get('label.link')
  if(missing(label.punc))
    label.punc <- opts_figr$get('label.punc')
  paste(renderMarkdown(text=paste(paste(cite(key, link=label.link, type=type),
                                        label.punc, sep=""),
                                  getcaption(key, envir=env))))
}
| /R/label.r | no_license | mkoohafkan/figr | R | false | false | 432 | r | label <- function(key, label.link, label.punc, type){
env = specenv(type)
if(missing(label.link))
link <- opts_figr$get('label.link')
if(missing(label.punc))
label.punc <- opts_figr$get('label.punc')
paste(renderMarkdown(text=paste(paste(cite(key, link=label.link, type=type),
label.punc, sep=""),
getcaption(key, envir=env))))
}
|
# Loads project-level setup; presumably defines the 'df' data frame and
# attaches dplyr/readr -- TODO confirm against global.R.
source('global.R')
# Document which indicators are missing for which countries, and when
# All (country, key, year) combinations that could exist.
left <- expand.grid(country = sort(unique(df$country)),
key = sort(unique(df$key)),
year = sort(unique(df$year)))
# Observed combinations, flagged TRUE when the value is missing.
right <- df %>%
mutate(missing = is.na(value)) %>%
dplyr::select(country, key, year, missing)
# Join the full grid to the observations: combinations absent from 'df'
# become missing = TRUE.  Key/year pairs missing for *every* country are
# dropped (flag), and the result is restricted to 2010-2017.
joined <- left_join(x = left,
y = right) %>%
mutate(missing = ifelse(is.na(missing), TRUE, missing)) %>%
group_by(key, year) %>%
mutate(flag = all(missing)) %>%
ungroup %>%
dplyr::filter(!flag) %>%
dplyr::select(-flag) %>%
filter(year >= 2010,
year <= 2017)
# Per key/year: semicolon-separated lists of countries with and without data.
summarized <-
joined %>%
group_by(key, year) %>%
summarise(countries_missing = paste0(sort(unique(country[missing])), collapse =';'),
countries_not_missing = paste0(sort(unique(country[!missing])), collapse = ';'))
# Keep only the truly missing rows for the detailed report.
joined <- joined %>%
dplyr::filter(missing) %>%
dplyr::select(-missing)
write_csv(joined, 'missing_details.csv')
write_csv(summarized, 'missing_overview.csv')
| /missingness.R | no_license | databrew/landscapedash | R | false | false | 1,043 | r | source('global.R')
# NOTE(review): this section is a verbatim duplicate of the missingness
# script above; consider deleting one copy.
# Document which indicators are missing for which countries, and when
left <- expand.grid(country = sort(unique(df$country)),
key = sort(unique(df$key)),
year = sort(unique(df$year)))
right <- df %>%
mutate(missing = is.na(value)) %>%
dplyr::select(country, key, year, missing)
joined <- left_join(x = left,
y = right) %>%
mutate(missing = ifelse(is.na(missing), TRUE, missing)) %>%
group_by(key, year) %>%
mutate(flag = all(missing)) %>%
ungroup %>%
dplyr::filter(!flag) %>%
dplyr::select(-flag) %>%
filter(year >= 2010,
year <= 2017)
summarized <-
joined %>%
group_by(key, year) %>%
summarise(countries_missing = paste0(sort(unique(country[missing])), collapse =';'),
countries_not_missing = paste0(sort(unique(country[!missing])), collapse = ';'))
joined <- joined %>%
dplyr::filter(missing) %>%
dplyr::select(-missing)
write_csv(joined, 'missing_details.csv')
write_csv(summarized, 'missing_overview.csv')
|
# Build the ESPN fantasy-football v3 league API URL from its parts.
# NOTE(review): 'mid' and 'tail' shadow base R functions of the same name;
# harmless here, but worth renaming.
base = "http://fantasy.espn.com/apis/v3/games/ffl/seasons/"
year = "2019"
mid = "/segments/0/leagues/"
leagueID = "89417258"
tail = "?view=mDraftDetail&view=mLiveScoring&view=mMatchupScore&view=mPendingTransactions&view=mPositionalRatings&view=mSettings&view=mTeam&view=modular&view=mNav&view=mMatchupScore"
url = paste0(base,year,mid,leagueID,tail)
# NOTE(review): the commented-out request below embeds private session
# cookies (swid / espn_s2); credentials should not be committed to source
# control -- rotate them and load from the environment instead.
# ESPNGet <- httr::GET(url = url,
# httr::set_cookies(
# `swid` = "{78538BF1-DE01-4269-A101-AC98E7620E27}",
# `espn_s2` = "AEAysPn25UkePQCS33o3NmdRItXI0fZ7BhQFCcY020p8yCq0CDJGrlvuqAxjP42wn%2F8YZymuQOcG94GHEtkIIHnU7BWfQr6cpEKQXkcev7zKxEWiRf57PlIPEsWqIIm72dSmnL4dxW8TYufPzrIbiNZvtU0cYnLBV3nw1CAmc%2BGwghKIqRy7qPMCsSN13WibU5BHxVfxjkRttkE5Yd27cP8vAbndYor2P2FZrR%2BPVbRGThNIL8XuEJBw2rLmhqmc6tQA%2BGeNNh9dXrySFJHm72TY"
# ))
# Unauthenticated request (works for public leagues); parse the JSON body.
ESPNGet <- httr::GET(url = url)
ESPNGet$status_code
ESPNRaw <- rawToChar(ESPNGet$content)
ESPNFromJSON <- jsonlite::fromJSON(ESPNRaw)
ESPNFromJSON$schedule %>% listviewer::jsonedit()
ESPNFromJSON$teams %>% listviewer::jsonedit()
## records
# One row per team: human-readable team name (location + nickname) plus the
# team's numeric id and overall win/loss record.
TeamRecords =
tibble(
location = ESPNFromJSON$teams$location,
nickname = ESPNFromJSON$teams$nickname,
teamId = ESPNFromJSON$teams$id,
losses = ESPNFromJSON$teams$record$overall$losses,
wins = ESPNFromJSON$teams$record$overall$wins
) %>%
unite(Team, c(location,nickname), sep = " ")
## schedule below
# One row per matchup: week, winner code, and points for each side, with
# numeric team ids replaced by readable team names from TeamRecords.
Schedule =
  tibble(
    winner = ESPNFromJSON$schedule$winner,
    Week = ESPNFromJSON$schedule$matchupPeriodId,
    AwayTeam = ESPNFromJSON$schedule$away$teamId,
    AwayPoints = ESPNFromJSON$schedule$away$totalPoints,
    HomeTeam = ESPNFromJSON$schedule$home$teamId,
    # BUG FIX: home points were previously read from the *away* side
    # (schedule$away$totalPoints), so HomePoints always equaled AwayPoints.
    HomePoints = ESPNFromJSON$schedule$home$totalPoints
  ) %>%
  left_join(TeamRecords %>% select(teamId, Team), by = c("AwayTeam"="teamId")) %>%
  select(-AwayTeam) %>%
  rename(AwayTeam = Team) %>%
  left_join(TeamRecords %>% select(teamId, Team), by = c("HomeTeam"="teamId")) %>%
  select(-HomeTeam) %>%
  rename(HomeTeam = Team)
| /old_files/ESPNV3 Success.R | no_license | dusty-turner/ESPN-V3 | R | false | false | 1,999 | r | base = "http://fantasy.espn.com/apis/v3/games/ffl/seasons/"
# NOTE(review): this section is a (near-)verbatim duplicate of the ESPN
# script above; consider deleting one copy.  The same leaked session
# cookies (swid / espn_s2) appear in the commented-out request below and
# should be rotated and removed from source control.
year = "2019"
mid = "/segments/0/leagues/"
leagueID = "89417258"
tail = "?view=mDraftDetail&view=mLiveScoring&view=mMatchupScore&view=mPendingTransactions&view=mPositionalRatings&view=mSettings&view=mTeam&view=modular&view=mNav&view=mMatchupScore"
url = paste0(base,year,mid,leagueID,tail)
# ESPNGet <- httr::GET(url = url,
# httr::set_cookies(
# `swid` = "{78538BF1-DE01-4269-A101-AC98E7620E27}",
# `espn_s2` = "AEAysPn25UkePQCS33o3NmdRItXI0fZ7BhQFCcY020p8yCq0CDJGrlvuqAxjP42wn%2F8YZymuQOcG94GHEtkIIHnU7BWfQr6cpEKQXkcev7zKxEWiRf57PlIPEsWqIIm72dSmnL4dxW8TYufPzrIbiNZvtU0cYnLBV3nw1CAmc%2BGwghKIqRy7qPMCsSN13WibU5BHxVfxjkRttkE5Yd27cP8vAbndYor2P2FZrR%2BPVbRGThNIL8XuEJBw2rLmhqmc6tQA%2BGeNNh9dXrySFJHm72TY"
# ))
ESPNGet <- httr::GET(url = url)
ESPNGet$status_code
ESPNRaw <- rawToChar(ESPNGet$content)
ESPNFromJSON <- jsonlite::fromJSON(ESPNRaw)
ESPNFromJSON$schedule %>% listviewer::jsonedit()
ESPNFromJSON$teams %>% listviewer::jsonedit()
## records
# Duplicate of the TeamRecords construction above: one row per team with a
# readable team name and overall win/loss record.
TeamRecords =
tibble(
location = ESPNFromJSON$teams$location,
nickname = ESPNFromJSON$teams$nickname,
teamId = ESPNFromJSON$teams$id,
losses = ESPNFromJSON$teams$record$overall$losses,
wins = ESPNFromJSON$teams$record$overall$wins
) %>%
unite(Team, c(location,nickname), sep = " ")
## schedule below
Schedule =
tibble(
winner = ESPNFromJSON$schedule$winner,
Week = ESPNFromJSON$schedule$matchupPeriodId,
AwayTeam = ESPNFromJSON$schedule$away$teamId,
AwayPoints = ESPNFromJSON$schedule$away$totalPoints,
HomeTeam = ESPNFromJSON$schedule$home$teamId,
HomePoints = ESPNFromJSON$schedule$away$totalPoints
) %>%
left_join(TeamRecords %>% select(teamId, Team), by = c("AwayTeam"="teamId")) %>%
select(-AwayTeam) %>%
rename(AwayTeam = Team) %>%
left_join(TeamRecords %>% select(teamId, Team), by = c("HomeTeam"="teamId")) %>%
select(-HomeTeam) %>%
rename(HomeTeam = Team)
|
# This R file accomanies the .Rmd blog post
# _source/my-first-blog-post/2016-09-17-my-first-blog-post.Rmd
| /_source/my-first-blog-post/my-first-blog-post.R | permissive | yenzichun/MysteryBox | R | false | false | 108 | r | # This R file accomanies the .Rmd blog post
# _source/my-first-blog-post/2016-09-17-my-first-blog-post.Rmd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geneSetEnrich.R
\name{geneSetEnrich}
\alias{geneSetEnrich}
\alias{geneSetEnrich,SingleCellExperiment-method}
\alias{geneSetEnrich,matrix-method}
\title{Gene set enrichment}
\usage{
geneSetEnrich(
x,
celdaModel,
useAssay = "counts",
altExpName = "featureSubset",
databases,
fdr = 0.05
)
\S4method{geneSetEnrich}{SingleCellExperiment}(
x,
useAssay = "counts",
altExpName = "featureSubset",
databases,
fdr = 0.05
)
\S4method{geneSetEnrich}{matrix}(x, celdaModel, databases, fdr = 0.05)
}
\arguments{
\item{x}{A numeric \link{matrix} of counts or a
\linkS4class{SingleCellExperiment}
with the matrix located in the assay slot under \code{useAssay}.
Rows represent features and columns represent cells. Rownames of the
matrix or \linkS4class{SingleCellExperiment} object should be gene names.}
\item{celdaModel}{Celda object of class \code{celda_G} or \code{celda_CG}.}
\item{useAssay}{A string specifying which \link{assay}
slot to use if \code{x} is a
\linkS4class{SingleCellExperiment} object. Default "counts".}
\item{altExpName}{The name for the \link{altExp} slot
to use. Default "featureSubset".}
\item{databases}{Character vector. Name of reference database. Available
databases can be viewed by \link[enrichR]{listEnrichrDbs}.}
\item{fdr}{False discovery rate (FDR). Numeric. Cutoff value for adjusted
p-value, terms with FDR below this value are considered significantly
enriched.}
}
\value{
List of length 'L' where each member contains the significantly
enriched terms for the corresponding module.
}
\description{
Identify and return significantly-enriched terms for each gene
module in a Celda object or a \linkS4class{SingleCellExperiment} object.
Performs gene set enrichment analysis for Celda
identified modules using the \link[enrichR]{enrichr}.
}
\examples{
library(M3DExampleData)
counts <- M3DExampleData::Mmus_example_list$data
# subset 500 genes for fast clustering
counts <- counts[seq(1501, 2000), ]
# cluster genes into 10 modules for quick demo
sce <- celda_G(x = as.matrix(counts), L = 10, verbose = FALSE)
gse <- geneSetEnrich(sce,
databases = c("GO_Biological_Process_2018", "GO_Molecular_Function_2018"))
}
\author{
Ahmed Youssef, Zhe Wang
}
| /man/geneSetEnrich.Rd | permissive | campbio/celda | R | false | true | 2,282 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geneSetEnrich.R
\name{geneSetEnrich}
\alias{geneSetEnrich}
\alias{geneSetEnrich,SingleCellExperiment-method}
\alias{geneSetEnrich,matrix-method}
\title{Gene set enrichment}
\usage{
geneSetEnrich(
x,
celdaModel,
useAssay = "counts",
altExpName = "featureSubset",
databases,
fdr = 0.05
)
\S4method{geneSetEnrich}{SingleCellExperiment}(
x,
useAssay = "counts",
altExpName = "featureSubset",
databases,
fdr = 0.05
)
\S4method{geneSetEnrich}{matrix}(x, celdaModel, databases, fdr = 0.05)
}
\arguments{
\item{x}{A numeric \link{matrix} of counts or a
\linkS4class{SingleCellExperiment}
with the matrix located in the assay slot under \code{useAssay}.
Rows represent features and columns represent cells. Rownames of the
matrix or \linkS4class{SingleCellExperiment} object should be gene names.}
\item{celdaModel}{Celda object of class \code{celda_G} or \code{celda_CG}.}
\item{useAssay}{A string specifying which \link{assay}
slot to use if \code{x} is a
\linkS4class{SingleCellExperiment} object. Default "counts".}
\item{altExpName}{The name for the \link{altExp} slot
to use. Default "featureSubset".}
\item{databases}{Character vector. Name of reference database. Available
databases can be viewed by \link[enrichR]{listEnrichrDbs}.}
\item{fdr}{False discovery rate (FDR). Numeric. Cutoff value for adjusted
p-value, terms with FDR below this value are considered significantly
enriched.}
}
\value{
List of length 'L' where each member contains the significantly
enriched terms for the corresponding module.
}
\description{
Identify and return significantly-enriched terms for each gene
module in a Celda object or a \linkS4class{SingleCellExperiment} object.
Performs gene set enrichment analysis for Celda
identified modules using the \link[enrichR]{enrichr} function.
}
\examples{
library(M3DExampleData)
counts <- M3DExampleData::Mmus_example_list$data
# subset 500 genes for fast clustering
counts <- counts[seq(1501, 2000), ]
# cluster genes into 10 modules for quick demo
sce <- celda_G(x = as.matrix(counts), L = 10, verbose = FALSE)
gse <- geneSetEnrich(sce,
databases = c("GO_Biological_Process_2018", "GO_Molecular_Function_2018"))
}
\author{
Ahmed Youssef, Zhe Wang
}
|
# plot1: download the UCI household power consumption data set, extract the
# observations for 1-2 February 2007, and write a histogram of Global Active
# Power to "plot1.png" (480 x 480 pixels) in the working directory.
# Returns (invisibly) the value of dev.off(), as before.
plot1 <- function(){
  # Download the zipped data set and return the path of the extracted file.
  loadData <- function(){
    temp <- tempfile()
    td <- tempdir()
    fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
    download.file(fileURL, temp)
    fname <- unzip(temp, list = TRUE)$Name[1]
    unzip(temp, files = fname, exdir = td, overwrite = TRUE)
    fpath <- file.path(td, fname)
    # Remove only the downloaded archive. The original code also called
    # unlink(td): a no-op without recursive = TRUE, but had it worked it
    # would have deleted the file we are about to return, so it is dropped.
    unlink(temp)
    fpath
  }
  # Retrieve the two target days by pushing a SQL filter down to the file
  # reader (fast; avoids loading the full 2M-row file into memory).
  getdata2 <- function(fpath){
    library(sqldf)
    sql <- "select * from file where Date in ('1/2/2007','2/2/2007')"
    data <- read.csv.sql(file = fpath, sql = sql, sep = ";", header = TRUE)
    dateConversion(data)
  }
  # Alternative reader using read.table (slow but warning-free).
  # Not used by default; kept for testing, as in the original.
  getdata <- function(fpath){
    data <- read.table(file = fpath, sep = ";", header = TRUE, stringsAsFactors = FALSE)
    data <- dateConversion(data)
    data <- subset(data, data$Date >= '2007-02-01' & data$Date <= '2007-02-02')
    data$Global_active_power <- as.numeric(data$Global_active_power)
    data
  }
  # Parse the Date and Time columns (day/month/year) into date-time classes.
  # as.POSIXct is used instead of raw strptime: POSIXlt columns inside a
  # data frame are fragile (list-based) and are best avoided.
  dateConversion <- function(data){
    data$DateTime <- as.POSIXct(strptime(paste(data$Date, data$Time),
                                         format = '%d/%m/%Y %H:%M:%S'))
    data$Date <- as.POSIXct(strptime(data$Date, format = '%d/%m/%Y'))
    data
  }
  # Load data path and retrieve the filtered data.
  fpath <- loadData()
  data <- getdata2(fpath)
  # Render the histogram to a PNG device. hist() lives in the 'graphics'
  # package (attached by default), so no extra library() call is needed.
  png(filename = "plot1.png", width = 480, height = 480)
  hist(data$Global_active_power, xlab = "Global Active Power (kilowatts)",
       main = "Global Active Power", col = "red")
  dev.off()
}
#Load data and return the data path
#Downloads the course zip into a tempfile, extracts its first entry into
#tempdir(), and returns the path of the extracted file.
loadData <- function(){
temp <- tempfile()
td = tempdir()
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURL, temp)
# first entry in the archive is the data file
fname <- unzip(temp, list=TRUE)$Name[1]
unzip(temp, files=fname, exdir=td, overwrite=TRUE)
fpath <- file.path(td, fname)
# discard the downloaded archive once extracted
unlink(temp)
# NOTE(review): unlink() on a directory without recursive = TRUE is a no-op,
# so the next line does nothing; had it deleted tempdir() it would have
# removed the file we are about to return. Consider removing it.
unlink(td)
fpath
}
#Retrieve data using sql (faster)
#Pushes a SQL WHERE filter down to the file reader so only the rows for
#1 Feb and 2 Feb 2007 are parsed, then converts the date/time columns.
getdata2 <- function(fpath){
library(sqldf)
# dates in the raw file use d/m/yyyy strings
sql <- "select * from file where Date in ('1/2/2007','2/2/2007')"
data <- read.csv.sql(file = fpath, sql = sql ,sep=";", header=TRUE)
data <- dateConversion(data)
data
}
#Retrieve data using read.tables (slow but no warning).
#This function is not used here. It is only for testing
#Reads the whole file, converts dates, then subsets to 1-2 Feb 2007 and
#coerces the power column to numeric (the raw file stores it as text).
getdata <- function(fpath){
data <- read.table(file=fpath, sep=";", header=TRUE, stringsAsFactors=FALSE)
data <- dateConversion(data)
# comparison coerces the string bounds to date-times via Ops.POSIXt
data <- subset (data, data$Date >= '2007-02-01' & data$Date <= '2007-02-02')
data$Global_active_power <- as.numeric(data$Global_active_power)
data
}
#Convert Date and Time
#Parses the character Date (d/m/yyyy) and a combined Date+Time stamp into
#date-time values, adding a DateTime column and overwriting Date in place.
dateConversion <- function(data){
stamp <- paste(data$Date, data$Time)
data$Date <- strptime(data$Date, format='%d/%m/%Y')
data$DateTime <- strptime(stamp, format='%d/%m/%Y %H:%M:%S')
data
}
#Load data path and retrieve data
fpath <- loadData()
#fpath <- "data/household_power_consumption.txt"
#data <- getdata(fpath)
data <- getdata2(fpath)
#Print the result to the device
library(datasets) # NOTE(review): hist() is in 'graphics' (attached by default); 'datasets' is not needed here
png(filename = "plot1.png", width = 480, height = 480)
hist(data$Global_active_power, xlab = "Global Active Power (kilowatts)",
main="Global Active Power", col="red")
dev.off()
} |
# Copyright (c) 2019 Master of Data Science at the University of British Columbia
# Licensed under the MIT License (the "License").
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at https://mit-license.org
# February 2019
# This script tests the function from sharpen.R
# This function sharpens an image.
# Input : An image in .png, .jpeg,.gif,.bmp, .jpg format
# Output : A sharpened image in the same format as the input image file type
library(devtools)
library(usethis)
#' Sharpen an image
#'
#' Intended to sharpen the image located at \code{input_path} and write the
#' result back in the same format (per the file header: .png, .jpeg, .gif,
#' .bmp, .jpg). NOTE(review): the body is currently an unimplemented stub
#' and performs no work.
#'
#' @param input_path string, path for the input png file
#'
#' @return a png file at the same path as input_path
#' @export
#'
#' @examples
#' sharpen("../img/test.png")
#'
sharpen <- function(input_path) {
}
| /R/sharpen.R | permissive | akanshaVashisth/filterizeR | R | false | false | 769 | r | # Copyright (c) 2019 Master of Data Science at the University of British Columbia
# Licensed under the MIT License (the "License").
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at https://mit-license.org
# February 2019
# This script tests the function from sharpen.R
# This function sharpens an image.
# Input : An image in .png, .jpeg,.gif,.bmp, .jpg format
# Output : A sharpened image in the same format as the input image file type
library(devtools)
library(usethis)
#' Sharpen an image
#'
#' Intended to sharpen the image located at \code{input_path} and write the
#' result back in the same format (per the file header: .png, .jpeg, .gif,
#' .bmp, .jpg). NOTE(review): the body is currently an unimplemented stub
#' and performs no work.
#'
#' @param input_path string, path for the input png file
#'
#' @return a png file at the same path as input_path
#' @export
#'
#' @examples
#' sharpen("../img/test.png")
#'
sharpen <- function(input_path) {
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clamp.env.R
\name{clamp.env}
\alias{clamp.env}
\title{Takes an enmtools.model object and a set of environment layers and clamps the environment layers so that no variable falls outside of the range available in the training data.}
\usage{
clamp.env(model, env)
}
\arguments{
\item{model}{An enmtools.model object. Alternatively the analysis.df component of an enmtools.model object.}
\item{env}{A raster or raster stack of environmental data.}
}
\value{
An enmtools model object containing species name, model formula (if any), model object, suitability raster, marginal response plots, and any evaluation objects that were created.
}
\description{
Takes an enmtools.model object and a set of environment layers and clamps the environment layers so that no variable falls outside of the range available in the training data.
}
\examples{
\donttest{
data(euro.worldclim)
data(iberolacerta.clade)
monticola.gam <- enmtools.gam(iberolacerta.clade$species$monticola, euro.worldclim[[c(1,5,9,13)]])
euro.clamped <- clamp.env(monticola.gam, euro.worldclim)
clamped.prediction <- predict(monticola.gam, euro.clamped)
raster::plot(clamped.prediction$suitability - monticola.gam$suitability)
}
}
| /man/clamp.env.Rd | no_license | rdinnager/ENMTools | R | false | true | 1,265 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clamp.env.R
\name{clamp.env}
\alias{clamp.env}
\title{Takes an enmtools.model object and a set of environment layers and clamps the environment layers so that no variable falls outside of the range available in the training data.}
\usage{
clamp.env(model, env)
}
\arguments{
\item{model}{An enmtools.model object. Alternatively the analysis.df component of an enmtools.model object.}
\item{env}{A raster or raster stack of environmental data.}
}
\value{
An enmtools model object containing species name, model formula (if any), model object, suitability raster, marginal response plots, and any evaluation objects that were created.
}
\description{
Takes an enmtools.model object and a set of environment layers and clamps the environment layers so that no variable falls outside of the range available in the training data.
}
\examples{
\donttest{
data(euro.worldclim)
data(iberolacerta.clade)
monticola.gam <- enmtools.gam(iberolacerta.clade$species$monticola, euro.worldclim[[c(1,5,9,13)]])
euro.clamped <- clamp.env(monticola.gam, euro.worldclim)
clamped.prediction <- predict(monticola.gam, euro.clamped)
raster::plot(clamped.prediction$suitability - monticola.gam$suitability)
}
}
|
# S3 generic for model training; dispatch is on the class of `x`.
train <- function(x, ...) {
  UseMethod("train")
}
## Default train() method: fits and resamples a model over a tuning grid,
## selects the optimal tuning parameters via trControl$selectionFunction,
## and refits the final model on the full training set.
##   x          - predictors (matrix / data frame; character matrix for string kernels)
##   y          - outcome (factor -> classification, numeric -> regression, Surv supported)
##   method     - model name from caret's library, or a custom model-info list
##   preProcess - pre-processing method names (see ppMethods)
##   weights    - optional case weights
##   metric     - performance metric used to pick the best tuning parameters
##   maximize   - whether larger metric values are better
##   trControl  - resampling/control options from trainControl()
##   tuneGrid   - optional data frame of candidate tuning parameters
##   tuneLength - grid size per parameter when tuneGrid is NULL
## Returns an object of class "train".
train.default <- function(x, y,
method = "rf",
preProcess = NULL,
...,
weights = NULL,
metric = ifelse(is.factor(y), "Accuracy", "RMSE"),
maximize = ifelse(metric %in% c("RMSE", "logLoss"), FALSE, TRUE),
trControl = trainControl(),
tuneGrid = NULL,
tuneLength = 3) {
## record wall-clock start so total elapsed time can be reported in $times
startTime <- proc.time()
if(is.character(y)) y <- as.factor(y)
## Resolve `method` to a model-info list: either a user-supplied custom list
## (validated for required components) or a built-in entry from getModelInfo().
if(is.list(method)) {
minNames <- c("library", "type", "parameters", "grid",
"fit", "predict", "prob")
nameCheck <- minNames %in% names(method)
if(!all(nameCheck)) stop(paste("some required components are missing:",
paste(minNames[!nameCheck], collapse = ", ")))
models <- method
method <- "custom"
} else {
models <- getModelInfo(method, regex = FALSE)[[1]]
if (length(models) == 0)
stop(paste("Model", method, "is not in caret's built-in library"))
}
## make sure the model's package dependencies are installed and attached
checkInstall(models$library)
for(i in seq(along = models$library)) do.call("require", list(package = models$library[i]))
if(any(names(models) == "check") && is.function(models$check)) {
software_check <- models$check(models$library)
}
paramNames <- as.character(models$parameters$parameter)
funcCall <- match.call(expand.dots = TRUE)
modelType <- get_model_type(y)
if(!(modelType %in% models$type)) stop(paste("wrong model type for", tolower(modelType)))
## string-kernel SVMs need a single-column character matrix, not numeric data
if(grepl("^svm", method) & grepl("String$", method)) {
if(is.vector(x) && is.character(x)) {
stop("'x' should be a character matrix with a single column for string kernel methods")
}
if(is.matrix(x) && is.numeric(x)) {
stop("'x' should be a character matrix with a single column for string kernel methods")
}
if(is.data.frame(x)) {
stop("'x' should be a character matrix with a single column for string kernel methods")
}
}
if(modelType == "Regression" & length(unique(y)) == 2)
warning(paste("You are trying to do regression and your outcome only has",
"two possible values Are you trying to do classification?",
"If so, use a 2 level factor as your outcome column."))
if(modelType != "Classification" & !is.null(trControl$sampling))
stop("sampling methods are only implemented for classification problems")
if(!is.null(trControl$sampling)) {
trControl$sampling <- parse_sampling(trControl$sampling)
}
if(any(class(x) == "data.table")) x <- as.data.frame(x)
check_dims(x = x, y = y)
n <- if(class(y)[1] == "Surv") nrow(y) else length(y)
## TODO add check method and execute here
## Some models that use RWeka start multiple threads and this conflicts with multicore:
if(any(search() == "package:doMC") && getDoParRegistered() && "RWeka" %in% models$library)
warning("Models using Weka will not work with parallel processing with multicore/doMC")
flush.console()
if(!is.null(preProcess) && !(all(preProcess %in% ppMethods)))
stop(paste('pre-processing methods are limited to:', paste(ppMethods, collapse = ", ")))
## Classification-specific validation: class levels, class probabilities,
## and metric compatibility.
if(modelType == "Classification") {
## We should get and save the class labels to ensure that predictions are coerced
## to factors that have the same levels as the original data. This is especially
## important with multiclass systems where one or more classes have low sample sizes
## relative to the others
classLevels <- levels(y)
xtab <- table(y)
if(any(xtab == 0)) {
xtab_msg <- paste("'", names(xtab)[xtab == 0], "'", collapse = ", ", sep = "")
stop(paste("One or more factor levels in the outcome has no data:", xtab_msg))
}
if(trControl$classProbs && any(classLevels != make.names(classLevels))) {
stop(paste("At least one of the class levels is not a valid R variable name;",
"This will cause errors when class probabilities are generated because",
"the variables names will be converted to ",
paste(make.names(classLevels), collapse = ", "),
". Please use factor levels that can be used as valid R variable names",
" (see ?make.names for help)."))
}
if(metric %in% c("RMSE", "Rsquared"))
stop(paste("Metric", metric, "not applicable for classification models"))
if(!trControl$classProbs && metric == "ROC")
stop(paste("Class probabilities are needed to score models using the",
"area under the ROC curve. Set `classProbs = TRUE`",
"in the trainControl() function."))
if(trControl$classProbs) {
if(!is.function(models$prob)) {
warning("Class probabilities were requested for a model that does not implement them")
trControl$classProbs <- FALSE
}
}
} else {
if(metric %in% c("Accuracy", "Kappa"))
stop(paste("Metric", metric, "not applicable for regression models"))
classLevels <- NA
if(trControl$classProbs) {
warning("cannnot compute class probabilities for regression")
trControl$classProbs <- FALSE
}
}
if(trControl$method == "oob" & is.null(models$oob))
stop("Out of bag estimates are not implemented for this model")
## SURV TODO: make resampling functions classes or ifelses based on data type
## If they don't exist, make the data partitions for the resampling iterations.
if(is.null(trControl$index)) {
if(trControl$method == "custom")
stop("'custom' resampling is appropriate when the `trControl` argument `index` is used")
trControl$index <- switch(tolower(trControl$method),
oob = NULL,
none = list(seq(along = y)),
alt_cv =, cv = createFolds(y, trControl$number, returnTrain = TRUE),
repeatedcv =, adaptive_cv = createMultiFolds(y, trControl$number, trControl$repeats),
loocv = createFolds(y, n, returnTrain = TRUE),
boot =, boot632 =, adaptive_boot = createResample(y, trControl$number),
test = createDataPartition(y, 1, trControl$p),
adaptive_lgocv =, lgocv = createDataPartition(y, trControl$number, trControl$p),
timeslice = createTimeSlices(seq(along = y),
initialWindow = trControl$initialWindow,
horizon = trControl$horizon,
fixedWindow = trControl$fixedWindow)$train,
subsemble = subsemble_index(y, V = trControl$number, J = trControl$repeats))
} else {
## user-supplied indices must be integer vectors
index_types <- unlist(lapply(trControl$index, is.integer))
if(!isTRUE(all(index_types)))
stop("`index` should be lists of integers.")
if(!is.null(trControl$indexOut)) {
index_types <- unlist(lapply(trControl$indexOut, is.integer))
if(!isTRUE(all(index_types)))
stop("`indexOut` should be lists of integers.")
}
}
if(trControl$method == "subsemble") {
if(!trControl$savePredictions) trControl$savePredictions <- TRUE
trControl$indexOut <- trControl$index$holdout
trControl$index <- trControl$index$model
}
## normalize savePredictions to one of "all"/"final"/"none"
if(is.logical(trControl$savePredictions)) {
trControl$savePredictions <- if(trControl$savePredictions) "all" else "none"
} else {
if(!(trControl$savePredictions %in% c("all", "final", "none")))
stop('`savePredictions` should be either logical or "all", "final" or "none"')
}
## Create hold--out indicies
if(is.null(trControl$indexOut) & trControl$method != "oob"){
if(tolower(trControl$method) != "timeslice") {
y_index <- if(class(y)[1] == "Surv") 1:nrow(y) else seq(along = y)
trControl$indexOut <- lapply(trControl$index,
function(training, allSamples) allSamples[-unique(training)],
allSamples = y_index)
names(trControl$indexOut) <- prettySeq(trControl$indexOut)
} else {
trControl$indexOut <- createTimeSlices(seq(along = y),
initialWindow = trControl$initialWindow,
horizon = trControl$horizon,
fixedWindow = trControl$fixedWindow)$test
}
}
if(trControl$method != "oob" & is.null(trControl$index)) names(trControl$index) <- prettySeq(trControl$index)
if(trControl$method != "oob" & is.null(names(trControl$index))) names(trControl$index) <- prettySeq(trControl$index)
if(trControl$method != "oob" & is.null(names(trControl$indexOut))) names(trControl$indexOut) <- prettySeq(trControl$indexOut)
## Gather all the pre-processing info. We will need it to pass into the grid creation
## code so that there is a concordance between the data used for modeling and grid creation
if(!is.null(preProcess)) {
ppOpt <- list(options = preProcess)
if(length(trControl$preProcOptions) > 0) ppOpt <- c(ppOpt,trControl$preProcOptions)
} else ppOpt <- NULL
## If no default training grid is specified, get one. We have to pass in the formula
## and data for some models (rpart, pam, etc - see manual for more details)
if(is.null(tuneGrid)) {
if(!is.null(ppOpt) && length(models$parameters$parameter) > 1 && as.character(models$parameters$parameter) != "parameter") {
pp <- list(method = ppOpt$options)
if("ica" %in% pp$method) pp$n.comp <- ppOpt$ICAcomp
if("pca" %in% pp$method) pp$thresh <- ppOpt$thresh
if("knnImpute" %in% pp$method) pp$k <- ppOpt$k
pp$x <- x
ppObj <- do.call("preProcess", pp)
tuneGrid <- models$grid(x = predict(ppObj, x),
y = y,
len = tuneLength,
search = trControl$search)
rm(ppObj, pp)
} else tuneGrid <- models$grid(x = x, y = y, len = tuneLength, search = trControl$search)
}
## Check to make sure that there are tuning parameters in some cases
if(grepl("adaptive", trControl$method) & nrow(tuneGrid) == 1) {
stop(paste("For adaptive resampling, there needs to be more than one",
"tuning parameter for evaluation"))
}
dotNames <- hasDots(tuneGrid, models)
if(dotNames) colnames(tuneGrid) <- gsub("^\\.", "", colnames(tuneGrid))
## Check tuning parameter names
tuneNames <- as.character(models$parameters$parameter)
goodNames <- all.equal(sort(tuneNames), sort(names(tuneGrid)))
if(!is.logical(goodNames) || !goodNames) {
stop(paste("The tuning parameter grid should have columns",
paste(tuneNames, collapse = ", ", sep = "")))
}
if(trControl$method == "none" && nrow(tuneGrid) != 1)
stop("Only one model should be specified in tuneGrid with no resampling")
## In case prediction bounds are used, compute the limits. For now,
## store these in the control object since that gets passed everywhere
trControl$yLimits <- if(is.numeric(y)) get_range(y) else NULL
if(trControl$method != "none") {
##------------------------------------------------------------------------------------------------------------------------------------------------------#
## For each tuning parameter combination, we will loop over them, fit models and generate predictions.
## We only save the predictions at this point, not the models (and in the case of method = "oob" we
## only save the prediction summaries at this stage.
## trainInfo will hold the information about how we should loop to train the model and what types
## of parameters are used.
## There are two types of methods to build the models: "basic" means that each tuning parameter
## combination requires it's own model fit and "seq" where a single model fit can be used to
## get predictions for multiple tuning parameters.
## The tuneScheme() function is in miscr.R and it helps define the following:
## - A data frame called "loop" with columns for parameters and a row for each model to be fit.
## For "basic" models, this is the same as the tuning grid. For "seq" models, it is only
## the subset of parameters that need to be fit
## - A list called "submodels". If "basic", it is NULL. For "seq" models, it is a list. Each list
## item is a data frame of the parameters that need to be varied for the corresponding row of
## the loop object.
##
## For example, for a gbm model, our tuning grid might be:
## .interaction.depth .n.trees .shrinkage
## 1 50 0.1
## 1 100 0.1
## 2 50 0.1
## 2 100 0.1
## 2 150 0.1
##
## For this example:
##
## loop:
## .interaction.depth .shrinkage .n.trees
## 1 0.1 100
## 2 0.1 150
##
## submodels:
## [[1]]
## .n.trees
## 50
##
## [[2]]
## .n.trees
## 50
## 100
##
## A simplified version of predictionFunction() would have the following gbm section:
##
## # First get the predictions with the value of n.trees as given in the current
## # row of loop
## out <- predict(modelFit,
## newdata,
## type = "response",
## n.trees = modelFit$tuneValue$.n.trees)
##
## # param is the current value of submodels. In normal prediction mode (i.e
## # when using predict.train), param = NULL. When called within train()
## # with this model, it will have the other values for n.trees.
## # In this case, the output of the function is a list of predictions
## # These values are deconvoluted in workerTasks() in misc.R
## if(!is.null(param))
## {
## tmp <- vector(mode = "list", length = nrow(param) + 1)
## tmp[[1]] <- out
##
## for(j in seq(along = param$.n.trees))
## {
## tmp[[j]] <- predict(modelFit,
## newdata,
## type = "response",
## n.trees = param$.n.trees[j])
## }
## out <- tmp
##
# paramCols <- paste(".", as.character(models$parameters$parameter), sep = "")
if(is.function(models$loop) && nrow(tuneGrid) > 1){
trainInfo <- models$loop(tuneGrid)
if(!all(c("loop", "submodels") %in% names(trainInfo)))
stop("The 'loop' function should produce a list with elements 'loop' and 'submodels'")
lengths <- unlist(lapply(trainInfo$submodels, nrow))
if(all(lengths == 0)) trainInfo$submodels <- NULL
} else trainInfo <- list(loop = tuneGrid)
## Set or check the seeds when needed
## (one integer vector per resample, plus a single final-model seed)
if(is.null(trControl$seeds) | all(is.na(trControl$seeds))) {
seeds <- vector(mode = "list", length = length(trControl$index))
seeds <- lapply(seeds, function(x) sample.int(n = 1000000, size = nrow(trainInfo$loop)))
seeds[[length(trControl$index) + 1]] <- sample.int(n = 1000000, size = 1)
trControl$seeds <- seeds
} else {
if(!(length(trControl$seeds) == 1 && is.na(trControl$seeds))) {
## check versus number of tasks
numSeeds <- unlist(lapply(trControl$seeds, length))
badSeed <- (length(trControl$seeds) < length(trControl$index) + 1) ||
(any(numSeeds[-length(numSeeds)] < nrow(trainInfo$loop)))
if(badSeed) stop(paste("Bad seeds: the seed object should be a list of length",
length(trControl$index) + 1, "with",
length(trControl$index), "integer vectors of size",
nrow(trainInfo$loop), "and the last list element having a",
"single integer"))
}
}
## SURV TODO: modify defaultSummary for Surv objects
if(trControl$method == "oob") {
## delay this test until later
perfNames <- metric
} else {
## run some data thru the summary function and see what we get
testSummary <- evalSummaryFunction(y, wts = weights, ctrl = trControl,
lev = classLevels, metric = metric,
method = method)
perfNames <- names(testSummary)
}
## if the requested metric is not produced by the summary function,
## fall back to the first available metric (with a warning)
if(!(metric %in% perfNames)){
oldMetric <- metric
metric <- perfNames[1]
warning(paste("The metric \"",
oldMetric,
"\" was not in ",
"the result set. ",
metric,
" will be used instead.",
sep = ""))
}
## Dispatch to the appropriate resampling workflow: out-of-bag, LOOCV,
## adaptive, or the standard nominal workflow.
if(trControl$method == "oob"){
tmp <- oobTrainWorkflow(x = x, y = y, wts = weights,
info = trainInfo, method = models,
ppOpts = preProcess, ctrl = trControl, lev = classLevels, ...)
performance <- tmp
perfNames <- colnames(performance)
perfNames <- perfNames[!(perfNames %in% as.character(models$parameters$parameter))]
if(!(metric %in% perfNames)){
oldMetric <- metric
metric <- perfNames[1]
warning(paste("The metric \"",
oldMetric,
"\" was not in ",
"the result set. ",
metric,
" will be used instead.",
sep = ""))
}
} else {
if(trControl$method == "LOOCV"){
tmp <- looTrainWorkflow(x = x, y = y, wts = weights,
info = trainInfo, method = models,
ppOpts = preProcess, ctrl = trControl, lev = classLevels, ...)
performance <- tmp$performance
} else {
if(!grepl("adapt", trControl$method)){
tmp <- nominalTrainWorkflow(x = x, y = y, wts = weights,
info = trainInfo, method = models,
ppOpts = preProcess, ctrl = trControl, lev = classLevels, ...)
performance <- tmp$performance
resampleResults <- tmp$resample
} else {
tmp <- adaptiveWorkflow(x = x, y = y, wts = weights,
info = trainInfo, method = models,
ppOpts = preProcess,
ctrl = trControl,
lev = classLevels,
metric = metric,
maximize = maximize,
...)
performance <- tmp$performance
resampleResults <- tmp$resample
}
}
}
## TODO we used to give resampled results for LOO
## split out per-resample confusion-matrix cell counts (".cell*" columns)
if(!(trControl$method %in% c("LOOCV", "oob"))) {
if(modelType == "Classification" && length(grep("^\\cell", colnames(resampleResults))) > 0) {
resampledCM <- resampleResults[, !(names(resampleResults) %in% perfNames)]
resampleResults <- resampleResults[, -grep("^\\cell", colnames(resampleResults))]
#colnames(resampledCM) <- gsub("^\\.", "", colnames(resampledCM))
} else resampledCM <- NULL
} else resampledCM <- NULL
if(trControl$verboseIter) {
cat("Aggregating results\n")
flush.console()
}
perfCols <- names(performance)
perfCols <- perfCols[!(perfCols %in% paramNames)]
if(all(is.na(performance[, metric]))) {
cat(paste("Something is wrong; all the", metric, "metric values are missing:\n"))
print(summary(performance[, perfCols[!grepl("SD$", perfCols)], drop = FALSE]))
stop("Stopping")
}
## Sort the tuning parameters from least complex to most complex
if(!is.null(models$sort)) performance <- models$sort(performance)
if(any(is.na(performance[, metric])))
warning("missing values found in aggregated results")
if(trControl$verboseIter && nrow(performance) > 1) {
cat("Selecting tuning parameters\n")
flush.console()
}
## select the optimal set
selectClass <- class(trControl$selectionFunction)[1]
## Select the "optimal" tuning parameter.
if(grepl("adapt", trControl$method)) {
perf_check <- subset(performance, .B == max(performance$.B))
} else perf_check <- performance
## Make adaptive only look at parameters with B = max(B)
if(selectClass == "function") {
bestIter <- trControl$selectionFunction(x = perf_check,
metric = metric,
maximize = maximize)
}
else {
if(trControl$selectionFunction == "oneSE") {
bestIter <- oneSE(perf_check,
metric,
length(trControl$index),
maximize)
} else {
bestIter <- do.call(trControl$selectionFunction,
list(x = perf_check,
metric = metric,
maximize = maximize))
}
}
if(is.na(bestIter) || length(bestIter) != 1) stop("final tuning parameters could not be determined")
if(grepl("adapt", trControl$method)) {
best_perf <- perf_check[bestIter,as.character(models$parameters$parameter),drop = FALSE]
performance$order <- 1:nrow(performance)
bestIter <- merge(performance, best_perf)$order
performance$order <- NULL
}
## Based on the optimality criterion, select the tuning parameter(s)
bestTune <- performance[bestIter, paramNames, drop = FALSE]
} else {
## method == "none": no resampling; evaluate the single candidate directly
bestTune <- tuneGrid
performance <- evalSummaryFunction(y, wts = weights, ctrl = trControl,
lev = classLevels, metric = metric,
method = method)
perfNames <- names(performance)
performance <- as.data.frame(t(performance))
performance <- cbind(performance, tuneGrid)
performance <- performance[-1,,drop = FALSE]
tmp <- resampledCM <- NULL
}
## Save some or all of the resampling summary metrics
if(!(trControl$method %in% c("LOOCV", "oob", "none"))) {
byResample <- switch(trControl$returnResamp,
none = NULL,
all = {
out <- resampleResults
colnames(out) <- gsub("^\\.", "", colnames(out))
out
},
final = {
out <- merge(bestTune, resampleResults)
out <- out[,!(names(out) %in% names(tuneGrid)), drop = FALSE]
out
})
} else {
byResample <- NULL
}
# names(bestTune) <- paste(".", names(bestTune), sep = "")
## Reorder rows of performance
orderList <- list()
for(i in seq(along = paramNames)) orderList[[i]] <- performance[,paramNames[i]]
names(orderList) <- paramNames
performance <- performance[do.call("order", orderList),]
if(trControl$verboseIter) {
bestText <- paste(paste(names(bestTune), "=",
format(bestTune, digits = 3)),
collapse = ", ")
if(nrow(performance) == 1) bestText <- "final model"
cat("Fitting", bestText, "on full training set\n")
flush.console()
}
## Make the final model based on the tuning results
if(!(length(trControl$seeds) == 1 && is.na(trControl$seeds))) set.seed(trControl$seeds[[length(trControl$seeds)]][1])
finalTime <- system.time(
finalModel <- createModel(x = x, y = y, wts = weights,
method = models,
tuneValue = bestTune,
obsLevels = classLevels,
pp = ppOpt,
last = TRUE,
classProbs = trControl$classProbs,
sampling = trControl$sampling,
...))
## optionally trim the fitted model to reduce its memory footprint
if(trControl$trim && !is.null(models$trim)) {
if(trControl$verboseIter) old_size <- object.size(finalModel$fit)
finalModel$fit <- models$trim(finalModel$fit)
if(trControl$verboseIter) {
new_size <- object.size(finalModel$fit)
reduction <- format(old_size - new_size, units = "Mb")
if(reduction == "0 Mb") reduction <- "< 0 Mb"
p_reduction <- (unclass(old_size) - unclass(new_size))/unclass(old_size)*100
p_reduction <- if(p_reduction < 1) "< 1%" else paste0(round(p_reduction, 0), "%")
cat("Final model footprint reduced by", reduction, "or", p_reduction, "\n")
}
}
## get pp info
pp <- finalModel$preProc
finalModel <- finalModel$fit
## Remove this and check for other places it is reference
## replaced by tuneValue
if(method == "pls") finalModel$bestIter <- bestTune
## To use predict.train and automatically use the optimal lambda,
## we need to save it
if(method == "glmnet") finalModel$lambdaOpt <- bestTune$lambda
## optionally retain the training data (as a data frame with a ".outcome"
## column) on the returned object
if(trControl$returnData) {
outData <- if(!is.data.frame(x)) try(as.data.frame(x), silent = TRUE) else x
if(class(outData)[1] == "try-error") {
warning("The training data could not be converted to a data frame for saving")
outData <- NULL
} else {
outData$.outcome <- y
if(!is.null(weights)) outData$.weights <- weights
}
} else outData <- NULL
## In the case of pam, the data will need to be saved differently
if(trControl$returnData & method == "pam") {
finalModel$xData <- x
finalModel$yData <- y
}
if(trControl$savePredictions == "final")
tmp$predictions <- merge(bestTune, tmp$predictions)
endTime <- proc.time()
times <- list(everything = endTime - startTime,
final = finalTime)
## assemble the "train" object returned to the caller
out <- structure(list(method = method,
modelInfo = models,
modelType = modelType,
results = performance,
pred = tmp$predictions,
bestTune = bestTune,
call = funcCall,
dots = list(...),
metric = metric,
control = trControl,
finalModel = finalModel,
preProcess = pp,
trainingData = outData,
resample = byResample,
resampledCM = resampledCM,
perfNames = perfNames,
maximize = maximize,
yLimits = trControl$yLimits,
times = times),
class = "train")
trControl$yLimits <- NULL
## optionally time a prediction on a resampled pseudo data set
if(trControl$timingSamps > 0) {
pData <- lapply(x, function(x, n) sample(x, n, replace = TRUE), n = trControl$timingSamps)
pData <- as.data.frame(pData)
out$times$prediction <- system.time(predict(out, pData))
} else out$times$prediction <- rep(NA, 3)
out
}
train.formula <- function (form, data, ..., weights, subset, na.action = na.fail, contrasts = NULL) {
  ## Formula interface to train(): builds the model frame and design matrix,
  ## drops the intercept column, and delegates to the default method.
  m <- match.call(expand.dots = FALSE)
  if (is.matrix(eval.parent(m$data))) m$data <- as.data.frame(data)
  m$... <- m$contrasts <- NULL
  m[[1]] <- as.name("model.frame")
  m <- eval.parent(m)
  ## With the default na.action (na.fail) this is unreachable; with na.omit /
  ## na.exclude an empty model frame means every row had a missing value.
  if(nrow(m) < 1) stop("Every row of the data has at least one missing value")
  Terms <- attr(m, "terms")
  x <- model.matrix(Terms, m, contrasts, na.action = na.action)
  cons <- attr(x, "contrast")
  ## Remove the intercept column; the underlying model functions add their
  ## own intercepts as needed.
  xint <- match("(Intercept)", colnames(x), nomatch = 0)
  if (xint > 0) x <- x[, -xint, drop = FALSE]
  y <- model.response(m)
  w <- as.vector(model.weights(m))
  res <- train(x, y, weights = w, ...)
  ## Attach formula-related metadata so predict()/print() can rebuild the
  ## design matrix for new data.
  res$terms <- Terms
  res$coefnames <- colnames(x)
  res$call <- match.call()
  res$na.action <- attr(m, "na.action")
  res$contrasts <- cons
  res$xlevels <- .getXlevels(Terms, m)
  if(!is.null(res$trainingData)) {
    ## NOTE(review): the full `data` is stored here, ignoring `subset` and any
    ## rows removed by `na.action` -- confirm this is intended.
    res$trainingData <- data
    isY <- names(res$trainingData) %in% as.character(form[[2]])
    if(any(isY)) colnames(res$trainingData)[isY] <- ".outcome"
  }
  class(res) <- c("train", "train.formula")
  res
}
summary.train <- function(object, ...) {
  ## Forward to the summary method of the underlying fitted model.
  summary(object$finalModel, ...)
}
residuals.train <- function(object, ...) {
  ## Residuals are only defined for numeric outcomes.
  if (object$modelType != "Regression")
    stop("train() only produces residuals on numeric outcomes")
  ## Prefer the residuals reported by the underlying fit, when available.
  out <- residuals(object$finalModel, ...)
  if (!is.null(out)) return(out)
  ## Otherwise recompute them as observed minus predicted on the saved
  ## training data.
  if (is.null(object$trainingData))
    stop("The training data must be saved to produce residuals")
  train_df <- object$trainingData
  pred_cols <- names(train_df) != ".outcome"
  train_df$.outcome - predict(object, train_df[, pred_cols, drop = FALSE])
}
fitted.train <- function(object, ...) {
  ## Prefer the fitted values reported by the underlying model fit.
  out <- fitted(object$finalModel)
  if (!is.null(out)) return(out)
  ## Otherwise recompute them by predicting on the saved training data.
  if (is.null(object$trainingData))
    stop("The training data must be saved to produce fitted values")
  train_df <- object$trainingData
  predict(object, train_df[, names(train_df) != ".outcome", drop = FALSE])
}
| /R/train.default.R | no_license | brucebb10/caret | R | false | false | 29,987 | r | "train" <-
function(x, ...){
UseMethod("train")
}
train.default <- function(x, y,
method = "rf",
preProcess = NULL,
...,
weights = NULL,
metric = ifelse(is.factor(y), "Accuracy", "RMSE"),
maximize = ifelse(metric %in% c("RMSE", "logLoss"), FALSE, TRUE),
trControl = trainControl(),
tuneGrid = NULL,
tuneLength = 3) {
startTime <- proc.time()
if(is.character(y)) y <- as.factor(y)
if(is.list(method)) {
minNames <- c("library", "type", "parameters", "grid",
"fit", "predict", "prob")
nameCheck <- minNames %in% names(method)
if(!all(nameCheck)) stop(paste("some required components are missing:",
paste(minNames[!nameCheck], collapse = ", ")))
models <- method
method <- "custom"
} else {
models <- getModelInfo(method, regex = FALSE)[[1]]
if (length(models) == 0)
stop(paste("Model", method, "is not in caret's built-in library"))
}
checkInstall(models$library)
for(i in seq(along = models$library)) do.call("require", list(package = models$library[i]))
if(any(names(models) == "check") && is.function(models$check)) {
software_check <- models$check(models$library)
}
paramNames <- as.character(models$parameters$parameter)
funcCall <- match.call(expand.dots = TRUE)
modelType <- get_model_type(y)
if(!(modelType %in% models$type)) stop(paste("wrong model type for", tolower(modelType)))
if(grepl("^svm", method) & grepl("String$", method)) {
if(is.vector(x) && is.character(x)) {
stop("'x' should be a character matrix with a single column for string kernel methods")
}
if(is.matrix(x) && is.numeric(x)) {
stop("'x' should be a character matrix with a single column for string kernel methods")
}
if(is.data.frame(x)) {
stop("'x' should be a character matrix with a single column for string kernel methods")
}
}
if(modelType == "Regression" & length(unique(y)) == 2)
warning(paste("You are trying to do regression and your outcome only has",
"two possible values Are you trying to do classification?",
"If so, use a 2 level factor as your outcome column."))
if(modelType != "Classification" & !is.null(trControl$sampling))
stop("sampling methods are only implemented for classification problems")
if(!is.null(trControl$sampling)) {
trControl$sampling <- parse_sampling(trControl$sampling)
}
if(any(class(x) == "data.table")) x <- as.data.frame(x)
check_dims(x = x, y = y)
n <- if(class(y)[1] == "Surv") nrow(y) else length(y)
## TODO add check method and execute here
## Some models that use RWeka start multiple threads and this conflicts with multicore:
if(any(search() == "package:doMC") && getDoParRegistered() && "RWeka" %in% models$library)
warning("Models using Weka will not work with parallel processing with multicore/doMC")
flush.console()
if(!is.null(preProcess) && !(all(preProcess %in% ppMethods)))
stop(paste('pre-processing methods are limited to:', paste(ppMethods, collapse = ", ")))
if(modelType == "Classification") {
## We should get and save the class labels to ensure that predictions are coerced
## to factors that have the same levels as the original data. This is especially
## important with multiclass systems where one or more classes have low sample sizes
## relative to the others
classLevels <- levels(y)
xtab <- table(y)
if(any(xtab == 0)) {
xtab_msg <- paste("'", names(xtab)[xtab == 0], "'", collapse = ", ", sep = "")
stop(paste("One or more factor levels in the outcome has no data:", xtab_msg))
}
if(trControl$classProbs && any(classLevels != make.names(classLevels))) {
stop(paste("At least one of the class levels is not a valid R variable name;",
"This will cause errors when class probabilities are generated because",
"the variables names will be converted to ",
paste(make.names(classLevels), collapse = ", "),
". Please use factor levels that can be used as valid R variable names",
" (see ?make.names for help)."))
}
if(metric %in% c("RMSE", "Rsquared"))
stop(paste("Metric", metric, "not applicable for classification models"))
if(!trControl$classProbs && metric == "ROC")
stop(paste("Class probabilities are needed to score models using the",
"area under the ROC curve. Set `classProbs = TRUE`",
"in the trainControl() function."))
if(trControl$classProbs) {
if(!is.function(models$prob)) {
warning("Class probabilities were requested for a model that does not implement them")
trControl$classProbs <- FALSE
}
}
} else {
if(metric %in% c("Accuracy", "Kappa"))
stop(paste("Metric", metric, "not applicable for regression models"))
classLevels <- NA
if(trControl$classProbs) {
warning("cannnot compute class probabilities for regression")
trControl$classProbs <- FALSE
}
}
if(trControl$method == "oob" & is.null(models$oob))
stop("Out of bag estimates are not implemented for this model")
## SURV TODO: make resampling functions classes or ifelses based on data type
## If they don't exist, make the data partitions for the resampling iterations.
if(is.null(trControl$index)) {
if(trControl$method == "custom")
stop("'custom' resampling is appropriate when the `trControl` argument `index` is used")
trControl$index <- switch(tolower(trControl$method),
oob = NULL,
none = list(seq(along = y)),
alt_cv =, cv = createFolds(y, trControl$number, returnTrain = TRUE),
repeatedcv =, adaptive_cv = createMultiFolds(y, trControl$number, trControl$repeats),
loocv = createFolds(y, n, returnTrain = TRUE),
boot =, boot632 =, adaptive_boot = createResample(y, trControl$number),
test = createDataPartition(y, 1, trControl$p),
adaptive_lgocv =, lgocv = createDataPartition(y, trControl$number, trControl$p),
timeslice = createTimeSlices(seq(along = y),
initialWindow = trControl$initialWindow,
horizon = trControl$horizon,
fixedWindow = trControl$fixedWindow)$train,
subsemble = subsemble_index(y, V = trControl$number, J = trControl$repeats))
} else {
index_types <- unlist(lapply(trControl$index, is.integer))
if(!isTRUE(all(index_types)))
stop("`index` should be lists of integers.")
if(!is.null(trControl$indexOut)) {
index_types <- unlist(lapply(trControl$indexOut, is.integer))
if(!isTRUE(all(index_types)))
stop("`indexOut` should be lists of integers.")
}
}
if(trControl$method == "subsemble") {
if(!trControl$savePredictions) trControl$savePredictions <- TRUE
trControl$indexOut <- trControl$index$holdout
trControl$index <- trControl$index$model
}
if(is.logical(trControl$savePredictions)) {
trControl$savePredictions <- if(trControl$savePredictions) "all" else "none"
} else {
if(!(trControl$savePredictions %in% c("all", "final", "none")))
stop('`savePredictions` should be either logical or "all", "final" or "none"')
}
## Create hold--out indicies
if(is.null(trControl$indexOut) & trControl$method != "oob"){
if(tolower(trControl$method) != "timeslice") {
y_index <- if(class(y)[1] == "Surv") 1:nrow(y) else seq(along = y)
trControl$indexOut <- lapply(trControl$index,
function(training, allSamples) allSamples[-unique(training)],
allSamples = y_index)
names(trControl$indexOut) <- prettySeq(trControl$indexOut)
} else {
trControl$indexOut <- createTimeSlices(seq(along = y),
initialWindow = trControl$initialWindow,
horizon = trControl$horizon,
fixedWindow = trControl$fixedWindow)$test
}
}
if(trControl$method != "oob" & is.null(trControl$index)) names(trControl$index) <- prettySeq(trControl$index)
if(trControl$method != "oob" & is.null(names(trControl$index))) names(trControl$index) <- prettySeq(trControl$index)
if(trControl$method != "oob" & is.null(names(trControl$indexOut))) names(trControl$indexOut) <- prettySeq(trControl$indexOut)
## Gather all the pre-processing info. We will need it to pass into the grid creation
## code so that there is a concordance between the data used for modeling and grid creation
if(!is.null(preProcess)) {
ppOpt <- list(options = preProcess)
if(length(trControl$preProcOptions) > 0) ppOpt <- c(ppOpt,trControl$preProcOptions)
} else ppOpt <- NULL
## If no default training grid is specified, get one. We have to pass in the formula
## and data for some models (rpart, pam, etc - see manual for more details)
if(is.null(tuneGrid)) {
if(!is.null(ppOpt) && length(models$parameters$parameter) > 1 && as.character(models$parameters$parameter) != "parameter") {
pp <- list(method = ppOpt$options)
if("ica" %in% pp$method) pp$n.comp <- ppOpt$ICAcomp
if("pca" %in% pp$method) pp$thresh <- ppOpt$thresh
if("knnImpute" %in% pp$method) pp$k <- ppOpt$k
pp$x <- x
ppObj <- do.call("preProcess", pp)
tuneGrid <- models$grid(x = predict(ppObj, x),
y = y,
len = tuneLength,
search = trControl$search)
rm(ppObj, pp)
} else tuneGrid <- models$grid(x = x, y = y, len = tuneLength, search = trControl$search)
}
## Check to make sure that there are tuning parameters in some cases
if(grepl("adaptive", trControl$method) & nrow(tuneGrid) == 1) {
stop(paste("For adaptive resampling, there needs to be more than one",
"tuning parameter for evaluation"))
}
dotNames <- hasDots(tuneGrid, models)
if(dotNames) colnames(tuneGrid) <- gsub("^\\.", "", colnames(tuneGrid))
## Check tuning parameter names
tuneNames <- as.character(models$parameters$parameter)
goodNames <- all.equal(sort(tuneNames), sort(names(tuneGrid)))
if(!is.logical(goodNames) || !goodNames) {
stop(paste("The tuning parameter grid should have columns",
paste(tuneNames, collapse = ", ", sep = "")))
}
if(trControl$method == "none" && nrow(tuneGrid) != 1)
stop("Only one model should be specified in tuneGrid with no resampling")
## In case prediction bounds are used, compute the limits. For now,
## store these in the control object since that gets passed everywhere
trControl$yLimits <- if(is.numeric(y)) get_range(y) else NULL
if(trControl$method != "none") {
##------------------------------------------------------------------------------------------------------------------------------------------------------#
## For each tuning parameter combination, we will loop over them, fit models and generate predictions.
## We only save the predictions at this point, not the models (and in the case of method = "oob" we
## only save the prediction summaries at this stage.
## trainInfo will hold the information about how we should loop to train the model and what types
## of parameters are used.
## There are two types of methods to build the models: "basic" means that each tuning parameter
## combination requires it's own model fit and "seq" where a single model fit can be used to
## get predictions for multiple tuning parameters.
## The tuneScheme() function is in miscr.R and it helps define the following:
## - A data frame called "loop" with columns for parameters and a row for each model to be fit.
## For "basic" models, this is the same as the tuning grid. For "seq" models, it is only
## the subset of parameters that need to be fit
## - A list called "submodels". If "basic", it is NULL. For "seq" models, it is a list. Each list
## item is a data frame of the parameters that need to be varied for the corresponding row of
## the loop object.
##
## For example, for a gbm model, our tuning grid might be:
## .interaction.depth .n.trees .shrinkage
## 1 50 0.1
## 1 100 0.1
## 2 50 0.1
## 2 100 0.1
## 2 150 0.1
##
## For this example:
##
## loop:
## .interaction.depth .shrinkage .n.trees
## 1 0.1 100
## 2 0.1 150
##
## submodels:
## [[1]]
## .n.trees
## 50
##
## [[2]]
## .n.trees
## 50
## 100
##
## A simplified version of predictionFunction() would have the following gbm section:
##
## # First get the predictions with the value of n.trees as given in the current
## # row of loop
## out <- predict(modelFit,
## newdata,
## type = "response",
## n.trees = modelFit$tuneValue$.n.trees)
##
## # param is the current value of submodels. In normal prediction mode (i.e
## # when using predict.train), param = NULL. When called within train()
## # with this model, it will have the other values for n.trees.
## # In this case, the output of the function is a list of predictions
## # These values are deconvoluted in workerTasks() in misc.R
## if(!is.null(param))
## {
## tmp <- vector(mode = "list", length = nrow(param) + 1)
## tmp[[1]] <- out
##
## for(j in seq(along = param$.n.trees))
## {
## tmp[[j]] <- predict(modelFit,
## newdata,
## type = "response",
## n.trees = param$.n.trees[j])
## }
## out <- tmp
##
# paramCols <- paste(".", as.character(models$parameters$parameter), sep = "")
if(is.function(models$loop) && nrow(tuneGrid) > 1){
trainInfo <- models$loop(tuneGrid)
if(!all(c("loop", "submodels") %in% names(trainInfo)))
stop("The 'loop' function should produce a list with elements 'loop' and 'submodels'")
lengths <- unlist(lapply(trainInfo$submodels, nrow))
if(all(lengths == 0)) trainInfo$submodels <- NULL
} else trainInfo <- list(loop = tuneGrid)
## Set or check the seeds when needed
if(is.null(trControl$seeds) | all(is.na(trControl$seeds))) {
seeds <- vector(mode = "list", length = length(trControl$index))
seeds <- lapply(seeds, function(x) sample.int(n = 1000000, size = nrow(trainInfo$loop)))
seeds[[length(trControl$index) + 1]] <- sample.int(n = 1000000, size = 1)
trControl$seeds <- seeds
} else {
if(!(length(trControl$seeds) == 1 && is.na(trControl$seeds))) {
## check versus number of tasks
numSeeds <- unlist(lapply(trControl$seeds, length))
badSeed <- (length(trControl$seeds) < length(trControl$index) + 1) ||
(any(numSeeds[-length(numSeeds)] < nrow(trainInfo$loop)))
if(badSeed) stop(paste("Bad seeds: the seed object should be a list of length",
length(trControl$index) + 1, "with",
length(trControl$index), "integer vectors of size",
nrow(trainInfo$loop), "and the last list element having a",
"single integer"))
}
}
## SURV TODO: modify defaultSummary for Surv objects
if(trControl$method == "oob") {
## delay this test until later
perfNames <- metric
} else {
## run some data thru the summary function and see what we get
testSummary <- evalSummaryFunction(y, wts = weights, ctrl = trControl,
lev = classLevels, metric = metric,
method = method)
perfNames <- names(testSummary)
}
if(!(metric %in% perfNames)){
oldMetric <- metric
metric <- perfNames[1]
warning(paste("The metric \"",
oldMetric,
"\" was not in ",
"the result set. ",
metric,
" will be used instead.",
sep = ""))
}
if(trControl$method == "oob"){
tmp <- oobTrainWorkflow(x = x, y = y, wts = weights,
info = trainInfo, method = models,
ppOpts = preProcess, ctrl = trControl, lev = classLevels, ...)
performance <- tmp
perfNames <- colnames(performance)
perfNames <- perfNames[!(perfNames %in% as.character(models$parameters$parameter))]
if(!(metric %in% perfNames)){
oldMetric <- metric
metric <- perfNames[1]
warning(paste("The metric \"",
oldMetric,
"\" was not in ",
"the result set. ",
metric,
" will be used instead.",
sep = ""))
}
} else {
if(trControl$method == "LOOCV"){
tmp <- looTrainWorkflow(x = x, y = y, wts = weights,
info = trainInfo, method = models,
ppOpts = preProcess, ctrl = trControl, lev = classLevels, ...)
performance <- tmp$performance
} else {
if(!grepl("adapt", trControl$method)){
tmp <- nominalTrainWorkflow(x = x, y = y, wts = weights,
info = trainInfo, method = models,
ppOpts = preProcess, ctrl = trControl, lev = classLevels, ...)
performance <- tmp$performance
resampleResults <- tmp$resample
} else {
tmp <- adaptiveWorkflow(x = x, y = y, wts = weights,
info = trainInfo, method = models,
ppOpts = preProcess,
ctrl = trControl,
lev = classLevels,
metric = metric,
maximize = maximize,
...)
performance <- tmp$performance
resampleResults <- tmp$resample
}
}
}
## TODO we used to give resampled results for LOO
if(!(trControl$method %in% c("LOOCV", "oob"))) {
if(modelType == "Classification" && length(grep("^\\cell", colnames(resampleResults))) > 0) {
resampledCM <- resampleResults[, !(names(resampleResults) %in% perfNames)]
resampleResults <- resampleResults[, -grep("^\\cell", colnames(resampleResults))]
#colnames(resampledCM) <- gsub("^\\.", "", colnames(resampledCM))
} else resampledCM <- NULL
} else resampledCM <- NULL
if(trControl$verboseIter) {
cat("Aggregating results\n")
flush.console()
}
perfCols <- names(performance)
perfCols <- perfCols[!(perfCols %in% paramNames)]
if(all(is.na(performance[, metric]))) {
cat(paste("Something is wrong; all the", metric, "metric values are missing:\n"))
print(summary(performance[, perfCols[!grepl("SD$", perfCols)], drop = FALSE]))
stop("Stopping")
}
## Sort the tuning parameters from least complex to most complex
if(!is.null(models$sort)) performance <- models$sort(performance)
if(any(is.na(performance[, metric])))
warning("missing values found in aggregated results")
if(trControl$verboseIter && nrow(performance) > 1) {
cat("Selecting tuning parameters\n")
flush.console()
}
## select the optimal set
selectClass <- class(trControl$selectionFunction)[1]
## Select the "optimal" tuning parameter.
if(grepl("adapt", trControl$method)) {
perf_check <- subset(performance, .B == max(performance$.B))
} else perf_check <- performance
## Make adaptive only look at parameters with B = max(B)
if(selectClass == "function") {
bestIter <- trControl$selectionFunction(x = perf_check,
metric = metric,
maximize = maximize)
}
else {
if(trControl$selectionFunction == "oneSE") {
bestIter <- oneSE(perf_check,
metric,
length(trControl$index),
maximize)
} else {
bestIter <- do.call(trControl$selectionFunction,
list(x = perf_check,
metric = metric,
maximize = maximize))
}
}
if(is.na(bestIter) || length(bestIter) != 1) stop("final tuning parameters could not be determined")
if(grepl("adapt", trControl$method)) {
best_perf <- perf_check[bestIter,as.character(models$parameters$parameter),drop = FALSE]
performance$order <- 1:nrow(performance)
bestIter <- merge(performance, best_perf)$order
performance$order <- NULL
}
## Based on the optimality criterion, select the tuning parameter(s)
bestTune <- performance[bestIter, paramNames, drop = FALSE]
} else {
bestTune <- tuneGrid
performance <- evalSummaryFunction(y, wts = weights, ctrl = trControl,
lev = classLevels, metric = metric,
method = method)
perfNames <- names(performance)
performance <- as.data.frame(t(performance))
performance <- cbind(performance, tuneGrid)
performance <- performance[-1,,drop = FALSE]
tmp <- resampledCM <- NULL
}
## Save some or all of the resampling summary metrics
if(!(trControl$method %in% c("LOOCV", "oob", "none"))) {
byResample <- switch(trControl$returnResamp,
none = NULL,
all = {
out <- resampleResults
colnames(out) <- gsub("^\\.", "", colnames(out))
out
},
final = {
out <- merge(bestTune, resampleResults)
out <- out[,!(names(out) %in% names(tuneGrid)), drop = FALSE]
out
})
} else {
byResample <- NULL
}
# names(bestTune) <- paste(".", names(bestTune), sep = "")
## Reorder rows of performance
orderList <- list()
for(i in seq(along = paramNames)) orderList[[i]] <- performance[,paramNames[i]]
names(orderList) <- paramNames
performance <- performance[do.call("order", orderList),]
if(trControl$verboseIter) {
bestText <- paste(paste(names(bestTune), "=",
format(bestTune, digits = 3)),
collapse = ", ")
if(nrow(performance) == 1) bestText <- "final model"
cat("Fitting", bestText, "on full training set\n")
flush.console()
}
## Make the final model based on the tuning results
if(!(length(trControl$seeds) == 1 && is.na(trControl$seeds))) set.seed(trControl$seeds[[length(trControl$seeds)]][1])
finalTime <- system.time(
finalModel <- createModel(x = x, y = y, wts = weights,
method = models,
tuneValue = bestTune,
obsLevels = classLevels,
pp = ppOpt,
last = TRUE,
classProbs = trControl$classProbs,
sampling = trControl$sampling,
...))
if(trControl$trim && !is.null(models$trim)) {
if(trControl$verboseIter) old_size <- object.size(finalModel$fit)
finalModel$fit <- models$trim(finalModel$fit)
if(trControl$verboseIter) {
new_size <- object.size(finalModel$fit)
reduction <- format(old_size - new_size, units = "Mb")
if(reduction == "0 Mb") reduction <- "< 0 Mb"
p_reduction <- (unclass(old_size) - unclass(new_size))/unclass(old_size)*100
p_reduction <- if(p_reduction < 1) "< 1%" else paste0(round(p_reduction, 0), "%")
cat("Final model footprint reduced by", reduction, "or", p_reduction, "\n")
}
}
## get pp info
pp <- finalModel$preProc
finalModel <- finalModel$fit
## Remove this and check for other places it is reference
## replaced by tuneValue
if(method == "pls") finalModel$bestIter <- bestTune
## To use predict.train and automatically use the optimal lambda,
## we need to save it
if(method == "glmnet") finalModel$lambdaOpt <- bestTune$lambda
if(trControl$returnData) {
outData <- if(!is.data.frame(x)) try(as.data.frame(x), silent = TRUE) else x
if(class(outData)[1] == "try-error") {
warning("The training data could not be converted to a data frame for saving")
outData <- NULL
} else {
outData$.outcome <- y
if(!is.null(weights)) outData$.weights <- weights
}
} else outData <- NULL
## In the case of pam, the data will need to be saved differently
if(trControl$returnData & method == "pam") {
finalModel$xData <- x
finalModel$yData <- y
}
if(trControl$savePredictions == "final")
tmp$predictions <- merge(bestTune, tmp$predictions)
endTime <- proc.time()
times <- list(everything = endTime - startTime,
final = finalTime)
out <- structure(list(method = method,
modelInfo = models,
modelType = modelType,
results = performance,
pred = tmp$predictions,
bestTune = bestTune,
call = funcCall,
dots = list(...),
metric = metric,
control = trControl,
finalModel = finalModel,
preProcess = pp,
trainingData = outData,
resample = byResample,
resampledCM = resampledCM,
perfNames = perfNames,
maximize = maximize,
yLimits = trControl$yLimits,
times = times),
class = "train")
trControl$yLimits <- NULL
if(trControl$timingSamps > 0) {
pData <- lapply(x, function(x, n) sample(x, n, replace = TRUE), n = trControl$timingSamps)
pData <- as.data.frame(pData)
out$times$prediction <- system.time(predict(out, pData))
} else out$times$prediction <- rep(NA, 3)
out
}
train.formula <- function (form, data, ..., weights, subset, na.action = na.fail, contrasts = NULL) {
  ## Formula interface to train(): builds the model frame and design matrix,
  ## drops the intercept column, and delegates to the default method.
  m <- match.call(expand.dots = FALSE)
  if (is.matrix(eval.parent(m$data))) m$data <- as.data.frame(data)
  m$... <- m$contrasts <- NULL
  m[[1]] <- as.name("model.frame")
  m <- eval.parent(m)
  ## With the default na.action (na.fail) this is unreachable; with na.omit /
  ## na.exclude an empty model frame means every row had a missing value.
  if(nrow(m) < 1) stop("Every row of the data has at least one missing value")
  Terms <- attr(m, "terms")
  x <- model.matrix(Terms, m, contrasts, na.action = na.action)
  cons <- attr(x, "contrast")
  ## Remove the intercept column; the underlying model functions add their
  ## own intercepts as needed.
  xint <- match("(Intercept)", colnames(x), nomatch = 0)
  if (xint > 0) x <- x[, -xint, drop = FALSE]
  y <- model.response(m)
  w <- as.vector(model.weights(m))
  res <- train(x, y, weights = w, ...)
  ## Attach formula-related metadata so predict()/print() can rebuild the
  ## design matrix for new data.
  res$terms <- Terms
  res$coefnames <- colnames(x)
  res$call <- match.call()
  res$na.action <- attr(m, "na.action")
  res$contrasts <- cons
  res$xlevels <- .getXlevels(Terms, m)
  if(!is.null(res$trainingData)) {
    ## NOTE(review): the full `data` is stored here, ignoring `subset` and any
    ## rows removed by `na.action` -- confirm this is intended.
    res$trainingData <- data
    isY <- names(res$trainingData) %in% as.character(form[[2]])
    if(any(isY)) colnames(res$trainingData)[isY] <- ".outcome"
  }
  class(res) <- c("train", "train.formula")
  res
}
summary.train <- function(object, ...) {
  ## Forward to the summary method of the underlying fitted model.
  summary(object$finalModel, ...)
}
residuals.train <- function(object, ...) {
  ## Residuals are only defined for numeric outcomes.
  if (object$modelType != "Regression")
    stop("train() only produces residuals on numeric outcomes")
  ## Prefer the residuals reported by the underlying fit, when available.
  out <- residuals(object$finalModel, ...)
  if (!is.null(out)) return(out)
  ## Otherwise recompute them as observed minus predicted on the saved
  ## training data.
  if (is.null(object$trainingData))
    stop("The training data must be saved to produce residuals")
  train_df <- object$trainingData
  pred_cols <- names(train_df) != ".outcome"
  train_df$.outcome - predict(object, train_df[, pred_cols, drop = FALSE])
}
fitted.train <- function(object, ...) {
  ## Prefer the fitted values reported by the underlying model fit.
  out <- fitted(object$finalModel)
  if (!is.null(out)) return(out)
  ## Otherwise recompute them by predicting on the saved training data.
  if (is.null(object$trainingData))
    stop("The training data must be saved to produce fitted values")
  train_df <- object$trainingData
  predict(object, train_df[, names(train_df) != ".outcome", drop = FALSE])
}
|
# Build the CONANP management-category table and the bounding-box extents
# for each ANP (protected natural area).
library(tidyverse)
library(sf)
# Per-ANP ecoregion areas, precomputed upstream.
anp_eco_df <- read_rds("datos_procesados/area_ecorregion/2018-08-08_ecorregion.RData")
# For every ANP keep its dominant ecoregion (largest share of area) and carry
# the total terrestrial surface (S_TERRES) forward as `hectareas`.
anp_ha <- anp_eco_df %>%
group_by(anp) %>%
mutate(
p_area_eco = (hectareas / sum(hectareas)) * 100,
hectareas = first(S_TERRES)
) %>%
top_n(1, p_area_eco)
# Read the management category from the shapefiles.
path_anps_shp <- "datos_insumo/shapes_anp/anp_sinBuffer"
anps_shp <- list.files(path_anps_shp,
pattern = ".shp", recursive = FALSE) %>%
tools::file_path_sans_ext()
names(anps_shp) <- anps_shp
# CAT_MANEJO / REGION of the first feature of each shapefile, reshaped into
# long (anp, value) tables.
manejo_anp <- map_df(anps_shp, ~st_read(str_c(path_anps_shp, "/", ., ".shp"),
stringsAsFactors = FALSE)$CAT_MANEJO[1]) %>%
gather(anp, cat_manejo)
region_anp <- map_df(anps_shp, ~st_read(str_c(path_anps_shp, "/", ., ".shp"),
stringsAsFactors = FALSE)$REGION[1]) %>%
gather(anp, region)
# Normalise regional-directorate labels: strip the common prefix and collapse
# the two double-barrelled names to their first component.
region_anp <- region_anp %>%
mutate(
region = str_replace(region, "Dirección Regional ", ""),
region = ifelse(region == "Norte y Sierra Madre Occidental; Occidente y Pacífico Centro",
"Norte y Sierra Madre Occidental", region),
region = ifelse(region == "Península de Baja California y Pacífico Norte; Noroeste y Alto Golfo de California",
"Península de Baja California y Pacífico Norte", region)
)
manejo_ha_anp <- anp_ha %>% left_join(manejo_anp) %>% left_join(region_anp)
# Size terciles over total hectares.
manejo_ha_anp$tamano <- Hmisc::cut2(manejo_ha_anp$hectareas, g = 3)
glimpse(manejo_ha_anp)
# write_csv(manejo_ha_anp, "datos_procesados/2018-08-10_manejo_ha_anp.csv")
# Compute the bounding box (extent) of each ANP shapefile.
extent_anp <- map_df(anps_shp, function(x){
bbox <- st_read(str_c(path_anps_shp, "/", x, ".shp"),
stringsAsFactors = FALSE) %>%
st_bbox()
data.frame(xmin = bbox["xmin"], xmax = bbox["xmax"], ymin = bbox["ymin"],
ymax = bbox["ymax"])
}, .id = "anp")
manejo_ha_extent_anp <- manejo_ha_anp %>%
left_join(extent_anp)
write_csv(manejo_ha_extent_anp, "datos_procesados/2019-01-15_manejo_ha_extent_anp.csv")
nombres <- readr::read_delim("datos_insumo/anp_nombres.tsv", "\t",
escape_double = FALSE, trim_ws = TRUE)
# NOTE(review): `nombres_anp` is referenced here but only defined further
# below (the map_df at the end of this script) -- as written this block fails
# on a clean run; presumably it was executed interactively out of order.
# Verify the intended execution order.
nombres_anp <- nombres %>%
left_join(nombres_anp, by = c("anp_sin_acentos" = "anp")) %>%
select(id_07, anp = anp_sin_acentos, nombre)
# Geometry-error checks.
path_anps_shp <- "datos_insumo/shapes_anp/anp_sinBuffer"
# For the ring (buffer) shapes use instead:
# path_anps_shp <- "../datos_insumo/shapes_anp/anp_rings"
anps_shp <- list.files(path_anps_shp,
pattern = ".shp", recursive = FALSE) %>%
tools::file_path_sans_ext()
names(anps_shp) <- anps_shp
# Pull (id_07, nombre) from each shapefile's attribute table.
nombres_anp <- map_df(anps_shp, function(x){
sf_anp <- st_read(str_c(path_anps_shp, "/", x, ".shp"),
stringsAsFactors = FALSE) %>%
as_data_frame() %>%
select(id_07 = ID_07, nombre = NOMBRE)}, .id = "anp")
nombres_anp %>% filter(duplicated(nombres_anp$anp))
| /preprocesamiento/crear_tabla_categorias_extents.R | no_license | tereom/anp_reporte | R | false | false | 2,949 | r | # crear tabla de categorías CONANP y extents ANPs
library(tidyverse)
library(sf)
anp_eco_df <- read_rds("datos_procesados/area_ecorregion/2018-08-08_ecorregion.RData")
anp_ha <- anp_eco_df %>%
group_by(anp) %>%
mutate(
p_area_eco = (hectareas / sum(hectareas)) * 100,
hectareas = first(S_TERRES)
) %>%
top_n(1, p_area_eco)
# obtener categoría de manejo de los shapes
path_anps_shp <- "datos_insumo/shapes_anp/anp_sinBuffer"
anps_shp <- list.files(path_anps_shp,
pattern = ".shp", recursive = FALSE) %>%
tools::file_path_sans_ext()
names(anps_shp) <- anps_shp
manejo_anp <- map_df(anps_shp, ~st_read(str_c(path_anps_shp, "/", ., ".shp"),
stringsAsFactors = FALSE)$CAT_MANEJO[1]) %>%
gather(anp, cat_manejo)
region_anp <- map_df(anps_shp, ~st_read(str_c(path_anps_shp, "/", ., ".shp"),
stringsAsFactors = FALSE)$REGION[1]) %>%
gather(anp, region)
region_anp <- region_anp %>%
mutate(
region = str_replace(region, "Dirección Regional ", ""),
region = ifelse(region == "Norte y Sierra Madre Occidental; Occidente y Pacífico Centro",
"Norte y Sierra Madre Occidental", region),
region = ifelse(region == "Península de Baja California y Pacífico Norte; Noroeste y Alto Golfo de California",
"Península de Baja California y Pacífico Norte", region)
)
manejo_ha_anp <- anp_ha %>% left_join(manejo_anp) %>% left_join(region_anp)
manejo_ha_anp$tamano <- Hmisc::cut2(manejo_ha_anp$hectareas, g = 3)
glimpse(manejo_ha_anp)
# write_csv(manejo_ha_anp, "datos_procesados/2018-08-10_manejo_ha_anp.csv")
# obtener extent
extent_anp <- map_df(anps_shp, function(x){
bbox <- st_read(str_c(path_anps_shp, "/", x, ".shp"),
stringsAsFactors = FALSE) %>%
st_bbox()
data.frame(xmin = bbox["xmin"], xmax = bbox["xmax"], ymin = bbox["ymin"],
ymax = bbox["ymax"])
}, .id = "anp")
manejo_ha_extent_anp <- manejo_ha_anp %>%
left_join(extent_anp)
write_csv(manejo_ha_extent_anp, "datos_procesados/2019-01-15_manejo_ha_extent_anp.csv")
nombres <- readr::read_delim("datos_insumo/anp_nombres.tsv", "\t",
escape_double = FALSE, trim_ws = TRUE)
nombres_anp <- nombres %>%
left_join(nombres_anp, by = c("anp_sin_acentos" = "anp")) %>%
select(id_07, anp = anp_sin_acentos, nombre)
# errores en geometrías
path_anps_shp <- "datos_insumo/shapes_anp/anp_sinBuffer"
# para anillos
# path_anps_shp <- "../datos_insumo/shapes_anp/anp_rings"
anps_shp <- list.files(path_anps_shp,
pattern = ".shp", recursive = FALSE) %>%
tools::file_path_sans_ext()
names(anps_shp) <- anps_shp
nombres_anp <- map_df(anps_shp, function(x){
sf_anp <- st_read(str_c(path_anps_shp, "/", x, ".shp"),
stringsAsFactors = FALSE) %>%
as_data_frame() %>%
select(id_07 = ID_07, nombre = NOMBRE)}, .id = "anp")
nombres_anp %>% filter(duplicated(nombres_anp$anp))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MIQ.R
\name{MIQ}
\alias{MIQ}
\title{MIQ}
\usage{
MIQ(
label = "MIQ",
num_items = 5,
with_welcome = TRUE,
take_training = FALSE,
feedback_page = NULL,
with_finish = FALSE,
next_item.criterion = "MFI",
next_item.estimator = "BM",
next_item.prior_dist = "norm",
next_item.prior_par = c(0, 1),
final_ability.estimator = "WL",
constrain_answers = FALSE,
eligible_first_items = c(3),
dict = MIQ::MIQ_dict
)
}
\arguments{
\item{label}{(Character scalar) Label to give the MIQ results in the output file.}
\item{num_items}{(Integer scalar) Number of items in the test.}
\item{with_welcome}{(Logical scalar) Whether to display a welcome page.
Defaults to TRUE}
\item{take_training}{(Logical scalar) Whether to include the training phase.
Defaults to FALSE.}
\item{feedback_page}{(Function) Defines a feedback page function for displaying
the results to the participant at the end of the test. Defaults to NULL.
Possible feedback page functions include \code{"feedback_with_score()"}, and
\code{"feedback_with_graph()"}.}
\item{with_finish}{(Logical scalar) Whether to display a finish page.
Defaults to FALSE}
\item{next_item.criterion}{(Character scalar)
Criterion for selecting successive items in the adaptive test.
See the \code{criterion} argument in \code{\link[catR]{nextItem}} for possible values.
Defaults to \code{"MFI"}.}
\item{next_item.estimator}{(Character scalar)
Ability estimation method used for selecting successive items in the adaptive test.
See the \code{method} argument in \code{\link[catR]{thetaEst}} for possible values.
\code{"BM"}, Bayes modal,
corresponds to the setting used in the original MPT paper.
\code{"WL"}, weighted likelihood,
corresponds to the default setting used in versions <= 0.2.0 of this package.}
\item{next_item.prior_dist}{(Character scalar)
The type of prior distribution to use when calculating ability estimates
for item selection.
Ignored if \code{next_item.estimator} is not a Bayesian method.
Defaults to \code{"norm"} for a normal distribution.
See the \code{priorDist} argument in \code{\link[catR]{thetaEst}} for possible values.}
\item{next_item.prior_par}{(Numeric vector, length 2)
Parameters for the prior distribution;
see the \code{priorPar} argument in \code{\link[catR]{thetaEst}} for details.
Ignored if \code{next_item.estimator} is not a Bayesian method.
The default is \code{c(0, 1)}.}
\item{final_ability.estimator}{Estimation method used for the final ability estimate.
See the \code{method} argument in \code{\link[catR]{thetaEst}} for possible values.
The default is \code{"WL"}, weighted likelihood.
If a Bayesian method is chosen, its prior distribution will be defined
by the \code{next_item.prior_dist} and \code{next_item.prior_par} arguments.}
\item{constrain_answers}{(Logical scalar)
If \code{TRUE}, then item selection will be constrained so that the
correct answers are distributed as evenly as possible over the course of the test.
We recommend leaving this option disabled.}
\item{eligible_first_items}{(NULL or integerish vector) If not NULL, lists the eligible items for the first item
in the test, where each item is identified by its 1-indexed row number in item_bank
(see adapt_test). For example, c(2, 3, 4) means that the first item will be drawn
from rows 2, 3, 4 of the item bank). Default is \code{c(3)} (the third item).}
\item{dict}{(i18n_dict) The psychTestR dictionary used for internationalisation.}
}
\description{
This function defines a MIQ module for incorporation into a
psychTestR timeline.
Use this function if you want to include MIQ in a battery of other tests, or
if you want to add custom psychTestR pages to your test timeline.
For demoing the MIQ, consider using \code{\link{MIQ_demo}()}.
For a standalone implementation of the MIQ, consider using
\code{\link{MIQ_standalone}()}.
}
| /man/MIQ.Rd | permissive | ViolaPsch/MIQ | R | false | true | 3,929 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MIQ.R
\name{MIQ}
\alias{MIQ}
\title{MIQ}
\usage{
MIQ(
label = "MIQ",
num_items = 5,
with_welcome = TRUE,
take_training = FALSE,
feedback_page = NULL,
with_finish = FALSE,
next_item.criterion = "MFI",
next_item.estimator = "BM",
next_item.prior_dist = "norm",
next_item.prior_par = c(0, 1),
final_ability.estimator = "WL",
constrain_answers = FALSE,
eligible_first_items = c(3),
dict = MIQ::MIQ_dict
)
}
\arguments{
\item{label}{(Character scalar) Label to give the MIQ results in the output file.}
\item{num_items}{(Integer scalar) Number of items in the test.}
\item{with_welcome}{(Logical scalar) Whether to display a welcome page.
Defaults to TRUE}
\item{take_training}{(Logical scalar) Whether to include the training phase.
Defaults to FALSE.}
\item{feedback_page}{(Function) Defines a feedback page function for displaying
the results to the participant at the end of the test. Defaults to NULL.
Possible feedback page functions include \code{"feedback_with_score()"}, and
\code{"feedback_with_graph()"}.}
\item{with_finish}{(Logical scalar) Whether to display a finish page.
Defaults to FALSE}
\item{next_item.criterion}{(Character scalar)
Criterion for selecting successive items in the adaptive test.
See the \code{criterion} argument in \code{\link[catR]{nextItem}} for possible values.
Defaults to \code{"MFI"}.}
\item{next_item.estimator}{(Character scalar)
Ability estimation method used for selecting successive items in the adaptive test.
See the \code{method} argument in \code{\link[catR]{thetaEst}} for possible values.
\code{"BM"}, Bayes modal,
corresponds to the setting used in the original MPT paper.
\code{"WL"}, weighted likelihood,
corresponds to the default setting used in versions <= 0.2.0 of this package.}
\item{next_item.prior_dist}{(Character scalar)
The type of prior distribution to use when calculating ability estimates
for item selection.
Ignored if \code{next_item.estimator} is not a Bayesian method.
Defaults to \code{"norm"} for a normal distribution.
See the \code{priorDist} argument in \code{\link[catR]{thetaEst}} for possible values.}
\item{next_item.prior_par}{(Numeric vector, length 2)
Parameters for the prior distribution;
see the \code{priorPar} argument in \code{\link[catR]{thetaEst}} for details.
Ignored if \code{next_item.estimator} is not a Bayesian method.
The default is \code{c(0, 1)}.}
\item{final_ability.estimator}{Estimation method used for the final ability estimate.
See the \code{method} argument in \code{\link[catR]{thetaEst}} for possible values.
The default is \code{"WL"}, weighted likelihood.
If a Bayesian method is chosen, its prior distribution will be defined
by the \code{next_item.prior_dist} and \code{next_item.prior_par} arguments.}
\item{constrain_answers}{(Logical scalar)
If \code{TRUE}, then item selection will be constrained so that the
correct answers are distributed as evenly as possible over the course of the test.
We recommend leaving this option disabled.}
\item{eligible_first_items}{(NULL or integerish vector)
If not NULL, lists the eligible items for the first item
in the test, where each item is identified by its 1-indexed row number in item_bank
(see adapt_test). For example, c(2, 3, 4) means that the first item will be drawn
from rows 2, 3, 4 of the item bank. Default is \code{c(3)} (the third item).}
\item{dict}{(i18n_dict) The psychTestR dictionary used for internationalisation.}
}
\description{
This function defines a MIQ module for incorporation into a
psychTestR timeline.
Use this function if you want to include MIQ in a battery of other tests, or
if you want to add custom psychTestR pages to your test timeline.
For demoing the MIQ, consider using \code{\link{MIQ_demo}()}.
For a standalone implementation of the MIQ, consider using
\code{\link{MIQ_standalone}()}.
}
|
Stn = 'S1'
model = 'Geidersimple'
#Read best parameters:
filedir = paste('~/working/FlexEFT1D/DRAM_0.2/',
Stn,'/',model,sep='')
setwd(filedir)
bestpar = read.table('bestpar',skip=5)
| /Rscripts/mu_theta_response.R | no_license | Marinov-Ocean-Group/FlexEFT1D | R | false | false | 209 | r | Stn = 'S1'
model = 'Geidersimple'
#Read best parameters:
filedir = paste('~/working/FlexEFT1D/DRAM_0.2/',
Stn,'/',model,sep='')
setwd(filedir)
bestpar = read.table('bestpar',skip=5)
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## A pair of functions that cache the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y){
x <<- y
inv <<- NULL
}
get <- function() x
setInverse <- function(solveMatrix) inv <<- solveMatrix
getInverse <- function() inv
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Write a short comment describing this function
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if(!is.null(inv)){
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setInverse(inv)
inv
}
| /cachematrix.R | no_license | asfandyar78/ProgrammingAssignment2 | R | false | false | 988 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## A pair of functions that cache the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y){
x <<- y
inv <<- NULL
}
get <- function() x
setInverse <- function(solveMatrix) inv <<- solveMatrix
getInverse <- function() inv
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Write a short comment describing this function
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if(!is.null(inv)){
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setInverse(inv)
inv
}
|
#' @title Weighted standard error
#' @description A bootstrapped weighted standard error for fast polarisations
#' @param summ Dataframe containing Castelazzi graded events (CZ_*.summ)
#' @param weights A vector containing the weights with length equal to the number of filters used (usually 3) in order with the first corresponding to F1
#' @param seed A random number seed
#' @param iter Number of iterations
#' @return The circular standard error in degrees
#' @details This function can also be run with a custom weight for each measurement by setting them with weights. Or, for the unweighted version, set weights=rep(1,length(summ$fast)).
#' @export
stde.weighted <- function(summ,weights=c(1,2,3),seed=NULL,iter=9999) {
source("~/paper/R/weightedmean.R")
require(parallel)
if (is.null(seed)) {
}else{
set.seed(seed)
}
n <- length(summ$fast)
for (j in 1:iter){
samp <- sample(1:n,size=n,replace=TRUE)
fast <- as.data.frame(summ$fast[c(samp)])
finalgrade <- as.data.frame(summ$finalgrade[c(samp)])
fsumm <- cbind(fast,finalgrade)
colnames(fsumm) <- c("fast","finalgrade")
m <- fast.weighted(fsumm,weights=weights)$mean
if (j == 1) {
means <- m
} else {
means <- c(means,m)
}
}
sd <- sd.circular(means*2)
#sd <- asin(sd)
sd <- deg(sd/2)
return(sd)
} | /R/fn_stde.R | no_license | shearwavesplitter/MFASTR | R | false | false | 1,291 | r | #' @title Weighted standard error
#' @description A bootstrapped weighted standard error for fast polarisations
#' @param summ Dataframe containing Castelazzi graded events (CZ_*.summ)
#' @param weights A vector containing the weights with length equal to the number of filters used (usually 3) in order with the first corresponding to F1
#' @param seed A random number seed
#' @param iter Number of iterations
#' @return The circular standard error in degrees
#' @details This function can also be run with a custom weight for each measurement by setting them with weights. Or, for the unweighted version, set weights=rep(1,length(summ$fast)).
#' @export
stde.weighted <- function(summ,weights=c(1,2,3),seed=NULL,iter=9999) {
source("~/paper/R/weightedmean.R")
require(parallel)
if (is.null(seed)) {
}else{
set.seed(seed)
}
n <- length(summ$fast)
for (j in 1:iter){
samp <- sample(1:n,size=n,replace=TRUE)
fast <- as.data.frame(summ$fast[c(samp)])
finalgrade <- as.data.frame(summ$finalgrade[c(samp)])
fsumm <- cbind(fast,finalgrade)
colnames(fsumm) <- c("fast","finalgrade")
m <- fast.weighted(fsumm,weights=weights)$mean
if (j == 1) {
means <- m
} else {
means <- c(means,m)
}
}
sd <- sd.circular(means*2)
#sd <- asin(sd)
sd <- deg(sd/2)
return(sd)
} |
### Exercise 2 ###
library(shiny)
# We'll look into these more next week: http://shiny.rstudio.com/gallery/widget-gallery.html
# Create a shiny server that creates a scatterplot.
# It should takes as an input the number of observations, and a color
# It should return a rendered plot
shinyServer(function(input, output) {
# Save a 'scatter' property which is a renderPlot object (that renders a scatterplot)
output$scatterplot <- renderPlot({
x <- rnorm(intput$num)
y <- rnorm(input$num)
return(plot(x, y, col=input$color))
})
}) | /exercise-2/server.R | permissive | holzealy/m18-shiny | R | false | false | 553 | r | ### Exercise 2 ###
library(shiny)
# We'll look into these more next week: http://shiny.rstudio.com/gallery/widget-gallery.html
# Create a shiny server that creates a scatterplot.
# It should takes as an input the number of observations, and a color
# It should return a rendered plot
shinyServer(function(input, output) {
# Save a 'scatter' property which is a renderPlot object (that renders a scatterplot)
output$scatterplot <- renderPlot({
x <- rnorm(intput$num)
y <- rnorm(input$num)
return(plot(x, y, col=input$color))
})
}) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{tfd_cholesky_lkj}
\alias{tfd_cholesky_lkj}
\title{The CholeskyLKJ distribution on cholesky factors of correlation matrices}
\usage{
tfd_cholesky_lkj(
dimension,
concentration,
validate_args = FALSE,
allow_nan_stats = TRUE,
name = "CholeskyLKJ"
)
}
\arguments{
\item{dimension}{\code{integer}. The dimension of the correlation matrices
to sample.}
\item{concentration}{\code{float} or \code{double} \code{Tensor}. The positive concentration
parameter of the CholeskyLKJ distributions.}
\item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
silently render incorrect outputs. Default value: FALSE.}
\item{allow_nan_stats}{Logical, default TRUE. When TRUE, statistics (e.g., mean, mode, variance)
use the value NaN to indicate the result is undefined. When FALSE, an exception is raised if
one or more of the statistic's batch members are undefined.}
\item{name}{name prefixed to Ops created by this class.}
}
\value{
a distribution instance.
}
\description{
This is a one-parameter family of distributions on cholesky factors of
correlation matrices.
In other words, if \code{X ~ CholeskyLKJ(c)}, then \code{X @ X^T ~ LKJ(c)}.
For more details on the LKJ distribution, see \code{tfd_lkj}.
}
\seealso{
For usage examples see e.g. \code{\link[=tfd_sample]{tfd_sample()}}, \code{\link[=tfd_log_prob]{tfd_log_prob()}}, \code{\link[=tfd_mean]{tfd_mean()}}.
Other distributions:
\code{\link{tfd_autoregressive}()},
\code{\link{tfd_batch_reshape}()},
\code{\link{tfd_bates}()},
\code{\link{tfd_bernoulli}()},
\code{\link{tfd_beta_binomial}()},
\code{\link{tfd_beta}()},
\code{\link{tfd_binomial}()},
\code{\link{tfd_categorical}()},
\code{\link{tfd_cauchy}()},
\code{\link{tfd_chi2}()},
\code{\link{tfd_chi}()},
\code{\link{tfd_continuous_bernoulli}()},
\code{\link{tfd_deterministic}()},
\code{\link{tfd_dirichlet_multinomial}()},
\code{\link{tfd_dirichlet}()},
\code{\link{tfd_empirical}()},
\code{\link{tfd_exp_gamma}()},
\code{\link{tfd_exp_inverse_gamma}()},
\code{\link{tfd_exponential}()},
\code{\link{tfd_gamma_gamma}()},
\code{\link{tfd_gamma}()},
\code{\link{tfd_gaussian_process_regression_model}()},
\code{\link{tfd_gaussian_process}()},
\code{\link{tfd_generalized_normal}()},
\code{\link{tfd_geometric}()},
\code{\link{tfd_gumbel}()},
\code{\link{tfd_half_cauchy}()},
\code{\link{tfd_half_normal}()},
\code{\link{tfd_hidden_markov_model}()},
\code{\link{tfd_horseshoe}()},
\code{\link{tfd_independent}()},
\code{\link{tfd_inverse_gamma}()},
\code{\link{tfd_inverse_gaussian}()},
\code{\link{tfd_johnson_s_u}()},
\code{\link{tfd_joint_distribution_named_auto_batched}()},
\code{\link{tfd_joint_distribution_named}()},
\code{\link{tfd_joint_distribution_sequential_auto_batched}()},
\code{\link{tfd_joint_distribution_sequential}()},
\code{\link{tfd_kumaraswamy}()},
\code{\link{tfd_laplace}()},
\code{\link{tfd_linear_gaussian_state_space_model}()},
\code{\link{tfd_lkj}()},
\code{\link{tfd_log_logistic}()},
\code{\link{tfd_log_normal}()},
\code{\link{tfd_logistic}()},
\code{\link{tfd_mixture_same_family}()},
\code{\link{tfd_mixture}()},
\code{\link{tfd_multinomial}()},
\code{\link{tfd_multivariate_normal_diag_plus_low_rank}()},
\code{\link{tfd_multivariate_normal_diag}()},
\code{\link{tfd_multivariate_normal_full_covariance}()},
\code{\link{tfd_multivariate_normal_linear_operator}()},
\code{\link{tfd_multivariate_normal_tri_l}()},
\code{\link{tfd_multivariate_student_t_linear_operator}()},
\code{\link{tfd_negative_binomial}()},
\code{\link{tfd_normal}()},
\code{\link{tfd_one_hot_categorical}()},
\code{\link{tfd_pareto}()},
\code{\link{tfd_pixel_cnn}()},
\code{\link{tfd_poisson_log_normal_quadrature_compound}()},
\code{\link{tfd_poisson}()},
\code{\link{tfd_power_spherical}()},
\code{\link{tfd_probit_bernoulli}()},
\code{\link{tfd_quantized}()},
\code{\link{tfd_relaxed_bernoulli}()},
\code{\link{tfd_relaxed_one_hot_categorical}()},
\code{\link{tfd_sample_distribution}()},
\code{\link{tfd_sinh_arcsinh}()},
\code{\link{tfd_skellam}()},
\code{\link{tfd_spherical_uniform}()},
\code{\link{tfd_student_t_process}()},
\code{\link{tfd_student_t}()},
\code{\link{tfd_transformed_distribution}()},
\code{\link{tfd_triangular}()},
\code{\link{tfd_truncated_cauchy}()},
\code{\link{tfd_truncated_normal}()},
\code{\link{tfd_uniform}()},
\code{\link{tfd_variational_gaussian_process}()},
\code{\link{tfd_vector_diffeomixture}()},
\code{\link{tfd_vector_exponential_diag}()},
\code{\link{tfd_vector_exponential_linear_operator}()},
\code{\link{tfd_vector_laplace_diag}()},
\code{\link{tfd_vector_laplace_linear_operator}()},
\code{\link{tfd_vector_sinh_arcsinh_diag}()},
\code{\link{tfd_von_mises_fisher}()},
\code{\link{tfd_von_mises}()},
\code{\link{tfd_weibull}()},
\code{\link{tfd_wishart_linear_operator}()},
\code{\link{tfd_wishart_tri_l}()},
\code{\link{tfd_wishart}()},
\code{\link{tfd_zipf}()}
}
\concept{distributions}
| /man/tfd_cholesky_lkj.Rd | no_license | cran/tfprobability | R | false | true | 5,088 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{tfd_cholesky_lkj}
\alias{tfd_cholesky_lkj}
\title{The CholeskyLKJ distribution on cholesky factors of correlation matrices}
\usage{
tfd_cholesky_lkj(
dimension,
concentration,
validate_args = FALSE,
allow_nan_stats = TRUE,
name = "CholeskyLKJ"
)
}
\arguments{
\item{dimension}{\code{integer}. The dimension of the correlation matrices
to sample.}
\item{concentration}{\code{float} or \code{double} \code{Tensor}. The positive concentration
parameter of the CholeskyLKJ distributions.}
\item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
silently render incorrect outputs. Default value: FALSE.}
\item{allow_nan_stats}{Logical, default TRUE. When TRUE, statistics (e.g., mean, mode, variance)
use the value NaN to indicate the result is undefined. When FALSE, an exception is raised if
one or more of the statistic's batch members are undefined.}
\item{name}{name prefixed to Ops created by this class.}
}
\value{
a distribution instance.
}
\description{
This is a one-parameter family of distributions on cholesky factors of
correlation matrices.
In other words, if \code{X ~ CholeskyLKJ(c)}, then \code{X @ X^T ~ LKJ(c)}.
For more details on the LKJ distribution, see \code{tfd_lkj}.
}
\seealso{
For usage examples see e.g. \code{\link[=tfd_sample]{tfd_sample()}}, \code{\link[=tfd_log_prob]{tfd_log_prob()}}, \code{\link[=tfd_mean]{tfd_mean()}}.
Other distributions:
\code{\link{tfd_autoregressive}()},
\code{\link{tfd_batch_reshape}()},
\code{\link{tfd_bates}()},
\code{\link{tfd_bernoulli}()},
\code{\link{tfd_beta_binomial}()},
\code{\link{tfd_beta}()},
\code{\link{tfd_binomial}()},
\code{\link{tfd_categorical}()},
\code{\link{tfd_cauchy}()},
\code{\link{tfd_chi2}()},
\code{\link{tfd_chi}()},
\code{\link{tfd_continuous_bernoulli}()},
\code{\link{tfd_deterministic}()},
\code{\link{tfd_dirichlet_multinomial}()},
\code{\link{tfd_dirichlet}()},
\code{\link{tfd_empirical}()},
\code{\link{tfd_exp_gamma}()},
\code{\link{tfd_exp_inverse_gamma}()},
\code{\link{tfd_exponential}()},
\code{\link{tfd_gamma_gamma}()},
\code{\link{tfd_gamma}()},
\code{\link{tfd_gaussian_process_regression_model}()},
\code{\link{tfd_gaussian_process}()},
\code{\link{tfd_generalized_normal}()},
\code{\link{tfd_geometric}()},
\code{\link{tfd_gumbel}()},
\code{\link{tfd_half_cauchy}()},
\code{\link{tfd_half_normal}()},
\code{\link{tfd_hidden_markov_model}()},
\code{\link{tfd_horseshoe}()},
\code{\link{tfd_independent}()},
\code{\link{tfd_inverse_gamma}()},
\code{\link{tfd_inverse_gaussian}()},
\code{\link{tfd_johnson_s_u}()},
\code{\link{tfd_joint_distribution_named_auto_batched}()},
\code{\link{tfd_joint_distribution_named}()},
\code{\link{tfd_joint_distribution_sequential_auto_batched}()},
\code{\link{tfd_joint_distribution_sequential}()},
\code{\link{tfd_kumaraswamy}()},
\code{\link{tfd_laplace}()},
\code{\link{tfd_linear_gaussian_state_space_model}()},
\code{\link{tfd_lkj}()},
\code{\link{tfd_log_logistic}()},
\code{\link{tfd_log_normal}()},
\code{\link{tfd_logistic}()},
\code{\link{tfd_mixture_same_family}()},
\code{\link{tfd_mixture}()},
\code{\link{tfd_multinomial}()},
\code{\link{tfd_multivariate_normal_diag_plus_low_rank}()},
\code{\link{tfd_multivariate_normal_diag}()},
\code{\link{tfd_multivariate_normal_full_covariance}()},
\code{\link{tfd_multivariate_normal_linear_operator}()},
\code{\link{tfd_multivariate_normal_tri_l}()},
\code{\link{tfd_multivariate_student_t_linear_operator}()},
\code{\link{tfd_negative_binomial}()},
\code{\link{tfd_normal}()},
\code{\link{tfd_one_hot_categorical}()},
\code{\link{tfd_pareto}()},
\code{\link{tfd_pixel_cnn}()},
\code{\link{tfd_poisson_log_normal_quadrature_compound}()},
\code{\link{tfd_poisson}()},
\code{\link{tfd_power_spherical}()},
\code{\link{tfd_probit_bernoulli}()},
\code{\link{tfd_quantized}()},
\code{\link{tfd_relaxed_bernoulli}()},
\code{\link{tfd_relaxed_one_hot_categorical}()},
\code{\link{tfd_sample_distribution}()},
\code{\link{tfd_sinh_arcsinh}()},
\code{\link{tfd_skellam}()},
\code{\link{tfd_spherical_uniform}()},
\code{\link{tfd_student_t_process}()},
\code{\link{tfd_student_t}()},
\code{\link{tfd_transformed_distribution}()},
\code{\link{tfd_triangular}()},
\code{\link{tfd_truncated_cauchy}()},
\code{\link{tfd_truncated_normal}()},
\code{\link{tfd_uniform}()},
\code{\link{tfd_variational_gaussian_process}()},
\code{\link{tfd_vector_diffeomixture}()},
\code{\link{tfd_vector_exponential_diag}()},
\code{\link{tfd_vector_exponential_linear_operator}()},
\code{\link{tfd_vector_laplace_diag}()},
\code{\link{tfd_vector_laplace_linear_operator}()},
\code{\link{tfd_vector_sinh_arcsinh_diag}()},
\code{\link{tfd_von_mises_fisher}()},
\code{\link{tfd_von_mises}()},
\code{\link{tfd_weibull}()},
\code{\link{tfd_wishart_linear_operator}()},
\code{\link{tfd_wishart_tri_l}()},
\code{\link{tfd_wishart}()},
\code{\link{tfd_zipf}()}
}
\concept{distributions}
|
library(shiny)
server <- function(input, output, session) {
# Now if you change the slider only the slider result changes
# and the text box result stays the same. This is because
# we isolated the reactive values in their own reactive function
txt <- reactive({paste(input$mytext, sample(1:100, 1))})
val <- reactive({paste(input$myslider, sample(1:100, 1), sep="-")})
observe({
res <- paste0(txt(), " | Slider ", val())
updateTextInput(session, "myresults", value = res)
})
}
ui <- basicPage(
h3("Changes to the text box and slider are separated so that a change to the text box will not affect the slider part of the results textbox"),
sliderInput("myslider", "A slider:", min=0, max=1000, value=500),
textInput("mytext", "Input goes here", "Text"),
textInput("myresults", "Results will be printed here", "Initial value")
)
shinyApp(ui = ui, server = server)
| /ReactiveKeepSeparated/app.R | no_license | boostbob/shiny_examples | R | false | false | 901 | r | library(shiny)
server <- function(input, output, session) {
# Now if you change the slider only the slider result changes
# and the text box result stays the same. This is because
# we isolated the reactive values in their own reactive function
txt <- reactive({paste(input$mytext, sample(1:100, 1))})
val <- reactive({paste(input$myslider, sample(1:100, 1), sep="-")})
observe({
res <- paste0(txt(), " | Slider ", val())
updateTextInput(session, "myresults", value = res)
})
}
ui <- basicPage(
h3("Changes to the text box and slider are separated so that a change to the text box will not affect the slider part of the results textbox"),
sliderInput("myslider", "A slider:", min=0, max=1000, value=500),
textInput("mytext", "Input goes here", "Text"),
textInput("myresults", "Results will be printed here", "Initial value")
)
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{fit}
\alias{fit}
\title{Train a Keras model}
\usage{
fit(object, x, y, batch_size = 32, epochs = 10, verbose = 1,
callbacks = NULL, view_metrics = getOption("keras.view_metrics", default =
"auto"), validation_split = 0, validation_data = NULL, shuffle = TRUE,
class_weight = NULL, sample_weight = NULL, initial_epoch = 0, ...)
}
\arguments{
\item{object}{Model to train.}
\item{x}{Vector, matrix, or array of training data (or list if the model has
multiple inputs). If all inputs in the model are named, you can also pass a
list mapping input names to data.}
\item{y}{Vector, matrix, or array of target data (or list if the model has
multiple outputs). If all outputs in the model are named, you can also pass
a list mapping output names to data.}
\item{batch_size}{Number of samples per gradient update.}
\item{epochs}{Number of times to iterate over the training data arrays.}
\item{verbose}{Verbosity mode (0 = silent, 1 = verbose, 2 = one log line per
epoch).}
\item{callbacks}{List of callbacks to be called during training.}
\item{view_metrics}{View realtime plot of training metrics (by epoch). The
default (\code{"auto"}) will display the plot when running within RStudio,
\code{metrics} were specified during model \code{\link[=compile]{compile()}}, \code{epochs > 1} and
\code{verbose > 0}. Use the global \code{keras.view_metrics} option to establish a
different default.}
\item{validation_split}{Float between 0 and 1: fraction of the training data
to be used as validation data. The model will set apart this fraction of
the training data, will not train on it, and will evaluate the loss and any
model metrics on this data at the end of each epoch.}
\item{validation_data}{Data on which to evaluate the loss and any model
metrics at the end of each epoch. The model will not be trained on this
data. This could be a list (x_val, y_val) or a list (x_val, y_val,
val_sample_weights).}
\item{shuffle}{\code{TRUE} to shuffle the training data before each epoch.}
\item{class_weight}{Optional named list mapping indices (integers) to a
weight (float) to apply to the model's loss for the samples from this class
during training. This can be useful to tell the model to "pay more
attention" to samples from an under-represented class.}
\item{sample_weight}{Optional array of the same length as x, containing
weights to apply to the model's loss for each sample. In the case of
temporal data, you can pass a 2D array with shape (samples,
sequence_length), to apply a different weight to every timestep of every
sample. In this case you should make sure to specify
sample_weight_mode="temporal" in \code{\link[=compile]{compile()}}.}
\item{initial_epoch}{epoch at which to start training (useful for resuming a
previous training run).}
\item{...}{Unused}
}
\description{
Trains the model for a fixed number of epochs (iterations on a dataset).
}
\seealso{
Other model functions: \code{\link{compile}},
\code{\link{evaluate_generator}}, \code{\link{evaluate}},
\code{\link{fit_generator}}, \code{\link{get_config}},
\code{\link{get_layer}},
\code{\link{keras_model_sequential}},
\code{\link{keras_model}}, \code{\link{pop_layer}},
\code{\link{predict.keras.engine.training.Model}},
\code{\link{predict_generator}},
\code{\link{predict_on_batch}},
\code{\link{predict_proba}},
\code{\link{summary.keras.engine.training.Model}},
\code{\link{train_on_batch}}
}
| /man/fit.Rd | no_license | cinneesol/keras-1 | R | false | true | 3,498 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{fit}
\alias{fit}
\title{Train a Keras model}
\usage{
fit(object, x, y, batch_size = 32, epochs = 10, verbose = 1,
callbacks = NULL, view_metrics = getOption("keras.view_metrics", default =
"auto"), validation_split = 0, validation_data = NULL, shuffle = TRUE,
class_weight = NULL, sample_weight = NULL, initial_epoch = 0, ...)
}
\arguments{
\item{object}{Model to train.}
\item{x}{Vector, matrix, or array of training data (or list if the model has
multiple inputs). If all inputs in the model are named, you can also pass a
list mapping input names to data.}
\item{y}{Vector, matrix, or array of target data (or list if the model has
multiple outputs). If all outputs in the model are named, you can also pass
a list mapping output names to data.}
\item{batch_size}{Number of samples per gradient update.}
\item{epochs}{Number of times to iterate over the training data arrays.}
\item{verbose}{Verbosity mode (0 = silent, 1 = verbose, 2 = one log line per
epoch).}
\item{callbacks}{List of callbacks to be called during training.}
\item{view_metrics}{View realtime plot of training metrics (by epoch). The
default (\code{"auto"}) will display the plot when running within RStudio,
\code{metrics} were specified during model \code{\link[=compile]{compile()}}, \code{epochs > 1} and
\code{verbose > 0}. Use the global \code{keras.view_metrics} option to establish a
different default.}
\item{validation_split}{Float between 0 and 1: fraction of the training data
to be used as validation data. The model will set apart this fraction of
the training data, will not train on it, and will evaluate the loss and any
model metrics on this data at the end of each epoch.}
\item{validation_data}{Data on which to evaluate the loss and any model
metrics at the end of each epoch. The model will not be trained on this
data. This could be a list (x_val, y_val) or a list (x_val, y_val,
val_sample_weights).}
\item{shuffle}{\code{TRUE} to shuffle the training data before each epoch.}
\item{class_weight}{Optional named list mapping indices (integers) to a
weight (float) to apply to the model's loss for the samples from this class
during training. This can be useful to tell the model to "pay more
attention" to samples from an under-represented class.}
\item{sample_weight}{Optional array of the same length as x, containing
weights to apply to the model's loss for each sample. In the case of
temporal data, you can pass a 2D array with shape (samples,
sequence_length), to apply a different weight to every timestep of every
sample. In this case you should make sure to specify
sample_weight_mode="temporal" in \code{\link[=compile]{compile()}}.}
\item{initial_epoch}{epoch at which to start training (useful for resuming a
previous training run).}
\item{...}{Unused}
}
\description{
Trains the model for a fixed number of epochs (iterations on a dataset).
}
\seealso{
Other model functions: \code{\link{compile}},
\code{\link{evaluate_generator}}, \code{\link{evaluate}},
\code{\link{fit_generator}}, \code{\link{get_config}},
\code{\link{get_layer}},
\code{\link{keras_model_sequential}},
\code{\link{keras_model}}, \code{\link{pop_layer}},
\code{\link{predict.keras.engine.training.Model}},
\code{\link{predict_generator}},
\code{\link{predict_on_batch}},
\code{\link{predict_proba}},
\code{\link{summary.keras.engine.training.Model}},
\code{\link{train_on_batch}}
}
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918629063e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615832912-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,048 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918629063e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
h5readDataset <- function (h5dataset, index = NULL, start = NULL, stride = NULL,
block = NULL, count = NULL, compoundAsDataFrame = TRUE, drop = FALSE, ...) {
try({
h5spaceFile <- H5Dget_space(h5dataset)
})
h5spaceMem = NULL
if (!is.null(index)) {
s <- H5Sget_simple_extent_dims(h5spaceFile)$size
if (length(index) != length(s)) {
stop("length of index has to be equal to dimensional extension of HDF5 dataset.")
}
for (i in seq_len(length(index))) {
if (is.null(index[[i]])) {
index[[i]] = seq_len(s[i])
## if we passed an object to the index, we need to get its values
} else if (is.name(index[[i]])) {
index[[i]] <- eval(index[[i]])
}
}
size = 0
try({
size = H5Sselect_index(h5spaceFile, index)
})
h5spaceMem = H5Screate_simple(size)
}
else {
if (any(c(!is.null(start), !is.null(stride),
!is.null(count), !is.null(block)))) {
size = 0
try({
size = H5Sselect_hyperslab(h5spaceFile,
start = start, stride = stride, count = count,
block = block)
})
h5spaceMem = H5Screate_simple(size)
}
}
obj <- NULL
try({
obj <- H5Dread(h5dataset = h5dataset, h5spaceFile = h5spaceFile,
h5spaceMem = h5spaceMem,
compoundAsDataFrame = compoundAsDataFrame, drop = drop, ...)
})
if (!is.null(h5spaceMem)) {
try({
H5Sclose(h5spaceMem)
})
}
if (!is.null(index)) {
I = list()
for (i in seq_len(length(index))) {
tmp = unique(sort(index[[i]]))
I[[i]] = match(index[[i]], tmp)
}
obj <- do.call("[", c(list(obj), I, drop = FALSE))
}
try({
H5Sclose(h5spaceFile)
})
obj
}
## High-level HDF5 reader: open object `name` inside `file` (a filename or
## an already open location) and return its contents. Groups are read
## recursively via h5dump(); datasets via h5readDataset(). Optionally
## copies the object's HDF5 attributes onto the R result.
h5read <- function(file, name, index=NULL, start=NULL, stride=NULL, block=NULL, count=NULL, compoundAsDataFrame = TRUE, callGeneric = TRUE, read.attributes=FALSE, drop = FALSE, ... ) {
loc = h5checktypeOrOpenLoc(file, readonly=TRUE)
# Guarantee a location we opened ourselves is closed again on exit.
on.exit( h5closeitLoc(loc) )
if (!H5Lexists(loc$H5Identifier, name)) {
stop("Object '", name, "' does not exist in this HDF5 file.")
} else {
# Probe the object's type and attribute count before dispatching.
oid = H5Oopen(loc$H5Identifier, name)
type = H5Iget_type(oid)
num_attrs = H5Oget_num_attrs(oid)
if (is.na(num_attrs)) { num_attrs = 0 }
H5Oclose(oid)
if (type == "H5I_GROUP") {
gid <- H5Gopen(loc$H5Identifier, name)
obj = h5dump(gid, start=start, stride=stride, block=block, count=count, compoundAsDataFrame = compoundAsDataFrame, callGeneric = callGeneric, ...)
H5Gclose(gid)
} else {
if (type == "H5I_DATASET") {
try( { h5dataset <- H5Dopen(loc$H5Identifier, name) } )
obj <- h5readDataset(h5dataset, index = index, start = start, stride = stride,
block = block, count = count, compoundAsDataFrame = compoundAsDataFrame, drop = drop, ...)
try( { H5Dclose(h5dataset) } )
# If a class-specific post-processor h5read.<class>() exists, let it
# rebuild the object (e.g. restore factors).
cl <- attr(obj,"class")
# NOTE(review): scalar condition uses vectorized `&`; works here but
# `&&` would be conventional.
if (!is.null(cl) & callGeneric) {
if (exists(paste("h5read",cl,sep="."),mode="function")) {
obj <- do.call(paste("h5read",cl,sep="."), args=list(obj = obj))
}
}
} else {
message("Reading of object type not supported.")
obj <- NULL
} ## DATASET
} ## GROUP
# Attach HDF5 attributes as R attributes; "dim" is skipped so it cannot
# clash with the object's own dimensions.
if (read.attributes & (num_attrs > 0) & !is.null(obj)) {
for (i in seq_len(num_attrs)) {
A = H5Aopen_by_idx(loc$H5Identifier, n = i-1, objname = name)
attrname <- H5Aget_name(A)
if (attrname != "dim") {
attr(obj, attrname) = H5Aread(A)
}
H5Aclose(A)
}
}
} # !H5Lexists
obj
}
## Read an HDF5 dataset, optionally restricted to a subset of elements
## given either as an `index` list (one entry per dimension, NULL = all)
## or as a start/stride/block/count hyperslab. Returns the data read, or
## NULL if H5Dread failed.
## (The leading pipe-delimited text below is dataset-dump metadata fused
## onto the first code line; left untouched.)
| /R/h5read.R | no_license | grimbough/archive-rhdf5 | R | false | false | 3,713 | r | h5readDataset <- function (h5dataset, index = NULL, start = NULL, stride = NULL,
block = NULL, count = NULL, compoundAsDataFrame = TRUE, drop = FALSE, ...) {
# File-side dataspace; try() tolerates failure, which then surfaces in
# the later H5 calls.
try({
h5spaceFile <- H5Dget_space(h5dataset)
})
h5spaceMem = NULL
if (!is.null(index)) {
# Extent per dimension, used to expand NULL index entries.
s <- H5Sget_simple_extent_dims(h5spaceFile)$size
if (length(index) != length(s)) {
stop("length of index has to be equal to dimensional extension of HDF5 dataset.")
}
for (i in seq_len(length(index))) {
if (is.null(index[[i]])) {
index[[i]] = seq_len(s[i])
## if we passed an object to the index, we need to get its values
} else if (is.name(index[[i]])) {
index[[i]] <- eval(index[[i]])
}
}
size = 0
try({
size = H5Sselect_index(h5spaceFile, index)
})
# Memory dataspace sized to the selection just made.
h5spaceMem = H5Screate_simple(size)
}
else {
if (any(c(!is.null(start), !is.null(stride),
!is.null(count), !is.null(block)))) {
size = 0
try({
size = H5Sselect_hyperslab(h5spaceFile,
start = start, stride = stride, count = count,
block = block)
})
h5spaceMem = H5Screate_simple(size)
}
}
obj <- NULL
try({
obj <- H5Dread(h5dataset = h5dataset, h5spaceFile = h5spaceFile,
h5spaceMem = h5spaceMem,
compoundAsDataFrame = compoundAsDataFrame, drop = drop, ...)
})
if (!is.null(h5spaceMem)) {
try({
H5Sclose(h5spaceMem)
})
}
if (!is.null(index)) {
# H5Sselect_index reads in sorted unique order; restore the caller's
# requested order (and duplicates) via match().
I = list()
for (i in seq_len(length(index))) {
tmp = unique(sort(index[[i]]))
I[[i]] = match(index[[i]], tmp)
}
obj <- do.call("[", c(list(obj), I, drop = FALSE))
}
try({
H5Sclose(h5spaceFile)
})
obj
}
## Open object `name` inside `file` (filename or open location) and return
## its contents: groups recursively via h5dump(), datasets via
## h5readDataset(). With read.attributes = TRUE, HDF5 attributes (except
## "dim") are copied onto the result.
h5read <- function(file, name, index=NULL, start=NULL, stride=NULL, block=NULL, count=NULL, compoundAsDataFrame = TRUE, callGeneric = TRUE, read.attributes=FALSE, drop = FALSE, ... ) {
loc = h5checktypeOrOpenLoc(file, readonly=TRUE)
# Close the location again if we opened it here.
on.exit( h5closeitLoc(loc) )
if (!H5Lexists(loc$H5Identifier, name)) {
stop("Object '", name, "' does not exist in this HDF5 file.")
} else {
# Determine object type and attribute count before dispatching.
oid = H5Oopen(loc$H5Identifier, name)
type = H5Iget_type(oid)
num_attrs = H5Oget_num_attrs(oid)
if (is.na(num_attrs)) { num_attrs = 0 }
H5Oclose(oid)
if (type == "H5I_GROUP") {
gid <- H5Gopen(loc$H5Identifier, name)
obj = h5dump(gid, start=start, stride=stride, block=block, count=count, compoundAsDataFrame = compoundAsDataFrame, callGeneric = callGeneric, ...)
H5Gclose(gid)
} else {
if (type == "H5I_DATASET") {
try( { h5dataset <- H5Dopen(loc$H5Identifier, name) } )
obj <- h5readDataset(h5dataset, index = index, start = start, stride = stride,
block = block, count = count, compoundAsDataFrame = compoundAsDataFrame, drop = drop, ...)
try( { H5Dclose(h5dataset) } )
# Optional class-specific post-processing via h5read.<class>().
cl <- attr(obj,"class")
if (!is.null(cl) & callGeneric) {
if (exists(paste("h5read",cl,sep="."),mode="function")) {
obj <- do.call(paste("h5read",cl,sep="."), args=list(obj = obj))
}
}
} else {
message("Reading of object type not supported.")
obj <- NULL
} ## DATASET
} ## GROUP
# Copy HDF5 attributes onto the R object; "dim" is skipped deliberately.
if (read.attributes & (num_attrs > 0) & !is.null(obj)) {
for (i in seq_len(num_attrs)) {
A = H5Aopen_by_idx(loc$H5Identifier, n = i-1, objname = name)
attrname <- H5Aget_name(A)
if (attrname != "dim") {
attr(obj, attrname) = H5Aread(A)
}
H5Aclose(A)
}
}
} # !H5Lexists
obj
}
|
source("to_rwl.R")

# Load required packages, installing any missing one on first use.
if(!require(dplyr)){
  install.packages("dplyr")
  library(dplyr)
}
if(!require(ggplot2)){
  install.packages("ggplot2")
  library(ggplot2)
}
if(!require(reshape2)){
  install.packages("reshape2")
  library(reshape2)
}

# Read a Dendro-program file and convert it to Tucson rwl format.
# read.Dendro.toRWL() is defined in to_rwl.R (sourced above). With
# saveRWL = TRUE the converted data is also written to a .rwl file for
# later reuse. (FIX: TRUE instead of the reassignable shorthand T.)
rwl <- read.Dendro.toRWL("62167.txt", saveRWL = TRUE)

# Round-trip check: read back the file just written.
# (read.rwl comes from the dplR package, presumably loaded by to_rwl.R --
# verify.)
rwl <- read.rwl("62167.rwl", format = "tucson")

# Quick overview of the chronology.
summary(rwl)

# The years are stored as row names; move them into a proper column so the
# series can be joined with the climate data.
rwl_to_join <- tibble::rownames_to_column(rwl, var = "Jahr")
colnames(rwl_to_join) <- c("Jahr", "Chrono")
rwl_to_join$Jahr <- as.numeric(rwl_to_join$Jahr)

# Read the annual temperature anomalies.
temp <- read.table("Jahrestemperatur_Abweichung.csv", sep = ";")
colnames(temp) <- c("Jahr", "Abweichung")

# Join chronology and anomalies on year; keep two copies, one of which is
# shifted for the log10 plot.
joined_data <- joined_data_log10 <- dplyr::inner_join(rwl_to_join, temp, by = "Jahr")

# For the log10 axis all anomalies must be positive, so shift every value
# by the absolute value of the minimum.
t_add <- abs(min(temp$Abweichung))
joined_data_log10$Abweichung <- joined_data_log10$Abweichung + t_add

# Reshape to long format for ggplot.
data_plot_log10 <- reshape2::melt(joined_data_log10, id.vars = "Jahr", variable.name = "Reihe", value.name = "Wert")

# Line plot on a log10 y axis so the differently scaled series can be
# compared visually. Each geom_* call adds one layer.
plot_log10 <- ggplot(data = data_plot_log10, aes(x = Jahr, y = Wert, color = Reihe)) +
  geom_line() +
  scale_y_log10()

# Second approach: centre and scale each series (subtract the mean, divide
# by the standard deviation).
# BUG FIX: the original selected columns with joined_data[, 2-3], i.e. the
# arithmetic result -1 (= drop column 1); the intended sequence is 2:3
# (Chrono and Abweichung). The two happen to coincide only because the
# frame has exactly three columns.
joined_data_centered <- cbind(joined_data[1], apply(joined_data[, 2:3], 2, scale))
data_plot_centered <- reshape2::melt(joined_data_centered, id.vars = "Jahr", variable.name = "Reihe", value.name = "Wert")
plot_scaled <- ggplot(data = data_plot_centered, aes(x = Jahr, color = Reihe)) +
  geom_line(aes(y = Wert))

# Display the plots.
plot_log10
plot_scaled
| /plot_chron_temperature.R | no_license | datarian/dendro-tricks | R | false | false | 2,777 | r | source("to_rwl.R")
if(!require(dplyr)){
install.packages("dplyr")
library(dplyr)
}
if(!require(ggplot2)){
install.packages("ggplot2")
library(ggplot2)
}
if(!require(reshape2)){
install.packages("reshape2")
library(reshape2)
}
# Liest eine Dendroprogramm-Datei ein und speichert sie in tucson-rwl:
# Die Funktion ist in to_rwl.R, die erste Zeile in diesem Script lädt sie zur Verwendung.
# Filename übergeben, saveRWL -> der Inhalt der Datei kommt in rwl-Format in die Variable rwl, und die Datei wird gleichzeitig für spätere Verwendung in .rwl-Format gespeichert
rwl <- read.Dendro.toRWL("62167.txt", saveRWL=T)
# Test: Auslesen aus File
rwl <- read.rwl("62167.rwl", format ="tucson")
# Kurze Übersicht
summary(rwl)
# Die Jahre sind die Zeilennamen im eingelesenen File. Für das Zusammenfügen
# mit den Klimadaten müssen sie in eine eigene Spalte:
rwl_to_join <- tibble::rownames_to_column(rwl,var = "Jahr")
# Dann die Spalten benennen, Jahr als Zahl formatieren
colnames(rwl_to_join) <- c("Jahr", "Chrono")
rwl_to_join$Jahr <- as.numeric(rwl_to_join$Jahr)
# Temperaturabweichungen einlesen
temp <- read.table("Jahrestemperatur_Abweichung.csv", sep=";")
colnames(temp) <- c("Jahr", "Abweichung")
# Die Daten der Chrono und Temp-Abweichungen werden zusammengefügt und in zwei versch. Variablen gespeichert
joined_data <- joined_data_log10 <- dplyr::inner_join(rwl_to_join, temp, by = "Jahr")
# Die Temperaturabweichungen müssen für log10 auf positive Werte tranformiert werden, damit
# sie nachher in log-skala dargestellt werden können. Wir addieren den Absolutwert
# des Minimums zu jeder Abweichung, so dass alle > 0 sind.
t_add <- abs(min(temp$Abweichung))
joined_data_log10$Abweichung <- joined_data_log10$Abweichung + t_add
# Daten für Plot umbauen
data_plot_log10 <- reshape2::melt(joined_data_log10, id.vars = "Jahr", variable.name="Reihe", value.name="Wert")
# Plots vorbereiten. Jede geom_*- Zeile fügt eine neue Ebene zum Plot.
# scale_y_log10 bringt die Y-Achse in log-Skala, so dass die unterschiedlichen
# Wertebereiche besser verglichen werden können.
plot_log10 <- ggplot(data=data_plot_log10, aes(x=Jahr, y=Wert, color=Reihe)) +
geom_line() +
scale_y_log10()
# Zweiter Ansatz: Die einzelnen Reihen werden zentriert und reduziert, d.h. der
# Mittelwert wird von jedem Einzelwert subtrahiert, anschliessend wird durch die
# Standardabweichung dividiert.
joined_data_centered <- cbind(joined_data[1],apply(joined_data[,2-3],2,scale))
data_plot_centered <- reshape2::melt(joined_data_centered, id.vars = "Jahr", variable.name="Reihe", value.name="Wert")
plot_scaled <- ggplot(data=data_plot_centered, aes(x=Jahr, color=Reihe)) +
geom_line(aes(y=Wert))
# Plots anzeigen:
plot_log10
plot_scaled
|
# Total quantity currently held across all positions in the global
# `portfolio.pos`; short positions contribute negatively.
portfolio.totalQuant <- function() {
  sum(portfolio.pos$quant)
}
# Reset the global portfolio to an empty state: no quantities, buy prices
# or sell prices.
portfolio.reset <- function() {
  portfolio.pos <<- list(quant = NULL, buyP = NULL, sellP = NULL)
}
# Deprecated matching buy: buy `quantity` units at `buyPrice`, first
# covering existing short positions whose recorded buy limit is at or
# above buyPrice, then opening a new long position for any remainder.
# Mutates the global `portfolio.pos`.
# Returns c(value traded, quantity bought).
portfolio.buy_old <- function(quantity, buyPrice, sellPrice)
{
  val=0;QB=0
  # BUG FIX: the original applied which() to an already-subset vector
  # (which(portfolio.pos$quant[x]<0)), so the second set of indices pointed
  # into the subset rather than into portfolio.pos. intersect() keeps both
  # conditions in the same index space, matching portfolio.buy.
  x=intersect(which(portfolio.pos$buyP>=buyPrice),
              which(portfolio.pos$quant<0))
  if(length(x)>0)
  {
    for(i in x)
    {
      if(quantity<=-portfolio.pos$quant[i])
      {
        # The whole remaining order fits into this short position.
        QB=QB+quantity
        # BUG FIX: accumulate into val instead of overwriting it, so
        # earlier iterations are not discarded.
        val=val+quantity*buyPrice
        portfolio.pos$quant[i]<<-portfolio.pos$quant[i]+quantity
        quantity=0
      }
      else
      {
        # Cover this short completely and keep buying with the remainder.
        val=val-portfolio.pos$quant[i]*buyPrice
        QB=QB-portfolio.pos$quant[i]
        # BUG FIX: reduce `quantity` before zeroing the position; the
        # original zeroed quant[i] first, so `quantity` never shrank.
        quantity=quantity+portfolio.pos$quant[i]
        portfolio.pos$quant[i]<<-0
      }
    }
  }
  if(quantity>0)
  {
    # Open a new long position for the unmatched remainder.
    t=c(quantity,buyPrice,sellPrice)
    for( i in seq_along(portfolio.pos)) portfolio.pos[[i]]<<-append(portfolio.pos[[i]],t[i])
    val=val+quantity*buyPrice
    QB=QB+quantity
  }
  portfolio.clean()
  return(c(val,QB))
}
# Buy `quantity` units at `buyPrice`: every short position whose stored
# buy limit is at or above the current price is covered completely, then a
# new long position (quantity, buyPrice, sellPrice) is appended for the
# requested amount. Mutates the global `portfolio.pos`.
# Returns c(value traded, quantity bought).
portfolio.buy <- function(quantity, buyPrice, sellPrice)
{
  val <- 0
  QB <- 0
  # Short positions eligible to be covered at this price.
  coverable <- intersect(which(portfolio.pos$buyP >= buyPrice),
                         which(portfolio.pos$quant < 0))
  for (i in coverable) {
    val <- val - portfolio.pos$quant[i] * buyPrice
    QB <- QB - portfolio.pos$quant[i]
    portfolio.pos$quant[i] <<- 0
  }
  if (quantity > 0) {
    newpos <- c(quantity, buyPrice, sellPrice)
    for (j in seq_along(portfolio.pos)) {
      portfolio.pos[[j]] <<- append(portfolio.pos[[j]], newpos[j])
    }
    val <- val + quantity * buyPrice
    QB <- QB + quantity
  }
  portfolio.clean()
  return(c(val, QB))
}
# Deprecated matching sell: sell `quantity` units at `sellPrice`, first
# reducing existing long positions whose stored sell limit is at or below
# sellPrice, then opening a new short position for any remainder.
# Mutates the global `portfolio.pos`.
# Returns c(value traded, quantity sold).
portfolio.sell_old <- function(quantity, buyPrice, sellPrice)
{
  QS=0
  val=0
  # BUG FIX: the original applied which() to an already-subset vector
  # (which(portfolio.pos$quant[x]>0)), so the second set of indices pointed
  # into the subset rather than into portfolio.pos. intersect() keeps both
  # conditions in the same index space, matching portfolio.sell.
  x=intersect(which(portfolio.pos$sellP<=sellPrice),
              which(portfolio.pos$quant>0))
  if(length(x)>0)
  {
    for(i in x)
    {
      if(quantity>portfolio.pos$quant[i])
      {
        # This long position is consumed entirely.
        val=val+portfolio.pos$quant[i]*sellPrice;
        quantity=quantity-portfolio.pos$quant[i];
        QS=QS+portfolio.pos$quant[i];
        portfolio.pos$quant[i]<<- 0;
      }
      else
      {
        # Only part of this position is needed.
        val=val+quantity*sellPrice;
        portfolio.pos$quant[i]<<- portfolio.pos$quant[i]-quantity;
        # BUG FIX: count the sold amount before clearing `quantity`; the
        # original zeroed it first, so QS missed the final chunk.
        QS=QS+quantity;
        quantity=0
      }
    }
  }
  if(quantity>0)
  {
    # Open a new short position for the unmatched remainder.
    t=c(-quantity,buyPrice,sellPrice)
    for( i in seq_along(portfolio.pos)) portfolio.pos[[i]]<<-append(portfolio.pos[[i]],t[i])
    val=val+quantity*buyPrice
    QS=QS+quantity
  }
  portfolio.clean()
  return(c(val,QS));
}
# Sell `quantity` units at `sellPrice`: every long position whose stored
# sell limit is at or below the current price is liquidated completely,
# then a new short position is appended for the requested amount. Mutates
# the global `portfolio.pos`. Returns c(value traded, quantity sold).
portfolio.sell <- function(quantity, buyPrice, sellPrice)
{
  val <- 0
  QS <- 0
  # Long positions whose sell limit is reached at this price.
  sellable <- intersect(which(portfolio.pos$sellP <= sellPrice),
                        which(portfolio.pos$quant > 0))
  for (i in sellable) {
    val <- val + portfolio.pos$quant[i] * sellPrice
    QS <- QS + portfolio.pos$quant[i]
    portfolio.pos$quant[i] <<- 0
  }
  if (quantity > 0) {
    newpos <- c(-quantity, buyPrice, sellPrice)
    for (j in seq_along(portfolio.pos)) {
      portfolio.pos[[j]] <<- append(portfolio.pos[[j]], newpos[j])
    }
    val <- val + quantity * buyPrice
    QS <- QS + quantity
  }
  portfolio.clean()
  return(c(as.double(val), as.numeric(QS)))
}
# Drop positions whose quantity has reached zero from the global
# `portfolio.pos`, keeping the three parallel vectors in sync.
portfolio.clean <- function() {
  closed <- which(portfolio.pos$quant == 0)
  if (length(closed) > 0) {
    for (slot in 1:3) portfolio.pos[[slot]] <<- portfolio.pos[[slot]][-closed]
  }
}
# Take a position depending on where the spot price sits relative to two
# limits: at or below Limit.A we buy, at or above Limit.B we sell. The
# order size is one unit per 10% of relative distance beyond the limit
# (rounded up), capped at qBmax / qSmax. Orders are routed through
# portfolio.buy() and portfolio.sell(), which update the global portfolio.
# Returns c(qB, qSeff, vB, vS):
#   qB    quantity actually bought
#   qSeff quantity actually sold
#   vB    cash flow of the buy leg (negative of the amount spent)
#   vS    cash flow of the sell leg (amount received)
TakePosition <- function(PSpot,Limit.A,Limit.B,qBmax,qSmax)
{
qB=0; qS=0;
if(PSpot<=Limit.A) qB=min(qBmax , ceiling(10*(Limit.A-PSpot)/Limit.A))
if(PSpot>=Limit.B) qS=min(qSmax , ceiling(10*(PSpot-Limit.B)/Limit.B))
tmp=portfolio.buy (qB,PSpot,Limit.B)
vB=-tmp[1]  # portfolio.buy returns c(value, quantity); negate spend to a cash flow
qB=tmp[2]
tmp_=portfolio.sell(qS,Limit.A,PSpot)
vS=tmp_[1]
qSeff=tmp_[2]
return(c(qB,qSeff,vB,vS))
} | /Portfolio.R | no_license | AdriMarteau/RTradingStation | R | false | false | 3,633 | r |
# Net quantity held over all open positions in the global `portfolio.pos`
# (shorts count negatively).
portfolio.totalQuant <- function() {
  sum(portfolio.pos$quant)
}
# Empty the global portfolio: all three parallel vectors become NULL.
portfolio.reset <- function() {
  portfolio.pos <<- list(quant = NULL, buyP = NULL, sellP = NULL)
}
# Deprecated matching buy: buy `quantity` units at `buyPrice`, first
# covering existing short positions whose recorded buy limit is at or
# above buyPrice, then opening a new long position for any remainder.
# Mutates the global `portfolio.pos`.
# Returns c(value traded, quantity bought).
portfolio.buy_old <- function(quantity, buyPrice, sellPrice)
{
  val=0;QB=0
  # BUG FIX: the original applied which() to an already-subset vector
  # (which(portfolio.pos$quant[x]<0)), so the second set of indices pointed
  # into the subset rather than into portfolio.pos. intersect() keeps both
  # conditions in the same index space, matching portfolio.buy.
  x=intersect(which(portfolio.pos$buyP>=buyPrice),
              which(portfolio.pos$quant<0))
  if(length(x)>0)
  {
    for(i in x)
    {
      if(quantity<=-portfolio.pos$quant[i])
      {
        # The whole remaining order fits into this short position.
        QB=QB+quantity
        # BUG FIX: accumulate into val instead of overwriting it, so
        # earlier iterations are not discarded.
        val=val+quantity*buyPrice
        portfolio.pos$quant[i]<<-portfolio.pos$quant[i]+quantity
        quantity=0
      }
      else
      {
        # Cover this short completely and keep buying with the remainder.
        val=val-portfolio.pos$quant[i]*buyPrice
        QB=QB-portfolio.pos$quant[i]
        # BUG FIX: reduce `quantity` before zeroing the position; the
        # original zeroed quant[i] first, so `quantity` never shrank.
        quantity=quantity+portfolio.pos$quant[i]
        portfolio.pos$quant[i]<<-0
      }
    }
  }
  if(quantity>0)
  {
    # Open a new long position for the unmatched remainder.
    t=c(quantity,buyPrice,sellPrice)
    for( i in seq_along(portfolio.pos)) portfolio.pos[[i]]<<-append(portfolio.pos[[i]],t[i])
    val=val+quantity*buyPrice
    QB=QB+quantity
  }
  portfolio.clean()
  return(c(val,QB))
}
# Buy `quantity` units at `buyPrice`. All short positions whose stored buy
# limit is >= buyPrice are covered in full; afterwards a long position of
# the requested size is appended. Mutates the global `portfolio.pos`.
# Returns c(value traded, quantity bought).
portfolio.buy <- function(quantity, buyPrice, sellPrice)
{
  traded_value <- 0
  bought <- 0
  shorts_to_cover <- intersect(which(portfolio.pos$buyP >= buyPrice),
                               which(portfolio.pos$quant < 0))
  for (idx in shorts_to_cover) {
    traded_value <- traded_value - portfolio.pos$quant[idx] * buyPrice
    bought <- bought - portfolio.pos$quant[idx]
    portfolio.pos$quant[idx] <<- 0
  }
  if (quantity > 0) {
    entry <- c(quantity, buyPrice, sellPrice)
    for (slot in seq_along(portfolio.pos)) {
      portfolio.pos[[slot]] <<- append(portfolio.pos[[slot]], entry[slot])
    }
    traded_value <- traded_value + quantity * buyPrice
    bought <- bought + quantity
  }
  portfolio.clean()
  return(c(traded_value, bought))
}
# Deprecated matching sell: sell `quantity` units at `sellPrice`, first
# reducing existing long positions whose stored sell limit is at or below
# sellPrice, then opening a new short position for any remainder.
# Mutates the global `portfolio.pos`.
# Returns c(value traded, quantity sold).
portfolio.sell_old <- function(quantity, buyPrice, sellPrice)
{
  QS=0
  val=0
  # BUG FIX: the original applied which() to an already-subset vector
  # (which(portfolio.pos$quant[x]>0)), so the second set of indices pointed
  # into the subset rather than into portfolio.pos. intersect() keeps both
  # conditions in the same index space, matching portfolio.sell.
  x=intersect(which(portfolio.pos$sellP<=sellPrice),
              which(portfolio.pos$quant>0))
  if(length(x)>0)
  {
    for(i in x)
    {
      if(quantity>portfolio.pos$quant[i])
      {
        # This long position is consumed entirely.
        val=val+portfolio.pos$quant[i]*sellPrice;
        quantity=quantity-portfolio.pos$quant[i];
        QS=QS+portfolio.pos$quant[i];
        portfolio.pos$quant[i]<<- 0;
      }
      else
      {
        # Only part of this position is needed.
        val=val+quantity*sellPrice;
        portfolio.pos$quant[i]<<- portfolio.pos$quant[i]-quantity;
        # BUG FIX: count the sold amount before clearing `quantity`; the
        # original zeroed it first, so QS missed the final chunk.
        QS=QS+quantity;
        quantity=0
      }
    }
  }
  if(quantity>0)
  {
    # Open a new short position for the unmatched remainder.
    t=c(-quantity,buyPrice,sellPrice)
    for( i in seq_along(portfolio.pos)) portfolio.pos[[i]]<<-append(portfolio.pos[[i]],t[i])
    val=val+quantity*buyPrice
    QS=QS+quantity
  }
  portfolio.clean()
  return(c(val,QS));
}
# Sell `quantity` units at `sellPrice`. All long positions whose stored
# sell limit is <= sellPrice are liquidated in full; afterwards a short
# position of the requested size is appended. Mutates the global
# `portfolio.pos`. Returns c(value traded, quantity sold).
portfolio.sell <- function(quantity, buyPrice, sellPrice)
{
  traded_value <- 0
  sold <- 0
  longs_to_close <- intersect(which(portfolio.pos$sellP <= sellPrice),
                              which(portfolio.pos$quant > 0))
  for (idx in longs_to_close) {
    traded_value <- traded_value + portfolio.pos$quant[idx] * sellPrice
    sold <- sold + portfolio.pos$quant[idx]
    portfolio.pos$quant[idx] <<- 0
  }
  if (quantity > 0) {
    entry <- c(-quantity, buyPrice, sellPrice)
    for (slot in seq_along(portfolio.pos)) {
      portfolio.pos[[slot]] <<- append(portfolio.pos[[slot]], entry[slot])
    }
    traded_value <- traded_value + quantity * buyPrice
    sold <- sold + quantity
  }
  portfolio.clean()
  return(c(as.double(traded_value), as.numeric(sold)))
}
# Remove fully closed positions (quantity == 0) from the global
# `portfolio.pos`, trimming all three parallel vectors together.
portfolio.clean <- function() {
  empty <- which(portfolio.pos$quant == 0)
  if (length(empty) > 0) {
    for (k in 1:3) portfolio.pos[[k]] <<- portfolio.pos[[k]][-empty]
  }
}
# Take a position based on where the spot price sits relative to the two
# limits: at or below Limit.A we buy, at or above Limit.B we sell. The
# order size is one unit per 10% of relative distance beyond the limit
# (rounded up), capped at qBmax / qSmax. Orders go through portfolio.buy()
# and portfolio.sell(), which update the global portfolio.
# Returns c(bought, sold, buy cash flow (negated spend), sell cash flow).
TakePosition <- function(PSpot,Limit.A,Limit.B,qBmax,qSmax)
{
  buy_qty <- 0
  sell_qty <- 0
  if (PSpot <= Limit.A) {
    buy_qty <- min(qBmax, ceiling(10 * (Limit.A - PSpot) / Limit.A))
  }
  if (PSpot >= Limit.B) {
    sell_qty <- min(qSmax, ceiling(10 * (PSpot - Limit.B) / Limit.B))
  }
  # Buy leg first (matches the original ordering), then the sell leg.
  buy_res <- portfolio.buy(buy_qty, PSpot, Limit.B)
  sell_res <- portfolio.sell(sell_qty, Limit.A, PSpot)
  return(c(buy_res[2], sell_res[2], -buy_res[1], sell_res[1]))
}
library(corrplot)

# Correlation matrix of Sepal.Length and Sepal.Width.
cor(iris[1:2])
# Correlation matrix of Petal.Length and Petal.Width.
cor(iris[3:4])

# Correlation matrix of all four numeric iris measurements (Species excluded).
nospecies <- cor(iris[1:4])

# Draw the correlation matrix with several visual encodings.
# FIX: pass the correlation matrix itself to each call. The original reused
# the return value of corrplot(), which is version-dependent (newer corrplot
# releases return a list rather than the input matrix).
corrplot(nospecies)
corrplot(nospecies, method="pie")
corrplot(nospecies, method="ellipse")
corrplot(nospecies, method="number")

# FIX: library() instead of require(); require() only warns when the package
# is missing, so the next line would fail with a confusing error.
library(psych)
# Scatterplot matrix with histograms on the diagonal and correlations.
pairs.panels(iris[1:4], hist.col="cyan")
| /corr.R | no_license | akshatshrivastava/Basic-R | R | false | false | 367 | r | library(corrplot)
# Correlation matrix of Sepal.Length and Sepal.Width.
cor(iris[1:2])
# Correlation matrix of Petal.Length and Petal.Width.
cor(iris[3:4])
# Correlation matrix of all four numeric iris measurements (Species excluded).
nospecies <- cor(iris[1:4])
# NOTE(review): corrplot() is called for its plot; its return value is
# version-dependent (newer corrplot releases return a list, not the input
# matrix), so reusing `cr` below may break -- consider passing `nospecies`
# directly to the following calls.
cr<-corrplot(nospecies)
corrplot(cr,method="pie")
corrplot(cr,method="ellipse")
corrplot(cr,method="number")
# NOTE(review): require() only warns if psych is missing; library() would
# fail fast before pairs.panels() is reached.
require(psych)
# Scatterplot matrix with histograms on the diagonal and correlations.
pairs.panels(iris[1:4], hist.col="cyan")

Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.