blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e8d5f63bb1bd5ca5296609d54dcd0ecdef1d2f3a | 3ae15c160733d93b01156b792c0d485fdfd63aea | /fork-bomb.r | 7dac2a6cda112269de53516b448b84cbabc9b878 | [
"Unlicense"
] | permissive | Backup-eric645/fork-bomb | 3a7d49c4ebfd6e44e2ca7173cf006ab59df23a0f | 67bcd9396b03b4e662593b27b50082747d2a9098 | refs/heads/master | 2021-05-23T13:37:42.945791 | 2020-04-05T21:11:01 | 2020-04-05T21:11:01 | 253,315,585 | 1 | 0 | Unlicense | 2020-04-05T19:30:10 | 2020-04-05T19:30:10 | null | UTF-8 | R | false | false | 41 | r | fork-bomb.r | library(multicore)
while (TRUE) fork()
|
7d8e8ccb83327e255b83f35f841708c1fd70d0ed | 5f8497ec36417aa1dfc9a8f687c979b47d40556e | /R/likelihoodRatioSelection.R | eb591c9a48fb5c36469bc03da71da660ea6ad669 | [] | no_license | garthtarr/ClassifyR | f51bf32f99f58da59a416d017343e144cebfd4a2 | 5a7b9afa510753ed8422c74efa978f72e51049d1 | refs/heads/master | 2020-03-20T07:43:28.366037 | 2018-04-30T14:41:40 | 2018-04-30T14:41:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,213 | r | likelihoodRatioSelection.R | setGeneric("likelihoodRatioSelection", function(measurements, ...)
{standardGeneric("likelihoodRatioSelection")})
# Matrix of numeric measurements: transpose so that features become columns
# and delegate to the DataFrame method, which does the actual ranking.
setMethod("likelihoodRatioSelection", "matrix", function(measurements, classes, ...)
{
  measurementsFrame <- DataFrame(t(measurements), check.names = FALSE)
  likelihoodRatioSelection(measurementsFrame, classes, ...)
})
setMethod("likelihoodRatioSelection", "DataFrame", # Clinical data only.
          function(measurements, classes, datasetName,
                   trainParams, predictParams, resubstituteParams,
                   alternative = c(location = "different", scale = "different"),
                   ..., selectionName = "Likelihood Ratio Test (Normal)", verbose = 3)
{
  # Keep only the numeric features; the normal likelihood ratio test is
  # undefined for non-numeric variables.
  splitDataset <- .splitDataAndClasses(measurements, classes)
  measurements <- splitDataset[["measurements"]]
  isNumeric <- sapply(measurements, is.numeric)
  measurements <- measurements[, isNumeric, drop = FALSE]
  if(sum(isNumeric) == 0)
    stop("No features are numeric but at least one must be.")

  if(verbose == 3)
    message("Selecting features by likelihood ratio ranking.")

  # Sample membership of the two classes (only the first two levels are used).
  oneClass <- classes == levels(classes)[1]
  otherClass <- classes == levels(classes)[2]
  oneClassMeasurements <- measurements[oneClass, ]
  otherClassMeasurements <- measurements[otherClass, ]

  # Per-feature distribution parameters: element [[1]] holds the locations
  # and element [[2]] the scales, for each group of samples.
  oneClassDistribution <- getLocationsAndScales(oneClassMeasurements, ...)
  otherClassDistribution <- getLocationsAndScales(otherClassMeasurements, ...)
  allDistribution <- getLocationsAndScales(measurements, ...)

  # Sum of per-feature Gaussian log-likelihoods for one set of samples.
  # (The original triplicated this inline with the location/scale parameter
  # names swapped, which worked positionally but was misleading.)
  featureLogLikelihoods <- function(featureList, locations, scales)
    unlist(mapply(function(featureMeasurements, location, scale)
      sum(dnorm(featureMeasurements, location, scale, log = TRUE)),
      featureList, locations, scales))

  # Under the alternative hypothesis each class may have its own location
  # and/or scale; "same" pins the parameter to the pooled estimate.
  oneClassLocations <- switch(alternative[["location"]], same = allDistribution[[1]], different = oneClassDistribution[[1]])
  oneClassScales <- switch(alternative[["scale"]], same = allDistribution[[2]], different = oneClassDistribution[[2]])
  otherClassLocations <- switch(alternative[["location"]], same = allDistribution[[1]], different = otherClassDistribution[[1]])
  otherClassScales <- switch(alternative[["scale"]], same = allDistribution[[2]], different = otherClassDistribution[[2]])

  # -2 * (pooled-model log-likelihood minus two-class-model log-likelihood);
  # larger values indicate features that better separate the classes.
  logLikelihoodRatios <- -2 * (featureLogLikelihoods(measurements, allDistribution[[1]], allDistribution[[2]]) -
    featureLogLikelihoods(oneClassMeasurements, oneClassLocations, oneClassScales) -
    featureLogLikelihoods(otherClassMeasurements, otherClassLocations, otherClassScales))

  # Rank features from most to least discriminative, then let .pickFeatures
  # choose how many to keep via resubstitution.
  orderedFeatures <- order(logLikelihoodRatios, decreasing = TRUE)
  .pickFeatures(measurements, classes, datasetName,
                trainParams, predictParams, resubstituteParams,
                orderedFeatures, selectionName, verbose)
})
# One or more omics datasets, possibly with clinical data.
setMethod("likelihoodRatioSelection", "MultiAssayExperiment",
          function(measurements, targets = names(measurements), ...)
{
  # Flatten the chosen assays (and any clinical columns) into one wide table;
  # .MAEtoWideTable is a package-internal helper that also extracts classes.
  tablesAndClasses <- .MAEtoWideTable(measurements, targets)
  dataTable <- tablesAndClasses[["dataTable"]]
  classes <- tablesAndClasses[["classes"]]
  if(ncol(dataTable) == 0)
    stop("No variables in data tables specified by \'targets\' are numeric.")
  else
    # Delegate to the DataFrame method for the actual ranking and selection.
    likelihoodRatioSelection(dataTable, classes, ...)
}) |
c6735409023b22721b7ae0075e06694b1e6a614f | 8b423736b1ed57097bbb7cb9706cbfb58aecd5c4 | /.Rproj.user/9290F97D/sources/s-E1D1340C/9C088814-contents | f5d9dbbab4dc9bcdb3fd7f2abd1ca9f8af7fdd97 | [] | no_license | FredericLoge/taskDrivenRandomForest | ef06aa5ecc99a47361109e57d3e851f5efe68385 | dae0922e5ed37e6739a16227cfefb620195999ba | refs/heads/master | 2020-09-08T03:03:08.969500 | 2019-11-11T14:25:06 | 2019-11-11T14:25:06 | 220,996,018 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,868 | 9C088814-contents | #' @title This function computes greedily the best split for samples (x, y) for a customized loss function
#' @param x a matrix, representing the features of parent node observations
#' @param y a vector, representing the target of parent node observations
#' @param customized_loss_foo loss function user-specified, which is a function of the y vector in daugther node, subsequent to the split
#' @param nb_points to find the best split, we analyze every feature variable and discretize the search grid from min to max by a specified number of points
#' @param nb_points_y number of points to find optimal y value
find_best_split <- function(x, y, customized_loss_foo, nb_points, nb_points_y){
  # Candidate predicted values are the same for every split, so build the
  # grid once instead of once per feature (loop-invariant in the original).
  search_y <- seq(from = min(y), to = max(y), length.out = nb_points_y)

  # Best achievable loss for a daughter node: minimise the user-supplied loss
  # over the candidate predictions for the node's target values.
  node_loss <- function(node_y) {
    min(sapply(X = search_y, FUN = function(search_yi){
      customized_loss_foo(y = node_y, y_pred = search_yi)
    }))
  }

  # For each feature, evaluate nb_points thresholds spanning its observed
  # range. seq_len() is safe if x ever has zero columns (1:ncol(x) is not).
  threshold_res <- lapply(X = seq_len(ncol(x)), FUN = function(j){
    threshold_lower <- min(x[,j], na.rm = TRUE)
    threshold_upper <- max(x[,j], na.rm = TRUE)
    threshold_vec <- seq(from = threshold_lower, to = threshold_upper, length.out = nb_points)
    res <- sapply(X = threshold_vec, FUN = function(threshold_xj){
      # Total loss of a split = loss of the lower + loss of the upper node.
      node_loss(y[x[,j] <= threshold_xj]) + node_loss(y[x[,j] > threshold_xj])
    })
    cbind(j, threshold_vec, res)
  })
  threshold_res <- do.call(rbind.data.frame, threshold_res)
  # Row with the smallest total loss; ties resolved in favour of the first.
  return(threshold_res[which.min(threshold_res$res),])
}
#' @title Build decision tree, relying on method *find_best_split*
#' @param x a matrix, representing the features of observations
#' @param y a vector, representing the target of observations
#' @param max_depth maximum depth of the tree
#' @param customized_loss_foo loss function user-specified, which is a function of the y vector in node, subsequent to the split
#' @param nb_points to find the best split, we analyze every feature variable and discretize the search grid from min to max by a specified number of points
#' @param nb_points_y number of points to find optimal y value
#' @param min_data_size minimum number of points to attempt split
#' @description unaware of how this method is done in classic R packages such as *rpart*, we built it from scratch
#' without considering other codes. In later versions, we will try to adjust to standard paradigms, if any does exist.
#' Details must be added to this function. For now, it is quite long and heavy.
build_tree <- function(x, y, max_depth, customized_loss_foo, nb_points, nb_points_y, min_data_size){
  #################################################
  # compute tree structure for binary decision tree
  # Phase 1: enumerate every node of a full binary tree up to max_depth.
  # Node names encode the path from the root, e.g. "root_lower_upper".
  temp <- NULL
  parent_nodes <- 'root'
  for(depth in 1:max_depth){
    new_parent_nodes <- NULL
    for(parent_node in parent_nodes){
      child_node_lower <- paste0(parent_node, '_lower')
      child_node_upper <- paste0(parent_node, '_upper')
      # One row per parent node: its depth and the names of its two children.
      temp <- rbind(temp, cbind(depth, parent_node, child_node_lower, child_node_upper))
      new_parent_nodes <- c(new_parent_nodes, child_node_lower, child_node_upper)
    }
    parent_nodes <- new_parent_nodes
  }
  # rbind() of character matrices: convert to a data frame and restore types.
  temp <- as.data.frame(temp, stringsAsFactors = FALSE)
  temp$depth <- as.numeric(temp$depth)
  # Shared factor levels so the three node-name columns are comparable.
  node_levels <- unique(c(temp$parent_node, temp$child_node_lower, temp$child_node_upper))
  temp$parent_node <- factor(x = temp$parent_node, levels = node_levels)
  temp$child_node_lower <- factor(x = temp$child_node_lower, levels = node_levels)
  temp$child_node_upper <- factor(x = temp$child_node_upper, levels = node_levels)
  #################################################
  # compute split sequentially
  # Phase 2: walk the tree level by level, choosing splits greedily.
  temp$split_variable <- NA
  temp$split_threshold <- NA
  # xy_indicator[i, d] is the node occupied by row i after d - 1 splits; the
  # string "NA" (deliberately not the NA value) marks "branch frozen".
  xy_indicator <- array(data = "NA", dim = c(nrow(x), 1 + max_depth))
  xy_indicator[,1] <- "root"
  last_leaf <- array(data = max_depth, dim = nrow(x))
  for(depth in 1:max_depth){
    u <- unique(xy_indicator[,depth])
    u <- u[u != "NA"]
    if(length(u) == 0) next
    for(i in 1:length(u)){
      # if(grepl(pattern = '_stop', x = u[i])) next
      xy_cond <- (xy_indicator[,depth] == u[i])
      stop_cutting <- FALSE
      # Only attempt a split when the node holds enough observations.
      if(sum(xy_cond) > min_data_size){
        best_split <- find_best_split(x[xy_cond,], y[xy_cond], customized_loss_foo = customized_loss_foo, nb_points = nb_points, nb_points_y = nb_points_y)
        if(nrow(best_split) == 0) stop_cutting <- TRUE
      }else{
        stop_cutting <- TRUE
      }
      if(stop_cutting == TRUE){
        # Freeze this branch: record the depth reached and blank out the
        # split bookkeeping for the node that will not be split.
        last_leaf[xy_cond] <- depth - 1
        temp_row_index <- which(temp$parent_node == u[i])
        temp$split_variable[temp_row_index] <- NA
        temp$split_threshold[temp_row_index] <- NA
        temp$child_node_lower[temp_row_index] <- NA
        temp$child_node_upper[temp_row_index] <- NA
      }else{
        # Apply the chosen split and route each row to one of the children.
        j <- best_split$j
        thresh <- best_split$threshold_vec
        xy_cond_lower <- xy_cond & (x[,j] <= thresh)
        xy_cond_upper <- xy_cond & (x[,j] > thresh)
        xy_indicator[xy_cond_lower, depth+1] <- paste0(u[i], '_lower')
        xy_indicator[xy_cond_upper, depth+1] <- paste0(u[i], '_upper')
        temp_row_index <- which(temp$parent_node == u[i])
        temp$split_variable[temp_row_index] <- j
        temp$split_threshold[temp_row_index] <- thresh
      }
    }
  }
  #
  # Last non-"NA" entry per row = the leaf each training row ended up in.
  input_data_leaf_vec <- apply(xy_indicator, 1, function(x){ rev(x[x != "NA"])[1] })
  # Keep only the nodes that were actually split.
  temp <- temp[!is.na(temp$split_variable),]
  #
  l <- list('tree_structure' = temp,
            'last_leaf_index' = last_leaf,
            'input_data' = list('x' = x, 'y' = y),
            'input_data_leaf' = xy_indicator,
            'input_data_leaf_vec' = input_data_leaf_vec)
  return(l)
}
#' @title Build random forest, classic bagging on top of the custom decision tree
#' @param x a matrix, representing the features of observations
#' @param y a vector, representing the target of observations
#' @param max_depth maximum depth of the tree
#' @param customized_loss_foo loss function user-specified, which is a function of the y vector in node, subsequent to the split
#' @param nb_points to find the best split, we analyze every feature variable and discretize the search grid from min to max by a specified number of points
#' @param min_data_size minimum number of points to attempt split
#' @param bootstrap_prop proportion of complete data sampled to build decision tree
#' @description For each tree, we sample some proportion of the dataset, build a decision tree and parse results in a list.
build_rf <- function(x, y, n_trees, max_depth, customized_loss_foo, nb_points, nb_points_y, min_data_size, bootstrap_prop){
  # Number of rows drawn for each tree; loop-invariant, so computed once.
  sample_size <- ceiling(nrow(x) * bootstrap_prop)
  # seq_len() is safe for n_trees == 0, where 1:n_trees would iterate c(1, 0).
  lapply(X = seq_len(n_trees), FUN = function(tree_index){
    # Subsample rows without replacement, then fit one tree on the subsample.
    row_index_sample <- sample(x = seq_len(nrow(x)), size = sample_size)
    build_tree(x = x[row_index_sample,], y = y[row_index_sample], max_depth = max_depth,
               customized_loss_foo = customized_loss_foo, nb_points = nb_points,
               nb_points_y = nb_points_y, min_data_size = min_data_size)
  })
}
#' @title "Descend" the tree hierarchy to give prediction of x_vector
#' @param tree custom Tree built
#' @param x_vector vector of features
#' @param nb_points_y number of pointsd for the search grid of y
predict_from_tree <- function(tree, x_vector, nb_points_y, customized_loss_foo){
  # Walk down from the root, applying each node's split rule, until the
  # current node no longer appears as a parent in the structure (a leaf).
  structure_df <- tree$tree_structure
  current <- 'root'
  repeat{
    at_current <- structure_df$parent_node == current
    split_var <- structure_df$split_variable[at_current]
    split_thr <- structure_df$split_threshold[at_current]
    current <- if(x_vector[split_var] <= split_thr) paste0(current, '_lower') else paste0(current, '_upper')
    if(sum(structure_df$parent_node == current) != 1) break
  }
  # Training targets that landed in the same leaf during fitting.
  leaf_y <- tree$input_data$y[tree$input_data_leaf_vec == current]
  # Grid-search the prediction that minimises the customized loss on the leaf.
  search_y <- seq(from = min(leaf_y), to = max(leaf_y), length.out = nb_points_y)
  losses <- sapply(search_y, function(search_yi){
    customized_loss_foo(y = leaf_y, y_pred = search_yi)
  })
  return( search_y[which.min(losses)] )
}
#' @title Predict from custom random forest (returns individual tree predictions, no aggregations yet)
#' @param rf custom Random Forest built
#' @param x_vector vector of features
#' @param nb_points_y number of pointsd for the search grid of y
predict_from_rf <- function(rf, x_vector, nb_points_y, customized_loss_foo){
  # One scalar prediction per tree; aggregation is left to the caller.
  # vapply (vs the original sapply over 1:length(rf)) guarantees a numeric
  # vector and is safe for an empty forest, where 1:length(rf) iterates c(1, 0).
  vapply(X = seq_along(rf), FUN.VALUE = numeric(1), FUN = function(i){
    predict_from_tree(tree = rf[[i]], x_vector = x_vector,
                      customized_loss_foo = customized_loss_foo, nb_points_y = nb_points_y)
  })
}
| |
75e64d76a98e423e4a66e0fd1d15ad315cd4382a | 076d000d41c9d0bbf387521a3e967f05b7e03b68 | /Scripts/RasterizeResults/ResultRasters_Avg.R | 5953de0c85f69754b9b425ff3dcc50625babc3d0 | [] | no_license | rilllydi/MidwestSALUS | 9ab12be37e70c545530566570fefc73863ea62fb | 54ae5060061629ac9647a7528b6480ed5387ec72 | refs/heads/master | 2021-04-30T17:34:30.208111 | 2017-02-02T20:39:49 | 2017-02-02T20:39:49 | 80,221,457 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,481 | r | ResultRasters_Avg.R | # This script extracts data from the Soil/CDL raster. It also uses the Weather polygon shapefile to determine weather grid points for each raster pixel.
# The SALUS results (per MUKEY and weather ID) are matched to each pixel grid point
# A new raster for average GWAD, CWAD, and max NLCC is create for each state.
# Note using a statewide raster may crash R???
# Note you need the associated .vat.dbf file along with the original raster for this to work
# Input Data:
# - raster with MUKEY values
# - polygon shapefile of the NLDAS weather grid (with the attribute table including the centroid coordinates)
# - csv file of the SALUS results along with the associated MUKEY and Weather ID
# Process:
# 1. Read in the raster along with the associated dbf file (without the dbf file you get incorrect MUKEY values!)
# 2. Create a new raster of just the MUKEY values
# 3. Create a data table of the pixel coordinates and associated MUKEY value
# 4. Overlay the data table the the weather grid shapefile
# 5. Write out the unique weather grid coordinates and MUKEY values in a csv file (Output Data)
# (completed with all rasters in the list of files)
#VERY HELPFUL: http://ncss-tech.github.io/AQP/soilDB/gSSURGO-SDA.html
library(raster)
library(rgdal)
library(sp)
library(maptools)
library(rgeos)
library(data.table)
library(rgeos)
library(foreign)
library(plyr)
#install.packages('data.table')
setwd("Z:/Users/rilllydi/MidwestSALUS/Soils_in_CDL/States/CornOnly")
###########################################
# mosaic rasters together and write
mosaicme <- function(result,out){
  # Mosaic a list of rasters into a single raster, averaging cells where
  # tiles overlap, then write the merged raster to `out` (overwriting).
  mosaic_args <- c(result, list(fun = mean))
  merged <- do.call(mosaic, mosaic_args)
  writeRaster(merged, out, overwrite=TRUE)
}
############################################ The function!
myfunc <- function(inraster){
  # Processes one soil/CDL raster tile: attaches MUKEY values from the sidecar
  # .vat.dbf, finds each pixel's weather-grid centroid, joins the SALUS
  # results, and returns the three result rasters (GWAD, CWAD, NLCC).
  # NOTE(review): relies on globals `poly`, `mycrs` and `SALUSres` being set
  # by the calling script -- confirm before reusing elsewhere.
  print(inraster)
  # Read the raster
  r <- raster(inraster)
  #str(r) # To get more information about the raster
  # Build a raster attribute table (RAT) so categories can carry the MUKEY.
  r <- ratify(r)
  rat <- levels(r)[[1]]
  # NOTE(review): gsub() treats ".TIF" as a regex; the unescaped dot matches
  # any character. Works for these filenames, but "\\.TIF" would be exact.
  dbf_file <- gsub(".TIF",".TIF.vat.dbf",inraster)
  mu <- read.dbf(dbf_file, as.is=TRUE)
  names(mu)[1] <- 'ID'
  mu$MUKEY <- as.integer(mu$MUKEY)
  # Join the dbf's MUKEY onto the RAT by raster category ID (plyr::join).
  rat.new <- join(rat, mu, by='ID', type='left')
  levels(r) <- rat.new
  # Expand the RAT attribute into a raster whose cell values are MUKEYs.
  r.mu <- deratify(r, att='MUKEY') # THIS IS CORRECT!
  MUKEY<-extract(r.mu,1:ncell(r.mu))
  coord<-xyFromCell(r.mu,1:ncell(r.mu))
  pixels<-as.data.table(cbind(coord,MUKEY))
  ##########################
  # Promote the pixel table to SpatialPoints and overlay on the weather-grid
  # polygons to pick up each pixel's weather-cell centroid coordinates.
  coordinates(pixels) <- c("x", "y")
  proj4string(pixels) <- proj4string(poly)
  df <- over(pixels, poly)
  pixels <- as.data.table(as.data.frame(pixels))
  pixels[,wx_x:=df$CENTROID_X]
  pixels[,wx_y:=df$CENTROID_Y]
  ###backup <- pixels
  # Keyed data.table join: look up each (MUKEY, weather cell) in SALUSres.
  setkeyv(pixels,c("MUKEY","wx_y","wx_x"))
  pixels <- SALUSres[pixels] # join the data with the SALUS results
  gras <- rasterFromXYZ(pixels[,c("x", "y", "avgGWAD"),with = FALSE]) # CREATE RASTER STACK? AND THEN WRITE OUT RASTER STACK IN ONE LINE?
  cras <- rasterFromXYZ(pixels[,c("x", "y", "avgCWAD"),with = FALSE])
  nras <- rasterFromXYZ(pixels[,c("x", "y", "maxNLCC"),with = FALSE])
  projection(gras) <- mycrs
  projection(cras) <- mycrs
  projection(nras) <- mycrs
  print("rasterized!")
  return (list(gras,cras,nras))
}
#################################################################################################################
# Shapefile Data
SC = "SC1"  # scenario identifier used to locate the SALUS result csv files
#st <- c('mi','wi','oh','in','il','ia','sd','mn','mo')
st <- c('ia')
for (state in st) {
  # Read the state's NLDAS weather-grid polygons and assign WGS84 lon/lat.
  wxname <- paste("Z:/Users/rilllydi/MidwestSALUS/Weather/",state,"_WxPoly.shp",sep="")
  poly <- readShapePoly(wxname)
  mycrs=CRS("+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
  proj4string(poly) <- mycrs
  # Find all the raster data files
  # NOTE(review): `pattern` is a regex, so "*.TIF$" relies on the leading "*"
  # being tolerated; "\\.TIF$" would express the intent exactly.
  folder <- paste("Z:/Users/rilllydi/MidwestSALUS/Soils_in_CDL/States/CornOnly/",state,"_split_55/",sep="")
  inraster <- list.files(folder, pattern="*.TIF$", full.names=TRUE)
  ###inraster <- "Z:/Users/rilllydi/MidwestSALUS/Soils_in_CDL/States/CornOnly/ia_split_55/ia_soil_CornOnly_WGS84_17.TIF"
  ############################ Read and add the SALUS results to the data frame
  csvfolder <- "Z:/Users/rilllydi/MidwestSALUS/SALUSresults/CornOnly/results_csv/"
  chunkpatt = paste(state,"_._",SC,"_finalresults_avg.csv$",sep="")
  inSALUS <- list.files(csvfolder, pattern=chunkpatt, full.names=TRUE)
  SALUStables <- list()
  # Read as data table
  rfun <- function(inSALUS){
    dat <- fread(inSALUS, header=TRUE)
    return(dat)
  }
  SALUStables <- lapply(inSALUS,rfun)
  SALUSres <- rbindlist(SALUStables)
  # NOTE(review): the next line OVERWRITES the SALUS results assembled just
  # above with a fake-data file -- it looks like a debugging leftover; confirm
  # whether it should be removed for production runs.
  SALUSres <- fread("Z:/Users/rilllydi/MidwestSALUS/Wx_for_Soils/ia_fakeSALUSdata.csv", header=TRUE)
  setkeyv(SALUSres,c("MUKEY","wxID_y","wxID_x"))
  ####################
  result <- list()
  gresult <- list()
  cresult <- list()
  nresult <- list()
  # Rasterize every tile; system.time() reports the elapsed time.
  system.time(result <- lapply(inraster, myfunc)) #started 3:30, ended 4:30
  # Split the per-tile (GWAD, CWAD, NLCC) triples into three lists.
  gresult <- lapply(result,"[[", 1)
  cresult <- lapply(result,"[[", 2)
  nresult <- lapply(result,"[[", 3)
  gout <- paste("Z:/Users/rilllydi/MidwestSALUS/SALUSresults/CornOnly/raster_State_GWAD/AvgGWAD_",state,"_final.tif",sep="")
  cout <- paste("Z:/Users/rilllydi/MidwestSALUS/SALUSresults/CornOnly/raster_State_CWAD/AvgCWAD_",state,"_final.tif",sep="")
  nout <- paste("Z:/Users/rilllydi/MidwestSALUS/SALUSresults/CornOnly/raster_State_NLCC/MaxNLCC_",state,"_final.tif",sep="")
  # Mosaic each list of tiles into a statewide raster and write it out.
  mosaicme(gresult,gout)
  mosaicme(cresult,cout)
  system.time(mosaicme(nresult,nout)) #98.54 seconds
} |
f1de987a58a96d8303d02a42adb0fa9c3a6d647a | d193bbb36f572b6ab9a14fd02b0647b83f2df2ba | /tests/testthat/test-utils.R | 888d4615f09b915e4a1a31fc1e29ff8fcf8e008a | [] | no_license | gaborcsardi/argufy | 9e7f9d0c137f2ca2e9f88ce4dca31b57407a836c | bceef7904eef178c9aa67709940a29c1483c3c13 | refs/heads/master | 2020-12-24T16:16:08.348182 | 2016-03-12T14:54:45 | 2016-03-12T14:54:45 | 40,500,181 | 34 | 1 | null | 2016-02-02T14:55:32 | 2015-08-10T18:56:23 | R | UTF-8 | R | false | false | 1,902 | r | test-utils.R |
context("Utilities")
test_that("find_parent", {
f <- function() {
foo <- "bar"
g()
}
g <- function() {
parent <- find_parent(quote(f))
expect_equal(
get("foo", envir = sys.frame(parent)),
"bar"
)
}
f()
})
test_that("find_all_parents", {
  # f() calls itself once (recurse = TRUE) before calling g(), so two
  # invocations of f() sit on the stack, each with its own `foo` binding.
  f <- function(recurse = FALSE) {
    foo <- "bar"
    if (recurse) f() else g()
  }
  g <- function() {
    # find_all_parents() must report both f() frames, and `foo` must be
    # readable from each of them.
    parents <- find_all_parents(quote(f))
    expect_equal(length(parents), 2)
    expect_equal(
      get("foo", envir = sys.frame(parents[1])),
      "bar"
    )
    expect_equal(
      get("foo", envir = sys.frame(parents[2])),
      "bar"
    )
  }
  f(recurse = TRUE)
})
test_that("parse_deps", {
  # Each case pairs a DESCRIPTION-style dependency string with the package
  # names parse_deps() should extract (version qualifiers are dropped).
  test_cases <- list(
    list("foo, bar, foobar", c("foo", "bar", "foobar")),
    list("foo,\n bar,\n foobar", c("foo", "bar", "foobar")),
    list("foo", "foo"),
    list("foo (>= 1.0)", "foo"),
    list("foo (>= 1.0), bar", c("foo", "bar")),
    list("foo, bar (>=0.2-3)", c("foo", "bar")),
    list("", character(0)),
    list("\n\n", character(0))
  )
  for (case in test_cases) {
    input <- case[[1]]
    expected <- case[[2]]
    expect_equal(parse_deps(input), expected, info = input)
  }
})
test_that("str_trim", {
  res <- "foo"
  # Whitespace-only input collapses to the empty string.
  expect_equal(str_trim(""), "")
  expect_equal(str_trim(" "), "")
  expect_equal(str_trim("\n"), "")
  expect_equal(str_trim("\t"), "")
  expect_equal(str_trim(" \n\t\n "), "")
  # Leading/trailing spaces are removed; the core text is untouched.
  expect_equal(str_trim(res), res)
  expect_equal(str_trim("foo "), res)
  expect_equal(str_trim(" foo"), res)
  expect_equal(str_trim(" foo "), res)
  expect_equal(str_trim("foo "), res)
  expect_equal(str_trim(" foo "), res)
  expect_equal(str_trim(" foo "), res)
  # Newlines and tabs count as trimmable whitespace too.
  expect_equal(str_trim("foo\n "), res)
  expect_equal(str_trim(" \nfoo"), res)
  expect_equal(str_trim(" \n foo \n "), res)
  expect_equal(str_trim("foo\t"), res)
  expect_equal(str_trim("\tfoo"), res)
  expect_equal(str_trim("\tfoo\n"), res)
})
|
b37a7345adaab8df3a62da36d23168393554bc08 | b4c321b3ee864ff55f42442e2cd8ec309002e7d8 | /TOPIC1/topic1.R | 86849a77f7c838b7d948932c4775ccb2ef680e90 | [] | no_license | ColinRho/L_POINT | 1b2236ce19e76818bfed60075ef904a56e224824 | 87fad4fad5da705903eefa4df8a23c3f8d124d1c | refs/heads/master | 2021-01-10T18:19:13.156434 | 2016-01-08T09:30:58 | 2016-01-08T09:30:58 | 48,106,600 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,743 | r | topic1.R | ### TOPIC 1.
## 분석목적: 상품 출시 후 반응 진단 및 향후 매출 예측
## 평가기준
# - 모델 적합성(45) : 분석 데이터 가공, 통계적 적용에 따른 최적의 방법론
# - 독창성(20) : 분석 방법의 다양성 및 창의성
# - 결과활용도(25) : 분석 결과 해석의 적합성 및 활용성
# - 프레젠테이션(10) : 명확한 표현 및 효과적인 의사전달
suppressPackageStartupMessages({
library(data.table)
library(dplyr)
library(ggplot2)
})
###################################################################################################
### Data import, manipulation, detect outliers
###################################################################################################
# two sets of data and merged data
demo <- fread("TOPIC1/1-1. Demo.txt")
order <- fread("TOPIC1//1-2. 구매내역정보.txt")
merged <- merge(demo, order, by = "ID")
# variables should be treated as 'numeric'
numeric_var <- c("성별", "연령", "구매시점", "구매시간", "구매요일", "구매건수", "구매금액",
"취소여부", "평균기온")
for ( i in 1:length(numeric_var) ) {
merged[[numeric_var[i]]] <- as.numeric(merged[[numeric_var[i]]])
}
# to delete 'canceled' purchases
merged %>% select(취소여부) %>% table()
merged %<>% filter(취소여부 != 1) %>% select(-취소여부)
# choosing the category
merged %>% filter(카테고리 == "감자스낵") -> merged_potato
merged %>% select(성별) %>% table
merged %>% select(연령) %>% table
merged %>% select(구매시간) %>% table
###################################################################################################
### Modularized functions
###################################################################################################
plotter <-
# drawing time vs number of purchaser given categories
# product
function(df, product = "all", gender = "all", location = "all") {
# filtering by inputs and counting a number of purchase(regardless of volume of purchase)
# or the volume should be considered?
# depends on how to define the diffusion model.
if (product != "all") {
df %<>% filter(상품구분 == product)
}
if (gender != "all") {
df %<>% filter(성별 == gender)
}
if (location != "all") {
df %<>% filter(구매지역 == location)
}
df %<>% group_by(구매시점) %>% summarise(n = n()) %>% arrange(구매시점)
# plotting
ggplot(df, aes(x = 구매시점, y = n)) +
geom_point()
}
plotter(df = merged_potato, product = "상품B", gender = 2, location = "부산")
|
3b1a2d6a77a9e547c65c7af215c57a057d57da2e | c0e8ed5f61647f58ab98a7f83eebcd0dac8a02d9 | /R/calculate_TOWT_model_predictions.R | 680cbd377638dffa8faaeb137dc9c95f16390ff4 | [
"MIT"
] | permissive | LW-G38/nmecr | 08bb9391a50c61f727fe1edf9331832a1d92883c | dd7556430c5d91495df9e89bc7bd761af532e69a | refs/heads/master | 2020-09-17T07:21:43.950487 | 2019-11-27T20:25:53 | 2019-11-27T20:25:53 | 224,034,107 | 1 | 0 | MIT | 2019-11-27T20:25:54 | 2019-11-25T20:22:29 | null | UTF-8 | R | false | false | 3,898 | r | calculate_TOWT_model_predictions.R | #' Calculate TOWT model predictions
#'
#' \code{This function calculates predictions from model_with_TOWT}
#'
#' @param training_data Training dataframe and operating mode dataframe. Output from create_dataframe
#' @param prediction_data Prediction dataframe and operating mode dataframe. Output from create_dataframe
#' @param model_input_options List with model inputs specified using assign_model_inputs
#'
#' @return a list with the following components:
#' \describe{
#' \item{predictions} {dataframe with model predictions}
#' }
#'
calculate_TOWT_model_predictions <- function(training_data = NULL, prediction_data = NULL, modeled_object = NULL){
# Create training data temperature matrix
temp_mat <- create_temp_matrix(training_data$temp, modeled_object$model_input_options$calculated_temp_knots)
temp_m_name <- rep(NA, ncol(temp_mat))
for (i in 1 : ncol(temp_mat)) {
temp_m_name[i] <- paste("temp_mat", i, sep = "")
}
names(temp_mat) <- temp_m_name
# Create prediction data temperature matrix ----
temp_mat_pred <- create_temp_matrix(prediction_data$temp, modeled_object$model_input_options$calculated_temp_knots)
names(temp_mat_pred) <- temp_m_name
# Create prediction dataframe based on interval of week ----
minute_of_week_pred <- (lubridate::wday(prediction_data$time) - 1) * 24 * 60 +
lubridate::hour(prediction_data$time) * 60 + lubridate::minute(prediction_data$time)
interval_of_week_pred <- 1 + floor(minute_of_week_pred / modeled_object$model_input_options$interval_minutes)
ftow <- factor(interval_of_week_pred)
dframe_pred <- data.frame(prediction_data, ftow)
if(modeled_object$model_input_options$chosen_modeling_interval == "Hourly") {
dframe_pred <- dframe_pred %>%
select(-c("time", "temp"))
} else if (modeled_object$model_input_options$chosen_modeling_interval == "Daily") {
dframe_pred <- dframe_pred %>%
select(-c("time", "temp", "HDD", "CDD"))
}
# Time-of-Week ----
if (modeled_object$model_input_options$regression_type == "TOW") {
ok_tow_pred <- factor(ftow) %in% modeled_object$model_occupied$xlevels$ftow
predictions <- rep(NA, length(prediction_data$time))
predictions[ok_tow_pred] <- predict(modeled_object$model_occupied, dframe_pred)
} else {
# Determine occupancy information
ok_load <- !is.na(training_data$eload)
minute_of_week <- (lubridate::wday(training_data$time) - 1) * 24 * 60 +
lubridate::hour(training_data$time) * 60 + lubridate::minute(training_data$time)
interval_of_week <- 1 + floor(minute_of_week / modeled_object$model_input_options$interval_minutes)
occ_info <- find_occ_unocc(interval_of_week[ok_load],
training_data$eload[ok_load], training_data$temp[ok_load])
occ_intervals <- occ_info[occ_info[, 2] == 1, 1]
occ_vec <- rep(0, length(training_data$eload))
if (length(occ_intervals) > 2) {
for (i in 1 : length(occ_intervals)) {
occ_vec[interval_of_week == occ_intervals[i]] <- 1
}
}
# Add temperature matrix information to the prediction dataframe
dframe_pred <- data.frame(dframe_pred, temp_mat_pred)
predictions <- rep(NA, length(prediction_data$time))
# create subsets by occupancy - predict for each subset
ok_occ <- occ_vec == 1
ok_occ[is.na(ok_occ)] <- TRUE
if (sum(ok_occ > 0)) {
ok_tow_pred <- dframe_pred$ftow %in% modeled_object$model_occupied$xlevels$ftow
predictions[ok_tow_pred] <- predict(modeled_object$model_occupied, dframe_pred[ok_tow_pred, ])
}
if (sum(! ok_occ) > 0) {
ok_tow_pred <- dframe_pred$ftow %in% modeled_object$model_unoccupied$xlevels$ftow
predictions[ok_tow_pred] <- predict(modeled_object$model_unoccupied, dframe_pred[ok_tow_pred, ])
}
}
output <- NULL
predictions[predictions < 0] <- 0
output <- predictions
return(output)
}
|
fdbf4541599f5359684e8d1d4eed3bf2aeafb801 | d47833e60e3b9760619cf9c348d97b188f342db3 | /MobileNetworkDataSimulationTemplate/code/src/deduplication/man/getGenericModel.Rd | ef4e60ec7c48908fadb8d36a5a65f702f582f15b | [] | no_license | Lorencrack3/TFG-Lorenzo | 1a8ef9dedee45edda19ec93146e9f7701d261fbc | e781b139e59a338d78bdaf4d5b73605de222cd1c | refs/heads/main | 2023-06-04T00:23:16.141485 | 2021-06-29T21:31:25 | 2021-06-29T21:31:25 | 364,060,022 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,222 | rd | getGenericModel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getGenericModel.R
\name{getGenericModel}
\alias{getGenericModel}
\title{Builds the generic HMM model.}
\usage{
getGenericModel(
nrows,
ncols,
emissionProbs,
initSteady = TRUE,
aprioriProb = NULL
)
}
\arguments{
\item{nrows}{Number of rows in the grid.}
\item{ncols}{Number of columns in the grid.}
\item{emissionProbs}{A matrix with the event location probabilities. The number of rows equals the number of tiles in
the grid and the number of columns equals the number of antennas. This matrix is obtained by calling
\code{getEmissionProbs()} function.}
\item{initSteady}{If TRUE the initial apriori distribution is set to the steady state of the transition matrix, if
FALSE the apriori distribution should be given as a parameter.}
\item{aprioriProb}{The apriori distribution for the HMM model. It is needed only if initSteady is FALSE.}
}
\value{
Returns an HMM model with the initial apriori distribution set to the steady state of the transition matrix
or to the value given by \code{aprioriProb} parameter.
}
\description{
Builds the generic HMM model using the emission probabilities given by \code{getEmissionProbs()}.
}
|
0d94ea88a5a555ada99a1eb93f280bb9a3a0fe67 | b4b05cee5fd571d1b4f7483d38308065d94add4f | /R/datanode.R | 43ca614d6897648f1b9678c32962f4d267a49ea7 | [] | no_license | jullybobble/datanodes | d55ddfd87a03b0e9497bcc10e8c7a75499a612fd | 84c222108f0b07d7cedc0e066b7fdc038a8139c0 | refs/heads/master | 2021-01-10T09:29:48.983346 | 2015-06-14T08:34:03 | 2015-06-14T08:34:03 | 36,557,937 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,106 | r | datanode.R | #' Cache the result of a given expression.
#'
#' Evaluates an expression and caches its result. If a cache exists, the
#' expression is re-evaluated only if one of the dependency files is more
#' recent than the cache.
#'
#' The given expression \code{expr} is evaluated only if one of the
#' following condition is true:
#'
#' \itemize{
#' - the file identified in \code{path} does *not* exist; or
#' - \code{force} is \code{TRUE}; or
#' - the latest modified time of the files in \code{depends_on} is later than
#' the modified time of the file in \code{path}.
#' }
#'
#' @param path the file caching the result of \code{expr}
#' @param expr the expression to be evaluated if triggered
#' @param force whether to force the evaluation of the expression \code{expr}
#' and the update of its cache, defaults to \code{TRUE}
#' @param depends_on a character vector of files on which the evaluation of the
#' expression \code{expr} depends on.
#' @param io a list with two named function \code{read} and \code{write}.
#' \code{read} takes \code{path} and \code{args} as arguments,
#' and \code{write} takes in addition \code{data}, \code{path} and
#' \code{args}. \code{args} are a list of additional arguments possibly
#' to the underlying reading or writing functions. Implementations for
#' \code{\link{csv_io}}, \code{\link{rdata_io}} and \code{\link{rds_io}}
#' are provided. The default value depends on the extension of the
#' file described by the \code{path} parameter: \code{.csv},
#' \code{.RData} and \code{.rds} corresponding to the 3 \code{io}
#' implementations, defaulting to \code{rds_io}.
#' @param write_args a list of additional parameters to the \code{io$write} function.
#' @param read_args a list of additional parameters to the \code{io$read} function.
#' @param ... additional parameters to the \code{io$read} function, concatenated
#' after the list in \code{read_args}.
#'
#' @return the result of the evaluation of the expression \code{expr} if
#' triggered, or its cached value stored in \code{path} otherwise
#' @import readr
#' @export
datanode <- function(path,
                     expr,
                     force = FALSE,
                     depends_on = character(0),
                     # Pick an io backend from the (case-insensitive) file
                     # extension. ignore.case = TRUE replaces the "(?i)"
                     # inline flag, which base R only documents for perl = TRUE.
                     io = if (grepl("\\.csv$", path, ignore.case = TRUE)) csv_io
                     else if (grepl("\\.RData$", path, ignore.case = TRUE)) rdata_io
                     else rds_io,
                     write_args = NULL,
                     read_args = NULL,
                     ...) {

  # Re-evaluate `expr` when forced, when no cache exists yet, or when any
  # dependency file is newer than the cache.
  triggered <-
    force ||
    !file.exists(path) ||
    (!is.null(depends_on) && length(depends_on) != 0 &&
       file_time_trigger(path, depends_on))

  if (triggered) {
    # `expr` is lazily evaluated, so this is the only place it runs; the
    # result is then cached to `path`.
    result <- expr
    io$write(data = result, path = path, args = write_args)
    result
  } else {
    # Cache hit: read the stored result. Forward `...` after `read_args`,
    # as documented (previously `...` was silently dropped). When no dots
    # are supplied, `read_args` is passed through unchanged (possibly NULL).
    dots <- list(...)
    effective_read_args <- if (length(dots) > 0) c(read_args, dots) else read_args
    io$read(path = path, args = effective_read_args)
  }
}
# Last-modified timestamp (POSIXct) of the file at `path`.
file_modif_time <- function(path) {
  file.info(path)[["mtime"]]
}
# TRUE when the cache at `path` is older than the newest dependency file.
file_time_trigger <- function(path, depends_on) {
  # No dependency list at all: nothing can trigger a rebuild.
  if (is.null(depends_on)) {
    return(FALSE)
  }
  newest_dependency <- max(sapply(depends_on, file_modif_time))
  file_modif_time(path) < newest_dependency
}
|
b068fea3270f7c39d3eac3b329d924bff99fa71f | 087b18969b18b4c1ec310b1a74dd35c845ce9b4d | /plot1.R | a608a659402aecb0a337ced59e81e324de664fe2 | [] | no_license | DigitalSocrates/ExData_Plotting1 | 3562f3c5d4143bede20a7b3d26fae3ccff154c74 | 61a13b26a1049c555a5e056dd1de0ed84a0c168e | refs/heads/master | 2021-01-18T13:14:18.498029 | 2015-11-15T03:00:32 | 2015-11-15T03:00:32 | 46,198,288 | 0 | 0 | null | 2015-11-15T00:42:57 | 2015-11-15T00:42:57 | null | UTF-8 | R | false | false | 756 | r | plot1.R | plot1 <- function() {
  # Open the PNG graphics device; all plotting below goes to plot1.png.
  png(file = "plot1.png")
  # Read raw data from a txt file: keep the header, ";" as the delimiter,
  # and treat "?" entries as missing values.
  rawData <- read.table("household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, na.strings="?")
  # Transform the Date column from "d/m/Y" text into Date objects.
  powerconsumption <- transform(rawData,Date=as.Date(rawData$Date,format="%d/%m/%Y"))
  # Keep only the two days of interest (1-2 February 2007).
  powerconsumptionSubset <- powerconsumption[powerconsumption$Date=="2007-2-1" | powerconsumption$Date=="2007-2-2", ]
  # Histogram of global active power over those two days.
  hist(powerconsumptionSubset$Global_active_power, col = "red", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", main = "Global Active Power")
  # Close the device, flushing the image file to disk.
  dev.off()
}
c85621a04b8bd05c6406c92338594b04e02f1996 | d6759ae2d4ea707fce4a65ba1c4c0bf999395236 | /man/verify.Rd | e28f42336ff6b84613e607aac9610424354304db | [
"MIT"
] | permissive | xtmgah/assertr | 9855af8db9353229d133f25e768450d2ad3f651c | fd4042a594fbd62a1803dbfe38edeabdb9f7352d | refs/heads/master | 2020-12-26T04:55:14.240432 | 2015-01-23T19:56:01 | 2015-01-23T19:56:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,280 | rd | verify.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/assertions.R
\name{verify}
\alias{verify}
\title{Raises error if expression is FALSE anywhere}
\usage{
verify(data, expr)
}
\arguments{
\item{data}{A data frame, list, or environment}
\item{expr}{A logical expression}
}
\value{
data if verification passes. error if not.
}
\description{
Meant for use in a data analysis pipeline, this function will
just return the data it's supplied if all the logicals in the
expression supplied are TRUE. If at least one is FALSE, this
function will raise a error, effectively terminating the pipeline
early
}
\note{
See \code{vignette("assertr")} for how to use this in context
}
\examples{
verify(mtcars, drat > 2) # returns mtcars
\dontrun{
verify(mtcars, drat > 3) # produces error}
library(magrittr) # for piping operator
\dontrun{
mtcars \%>\%
verify(drat > 3) \%>\%
# anything here will not run}
mtcars \%>\%
verify(nrow(mtcars) > 2)
# anything here will run
alist <- list(a=c(1,2,3), b=c(4,5,6))
verify(alist, length(a) > 2)
verify(alist, length(a) > 2 && length(b) > 2)
verify(alist, a > 0 & b > 2)
\dontrun{
alist \%>\%
verify(alist, length(a) > 5)
# nothing here will run}
}
\seealso{
\code{\link{assert}}
}
|
330c22b3e5a7600e0676969e562ce39af4fdfcb8 | c7663ac9bc5171eef2fc488c150e7214f5ad74fa | /R Programming/while_s.R | 5da423154480e88991d95aa2028c2948b6ad66f0 | [] | no_license | androidpcguy/datasciencecoursera | d6c7009d4894ecce17a374d022f15fa75acad083 | bcfc15ae63ae3c09522cebc8f95cb7167e6c3366 | refs/heads/master | 2021-01-01T18:23:41.685126 | 2015-08-28T05:01:57 | 2015-08-28T05:01:57 | 40,271,179 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 132 | r | while_s.R | z <- 5
# Bounded random-walk demo: starting from z (initialised above), print z and
# step up or down by 1 per fair coin flip, stopping once z leaves [3, 10].
# NOTE(review): this initial value of `coin` is never read — the loop
# condition depends only on z and `coin` is overwritten before first use.
coin <- 1
while(z>= 3 && z <= 10) {
print(z)
# fair Bernoulli draw: 1 -> step up, 0 -> step down
coin <- rbinom(1,1,0.5)
z <- if(coin == 1) {
z + 1
} else {
z-1 }
}
|
af4400d76f85f37ab47650b24096b00434c2cc7c | 5f413d1ac57354edbb3735265c64fcc20f801b30 | /tuto5/Oil_price forecasting.R | 5683084b35ffa4f30852211e83189509f1006f1c | [] | no_license | otakoryu/Econometrics-for-Finance | b3f89d050095fcd43853c33ed1835bb5fce24e05 | 9294566a037354b49576b8ccd662eb1812e99fef | refs/heads/master | 2020-04-29T22:32:38.657075 | 2019-03-19T08:22:43 | 2019-03-19T08:22:43 | 176,450,833 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,299 | r | Oil_price forecasting.R | crude.oil<-read.delim("crude.txt",header=T)
# Exploratory analysis of crude oil prices (loaded above): convert to log
# returns, inspect autocorrelation and stationarity, fit an ARMA(1,1) and
# compare against naive benchmark forecasts.
# NOTE(review): attach() is discouraged — it masks column names globally.
attach(crude.oil)
crude<-ts(crude)
plot(crude)
# continuously compounded returns, in percent
crude.r<-100*diff(log(crude))
plot(crude.r)
library("quantmod")
chartSeries(crude.r,type = "candles")
candleChart(crude.r)
# autocorrelation structure of the returns
acf(crude.r)
pacf(crude.r)
library(tseries)
# augmented Dickey-Fuller test for stationarity of the returns
adf.test(crude.r)
length(crude.r)
# hold out the last 5 observations (202-206) for out-of-sample evaluation
crude.rt<-crude.r[1:201]
crude.te<-crude.r[202:206]
crude.tr<-ts(crude.rt)
crude.te<-ts(crude.te)
plot(crude.te)
library(forecast)
# ARMA(1,1) with intercept on the training returns
model1<-arima(crude.tr,order=c(1,0,1), include.mean=T)
summary(model1)
plot(model1)
# residual diagnostics (ACF of residuals, Ljung-Box p-values)
tsdiag(model1)
#-------------------
##Naive model
f_rw<-rwf(crude.tr,h=5)
# NOTE(review): mean() has no `h` argument (it is swallowed by ...), so this
# is just the sample mean; the mean *forecast* would be meanf(crude.tr, h=5)
# — confirm intent.
f_mean<-mean(crude.tr,h=5)
plot(f_rw)
plot(f_mean)
#--------------------------
library(forecast)
# 5-step-ahead forecast from the ARMA(1,1) fit
model1_foc<-forecast(model1, h=5)
plot(model1_foc)
accuracy(model1_foc)
# out-of-sample accuracy against the 5 held-out returns
accuracy(model1_foc,crude.te[1:5])
library(xts)
# NOTE(review): data(crude.tr) looks for a *packaged* dataset of that name
# and will not find the local variable — probably a leftover; confirm.
data(crude.tr)
crude.tr.test<-as.xts(crude.tr)
my_fc <- function(crude.rets){
  # Fit an ARMA(1,1) to the rolling window passed in by rollapplyr and
  # return a one-step-ahead forecast.
  # Bug fix: the model was previously fitted to the global series crude.tr
  # instead of the window argument crude.rets, so every window produced a
  # forecast from the same (full) series.
  model <- arima(crude.rets, order = c(1, 0, 1))
  return(forecast(model, h = 1))
}
library(zoo)
# rolling one-step-ahead forecasts over right-aligned 200-observation windows
model2_foc<-rollapplyr(crude.tr,200,my_fc)
library(xts)
# example OHLC data shipped with xts (used for the rolling-lm demo below)
data(sample_matrix)
test <- as.xts(sample_matrix)
# Fit a quartic time-trend model to the Close column of one rolling window.
# The trend index runs from 1 - nrow(x) up to 0, so the final row of the
# window corresponds to time 0.
myFun <- function(x) {
  trend_index <- seq(1 - nrow(x), 0)
  x$xt <- trend_index
  lm(Close ~ poly(xt, 4), data = x)
}
# refit the quartic trend model on each 20-row window of the OHLC series
test1 <- rollapplyr(test, width=20, FUN=myFun, by.column=FALSE)
|
d31e11f0c5b6e876acb3cf1630c21fc04f559219 | 4f2743db548d08f57ec5c441011d94c28aa0ccac | /man/metaplot_character.Rd | 07b322938a319557d38a9c792a13aaf27670396f | [] | no_license | bergsmat/nonmemica | 85cdf26fa83c0fcccc89112c5843958669373a2a | 8eddf25fdd603a5aca719a665c5b9475013c55b3 | refs/heads/master | 2023-09-04T06:10:48.651153 | 2023-08-28T13:23:18 | 2023-08-28T13:23:18 | 78,268,029 | 4 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,267 | rd | metaplot_character.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/superset.R
\name{metaplot_character}
\alias{metaplot_character}
\title{Metaplot Character, Standard Evaluation}
\usage{
metaplot_character(x, groups, meta = NULL, subset, var, ...)
}
\arguments{
\item{x}{object}
\item{groups}{columns by which to group the dataset}
\item{meta}{metadata; meta(x) by default}
\item{subset}{a condition for filtering data}
\item{var}{variables to plot}
\item{...}{passed arguments}
}
\description{
Plots character by treating as model name. A dataset
is constructed by combining the model input with a
the model output and calling metaplot with the result.
}
\seealso{
Other superset:
\code{\link{generalize}()},
\code{\link{ignored}()},
\code{\link{meta.character}()},
\code{\link{meta.numeric}()},
\code{\link{metaplot.character}()},
\code{\link{metaplot.numeric}()},
\code{\link{metasuperset}()},
\code{\link{meta}()},
\code{\link{ninput.character}()},
\code{\link{ninput.numeric}()},
\code{\link{ninput}()},
\code{\link{shuffle}()},
\code{\link{superset.character}()},
\code{\link{superset.numeric}()},
\code{\link{superset}()},
\code{\link{superspec.character}()},
\code{\link{superspec.numeric}()},
\code{\link{superspec}()}
}
\concept{superset}
|
774330d8abc54f9b0c30f139e836b68e4f2570ea | 50077e1b39dd5c29cde35299667f46f74a5479f2 | /man/synthesize.Rd | 8058982b7112adabc15647350d16a2a101145f3c | [] | no_license | dkyleward/ipfr | 812ddab7296c9406040fd92619d994470dce9de4 | 0e4aad0b624a3816132d7ddc03cca468cdfc6996 | refs/heads/master | 2021-07-02T21:40:53.377678 | 2020-04-02T13:02:12 | 2020-04-02T13:02:12 | 199,922,071 | 4 | 4 | null | 2020-11-20T15:01:03 | 2019-07-31T20:09:55 | R | UTF-8 | R | false | true | 1,513 | rd | synthesize.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/synthesize.R
\name{synthesize}
\alias{synthesize}
\title{Creates a synthetic population based on ipu results}
\usage{
synthesize(weight_tbl, group_by = NULL, primary_id = "id")
}
\arguments{
\item{weight_tbl}{the \code{data.frame} of the same name output by
\code{\link{ipu}}.}
\item{group_by}{if provided, the \code{data.frame} will be grouped by this
variable before sampling. If not provided, tidyverse/dplyr groupings will
be respected. If no grouping info is present, samples are drawn from the
entire table.}
\item{primary_id}{The field used to join the primary and secondary seed
tables. Only necessary if \code{secondary_seed} is provided.}
}
\value{
A \code{data.frame} with one record for each synthesized member of
the population (e.g. household). A \code{new_id} column is created, but
the previous \code{primary_id} column is maintained to facilitate joining
back to other data sources (e.g. a person attribute table).
}
\description{
A simple function that takes the \code{weight_tbl} output from
\code{\link{ipu}} and randomly samples based on the weight.
}
\examples{
hh_seed <- dplyr::tibble(
id = c(1, 2, 3, 4),
siz = c(1, 2, 2, 1),
weight = c(1, 1, 1, 1),
geo_cluster = c(1, 1, 2, 2)
)
hh_targets <- list()
hh_targets$siz <- dplyr::tibble(
geo_cluster = c(1, 2),
`1` = c(75, 100),
`2` = c(25, 150)
)
result <- ipu(hh_seed, hh_targets, max_iterations = 5)
synthesize(result$weight_tbl, "geo_cluster")
}
|
2c47bf158a3ff0dbb509f7c7b77f82b5817b322d | 408427dfca1d2c58bc91ff2e5477253aafbf0213 | /Web traffic/web traffic 2.R | ef7beaa4d7828fb2bb96a538f07969a9cf1389c9 | [] | no_license | krag57/DS-Python | 71a846122720b0dc97f8ed9b8b9f519a2cb4ce00 | a115a6837ffb1d9292ec3e427177fbf83578be1a | refs/heads/master | 2021-01-05T12:42:05.409127 | 2020-02-26T17:53:13 | 2020-02-26T17:53:13 | 241,027,104 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 387 | r | web traffic 2.R | #install.packages("forecast")
library(forecast)
# Read the series from stdin: the first line holds the observation count
# d[1]; the observations follow, one per line.
data <- readLines(file("stdin"))
#data <- readLines(file("input.txt"))
d <- as.numeric(data)
startDate <- as.Date("2012-10-01")
# Bug fix: ':' binds tighter than '+', so d[2:d[1]+1] meant d[(2:d[1])+1],
# i.e. elements 3..(d[1]+1), silently dropping the first observation.
# The intended slice is elements 2..(d[1]+1).
t <- ts(as.numeric(d[2:(d[1] + 1)]), start = startDate, frequency = 10)
# fixed seed so the neural-net autoregression fit is reproducible
set.seed(123)
fit <- nnetar(t, decay=0.5, maxit=10)
# 30-step-ahead forecast; printed as rounded integers by writeLines below
fcast <- forecast(fit, 30)
writeLines(as.character(round(fcast$mean))) |
0b2221a192b365934af9c2c63b5de9e8f1c29353 | d859174ad3cb31ab87088437cd1f0411a9d7449b | /autonomics.import/man/filter_exprs_replicated_in_some_subgroup.Rd | d1119a8f2eb6862e8eefa9417d032e0ed936a4a1 | [] | no_license | bhagwataditya/autonomics0 | 97c73d0a809aea5b4c9ef2bf3f886614eceb7a3c | c7ca7b69161e5181409c6b1ebcbeede4afde9974 | refs/heads/master | 2023-02-24T21:33:02.717621 | 2021-01-29T16:30:54 | 2021-01-29T16:30:54 | 133,491,102 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 884 | rd | filter_exprs_replicated_in_some_subgroup.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter.R
\name{filter_exprs_replicated_in_some_subgroup}
\alias{filter_exprs_replicated_in_some_subgroup}
\title{Filter features with replicated expression in some subgroup}
\usage{
filter_exprs_replicated_in_some_subgroup(
object,
comparator = if (contains_ratios(object)) "!=" else ">",
lod = 0
)
}
\arguments{
\item{object}{SummarizedExperiment}
\item{comparator}{'>' or '!='}
\item{lod}{numeric(1)}
}
\value{
Filtered SummarizedExperiment
}
\description{
Filter features with replicated expression in some subgroup
}
\examples{
require(magrittr)
if (require(autonomics.data)){
object <- autonomics.data::stemcomp.proteinratios
object \%>\% filter_exprs_replicated_in_some_subgroup()
object <- autonomics.data::glutaminase
object \%>\% filter_exprs_replicated_in_some_subgroup()
}
}
|
53dad58a767ca93921b31b94c8f47774e333590d | f527faa5336900eb9108fde1c2da41c117fdc415 | /src/regres.R | 4aa72539d27614d606d1075b39d8f6cfb175f389 | [] | no_license | racarlos/optimizeR | 04c29e48bef269b1f2dcbf3ca8462ec74810a8ee | ff12cecd3dc95e4ed9cb59f90227a606559d3639 | refs/heads/master | 2023-03-24T16:59:56.498359 | 2021-03-20T10:21:45 | 2021-03-20T10:21:45 | 349,691,407 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,492 | r | regres.R | # Author: Carlos Robie A.
# Student No: 2018 - 03026
# Gauss Jordan Function for solving the system of equations
GaussJordanMethod <- function(matrix){
  # Solve a linear system given as an augmented coefficient matrix using
  # Gauss-Jordan elimination with partial pivoting.
  #
  # Args:
  #   matrix: numeric augmented matrix (n rows, n + 1 columns).
  # Returns:
  #   list(solutionSet = <numeric solution vector>,
  #        matrix      = <fully reduced matrix>),
  #   or NA (after printing a message) when no unique solution exists.
  test <- matrix
  row <- nrow(test)
  col <- ncol(test)
  solutionSet <- c()
  for (i in 1:row) {
    if (i != row) {
      # Partial pivoting: consider ONLY rows i..row in column i.
      # Bug fix: the pivot row index was previously taken from
      # order(abs(test[, i])) over the WHOLE column, which could select an
      # already-reduced row above row i and undo the elimination done so far.
      pivot <- max(abs(test[i:row, i]))
      if (pivot == 0) {
        # column is all zeros on/below the diagonal: singular system
        print("No Unique Solution Exists.")
        return(NA)
      }
      index <- (i - 1) + which.max(abs(test[i:row, i]))
      if (index != i) {
        swaprow <- test[i, ]
        test[i, ] <- test[index, ]
        test[index, ] <- swaprow
      }
    }
    # Normalise the pivot row so the diagonal entry becomes 1.
    # NOTE(review): when i == row and test[row, row] == 0 this divides by
    # zero and yields NaN/Inf (same as the original code) — confirm whether
    # a singularity check is wanted here too.
    test[i, ] <- test[i, ] / test[i, i]
    # Eliminate column i from every other row (full Gauss-Jordan reduction).
    for (j in 1:row) {
      if (i == j) {
        next
      }
      test[j, ] <- test[j, ] - test[j, i] * test[i, ]
    }
  }
  # The solution is the final (augmented) column of the reduced matrix.
  for (i in 1:row) {
    solutionSet[i] <- test[i, col]
  }
  return(list(solutionSet = solutionSet, matrix = test))
}
#Function
PolynomialRegression <- function(x,y,n){
  # Fit a degree-n polynomial y ~ x by least squares, building the normal
  # equations and solving them with GaussJordanMethod.
  #
  # Args:
  #   x, y: vectors of predictor / response observations (equal length).
  #   n:    degree of the polynomial to fit.
  # Returns:
  #   list(augcoeffmatrix      = normal-equations augmented matrix,
  #        unknowns            = fitted coefficients (constant first),
  #        polynomial_string   = the fitted polynomial as R source text,
  #        polynomial_function = the same polynomial as a callable function)
  a <- as.numeric(x)
  print("Left: ")
  print(a)
  b <- as.numeric(y)
  print("Right ")
  print(b)
  # Normal equations: (n + 1) unknowns plus an augmented RHS column.
  matrix <- matrix(NA, nrow = n + 1, ncol = n + 2)
  row <- nrow(matrix)
  col <- ncol(matrix)
  for (i in 1:row) {
    for (j in 1:col) {
      if (j == col) {
        # right-hand side entry: sum of x^(i-1) * y
        sumz <- 0
        for (k in 1:length(a)) {
          sumz <- sumz + ((a[k]^(i - 1)) * b[k])
        }
        matrix[i, j] <- sumz
      } else {
        # coefficient entry: sum of x^(i+j-2)
        # (previously this was also computed for j == col and then
        # overwritten; the redundant work and unused locals are removed)
        matrix[i, j] <- sum(a^(i + j - 2))
      }
    }
  }
  # Solve for the polynomial coefficients.
  solution <- GaussJordanMethod(matrix)
  set <- solution$solutionSet
  # Build the string form of the fitted polynomial. The historical output
  # format (including its spacing) is preserved exactly.
  strsum <- "function(x) "
  strsum <- paste(strsum, set[1])
  strsum <- paste(strsum, "+ ")
  count <- 1
  for (i in 2:length(set)) {
    parsed <- set[i]
    equate <- ""
    equate <- paste(equate, parsed, sep = "")
    equate <- paste(equate, " * x^", sep = "")
    equate <- paste(equate, count, sep = "")
    if (i != length(set))
      equate <- paste(equate, " + ", sep = "")
    strsum <- paste(strsum, equate, sep = "")
    count <- count + 1
  }
  # eval(parse(...)) is normally discouraged, but here the parsed text is
  # generated entirely from the numeric coefficients computed above.
  func <- eval(parse(text = strsum))
  return(list(augcoeffmatrix = matrix,
              unknowns = set,
              polynomial_string = strsum,
              polynomial_function = func))
}
|
f998259ff8323a62038555e6da4f011015591fdb | 5c2e9b10687dc0d7c98173698e5c538f84e4538e | /R/environments.R | 85968b59d4edc77f833c057c252f423a6f79b13d | [] | no_license | miraisolutions/styler | 3e3c5facd4a1bf4d2ed709af40380f4a42e95bbb | de626c98680c9ddf82e3c78d2d9926c7d657c172 | refs/heads/master | 2020-03-20T14:45:13.829347 | 2019-02-15T18:48:01 | 2019-02-15T18:48:01 | 137,494,448 | 1 | 0 | null | 2019-02-15T18:48:02 | 2018-06-15T14:03:11 | R | UTF-8 | R | false | false | 1,989 | r | environments.R | #' Work with parser versions
#'
#' The structure of the parse data affects many operations in styler. There was
#' unexpected behaviour of the parser that styler was initially designed to work
#' around. Examples are [#187](https://github.com/r-lib/styler/issues/187),
#' [#216](https://github.com/r-lib/styler/issues/216),
#' [#100](https://github.com/r-lib/styler/issues/100) and others. With
#' [#419](https://github.com/r-lib/styler/issues/419), the structure of the parse
#' data changes and we need to dispatch for older versions. As it is inconvenient
#' to pass a parser version down in the call stack in various places, the
#' environment `env_current` is used to store the current version *globally*
#' but internally.
#'
#' We version the parser as follows:
#'
#' * version 1: Before fix mentioned in #419.
#' * version 2: After #419.
#'
#' The following utilities are available:
#'
#' * `parser_version_set()` sets the parser version in the environment
#' `env_current`.
#' * `parser_version_get()` retrieves the parser version from the
#' environment `env_current`.
#' * `parser_version_find()` determines the version of the parser from parse
#' data. This does not necessarily mean that the version found is the
#' actual version, but it *behaves* like it. For example, code that does not
#' contain `EQ_ASSIGN` is parsed the same way with version 1 and 2. If the
#' behaviour is identical, the version is set to 1.
#' @param version The version of the parser to be used.
#' @param pd A parse table such as the output from
#' `utils::getParseData(parse(text = text))`.
#' @keywords internal
parser_version_set <- function(version) {
  # Record the active parser version in the package-internal state holder.
  assign("parser_version", version, envir = env_current)
}
#' @rdname parser_version_set
parser_version_get <- function() {
  # Look up the stored parser version; like `$`, this yields NULL (rather
  # than an error) when no version has been set yet.
  get0("parser_version", envir = env_current, inherits = FALSE)
}
#' @rdname parser_version_set
parser_version_find <- function(pd) {
  # Version 2 parse data contains the "equal_assign" token; code that parses
  # identically under both versions is reported as version 1.
  # (Was `ifelse(cond, 2, 1)`: ifelse() on a scalar condition is an R
  # anti-pattern; a plain if/else is clearer and returns the same value.)
  if (any(pd$token == "equal_assign")) 2 else 1
}
# Package-private mutable state (holds the active parser version). Built
# with an empty parent so lookups cannot fall through to other environments.
env_current <- rlang::new_environment(parent = rlang::empty_env())
|
3c790c9a8ec0488f47ebdccc5fb1f987925a85aa | ba95ca23cd4d1463fba07d6f88cdfa5fb0e7ebce | /R/localDB.R | a68ab5ab815ada8c32087851abc208c8dbf11c3b | [] | no_license | adrianacarmo/FunctSNP | b0ae937ac0dfb09f54a2658bc066b8cc24c58c2a | 1b52238c1d203d2c3df8ef49f7f18cbe5cbbf7f6 | refs/heads/master | 2021-01-18T02:39:48.461738 | 2010-02-01T00:00:00 | 2010-02-01T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,282 | r | localDB.R | # download pre-assembled/supported database
# Download a pre-assembled SNP database for one or more supported species
# into the package's extdata directory, unzipping .zip archives in place.
#
# Args:
#   speciesCode: one or more species codes from .supportedConfig; defaults
#                to the package-level .FunctSNPspecies when missing.
#   db.list:     if TRUE, only print the table of downloadable species.
downloadDB <- function (speciesCode,db.list=FALSE) {
  dest.dir <- paste(.install.location(), "extdata", sep=.Platform$file.sep)
  #list databases that can be downloaded
  if (db.list==TRUE) {
    print(.supportedConfig[,c("code","species")])
  } else {
    if(missing(speciesCode)) {
      # default to the value stored in the global variable .FunctSNPspecies
      if (is.null(.FunctSNPspecies))
      {
        #assign empty value to species code which will be trapped below
        speciesCode <- ""
      }
      else
        speciesCode <- .FunctSNPspecies
    }
    # user specified >=1 species code
    # (every supplied code must appear in the supported table)
    if (length(which(.supportedConfig[,"code"] %in% speciesCode)) != length(speciesCode)){
      cat("Error: You must specify at least one species code from the following valid species:\n")
      print(.supportedConfig[,c("code","species")])
    } else {
      # all species codes specified by user are valid
      # download it/them
      urls <- .supportedConfig[which(.supportedConfig[,"code"] %in% speciesCode),c("url")]
      filenames <- basename(urls)
      destfiles <- paste(dest.dir, filenames, sep=.Platform$file.sep)
      for(i in 1:length(urls)) {
        # NOTE(review): open() errors (rather than returning non-NULL) when
        # the URL cannot be reached, so the is.null() test only guards the
        # success path and a failed open leaves `fh` unclosed — confirm.
        fh <- url(urls[i])
        if(is.null(open(fh))) {
          download.file( urls[i], destfiles[i], mode="wb")
          # NOTE(review): grep() returns integer(0) for non-.zip filenames,
          # which would make this if() fail; grepl() would be the robust
          # test — confirm all configured URLs end in .zip.
          if(grep('\\.(zip)$', destfiles[i], perl=TRUE)==1) {
            message ("Please wait ... decompressing database")
            flush.console()
            # extract next to the archive, then remove the archive itself
            unzip(destfiles[i], exdir=dest.dir)
            file.remove(destfiles[i])
          }
          close(fh)
        }
      }
    }
  }
}
# make new DB from original data sources using dbAutoMaker
# and the species-specific ini files that dbAutoMaker knows about
# Build a species SNP database from the original public data sources by
# driving the bundled dbAutoMaker perl pipeline, then move the resulting
# <code>SNP.db file into the package's extdata directory.
#
# Args:
#   speciesCode: one or more supported species codes; defaults to the
#                package-level .FunctSNPspecies when missing.
#   db.list:     if TRUE, only print the table of buildable species.
makeDB <- function (speciesCode,db.list=FALSE) {
  #list databases that can be downloaded
  if (db.list==TRUE) {
    print(.supportedConfig[,c("code","species")])
  } else {
    if(missing(speciesCode)) {
      # default to the value stored in the global variable .FunctSNPspecies
      # NOTE(review): unlike downloadDB(), a NULL .FunctSNPspecies is not
      # trapped here — confirm it is always set before makeDB() is called.
      speciesCode <- .FunctSNPspecies
    }
    # user specified >=1 species code
    if (length(which(.supportedConfig[,"code"] %in% speciesCode)) != length(speciesCode)){
      cat("Error: You must specify at least one species code from the following valid species:\n")
      print(.supportedConfig[,c("code","species")])
    } else {
      # all specified species codes are OK
      # get current working directory, and change back to it when the function exits
      cwd <- getwd()
      on.exit(setwd(cwd))
      # move into the admin dir for dbAutoMaker
      dbAutoMakerDir <- paste(.install.location(),"dbAutoMaker",sep=.Platform$file.sep)
      setwd(paste(dbAutoMakerDir, "Admin", sep=.Platform$file.sep))
      for (species in speciesCode) {
        message ("\nPlease wait ... the database creation can take many hours\n")
        flush.console()
        # run dbAutoMaker for each species specified
        # NOTE(review): system() returns the shell exit status, so if(r) is
        # TRUE (failure) for any non-zero status; if system() itself errors,
        # try() returns a "try-error" object and if(r) would itself error —
        # confirm this path is acceptable.
        r <- try(system (paste ("perl", "startup.pl", species)))
        if(r) {
          stop("dbAutoMaker was unable to generate a DB for: ", species)
        } else {
          # check the DB file was created successfully
          dirName <- .supportedConfig[which(.supportedConfig[,"code"]==species),"dir"]
          db.filename <- paste(species,"SNP.db",sep="")
          db.filepath <- paste(dbAutoMakerDir,"Species", dirName,"Databases",db.filename,sep=.Platform$file.sep)
          if(file.exists(db.filepath)) {
            # move the freshly built DB into the package's extdata directory
            from <- db.filepath
            to <- paste(.install.location(),"extdata",paste(species,"SNP.db",sep=""),sep=.Platform$file.sep)
            res <- file.rename(from,to)
            #Warn the user if file.rename failed
            if (!res)
            {
              stop("Unable to move database ...\nFrom: ", from, "\nTo: ", to, "\nTry moving the database manually\n")
            }
          } else {
            stop("Something didn't go according to plan for species code (", species, "): Couldn't find path - ", db.filepath)
          }
        }
      }
    }
  }
}
installedDBs <- function() {
  # Print the locally installed SNP databases together with usage hints for
  # selecting, downloading or building databases.
  cat("Available local databases are:\n")
  if (nrow(.localDBs()) == 0) {
    cat("\nWarning: There are no local databases \n")
  } else {
    print(.localDBs())
    cat("\nTo set a database as default: setSpecies", "(\"<code>\")")
    cat("\n\te.g. setSpecies", "(\"bta\")", "\n", sep = "")
  }
  cat("\nTo download the most recent build of a supported database use downloadDB()\n")
  cat("To build a database from public databases use makeDB()\n")
}
# add custom database to list of DBs
# Register a user-built database so FunctSNP recognises the species.
#
# Args:
#   speciesCode: short code for the species (e.g. "bta").
#   speciesName: human-readable species name.
# Side effects: appends a row to extdata/user_databases.txt, updates the
# package-level .supportedSpecies table and refreshes userAddedSpecies().
addSpecies <- function(speciesCode,speciesName)
{
  # compute the user-database path once instead of rebuilding it twice
  db.file <- paste(.install.location(), "extdata", "user_databases.txt",
                   sep = .Platform$file.sep)
  userDBs <- read.table(db.file, header = TRUE, sep = "\t",
                        colClasses = "character")
  # Appending a named one-row data.frame works whether or not the table
  # already has rows, so the previous nrow() branch (which appended an
  # unnamed character vector and used `=` assignment) is unified here.
  userDBs <- rbind(userDBs, data.frame(code = speciesCode, species = speciesName))
  #Add to the supported species
  .supportedSpecies <<- rbind(.supportedSpecies,
                              data.frame(code = speciesCode, species = speciesName))
  write.table(userDBs, db.file, row.names = FALSE, quote = FALSE, sep = "\t")
  userAddedSpecies(refresh = TRUE)
}
|
1ee29d5f0d0d34cff620a454449f5614e5038239 | 990a049d3ad2341cc32b82e14ee94a342f9d3a8f | /man/plus.Rd | 12ae59cbae9d2ae404378dad317f88fd1d9cd92c | [
"Apache-2.0"
] | permissive | JDOsborne1/AOC2019 | 7fb5644589a283afb5e565b4e3a4162922c2315e | 5c3e50a8b2c1b0de7ea0499a713fea396e60cc87 | refs/heads/master | 2020-09-23T08:12:24.999663 | 2019-12-14T12:21:28 | 2019-12-14T12:21:28 | 225,449,016 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 264 | rd | plus.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Day_3.R
\name{plus}
\alias{plus}
\title{Written wrapper around `+`}
\usage{
plus(x, y)
}
\arguments{
\item{x}{LHS}
\item{y}{RHS}
}
\value{
The sum of \code{x} and \code{y} (the result of \code{x + y}).
}
\description{
Written wrapper around `+`
}
|
20e5c326c59444a13998a92f0e06eab0d505c8fe | 53b811328857ef4a8023d963c49ec9c61427f677 | /src/Linear_method.R | c6a88e8e50cd77c63b68f365e4070019c840cef6 | [] | no_license | ClemenceKiehl/Prediction-of-odour-based-on-molecular-property | 993f7b11c0373b3fc1ea422d908c20566b585c75 | a0f67ca60f3ce2e55eeb3d66734620cae03c2613 | refs/heads/main | 2023-03-19T07:08:56.788532 | 2020-12-21T16:56:58 | 2020-12-21T16:56:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,574 | r | Linear_method.R | library(readr)
library(tidymodels)
library(ROCR)
library(glmnet)
library(caret)
library(PerformanceAnalytics)
library(tensorflow) #use_session_with_seed
# fix the R / numpy / TensorFlow random seeds for reproducibility
use_session_with_seed(199)
#Load data
test_data <- read_csv(file.path("data/test_data.csv"))
training_data <- read_csv(file.path("data/training_data.csv"))
data <- na.omit(training_data)
# drop the unused response and recode the remaining targets to 0/1 numerics;
# columns 1-2 of `data` are treated as the response columns below
data$VALENCE.PLEASANTNESS <- NULL
data$Intensity <- as.numeric(as.logical(data$Intensity == "high"))
data$SWEETORSOUR <- as.numeric(data$SWEETORSOUR)
test_data$Intensity <- as.numeric(as.logical(test_data$Intensity == "high"))
#FEATURE SELECTION BY REMOVING PREDICTORS WITH LOW VARIANCES
# deleting by name while iterating is safe: colnames() was snapshotted once
for(i in colnames(data[,c(-1,-2)])) {
  if(var(data[[i]]) <= 1e-5) {
    data[[i]] <- NULL
  }
}
#FEATURE SELECTION BY REMOVING CORRELATED PREDICTORS
# The correlation matrix is computed on columns 3..ncol(data) only, so the
# indices returned by findCorrelation are relative to that submatrix and
# must be shifted by 2 before dropping columns from the full data frame.
# (Bug fix: the unshifted indices were previously applied to `data`, which
# dropped the wrong columns — potentially including the response columns.)
data_cor <- cor(as.matrix(data[, 3:ncol(data)]))
hc <- findCorrelation(data_cor, cutoff = 0.99)
data_intensity <- data$Intensity
data_SWEETORSOUR <- data$SWEETORSOUR
# guard the empty case: data[, -integer(0)] would select ZERO columns
if (length(hc) > 0) {
  data <- data[, -(sort(hc) + 2)]
}
data$SWEETORSOUR <- data_SWEETORSOUR
data$Intensity <- data_intensity
#FEATURE ENGINEERING BY CHECKING SKEWNESS & APPLY LOG
get_skewed_names <- function(d){
  # Return the names of predictor columns (all but the first two, which hold
  # the responses) whose absolute sample skewness (type 2, bias-corrected)
  # exceeds 0.7 — candidates for a log transform.
  # Relies on skewness() from PerformanceAnalytics (attached at the top).
  ret <- NULL  # fix: was initialised twice (c() and then NULL)
  for (i in colnames(d[, c(-1, -2)])) {
    if (abs(skewness(d[[i]], type = 2)) > 0.7) {
      ret <- append(ret, i)
    }
  }
  ret
}
return_logd_data <- function(d, skw_names){
  # Log-transform each listed column of d. Columns that contain a zero are
  # handled entrywise so the zeros are left untouched (log(0) would be
  # -Inf); all other columns are logged in one vectorised step.
  for (col_name in skw_names) {
    values <- d[[col_name]]
    if (0 %in% values) {
      nonzero <- values != 0
      values[nonzero] <- log(values[nonzero])
      d[[col_name]] <- values
    } else {
      d[[col_name]] <- log(values)
    }
  }
  return(d)
}
# log-transform the skewed predictors identified above
sk_names <- get_skewed_names(data)
data <- return_logd_data(data, sk_names)
##########SIMPLE REGRESSION
# cv_data <- vfold_cv(data, v = 10) # create the 10 folds
# AUC_values <- vector()
# ntrees <- vector()
# for (i in 1:10) {
#   training_set <- analysis(cv_data$splits[[i]])
#   validation_set <- assessment(cv_data$splits[[i]])
#   
#   logreg.fit <- glm(SWEETORSOUR ~ ., training_set, family = 'binomial')
#   logreg.pred <- predict(logreg.fit, validation_set, type = "response")
#   ROCR.pred.logreg <- prediction(logreg.pred, validation_set$SWEETORSOUR)
#   AUC_values <- c(AUC_values, performance(ROCR.pred.logreg, 'auc')@y.values[[1]])
# }
# mean(AUC_values)
# 
# logreg.fit <- glm(SWEETORSOUR ~ ., data.train, family = 'binomial')
# logreg.pred <- predict(logreg.fit, data.validation, type = "response")
# ROCR.pred.logreg <- prediction(logreg.pred, data.validation$SWEETORSOUR)
# print(paste('LogReg:', performance(ROCR.pred.logreg, 'auc')@y.values, sep = ' '))
##########SIMPLE REGRESSION
# Splitting the data into training and validation sets (75% / 25%)
idx.train <- sample(nrow(data), nrow(data)*0.75)
data.train <- data[idx.train,]
data.validation <- data[-idx.train,]
# design matrices without the response column; non-finite values (e.g.
# artefacts of the log transform) are zeroed before fitting
x <- data.matrix(data.train[,names(data.train) != "SWEETORSOUR"])
x[!is.finite(x)] <- 0
y <- data.train$SWEETORSOUR
x.validation <- data.matrix(data.validation[,names(data.validation) != "SWEETORSOUR"])
x.validation[!is.finite(x.validation)] <- 0
y.validation <- data.validation$SWEETORSOUR
#LOGISTIC REGRESSION WITH LASSO REGULARIZATION
# NOTE(review): cv.glmnet defaults to family = "gaussian", so despite the
# heading this fits a linear-probability lasso, not logistic regression —
# confirm intent. Also the argument is spelled `nfolds`; `nfold` works only
# via partial matching.
cv.lasso <- cv.glmnet(x, y , alpha = 1, nfold = 10)
plot(cv.lasso)
# refit at the CV-selected lambda and predict on the validation set
best.lasso <- glmnet(x, y, alpha = 1, lambda = cv.lasso$lambda.min)
lasso.pred <- predict(best.lasso, s = cv.lasso$lambda.min, newx = x.validation, type = "response")
# ROC curve and AUC on the held-out validation set
plot(performance(prediction(lasso.pred, y.validation), 'tpr', 'fpr'))
auc.lasso <- performance(prediction(lasso.pred, y.validation), measure = 'auc')
auc.lasso.value <- auc.lasso@y.values[[1]]
print(paste('LogReg + Lasso AUC :', auc.lasso.value, sep = ' '))
|
ed4b70be67cb81763c90da146209f22a1377b59c | be470e53cd8023986847e721c0f0df462b2e01d2 | /man/ChoiceModelDesign.Rd | df21d854ac1392c4789bc06d57d28c0bc4c8b581 | [] | no_license | NailKarimli/flipChoice | a48c924d50f8342aa4500332e2c66b24dff7d724 | 948d2424aa16779b3b05dde7b55ae2a7a273565e | refs/heads/master | 2023-03-16T18:42:24.068699 | 2018-03-08T02:39:33 | 2018-03-08T02:39:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 4,643 | rd | ChoiceModelDesign.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/experimentaldesign.R
\name{ChoiceModelDesign}
\alias{ChoiceModelDesign}
\title{Choice modeling experimental design}
\usage{
ChoiceModelDesign(design.algorithm = c("Random", "Shortcut",
"Balanced overlap", "Complete enumeration", "Efficient", "Partial profiles"),
attribute.levels = NULL, prior = NULL, n.questions, n.versions = 1,
alternatives.per.question, prohibitions = NULL, none.alternatives = 0,
labeled.alternatives = FALSE, n.constant.attributes = NULL,
output = "Labeled design", seed = 54123)
}
\arguments{
\item{design.algorithm}{The algorithm used to create the
design. One of \code{"Random"}, \code{"Shortcut"},
\code{"Balanced overlap"}, \code{"Complete enumeration"},
\code{"Efficient"} and \code{Partial profiles}.}
\item{attribute.levels}{\code{\link{list}} of \code{\link{vector}}s
containing the labels of levels for each attribute, with names
corresponding to the attribute labels; \emph{or} a character
matrix with first row containing attribute names and subsequent
rows containing attribute levels.}
\item{prior}{Character matrix containing prior values for the model
coefficients; only used for \code{design.algorithm ==
"Efficient"}; see Details.}
\item{n.questions}{Integer; the number of questions asked to each
respondent.}
\item{n.versions}{Integer; the number of versions of the survey to
create.}
\item{alternatives.per.question}{Integer; the number of alternative
products shown in each question. Ignored if
\code{"labeled.alternatives"} is TRUE.}
\item{prohibitions}{Character \code{\link{matrix}} where each row
is a prohibited alternative consisting of the levels of each
attribute. If a level is \code{""} or is \code{"All"} then all
levels of that attribute in combination with the other
specified attribute levels are prohibited.}
\item{none.alternatives}{Integer; the number of 'None' in all
questions.}
\item{labeled.alternatives}{Logical; whether the first attribute
labels the alternatives.}
\item{n.constant.attributes}{Integer; the number of attributes to keep
constant.}
\item{output}{One of \code{"Labeled design"} or \code{"Inputs"}.}
\item{seed}{Integer; random seed to be used by the algorithms.}
}
\value{
A list with components
\itemize{
\item \code{design} - a numeric array of dimensions (number of questions by alternatives per
question by number of attributes) where each value is the index of a level. Ignoring any none
alternatives.
\item \code{design.with.none} - as per \code{design} except one additional row per none alternative
is added to each question with \code{NA} for all attribute levels.
\item \code{labeled.design} - as per \code{design.with.none} except indices of levels are
replaced by their labels.
\item \code{design.algorithm} - as per input.
\item \code{attribute.levels} - as per input.
\item \code{prohibitions} - as per input.
\item \code{n.questions} - as per input.
\item \code{n.versions} - as per input.
\item \code{alternatives.per.question} - as per input.
\item \code{none.alternatives} - as per input.
\item \code{output} - as per input.
\item \code{db.error} - the Db-error of \code{design}.
\item \code{d.error} - the D-error of \code{design}.
\item \code{model.matrix} - the model matrix of dummy coded variables for each alternative
in every choice set.
\item \code{balances.and.overlaps} a list with components
\itemize{
\item\code{singles} a \code{list} of the counts of each level per attribute.
\item\code{pairs} a \code{list} of the counts of pairwise occurences of levels
for each pair of attributes.
\item\code{overlaps} a \code{vector} of the percentage of questions that include
one or more duplicated level per attribute.
}
}
}
\description{
Creates choice model experimental designs according to a given algorithm.
}
\details{
If \code{prior} is supplied and \code{design.algorithm ==
"Efficient"}, the number of coefficients must correspond
to the number of attributes/attribute levels specified in
\code{attribute.levels}. If \code{prior} is \code{NULL}, the prior for the
coefficients is assumed to be identically zero. If the supplied matrix
contains two columns, the first column is taken as the prior
mean for the coefficients and the second is taken to be the
prior variances. If only one column is present, the prior for
the coefficients is assumed to be centered at those values.
}
\examples{
x <- CreateExperiment(c(3, 5, 7, 10), 20)
ChoiceModelDesign("Random", x$attribute.levels, n.questions = 30,
alternatives.per.question = 4, prohibitions = x$prohibitions)
}
|
cd0a42565dad6f22e4fed5a97c872c0d7e54c83b | b406befbab9bcf0ea7c0eac21d6e163c3888ef9a | /man/ftest.pow.Rd | 5878744f682965ad91a2734bd83fc0a3b2b8cd81 | [] | no_license | olli0601/abc.star | 0f5fc8a3d1d4ba7edb3719dc46b688454bab9cfa | dbda96d2e52b096e74a2fbdef32f3443b45da7a7 | refs/heads/master | 2016-09-15T03:35:43.924846 | 2016-04-14T20:11:38 | 2016-04-14T20:11:38 | 8,214,145 | 0 | 1 | null | 2016-04-03T13:43:53 | 2013-02-15T07:11:58 | R | UTF-8 | R | false | true | 2,483 | rd | ftest.pow.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ftest.R
\name{ftest.pow}
\alias{ftest.pow}
\title{\code{ftest} power function}
\usage{
ftest.pow(rho, tau, n.of.y, p, alpha = 0.01, support = c(0, Inf),
log = FALSE, norm = 1)
}
\arguments{
\item{rho}{Vector of quantiles}
\item{tau}{Upper boundary point of the equivalence region}
\item{p}{Number of variables}
\item{alpha}{Level of the equivalence test}
\item{support}{Support of the truncated power function (vector of dimension 2).}
\item{log}{If \code{TRUE}, the power function is returned on the log scale.}
}
\description{
Compute the power of the one-sample multivariate equivalence test for population means of multivariate normal summary values with unknown population variance.
}
\note{
The power function can be truncated to \code{support}.
}
\examples{
# power function of the F-test, to test equality of means for multivariate
# normal samples with unknown covariance matrix
#set number of variables (i.e. summary statistics)
p <- 3
#set number of simulations
n <- 100
#calculate power for fixed equivalence value
tau <- 1.2
rho <- seq(0, .5, length = 1024)
ftest.pow(rho, tau, n = n, p = p)
# power increases as size of equivalence region increases but power function
# flattens out as equivalence region gets large
tmp <- lapply(c(0.05, 0.1, 0.2, 0.3), function(tau)
{
data.table(tau = as.factor(tau), rho = rho, power = ftest.pow(rho, tau, n, p, alpha = 0.01))
})
tmp <- do.call('rbind', tmp)
pp <- ggplot(tmp, aes(x = rho, y = power, colour = tau, group = tau)) + geom_line() + labs(y = 'Power\\n(ABC acceptance probability)')
print(pp)
# power increases as number of simulations increase
tau <- 0.2
rho <- seq(0, .3, length = 1024)
tmp <- lapply(c(25, 50, 100, 200, 400), function(n)
{
data.table(n = as.factor(n), rho = rho, y = ftest.pow(rho, tau, n, p, alpha = 0.01), d='power')
})
tmp <- do.call('rbind', tmp)
pp <- ggplot(tmp, aes(x = rho, y = y, colour = n, group = n)) + geom_line() + labs(y = 'Power\\n(ABC acceptance probability)')
print(pp)
# add likelihood density to last power plot
t2.x <- 0.25
tmp <- rbind(tmp, data.table(n=n, rho=rho, y=ftest.sulkl(rho, t2.x, n, p, norm = 1, support= c(0,Inf), log=FALSE), d='prtl.lkl'))
pp <- ggplot(tmp, aes(x = rho, y = y, colour = n, linetype=d, group = interaction(n,d))) + geom_line() + labs(y = 'Power\\n(ABC acceptance probability)')
print(pp)
}
\references{
http://arxiv.org/abs/1305.4283
}
|
25d3f8fa94feb6eb37a69c19fae00d37113d62b9 | 4a59f3f18c52e582312d5c2323c469990bf0ed0d | /R/DTDWT.R | e06b51c06394d2542a2cbb921fea7ed26849a90d | [
"Unlicense"
] | permissive | dr-offig/listenR | 8d10fbdea7fec3ed0e2448a4e006f662a208b7a8 | 691b72f52aa3321d1d45863ee712e9a4d8c025be | refs/heads/master | 2020-04-04T02:49:22.150174 | 2019-06-26T04:31:01 | 2019-06-26T04:31:01 | 155,697,483 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,966 | r | DTDWT.R | library(waveslim)
library(zoo)
# Filter banks shared by every transform in this file:
#   Faf / Fsf: analysis / synthesis filters for the first decomposition
#              stage (from waveslim::FSfarras).
#   af  / sf : analysis / synthesis filters for the remaining stages
#              (from waveslim::dualfilt1).
Faf <- waveslim::FSfarras()$af
Fsf <- waveslim::FSfarras()$sf
af <- waveslim::dualfilt1()$af
sf <- waveslim::dualfilt1()$sf
# Forward dual-tree discrete wavelet transform of the signal `x`,
# decomposed over `J` levels, using the file-level filter banks
# (Faf for the first stage, af for the remaining stages).
# Returns waveslim's dual-tree coefficient list.
DTDWT <- function(x, J) {
  waveslim::dualtree(x, J, Faf, af)
}
# Inverse dual-tree DWT: reconstruct a signal from a coefficient list
# `wc` as produced by DTDWT(). The decomposition depth is implied by
# the list length (the final element holds the scaling coefficients).
# Uses the file-level synthesis filter banks Fsf and sf.
DTDWT.inverse <- function(wc) {
  n_levels <- length(wc) - 1
  waveslim::idualtree(wc, n_levels, Fsf, sf)
}
# Zero out selected dual-tree wavelet coefficients.
#
# `wc` is a dual-tree coefficient list: one element per decomposition
# level, each holding two coefficient vectors (one per tree).
# `predicate(level, tree, index, value)` is evaluated for every
# coefficient; where it returns TRUE the coefficient is set to 0.
#
# Returns the modified copy of `wc` (the caller's list is untouched).
#
# Fixes over the original: the inner loop used 1:length(x), which
# yields c(1, 0) for an empty coefficient vector and indexed out of
# bounds; seq_along() iterates zero times instead. The loop variable
# `c` also shadowed base::c and has been renamed.
suppress_coefficients <- function(wc, predicate)
{
  for (level in seq_along(wc)) {
    for (tree in 1:2) {
      for (idx in seq_along(wc[[level]][[tree]]))
        if (predicate(level, tree, idx, wc[[level]][[tree]][[idx]])) {
          wc[[level]][[tree]][[idx]] <- 0
        }
    }
  }
  return(wc)
}
# Per-level suppression thresholds: for each decomposition level the
# third quartile (75th percentile) of the absolute coefficient values,
# computed separately for both trees. Returns a length(wc) x 2 matrix:
# column 1 = first tree, column 2 = second tree.
quantile3rd_thresh <- function(wc) {
  tree_q3 <- function(tree) {
    vapply(seq_along(wc),
           function(lvl) stats::quantile(abs(wc[[lvl]][[tree]]))[[4]],
           numeric(1))
  }
  cbind(tree_q3(1), tree_q3(2))
}
# Build a predicate for suppress_coefficients() that flags every
# coefficient whose magnitude falls below the per-level, per-tree
# third-quartile threshold from quantile3rd_thresh(). The threshold
# matrix is captured once, up front, in the returned closure.
quantiler <- function(wc) {
  thresholds <- quantile3rd_thresh(wc)
  function(j, c, t, x) {
    abs(x) < thresholds[j, c]
  }
}
# Magnitude ("power") of the dual-tree coefficients at each detail
# level: sqrt(tree1^2 + tree2^2), treating the two trees as real and
# imaginary parts of a complex coefficient. The last element of `wc`
# (the final-level scaling coefficients) is excluded from the result.
#
# Fixes over the original: the subset used 1:(length(wc)-1), which is
# c(1, 0) for a length-1 input and returned a corrupt two-element list;
# seq_len() yields an empty result instead. The magnitude of the
# dropped last element is also no longer computed needlessly.
DTDWT.power <- function(wc)
{
  detail_levels <- wc[seq_len(length(wc) - 1)]
  lapply(detail_levels, function(lev)
  {
    sqrt(lev[[1]]^2 + lev[[2]]^2)
  })
}
# Reconstruct a signal from only the selected decomposition `levels`:
# every coefficient outside those levels is zeroed before the inverse
# transform. Optionally plots the result; returns it invisibly.
DTDWT.extract <- function(wc, levels, draw = FALSE) {
  kept <- suppress_coefficients(wc, function(j, c, t, x) !(j %in% levels))
  signal <- DTDWT.inverse(kept)
  if (draw) {
    plot(signal, type = "l")
  }
  invisible(signal)
}
# Complement of DTDWT.extract(): reconstruct a signal with the selected
# decomposition `levels` removed (their coefficients zeroed) while all
# other levels are kept. Optionally plots the result; returns it
# invisibly.
DTDWT.mute <- function(wc, levels, draw = FALSE) {
  remaining <- suppress_coefficients(wc, function(j, c, t, x) j %in% levels)
  signal <- DTDWT.inverse(remaining)
  if (draw) {
    plot(signal, type = "l")
  }
  invisible(signal)
}
# DTDWT.mute <- function(wc,levels)
# {
# for (index in levels)
# {
# lev <- wc[[index]]
# lev1 <- lev[[1]]
# lev2 <- lev[[2]]
# lev1[TRUE] <- 0
# lev2[TRUE] <- 0
# }
# J <- length(wc) - 1
# return(DTDWT.inverse(wc))
# }
# lev <- lapply(1:length(ps),function(j) { cbind((1:length(ps[[j]]))*2^(j-1),ps[[j]]) })
# v1 <- lapply(w1,function(a) lapply(a, function(b) sapply(b, function(c) {if (abs(c) < 0.5) return(0.0) else return(c)}))) |
7e6a46cec67ce893b6808527df01b78ab238961a | 1f630c89e17324b9a6222f68b048739e8b728e4f | /useful functions and code/Probability to get Yatzy.R | 8e1e78184e5ad4079da9c235fa3a83f13d178157 | [] | no_license | martinju/VariousR | 4a5eeaf1078dec6359ffd34dec68ca543ff1526f | 347972324d3d225d7fd8d22270be8458c22dee31 | refs/heads/master | 2021-01-20T18:34:21.987741 | 2016-06-02T12:54:04 | 2016-06-02T12:54:04 | 60,178,902 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 808 | r | Probability to get Yatzy.R |
# Scratch computation unrelated to the simulation below: the midpoint
# and half-width of the interval [a, b].
a=2.639
b=2.655
c(a+0.5*(b-a),(b-a)/2)
### Yatzy
# Monte Carlo estimate of the probability of rolling a Yatzy (five of
# a kind) within three rolls, keeping the most frequent face each time.
# n: number of simulated games.
n=10^5
# One greedy Yahtzee-style re-roll. `tt` is a table() of the current
# five dice; the most frequent face is kept and the remaining dice are
# re-rolled uniformly. Returns a table() of the resulting five dice.
throw <- function(tt)
{
  best <- which.max(tt)
  face <- as.numeric(names(tt)[best])
  n_kept <- tt[best]
  rerolled <- sample(1:6, 5 - n_kept, replace = TRUE)
  samp.new <- c(rerolled, rep(face, n_kept))
  return(table(samp.new))
}
# count[i] records whether game i ended in a Yatzy (1) or not (0).
count=rep(0,n)
for (i in 1:n)
{
  # First roll: five fair dice.
  one=sample(1:6,5,replace=TRUE)
  tt.one=table(one)
  if (length(tt.one)==1)
  {
    # A single distinct face means Yatzy on the first roll.
    count[i]=1
  }
  else {
    # Second roll: keep the most frequent face, re-roll the rest.
    tt.two=throw(tt.one)
    if (length(tt.two)==1)
    {
      count[i]=1
    }
    else {
      # Third and final roll.
      tt.three=throw(tt.two)
      if (length(tt.three)==1)
      {
        count[i]=1
      }
    }
  }
  # Live convergence plot of the running estimate.
  # NOTE(review): the plot is redrawn on every one of the n iterations,
  # which dominates the run time; redrawing every few hundred games
  # would give the same picture far faster.
  plot(1:i,cumsum(count[1:i])/(1:i),type='l')
  # Horizontal reference line at 0.046 for visual comparison.
  lines(c(-10,10^7),c(0.046,0.046),col=2)
  # print(c(i,count,count/i))
}
#> sum(count)/n
#[1] 0.04472
# It is actually 0.06 according to Wikipedia.
# NOTE(review): the simulated estimate (~0.046) matches the reference
# line drawn above rather than 0.06 -- the cited figure is worth
# re-checking.
|
7c4850186325d17f4e9abb95625b752efefde9f0 | de59b0a840d831eeef38dbcbdef9534e54e485ed | /man/random.Rd | cbd25e255c7198e66683704fc26787034a1f5741 | [] | no_license | cao123yudong/D3ManhattanPlots | ac8e487ae2dccd26dde15a4574092f5ccdf96924 | a3d6329f32e73861ddc485e4def139c0974bdd20 | refs/heads/master | 2021-01-13T12:33:01.137075 | 2015-08-01T21:56:56 | 2015-08-01T21:56:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 496 | rd | random.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/manhattanPlot.R
\docType{data}
\name{random}
\alias{random}
\title{Randomly generated snps and pvals}
\format{A data frame with 100 rows and 2 columns:
\describe{
\item{SNP}{Snp names. Character}
\item{PVal}{P-Values. Float}
}}
\usage{
random
}
\description{
Randomly generated dataset of 400 snps. One should result as significant using
the bonferroni correction threshold built in.
}
\keyword{datasets}
|
84a12dba28f14af78ab4f2eea0ce5561d5d6ef12 | 1474dce63db5c492c8ed4e5df17bde69945586dc | /man/PATHChange.Rd | ee5190783e666d7272d8a6332d91437f4a9fcfc8 | [] | no_license | cran/PATHChange | 5128f5a4bbf305b6e140960566126a97e511f739 | d55ef02f362d50c6762ba522ba6344c41b90e59a | refs/heads/master | 2021-01-17T05:58:18.966559 | 2016-06-17T08:18:35 | 2016-06-17T08:18:35 | 61,350,485 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,513 | rd | PATHChange.Rd | \name{PATHChange}
\alias{PATHChange}
\title{Determination of differentially expressed pathways using multi-statistic comparison}
\description{This is the main function of the PATHChange package. It applies the three non-parametric tests (Bootstrap, Fisher's exact and Wilcoxon signed-rank) to each selected histological comparison.}
\usage{
PATHChange(path, MeanData, writeCSV, writeRDS, destDIR)
}
\arguments{
\item{path}{List of pathways previously generated by the function PATHChangeList.}
\item{MeanData}{List of the results of the histological comparisons chosen by the user (It can be calculated by the function PATHChangeDat).}
\item{writeCSV}{TRUE for write a .csv file result}
\item{writeRDS}{TRUE for write a .rds file result}
\item{destDIR}{Destination folder for .csv and .rds files.}
}
\details{Because several pathways are tested, the approach is a multiple-comparison experiment; PATHChange therefore corrects the false discovery rate (FDR) with the Benjamini-Hochberg algorithm.}
\value{
\item{p.value}{Results}}
\author{Carla A. R. S. Fontoura}
\seealso{\code{\link{PATHChangeList}}, \code{\link{PATHChangeDat}}}
\examples{
require(rlist)
path<-list.load(system.file("extdata", "path.rds", package = "PATHChange"))[c(1:10)]
MeanData<-list.load(system.file("extdata", "MeanData.rds", package = "PATHChange"))
\dontrun{PATHChange(path=path, MeanData=MeanData, writeCSV=FALSE, writeRDS=FALSE)}
\dontrun{p.value <- list.load(file.path(tempdir(),"pValue.rds"))}
} |
bb8bf2f4fe6a9cd9c7843ca47a375047bb481019 | d00e16b9befdabc3ad93b99327511d3ec65032cf | /output_generator.R | afca69c678f95cb533f571574f49d0b292b5b8dc | [] | no_license | user05011988/userproject | 420b3063e0b0c7956667268dd2a620a4a39dc736 | d01fc42d3fb78b6b7c291dc0929985a5a9114574 | refs/heads/master | 2020-07-23T21:08:14.839296 | 2017-03-02T11:12:40 | 2017-03-02T11:12:40 | 66,847,673 | 0 | 0 | null | null | null | null | IBM852 | R | false | false | 4,248 | r | output_generator.R | #########################################################################
# Dolphin - R package for reliable automatic quantification of 1H 1D NMR spectra
# Copyright (C) 2017 Daniel Ca˝ueto
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
############################################################################
#Calculation of output variables (Area, fitting_error,shift,signal_area_ratio,intensity, half bandwidth) and plot variables
# Computes the model outputs for each quantified signal -- total Area,
# fitting error, shift, signal/area ratio, intensity and half bandwidth
# -- together with the per-point traces (individual signals, summed
# signals, summed baseline and total fit) used for plotting.
#
# Arguments (as used below -- confirm against callers):
#   signals_to_quantify: row indices of fitted_signals to quantify.
#   fitted_signals: signals x datapoints matrix of fitted lineshapes.
#   Ydata: original spectrum intensities; Xdata: axis values (unused
#     in this function body).
#   signals_parameters: parameter matrix; by the extractions at the
#     end, row 1 = intensity, row 2 = shift, row 3 = half bandwidth.
#   multiplicities: per-signal multiplicity; 0 marks baseline signals.
#   ROI_buckets: indices of the region of interest (default all points).
#
# Returns list(output_data = <outputs above>, error1 = <fitting error
# of the total fit over the ROI>).
output_generator = function(signals_to_quantify,
                            fitted_signals,
                            Ydata,
                            Xdata,
                            signals_parameters,multiplicities,ROI_buckets=seq_along(Ydata)) {
  fitted_signals[is.na(fitted_signals)]=0 #to be sure that there are no NA values on the fitted signals
  BGsignals = (multiplicities == 0) #finds baseline signals
  output_data = list()
  #Storage of signals and sum of baseline and metabolite signals
  output_data$signals = fitted_signals[!BGsignals, , drop =F]
  output_data$signals_sum = colSums(fitted_signals[!BGsignals, , drop =F], na.rm = T)
  output_data$baseline_sum = colSums(fitted_signals[BGsignals, , drop =F], na.rm = T)
  output_data$fitted_sum = output_data$signals_sum + output_data$baseline_sum
  #For every signal I locate in which bins where most of the signal is located and I calculate the fitting error and the signal to area ratio
  for (ind in signals_to_quantify) {
    #I sort bins according to intensity of signal in each bin.
    sorted_bins=sort(fitted_signals[ind,ROI_buckets]/sum(fitted_signals[ind,ROI_buckets ]),decreasing=T,index.return=T)
    if(length(sorted_bins$x)>0) {
    #I select bins that make a cumulative sum of 90% of total area of the signal
    bins= sorted_bins$ix[1:which.min(abs(cumsum(sorted_bins$x)-0.9))]
    } else {
      bins=seq_along(ROI_buckets)
    }
    subregion_fitted = output_data$fitted_sum[ROI_buckets[bins]] #fitted spectrum
    subregion_signals = fitted_signals[ind, ROI_buckets[bins]] #fitted signals
    subregion_spectrum = Ydata[ROI_buckets[bins]] #original spectrum
    #I calculate how much the quantified signal represents the total spectrum in the region where the region is located.
    # signal_area_ratio: 100 minus the percentage mismatch between the
    # spectrum area and the signal area over the selected bins. Fields
    # start as NULL, so append() builds the vectors signal-by-signal.
    output_data$signal_area_ratio = append(output_data$signal_area_ratio, 100 -((abs(sum(subregion_spectrum) - sum(subregion_signals)) / sum(subregion_spectrum)) * 100))
    # normalized_rmse=cor(subregion_spectrum, subregion_fitted)
    # Fitting error: residual standard error of a linear fit of the
    # spectrum on the total fit, normalized by the subregion maximum.
    normalized_rmse=summary(lm(subregion_spectrum~subregion_fitted))$sigma/max(subregion_spectrum)
    output_data$fitting_error = append(output_data$fitting_error,normalized_rmse)
  }
  # Same 90%-area bin selection, now for the total fitted spectrum, to
  # compute a single overall fitting error (error1) for the whole ROI.
  sorted_bins=sort(output_data$fitted_sum[ROI_buckets]/sum(output_data$fitted_sum[ROI_buckets]),decreasing=T,index.return=T)
  if(length(sorted_bins$x)>0) {
  bins= sorted_bins$ix[1:which.min(abs(cumsum(sorted_bins$x)-0.9))]
  } else {
    bins=seq_along(ROI_buckets)
  }
  subregion_fitted = output_data$fitted_sum[ROI_buckets[bins]]
  subregion_spectrum = Ydata[ROI_buckets[bins]]
  error1=summary(lm(subregion_spectrum~subregion_fitted))$sigma/max(subregion_spectrum)
  # Scalar outputs per quantified signal.
  output_data$Area = rowSums(fitted_signals[signals_to_quantify, , drop =
                                              F])
  output_data$shift = signals_parameters[2, signals_to_quantify]
  output_data$intensity=signals_parameters[1, signals_to_quantify]
  output_data$half_band_width=signals_parameters[3, signals_to_quantify]
  dummy=list(output_data=output_data,error1=error1)
  return(dummy)
}
|
faf1c6be7e50c87ca7a18a56ac2c8a68952ab38a | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /bayesmove/tests/testthat/test-expand_behavior.R | bbf15c3e4030f6072994c51b881e62080fe7df13 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,362 | r | test-expand_behavior.R | test_that("behavior proportions augmented from segment to observation level", {
#simulate data
id<- rep(1:4, each = 250)
date<- rep(seq(c(ISOdate(2020, 8, 14, tz = "UTC")), by = "hour", length.out = 250), 4)
SL<- sample(1:5, 1000, replace = T)
TA<- sample(1:8, 1000, replace = T)
time1<- rep(1:250, 4)
tseg<- rep(rep(1:10, each = 25), 4)
dat<- data.frame(id, date, tseg, time1, SL, TA)
# Select only id, tseg, SL, and TA columns
dat2<- dat[,c("id","tseg","SL","TA")]
#summarize by time segment
obs<- summarize_tsegs(dat = dat2, nbins = c(5,8))
#cluster data with LDA
res<- cluster_segments(dat = obs, gamma1 = 0.1, alpha = 0.1, ngibbs = 250,
nburn = 125, nmaxclust = 7, ndata.types = 2)
#Extract proportions of behaviors per time segment
theta.estim<- extract_prop(res = res, ngibbs = 250, nburn = 125, nmaxclust = 7)
#Create augmented matrix by replicating rows (tsegs) according to obs per tseg
theta.estim.long<- expand_behavior(dat = dat, theta.estim = theta.estim, obs = obs,
nbehav = 3, behav.names = c("Encamped","ARS","Transit"),
behav.order = c(1,2,3))
expect_equal(3*nrow(dat), nrow(theta.estim.long))
expect_s3_class(theta.estim.long, "data.frame")
expect_equal(length(unique(theta.estim.long$time1)), 250)
})
|
906bb6b117669af753c369d27e9719a468c06a6f | 767ccae7c89cb4b6e3093f9636fba7f5bfe9752c | /plot2.R | 483ab5af121da831225df376bfa095f47204b40b | [] | no_license | MaFer92/Proyecto-2_curso4 | 68dc0b2b06e5909d9bdad5953f3e48192b79fefc | 019825fa892d31227b51b4016dc3f84190c4015e | refs/heads/main | 2023-03-18T01:03:36.645942 | 2021-03-19T22:57:07 | 2021-03-19T22:57:07 | 349,574,058 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,074 | r | plot2.R | #PREGUNTA 2
# Question 2: Have total PM2.5 emissions in Baltimore City, Maryland
# (fips == "24510") decreased from 1999 to 2008? Answered with a plot
# produced by the base plotting system.
# Working directory containing the database (machine-specific path).
setwd("C:/Users/Mafer/Documents/R-curso4/proyecto")
# Libraries
library(dplyr)
# Read the database
NEI <- readRDS("summarySCC_PM25.rds")
# Keep only records for the city of Baltimore
datos = filter(NEI, fips == "24510")
# Total emissions of the city per year. Fix: na.rm must be an argument
# of sum() itself; in the original it was an extra summarise() argument
# and was silently turned into a constant column instead of being used.
datos1 = datos %>% group_by(year) %>%
  summarise(total = sum(Emissions, na.rm = TRUE))
# Plot
plot(datos1$year, datos1$total, col= "pink", lwd=1.5, type = "l",
     xlab = "Año", ylab = expression(PM[2.5] * "(Ton)"),
     main= expression("Emisión Total de "*PM[2.5]*
                        " en la ciudad de Baltimore"), pch=19)
points(datos1$year, datos1$total, col="pink", lwd=4)
# Save the figure as a PNG
dev.copy(png, file = "plot2.png") # copy the plot to a png file
dev.off() # close the png device
|
0ba39b29a33d7d9f7fb855699cd1ca62db8b0127 | e6df7c4a0e955ac7ed2068efabd35a740ccd8895 | /rscripts/scatterPlot.R | 2477fca6b57b8e501f77cd1b315c659e7774f95b | [] | no_license | dvallin/pclref | 3bbb77e2d8e822b9fd7a00251f23a99e469b8cac | 5b7ddf45fba7c5caa99fdf5a31c5cf33bb251131 | refs/heads/master | 2020-12-02T22:08:20.150424 | 2017-07-03T08:17:06 | 2017-07-03T08:17:06 | 96,087,163 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,989 | r | scatterPlot.R | library(ggplot2)
library(GGally)
library(stringr)
require(reshape2)
require(Hmisc)
library(hexbin)
#options(device="png")
# Global evaluation settings consumed by the sourced impl/ scripts
# (assigned with <<- so they land in the global environment).
min_overlap <<- 0.2;
max_overlap <<- 1.1;
symmetric <<- 3000;
#names = c("re_r_rep.f", "ransac_translation_error.f");
methods = c("linear", "linear");
# NOTE(review): `names` and `methods` reuse base-function names. Calls
# like names(x) still resolve to the functions (R skips non-function
# bindings in call position), but the overlap is confusing -- consider
# renaming these variables.
names = c("L2_keypoints.f","L2_features.f", "set.e");
setwd("F:/Projects/elysium/rscripts/")
source("impl/utils.R");
source("impl/plotting.R");
source("impl/evaluations.R");
source("impl/scatterImpl.R");
# Draws `n` values from the empirical distribution described by a
# histogram object `hist` (as returned by hist()): bin midpoints are
# sampled with replacement, weighted by the bin densities.
#
# Fixes over the original: sample(x, ...) falls back to sampling from
# 1:x when x has length 1, so a single-bin histogram was mis-sampled;
# drawing bin indices with sample.int() avoids that. TRUE replaces the
# reassignable alias T.
sampleDist = function(n, hist)
{
  picks <- sample.int(length(hist$mids), n, replace = TRUE, prob = hist$density)
  hist$mids[picks]
}
# Compares the distribution of column 5 of the first seed_scatter data
# frame across the three point sets "c", "m" and "u" using several
# histogram distance measures, and prints one LaTeX-ready table row of
# m/u-to-c/m distance ratios plus a Monte Carlo overlap estimate.
# NOTE(review): the arguments d2, i and j are never used (i is shadowed
# by the loop variable below). seed_scatter and the distance helpers
# (l1_norm, l2_norm, d_intersection, d_chi2, d_hellinger, d_corr) come
# from the sourced impl/ scripts -- confirm semantics there.
scatterEvalHistos <- function(d2, i, j)
{
  df <- seed_scatter[[1]]
  # Split by point-set label.
  dc <- subset(df, df$set.e == "c")
  dm <- subset(df, df$set.e == "m")
  du <- subset(df, df$set.e == "u")
  # Shared binning range over all three subsets (column 5).
  bin_max <- max(dc[,c(5)])+1
  bin_max <- max(max(dm[,c(5)])+1, bin_max)
  bin_max <- max(max(du[,c(5)])+1, bin_max)
  bin_min <- max(0, min(dc[,c(5)]))
  bin_min <- min(max(0, min(dm[,c(5)])), bin_min)
  bin_min <- min(max(0, min(du[,c(5)])), bin_min)
  # 200-bin histograms on the shared range (not drawn).
  chist <- hist(dc[,c(5)], breaks=seq(bin_min,bin_max,by=(bin_max-bin_min)/200), plot=F)
  mhist <- hist(dm[,c(5)], breaks=seq(bin_min,bin_max,by=(bin_max-bin_min)/200), plot=F)
  uhist <- hist(du[,c(5)], breaks=seq(bin_min,bin_max,by=(bin_max-bin_min)/200), plot=F)
  # For each measure: m-vs-u distance (mu*) and c-vs-m distance (cm*);
  # the printed quantities are the ratios mu*/cm*.
  mu0 <- l1_norm(mhist$density, uhist$density)
  cm0 <- l1_norm(chist$density, mhist$density)
  mu1 <- l2_norm(mhist$density, uhist$density)
  cm1 <- l2_norm(chist$density, mhist$density)
  mu2 <- d_intersection(mhist$density, uhist$density)
  cm2 <- d_intersection(chist$density, mhist$density)
  mu3 <- d_chi2(mhist$density, uhist$density)
  cm3 <- d_chi2(chist$density, mhist$density)
  mu4 <- d_hellinger(mhist$density, uhist$density)
  cm4 <- d_hellinger(chist$density, mhist$density)
  mu5 <- d_corr(mhist$density, uhist$density)
  cm5 <- d_corr(chist$density, mhist$density)
  # Monte Carlo overlap: draw from the "c" histogram and average the
  # proportion of "m" counts among "m"+"u" counts in the nearest bin.
  samples <- sampleDist(10000, chist)
  v <- 0
  c <- 0
  for(i in 1:10000)
  {
    i_m <- which(abs(mhist$mids-samples[i])==min(abs(mhist$mids-samples[i])))
    mval <- mhist$counts[i_m]
    i_u <- which(abs(uhist$mids-samples[i])==min(abs(uhist$mids-samples[i])))
    uval <- uhist$counts[i_u]
    if(mval + uval > 0)
    {
      v <- v + mval / (mval + uval)
      c <- c + 1
    }
  }
  v <- v / c
  cat(sprintf('%.3f & %.3f & %.3f & %.3f & %.3f & %.3f & %.3f', mu0 / cm0, mu1 / cm1, mu2 / cm2, mu3 / cm3, mu4 / cm4, mu5 / cm5, v))
}
#setwd("../tests/archived_tests/feature_tests/fpfh_radius_old/eth_apartment");
#setwd("../tests/archived_tests/feature_tests/harris_fpfh_mu_pe/eth_apartment");
setwd("../tests/archived_tests/ec_neighbors_iss_fpfh/eth_apartment");
#full_dir_eval(scatterInit, scatterSeedInit, scatterCalc, scatterPrint);
#full_dir_eval(scatterInit, scatterSeedInit, scatterCalc, scatterPrintColored);
#full_dir_eval(scatterInit, scatterSeedInit, scatterCalcAdd, scatterPrint);
full_dir_eval(scatterInit, scatterSeedInit, scatterCalcAdd, scatterPrintPairs);
#setwd("F:/Projects/elysium/rscripts/")
#setwd("../tests/archived_tests/feature_tests/harris_fpfh_mu_pe/eth_apartment");
#setwd("../tests/archived_tests/feature_tests/harris_shot_mu_pe/eth_apartment");
#full_dir_eval(scatterInit, scatterSeedInit, scatterCalcAdd, scatterEvalHistos);
#setwd("../eth_gaz_summer");
#full_dir_eval(scatterInit, scatterSeedInit, scatterCalcAdd, scatterEvalHistos);
#setwd("../eth_gaz_winter");
#full_dir_eval(scatterInit, scatterSeedInit, scatterCalcAdd, scatterEvalHistos);
#setwd("../eth_haupt");
#full_dir_eval(scatterInit, scatterSeedInit, scatterCalcAdd, scatterEvalHistos);
#setwd("../eth_mountain");
#full_dir_eval(scatterInit, scatterSeedInit, scatterCalcAdd, scatterEvalHistos);
#setwd("../eth_stairs");
#full_dir_eval(scatterInit, scatterSeedInit, scatterCalcAdd, scatterEvalHistos);
#setwd("../eth_wood_autumn");
#full_dir_eval(scatterInit, scatterSeedInit, scatterCalcAdd, scatterEvalHistos);
#setwd("../eth_wood_summer");
#full_dir_eval(scatterInit, scatterSeedInit, scatterCalcAdd, scatterEvalHistos);
|
7a3764ef89bb1cabd2c7a7efdef3ace0192dbcea | f43931a3d2fe0075098a13662c3497e6dbc49115 | /R/mainDisplayUI.R | 7c873deb1fcbc073930ab92b0a6eb537ebdda698 | [
"MIT"
] | permissive | JDMusc/READ-TV | 43f25df5659d28a044cea5765855a4aab7bec773 | 8ddceec04563f5586bbc35eb9918eda8ed06cf6d | refs/heads/master | 2021-07-04T13:57:40.323156 | 2021-01-19T00:01:36 | 2021-01-19T00:01:36 | 214,256,737 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 565 | r | mainDisplayUI.R |
# Top-level UI for the main display module. `id` is the Shiny module
# id; all child ids are namespaced through NS(id). Returns a fluidPage
# with one tab per analysis stage.
mainDisplayUI <- function(id) {
  ns <- NS(id)
  fluidPage(
    shinyjs::useShinyjs(),
    # CSS hook used to flag invalid filter queries in red.
    shinyjs::inlineCSS(list(.invalid_query = "background-color: #f006")),
    tabsetPanel(
      tabPanel("Data Upload", dataUploadTabUI(ns("dataUpload"))),
      tabPanel("Filter & Facet", basicDisplayTabUI(ns("basicDisplay"))),
      tabPanel("CPA", cpaTabUI(ns("cpa"))),
      tabPanel("CPA Overlay", cpaOverlayTabUI(ns("cpaOverlay"))),
      id = ns("tabs")
    )
  )
}
|
8958cd68faf95eb41142d849a7525ea2b8898133 | 2b81cd341c4d789cd39bc6dbf346d0e9d2cd5d16 | /man/get_msigdbr.Rd | 23e3916350d5b9f31965ac11e4bc6e3863d7a5a9 | [
"GPL-2.0-only",
"MIT"
] | permissive | CityUHK-CompBio/DeepCC | c9ca802823c0f40df58c278ee606ed491c9e82e0 | 3cd5dc849e1a64c43b9a354c3209b607d95761d0 | refs/heads/master | 2021-12-03T14:29:19.422369 | 2021-12-01T14:47:07 | 2021-12-01T14:47:07 | 67,822,552 | 18 | 18 | MIT | 2020-06-12T08:13:45 | 2016-09-09T18:25:42 | R | UTF-8 | R | false | true | 529 | rd | get_msigdbr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_msigdbr.R
\name{get_msigdbr}
\alias{get_msigdbr}
\title{get MSigDBr from R package `msigdbr`}
\usage{
get_msigdbr(cores)
}
\arguments{
\item{cores}{a integer indicating cpu cores used in parallel computing (default = all cores -2 )}
}
\value{
a list containing 25, 724 gene sets, each sets contains multiple entrez_gene
}
\description{
This function defines MSigDBr from a R package, with 25, 724 gene sets
}
\examples{
MSigDBr <- get_msigdbr()
}
|
a03a80ece78a35e6389d5b1c6044f581cd99fae2 | 7fc453391224956da9ce2867d9bd54530a66aa43 | /man/cbs_tree_prior_to_xml_prior_distr.Rd | ef992767b12934d68bba04b673ea8feeb4ac009f | [] | no_license | cran/beautier | 880277272f6cf48b4eca9c28db68e4a42c4ccc3a | 439683e296d755698c3861b447106556e540aa9f | refs/heads/master | 2022-08-30T18:21:31.772630 | 2022-08-11T09:40:07 | 2022-08-11T09:40:07 | 127,175,959 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,133 | rd | cbs_tree_prior_to_xml_prior_distr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_beast2_input_distr.R
\name{cbs_tree_prior_to_xml_prior_distr}
\alias{cbs_tree_prior_to_xml_prior_distr}
\title{Creates the tree prior section in the prior section of
the prior section of the distribution section
of a BEAST2 XML parameter file for a Birth-Death tree prior}
\usage{
cbs_tree_prior_to_xml_prior_distr(cbs_tree_prior)
}
\arguments{
\item{cbs_tree_prior}{a Coalescent Bayesian Skyline tree prior,
as returned by \code{\link{create_cbs_tree_prior}}}
}
\description{
Creates the tree prior section in the prior section of
the prior section of the distribution section
of a BEAST2 XML parameter file for a Birth-Death tree prior
}
\examples{
check_empty_beautier_folder()
# <distribution id="posterior" spec="util.CompoundDistribution">
# <distribution id="prior" spec="util.CompoundDistribution">
# HERE, where the ID of the distribution is 'prior'
# </distribution>
# <distribution id="likelihood" ...>
# </distribution>
# </distribution>
check_empty_beautier_folder()
}
\author{
Richèl J.C. Bilderbeek
}
|
d288e65df6bc4c7797e8eab0d8ad03f607b74b7e | 808e37074a3652ea10ae384f4747bd9b2e3607fd | /R/02_decision_model_functions.R | 7a7d970fc5018844476f6b59fc7622bf29156a6a | [
"MIT"
] | permissive | fthielen/ce16_modelling_course | 248a9eab42d32009e9b417a0fe44e339bf410717 | 62bb04618abfd5ff603b128885b68cda8dc52d9d | refs/heads/master | 2023-05-05T17:17:50.542522 | 2021-05-17T10:03:49 | 2021-05-17T10:03:49 | 368,138,798 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,851 | r | 02_decision_model_functions.R | #' Decision Model
#'
#' \code{decision_model} implements the decision model used.
#'
#' @param l_params_all List with all parameters of decision model
#' @param err_stop Logical variable to stop model run if set up as TRUE. Default = FALSE.
#' @param verbose Logical variable to indicate print out of messages. Default = FALSE
#' @return
#' The transition probability array and the cohort trace matrix.
#' @export
decision_model <- function(l_params_all, err_stop = FALSE, verbose = FALSE){ # User defined
  ### Definition:
  ## Decision model implementation function
  ### Arguments:
  ## l_params_all: List with all parameters of decision model
  ## err_stop: Logical variable to stop the model run on an invalid
  ##           transition array (forwarded to the check_* helpers)
  ### Returns:
  ## a_P: Transition probability array
  ## m_M: Matrix cohort trace
  ##
  with(as.list(l_params_all), {
    #### Error checking ####
    # A mortality rate must exist for every simulated age.
    if ((n_t + n_age_init) > nrow(v_r_mort_by_age)) {
      stop("Not all the age in the age range have a corresponding mortality rate")
    }
    # The initial state vector must be a valid probability distribution.
    # NOTE(review): `!= 1` is an exact floating-point comparison; an
    # all.equal()-based check would tolerate rounding in v_s_init.
    if ((sum(v_s_init) != 1) | !all(v_s_init >= 0)) {
      stop("vector of initial states (v_s_init) is not valid")
    }
    #### Age-specific transition probabilities ####
    # Rates are converted to per-cycle probabilities via p = 1 - exp(-r).
    # Mortality for healthy individuals
    p_HDage <- 1 - exp(-v_r_mort_by_age[(n_age_init + 1) + 0:(n_t - 1)])
    # Mortality for sick individuals (hazard ratio hr_S1)
    p_S1Dage <- 1 - exp(-v_r_mort_by_age[(n_age_init + 1) + 0:(n_t - 1)] * hr_S1)
    # Mortality for sicker individuals (hazard ratio hr_S2)
    p_S2Dage <- 1 - exp(-v_r_mort_by_age[(n_age_init + 1) + 0:(n_t - 1)] * hr_S2)
    #### Create age-specific transition probability matrices in an array ####
    # Initialize array: one n_states x n_states matrix per cycle.
    a_P <- array(0, dim = c(n_states, n_states, n_t),
                 dimnames = list(v_n, v_n, 0:(n_t-1)))
    # Fill in array; non-death transitions are conditioned on surviving
    # the cycle, so each row sums to one.
    # From H
    a_P["H", "H", ] <- (1-p_HDage) * (1 - p_HS1)
    a_P["H", "S1", ] <- (1-p_HDage) * p_HS1
    a_P["H", "D", ] <- p_HDage
    # From S1
    a_P["S1", "H", ] <- (1-p_S1Dage) * p_S1H
    a_P["S1", "S1", ] <- (1-p_S1Dage) * (1 - (p_S1S2 + p_S1H))
    a_P["S1", "S2", ] <- (1-p_S1Dage) * p_S1S2
    a_P["S1", "D", ] <- p_S1Dage
    # From S2
    a_P["S2", "S2", ] <- 1 - p_S2Dage
    a_P["S2", "D", ] <- p_S2Dage
    # From D (death is absorbing)
    a_P["D", "D", ] <- 1
    #### Check if transition array is valid ####
    check_transition_probability(a_P, err_stop = err_stop, verbose = verbose)
    check_sum_of_transition_array(a_P, n_states, n_t, err_stop = err_stop, verbose = verbose)
    #### Compute cohort trace matrix and transition array for age-dependent STM ####
    # Initialize cohort trace matrix
    m_M <- matrix(0,
                  nrow = (n_t + 1), ncol = n_states,
                  dimnames = list(0:n_t, v_n))
    # Set first row of m.M with the initial state vector
    m_M[1, ] <- v_s_init
    # Iterate STM over time
    for(t in 1:n_t){
      m_M[t + 1, ] <- m_M[t, ] %*% a_P[, , t]
    }
    return(list(a_P = a_P,
                m_M = m_M))
  }
  )
}
#' Check if transition array is valid
#'
#' \code{check_transition_probability} checks if transition probabilities are in \[0, 1\].
#'
#' @param a_P A transition probability array.
#' @param err_stop Logical variable to stop model run if set up as TRUE. Default = FALSE.
#' @param verbose Logical variable to indicate print out of messages.
#' Default = FALSE
#'
#' @return
#' This function stops if transition probability array is not valid and shows
#' what are the entries that are not valid
#' @import utils
#' @export
check_transition_probability <- function(a_P,
                                         err_stop = FALSE,
                                         verbose = FALSE) {
  # Array positions (row state, column state, cycle) whose value lies
  # outside [0, 1].
  m_bad <- arrayInd(which(a_P < 0 | a_P > 1), dim(a_P))
  if (nrow(m_bad) > 0) {
    # Build human-readable "from->to; at cycle k" labels per offender.
    v_from  <- rownames(a_P)[m_bad[, 1]]
    v_to    <- colnames(a_P)[m_bad[, 2]]
    v_cycle <- dimnames(a_P)[[3]][m_bad[, 3]]
    df_notvalid <- data.frame(
      `Transition probabilities not valid:` =
        matrix(paste0(paste(v_from, v_to, sep = "->"),
                      "; at cycle ",
                      v_cycle), ncol = 1),
      check.names = FALSE
    )
    if (err_stop) {
      stop("Not valid transition probabilities\n",
           paste(capture.output(df_notvalid), collapse = "\n"))
    }
    if (verbose) {
      warning("Not valid transition probabilities\n",
              paste(capture.output(df_notvalid), collapse = "\n"))
    }
  }
}
#' Check if the sum of transition probabilities equal to one.
#'
#' \code{check_sum_of_transition_array} checks if each of the rows of the
#' transition matrices sum to one.
#'
#' @param a_P A transition probability array.
#' @param n_states Number of health states.
#' @param n_t Number of cycles.
#' @param err_stop Logical variable to stop model run if set up as TRUE. Default = FALSE.
#' @param verbose Logical variable to indicate print out of messages.
#' Default = FALSE
#' @return
#' Called for its side effects: signals an error or a warning when the rows
#' of any transition matrix do not sum to one; otherwise returns \code{NULL} invisibly.
#' @import dplyr
#' @export
check_sum_of_transition_array <- function(a_P,
                                          n_states,
                                          n_t,
                                          err_stop = FALSE,
                                          verbose = FALSE) {
  # A cycle's matrix is counted as valid when the sum over its whole
  # slice equals n_states, i.e. every row sums to one on aggregate.
  n_valid_slices <- sum(apply(a_P, 3, function(m) sum(rowSums(m))) == n_states)
  if (!isTRUE(all.equal(as.numeric(n_valid_slices), as.numeric(n_t)))) {
    if (err_stop) {
      stop("This is not a valid transition Matrix")
    }
    if (verbose) {
      warning("This is not a valid transition Matrix")
    }
  }
}
|
dccbce54f0bbe416fa3bb1f0f74576f94615302b | 24172214c9edf499820407de65f322d24e6a0e08 | /tests/testthat.R | e46973e5ccb689267f666fb169fde3d8827c690c | [] | no_license | JohnCoene/chartist | b1f2409e83210a70d1d80f89eb96481fdbe88abb | 9e7ec1734a67965cd744a4b2bf3562d6ade09715 | refs/heads/master | 2021-01-19T11:42:50.287324 | 2017-03-02T05:42:42 | 2017-03-02T05:42:42 | 82,259,387 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 64 | r | testthat.R | library(testthat)
library(chartist)
test_check("chartist")
|
663f5172b0b182fe4e7bcdd9b1df378a5d8a6968 | 1a3d0a196941958034361ea10f900bc3248d3912 | /man/sparse.mediation.Rd | 3a73f43059a95fb77d24cc7ab1ada55c6ed4aab1 | [
"MIT"
] | permissive | seonjoo/sparsemediation | d4535a90ebf5a32ed298a4adae6eed81f3bb9772 | 0f0f6214ad9ed917659207317feb899811f995a1 | refs/heads/master | 2021-01-19T13:05:48.388465 | 2019-06-02T23:27:37 | 2019-06-02T23:27:37 | 88,060,500 | 0 | 1 | MIT | 2019-06-02T23:31:04 | 2017-04-12T14:21:42 | R | UTF-8 | R | false | true | 2,885 | rd | sparse.mediation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sparse.mediation.R
\name{sparse.mediation}
\alias{sparse.mediation}
\title{Sparse mediation for high-dimensional mediators
Fit a mediation model via penalized maximum likelihood and structural equation model.
The regularization path is computed for the lasso or elasticnet penalty at a grid of
values for the regularization parameter lambda. Currently, mediation analysis is developed based on gaussian assumption.
Multiple Mediation Model:
(1) M = Xa + e1
(2) Y = Xc' + Mb + e2
And in the optimization, we do not regularize c', due to the assumption of partial mediation.}
\usage{
sparse.mediation(X, M, Y, tol = 10^(-5), max.iter = 50,
lambda = log(1 + (1:30)/100), lambda2 = c(0.3, 0.5), alpha = 1,
tau = 1, verbose = FALSE, Omega.out = FALSE)
}
\arguments{
\item{X}{One-dimensional predictor}
\item{M}{Multivariate mediator}
\item{Y}{Outcome}
\item{tol}{(default -10^(-5)) convergence criterion}
\item{max.iter}{(default=100) maximum iteration}
\item{lambda}{(default=log(1+(1:50)/125)) tuning parameter for L1 penalization}
\item{lambda2}{(default=c(0.2,0.5)) tuning parameter for L1 penalization for covariance matrix, used only for p>n.}
\item{alpha}{(default=1) tuning parameter for L2 penalization}
\item{tau}{(default=1) tuning parameter for differential weight between paths a (X -> M) and b (M -> Y)}
\item{verbose}{(default=TRUE) print progress.}
\item{Omega.out}{(defult=TRUE) output Omega estimates}
}
\value{
c: direct effect per each tuning parameter lambda. length(lambda)-dimensional vector
hatb: Path b (M->Y given X) estimates: V-by-length(lambda) matrix
hata: Path a (X->M) estimates: V-by-length(lambda) matrix
medest: Mediation estimates (a*b): V-by-length(lambda) matrix
alpha: a scalar of the tuning parameter for L2 regularization
lambda: a vector of tuning parameters for L1-penalization
tau: weight used.
nump: Number of selected mediation paths
Omega Estimated covariance matrix of the mediator
}
\description{
Sparse mediation for high-dimensional mediators
Fit a mediation model via penalized maximum likelihood and structural equation model.
The regularization path is computed for the lasso or elasticnet penalty at a grid of
values for the regularization parameter lambda. Currently, mediation analysis is developed based on gaussian assumption.
Multiple Mediation Model:
(1) M = Xa + e1
(2) Y = Xc' + Mb + e2
And in the optimization, we do not regularize c', due to the assumption of partial mediation.
}
\examples{
library(sparsemediation)
N=100
V=50
set.seed(1234)
a = rep(0,V);a[1:3]<-5;b<-a
X = rnorm(N)
M = X \%*\% t(a)+ matrix(rnorm(N*V),N,V)
Y = X + M \%*\% b + rnorm(N)
sparse.mediation(X,M,Y)
}
\references{
TBA
}
\author{
Seonjoo Lee, \email{sl3670@cumc.columbia.edu}
}
\keyword{glmnet}
\keyword{highdimensional}
\keyword{mediation}
|
351a0c9e6aa618040324acccff7d5f5e25d8ea53 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/lavaSearch2/man/summary.calibrateType1.Rd | 65a676f801a8a4fd142a6f770f33aef51073b16f | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,225 | rd | summary.calibrateType1.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.calibrateType1.R
\name{summary.calibrateType1}
\alias{summary.calibrateType1}
\title{Display the Type 1 Error Rate}
\usage{
\method{summary}{calibrateType1}(
object,
robust = FALSE,
type = "type1error",
alpha = 0.05,
log.transform = TRUE,
digits = 5,
print = TRUE,
...
)
}
\arguments{
\item{object}{output of the \code{calibrateType1} function.}
\item{robust}{[character] should the results be displayed for both model-based and robust standard errors (\code{TRUE}),
only model-based standard error (\code{FALSE}), or only robust standard error (\code{"only"})?}
\item{type}{[character] should the type 1 error rate be displayed (\code{"type1error"}) or the bias (\code{"bias"}).}
\item{alpha}{[numeric, 0-1] the confidence levels.}
\item{log.transform}{[logical] should the confidence intervals be computed on the logit scale.}
\item{digits}{[integer >0] the number of decimal places to use when displaying the summary.}
\item{print}{should the summary be printed in the terminal.}
\item{...}{[internal] only used by the generic method.}
}
\description{
Display the type 1 error rate from the simulation results.
}
|
c87d5e3d79c3410eb59d5ae6db1f399489dfe7f2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/AMCP/examples/chapter_13_table_2.Rd.R | 1dcbd89c0219ae9f364d90d67dc093e2272d97a3 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 338 | r | chapter_13_table_2.Rd.R | library(AMCP)
### Name: chapter_13_table_2
### Title: The data used in Chapter 13, Table 2
### Aliases: chapter_13_table_2 C13T2 Chapter_13_Table_2 c13t2
### Keywords: datasets
### ** Examples
# Load the data
data(chapter_13_table_2)
# Or, alternatively load the data as
data(C13T2)
# View the structure
str(chapter_13_table_2)
|
325740e6fcb70ca68f6aa11b2d08d327ad894aa0 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/doMPI/examples/sinkWorkerOutput.Rd.R | eb108ac61cf625a915aa9c4f417c4e4f9e62e158 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 234 | r | sinkWorkerOutput.Rd.R | library(doMPI)
### Name: sinkWorkerOutput
### Title: Redirect worker output to a file
### Aliases: sinkWorkerOutput
### Keywords: utilities
### ** Examples
## Not run:
##D sinkWorkerOutput(sprintf('worker_##D
## End(Not run)
|
598d8cb54eed2320185ed7736c6f2513a3e3467e | 7589304e8ef13df13726a6eb806603cd4bd81e67 | /R/ErrorHandling.R | d6855017bbf9fbab6358da1f7d7db497ab497be0 | [] | no_license | ralmond/Proc4 | ad0f71c7bc652856b0eaae003618541ad9b5e414 | 25522c582e34a3a723418a5f9c5fbfacddae21ec | refs/heads/master | 2023-07-15T15:43:23.453766 | 2023-07-12T17:58:01 | 2023-07-12T17:58:01 | 240,610,249 | 0 | 0 | null | 2023-08-30T21:22:23 | 2020-02-14T22:35:32 | HTML | UTF-8 | R | false | false | 7,731 | r | ErrorHandling.R | ## Code taken from
## https://stackoverflow.com/questions/1975110/printing-stack-trace-and-continuing-after-error-occurs-in-r
## Evaluate `expr` while logging every condition it signals (message, warning,
## error) in a Java/log4j style: "<timestamp> <LEVEL> <text>", followed by a
## reversed call stack for warning-and-above conditions.
##
## Arguments:
##   expr          expression to evaluate (lazily, inside the handlers).
##   silentSuccess if TRUE, DEBUG/INFO/WARN output is buffered and only
##                 flushed once a higher-severity condition occurs; on full
##                 success only the buffered WARN lines are printed.
##   stopIsFatal   if TRUE, plain errors are labelled FATAL, otherwise ERROR.
##
## Returns the value of `expr`, preserving its visibility.
withJavaLogging <- function(expr, silentSuccess=FALSE, stopIsFatal=TRUE) {
  hasFailed <- FALSE
  messages <- list()
  warnings <- list()
  logger <- function(obj) {
    ## Map each condition class onto a log level; unknown classes -> ERROR.
    ## vapply (not sapply) guarantees a character result per class.
    level <- vapply(class(obj), function(cls) {
      switch(cls,
             debug = "DEBUG",
             message = "INFO",
             warning = "WARN",
             caughtError = "ERROR",
             error = if (stopIsFatal) "FATAL" else "ERROR",
             "")
    }, character(1))
    ## A condition carries several classes; take the first that mapped.
    level <- c(level[level != ""], "ERROR")[1]
    simpleMessage <- switch(level, DEBUG=,INFO=TRUE, FALSE)
    quashable <- switch(level, DEBUG=,INFO=,WARN=TRUE, FALSE)
    ## Format message
    time <- format(Sys.time(), "%Y-%m-%d %H:%M:%OS3")
    txt <- conditionMessage(obj)
    if (!simpleMessage) txt <- paste(txt, "\n", sep="")
    msg <- paste(time, level, txt, sep=" ")
    ## Drop the innermost frame (this logger call) from the stack.
    ## seq_len with a floor of 0 replaces the fragile `1:length(calls)-1`,
    ## which only worked because zero indices are silently ignored.
    calls <- sys.calls()
    calls <- calls[seq_len(max(length(calls) - 1L, 0L))]
    trace <- limitedLabels(c(calls, attr(obj, "calls")))
    if (!simpleMessage && length(trace) > 0) {
      ## Print most-recent call first, Java style.
      trace <- trace[length(trace):1]
      msg <- paste(msg, " ", paste("at", trace, collapse="\n "), "\n", sep="")
    }
    ## Output message: either buffer (quiet success mode) or emit now; the
    ## first non-quashable condition flushes the buffer and marks failure.
    if (silentSuccess && !hasFailed && quashable) {
      messages <<- append(messages, msg)
      if (level == "WARN") warnings <<- append(warnings, msg)
    } else {
      if (silentSuccess && !hasFailed) {
        cat(paste(messages, collapse=""))
        hasFailed <<- TRUE
      }
      cat(msg)
    }
    ## Muffle any redundant output of the same message.
    optionalRestart <- function(r) {
      res <- findRestart(r)
      if (!is.null(res)) invokeRestart(res)
    }
    optionalRestart("muffleMessage")
    optionalRestart("muffleWarning")
  }
  ## withVisible captures whether the result would auto-print, so we can
  ## reproduce the caller-visible behaviour of a bare evaluation.
  vexpr <- withCallingHandlers(withVisible(expr),
                               debug=logger, message=logger, warning=logger,
                               caughtError=logger, error=logger)
  if (silentSuccess && !hasFailed) {
    cat(paste(warnings, collapse=""))
  }
  if (vexpr$visible) vexpr$value else invisible(vexpr$value)
}
## Evaluate `expr`, routing every condition it raises through futile.logger
## at an appropriate level, and converting errors into a "try-error" result
## instead of stopping. Extra named arguments in `...` are dumped at DEBUG
## level as context. `context` labels the log lines; `loggername` selects the
## futile.logger namespace; `tracelevel` lists the levels for which a call
## stack is logged.
## Returns the value of `expr`, or (invisibly) a "try-error" object whose
## condition attribute is the original error.
withFlogging <- function(expr,...,context=deparse(substitute(expr)),
                         loggername=flog.namespace(),
                         tracelevel=c("WARN","ERROR","FATAL")) {
  fargs <- list(...)
  tracelevel <- toupper(tracelevel)
  handler <- function(obj) {
    ## Change behaviour based on type of message
    level <- sapply(class(obj), switch,
                    trace="TRACE",
                    debug="DEBUG",
                    message="INFO",
                    warning="WARN",
                    caughtError = "ERROR",
                    error="FATAL", "")
    ## Fixes multiple classes on message: first mapped class wins,
    ## falling back to ERROR for unrecognized conditions.
    level <- c(level[level != ""], "ERROR")[1]
    simpleMessage <- switch(level, DEBUG=,INFO=TRUE, FALSE)
    ## Format message (non-trivial levels get a trailing newline).
    txt <- conditionMessage(obj)
    if (!simpleMessage) txt <- paste(txt, "\n", sep="")
    msg <- paste("While ", context, ", ", level,
                 ifelse(level=="FATAL"," ERROR: ",": "),txt, sep="")
    ## Pick the futile.logger function matching the level (default ERROR).
    logger <- switch(level,
                     TRACE=flog.trace,
                     DEBUG=flog.debug,
                     INFO=flog.info,
                     WARN=flog.warn,
                     ERROR=flog.error,
                     FATAL=flog.fatal,flog.error)
    logger(msg,name=loggername)
    ## Dump each extra detail argument at DEBUG level for post-mortem use.
    for (detail in names(fargs))
      flog.debug(paste(detail,"="),fargs[[detail]],name=loggername,capture=TRUE)
    if (level %in% tracelevel) {
      ## NOTE(review): `1:length(calls)-1` parses as `(1:length(calls))-1`;
      ## it drops only the last frame because zero indices are ignored.
      calls <- sys.calls()
      calls <- calls[1:length(calls)-1]
      trace <- limitedLabels(c(calls, attr(obj, "calls")))
      if (length(trace) > 0L) {
        trace <- trace[length(trace):1L]
      }
      flog.debug("Traceback:",trace,
                 name=loggername,capture=TRUE)
    }
    ## Muffle any redundant output of the same message
    optionalRestart <- function(r) {
      res <- findRestart(r)
      if (!is.null(res)) invokeRestart(res)
    }
    optionalRestart("muffleMessage")
    optionalRestart("muffleWarning")
    ## Errors unwind to the tryError restart below instead of propagating.
    if (level %in% c("ERROR","FATAL"))
      invokeRestart("tryError",msg,obj)
  }
  withRestarts(
    withCallingHandlers(expr,
                        debug=handler, message=handler, warning=handler,
                        caughtError=handler, error=handler),
    tryError=
      function(msg,obj)
        invisible(structure(msg, class = "try-error", condition = obj)))
}
## Split a futile.logger line of the form "LEVEL [timestamp] message" into a
## list with components `level` (character), `time` (POSIXlt from strptime)
## and `message` (character). Positions are located via the first literal
## "[" and "]" in the line.
parseline <- function(line) {
  open_pos <- regexpr("[", line, fixed = TRUE)
  close_pos <- regexpr("]", line, fixed = TRUE)
  lvl <- trimws(substr(line, 1L, open_pos - 1L))
  stamp <- strptime(substr(line, open_pos + 1L, close_pos - 1L),
                    format = "%Y-%m-%d %H:%M:%S")
  msg <- trimws(substring(line, close_pos + 1L))
  list(level = lvl, time = stamp, message = msg)
}
## Forces Date into Mongo Format.
## Round-trips `dtime` through JSON with POSIXt="mongo" so date-times are
## re-expressed in MongoDB's extended-JSON date form; the final FALSE keeps
## fromJSON from simplifying the result. NOTE(review): exact output shape
## depends on jsonlite/mongo package behaviour -- confirm against callers.
mongoDate <- function (dtime) {
  jsonlite::fromJSON(jsonlite::toJSON(mongo::unboxer(dtime),POSIXt="mongo"),FALSE)
}
## Reference class implementing a futile.logger appender that writes each log
## line as a document into a Mongo-backed collection (`db`, a JSONDB), tagged
## with the application (`app`) and engine names. When `tee` names a file,
## every line is also appended there verbatim.
mongoAppender <-
    setRefClass("mongoAppender",
                fields=c(db="JSONDB",
                         app="character",
                         engine="character",
                         tee="character"),
                methods=c(
                    initialize = function(app="default",
                                          engine="Unspecified",
                                          db=mongo::MongoDB("Log",noMongo=TRUE),
                                          tee=character()) {
                      callSuper(db=db,app=app,engine=engine,tee=tee)
                    },
                    ## Parse one futile.logger line and insert it into the
                    ## collection; optionally tee the raw line to a file.
                    logit = function(line) {
                      pline <- parseline(line)
                      entry <- buildJQuery(app=app,engine=engine,level=pline$level,
                                           timestamp=pline$time,
                                           message=pline$message)
                      mdbInsert(db,entry)
                      if (length(tee) > 0L) {
                        cat(line,file=tee,append=TRUE,sep="")
                      }
                    },
                    ## Closure suitable for futile.logger::flog.appender.
                    logger = function() {
                      function(line) {.self$logit(line)}
                    }))
## Reference class implementing a log appender for Shiny apps: accumulates
## every logged line in the `messages` data frame, optionally tees lines to
## `file`, and renders the accumulated table into the shiny output slot named
## by `field`.
shinyAppender <-
    setRefClass("shinyAppender",
                fields=c(file="character",
                         field="character",
                         messages="data.frame"),
                methods=list(
                    initialize = function (file="",field="",
                                           messages=data.frame(
                                               Messages=character()))
                    {
                      ## NOTE(review): the original passed `steps=steps` to
                      ## callSuper, but no such field or argument exists,
                      ## which errored on instantiation; dropped here.
                      callSuper(file=file, field=field,
                                messages=messages)
                    },
                    ## Record one log line. `output` is the shiny output
                    ## object; it now defaults to NULL so the logger()
                    ## closure below can call update() without one.
                    update = function (line, output=NULL, renderer=function(tab) shiny::renderTable(tab,colname=FALSE))
                    {
                      if (length(file) == 1L && nchar(file)>0L) {
                        cat(line, file = file, append = TRUE, sep = "")
                      }
                      messages$Messages <<- c(messages$Messages,line)
                      ## NOTE(review): the original rendered only when
                      ## `output` was NULL and assigned output[[field]] when
                      ## `field` was empty -- the branches appear swapped;
                      ## fixed to assign when both field and output are
                      ## available, otherwise just evaluate the renderer
                      ## (test case). Confirm against callers.
                      if (length(field)==1L && nchar(field)>0L &&
                          !is.null(output)) {
                        output[[field]] <- do.call(renderer,list(messages))
                      } else {
                        do.call(renderer,list(messages))
                      }
                    },
                    ## Closure suitable for futile.logger::flog.appender.
                    logger = function() {
                      function (line) {
                        .self$update(line)
                      }
                    }
                ))
|
`euler.rot` <-
function(lat1, long1, rotdeg, lat2, long2) {
  ## Rotate the point (lat2, long2) about the Euler pole (lat1, long1) by
  ## `rotdeg` degrees, on a unit sphere. All inputs and outputs are in
  ## decimal degrees; returns c(new_latitude, new_longitude).
  rad <- pi / 180
  a1 <- lat1 * rad
  a2 <- long1 * rad
  rd <- rotdeg * rad
  b1 <- lat2 * rad
  b2 <- long2 * rad
  dlon <- b2 - a2
  dlat <- b1 - a1
  ca1 <- cos(a1)
  sa1 <- sin(a1)
  cb1 <- cos(b1)
  sb1 <- sin(b1)
  ## Haversine formula: angular distance d from the pole to the point.
  a <- sin(dlat / 2)^2 + ca1 * cb1 * sin(dlon / 2)^2
  d <- 2 * atan2(sqrt(a), sqrt(1 - a))
  ## Locals renamed from sd/cd so they no longer shadow stats::sd; an
  ## unused `deg` assignment (dead store) from the original was removed.
  sin_d <- sin(d)
  cos_d <- cos(d)
  ## Initial bearing from the pole to the point, then apply the rotation.
  bear <- atan2(sin(dlon) * cb1, ca1 * sb1 - sa1 * cb1 * cos(dlon))
  tc <- bear - rd
  ## Destination point at distance d along the rotated bearing.
  nlat <- asin(sa1 * cos_d + ca1 * sin_d * cos(tc))
  ndlon <- atan2(sin(tc) * sin_d * ca1, cos_d - sa1 * sin(nlat))
  ## Normalize longitude into (-pi, pi].
  nlon <- ((a2 + ndlon + pi) %% (2 * pi)) - pi
  c(nlat / rad, nlon / rad)
}
e0b1cfd6bb295c20b87d8cd4b13a7e27423e6a9d | 1843152ac893e8d6eb1214de18905a9dd1c39053 | /issb/R/gillespie.R | c50d018fe5825c14d09cafc0fa566aab7faf1f4f | [] | no_license | csgillespie/In-silico-Systems-Biology | cbb66bcee4e3e6026e3426f304a68179914b9617 | 35b9a2a288df26c48ab968a3c848233463d362a1 | refs/heads/master | 2021-01-10T22:12:28.531458 | 2014-08-05T09:43:23 | 2014-08-05T09:43:23 | 5,810,219 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,443 | r | gillespie.R | #' @title Stochastic simulation using the standard Gillespie algorithm
#' @inheritParams deterministic
#' @param tstep output time step; when \code{NULL} every reaction event is
#' recorded, otherwise the state is sampled on a regular grid of width
#' \code{tstep}
#' @author Colin Gillespie
#' @return A matrix. The first column contains the simulation time, the other
#' columns contain the species levels (one row per recorded time point)
#' @keywords character
#' @export
#' @examples demo(lv)
#' gillespie(model, 10)
gillespie = function(model, maxtime, tstep=NULL)
{
  tincr = tstep
  sim_time = 0; i = 1; x = model$get_initial()
  # NOTE(review): output is preallocated with a fixed 100000 rows; a
  # simulation recording more time points than that will error -- confirm
  # this cap is intended.
  N = 100000
  xmat = matrix(0, nrow=N, ncol=(length(x) + 1))
  nr = nrow(xmat)
  xmat[i, ] = c(sim_time, x)
  s = model$get_stoic()
  get_haz = model$get_haz
  h = get_haz(x)
  p = model$get_pars()
  # Iterate until the horizon is reached or all hazards are zero (absorbed).
  while(sim_time <= maxtime && sum(h) > 0){
    # Time to next event ~ Exp(total hazard); pick reaction j with
    # probability proportional to its hazard, then apply its stoichiometry.
    sim_time = sim_time + rexp(1, sum(h))
    j = sample(length(p), 1, prob=h)
    x = x + s[ ,j]
    ##This is a hack :(
    if(is.null(tstep)) {
      # Record every event.
      i = i + 1
      xmat[i, ] = c(sim_time, x)
    } else {
      # Flush all grid points passed by this jump (state is constant
      # between events, so each gets the pre-jump... current state x).
      while(tincr <= sim_time && tincr < (maxtime + tstep/2)) {
        i = i + 1
        xmat[i, ] = c(tincr, x)
        tincr = tincr + tstep
      }
    }
    h = get_haz(x)
  }
#   if(sim_time < maxtime) {
#     i = i + 1
#     xmat[i, ] = xmat[i-1, ]
#   }
  # Clamp the last recorded time to the requested horizon.
  xmat[i, 1] = maxtime
  colnames(xmat) = c("Time", rownames(s))
  return(xmat[1:i, ])
}
|
62d7f9b30faf9621b63a179ead5e392bef56d618 | 67b3b013574e659229dcaf6547ceb9ab062ad4d2 | /cachematrix.R | 0a136668e806d41ffaa99cad9d85cd51c8f6e80a | [] | no_license | edinathesaint/ProgrammingAssignment2-1 | 4118be3e63fa3690f747c326e1f076571eec7955 | 8d3a996437a14668ee04c67831aa9c165359bada | refs/heads/master | 2020-12-25T10:09:59.464865 | 2014-07-27T19:31:22 | 2014-07-27T19:31:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,342 | r | cachematrix.R | # Matrix inversion is usually a costly computation
# and there may be some benefit to caching the inverse
# of a matrix rather than compute it repeatedly
# EXAMPLE usage
# m = rbind(c(1,-2), c(-2,1))
# mcm =makeCacheMatrix(m)
# mcm$get()
#
# When executing the below row, it should compute the inverse of the matrix
# cacheSolve(mcm)
#
# When executing the below row, it should use the cached version of the inverse of the matrix m
# cacheSolve(mcm)
# Create a special "matrix" object that can cache its inverse.
# Returns a list of accessor closures sharing one environment:
#   set(y)         -- store a new matrix and invalidate any cached inverse
#   get()          -- retrieve the stored matrix
#   setinverse(i)  -- cache the inverse
#   getinverse()   -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
# Return the inverse of the special "matrix" created by makeCacheMatrix.
# The inverse is computed at most once: on a cache hit a message is emitted
# and the stored value is returned. The wrapped matrix is assumed to be
# invertible.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    computed <- solve(x$get())
    x$setinverse(computed)
    return(computed)
  }
  message("retrieving cached data")
  cached
}
|
d82718a4c24f158b5981a22c87894c308791eae1 | 70ceafccb7ed3005e64521551eae6657385118e5 | /R-Portable/library/Matrix/tests/validObj.R | 21c8192120f0fcaf0fd6b2642c3bd2e1ae0ccb4e | [
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.0-or-later",
"GPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] | permissive | ksasso/Electron_ShinyApp_Deployment | 6e63686b27bc38607bca1e5b50ed7cd58f6a4a3b | 1402f4d6bbb4a415bce07ebfddc8b76704f11f97 | refs/heads/master | 2023-07-06T11:48:21.413305 | 2020-04-30T12:53:11 | 2020-04-30T12:53:11 | 120,221,429 | 100 | 24 | CC0-1.0 | 2023-06-25T03:17:43 | 2018-02-04T20:42:11 | HTML | UTF-8 | R | false | false | 6,748 | r | validObj.R | library(Matrix)
### Do all kinds of object creation and coercion
source(system.file("test-tools.R", package = "Matrix"))
## the empty ones:
checkMatrix(new("dgeMatrix"))
checkMatrix(Matrix(,0,0))
## "dge"
assertError( new("dgeMatrix", Dim = c(2,2), x= 1:4) )# double 'Dim'
assertError( new("dgeMatrix", Dim = as.integer(c(2,2)), x= 1:4) )# int 'x'
assertError( new("dgeMatrix", Dim = 2:2, x=as.double(1:4)) )# length(Dim) !=2
assertError( new("dgeMatrix", Dim = as.integer(c(2,2)), x= as.double(1:5)))
checkMatrix(m1 <- Matrix(1:6, ncol=2))
checkMatrix(m2 <- Matrix(1:7 +0, ncol=3)) # a (desired) warning
c("dgeMatrix", "ddenseMatrix", "generalMatrix", "geMatrix", "dMatrix",
"denseMatrix", "compMatrix", "Matrix", "xMatrix", "mMatrix") -> m1.cl
stopifnot(!anyNA(match(m1.cl, is(m1))),
dim(t(m1)) == 2:3, identical(m1, t(t(m1))))
c.nam <- paste("C",1:2, sep='')
dimnames(m1) <- list(NULL, c.nam)
checkMatrix(m1) # failed in 0.999375-10
checkMatrix(tm1 <- t(m1))
stopifnot(colnames(m1) == c.nam,
identical(dimnames(tm1), list(c.nam, NULL)),
identical(m1, t(tm1)))
## an example of *named* dimnames
(t34N <- as(unclass(table(x = gl(3,4), y=gl(4,3))), "dgeMatrix"))
stopifnot(identical(dimnames(t34N),
dimnames(as(t34N, "matrix"))),
identical(t34N, t(t(t34N))))
## "dpo"
checkMatrix(cm <- crossprod(m1))
checkMatrix(cp <- as(cm, "dppMatrix"))# 'dpp' + factors
checkMatrix(cs <- as(cm, "dsyMatrix"))# 'dsy' + factors
checkMatrix(dcm <- as(cm, "dgeMatrix"))#'dge'
checkMatrix(mcm <- as(cm, "dMatrix")) # 'dsy' + factors -- buglet? rather == cm?
checkMatrix(mc. <- as(cm, "Matrix")) # dpo --> dsy -- (as above) FIXME? ??
stopifnot(identical(mc., mcm),
identical(cm, (2*cm)/2),# remains dpo
identical(cm + cp, cp + cs),# dge
identical(mc., mcm),
all(2*cm == mcm * 2))
checkMatrix(eq <- cm == cs)
stopifnot(all(eq@x),
identical3(pack(eq), cs == cp, cm == cp),
as.logical(!(cs < cp)),
identical4(!(cs < cp), !(cp > cs), cp <= cs, cs >= cp))
## Coercion to 'dpo' should give an error if result would be invalid
M <- Matrix(diag(4) - 1)
assertError(as(M, "dpoMatrix"))
M. <- as(M, "dgeMatrix")
M.[1,2] <- 10 # -> not even symmetric anymore
assertError(as(M., "dpoMatrix"))
## Cholesky
checkMatrix(ch <- chol(cm))
checkMatrix(ch2 <- chol(as(cm, "dsyMatrix")))
checkMatrix(ch3 <- chol(as(cm, "dgeMatrix")))
stopifnot(is.all.equal3(as(ch, "matrix"), as(ch2, "matrix"), as(ch3, "matrix")))
### Very basic triangular matrix stuff
assertError( new("dtrMatrix", Dim = c(2,2), x= 1:4) )# double 'Dim'
assertError( new("dtrMatrix", Dim = as.integer(c(2,2)), x= 1:4) )# int 'x'
## This caused a segfault (before revision r1172 in ../src/dtrMatrix.c):
assertError( new("dtrMatrix", Dim = 2:2, x=as.double(1:4)) )# length(Dim) !=2
assertError( new("dtrMatrix", Dim = as.integer(c(2,2)), x= as.double(1:5)))
tr22 <- new("dtrMatrix", Dim = as.integer(c(2,2)), x=as.double(1:4))
tt22 <- t(tr22)
(tPt <- tr22 + tt22)
stopifnot(identical(10 * tPt, tPt * 10),
as.vector(t.22 <- (tr22 / .5)* .5) == c(1,0,3,4),
TRUE) ## not yet: class(t.22) == "dtrMatrix")
## non-square triagonal Matrices --- are forbidden ---
assertError(new("dtrMatrix", Dim = 2:3,
x=as.double(1:6), uplo="L", diag="U"))
n <- 3:3
assertError(new("dtCMatrix", Dim = c(n,n), diag = "U"))
validObject(T <- new("dtTMatrix", Dim = c(n,n), diag = "U"))
validObject(M <- new("dtCMatrix", Dim = c(n,n), diag = "U",
p = rep.int(0:0, n+1)))
stopifnot(identical(as.mat(T), diag(n)))
set.seed(3) ; (p9 <- as(sample(9), "pMatrix"))
## Check that the correct error message is triggered
ind.try <- try(p9[1,1] <- 1, silent = TRUE)
stopifnot(grep("replacing.*sensible", ind.try[1]) == 1,
is.logical(p9[1,]),
is(p9[2,, drop=FALSE], "indMatrix"),
is(p9[9:1,], "indMatrix"),
isTRUE(p9[-c(1:6, 8:9), 1]),
identical(t(p9), solve(p9)),
## identical(p9[TRUE,], as(p9, "ngTMatrix")),
identical(as(diag(9), "pMatrix"), as(1:9, "pMatrix"))
)
assert.EQ.mat(p9[TRUE,], as.matrix(as(p9, "ngTMatrix")))
## validObject --> Cparse_validate(.)
mm <- new("dgCMatrix", Dim = c(3L, 5L),
i = c(2L, 0L, 1L, 2L, 0L, 1L),
x = c( 2, 1, 1, 2, 1, 2),
p = c(0:2, 4L, 4L, 6L))
## Previously unsorted columns were sorted - now are flagged as invalid
m. <- mm
ip <- c(1:2, 4:3, 6:5) # permute the 'i' and 'x' slot just "inside column":
m.@i <- m.i <- mm@i[ip]
m.@x <- m.x <- mm@x[ip]
stopifnot(grep("row indices are not", validObject(m., test=TRUE)) == 1)
Matrix:::.sortCsparse(m.) # don't use this at home, boys!
m. # now is fixed
## Make sure that validObject() objects...
## 1) to wrong 'p'
m. <- mm; m.@p[1] <- 1L
stopifnot(grep("first element of slot p", validObject(m., test=TRUE)) == 1)
m.@p <- mm@p[c(1,3:2,4:6)]
stopifnot(grep("^slot p.* non-decreasing", validObject(m., test=TRUE)) == 1)
## 2) to non-strictly increasing i's:
m. <- mm ; ix <- c(1:3,3,5:6)
m.@i <- mm@i[ix]
m.@x <- mm@x[ix]
stopifnot(identical(grep("slot i is not.* increasing .*column$",
validObject(m., test=TRUE)), 1L))
## ix <- c(1:3, 3:6) # now the the (i,x) slots are too large (and decreasing at end)
## m.@i <- mm@i[ix]
## m.@x <- mm@x[ix]
## stopifnot(identical(grep("^slot i is not.* increasing .*sort",
## (msg <- validObject(m., test=TRUE))),# seg.fault in the past
## 1L))
## over-allocation of the i- and x- slot should be allowed:
## (though it does not really help in M[.,.] <- * yet)
m. <- mm
m.@i <- c(mm@i, NA, NA, NA)
m.@x <- c(mm@x, 10:12)
validObject(m.)
m. # show() now works
stopifnot(all(m. == mm), # in spite of
length(m.@i) > length(mm@i),
identical(t(t(m.)), mm),
identical3(m. * m., m. * mm, mm * mm))
m.[1,4] <- 99 ## FIXME: warning and cuts (!) the over-allocated slots
## Low-level construction of invalid object:
## Ensure that it does *NOT* segfault
foo <- new("ngCMatrix",
i = as.integer(c(12204, 16799, 16799, 33517, 1128, 11930, 1128, 11930, 32183)),
p = rep(0:9, c(2,4,1,11,10,0,1,0,9,12)),
Dim = c(36952L, 49L))
validObject(foo)# TRUE
foo@i[5] <- foo@i[5] + 50000L
msg <- validObject(foo, test=TRUE)# is -- correctly -- *not* valid anymore
stopifnot(is.character(msg))
## Error in validObject(foo) :
## invalid class "ngCMatrix" object: all row indices must be between 0 and nrow-1
getLastMsg <- function(tryRes) {
  ## Extract the final message from an erroneous try() result: drop the
  ## "Error in <call> : " prefix and the trailing newline.
  txt <- as.character(tryRes)
  txt <- sub(".*: ", "", txt)
  sub("\n$", "", txt)
}
t <- try(show(foo)) ## error
t2 <- try(head(foo))
stopifnot(identical(msg, getLastMsg(t)),
identical(1L, grep("as_cholmod_sparse", getLastMsg(t2))))
cat('Time elapsed: ', proc.time(),'\n') # "stats"
if(!interactive()) warnings()
|
9409cf7e714e5b43214df7cbfe0516ad97586528 | 79afffae6d108b1a93aea7c72a55cf1fc7247498 | /man/print.summary.nnr.rd | f8ef9989bb728de9dc816815cb09cccbdbcf702a | [] | no_license | cran/assist | efbbad8da52741412f5dc933457774672de90b12 | 866a22f739a0e84d8631044225e3676651c987f2 | refs/heads/master | 2023-09-01T13:13:28.031385 | 2023-08-22T07:00:02 | 2023-08-22T07:30:44 | 17,718,448 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 607 | rd | print.summary.nnr.rd | \name{print.summary.nnr}
\alias{print.summary.nnr}
\title{Print Values}
\description{
Print the arguments of a \code{summary.nnr} object
}
\usage{
\method{print}{summary.nnr}(x, ...)
}
\arguments{
\item{x}{ an object of class \code{summary.nnr}}
\item{\dots}{unused argument}
}
\details{
This is a method for the function \code{print} for objects
inheriting from class \code{summary.nnr}.
}
\author{Chunlei Ke \email{chunlei_ke@yahoo.com} and Yuedong Wang \email{yuedong@pstat.ucsb.edu}}
\seealso{
\code{\link{nnr}}, \code{\link{summary.nnr}}
}
\keyword{file}
|
7c8386eae3dfad33ee83b46b5e94472a55b96c06 | ef4f011de8875e867bfba3cda2e60a1b504bdfa8 | /R/calc_maf_mr.R | b2ca897d398d7ee7b182277faa5081ff3dfd774c | [] | no_license | DiDeoxy/pgda | dd081f79b857cfc033bf90ce6df495448010dbe5 | f0888207b4ffbcbd06e4a91948db4c335ab9e3c7 | refs/heads/master | 2021-07-16T21:58:35.362394 | 2020-06-15T20:09:15 | 2020-06-15T20:09:15 | 179,556,989 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 918 | r | calc_maf_mr.R | #' Calculate the MAF and MR
#'
#' Calculates the minor allele frequency (MAF) and missing rate (MR) of each
#' marker, per chromosome, and groups the per-marker results by genome
#'
#' @importFrom dplyr bind_rows tibble
#' @importFrom magrittr %>%
#'
#' @param wheat_data the parsed gds object; must contain a marker-by-sample
#' \code{genotypes} matrix and a \code{snp$chrom} factor of 21 chromosomes
#' ordered 1A, 1B, 1D, 2A, ... (assumed -- TODO confirm)
#'
#' @return a list containing the above data organised by genome (components
#' \code{A}, \code{B}, \code{D}, each a tibble with columns \code{maf} and
#' \code{mr}, one row per marker)
calc_maf_mr <- function (wheat_data) {
  # Split genotype rows by chromosome, then score each marker (row).
  chrom_geno_sums <- by(wheat_data$genotypes, wheat_data$snp$chrom,
    function (chrom_genos) {
      apply(chrom_genos, 1, function (snp) {
        # Genotype coding: 0 and 2 are the two homozygous classes, 3 is
        # missing (assumed from usage -- TODO confirm). Heterozygous calls
        # (1), if present, are counted in neither A nor B.
        A <- sum(snp == 0)
        B <- sum(snp == 2)
        missing <- sum(snp == 3)
        return(
          tibble(
            maf = min(c(A, B)) / sum(A, B), mr = missing / sum(A, B, missing)
          )
        )
      }) %>% bind_rows()
    }
  )
  # Chromosomes alternate A, B, D within each of the 7 groups, so every
  # third element (offset 1/2/3) belongs to the same genome.
  list(
    A = chrom_geno_sums[seq(1, 19, 3)] %>% bind_rows(),
    B = chrom_geno_sums[seq(2, 20, 3)] %>% bind_rows(),
    D = chrom_geno_sums[seq(3, 21, 3)] %>% bind_rows()
  )
}
677ec275a4bef18a5b21512d7eda21a7d04a4f01 | f2ad973fc4e7eae9f5e141f2c008ed330b057930 | /taxonomy/updates.R | 8eae5c4feba0e139fa43fa1bc0d07b2a6b68ee6b | [] | no_license | leithen/hedgerow-manage | abe84c6d5ac7ec15946ce47c3e22f7d8f271cdae | 97e5dad080ebedb1d3514f17b046753283355d38 | refs/heads/master | 2020-04-06T03:39:49.810869 | 2015-04-01T18:22:17 | 2015-04-01T18:22:17 | 33,264,221 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,157 | r | updates.R | update.taxonomy <- function(dd) {
  ## Apply post-hoc taxonomic corrections to the specimen table `dd` and
  ## return the updated copy. The steps below are order-dependent: sub-genera
  ## are assigned from species names BEFORE those species are renamed.
  ## Overwrite Megachile determinations from the 2013 revision file, matched
  ## on the specimen's UniqueID.
  megachile <- read.csv('manage/taxonomy/2013/Megachile_changes.csv',
                        header=TRUE)
  dd$Species[match(megachile$unique.id, dd$UniqueID)] <-
    as.character(megachile$species)
  dd$SubSpecies[match(megachile$unique.id, dd$UniqueID)] <- ''
  ## Normalize morphospecies codes.
  dd$SubSpecies[which(dd$SubSpecies=='MOR-1')] <- '1'
  dd$SubSpecies[which(dd$SubSpecies=='MOR-1M')] <- '1M'
  dd$SubSpecies[which(dd$SubSpecies=='MOR-2M')] <- '2M'
  ## families
  ## Fill in Family for every bee identified at least to genus, via a
  ## genus-to-family lookup table.
  bee.families <- read.csv('original/misc/bee_families.csv')
  known.to.genus <-
    which(dd$BeeNonbee=='bee' & dd$Genus!='')
  ind <- match(dd$Genus[known.to.genus], bee.families$Genus)
  dd$Family[is.na(dd$Family)] <- ''
  dd$Family[known.to.genus] <-
    as.character(bee.families$Family[ind])
  ## add in sub-genera
  ## NOTE(review): must run before the 'tegulariforme' rename below, which
  ## would otherwise leave its SubGenus unset.
  dd$SubGenus[which(dd$Species=='incompletum')] <- '(Dialictus)'
  dd$SubGenus[which(dd$Species=='kincaidii')] <- '(Evylaeus)'
  dd$SubGenus[which(dd$Species=='tegulariforme')] <- '(Dialictus)'
  ## taxonomic updates
  ## Lasioglossum (Evylaeus): map morphospecies letters to species names
  ## (determinations by J. Gibbs).
  evylaeus.updates <- c(E='nigrescens',
                        K='nigrescens',
                        C='granosum',
                        F='diatretum',
                        G='orthocarpi',
                        H='orthocarpi')
  for(i in seq_along(evylaeus.updates)) {
    ind <- which(dd$Genus=='Lasioglossum' &
                 dd$SubGenus=='(Evylaeus)' &
                 dd$SubSpecies==names(evylaeus.updates)[i])
    if(length(ind)>0) {
      dd$Species[ind] <- evylaeus.updates[i]
      dd$SubSpecies[ind] <- ''
      dd$Determiner[ind] <- 'J. Gibbs'
    }
  }
  ## Lasioglossum (Dialictus): same morphospecies-letter mapping.
  dialictus.updates <- c(H='incompletum',
                         R='incompletum',
                         F='impavidum',
                         J='punctatoventre',
                         C='punctatoventre',
                         E='brunneiiventre',
                         I='brunneiiventre',
                         K='megastictum',
                         D='megastictum',
                         B='diversopunctatum',
                         'B-1'='diversopunctatum')
  for(i in seq_along(dialictus.updates)) {
    ind <- which(dd$Genus=='Lasioglossum' &
                 dd$SubGenus=='(Dialictus)' &
                 dd$SubSpecies==names(dialictus.updates)[i])
    if(length(ind)>0) {
      dd$Species[ind] <- dialictus.updates[i]
      dd$SubSpecies[ind] <- ''
      dd$Determiner[ind] <- 'J. Gibbs'
    }
  }
  ## Collapse 'tegulariforme' into the 'tegulare group' (after its SubGenus
  ## was assigned above).
  ind <- which(dd$Species=='tegulariforme')
  dd$Species[ind] <- 'tegulare group'
  dd$Determiner[ind] <- 'J. Gibbs'
  ## plants
  ## Standardize plant names; grep() does partial matching, so e.g. any
  ## 'Ambrosia ...' entry collapses to 'Ambrosia sp.'.
  plant <- dd$FinalPlantSp
  plant[grep('Ambrosia', plant)] <- 'Ambrosia sp.'
  plant[grep('Brassica', plant)] <- 'Brassica sp.'
  plant[grep('Lavandula', plant)] <- 'Lavandula officinalis'
  plant[grep('Lupinus', plant)] <- 'Lupinus sp.'
  plant[grep('Malva sp.', plant)] <- 'Malva neglecta'
  plant[grep('Raphanus', plant)] <- 'Raphanus raphanistrum'
  plant[grep('Salvia', plant)] <- 'Salvia sp.'
  plant[grep('Sonchus', plant)] <- 'Sonchus sp.'
  plant[grep('Vicia sp.', plant)] <- 'Vicia americana'
  dd$FinalPlantSp <- plant
  dd
}
|
5b4002cdcc56d2173c0ed5170b6401ca14affce8 | 6fc3ced70177ce1f071b39b04e1500bd3b53b663 | /CoDaSeq/R/codaSeq.effect.r | 5b2ee4138031a6ec7641d2c14682aca96467d960 | [
"MIT"
] | permissive | ggloor/CoDaSeq | 03e691b9acad45315cce4fb16e62f965da11f90c | cafd7e1ea6d565a413a50ee6788f466b386fd060 | refs/heads/master | 2023-08-17T04:32:31.612507 | 2023-08-08T16:43:50 | 2023-08-08T16:43:50 | 58,082,037 | 29 | 19 | null | 2023-09-08T15:36:31 | 2016-05-04T20:30:16 | R | UTF-8 | R | false | false | 1,798 | r | codaSeq.effect.r | #' Estimate effect size between two distributions
#'
#' Calculates relatively robust measures of standardized mean difference (effect) and dispersion from a matrix or dataframe of numbers. The output value is smaller than Cohen's d by a factor of 1.418 when comparing two Normal distributions. There is an option to normalize the effect size by that factor.
#' @param x A numerical matrix with samples by column
#' @param conds A vector denoting group membership; only the first two
#' factor levels are compared
#' @param corrected Whether to scale to Cohen's d or not,
#' default is FALSE
#' @return returns a vector of effect sizes, one per row of \code{x}
#' @export
#' @examples
#' # make a synthetic dataset
#' d <- c(rnorm(100,0,1), rnorm(100,2,1))
#' e <- c(rnorm(100,2,1), rnorm(100,0,1))
#' de <- rbind(d,e)
#' conds <- c(rep("A", 100), rep("B",100))
#' # values should be approximately -2 and 2
#' codaSeq.effect(de, conds, corrected=TRUE)
codaSeq.effect <- function(x, conds, corrected=FALSE){
  conds <- as.factor(conds)
  ## Column indices of x for each condition level (renamed from `levels`,
  ## which shadowed base::levels in the original).
  level.idx <- split(seq_along(conds), conds)
  for (l in names(level.idx)) {
    if (length(level.idx[[l]]) < 2) stop("condition level '",l,"' has less than two replicates")
  }
  ## vapply preallocates the result instead of growing a vector in a loop.
  return.data <- vapply(seq_len(nrow(x)), function(i) {
    private.dnef(as.numeric(x[i, level.idx[[1]] ]),
                 as.numeric(x[i, level.idx[[2]] ]))
  }, numeric(1))
  if (corrected) return.data * 1.418 else return.data
}
# Vectorized maximum absolute deviation: for each position, the larger of
# the absolute deviations of x and y from a 10x bootstrap resample of
# themselves (the shorter inputs recycle against the length-10n resamples).
# Sampling order (x first, then y) is preserved for RNG reproducibility.
private.mxad <- function(x, y) {
  dev_x <- abs(x - sample(x, size = 10 * length(x), replace = TRUE))
  dev_y <- abs(y - sample(y, size = 10 * length(y), replace = TRUE))
  pmax(dev_x, dev_y)
}
# Robust standardized mean difference of paired vectors: the median of the
# elementwise difference scaled by the bootstrap maximum absolute deviation
# (see private.mxad); NAs from zero deviations are dropped.
private.dnef <- function(a, b) {
  scaled_diff <- (a - b) / private.mxad(a, b)
  median(scaled_diff, na.rm = TRUE)
}
|
01c82136b7938407920879373274f019a0817fee | 477f8c6127c6f2947b8fd5826bf83e15c23e0415 | /man/coef.hqreg.Rd | 03802edbf5dce5fe2a8c8667b590db6842a96043 | [] | no_license | jsliu/hqreg | b61c44b157c18cf07cc9a5055e2e3825fc016702 | 254c89fd8f9d3a7e74511f22572968cce61f7e04 | refs/heads/master | 2020-05-30T22:03:18.393052 | 2019-07-15T10:27:27 | 2019-07-15T10:27:27 | 189,985,677 | 0 | 0 | null | 2019-06-03T10:38:02 | 2019-06-03T10:38:02 | null | UTF-8 | R | false | true | 391 | rd | coef.hqreg.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.hqreg.R
\name{coef.hqreg}
\alias{coef.hqreg}
\title{Coefficients of cross-validated models}
\usage{
\method{coef}{hqreg}(object, lambda, exact = FALSE, ...)
}
\arguments{
\item{object}{cross validated hqreg model}
\item{lambda}{a vector of lambdas}
}
\description{
Extract coefficients from a cross-validated hqreg model at the given lambda values.
}
|
da158f7a7ad01c0246726b2c94147162bd5a259a | f0c4f62350ce5e7bcdd2c30842d95921d9dc69a3 | /DEGs/DEGs_workflow.R | f6242a4010bec2e7e832285b795d3dfc418ab46a | [] | no_license | unswo/msc_project_files | 1ab8e6517d41a704be3b362438981c0c3b41e490 | fb39e963762390595a00ba94248c0c18d9d152ae | refs/heads/main | 2023-05-14T03:10:42.064993 | 2020-11-25T17:23:19 | 2020-11-25T17:23:19 | 316,006,901 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,223 | r | DEGs_workflow.R | #!/usr/bin/Rscript
# Differential expression analysis
# Adrienne Unsworth 190033848, 2020
#
# Perform standard analysis from transcript abundance estimates obtained from Stringtie
# Could easily be adapted for use with salmon by altering tximport command or raw counts from HTseq/Featurecounts.
#
# Import libraries
library(biomaRt)
library(GenomicFeatures)
library(tximport)
library(DESeq2)
library(ggplot2)
library(dplyr)
library(tidyverse)
library(fgsea)
library(genefilter)
library(pheatmap)
library(apeglm)
library(gprofiler2)
library(EnhancedVolcano)
# Define useful function(s)
`%notin%` <- Negate(`%in%`)
# Set working directory
setwd("path/to/transcript/files")
# Define biomaRt marts that will be used later on
# Full mart, other marts will be drived from this
ensembl_hs_mart <-
useMart(biomart = "ensembl", dataset = "hsapiens_gene_ensembl")
ensembl_df <-
getBM(attributes = c("ensembl_gene_id", "external_gene_name"),
mart = ensembl_hs_mart)
# data fram of ncRNAs, mtRNAs, rRNAs and pseudogenes
ensembl_ncrna <-
getBM(
attributes = c("transcript_biotype", "external_gene_name"),
mart = ensembl_hs_mart,
filters = 'biotype',
values = c(
'lncRNA',
'rRNA',
'Mt_rRNA',
'Mt_tRNA',
'miRNA',
'rRNA_pseudogene',
'pseudogene'
)
)
# define protein coding genes
ensembl_coding <- getBM(
attributes = c("transcript_biotype", "external_gene_name"),
mart = ensembl_hs_mart,
filters = 'biotype',
values = c('protein_coding')
)
# example of filtering mart for specific function, ie as defined by a GO term
# ensembl_fusion <-
# getBM(
# attributes = c("ensembl_gene_id", "go_id", 'hgnc_symbol'),
# filters = 'go',
# values = 'GO:0003700',
# mart = ensembl_hs_mart
# )
# TF <- unique(ensembl_fusion$hgnc_symbol)
# another example of creating a dataframe of ensembl IDs from hgnc symbols
# ensembl_splice <-
# getBM(
# attributes = c("ensembl_gene_id", 'hgnc_symbol'),
# filters = 'hgnc_symbol',
# values = c(
# 'VEGFA',
# 'KLF6',
# 'BCL2L2',
# 'FGFR2',
# 'TMPRSS2',
# 'ERG',
# 'AR',
# 'PTEN',
# 'TP53',
# 'RB1',
# 'BRCA1',
# 'BRCA2'
# ),
# mart = ensembl_hs_mart
# )
# Import samples; the '_t_data.ctab' pattern is specific to stringtie output
samples_stringtie <-
list.files(pattern = '_t_data.ctab')
# project data acquired from ENA website, uncomment to download
#download.file('https://www.ebi.ac.uk/ena/data/warehouse/filereport?accession=PRJNA411786&result=read_run&fields=study_accession,sample_accession,secondary_sample_accession,experiment_accession,run_accession,tax_id,scientific_name,instrument_model,library_layout,fastq_ftp,fastq_galaxy,submitted_ftp,submitted_galaxy,sra_ftp,sra_galaxy,cram_index_ftp,cram_index_galaxy&download=txt', destfile = 'PRJNA411786.txt')
#download.file('https://www.ebi.ac.uk/ena/data/warehouse/filereport?accession=PRJNA552058&result=read_run&fields=study_accession,sample_accession,secondary_sample_accession,experiment_accession,run_accession,tax_id,scientific_name,instrument_model,library_layout,fastq_ftp,fastq_galaxy,submitted_ftp,submitted_galaxy,sra_ftp,sra_galaxy,cram_index_ftp,cram_index_galaxy&download=txt', destfile = 'PRJNA552058.txt')
samples_PRJNA411786 <-
read.delim(('PRJNA411786.txt'))
samples_PRJNA552058 <-
read.delim(('PRJNA552058.txt'))
samples_info <-
rbind(samples_PRJNA411786, samples_PRJNA552058)
# #remove extra info - ftp links etc so we're only interested in col 1-9
samples_info <- samples_info[, 1:9]
# NOTE(review): this assumes list.files() returned the ctab files in the same
# order as the run accessions in the combined ENA tables -- verify.
names(samples_stringtie) <-
samples_info$run_accession
# pull annotation from ctab files (transcript name -> gene name map)
tmp <-
read_tsv(samples_stringtie[1], col_types = cols())
tx2gene <- tmp[, c("t_name", "gene_name")]
# gene-level import of the stringtie quantifications
txi.genes.stringtie <-
tximport(samples_stringtie, type = 'stringtie', tx2gene = tx2gene)
# Sample metadata for DESeq2; DESeqDataSetFromTximport (below) requires the
# rownames of this table to match the columns of the imported count matrix.
sampleTable <- read.csv('proj_design.csv')
sampleTable <- sampleTable[,-1]
sampleTable$Patient <-
as.factor(sampleTable$Patient)
rownames(sampleTable) <-
colnames(txi.genes.stringtie$counts)
# DESeq2 - more or less standard DESeq workflow
# ~ BioProject + Condition is the overall design of the experiment
# The first term (BioProject) absorbs the batch effect between the two studies
dds <-
DESeqDataSetFromTximport(txi.genes.stringtie, sampleTable, ~ BioProject + Condition)
dds_2 <- DESeq(dds)
res <- results(dds_2)
# log2 fold-change shrinkage (apeglm) for the tumour-vs-normal contrast
res <-
lfcShrink(dds_2,
coef = "Condition_T_vs_N",
res = res,
type = 'apeglm')
# estimate dispersion
plotDispEsts(dds_2,
ylim = c(1e-10, 1e2),
xlab = 'Mean of normalised counts',
ylab = 'Dispersion')
res_df <- as.data.frame(res)
res_df$ensembl_gene_id <- rownames(res_df)
# Not needed unless you must manually annotate external gene names, i.e. counts/abundances from another tool
# res_df <-
# merge.data.frame(
# res_df,
# ensembl_df,
# by = intersect(colnames(res_df), colnames(ensembl_df)),
# all.x = TRUE,
# all.y = FALSE
# )
# Keep genes with a defined adjusted p-value, then build the log2FC-ranked
# named vector that fgsea expects (names = gene ids, sorted descending).
res_df2 <-
filter(res_df,!is.na(res_df$padj))
gseaInput <-
filter(res_df2,!is.na(rownames(res_df2)))
ranks <- gseaInput$log2FoldChange
names(ranks) <- gseaInput$ensembl_gene_id
ranks <- sort(ranks, decreasing = TRUE)
# Range of gmt files for different enrichment analyses
#
# gmt_list <-
# gmtPathways('h.all.v7.1.symbols.gmt')
# kegg <-
# gmtPathways('c2.cp.kegg.v7.1.symbols.gmt')
# GO_bp <-
# gmtPathways('c5.bp.v7.1.symbols.gmt')
# NOTE(review): gmt_list is only defined in the commented-out lines above, so
# this fgsea() call fails unless one of them is uncommented first.
gseaRes <-
fgsea(gmt_list, ranks, nperm = 1000)
gseaResTidy <-
gseaRes %>% as_tibble() %>% arrange(desc(NES))
# Ten most significant up- and down-enriched pathways (by raw p-value)
topPathwaysUp <-
gseaRes[ES > 0][head(order(pval), n = 10), pathway]
topPathwaysDown <-
gseaRes[ES < 0][head(order(pval), n = 10), pathway]
topPathways <-
c(topPathwaysUp, rev(topPathwaysDown))
# Different option for GSEA visuals
#plot.new()
#plotGseaTable(gmt_list[topPathways], ranks, gseaRes,
# gseaParam = 0.5)
# Define top DE genes for heatmap
# NOTE(review): res_df has no external_gene_name column unless the commented
# merge above was run; assigning NULL here merely resets the rownames.
rownames(res_df) <-
res_df$external_gene_name
topGenes <- arrange(res_df, padj)
topGenes <-
filter(topGenes, padj <= 0.01, abs(log2FoldChange) >= 2)
rownames(topGenes) <-
topGenes$ensembl_gene_id
topGenes <- rownames(topGenes)
#GO overrepresentation analysis
# NOTE(review): topGenes[1:5000] pads with NAs when fewer than 5000 genes pass
# the filter -- head(topGenes, 5000) would be safer.
gostres <-
gost(
topGenes[1:5000],
organism = 'hsapiens',
sources = c('GO', 'KEGG'),
evcodes = TRUE
)
gostplot(gostres)
counts_from_dds <- counts(dds_2)
# Pinched from:
# DESeq results to pathways in 60 Seconds with the fgsea package
# Stephen Turner
ggplot(gseaResTidy, aes(reorder(pathway, NES), NES)) +
geom_col(aes(fill = padj < 0.05)) +
coord_flip() +
labs(x = "Pathway", y = "Normalized Enrichment Score",
title = "Hallmark pathways NES from GSEA") +
theme_minimal()
# Filter for protein coding only (restrict the dataset to genes whose name
# appears in the protein_coding biomaRt table built at the top of the script)
dds_2 <-
dds_2[rownames(dds_2) %in% ensembl_coding$external_gene_name, ]
res <- results(dds_2)
res <-
lfcShrink(dds_2,
coef = "Condition_T_vs_N",
res = res,
type = 'apeglm')
plotMA(res, alpha = 0.01, ylim = c(-5, 5))
res_df <- as.data.frame(res)
# Significant DE genes (padj < 0.01), ordered by absolute fold change
DE_genes <- subset(res, res$padj < 0.01)
DE_genes <-
DE_genes[order(abs(DE_genes$log2FoldChange), decreasing = TRUE), ]
DE_genes <-
DE_genes[rownames(DE_genes) %in% ensembl_coding$external_gene_name, ]
DE_genes <- as.data.frame(DE_genes)
# write to csv
#write.csv2(DE_genes, file = 'DE_genes.csv')
res_df <- as.data.frame(DE_genes)
res_df$ensembl_gene_id <- rownames(res_df)
res_df2 <-
filter(res_df,!is.na(res_df$padj))
#plot gene counts for gene with smallest padj
# NOTE(review): counts_interest is never created in this script, so this
# section errors as written; it presumably should be built the way counts_df
# is built in the loop further below.
rownames(counts_interest) <- NULL
p <- ggplot(counts_interest,
aes(
x = Patient,
y = count,
color = Condition,
group = Condition
)) +
geom_point() + stat_summary(fun = mean, geom = "line") +
scale_y_log10()
p <-
p + facet_wrap(~ gene, scales = 'fixed') + labs(x = 'Gene', y = 'Normalised counts')
p
# QC data (blind rlog so the transformation is unaware of the design)
rlog_data_blind <-
rlogTransformation(dds_2, blind = TRUE)
# Euclidean distances of samples
dist_rl <- dist(t(assay(rlog_data_blind)))
dist_rl <- as.matrix(dist_rl)
heatmap(dist_rl)
# PCA analysis
plotPCA(rlog_data_blind, intgroup = c('Condition', 'BioProject'))
top_genes <- head(order(DE_genes$padj), 20)
# gene counts
# NOTE(review): this repeats the topGenes construction from the GSEA section
# above (including the external_gene_name caveat noted there).
rownames(res_df) <-
res_df$external_gene_name
topGenes <- arrange(res_df, padj)
topGenes <-
filter(topGenes, padj <= 0.01, abs(log2FoldChange) >= 2)
rownames(topGenes) <-
topGenes$ensembl_gene_id
topGenes <- rownames(topGenes)
# Build a long data frame of normalised counts for the 20 top genes; note
# plotCounts() is called on the unfiltered dds, not dds_2. rbind-in-loop is
# quadratic in general but fine for 20 genes.
plotcounts <- NULL
counts_df <- NULL
plottedGenes <- topGenes[1:20]
for (x in plottedGenes) {
plotcounts[[x]] <-
plotCounts(
dds,
gene = x,
intgroup = c("Condition"),
returnData = TRUE
)
plotcounts[[x]][, 'gene'] <-
rep(x, length(plotcounts[[x]][, 'count']))
counts_df <-
rbind(counts_df, as.data.frame(plotcounts[[x]]))
rownames(counts_df) <- NULL
}
# Boxplots of normalised counts per gene, split by condition
p <-
ggplot(data = counts_df, aes(x = gene, y = count)) + geom_boxplot(aes(fill =
Condition))
p + facet_wrap(~ gene, scales = 'free') + labs(x = 'Gene', y = 'Normalised counts')
## counts for specific genes of interest (requires the commented
## ensembl_splice table defined near the top of the script)
# counts_df2 <- NULL
# gene_interest <- ensembl_splice$hgnc_symbol
# for (x in gene_interest) {
# plotcounts[[x]] <-
# plotCounts(
# dds,
# gene = x,
# intgroup = c("Condition"),
# returnData = TRUE
# )
# plotcounts[[x]][, 'gene'] <-
# rep(x, length(plotcounts[[x]][, 'count']))
# # y <-
# # ensembl_df$external_gene_name[ensembl_df$ensembl_gene_id == x]
# # plotcounts[[x]][, 'gene_name'] <-
# # rep(y, length(plotcounts[[x]][, 'count']))
# counts_df2 <-
# rbind(counts_df2, as.data.frame(plotcounts[[x]]))
# rownames(counts_df2) <- NULL
# }
#
# p <-
# ggplot(data = counts_df2, aes(x = gene, y = count)) + geom_boxplot(aes(fill =
# Condition))
# p + facet_wrap( ~ gene, scales = 'free') + labs(x = 'Gene', y = 'Normalised counts')
# Heatmap of most variable genes
rld <- vst(dds_2)
# topVarGenes <- head(order(-rowVars(assay(rld))), 20)
# mat <- assay(rld)[topVarGenes,]
# mat <- mat - rowMeans(mat)
# df <-
# as.data.frame(colData(rld)[, c("BioProject", "Condition")])
# annotation_colours <-
# list(Condition = c(N = "lightblue", T = "red"))
# pheatmap(mat, annotation_col = df,)#annotation_colors = annotation_colours)
# Heatmap of top genes: row-centred vst values for the first 30 top genes
# (NOTE(review): topGenes[1:30] pads with NAs if fewer than 30 pass the filter)
mat <- assay(rld)[topGenes[1:30], ]
mat <- mat - rowMeans(mat)
df <-
as.data.frame(colData(rld)[, c("BioProject", "Condition")])
annotation_colours <-
list(Condition = c(N = "lightblue", T = "red"))
pheatmap(mat, annotation_col = df, )
#VolcanoPlot
# Better visualisation option than MA plot
EnhancedVolcano(
res,
lab = rownames(res),
x = 'log2FoldChange',
y = 'padj',
xlim = c(-6, 6),
title = 'Tumour versus Normal tissues',
pCutoff = 0.01,
FCcutoff = 2,
legendPosition = 'bottom',
caption = 'Fold change cut off = 2.0, adjusted p-value = 0.01',
subtitle = 'Differential expression'
)
#STRING list
# List of DEGs submitted to STRING (one gene name per line on stdout)
cat(rownames(DE_genes[abs(DE_genes$log2FoldChange) >= 2,]), sep = '\n')
|
23355dcd07496d4088be31b50c2e306845f69462 | a5ecc66b3d85d32f9a033577574622f7cf0cf81d | /Credit application prediction_Logistic Regression.R | b00bf937139c66f199650d1686c3e44d2d9decf1 | [] | no_license | Mulan2019/Classification | 006985eb54d949c5590ecc1f2c928b8cb60a3508 | 23b3f3cbba6f24e2df8ef22192b948fa0dc1d649 | refs/heads/master | 2022-11-28T05:59:40.342319 | 2020-08-10T21:42:48 | 2020-08-10T21:42:48 | 286,556,483 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,661 | r | Credit application prediction_Logistic Regression.R |
if("pacman" %in% rownames(installed.packages()) == FALSE) {install.packages("pacman")} # Check if you have the universal installer package; install if not
pacman::p_load("caret","ROCR","lift","glmnet","MASS","e1071") #Check, and if needed install, the necessary packages
# To ensure "apples-to-apples" comparisons with logistic regression, use the same training and testing -- the code below only works in the same R session after you've run the logistic regression code
# NOTE(review): read_excel() comes from readxl, which is not loaded here --
# this assumes it is already attached in the session.
credit<-read_excel(file.choose())
sum(is.na(credit))
str(credit)
# Load the data, correct mis-classified datafields, fix NAs -- same as you did in the logistic regression file
#data cleaning
# Categorical predictors -> factors (repayment-status codes and demographics)
credit$SEX <- as.factor(credit$SEX)
credit$EDUCATION<- as.factor(credit$EDUCATION)
credit$MARRIAGE <- as.factor(credit$MARRIAGE)
credit$PAY_1 <- as.factor(credit$PAY_1)
credit$PAY_2 <- as.factor(credit$PAY_2)
credit$PAY_3 <- as.factor(credit$PAY_3)
credit$PAY_4 <- as.factor(credit$PAY_4)
credit$PAY_5 <- as.factor(credit$PAY_5)
credit$PAY_6 <- as.factor(credit$PAY_6)
credit$default_0<-as.factor(credit$default_0)
# z-score standardise the continuous predictors (scale() returns 1-column
# matrices, which glm() below handles fine)
credit$AGE<-scale(credit$AGE)
credit$BILL_AMT1<-scale(credit$BILL_AMT1)
credit$BILL_AMT2<-scale(credit$BILL_AMT2)
credit$BILL_AMT3<-scale(credit$BILL_AMT3)
credit$BILL_AMT4<-scale(credit$BILL_AMT4)
credit$BILL_AMT5<-scale(credit$BILL_AMT5)
credit$BILL_AMT6<-scale(credit$BILL_AMT6)
credit$PAY_AMT1<-scale(credit$PAY_AMT1)
credit$PAY_AMT2<-scale(credit$PAY_AMT2)
credit$PAY_AMT3<-scale(credit$PAY_AMT3)
credit$PAY_AMT4<-scale(credit$PAY_AMT4)
credit$PAY_AMT5<-scale(credit$PAY_AMT5)
credit$PAY_AMT6<-scale(credit$PAY_AMT6)
credit$LIMIT_BAL<-scale(credit$LIMIT_BAL)
#Feature Engineering
# Amount_Owed: 6-month billed total minus 6-month paid total. Computed on the
# standardised columns, so units are sums of z-scores rather than dollars.
credit$Amount_Owed<-credit$BILL_AMT1+credit$BILL_AMT2+
credit$BILL_AMT3+credit$BILL_AMT4+credit$BILL_AMT5+
credit$BILL_AMT6-credit$PAY_AMT1-credit$PAY_AMT2-credit$PAY_AMT3-
credit$PAY_AMT4-credit$PAY_AMT5-credit$PAY_AMT6
# AVG_Amount_Owed: monthly average of the above
credit$AVG_Amount_Owed<-credit$Amount_Owed/6
# Payments_Missed: how many of the six monthly repayment statuses
# (PAY_1..PAY_6) indicate a missed/late payment (status code >= 1).
# The PAY_* columns were converted to factors above, so each is mapped
# factor -> character -> numeric before the comparison. This replaces six
# copy-pasted ifelse() chains with one vectorised reduction; NA statuses
# propagate to NA exactly as before.
pay_cols <- paste0("PAY_", 1:6)
credit$Payments_Missed <- as.numeric(Reduce(
  `+`,
  lapply(credit[pay_cols],
         function(p) as.numeric(as.character(p)) >= 1)
))
# BalLim: mean monthly bill relative to the credit limit (a ratio of z-scores,
# since BILL_AMT* and LIMIT_BAL were standardised above).
credit$BalLim<- ((credit$BILL_AMT1+credit$BILL_AMT2+credit$BILL_AMT3+credit$BILL_AMT4+credit$BILL_AMT5+credit$BILL_AMT6)/6)/credit$LIMIT_BAL
## Predict test file: load and preprocess the scoring data with the same
## transformations applied to the training data above.
# NOTE(review): scale() here standardises the scoring set with its *own*
# mean/sd rather than reusing the training-set parameters; predictions may be
# biased if the two distributions differ.
new<-read_excel(file.choose())
sum(is.na(new))
str(new)
# Load the data, correct mis-classified datafields, fix NAs -- same as you did in the logistic regression file
#data cleaning
new$SEX <- as.factor(new$SEX)
new$EDUCATION<- as.factor(new$EDUCATION)
new$MARRIAGE <- as.factor(new$MARRIAGE)
new$PAY_1 <- as.factor(new$PAY_1)
new$PAY_2 <- as.factor(new$PAY_2)
new$PAY_3 <- as.factor(new$PAY_3)
new$PAY_4 <- as.factor(new$PAY_4)
new$PAY_5 <- as.factor(new$PAY_5)
new$PAY_6 <- as.factor(new$PAY_6)
new$AGE<-scale(new$AGE)
new$BILL_AMT1<-scale(new$BILL_AMT1)
new$BILL_AMT2<-scale(new$BILL_AMT2)
new$BILL_AMT3<-scale(new$BILL_AMT3)
new$BILL_AMT4<-scale(new$BILL_AMT4)
new$BILL_AMT5<-scale(new$BILL_AMT5)
new$BILL_AMT6<-scale(new$BILL_AMT6)
new$PAY_AMT1<-scale(new$PAY_AMT1)
new$PAY_AMT2<-scale(new$PAY_AMT2)
new$PAY_AMT3<-scale(new$PAY_AMT3)
new$PAY_AMT4<-scale(new$PAY_AMT4)
new$PAY_AMT5<-scale(new$PAY_AMT5)
new$PAY_AMT6<-scale(new$PAY_AMT6)
new$LIMIT_BAL<-scale(new$LIMIT_BAL)
#Feature Engineering (mirrors the training-set features above)
new$Amount_Owed<-new$BILL_AMT1+new$BILL_AMT2+
new$BILL_AMT3+new$BILL_AMT4+new$BILL_AMT5+
new$BILL_AMT6-new$PAY_AMT1-new$PAY_AMT2-new$PAY_AMT3-
new$PAY_AMT4-new$PAY_AMT5-new$PAY_AMT6
new$AVG_Amount_Owed<-new$Amount_Owed/6
# Payments_Missed for the scoring set: count of the six monthly repayment
# statuses (PAY_1..PAY_6) with code >= 1 (missed/late). Same vectorised form
# as the training-set feature; NA statuses propagate to NA exactly as the
# original ifelse() chains did.
pay_cols <- paste0("PAY_", 1:6)
new$Payments_Missed <- as.numeric(Reduce(
  `+`,
  lapply(new[pay_cols],
         function(p) as.numeric(as.character(p)) >= 1)
))
# BalLim: mean monthly bill relative to the credit limit (ratio of z-scores).
new$BalLim<- ((new$BILL_AMT1+new$BILL_AMT2+new$BILL_AMT3+new$BILL_AMT4+new$BILL_AMT5+new$BILL_AMT6)/6)/new$LIMIT_BAL
# Select the variables to be included in the "base-case" model:
# full logistic regression on every predictor except the ID column.
model_logistic<-glm(default_0~., data=subset(credit, select=-c( ID )), family="binomial"(link="logit"))
summary(model_logistic)
# to add surrogates paste this to the list of variables; note, it will run quite a bit slower
#Special.Pay_surrogate + Early.RPL_surrogate + Latest.RPL_surrogate +
#Initial.System.Date_surrogate + CRM.Segment_surrogate + MDR.High.Grade_surrogate +
#Total.School.Enrollment_surrogate + FirstMeeting_surrogate +
#LastMeeting_surrogate + DifferenceTraveltoFirstMeeting_surrogate +
#DifferenceTraveltoLastMeeting_surrogate + FPP.to.School.enrollment_surrogate
# NOTE(review): the surrogate names above come from a different (student
# retention) dataset -- leftovers from the template this script was adapted
# from; they do not exist in the credit data.
##The model clearly has too many variables, most of which are insignificant
## Stepwise regressions. There are three approaches to running stepwise regressions: backward, forward and "both"
## In either approach we need to specify criterion for inclusion/exclusion. Most common ones: based on information criterion (e.g., AIC) or based on significance
model_logistic_stepwiseAIC<-stepAIC(model_logistic,direction = c("both"),trace = 1) #AIC stepwise
summary(model_logistic_stepwiseAIC)
par(mfrow=c(1,4))
plot(model_logistic_stepwiseAIC) #Error plots: similar nature to lm plots
par(mfrow=c(1,1))
###Finding predictions: probabilities and classification
# NOTE(review): stats::frequency() is a time-series accessor -- this call does
# not compute the default rate; mean(credit$default_0 == 1) was likely meant.
frequency(credit, default_0 == 1)
new_probabilities<-predict(model_logistic_stepwiseAIC,newdata=new,type="response") #Predict probabilities
# NOTE(review): the test-set size (1000) and cutoff (0.221083) are hard-coded;
# prefer length(new_probabilities) and a threshold derived from training data.
new_classification<-rep("1",1000)
new_classification[new_probabilities<0.221083]="0" #Classify with a 0.221083 cutoff -- presumably the training-set base default rate; confirm
#export
write.csv(new_classification,file="new application prediction.csv")
getwd()
|
260d92312f27f2386bfb053aef52349fbc548072 | 880c8d4a9401d2e08b62a23306fe5b5f4dfeeb78 | /man/greeks_by_time.Rd | 7fe440b0103a3698b416e1efdc930082f217ac81 | [
"MIT"
] | permissive | zumthor86/OptionsAnalytics | 6717ea8a76238f6a304171352e17e123db8dc088 | a1a9d56a0c635729b333086272d8f8d3c4e8642c | refs/heads/master | 2021-07-07T07:51:51.645454 | 2020-10-16T12:50:35 | 2020-10-16T12:50:35 | 196,432,023 | 8 | 5 | null | null | null | null | UTF-8 | R | false | true | 603 | rd | greeks_by_time.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_strategy_greeks_by_time.R
\name{greeks_by_time}
\alias{greeks_by_time}
\title{Compute greeks over time for single option}
\usage{
greeks_by_time(option_leg, underlyer_prices, underlyer_datetimes)
}
\arguments{
\item{option_leg}{Option leg object}
\item{underlyer_prices}{Numeric vector containing option underlyer prices}
\item{underlyer_datetimes}{DateTime vector of the option underlyer prices}
}
\value{
List of option greeks for each underlyer price
}
\description{
Compute greeks over time for single option
}
|
3d70436d8144a7eb8c1a82ca52407df7c8a06aaf | d436640bba4f095b4c999aa770c957fe7d322874 | /tests/testthat/test_datasets.R | 8cffd94fd07247e2ff5c1a2822c2d62a351dfd09 | [
"MIT"
] | permissive | revodavid/azureml-sdk-for-r | e5ceaf4853c2fec445d6cffee786d3246dcc8247 | cdcb6769490d3f616372a52d7cb89342793f43f9 | refs/heads/master | 2020-08-14T09:26:47.906582 | 2020-05-07T18:08:30 | 2020-05-07T18:08:30 | 215,140,811 | 1 | 0 | NOASSERTION | 2020-01-24T21:25:32 | 2019-10-14T20:39:14 | R | UTF-8 | R | false | false | 2,621 | r | test_datasets.R | context("datasets")
source("utils.R")
# Integration test: exercises the tabular-dataset lifecycle end to end
# (upload -> create -> load -> register two versions -> unregister).
# Runs against a live Azure ML workspace; skipped when no subscription is set.
test_that("create a tabular dataset, register multiple versions of a dataset,
unregister a dataset",{
skip_if_no_subscription()
ws <- existing_ws
# upload files to datastore and create dataset
ds <- get_default_datastore(ws)
file_name <- "iris.csv"
upload_files_to_datastore(ds,
files = list(file.path(".", file_name)),
target_path = 'train-dataset/tabular/',
overwrite = TRUE)
dataset <- create_tabular_dataset_from_delimited_files(ds$path('train-dataset/tabular/iris.csv'))
# load data into data frame
pandas_df <- load_dataset_into_data_frame(dataset)
expect_equal(is.data.frame(pandas_df), TRUE)
# register two versions of the dataset under the same name
register_dataset(ws, dataset, "iris")
register_dataset(ws, dataset, "iris", create_new_version = TRUE)
# check updated number of datasets in workspace
# NOTE(review): asserting exactly 2 assumes a clean workspace with no other
# registered datasets -- brittle if tests share a workspace.
all_registered_datasets <- ws$datasets
expect_equal(length(all_registered_datasets), 2)
# unregister datasets; the local object's name/id fields should be cleared
unregister_all_dataset_versions(dataset)
expect_equal(dataset$name, NULL)
expect_equal(dataset$id, NULL)
})
# Integration test: file-dataset path (register a blob datastore -> create a
# file dataset -> inspect its paths -> submit an estimator run that consumes
# the dataset as a named input). Currently disabled via skip('skip').
test_that("register datastore, create file dataset,
get file dataset paths,
submit run with dataset as named input", {
skip('skip')
ws <- existing_ws
ds <- get_default_datastore(ws)
# register azure blob datastore with mnist data
# NOTE(review): account_name/datastore_name/container_name are assigned but
# never used below.
account_name <- "pipelinedata"
datastore_name <- "mnist_datastore"
container_name <- "sampledata"
ws_blob_datastore <- get_datastore(ws, "workspaceblobstore")
# NOTE(review): gsub("-", "", 1) just returns "1" -- a GUID/unique suffix was
# probably intended to make the datastore name unique; confirm.
blob_datastore_name <- paste0("dsblob", gsub("-", "", 1))
mnist_data <- register_azure_blob_container_datastore(
workspace = ws,
datastore_name = blob_datastore_name,
container_name = ws_blob_datastore$container_name,
account_name = ws_blob_datastore$account_name,
account_key = ws_blob_datastore$account_key,
create_if_not_exists = TRUE)
path_on_datastore <- mnist_data$path('mnist')
datapath <- data_path(mnist_data, path_on_datastore)
dataset <- create_file_dataset_from_files(datapath)
file_dataset_path <- get_file_dataset_paths(dataset)
# NOTE(review): expected value looks stale -- the dataset above is built from
# the 'mnist' path, not 'train-dataset/file/iris.csv'; verify before
# re-enabling this test.
expect_equal(file_dataset_path, 'train-dataset/file/iris.csv')
# submit with run
est <- estimator(".",
entry_script = "train_datasets_dummy.R",
compute_target = "local",
inputs = list(dataset$as_named_input('mnist')))
# NOTE(review): `exp` is not defined in this test; as written it resolves to
# base::exp -- an experiment object was presumably intended.
run <- submit_experiment(exp, est)
wait_for_run_completion(run, show_output = TRUE)
expect_equal(run$status, "Completed")
})
81744bb8f6acf39e1b71bd6b2d689c0cdfd3d579 | 6470ce550c26c7cd13245dab8b84623534e78655 | /第5章 数据分布型图表/图5-2-9 云雨图.R | 23f65ecbd1c857234c3462b3af1592aa7b50b1ec | [] | no_license | EasyChart/Beautiful-Visualization-with-R | 0d73ed4ee1e1855e33048330294335fbad6d2a25 | 27990b9349b697ec4336d3e72bae5f3a08d5f5ea | refs/heads/master | 2023-06-10T07:36:29.289034 | 2023-06-05T03:48:59 | 2023-06-05T03:48:59 | 189,740,776 | 687 | 446 | null | 2020-02-26T08:07:21 | 2019-06-01T14:14:10 | PostScript | UTF-8 | R | false | false | 5,119 | r | 图5-2-9 云雨图.R | #EasyCharts团队出品,
# For questions, corrections, or further study, contact WeChat: EasyCharts
library(ggplot2)
library(grid)
library(RColorBrewer)
library(dplyr)
library(SuppDists) # provides the rJohnson() function
# somewhat hackish solution to:
# https://twitter.com/EamonCaddigan/status/646759751242620928
# based mostly on copy/pasting from ggplot2 geom_violin source:
# https://github.com/hadley/ggplot2/blob/master/R/geom-violin.r
# Null-coalescing helper: return `a` unless it is NULL, otherwise fall back
# to `b` (same contract as rlang's / base R >= 4.4's `%||%`).
"%||%" <- function(a, b) {
  if (is.null(a)) b else a
}
# 4-colour palette from RColorBrewer "Set2" (entries 1, 2, 4, 5).
# NOTE(review): `color` does not appear to be referenced later in this script.
color<-brewer.pal(7,"Set2")[c(1,2,4,5)]
# geom_flat_violin(): a "half violin" layer -- like ggplot2::geom_violin()
# but drawing only one side of the density, as used in raincloud plots.
# Arguments mirror geom_violin(): `trim` cuts the tails at the data range and
# `scale` controls how violin areas are normalised; extra args in `...` are
# forwarded to the layer's params.
geom_flat_violin <- function(mapping = NULL, data = NULL, stat = "ydensity",
position = "dodge", trim = TRUE, scale = "area",
show.legend = NA, inherit.aes = TRUE, ...) {
layer(
data = data,
mapping = mapping,
stat = stat,
geom = GeomFlatViolin,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
trim = trim,
scale = scale,
...
)
)
}
# ggproto Geom backing geom_flat_violin(): draws half of the violin density
# (from x to x + width/2) as a filled polygon with an outline path.
GeomFlatViolin <-
ggproto("GeomFlatViolin", Geom,
setup_data = function(data, params) {
# width falls back to the layer param, then to 90% of the x resolution
data$width <- data$width %||%
params$width %||% (resolution(data$x, FALSE) * 0.9)
# ymin, ymax, xmin, and xmax define the bounding rectangle for each group
data %>%
group_by(group) %>%
mutate(ymin = min(y),
ymax = max(y),
xmin = x,
xmax = x + width / 2)
},
draw_group = function(data, panel_scales, coord) {
# Find the points for the line to go all the way around
data <- transform(data, xminv = x,
xmaxv = x + violinwidth * (xmax - x)) # transform() adds the xminv/xmaxv columns to the data frame
newdata <- rbind(plyr::arrange(transform(data, x = xmaxv), -y),plyr::arrange(transform(data, x = xminv), y))
# close the polygon by repeating the first vertex; the filled polygon has
# no outline colour -- the outline is drawn separately as a path
newdata_Polygon <- rbind(newdata, newdata[1,])
newdata_Polygon$colour<-NA
newdata_Path <- plyr::arrange(transform(data, x = xmaxv), -y)
ggplot2:::ggname("geom_flat_violin", grobTree(
GeomPolygon$draw_panel(newdata_Polygon, panel_scales, coord),
GeomPath$draw_panel(newdata_Path, panel_scales, coord))
)
},
draw_key = draw_key_polygon,
default_aes = aes(weight = 1, colour = "grey20", fill = "white", size = 0.5,
alpha = NA, linetype = "solid"),
required_aes = c("x", "y")
)
# "%||%" <- getFromNamespace("%||%", "ggplot2")
# "%>%" <- getFromNamespace("%>%", "magrittr")
set.seed(141079)
# Generate sample data -------------------------------------------------------
# findParams reference: https://github.com/hadley/boxplots-paper
# Solve for Johnson-distribution parameters matching the requested mean, sd,
# skewness and kurtosis (kurt is passed to the C routine as excess kurtosis,
# kurt - 3). Returns list(gamma, delta, xi, lambda, type) with type one of
# SN/SL/SU/SB.
# NOTE(review): this calls SuppDists' internal C entry point
# "JohnsonMomentFitR" directly via .C(), which is fragile across package
# versions -- SuppDists::JohnsonFit() is the exported equivalent (confirm).
findParams <- function(mu, sigma, skew, kurt) {
value <- .C("JohnsonMomentFitR", as.double(mu), as.double(sigma),
as.double(skew), as.double(kurt - 3), gamma = double(1),
delta = double(1), xi = double(1), lambda = double(1),
type = integer(1), PACKAGE = "SuppDists")
list(gamma = value$gamma, delta = value$delta,
xi = value$xi, lambda = value$lambda,
type = c("SN", "SL", "SU", "SB")[value$type])
}
# Normal distribution with mean 3 and sd 1
n <- rnorm(100,3,1)
# Johnson distribution -- skewed (call passes skew = 2.0, kurtosis = 13.1)
s <- rJohnson(100, findParams(3, 1, 2., 13.1))
# Johnson distribution with heavier tails (skew = 2.2, kurtosis = 20)
k <- rJohnson(100, findParams(3, 1, 2.2, 20))
# Bimodal mixture: two peaks with means 2*sqrt(0.9) ~= 1.90 and
# 4*sqrt(0.9) ~= 3.79, common sd sqrt(0.1) ~= 0.32
mm <- rnorm(100, rep(c(2, 4), each = 50) * sqrt(0.9), sqrt(0.1))
# Long-format data: 100 draws per class; "+3" shifts all values upward
mydata <- data.frame(
Class = factor(rep(c("n", "s", "k", "mm"), each = 100),
c("n", "s", "k", "mm")),
Value = c(n, s, k, mm)+3
)
#-------------------------------------------------------------
colnames(mydata)<-c("Class", "Value")
# Per-class mean and sd, used by the pointrange layer in the first plot below
d <- group_by(mydata, Class) %>%
summarize(mean = mean(Value),
sd = sd(Value))
# Raincloud-style plot: half-violin ("cloud") nudged right, jittered raw
# points ("rain"), and mean +/- sd pointranges; coord_flip() lays the classes
# out horizontally.
ggplot(mydata, aes(Class, Value, fill=Class)) +
geom_flat_violin(position=position_nudge(x=.2)) +
geom_jitter(aes(color=Class), width=.1) +
geom_pointrange(aes(y=mean, ymin=mean-sd, ymax=mean+sd),
data=d, size=1, position=position_nudge(x=.2)) +
coord_flip() +
theme_bw() +
theme( axis.text = element_text(size=13),
axis.title = element_text(size=15),
legend.position="none")
# Variant: the mean +/- sd summary is replaced by a narrow white boxplot
# tucked under the half-violin.
ggplot(mydata, aes(x=Class, y=Value)) +
geom_flat_violin(aes(fill=Class),position=position_nudge(x=.25),color="black") +
geom_jitter(aes(color=Class), width=0.1) +
geom_boxplot(width=.1,position=position_nudge(x=0.25),fill="white",size=0.5) +
coord_flip() +
theme_bw() +
theme( axis.text = element_text(size=13),
axis.title = element_text(size=15),
legend.position="none")
|
12ecb6de809c550d88439bb4a0e6da59d222ae48 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/NHPoisson/examples/LRTpv.fun.Rd.R | 6ec826f41d1a814a37100e9ef1009c473e3d97ec | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 591 | r | LRTpv.fun.Rd.R | library(NHPoisson)
### Name: LRTpv.fun
### Title: Likelihood ratio test of the covariates in a model
### Aliases: LRTpv.fun
### ** Examples
# Barcelona daily maximum temperature series shipped with NHPoisson
data(BarTxTn)
# Covariates: annual harmonic (cos/sin of day-of-year) plus temperature terms
covB<-cbind(cos(2*pi*BarTxTn$dia/365), sin(2*pi*BarTxTn$dia/365),
BarTxTn$TTx,BarTxTn$Txm31,BarTxTn$Txm31**2)
# Extreme-heat events: exceedances of threshold 318 (presumably tenths of a
# degree C, i.e. 31.8 C -- confirm against ?BarTxTn)
BarEv<-POTevents.fun(T=BarTxTn$Tx,thres=318,
date=cbind(BarTxTn$ano,BarTxTn$mes,BarTxTn$dia))
# Fit a non-homogeneous Poisson process with the covariates above
mod1B<-fitPP.fun(tind=TRUE,covariates=covB,
posE=BarEv$Px, inddat=BarEv$inddat,
tit="BAR Tx; cos, sin, TTx, Txm31, Txm31**2",
start=list(b0=-100,b1=1,b2=10,b3=0,b4=0,b5=0),dplot=FALSE, modCI=FALSE)
# Likelihood-ratio p-values for dropping each covariate in turn
LRTpv.fun(mod1B)
|
20b21f6b68f29b8cd3daf3595474c58632a2d2bd | 6fd62788f43f8bcb435efe040bce50bd681de329 | /hashtag 2.R | 4b05330ef75dca6183aa779c39c9c66f7cf16b35 | [] | no_license | ThePatrickLynch/twitterhashtags | 4aeddf32df0f57162d65d29830851b6d28c519ef | f7100298485235eca434664a6bbc6c3a9ef111e9 | refs/heads/master | 2021-07-11T22:47:42.692113 | 2017-10-12T10:08:59 | 2017-10-12T10:08:59 | 106,794,341 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,792 | r | hashtag 2.R | #Retrieving Text from Twitter
# NOTE(review): rm(list=ls()) wipes the caller's global environment --
# discouraged in scripts that may be sourced.
rm(list=ls())
#Twitter API requires authentication since March 2013. Please follow instructions in "Section 3 - Authentication with OAuth" in the twitteR vignettes on # CRAN or this link to complete authentication before running the code below.
library(twitteR)
library(wordcloud)
library(stringr)
library(tm)
library(longurl)
library(wordcloud)
library(RColorBrewer)
library(SnowballC)
library(dplyr)
library(tidytext)
library(tidyr)
library(plotly)
# SECURITY NOTE(review): the commented-out call below embeds what appear to
# be real Twitter API keys and access tokens. Even commented, they are
# exposed in version control -- revoke them and load credentials from the
# environment instead.
#setup_twitter_oauth("dr83qJt5IfcuqmicXPI9yINlA", "c0bSVElsvFtuHQnlZhnvphup98486t1Qm3BJEezTqIlNfSzvM6","37933003-LSHwa6XzUtCwXnt3HN4nw0cq37Qd8ALtnyTEAYsI3", "hXrLrYqKkzmoqyaZJsfmTI5bO5zv3yPVytR9fMDWVuSpl")
#setwd("d:/Data/github/R Twitter")
# One-off harvest of #lthechat tweets (cached to CSV below)
#rdmTweets <- searchTwitter("#lthechat", n=1500, since='2017-4-4')
#n <- length(rdmTweets)
#tweets <- do.call("rbind", lapply(rdmTweets, as.data.frame)) # convert to a datafame
# write.csv(tweets, file="lthechat 4-4 to 10-4-2017.csv") # save for posterity
# Work from the cached harvest so the analysis is reproducible offline
tweets <- read.csv("lthechat 4-4 to 10-4-2017.csv")
posters <- sort(unique(tweets$screenName))
n.posters <- length(posters)
# counts per poster (table sorted by descending tweet count)
poster.freq <- sort(table(tweets$screenName), decreasing = TRUE)
poster.freq <- as.data.frame(poster.freq) # coerce to dataframe (columns: Var1, Freq)
url_pattern <- "http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
# For each poster (rows of poster.freq, ordered by post count), accumulate
# per-user engagement metrics into columns 3..8; they are named just below.
# Changes from the original: seq_len() instead of 1:n (safe for n == 0), and
# the dead assignment `numtweets <- length(subbit)` was removed -- it was
# never used, and length() on a data frame returns the number of columns,
# not rows, so it was misleading as well.
for(i in seq_len(n.posters)) {
  this.poster <- poster.freq$Var1[i]
  subbit <- subset(tweets, tweets$screenName == this.poster)
  poster.freq[i, 3] <- sum(str_count(subbit$text, "http"))  # number of urls linked by user
  poster.freq[i, 4] <- sum(subbit$favoriteCount)            # number of times posts were favorited
  poster.freq[i, 5] <- sum(str_count(subbit$text, "@"))     # number of others referred
  poster.freq[i, 6] <- sum(subbit$retweetCount)             # number of times posts were retweeted
  poster.freq[i, 7] <- sum(subbit$isRetweet, na.rm = TRUE)  # number of posts that were retweets
  # replies to anyone except the #LTHEchat host account itself
  repliesto <- sum(!is.na(subbit$replyToSN))
  replylthe <- length(which(subbit$replyToSN == "LTHEchat"))
  poster.freq[i, 8] <- repliesto - replylthe
  # textURL <- na.omit(str_extract(subbit$text, url_pattern))
  # textURL2 <- expand_urls(textURL)
  # textURL3 <- as.list(textURL2$expanded_url)
}
colnames(poster.freq) <- c("Poster","Posts made","Links embedded","Favouriting count", "Included others", "Total retweets", "Posts retweeted", "In reply to !lthe")
######################################################
# mess with content of posts
######################################################
# get rid of posts by LTHEchat - i'm not interested in counting the time the question is asked
subbit <- subset(tweets, tweets$screenName != "LTHEchat")
mytext <- subbit$text
#################
# clean
#################
# Order matters below: retweet markers, URLs, @mentions and #tags are
# stripped before punctuation, otherwise their remnants would survive as
# bare words.
#remove retweet entries
mytext = gsub('(RT|via)((?:\\b\\W*@\\w+)+)', '', mytext)
#remove emoticons (iconv drops characters that cannot map to ASCII)
mytext <- iconv(mytext, "latin1", "ASCII", "")
# remove control characters
mytext <- str_replace_all(mytext, "[[:cntrl:]]", " ")
# remove &
mytext <- str_replace_all(mytext, "&", " ")
# remove links
mytext <- str_replace_all(mytext, "(http[^ ]*)|(www\\.[^ ]*)", " ")
# convert tweets to lower case
mytext <- tolower(mytext)
# remove at people
mytext = gsub('@\\w+', '', mytext)
# remove tags
mytext <-gsub("#[[:alnum:][:punct:]]*","",mytext)
# remove punctuation
mytext = gsub('[[:punct:]]', ' ', mytext)
# remove numbers
mytext <- gsub("\\d", "", mytext)
# build a corpus
# VectorSource specifies that the source is character vectors.
myCorpus <- Corpus(VectorSource(mytext))
copyCorpus <- myCorpus
# myCorpus <- tm_map(myCorpus, PlainTextDocument)
# myCorpus <- tm_map(myCorpus, stemDocument, language="english")
# strip whitespace
myCorpus <- tm_map(myCorpus, stripWhitespace)
#remove stopwords
myCorpus <- tm_map(myCorpus, removeWords, stopwords('english')) #remove stopwords
tdm = TermDocumentMatrix(myCorpus)
# term frequencies (keep only terms appearing at least 20 times)
term.freq <- rowSums(as.matrix(tdm))
term.freq <- subset(term.freq, term.freq >= 20)
dm <- data.frame(term = names(term.freq), freq = term.freq)
# wordcloud
wordcloud(dm$term, dm$freq, random.order = FALSE, colors = brewer.pal(8, "Dark2"))
##################
# sentiment
##################
ap_td <- tibble()
# NOTE(review): tidy() does not tokenise -- each row of ap_td holds a whole
# tweet, so the join on "word" against the NRC lexicon will only ever match
# single-word tweets; tidytext::unnest_tokens() was probably intended here.
# Also, right_join() keeps every lexicon row regardless of a match, so the
# counts below largely reflect the size of each sentiment category in the
# lexicon rather than the tweets themselves. Verify before trusting the plot.
ap_td <- tidy(tweets$text)
colnames(ap_td) <- "word"
ap_sentiments <- ap_td %>% right_join(get_sentiments("nrc")) %>% filter(!is.na(sentiment)) %>% count(sentiment, sort=TRUE)
#ap_sentiments <- as.table(ap_sentiments)
colors <- c("Red","Green", col=cm.colors(8))
barplot(ap_sentiments$n, ylab = "Frequency", col=colors, las=3, main = "#LTHEchat 4-9 April 2017",legend.text = ap_sentiments$sentiment)
#legend.text = ap_sentiments$sentiment, names.arg = ap_sentiments$sentiment
|
f36eeb937814db8dda3f2e078fd399467712a7a3 | d5aa7e759f4faa62d954728222f9239dbdd8f995 | /F210I/11.HetVHomDE.R | ddcf83e8ac13f67e6af44dd2e08d6d3d0861dbc8 | [] | no_license | SethMagnusJarvis/PhDFigureCreation | 1b58a42eab64fdf0d49fb28fbb6e965f6d266b28 | b076b021abc2df2677e2e55129f837cd6b3e2a55 | refs/heads/main | 2023-03-09T19:04:14.529315 | 2021-02-25T12:39:29 | 2021-02-25T12:39:29 | 342,240,232 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,754 | r | 11.HetVHomDE.R | library(tidyverse)
library(ggplot2)
library(rgl)
library(pheatmap)
library(limma)
#import data
# HET (adult) and HOM (embryo) F210I differential-expression tables plus the
# matching RPKM matrices. Column 2 of each RPKM CSV is dropped -- presumably
# a gene-name column; confirm against the files.
HETDE <- read_csv("F210IAdultDE.csv")
colnames(HETDE) <- c("EnsemblID", "log2FoldChange", "stat", "pvalue", "padj", "signedZ")
HETRPKM <- read_csv("rpkm_values.csv")[,c(1,3:14)]
HOMDE <- read_tsv("deseq_F210I_HOM_embryo_June_2016_differential_expression.tab")
HOMRPKM <- read_csv("rpkm_values_F210I_HOM.csv")[,c(1,3:10)]
#merge tables and run PCA
RPKMJoin <- full_join(HETRPKM, HOMRPKM)
DEJoin <- full_join(HETDE, HOMDE, by="EnsemblID")
# PCA on the samples (columns 2:21 = 12 HET-study + 8 HOM-study samples)
ExpressionPCA <- prcomp(t(RPKMJoin[,2:21]))$x
ExpressionPCA <- as.data.frame(ExpressionPCA) %>% rownames_to_column("Sample")
#make PCA plot
# Hard-coded genotype labels by column position: 1-5 WT (HET study),
# 6-12 HET, 13-16 WT (HOM study), 17-20 HOM.
# NOTE(review): this silently depends on the column order of RPKMJoin --
# verify against the sample sheets.
Cond <- list()
Cond[1:5] <- "WTHET"
Cond[6:12] <- "HET"
Cond[13:16] <- "WTHOM"
Cond[17:20] <- "HOM"
ExpressionPCA$Type <- unlist(Cond)
ggplot(ExpressionPCA, aes(x=PC1, y=PC2, shape=Type)) + geom_point() +
scale_shape_manual(values=c(1,2,3,4)) + geom_text(label = ExpressionPCA$Sample)
##########################################################################################################################
#make venn diagrams
# Extract the Ensembl IDs of significantly differentially expressed genes:
# coerce the DESeq2 results to a data frame, discard any row containing an
# NA, and keep genes with an adjusted p-value below 0.05. Returns the
# EnsemblID column of the surviving rows as a vector.
SigGenes <- function(DESeqResults){
  quant_dat <- as.data.frame(DESeqResults)
  quant_dat <- quant_dat[complete.cases(quant_dat), ]
  significant <- quant_dat[quant_dat$padj < 0.05, ]
  significant$EnsemblID
}
# Build a two-column 0/1 membership table (rows = sorted union of significant
# gene IDs from both result sets, columns = HET/HOM) in the shape that
# limma::vennDiagram() accepts.
# Improvement over the original: the per-element loop (one %in% scan per id,
# accidentally quadratic) is replaced by two vectorised %in% calls; the
# logical results are coerced to 0/1 on assignment into the numeric matrix,
# exactly as the loop's scalar assignments did.
ListCompare <- function(HET, HOM){
  HETSigList <- SigGenes(HET)
  HOMSigList <- SigGenes(HOM)
  ids <- sort(unique(c(as.character(HETSigList), as.character(HOMSigList))))
  counts <- matrix(0, nrow = length(ids), ncol = 2)
  counts[, 1] <- ids %in% HETSigList
  counts[, 2] <- ids %in% HOMSigList
  colnames(counts) <- c("HET", "HOM")
  row.names(counts) <- ids
  as.data.frame(counts)
}
# Compare HET vs HOM significant gene sets and draw a Venn diagram to SVG.
HetVHOMDE <- ListCompare(HETDE, HOMDE)
svg("HETvHOMVennUpdate.svg")
# NOTE(review): vennDiagram()/title() are base graphics; the `+` between them
# is ggplot syntax and has no effect here beyond evaluating both calls -- the
# title still draws because title() plots as a side effect.
vennDiagram(HetVHOMDE) + title("Comparison of DE of HET and HOM")
dev.off()
###########################################################################################################################
#Colour Z-score
# NOTE(review): `Diff <- HETDE` looks like a leftover debugging assignment;
# the function below shadows it with its own `Diff` parameter.
Diff <- HETDE
# Convert DE results into signed Z-scores: magnitude from the two-sided
# p-value, sign from the direction of the fold change.  Scores are clamped
# to [-20, 20].
MakeSignedZscore <- function(Diff){
  signedZ <- Diff %>%
    mutate(ZScore = ifelse(test = Diff$log2FoldChange > 0,
                           # Use lower.tail = FALSE rather than qnorm(1 - p/2):
                           # for p below ~1e-16, 1 - p/2 rounds to 1 in double
                           # precision and the original expression returned Inf
                           # (then clamped to 20) instead of the true quantile.
                           yes = qnorm(Diff$pvalue / 2, lower.tail = FALSE),
                           no = qnorm(Diff$pvalue / 2) )) %>%
    na.omit
  # high pass - clamp extreme scores so very low P does not dominate plots
  signedZ$ZScore[signedZ$ZScore > 20] <- 20
  signedZ$ZScore[signedZ$ZScore < -20] <- -20
  signedZ <- dplyr::select(signedZ, EnsemblID, ZScore)
  return(signedZ)
}
# Join the HET and HOM signed Z-scores by gene, keep only genes scored in
# both datasets, and move the gene IDs into the row names.
produceZscoreTable <- function(HETSEQ, HOMSEQ){
  het_scores <- MakeSignedZscore(HETSEQ)
  hom_scores <- MakeSignedZscore(HOMSEQ)
  joined <- full_join(hom_scores, het_scores, by = "EnsemblID",
                      suffix = c(".HOM", ".HET"))
  column_to_rownames(na.omit(joined), "EnsemblID")
}
ZScore <- produceZscoreTable(HETDE, HOMDE)
#Z-score plot 2
# Assign a colour per gene by which comparisons pass |Z| > 3.  case_when is
# evaluated top-down, so ordering matters:
#   #1b9e77 (green):  same-direction hit in both genotypes
#   #e7298a (pink):   opposite-direction hit in both genotypes
#   #7570b3 (purple): HET-only hit
#   #d95f02 (orange): HOM-only hit
#   #999999 (grey):   not significant in either
ZScoreColour <- drop_na(ZScore) %>% mutate(
  colour = case_when(
    ZScore.HET > 3 & ZScore.HOM > 3 ~"#1b9e77",
    ZScore.HET < -3 & ZScore.HOM < -3 ~"#1b9e77",
    ZScore.HET < -3 & ZScore.HOM > 3 ~"#e7298a",
    ZScore.HET > 3 & ZScore.HOM < -3 ~"#e7298a",
    ZScore.HET > 3 ~"#7570b3",
    ZScore.HET < -3 ~"#7570b3",
    ZScore.HOM > 3 ~"#d95f02",
    ZScore.HOM < -3 ~"#d95f02",
    ZScore.HET > -3 & ZScore.HET < 3 & ZScore.HOM > -3 & ZScore.HOM < 3 ~"#999999",
  ))
# Scatter of HOM vs HET Z-scores with the +/-3 significance thresholds drawn;
# scale_colour_identity uses the hex strings above directly.
ScorePlotFilt <- ggplot(ZScoreColour, aes(x=ZScore.HOM, y=ZScore.HET, color=colour)) + geom_point(alpha=1) + theme_bw() +
  labs(title="HET vs HOM F210I Mutant differential expression Zscore plots", x="HOM", y="HET") +
  scale_colour_identity() +
  geom_vline(xintercept = c(-3,3)) + geom_hline(yintercept = c(-3,3)) +
  theme(text = element_text(size=20))
svg("ColouredZscorePlotUpdated.svg")
ScorePlotFilt
dev.off()
|
41d112f3757e677e30d71bc20b0d187bb74cb387 | 0fa66774f82e96082b72ad9556543599ee12ddcb | /demonstrative/R/pbto2/nonlinear_slogistic_binom.R | 90fe9ff00cc8d061ba375deabcff6acafac7341a | [] | no_license | eric-czech/portfolio | 229a6eb662c3b633cda6db357f0f952cfdbb1e46 | d467ae2b3c8b58b35c11bf6aff11ea651e76c1a4 | refs/heads/master | 2021-01-17T03:48:56.571739 | 2018-01-23T21:09:40 | 2018-01-23T21:09:40 | 21,694,456 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,355 | r | nonlinear_slogistic_binom.R | library(foreach)
library(dplyr)
library(ggplot2)
library(gridExtra)
library(rstan)
library(reshape2)
source('~/repos/portfolio/demonstrative/R/pbto2/common.R')
source('~/repos/portfolio/demonstrative/R/pbto2/nonlinear_utils.R')
source('~/repos/portfolio/demonstrative/R/pbto2/nonlinear_binom_utils.R')
rstan_options(auto_write=T)
options(mc.cores = parallel::detectCores())
# Static (per-patient) covariates and the time-series covariate of interest.
static.features <- c('age', 'marshall', 'gcs', 'sex')
ts.feature <- c('pao2')
features <- c(static.features, ts.feature)
dsu <- get.long.data(features, scale.vars=F, outcome.func=gos.to.binom, reset.uid=T)
dsu$rand <- rnorm(n = nrow(dsu))
# Map a value of a scaled variable back to its original units, using the
# unscaled data frame 'dsu' captured from this environment.
unscaled.value <- function(x, var) x * sd(dsu[,var]) + mean(dsu[,var])
d.stan <- dsu %>% mutate_each_(funs(scale), features)
# The Stan model cannot handle missing time-series values; fail fast.
if (sum(is.na(d.stan[,ts.feature])) > 0)
  stop('Found na ts values')
### Stan
d.model <- get.stan.data(d.stan, static.features, ts.feature)
setwd('~/repos/portfolio/demonstrative/R/pbto2/models/stan')
model.file <- 'nonlinear_slogit_binom.stan'
# NOTE(review): this single-chain fit is overwritten by the parallel
# mclapply fit below; it appears to be kept as a quick serial alternative.
posterior <- stan(model.file, data = d.model, 
                  warmup = 200, iter = 5000, thin = 30, 
                  chains = 1, verbose = FALSE)
# posterior <- stan(model.file, data = d.model, 
#                   warmup = 150, iter = 4000, thin = 5, 
#                   chains = 14, verbose = FALSE)
# Running parallel chains on Mac
library(parallel) # or some other parallelizing package
n.chains <- 5
# Fit one chain per worker, then merge the chain fits into a single stanfit.
posterior <- mclapply(1:n.chains, mc.cores = n.chains, FUN = function(chain) {
  stan(file = model.file, data = d.model, warmup = 300, iter = 3000, chains = 1, thin = 3,
       verbose = FALSE, chain_id=chain)
})
posterior <- sflist2stanfit(posterior)
# Convergence / posterior diagnostics for the named parameters.
pars <- c('beta', 'betaz', 'b', 'c', 'alpha')
post <- rstan::extract(posterior)
print(posterior, pars)
rstan::traceplot(posterior, pars)
plot(posterior)
# Evaluate the fitted single-logistic weight function over the observed
# range of the (scaled) time-series feature, then map back to raw units.
x <- seq(min(d.stan[,ts.feature]), max(d.stan[,ts.feature]), length.out = 100)
x.unscaled <- unscaled.value(x, ts.feature)
y.est.mean <- get.slogit.mean.curve(post, x, agg.func=mean)
y.est.median <- get.slogit.mean.curve(post, x, agg.func=median)
y.mean <- data.frame(i=0, x=x.unscaled, y=y.est.mean)
y.median <- data.frame(i=0, x=x.unscaled, y=y.est.median)
#y.main %>% ggplot(aes(x=x, y=y)) + geom_line()
# Spaghetti curves: one weight curve per posterior draw; alpha is scaled so
# that curves closest to the mean curve are most opaque.
n = length(post$lp__)
y.samp <- foreach(i=1:n, .combine=rbind) %do% {
  y <- single.logistic(x, post$betaz[i], post$b[i], post$c[i])
  a = sum((y - y.est.mean)^2)
  data.frame(i, x=unscaled.value(x, ts.feature), y, a=a)
} %>% mutate(a=(1-scale.minmax(a))^10)
# Histogram of the raw feature, rescaled to sit in the lower 35% of the
# plotting area as a data-density rug.
v.hist <- hist(dsu[,ts.feature], plot=F, breaks=length(x))
v.width <- v.hist$mids[1] - v.hist$breaks[1]
min.v <- min(min(y.mean$y), min(y.samp$y))
max.v <- max(max(y.mean$y), max(y.samp$y))
v.hist <- data.frame(x=v.hist$mids, y=v.hist$counts/sum(v.hist$counts))
v.hist$y = min.v + .35 * abs(max.v - min.v) * scale.minmax(v.hist$y)
# Posterior median of the logistic midpoint, in raw units, marked as a
# dashed vertical line on the plot.
c.mid <- median(post$c) %>% unscaled.value(ts.feature)
p1 <- ggplot(NULL) +
  geom_line(aes(x=x, y=y, group=i, alpha=a), data=y.samp) +
  geom_line(aes(x=x, y=y, color='mean'), size=1, data=y.mean, alpha=.75) +
  geom_line(aes(x=x, y=y, color='median'), size=1, data=y.median, alpha=.75) +
  scale_alpha(range = c(.05, .05), guide = 'none') + theme_bw() +
  scale_color_discrete(guide = guide_legend(title = "Summary")) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
  xlab(ts.feature) + ylab(paste0('w(', ts.feature, ')')) + ggtitle('Timeseries Weight Function') +
  geom_rect(aes(xmax=x+v.width, xmin=x-v.width, ymin=min.v, ymax=y), data=v.hist, alpha=.5) +
  geom_vline(xintercept=c.mid, linetype='dashed', alpha=.25) +
  annotate("text", x = c.mid, y = 1, label = round(c.mid, 2))
# Coefficient interval plot for the static covariates.
post.summary <- get.slogit.posterior.summary(post, static.features)
p2 <- post.summary %>% 
  filter(!variable %in% c('center', 'weight_magnitude', 'intercept')) %>%
  ggplot(aes(x=variable, y=mid, ymin=lo, ymax=hi, color=variable)) + 
  geom_pointrange(size=1) + coord_flip() + theme_bw() +
  geom_hline(yintercept=0, linetype='dashed') + 
  ggtitle('Coefficient 95% Credible Intervals') +
  ylab('Coefficient Range') + xlab('')
# Save above plots to file
file <- sprintf("~/repos/portfolio/demonstrative/R/pbto2/presentations/images/slogit_no_interp/single_var/actual_%s.png", ts.feature)
png(file = file, width=800, height=800)
grid.arrange(p2, p1, nrow=2, ncol=1, heights=c(0.3, 0.7))
dev.off()
|
d662cd3591ef20f55cfb49260f3715dfddb91687 | dc1040d310ba7f19ecff4f38716cd27c722d7a03 | /analysis/HappinessvsIncome.R | 3c891e177011bb40858bcbb425b2a4ebb2eb434f | [] | no_license | datares/happy-pineapples | 5809c760b8c112cec7ceb8c22f4411a3d8c54127 | f5d896b6378f8961bce06625753ccb014fd4093b | refs/heads/main | 2023-03-30T02:13:16.690609 | 2021-03-30T03:48:59 | 2021-03-30T03:48:59 | 309,789,278 | 0 | 0 | null | null | null | null | WINDOWS-1252 | R | false | false | 1,057 | r | HappinessvsIncome.R | #income data from Gallup
# Median household income (Gallup) vs. 2020 World Happiness Report scores.
income <- read.csv("C:/Users/thehu/Documents/Fall 2020/Data Blog/IncomeByCountry2020.csv", header = TRUE)
happy <- read.csv("C:/Users/thehu/Documents/Fall 2020/Data Blog/2020.csv", header = TRUE)
# "ï..country" is a UTF-8 BOM artifact from read.csv; rename so the merge key matches.
names(income)[names(income) == "ï..country"] <- "Country.name"
df <- merge(income, happy, by = "Country.name")
colnames(df)
# One colour per region.  Fixed: this vector must be defined before the plot
# call below -- in the original script it was created after its first use.
colors <- c("black", "green4", "red", "green", "lightgoldenrod4",
            "navy", "turquoise", "violetred", "slateblue", "deepskyblue2")
plot(Ladder.score ~ medianHouseholdIncome, data = df,
     xlab = "Median Household Income", ylab = "Happiness Score",
     main = "Median Household Income vs. Happiness by Country",
     col = colors[Regional.indicator], pch = 20,
     xlim = c(0, 55000), ylim = c(2.5, 8))
legend("bottomright", levels(df$Regional.indicator),
       col = colors, pch = 20, inset = 0.01)
# Log-linear fit (happiness ~ log(income)) drawn over the scatter.
fit <- lm(df$Ladder.score ~ log(df$medianHouseholdIncome))
coef <- coef(fit)
curve(coef[1] + coef[2] * log(x), xlim = c(0, 55000), ylim = c(2.5, 8), add = TRUE)
|
1d584fe4187c147ba812fbe317fa1067583dd091 | 2d3d3d9cd6c1b3be05a297688947d7558f20bcaa | /HopkinsR/Assignment/Hopkins_5_Weather.R | f559ed290144d1668e54d4e755e880a67ace7d93 | [] | no_license | ethanxsun/Code | 84a1dca928d6e1b51673a40570abd8359e9ad7e4 | 4084301044ee7ff8e19ce1b645f2d54941244a98 | refs/heads/master | 2021-01-20T04:43:01.736781 | 2017-05-11T14:27:20 | 2017-05-11T14:27:20 | 89,718,798 | 1 | 0 | null | null | null | null | GB18030 | R | false | false | 4,612 | r | Hopkins_5_Weather.R | # function 1: calculate mean value of sulfate or nitrate
# Compute the mean of a pollutant column across the monitor files named by
# 'id' inside 'directory', ignoring NAs.
#
# directory: folder (under the hard-coded course path) holding one CSV per monitor.
# pollutant: column to average, e.g. "sulfate" or "nitrate".
# id:        integer vector of monitor numbers (indices into the file list).
pollutantmean <- function (directory, pollutant, id = 1:322){
  target.dir <- c("C:/Work/DataAnalytics/HopkinsDataScience/Course2_Wk2_ProgrammingWithR")
  # Fixed: the original referenced the undefined name 'target_dir' here.
  location <- paste(target.dir, "/", directory, sep = "")
  setwd(location)
  files <- list.files(location)
  # Read every requested monitor file and stack the rows; this replaces the
  # original's separate single-file branch and manual rbind loop.
  weather <- do.call(rbind, lapply(files[id], read.table,
                                   header = TRUE, stringsAsFactors = FALSE,
                                   sep = ","))
  # Index the requested column directly instead of branching per pollutant,
  # which also generalises to any numeric column.
  mean(weather[[pollutant]], na.rm = TRUE)
}
# Smoke-test calls for pollutantmean; the commented lines are smaller runs
# kept for interactive checking.
print("result1******************************************************")
#pollutantmean(directory="specdata",pollutant="sulfate",id=1:10)
#pollutantmean(directory="specdata",pollutant="nitrate", id=70:72)
#pollutantmean(directory="specdata",pollutant="sulfate", id=34)
pollutantmean(directory="specdata",pollutant="nitrate", id=1:323)
#function 2: check completeness of the data read
# Count complete (no-NA) observations per monitor file.
#
# directory: folder (under the hard-coded course path) holding one CSV per monitor.
# id:        integer vector of monitor numbers.
# Returns a data frame with columns 'id' and 'nobs', one row per requested id.
complete <- function (directory , id = 1:323){
  target.dir <- c("C:/Work/DataAnalytics/HopkinsDataScience/Course2_Wk2_ProgrammingWithR")
  # Fixed: the original referenced the undefined name 'target_dir' here.
  location <- paste(target.dir, "/", directory, sep = "")
  setwd(location)
  files <- list.files(location)
  # One pass per requested monitor; the same code path now handles both the
  # single-id and multi-id cases that the original branched on.
  nobs <- vapply(id, function(i) {
    monitor <- read.table(files[i], header = TRUE,
                          stringsAsFactors = FALSE, sep = ",")
    sum(complete.cases(monitor))
  }, integer(1))
  data.frame(id = id, nobs = nobs)
}
# Quiz-style checks for complete(): selected monitors, a single monitor,
# and a seeded random sample over all 332 monitors (read in reverse order).
print("function 2 result****************************************")
#print(complete (directory = "specdata", id = 30:25))
cc <- complete("specdata", c(6, 10, 20, 34, 100, 200, 300))
print(cc$nobs)
cc <- complete("specdata", 54)
print(cc$nobs)
set.seed(42)
cc <- complete("specdata", 332:1)
use <- sample(332, 10)
print(cc[use, "nobs"])
#function 3:
# Correlation between sulfate and nitrate for every monitor with at least
# 'threshold' complete observations.  Returns a numeric vector (possibly
# empty) of correlations, one per qualifying monitor.
#
# The 'directory' argument (new, defaulting to the original hard-coded
# "specdata") keeps the call corr(threshold) backward-compatible.
corr <- function (threshold = 0, directory = "specdata"){
  # How many complete rows each monitor has.
  lookup <- complete(directory = directory, id = 1:323)
  ids <- lookup[lookup$nobs >= threshold, 1]
  # complete() leaves the working directory set to the data folder; build
  # the file list here instead of relying on a leftover global 'files'
  # object as the original did (which only existed after running the body
  # of complete() interactively).
  files <- list.files()
  # Preallocated functional loop instead of growing 'result' with append().
  vapply(ids, function(index) {
    monitor <- read.table(files[index], header = TRUE,
                          stringsAsFactors = FALSE, sep = ",")
    full <- monitor[complete.cases(monitor), ]
    cor(full$sulfate, full$nitrate)
  }, numeric(1))
}
# Quiz-style checks for corr(): default threshold, threshold 129, and the
# high thresholds 2000/1000 (where few or no monitors qualify).
print("function 3 result*********************************************")
cr <- corr()                
cr <- sort(cr)   
set.seed(868)                
out <- round(cr[sample(length(cr), 5)], 4)
print(out)
cr <- corr(129)                
cr <- sort(cr)                
n <- length(cr)                
set.seed(197)                
out <- c(n, round(cr[sample(n, 5)], 4))
print(out)
cr <- corr(2000)                
n <- length(cr)                
cr <- corr(1000)                
cr <- sort(cr)
print(c(n, round(cr, 4)))
|
ebd8b6bba29091fac464ade5a92004b73e338f9a | 17807e9372c8e22c0d5dd3b23185487ba53a2f37 | /pollution_eda/plot6.R | 286e99d28345e20dfdf453854e04ba240e86fc50 | [] | no_license | S-95-pix/r_repository | 67afa2b7758f9b03d8f48f493f9f5dc144f45699 | 962674a579e383b3d6b33ce428295193d5391a09 | refs/heads/master | 2023-07-08T03:04:09.903954 | 2021-08-17T16:34:10 | 2021-08-17T16:34:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,675 | r | plot6.R | ## Get libraries needed
library(dplyr)
library(ggplot2)

## Set directory
if (!file.exists('final_project_eda/')) {
  dir.create('final_project_eda/')
}
setwd('final_project_eda/')

## Data extract
if (!file.exists('epa.zip')) {
  download.file('https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip', 'epa.zip')
  unzip('epa.zip')
# NOTE(review): '/epa' is an absolute path at the filesystem root; this
# branch almost always re-unzips.  Probably meant the relative path 'epa'
# (or a check on the extracted .rds files) -- confirm intent.
} else if (!file.exists('/epa')) {
  unzip('epa.zip')
}

## Load the data
pm25 <- readRDS('summarySCC_PM25.rds')
source_class <- readRDS('Source_Classification_Code.rds')

## Question 6
## Compare emissions from motor vehicle sources in Baltimore City
## with emissions from motor vehicle sources in Los Angeles County,
## California (fips == "06037"). Which city has seen greater changes
## over time in motor vehicle emissions?

# Keep the two counties, look up each SCC's sector, keep on-road (motor
# vehicle) sources, and total PM2.5 per county per year.
motor <- pm25 %>% filter(fips == '24510' | fips == '06037') %>%
  mutate(EI.Sector = source_class$EI.Sector[match(SCC, source_class$SCC)]) %>%
  filter(grepl(pattern = 'On-Road', x = EI.Sector)) %>% group_by(fips, year) %>%
  summarise(total_pm25 = sum(Emissions)) %>% mutate(fips = ifelse(fips == '24510', 'Baltimore, MD', 'Los Angeles, CA'))

## Create PNG device
png('plot6.png', width = 800, height = 480)

## Create barplot showing the trend of PM2.5 emissions of motor vehicle-related sources in Baltimore and Los Angeles
ggplot(data = motor, aes(x = year, y = total_pm25)) + geom_bar(stat = 'Identity') + facet_grid(.~fips) + labs(x = 'Year', y = 'PM2.5 Emissions', title = 'PM2.5 Emissions of Motor Vehicle-related Sources in Baltimore, MD and Los Angeles, CA')

## Close PNG device
dev.off()
|
8fcd9adeb3566a1dd067c46da8e70fd069da4445 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/estimatr/R/S3_nobs.R | 401b164a04734a21d51da75324c8ea24d93f9403 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 252 | r | S3_nobs.R | #' @export
# S3 methods returning the number of observations used in each fit;
# every estimatr fit object stores this in its 'nobs' field.
nobs.lm_robust <- function(object, ...) {
  object$nobs
}

#' @export
nobs.iv_robust <- function(object, ...) {
  object$nobs
}

#' @export
nobs.summary.lm_robust <- nobs.lm_robust

#' @export
nobs.horvitz_thompson <- function(object, ...) {
  object$nobs
}
|
7685c4ffa62a7831674611033a014aed15d2000f | ffd303c7beebbdea53e74448fceb2f2ab2037d54 | /plot2.R | c313b0d7f08904687aeec1ec388525d2b0205791 | [] | no_license | sisi881022/ExData_Plotting1 | 6064785a1283c03482b921129528c8a895b2150b | aab92cbe03ca2011d602e5f48388d08c97afebff | refs/heads/master | 2020-12-25T01:44:00.155683 | 2014-08-10T09:58:36 | 2014-08-10T09:58:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 509 | r | plot2.R | # Read in Data
# Household power consumption; "?" marks missing values in the raw file.
powerConsumption<- read.csv("household_power_consumption.txt",header=T,sep=";", na.strings = "?")
colnames(powerConsumption)
#Convert date
powerConsumption$Date<- as.Date(powerConsumption$Date,"%d/%m/%Y")
#subset data to the two target days (1-2 Feb 2007)
dat<- subset(powerConsumption,Date>="2007-02-01" & Date<= "2007-02-02")
#combine date and time into a POSIXlt datetime for the x-axis
datetime<-strptime(paste(dat$Date, dat$Time), "%Y-%m-%d %H:%M:%S")
#plot2: line plot of column 3 -- presumably Global_active_power given the
#y-axis label; TODO confirm the column order of the input file.
png("plot2.png")
plot(datetime,dat[,3],type="l",xlab="",ylab="Global Active Power (kilowatts)")
dev.off()
|
d16378dc82bf9d549b88275a159d4adc48e0b5d0 | dfd1208b151f320b0430e6adeb7fa5823d283959 | /man/defineMUM.Rd | 3683c214582f8c9b2e89e3b9e907319e881c5195 | [] | no_license | ksawicka/spup | 8bffdd98766d17063a48197a660b3402ebb787bc | b073094603f351c2b55cec42498639a8e4e4b83d | refs/heads/master | 2021-04-18T22:59:36.822987 | 2020-08-24T09:29:32 | 2020-08-24T09:29:32 | 56,239,409 | 9 | 5 | null | 2017-05-02T15:12:25 | 2016-04-14T13:25:06 | R | UTF-8 | R | false | true | 2,015 | rd | defineMUM.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defineMUM.r
\name{defineMUM}
\alias{defineMUM}
\title{Define Mulivariate Uncertainty Model}
\usage{
defineMUM(UMlist, cormatrix, ...)
}
\arguments{
\item{UMlist}{a list of uncertain objects creaded in defineUM().}
\item{cormatrix}{matrix of cross-correlations.}
\item{...}{additional parameters.}
}
\value{
Object of a class "JointNumericSpatial" or "JointScalar".
}
\description{
Function that uses output of defineUM() to define joint probability distribution
for uncertain cross-correlated variables.
}
\details{
The cormatrix is a square matrix of correlations whose dimension equals
the number of objects. It must be symmetric (its transpose equal to the
original), its diagonal entries must all be 1, all values must lie in
the interval [-1, +1], and all eigenvalues must be > 0.
The marginal UM objects must have an id provided.
}
\examples{
set.seed(12345)
data(OC, OC_sd, TN, TN_sd)
OC_crm <- makeCRM(acf0 = 0.6, range = 5000, model = "Sph")
OC_UM <- defineUM(TRUE, distribution = "norm", distr_param = c(OC, OC_sd), crm = OC_crm, id = "OC")
class(OC_UM)
TN_crm <- makeCRM(acf0 = 0.4, range = 5000, model = "Sph")
TN_UM <- defineUM(TRUE, distribution = "norm", distr_param = c(TN, TN_sd), crm = TN_crm, id = "TN")
class(TN_UM)
soil_prop <- list(OC_UM,TN_UM)
mySpatialMUM <- defineMUM(soil_prop, matrix(c(1,0.7,0.7,1), nrow=2, ncol=2))
class(mySpatialMUM)
# scalar
scalarUM <- defineUM(uncertain = TRUE, distribution = "norm",
distr_param = c(1, 2), id="Var1")
scalarUM2 <- defineUM(uncertain = TRUE, distribution = "norm",
distr_param = c(3, 2), id="Var2")
scalarUM3 <- defineUM(uncertain = TRUE, distribution = "norm",
distr_param = c(10, 2.5), id="Var3")
myMUM <- defineMUM(UMlist = list(scalarUM, scalarUM2, scalarUM3),
matrix(c(1,0.7,0.2,0.7,1,0.5,0.2,0.5,1), nrow = 3, ncol = 3))
class(myMUM)
}
\author{
Kasia Sawicka, Gerard Heuvelink
}
|
1d58d223ffe7952fdf8808c984d60bb3e06f689b | ac42c7b87b90f08c8ec9b12d5e17a534ffb84b02 | /man/cr_buildtrigger_repo.Rd | 1af346987715768b0eccd2b13901a8239613647a | [] | no_license | alainlompo/googleCloudRunner | 990c819c9f4f46ca5a39a720627da4003a6410f3 | 6622d22f1c19d58f6b5888ae929f32d86b57a1c6 | refs/heads/master | 2023-09-03T15:33:00.968050 | 2021-10-27T21:18:23 | 2021-10-27T21:18:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,399 | rd | cr_buildtrigger_repo.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/buildtriggers_github.R
\name{cr_buildtrigger_repo}
\alias{cr_buildtrigger_repo}
\title{Create a buildtrigger repo object}
\usage{
cr_buildtrigger_repo(
repo_name,
branch = ".*",
tag = NULL,
type = c("github", "cloud_source"),
github_secret = NULL,
...
)
}
\arguments{
\item{repo_name}{Either the GitHub username/repo_name or the Cloud Source repo_name}
\item{branch}{Regex of the branches that will trigger a build. Ignored if tag is not NULL}
\item{tag}{Regex of tags that will trigger a build}
\item{type}{Whether trigger is GitHub or Cloud Source repoistory}
\item{github_secret}{If you need to pull from a private GitHub repo, add the github secret from Google Secret Manager which will be used via \link{cr_buildstep_secret}}
\item{...}{Other arguments passed to either \link{GitHubEventsConfig} or \link{RepoSource}}
}
\description{
Create a repository trigger object for use in build triggers
}
\seealso{
Other BuildTrigger functions:
\code{\link{BuildTrigger}()},
\code{\link{GitHubEventsConfig}()},
\code{\link{cr_buildtrigger_copy}()},
\code{\link{cr_buildtrigger_delete}()},
\code{\link{cr_buildtrigger_edit}()},
\code{\link{cr_buildtrigger_get}()},
\code{\link{cr_buildtrigger_list}()},
\code{\link{cr_buildtrigger_run}()},
\code{\link{cr_buildtrigger}()}
}
\concept{BuildTrigger functions}
|
e12f90b0490ac86b1d2a2b768667e45dfb8bf6ed | f4404d415af085c7c13fbe39f6c29cbb3fd7b68e | /note1.R | 117ec0f0801003e35d877f9d1e3a441498707a4c | [] | no_license | yunsung/Yunsung_Jung | 5ba46f4fb989bc11066c1d15a8811b83e24ba3e9 | 24112ad53f54453401ce88f2cef6cc99cd04b98c | refs/heads/master | 2021-01-15T11:11:59.123028 | 2015-07-15T06:32:45 | 2015-07-15T06:32:45 | 38,352,857 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 170 | r | note1.R | Pollution <- read.csv("C:/Users/Jung/Desktop/ISS/1. Data science/7.1/SKKU_DataScience_2015-master/data_sets/Pollution.csv")
# Print the imported pollution table for a quick sanity check.
Pollution
# Scatter plot: population (x) vs. SO2 (y).
plot(Pollution$POP,Pollution$SO2)
80349b94a2748662f446fbc9abd0d7fb2d16369e | af33eeb8d253a5150b529477e7f9fd6e0e2a2ce6 | /locations.R | f0a3232529c8e827335a624a5f2384f5a75494b4 | [] | no_license | PeerChristensen/Global_terrorism | 06bd1e142cf3001abc9a863ec99e4fa2e9e8ce4d | b6e7e9e1ae5467fd1c0da7010d640e0e55e91626 | refs/heads/master | 2021-06-22T14:53:28.922570 | 2021-04-05T17:26:53 | 2021-04-05T17:26:53 | 206,867,626 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,050 | r | locations.R | # where attacks occur
library(tidyverse)
library(hrbrthemes)
library(ggsci)
library(gganimate)
library(ggthemes)
library(viridis)

df <- read_csv("global_terror_clean.csv")

# Attack counts per world region (all years pooled).
df %>% 
  group_by(region_txt) %>%
  count() %>%
  ggplot(aes(reorder(region_txt,n),n,fill=n)) +
  geom_col(colour=NA) +
  coord_flip() +
  theme_modern_rc() +
  scale_fill_material("red")

# Top 20 countries by attack count.
df %>% 
  group_by(country_txt) %>%
  count() %>%
  ungroup()%>%
  top_n(20) %>%
  ggplot(aes(reorder(country_txt,n),n,fill=n)) +
  geom_col(colour=NA) +
  coord_flip() +
  theme_modern_rc() +
  scale_fill_material("red")

# Top 20 cities (disambiguated as "city, country"; unknown cities dropped).
df %>% 
  filter(city != "Unknown") %>%
  mutate(city2 = paste0(city,", ",country_txt)) %>%
  group_by(city2) %>%
  count() %>%
  ungroup()%>%
  top_n(20) %>%
  ggplot(aes(reorder(city2,n),n,fill=n)) +
  geom_col(colour=NA) +
  coord_flip() +
  theme_modern_rc() +
  scale_fill_material("red")

# regions by year
# Cumulative attack counts per region per year, plus per-year rank/ordering
# columns for the animated bar-race below.  complete() fills region/year
# combinations with zero attacks so cumsum is continuous.
regions <- df %>% 
  group_by(year,region_txt) %>%
  count() %>%
  ungroup() %>%
  complete(year,region_txt,fill=list(n=0)) %>%
  group_by(region_txt) %>%
  mutate(cs = cumsum(n)) %>%
  ungroup() %>%
  group_by(year) %>%
  arrange(year,desc(cs)) %>%
  mutate(order = rev(row_number())) %>%
  mutate(Rank = rank(-cs),
         Value_rel = cs/cs[Rank==1],
         Value_lbl = paste0(" ",round(cs/1e9))) %>%
  ungroup()

# Animated bar race: geom_tile draws the bars (height = cumulative count),
# transition_states steps through years.
p<-regions %>%
  ggplot(aes(order,Value_rel,fill=region_txt,group=cs)) +
  #geom_col(colour=NA) +
  geom_tile(aes(y=cs/2,height=cs),width=0.9,show.legend = F)+
  coord_flip(clip="off") +
  theme_modern_rc() +
  scale_fill_viridis_d() +
  labs(title = "Year: {closest_state}") +
  transition_states(year, transition_length = 4, state_length = 1) +
  theme(legend.position = "none",
        axis.text.y = element_blank(),
        axis.title.y = element_blank(),
        plot.margin = margin(1,1,1,6, "cm")) +
  geom_text(aes(y = 0, label = paste(region_txt, " ")), vjust = 0.2, hjust = 1,colour="snow")

#animate(p,fps=2.5,nframes = n_distinct(regions$year)*2)
animate(p, 100, fps = 25, duration = 20, width = 800, height = 600)
|
993e9284dbb376f58278e64ca693f6912244d42e | 423e53b3ca3e81220813d88be963cb4b8b3fd9b2 | /R/plot.survFitCstExp.R | b33efae0d6807cb6ff9f9ad4522e669fdde8a3a9 | [] | no_license | cran/morse | 9715ca0a55cdf7c42ecfd13039065a88a273f6dd | 262ed591e1b80190e1cea7a3ae93164b0d030df2 | refs/heads/master | 2022-11-08T06:29:36.523446 | 2022-10-28T10:45:09 | 2022-10-28T10:45:09 | 20,999,880 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,347 | r | plot.survFitCstExp.R | #' Plotting method for \code{survFit} objects
#'
#' This is the generic \code{plot} S3 method for the
#' \code{survFit}. It plots the fit obtained for each
#' concentration of chemical compound in the original dataset.
#'
#' The fitted curves represent the \strong{estimated survival probability} as a function
#' of time for each concentration.
#' The black dots depict the \strong{observed survival
#' probability} at each time point. Note that since our model does not take
#' inter-replicate variability into consideration, replicates are systematically
#' pooled in this plot.
#' The function plots both 95\% credible intervals for the estimated survival
#' probability (by default the grey area around the fitted curve) and 95\% binomial confidence
#' intervals for the observed survival probability (as black error bars if
#' \code{adddata = TRUE}).
#' Both types of intervals are taken at the same level. Typically
#' a good fit is expected to display a large overlap between the two types of intervals.
#' If \code{spaghetti = TRUE}, the credible intervals are represented by two
#' dotted lines limiting the credible band, and a spaghetti plot is added to this band.
#' This spaghetti plot consists of the representation of simulated curves using parameter values
#' sampled in the posterior distribution (2\% of the MCMC chains are randomly
#' taken for this sample).
#'
#' @param x An object of class \code{survFit}.
#' @param xlab A label for the \eqn{X}-axis, by default \code{Time}.
#' @param ylab A label for the \eqn{Y}-axis, by default \code{Survival probability}.
#' @param main A main title for the plot.
#' @param concentration A numeric value corresponding to some specific concentrations in
#' \code{data}. If \code{concentration = NULL}, draws a plot for each concentration.
#' @param spaghetti if \code{TRUE}, draws a set of survival curves using
#' parameters drawn from the posterior distribution
#' @param one.plot if \code{TRUE}, draws all the estimated curves in
#' one plot instead of one plot per concentration.
#' @param adddata if \code{TRUE}, adds the observed data to the plot
#' with (frequentist binomial) confidence intervals
#' @param addlegend if \code{TRUE}, adds a default legend to the plot.
#' @param style graphical backend, can be \code{'generic'} or \code{'ggplot'}
#' @param \dots Further arguments to be passed to generic methods.
#'
#' @keywords plot
#'
#' @return a plot of class \code{ggplot}
#'
#' @export
#'
#' @import ggplot2
#' @import grDevices
#' @importFrom dplyr filter
#' @importFrom dplyr select
#' @importFrom dplyr contains
#' @importFrom gridExtra grid.arrange arrangeGrob
#' @importFrom grid grid.rect gpar
#' @importFrom graphics plot
#' @importFrom tidyr tibble
#' @importFrom tidyr as_tibble
#'
plot.survFitCstExp <- function(x,
                               xlab = "Time",
                               ylab = "Survival probability",
                               main = NULL,
                               concentration = NULL,
                               spaghetti = FALSE,
                               one.plot = FALSE,
                               adddata = TRUE,
                               addlegend = FALSE,
                               style = "ggplot", ...) {
  # A specific concentration overrides one.plot (a single-concentration
  # plot is never combined).
  if (one.plot && !is.null(concentration)) one.plot <- FALSE
  
  # The legend only makes sense on a combined plot or a single-concentration
  # plot; warn (but continue) otherwise.
  if ((addlegend && is.null(concentration)) ||
      (addlegend && !one.plot))
    warning("The legend is available only if [one.plot] is TRUE or if [concentration] is not NULL !", call. = FALSE)
  
  if (!is.null(concentration) && !any(x$transformed.data$conc == concentration))
    stop("The [concentration] argument is not one of the possible concentration !")
  
  if (one.plot)
    warning("The credible limits and confidence intervals are not drawn when 'one.plot' = TRUE.", call. = FALSE)
  
  # Binomial confidence intervals for the observed survival probabilities.
  conf.int <- survTKTDConfInt_CstExp(x)
  
  # Observed data: replicates pooled, survival probability = Nsurv / Ninit.
  dobs <- data.frame(conc = x$transformed.data$conc,
                     time = x$transformed.data$time,
                     psurv = x$transformed.data$Nsurv / x$transformed.data$Ninit,
                     Points = "Observed values",
                     #color = as.numeric(as.factor(x$transformed.data$conc)),
                     color = x$transformed.data$replicate,
                     conf.int)
  
  # remove time 0 in dobs (survival is 1 by construction at t = 0)
  dobs <- dplyr::filter(dobs, time != 0)
  
  # Posterior predicted curves with credible limits.
  data.credInt <- survFitPlotCITKTD_CstExp(x)
  
  # Dispatch to the requested graphical backend (helpers defined elsewhere
  # in the package).
  if (style == "generic") {
    survFitPlotTKTDGeneric(data.credInt, dobs, xlab, ylab, main, concentration,
                           one.plot, spaghetti,
                           adddata, addlegend)
  } else if (style == "ggplot") {
    survFitPlotTKTDGG(data.credInt, dobs, xlab, ylab, main, concentration,
                      one.plot, spaghetti, adddata, addlegend)
  } else stop("Unknown style")
}
#' @importFrom stats aggregate binom.test
survTKTDConfInt_CstExp <- function(x) {
  # Exact (binomial test) confidence intervals for the observed survival
  # at each time point of the original dataset.
  # INPUT:
  # - x : object of class survFit (needs x$transformed.data$Nsurv / $Ninit)
  # OUTPUT:
  # - data frame with columns qinf95, qsup95 and a constant legend label.
  #
  # The original used apply() over data-frame rows, which coerces every
  # column to character before converting the counts back with as.numeric;
  # mapply() over the two count columns avoids that round trip.
  ci <- mapply(function(nsurv, ninit) {
    binom.test(nsurv, ninit)$conf.int
  }, x$transformed.data$Nsurv, x$transformed.data$Ninit)
  ci <- as.data.frame(t(ci))
  colnames(ci) <- c("qinf95", "qsup95")
  ci$Conf.Int <- "Confidence interval"
  return(ci)
}
# Survival function for the stochastic death (SD) TKTD model under constant
# exposure.  Cw: exposure concentration; kk: killing rate; kd: dominant rate
# constant; z: threshold; hb: background hazard.  Arguments may be vectors
# (standard R recycling applies).
Surv_SD <- function(Cw, time, kk, kd, z, hb)
{
  background <- exp(-hb * time)
  # Fraction by which exposure exceeds the threshold; NA when Cw <= z,
  # i.e. the threshold is never reached.
  frac_above <- ifelse(Cw > z, 1 - z / Cw, NA)
  # Time at which scaled damage first crosses the threshold z.
  t_threshold <- -(1 / kd) * log(frac_above)
  excess_hazard <- ifelse(time > t_threshold,
                          exp(kk / kd * Cw * (exp(-kd * t_threshold) - exp(-kd * time))
                              - kk * (Cw - z) * (time - t_threshold)),
                          NA)
  # Before the threshold is crossed (or when it never is), only background
  # mortality applies.
  ifelse(!is.na(frac_above) & !is.na(excess_hazard),
         background * excess_hazard,
         background)
}
# Survival function for the individual tolerance (IT) TKTD model under
# constant exposure.  kd and hb are vectors of MCMC draws (rows); time
# indexes the columns.  alpha/beta parameterise the log-logistic threshold
# distribution.
Surv_IT <- function(Cw, time, kd, hb, alpha, beta)
{
  damage <- Cw * (1 - exp(-kd %*% t(time)))
  # Mortality depends on the running maximum of damage over time.
  running_max <- t(apply(damage, 1, cummax))
  survival <- exp(-hb %*% t(time)) *
    (1 - plogis(log(running_max), location = log(alpha), scale = 1 / beta))
  survival
}
survFitPlotCITKTD_CstExp <- function(x) {
  # Posterior predicted survival curves on a fine time grid, one set per
  # observed concentration, with 95% credible limits and the median curve.
  # INPUT
  # x : An object of class survFit (constant-exposure)
  # OUTPUT
  # dtheo : data frame with one row per (concentration, grid time):
  #         a thinned sample of individual posterior curves (X* columns),
  #         conc, time, qinf95/qsup95/q50, legend labels and a colour index.
  
  npoints <- 100
  concobs <- unique(x$transformed.data$conc)
  ## tfin <- seq(0, max(x$jags.data$t), length.out = npoints)
  tfin <- seq(0, max(x$jags.data$time), length.out = npoints)
  # parameters: stack all MCMC chains, back-transform from log10 scale
  mctot <- do.call("rbind", x$mcmc)
  kd <- 10^mctot[, "kd_log10"]
  # "hb" is not in survFit object of morse <v3.2.0
  if("hb" %in% colnames(mctot)){
    hb <- mctot[, "hb"]
  } else{ hb <- 10^mctot[, "hb_log10"] }
  # theoretical curves: one list element per concentration
  k <- 1:length(concobs)
  j <- 1:npoints
  
  model_type = x$model_type
  
  if(model_type == "SD"){
    z <- 10^mctot[, "z_log10"]
    kk <- 10^mctot[, "kk_log10"]
    
    # Surv_SD is evaluated per (concentration, time); each call returns one
    # value per MCMC draw, so each element is a (draws x npoints) matrix.
    dtheo <- lapply(k, function(kit) { # conc
      sapply(j, function(jit) { # time
        Surv_SD(Cw = concobs[kit],
                time = tfin[jit],
                kk = kk,
                kd = kd,
                z = z,
                hb = hb)
      })
    })
  }
  if(model_type == "IT"){
    
    alpha <- 10^mctot[, "alpha_log10"]
    beta <- 10^mctot[, "beta_log10"]
    
    # Surv_IT vectorises over time internally (running max of damage).
    dtheo <- lapply(k, function(kit) { # one matrix per concentration
      Surv_IT(Cw = concobs[kit],
              time = tfin,
              kd = kd,
              hb = hb,
              alpha = alpha,
              beta = beta)
    })
  }
  # transpose and stack: rows become (concentration, grid time), columns MCMC draws
  dtheo <- do.call("rbind", lapply(dtheo, t))
  
  # per-row posterior quantiles (credible band and median curve)
  qinf95 <- apply(dtheo, 1, quantile, probs = 0.025, na.rm = TRUE)
  qsup95 <- apply(dtheo, 1, quantile, probs = 0.975, na.rm = TRUE)
  q50 <- apply(dtheo, 1, quantile, probs = 0.5, na.rm = TRUE)
  
  dtheo <- as.data.frame(dtheo)
  colnames(dtheo) <- paste0("X", 1:length(kd))
  
  # keep a random ~2% of the MCMC curves for the spaghetti plot
  sel <- sample(ncol(dtheo))[1:ceiling(ncol(dtheo) / 50)]
  dtheo <- dtheo[, sel]
  dtheo$conc <- rep(concobs, rep(npoints, length(concobs)))
  dtheo$time <- rep(tfin, length(concobs))
  
  # add credible limits
  dtheo$qinf95 <- qinf95
  dtheo$qsup95 <- qsup95
  dtheo$q50 <- q50
  
  # names for legend plots
  dtheo$Cred.Lim <- "Credible limits"
  dtheo$Mean.C <- "Mean curve"
  
  # colour index: one integer per concentration level
  dtheo$color <- as.numeric(as.factor(dtheo$conc))
  
  return(dtheo)
}
724bcd7bb1794582550a1ff6e1b0347361437f6c | 5781dd18cdbb889e8da1778f8f37f11d10e24b65 | /Lista 03/ajuste_3a_lista.R | 6728acf92971eff4bf9e0fb0b26c22f156a735e1 | [] | no_license | Cayan-Portela/ENAP_regressao | 6ee84a1aaff5e1c87e11f824eda261bcf4b1298a | dc77f8752a674d4544b2a7042e36810cf7a0adba | refs/heads/master | 2020-04-04T03:08:42.811630 | 2018-12-03T12:42:43 | 2018-12-03T12:42:43 | 155,709,073 | 3 | 2 | null | null | null | null | ISO-8859-1 | R | false | false | 2,375 | r | ajuste_3a_lista.R | install.packages("lmtest")
install.packages("plm")
install.packages("sandwich")
install.packages("olsrr")
install.packages("mctest")
install.packages("GGally")
library(lmtest)
library(plm)
library(sandwich)
library(olsrr)
library(mctest)
library(GGally)
library(plyr);
library(lmtest);
library(sandwich)
library(olsrr)
library(mctest)
library(GGally)
library(nortest)
library(car)
library(carData)
###----Lista de exercícios 3-----###
#----- importar os dados --------#
dados3 <- read.csv2("https://raw.githubusercontent.com/Cayan-Portela/ENAP_regressao/master/Aula%2002/IDH_Brasil_2010.csv",
header = TRUE)
#---- Modelo --------#
mod1 <- lm(dados3$mort_infantil ~ dados3$renda_per_capita
+ dados3$indice_gini
+ dados3$salario_medio_mensal
+ dados3$perc_criancas_extrem_pobres
+ dados3$perc_criancas_pobres
+ dados3$perc_pessoas_dom_agua_estogo_inadequados
+ dados3$perc_pessoas_dom_paredes_inadequadas
+ dados3$perc_pop_dom_com_coleta_lixo)
summary(mod1)
#---teste de heterocestadicidade----#
#----Teste de Breusch-Pagan -------#
# H0: os coeficientes estimados são iguais a zero.
bptest(dados3$mort_infantil ~ dados3$renda_per_capita
+ dados3$indice_gini
+ dados3$salario_medio_mensal
+ dados3$perc_criancas_extrem_pobres
+ dados3$perc_criancas_pobres
+ dados3$perc_pessoas_dom_agua_estogo_inadequados
+ dados3$perc_pessoas_dom_paredes_inadequadas
+ dados3$perc_pop_dom_com_coleta_lixo)
#----Estimadores robustos para erros heterocesáticos-----#
coeftest(mod1, vcov = vcovHC(mod1, "HC3"))
#---- estimadores robustos para erros heteroscedasticos e autocorrelacionados
summary(mod1)
coeftest(mod1, vcov = vcovHAC(mod1))
#----- Normality tests for the residuals -----#
residuos <- mod1$residuals
plot(residuos)

qqPlot(residuos, main = "Normal Q-Q Plot",
       xlab = "Theoretical Quantiles", ylab = "Sample Quantiles")

shapiro.test(residuos)
# NOTE(review): this KS test compares the residuals against the response
# variable itself, which is unusual -- confirm intent (a normality check
# would be ks.test(residuos, "pnorm", mean(residuos), sd(residuos))).
ks.test(residuos,dados3$mort_infantil )
ad.test(residuos)

#----- Multicollinearity diagnostics -----#
X <- model.matrix(mod1)
head(X)
# Drop the intercept column before pairwise correlation / collinearity checks.
Xnoint <- X[, -1]
head(Xnoint)
ggpairs(data.frame(Xnoint))
# Fixed: the original called omcdiag(Xnoint, bondyield$RAARUS), copying the
# response from the mctest package example ('bondyield' is never defined in
# this script and the call would error).  Use this model's response instead.
omcdiag(Xnoint, dados3$mort_infantil)
|
b4c58137ba13fe398ea29182d8a60dbc9f97330c | 82ebc73dc6869319a2bf47f57ac41fe306dc12f2 | /man/fit.fast.model.Rd | 5961920232e048494f4c485adb04f77c2aedaa49 | [] | no_license | Sage-Bionetworks/snm | d5418b0f989089e342f7505756c2c304101f80c9 | 1ef4124d2819577b4428ffd13a25c443f71cf916 | refs/heads/master | 2021-01-01T18:49:14.757942 | 2012-08-24T22:30:22 | 2012-08-24T22:30:22 | 5,533,556 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,869 | rd | fit.fast.model.Rd | \name{fit.fast.model}
\alias{fit.fast.model}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Fast Version of SNM Algorithm
}
\description{
Fits the Study-Specific Model without a mixed effects model. This function estimates the intensity-dependent effects after down-weighting all of the probe-specific variables. The default fit.model call down-weights only the biological variables.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
fit.fast.model(obs.fit, snm.obj, basisSplineFunction)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{obs.fit}{
List of estimated coefficient matrices and residuals from full and reduced models
}
\item{snm.obj}{
An object of class snm
}
\item{basisSplineFunction}{
Basis spline function
}
}
\value{
Updated snm.obj with intensity-dependent effects.
}
\author{
Brig Mecham <brig.mecham@sagebase.org>
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (obs.fit,snm.obj,
basisSplineFunction)
{
snm.obj$M <- snm.obj$dat - obs.fit$res1
snm.obj$M[snm.obj$nulls,] <- obs.fit$fit0[snm.obj$nulls,]
# Split the data into nbins bins based on their mean intensities
bins <- getSpanningSet(snm.obj)
# Build the matrix of weighted raw data and matrix of weighted fitted values for each bin.
lnp <- length(bins)
np <- 1:lnp
Y.pooled <- 0*snm.obj$dat[np,]
M.pooled <- 0*snm.obj$M[np,]
for(i in 1:lnp) {
Y.pooled[i,] = apply(matrix(snm.obj$r.dat[as.vector(bins[[i]]),],
ncol=ncol(snm.obj$dat)),2,
weighted.mean, w=snm.obj$weights[as.vector(bins[[i]])])
M.pooled[i,] = apply(matrix(snm.obj$M[as.vector(bins[[i]]),],
ncol=ncol(snm.obj$M)),2,
weighted.mean, w=snm.obj$weights[as.vector(bins[[i]])])
}
BB <- predict(basisSplineFunction,M.pooled[,1])
X <- kronecker(contr.sum(length(unique(snm.obj$int.var[,1])))[snm.obj$int.var[,1],], BB)
for(i in 2:dim(snm.obj$int.var)[2]) {
X <- cbind(X,
kronecker(contr.sum(length(unique(snm.obj$int.var[,i])))[snm.obj$int.var[,i],], BB))
}
wts <- sapply(bins,length) / 10; wts[wts > 1] <- 1
cfs <- summary(lm(as.numeric(t(scale(t(Y.pooled),scale=FALSE))) ~ -1+X,weights=rep(wts,times=snm.obj$n.arrays)))$coef[,1]
beta = vector("list", dim(snm.obj$int.var)[2])
beta[[1]] = matrix(cfs[1:(snm.obj$spline.dim * (length(unique(snm.obj$int.var[,1])) - 1))], ncol = length(unique(snm.obj$int.var[,1])) - 1)
beta[[1]] = cbind(beta[[1]], -drop(beta[[1]] \%*\% rep(1, length(unique(snm.obj$int.var[,1])) - 1)))
for(i in 2:(dim(snm.obj$int.var)[2])) {
beta[[i]] = matrix(cfs[1:(snm.obj$spline.dim * (length(unique(snm.obj$int.var[,i])) - 1)) + snm.obj$spline.dim * (length(unique(snm.obj$int.var[,i-1]))- (i - 1))],
ncol = length(unique(snm.obj$int.var[,i])) - 1)
beta[[i]] = cbind(beta[[i]], -drop(beta[[i]] \%*\% rep(1, length(unique(snm.obj$int.var[,i])) - 1)))
}
sapply(1:snm.obj$n.arrays, function(id) {
preds <- predict(basisSplineFunction, snm.obj$M[,id])
int.fx <- -preds \%*\% beta[[1]][,as.numeric(snm.obj$int.var[,1])[id]]
for(i in 2:dim(snm.obj$int.var)[2]) {
int.fx <- int.fx + -preds \%*\% beta[[i]][,as.numeric(snm.obj$int.var[,i])[id]]
}
-int.fx
}) -> snm.obj$array.fx
# Add useful variables to snm.obj
snm.obj$Y.pooled <- Y.pooled
snm.obj$M.pooled <- M.pooled
snm.obj$bin.densities <- sapply(bins,length)
return(snm.obj)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
e19ebc8cbdab4af28941517cd3bd839307c5a12a | aabebc5609759c7b79f8e586a9e00bf3a901a408 | /run_analysis.R | 49dc51279ef043d8c014a5e8fae66546716d4f30 | [] | no_license | Grizzly16/GettingAndCleaningDataPeerAssess1 | af940c82dee59b536faca881bb49cb5f206c7da8 | 038c512b12635fde52f36051d255c46848ec2815 | refs/heads/master | 2016-09-08T02:06:10.383504 | 2014-04-24T20:27:18 | 2014-04-24T20:27:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,508 | r | run_analysis.R | # Getting and Cleaning Data - peer assess 1
setwd("~/Desktop/Classes/Getting and cleaning data/Programming Assignments")
library(reshape2)
#data is stored in a data folder
subFolder <- c("./UCI HAR Dataset/") # update this if you've moved the folders around
print("Reading labels")
#get the activity lables
actLbl <-read.table(paste(subFolder, "activity_labels.txt", sep=""))
colnames(actLbl) <- c("activityID", "activityName")
head(actLbl)
print("Reading features")
#get our features list
featList <-read.table(paste(subFolder, "features.txt", sep=""))
colnames(featList) <- c("num", "name")
head(featList)
#get the training data
print("Reading training data.")
xTrain <- read.table(paste(subFolder, "train/x_train.txt", sep=""))
yTrain <- read.table(paste(subFolder, "train/y_train.txt", sep=""))
trainSubjectID <- read.table(paste(subFolder, "train/subject_train.txt", sep=""))
print("Training data read in.")
#get the test data
print("Reading Test data.")
xTest <- read.table(paste(subFolder, "test/X_test.txt", sep=""))
yTest <- read.table(paste(subFolder, "test/y_test.txt", sep=""))
testSubjectID<- read.table(paste(subFolder, "test/subject_test.txt", sep=""))
print("Testing data read in.")
#apply column names
colnames(xTest) <- featList[,2]
colnames(xTrain) <- featList[,2]
#combine test and train data
fullDataSet <- rbind(xTrain, xTest)
#combine activity lists & subjects info
activityIds <- rbind(yTrain, yTest)
subjectIds <- rbind(trainSubjectID, testSubjectID)
#merge activty descriptors and numerical equivs
fullActivityMap<- merge(x=activityIds, y=actLbl, by.x="V1", by.y="activityID", sort=FALSE)
#add activity and subject details to our single dataset
colnames(fullActivityMap) <- c("activityID", "activityName")
fullDataSet$activityID <- fullActivityMap$activityID
fullDataSet$activityName <- fullActivityMap$activityName
colnames(subjectIds) <- c("subjectID")
#tada our one dataset!
fullDataSet$subjectID <- subjectIds[,1]
dt<-data.table(fullDataSet)
#melt it!
cl <- colnames(fullDataSet)
usableCols <- cl[grep("(std\\(\\)|mean\\(\\))",cl)] # get the columns we want
meltedDT <- melt(fullDataSet, id=563:564, measure=usableCols)
# convert to tidy dataset with one entry per activity, subject and measurement
tidy <- dcast(meltedDT, subjectID +activityName + variable ~., mean)
colnames(tidy) <- c("SubjectID","ActivityName","MeasurementType","MeanValue")
#write out a file, uncomment if you want to do this
write.table(tidy, file=paste(subFolder, "tidy_summary.txt", sep=""))
|
4eada06d06ce4f23b669e75c8317339377ff0dfb | 28474bc816d56724df45e8a74c2b524144212091 | /cachematrix.R | 18c109fa7868aa88ac00b7be7543e4307968445a | [] | no_license | tlee133/ProgrammingAssignment2 | d83a8ac9f212e59bed8e000e9ab1971a344040b9 | f9913c3e541148558837175249ebf11b568b47d5 | refs/heads/master | 2021-01-19T21:58:41.565776 | 2015-05-15T08:49:44 | 2015-05-15T08:49:44 | 35,644,072 | 0 | 0 | null | 2015-05-15T00:17:16 | 2015-05-15T00:17:14 | null | UTF-8 | R | false | false | 2,753 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
# makeCacheMatrix creates a object which can be used to store a matrix and its
# inverse. Calling the cacheSolve function on the makeCacheMatrix object will
# solve for the inverse matrix of the matrix in the makeCacheMatrix object and
# save it to that object. The cacheSolve function will also return the inverse of
# the matrix. If the cacheSolve function is called more than once it will return
# the cached (previously calculated) inverse matrix.
# Function: makeCacheMatrix
# Input: Invertable Matrix
# Output: Matrix Object to cache a matrix and its inverse.
# Notes: cacheSolve must be called on this object at least once to set the
# Inverse matrix.
makeCacheMatrix <- function(x = matrix()) {
# Initialize variables. Dim of inverse matrix will be 1x1
# upon creation of matrix object.
inverse <- matrix()
# Set Value of matrix object.
set <- function(y){
x <<- y
inverse <<-matrix()
}
# Function: Return Original Matrix
get <- function() x
# Function: Set inverse to inv.
setinv <- function(inv) inverse <<- inv
# Function: Return Inverse Matrix
getinv <- function() inverse
# Return Values
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
# Function: cacheSolve
# Input: makeCacheMatrix Object
# Output: Returns the inverse of the matrix stored in makeCacheMatrix object.
# Notes: Updates the inverse matrix of the input object if not available. The
# Function will print to console when retrieving cached data.
cacheSolve <- function(x = makeCacheMatrix(), ...) {
## Return a matrix that is the inverse of 'x'
# Get current value of inverse matrix.
inv <- x$getinv()
# If inverse and original are equal, the inverse matrix has been populated.
if (sum(dim(x$get())==dim(x$getinv())) == 2) {
# Check if matrix is equal to default inverse dimensions (1x1).
# There's a chance the value of the inverse is NA.
if (sum(dim(x$get()) == c(1,1)) == 2){
# Check if 1x1 inverse matrix is NA - if yes, create the inverse matrix.
if(is.na(x$getinv())){
mat = x$get()
inv = solve(mat)
x$setinv(inv)
inv
}
# Otherwise return cached 1x1 inverse.
else{
message("getting cached data")
return(inv)
}
}
# Otherwise return the inverse matrix.
else{
message("getting cached data")
return(inv)
}
}
# If dimensions do not match, calculate the inverse matrix, set the value
# of the inverse matrix in the makeCacheMatrix Object, and
# return the inverse matrix.
else{
mat = x$get()
inv = solve(mat)
x$setinv(inv)
inv
}
}
|
af9622498d3dd7062a0de9c16ff4d821e603a13c | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/4315_0/rinput.R | 5e81c3d029c447d7670b301df7e807d0cdc0dece | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
testtree <- read.tree("4315_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="4315_0_unrooted.txt") |
7e75119ad33db254078e937ae50d8d6879a7f3e2 | 78c1ddb45451d526b4b60309199345330303ceb0 | /R/aaa.r | ede51bc5b81fc1f9633a2d3a0f4c9348cd881653 | [
"MIT"
] | permissive | mevers/hrbrthemes | 5acc8e4b092e3991bca6ef93eb59e0999f23ab12 | db5717567c28be224c2b42bb9054774720299afb | refs/heads/master | 2020-09-04T20:01:11.665983 | 2019-11-15T00:47:17 | 2019-11-15T00:47:17 | 219,876,680 | 2 | 1 | NOASSERTION | 2019-11-06T00:24:43 | 2019-11-06T00:24:42 | null | UTF-8 | R | false | false | 306 | r | aaa.r | try_require <- function(package, fun) {
if (requireNamespace(package, quietly = TRUE)) {
library(package, character.only = TRUE)
return(invisible())
}
stop("Package `", package, "` required for `", fun , "`.\n", # nocov start
"Please install and try again.", call. = FALSE) # nocov end
} |
0a26c6941bc4b553e82cc1a837e3c790a1ca0ee7 | 9874fc9b629c8893efe000a35ccf27390bece5f6 | /00_R_Source/Mapping.R | b4947615fb9b169d55a7b8539cc2063f9c1ae127 | [
"MIT"
] | permissive | SchSascha/manuscript_tripleRNAseq | b7a15c2cc835dbeae0b5e8893ebccedd3052a2c2 | 9f35ed2d75041e6c87f26afd0c9c1e068ec57e19 | refs/heads/master | 2022-12-23T02:47:37.489128 | 2020-10-01T13:48:58 | 2020-10-01T13:48:58 | 299,326,599 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 37,151 | r | Mapping.R | ####-
##-====================================================
##-========================== MAPPING ===========================
##-====================================================
####-
#' Make TopHat2 (Bowtie2) genome index
#'
#' Wrapper function for building the Bowtie2 index files required by TopHat2
#' from a genome FASTA file.
#'
#' @export
#' @param genomeFile Path to genome FASTA file.
#' @param customOut Optional. Path/prefix for the index files. By default,
#' the index is written next to 'genomeFile' (its path without extension).
#' @return A list with keyword arguments: \cr
#' \describe{
#'   \item{outpref}{Prefix of index files. This is used as input for Bowtie2 or
#'                  TopHat2.}
#'   \item{call   }{System call used to build the index files.}
#' }
#' @examples
#' \dontrun{
#' make_Tophat_index("path/to/genome.fa")
#' }
#' make_Tophat_index
make_Tophat_index <- function(genomeFile, customOut = NULL) {
    # Locate the bowtie2-build binary (PATH lookup or BOWTIE2_EXEC override).
    exec <- .get_executable("bowtie2-build", "bowtie2", sysVar = "BOWTIE2_EXEC")
    writeLines(c("Make bowtie2 index using:", exec))
    # Default index prefix: genome file path without its extension.
    if (is.null(customOut)) {
        prefix <- tools::file_path_sans_ext(genomeFile)
    } else {
        prefix <- customOut
    }
    cmd <- paste(exec, genomeFile, prefix)
    system(cmd)
    list(outpref = prefix, call = cmd)
}
#' List Additional TopHat2 Arguments
#'
#' These arguments are not required to run TopHat2, but their usage usually
#' leads to better results. They are active by default and can be omitted by
#' setting 'no.default' to TRUE.
#'
#' @export
#' @param paired Logical indicating whether paired-end files are used.
#' Defaults to FALSE.
#' @return Character(1).
#' @seealso \code{\link{run_Tophat}}
#' @examples args_Tophat_default()
args_Tophat_default <- function(paired = FALSE) {
    # Single-end runs use the common tuning flags only; paired-end runs
    # additionally forbid mixed and discordant alignments.
    if (!paired) {
        return("-g 2 --b2-very-sensitive --no-coverage-search")
    }
    "-g 2 --b2-very-sensitive --no-coverage-search --no-mixed --no-discordant"
}
#' List TopHat2 Arguments
#'
#' List the arguments used for running TopHat2 by GEO2RNA-seq. More precisely,
#' these arguments are used for the function \code{\link{run_Tophat}}.
#'
#' @export
#' @param paired Logical indicating whether paired-end files are used.
#' Defaults to FALSE.
#' @return A list with keyword arguments. Keywords are 'default' and 'locked'.
#' @seealso \code{\link{run_Tophat}}
#' @examples args_Tophat_default()
args_Tophat <- function(paired = FALSE) {
    # Gather both argument sets once, print them for the user, then return
    # them as a named list.
    locked <- args_Tophat_locked(paired)
    default <- args_Tophat_default(paired)
    writeLines(c(
        "We need these arguments for input, output and multi-threading:",
        paste(locked, collapse = " "),
        "And those arguments in addition:",
        paste(default, collapse = " ")
    ))
    list(default = default, locked = locked)
}
#' List Locked TopHat2 Arguments
#'
#' TopHat2 requires certain arguments to define input and output files, as well
#' as certain modes. These arguments are set up by the wrapper function and
#' should not be supplied using the 'addArgs' argument of
#' \code{\link{run_Tophat}}.
#'
#' @export
#' @param paired Logical indicating whether paired-end files are used.
#' Kept for interface compatibility; the locked set is identical for
#' single-end and paired-end runs. Defaults to FALSE.
#' @return Character(n).
#' @seealso \code{\link{run_Tophat}}
#' @examples args_Tophat_locked()
args_Tophat_locked <- function
(
    paired = FALSE
){
    # Both branches of the former if/else returned the same vector, so the
    # redundant conditional was removed. 'paired' is retained so existing
    # callers (and a possible future divergence) keep working.
    c("tophat2", "-p", "-o", "-G")
}
#' Run TopHat2
#'
#' Wrapper function for TopHat2, a tool for precise mapping of reads in FASTQ
#' format to a reference genome in FASTA format.
#'
#' The reference genome must be indexed once before it can be used. See
#' \code{\link{make_Tophat_index}} to index genome reference files.
#' \cr\cr
#' See \code{\link{args_Tophat_default}} for default TopHat2 arguments. It is
#' not allowed to set locked arguments defined by \code{\link{args_Tophat_locked}}
#' via 'addArgs' .
#'
#' @export
#' @param files List of files in FASTQ format.
#' @param index Path to index files INCLUDING the genome prefix.
#' @param outDir General directory for output files. Multiple subdirectory will
#' be created. Output BAM files will put in '<outDir>/bamfiles/'.
#' Defaults to "mapping".
#' @param is.paired Logical indicating if paired-end reads are used. Defaults to
#' FALSE.
#' @param transcriptome Path to transcriptome files as prefix (NO file
#' extensions!). If supplied, 'anno' is ignored and the given transcriptome is
#' used instead.
#' @param anno Additional annotation. TopHat2 will try to map to these gene
#' locations first. Any unmapped read will then be searched against the whole
#' genome. Defaults to NA.
#' @param addArgs Additional arguments. It is not allowed to set locked
#' arguments. They are set by the corresponding wrapper function. See details.
#' Defaults to NA.
#' @param no.default If TRUE, do not use default parameters, see details.
#' Defaults to FALSE.
#' @inheritParams parallelizationParameters
#' @param overwrite Logical indicating if TopHat2 should overwrite files if
#' 'outDir' already contains some files.
#' If the directory is not empty and overwrite is FALSE, all BAM files found
#' in '<outDir>/bamfiles' will be returned.
#' @param use.existing Logical indicating if existing output BAM files
#' should be returned. Defaults to TRUE. If set to FALSE, execution will stop
#' if existing BAM files would be overwritten.
#' @param tophat_exec Optional. Path to TopHat2 binary.
#' @references Kim D, Pertea G, Trapnell C, Pimentel H, Kelley R, Salzberg SL.
#' TopHat2: accurate alignment of transcriptomes in the presence of insertions,
#' deletions and gene fusions. Genome Biology 2013, 14:R36. Available online
#' at: \url{https://ccb.jhu.edu/software/tophat/index.shtml}
#' @return A list with keyword arguments: \cr
#' \describe{
#' \item{files }{Paths to output BAM files.}
#' \item{calls }{System calls to TopHat2.}
#' \item{version}{TopHat2 version.}
#' \item{tool }{The name of the tool.}
#' }
#' @examples
#' \dontrun{
#' files <- c("f1.fastq", "f2.fastq")
#' genome <- "genome.fa"
#' index <- "genome"
#' anno <- "annotation.gtf"
#' run_Tophat(files = files, index = index, anno = anno)
#'
#' files <- c("f1_1.fastq", "f1_2.fastq", "f2_1.fastq", "f2_2.fastq")
#' newArgs <- "-g 3"
#' run_Tophat(files = files, index = index, anno = anno, is.paired = TRUE,
#' addArgs = newArgs, no.default = TRUE)
#' }
#' run_Tophat
# Dispatcher: validates input FASTQ files and existing outputs, then hands
# the work to the single-end or paired-end driver. See the roxygen block
# above for the full argument documentation.
run_Tophat <- function
(
    files,
    index,
    outDir = "./mapping",
    is.paired = FALSE,
    transcriptome = NA,
    anno = NA,
    addArgs = NA,
    no.default = FALSE,
    cpus = 1,
    workers = 5,
    overwrite = FALSE,
    use.existing = TRUE,
    tophat_exec = ""
){
    # check if input files exist
    # (asPairVector presumably flattens a pair matrix/list into a plain
    # character vector -- TODO confirm against its definition)
    if (FALSE %in% file.exists(asPairVector(files))) {
        f <- asPairVector(files)
        stop(paste(
            "Files do not exist:\n",
            paste(shQuote(f[!file.exists(f)]), collapse = "\n ")
        ))
    }
    # abort execution if one or more outfiles already exist but should not be used
    # Expected BAM names are derived from the (first-mate) FASTQ basenames,
    # mirroring how the drivers name their outputs in '<outDir>/bamfiles'.
    if (!use.existing) {
        bamDir <- file.path(outDir, "bamfiles")
        if (is.paired)
            f <- if (is.matrix(files)) files[,1] else asPaired(files)[,1]
        else
            f <- files
        bam_files <- paste0(tools::file_path_sans_ext(basename(f)), ".bam")
        bam_files <- file.path(bamDir, bam_files)
        if (TRUE %in% file.exists(bam_files))
            stop("One or more files already exist and use.existing is FALSE!")
    }
    writeLines("TopHat2 - mapping ...")
    # Delegate to the matching driver. Both drivers accept the same keyword
    # arguments apart from the name of the files argument; 'share' keeps its
    # default (TRUE) in both cases.
    if (is.paired) {
        resmapping <- .run_Tophat_paired(
            pairedFiles = files,
            index = index,
            outDir = outDir,
            transcriptome = transcriptome,
            anno = anno,
            addArgs = addArgs,
            no.default = no.default,
            cpus = cpus,
            workers = workers,
            overwrite = overwrite,
            tophat_exec = tophat_exec
        )
    } else {
        resmapping <- .run_Tophat_unpaired(
            fqFiles = files,
            index = index,
            outDir = outDir,
            transcriptome = transcriptome,
            anno = anno,
            addArgs = addArgs,
            no.default = no.default,
            cpus = cpus,
            workers = workers,
            overwrite = overwrite,
            tophat_exec = tophat_exec
        )
    }
    # Driver result: list(files, calls, version, tool).
    return(resmapping)
}
# Unpaired (Single-end) TopHat2
#
# This version is for single-end reads only. One TopHat2 run is started per
# FASTQ file (parallelized with BiocParallel). Each run writes into its own
# sub-directory of 'outDir'; the resulting 'accepted_hits.bam' is renamed to
# '<FASTQ basename>.bam' and moved to '<outDir>/bamfiles'.
#
# @rdname run_Tophat_unpaired
# @inherit run_Tophat
# @param fqFiles List of files in FASTQ format.
.run_Tophat_unpaired <- function
(
    fqFiles,
    index,
    outDir,
    transcriptome = NA,
    anno = NA,
    share = TRUE,
    addArgs = NA,
    no.default = FALSE,
    cpus = 1,
    workers = 5,
    overwrite = FALSE,
    tophat_exec = NA
){
    # Map the i-th FASTQ file; return its BAM path and the system call used.
    driver <- function(i) {
        # Note: make a directory for each FASTQ file
        fileName <- basename(tools::file_path_sans_ext(fqFiles[i]))
        tophatOutDir <- file.path(outDir, fileName)
        if (!file.exists(tophatOutDir))
            dir.create(tophatOutDir)
        bamFile <- file.path(bamdir, paste0(fileName, ".bam"))
        # Skip files that were already mapped unless overwriting is requested.
        if (!overwrite && file.exists(bamFile)) {
            message("File already exists: ", shQuote(bamFile), ". Skipped!")
            return(list(bam = bamFile, call = "NOT USED"))
        }
        # set additional arguments. This includes:
        # 1. checking addArgs for locked arguments
        # 2. dealing with annotation files
        args <- if (!no.default) args_Tophat_default(paired=FALSE) else ""
        if (TRUE %in% (addArgs %in% args_Tophat_locked(paired=FALSE)))
            stop("You supplied one or more locked arguments! See 'args_Tophat_locked' for more information.")
        # BUGFIX: test is.null() first -- is.na(NULL) is logical(0), which
        # breaks '||' (an error since R 4.3). all() also guards against
        # addArgs vectors of length > 1.
        if (!(is.null(addArgs) || all(is.na(addArgs))))
            args <- paste(args, addArgs)
        # A (pre-built) transcriptome index takes precedence over 'anno'.
        if (!is.null(transcriptome) && !is.na(transcriptome))
            args <- paste(args, paste0("--transcriptome-index=", transcriptome))
        else if (!(is.null(anno) || is.na(anno))) {
            if (!file.exists(anno))
                stop("Given annotation file does not exist:", anno)
            else
                args <- paste(args, "-G", anno)
        }
        # make cmd call
        call = paste(
            tophat_exec,
            "-p", allocCPUS$for_call[i],# number of threads to use
            args,
            "-o", tophatOutDir, # bam files will be put here
            index, # genome index file
            fqFiles[i], # input FASTQ file
            ">",
            file.path(outDir, paste0(fileName, ".tophatout.txt")), # just some statistics from tophat
            "2>&1"
        )
        system(call)
        # rename accepted_hits.bam to '<FASTQ file>.bam' and move it to './Mapping/Bamfiles'
        file.rename(from=file.path(tophatOutDir, "accepted_hits.bam"), to=bamFile )
        alignStatFile <- file.path(outDir, paste0(fileName, ".align_summary.txt"))
        file.rename(from=file.path(tophatOutDir, "align_summary.txt"), to=alignStatFile)
        return(list(bam = bamFile, call = call))
    }
    if (FALSE %in% file.exists(fqFiles))
        warning(paste0(
            "One or more files do not exist:\n",
            paste(fqFiles[!file.exists(fqFiles)], collapse="\n")
        ))
    # Resolve the TopHat2 executable. BUGFIX: NULL/NA must be tested before
    # the string comparison (NULL == "" is logical(0) and breaks '||').
    if (is.null(tophat_exec) || is.na(tophat_exec) || tophat_exec == "")
        tophat_exec <- .get_executable("tophat2", "tophat", "TOPHAT2_PATH")
    else if (!file.exists(tophat_exec))
        stop(paste(
            "Cannot find TopHat2 binary: user given file does not exist at",
            "path:", tophat_exec
        ))
    version <- system(paste(tophat_exec, "--version"), intern = TRUE)
    bamdir <- file.path(outDir, "bamfiles")
    dir.create(bamdir, recursive = TRUE, showWarnings = FALSE)
    # check if transcriptome is valid and exists
    if (!is.null(transcriptome) && !is.na(transcriptome)) {
        if (!file.exists(paste0(transcriptome, ".fa"))) {
            # BUGFIX: the two branches were swapped. If stripping the file
            # extension yields an existing FASTA, the user supplied a file
            # name instead of a prefix -> warn and strip. Otherwise the
            # transcriptome cannot be found at all -> stop.
            if (file.exists(paste0(tools::file_path_sans_ext(transcriptome), ".fa"))) {
                warning("Transcriptome path to long. Remove file endings for next run!")
                transcriptome <- tools::file_path_sans_ext(transcriptome)
            } else
                stop(paste(
                    "Cannot find FASTA of transcriptome. Supply only the prefix of files!",
                    transcriptome
                ))
        }
    } else if (!is.null(anno) && !is.na(anno) && file.exists(anno) && share) {
        # create transcriptome index beforehand.
        # This will speed up mapping on large genomes!
        transcriptome_dir <- file.path(outDir, "transcriptome")
        transcriptome <- file.path(
            transcriptome_dir,
            tools::file_path_sans_ext(basename(anno))
        )
        call <- paste(
            tophat_exec,
            "-G",
            anno,
            paste0("--transcriptome-index=", transcriptome),
            "-o",
            transcriptome_dir, # also store transcriptome index logfiles here
            index
        )
        system(call)
    }
    # Distribute the available CPUs over parallel workers and per-call threads.
    allocCPUS <- allocate_cpus(cpus, length(fqFiles), workers)
    bio_par <- BiocParallel::MulticoreParam(
        workers = allocCPUS$in_parallel,
        tasks = length(fqFiles),
        progressbar = TRUE
    )
    allRes <- tryCatch({
        BiocParallel::bplapply(1:length(fqFiles), driver, BPPARAM = bio_par)
    }, error = identity)
    .stop_at_error(allRes, from = match.call()[[1]])
    return(list(
        files = sapply(allRes, function(x) x[["bam"]]),
        calls = sapply(allRes, function(x) x[["call"]]),
        version = version,
        tool = "tophat"
    ))
}
# Paired-end TopHat2
#
# This is for paired-end reads only. One TopHat2 run is started per FASTQ
# pair (parallelized with BiocParallel). Each run writes into its own
# sub-directory of 'outDir'; the resulting 'accepted_hits.bam' is renamed to
# '<first-mate basename>.bam' and moved to '<outDir>/bamfiles'.
#
# @rdname run_Tophat_paired
# @inherit run_Tophat
# @param pairedFiles Matrix or list of files in FASTQ format. Matrix must have
#                    pairs as rows (first column is for forward reads, second
#                    column is for backward reads).
.run_Tophat_paired <- function
(
    pairedFiles,
    index,
    outDir = "./mapping",
    transcriptome = NA,
    anno = NA,
    share = TRUE,
    addArgs = NA,
    no.default = FALSE,
    cpus = 1,
    workers = 5,
    overwrite = FALSE,
    tophat_exec = NA
){
    # Map the i-th FASTQ pair; return its BAM path and the system call used.
    driver <- function(i) {
        # Note: make a directory for each FASTQ file
        fileName <- basename(tools::file_path_sans_ext(pairedFiles[i,1]))
        tophatOutDir <- file.path(outDir, fileName)
        bamFile <- file.path(bamdir, paste0(fileName, ".bam"))
        if (!file.exists(tophatOutDir))
            dir.create(tophatOutDir)
        # Skip pairs that were already mapped unless overwriting is requested.
        if (!overwrite && file.exists(bamFile)) {
            message("File already exists: ", shQuote(bamFile), ". Skipped!")
            return(list(bam = bamFile, call = "NOT USED"))
        }
        # get additional arguments
        args <- if (!no.default) args_Tophat_default(paired=TRUE) else ""
        # CONSISTENCY: check against the paired-end locked argument set
        # (previously paired=FALSE; both sets are currently identical).
        if (TRUE %in% (addArgs %in% args_Tophat_locked(paired=TRUE)))
            stop("You supplied one or more locked arguments! See 'args_Tophat_locked' for more information.")
        # BUGFIX: test is.null() first -- is.na(NULL) is logical(0), which
        # breaks '||' (an error since R 4.3). all() also guards against
        # addArgs vectors of length > 1.
        if (!(is.null(addArgs) || all(is.na(addArgs))))
            args <- paste(args, addArgs)
        # BUGFIX: honor a user-supplied (or shared, pre-built) transcriptome
        # index, as the single-end driver does and as documented for
        # run_Tophat ("If supplied, 'anno' is ignored"). Previously a
        # stand-alone 'transcriptome' argument was silently ignored here.
        if (!is.null(transcriptome) && !is.na(transcriptome))
            args <- paste(args, paste0("--transcriptome-index=", transcriptome))
        else if (!(is.null(anno) || is.na(anno))) {
            if (!file.exists(anno))
                stop("Given annotation file does not exist:", anno)
            else
                args <- paste(args, "-G", anno)
        }
        call=paste(
            tophat_exec,
            "-p", allocCPUS$for_call[i], # number of threads to use
            args,
            "-o", tophatOutDir, # BAM files will be put here
            index, # genome index file
            pairedFiles[i,1], # 1 input FASTQ file
            pairedFiles[i,2], # 2 input FASTQ file
            ">",
            file.path(outDir, paste0(fileName, ".tophatout.txt")), # just some statistics from TopHat2
            "2>&1"
        )
        system(call)
        # rename accepted_hits.bam to '<FASTQ file>.bam' and move it to './Mapping/Bamfiles'
        file.rename(from=file.path(tophatOutDir, "accepted_hits.bam"), to=bamFile)
        alignStatFile <- file.path(outDir, paste0(fileName, ".align_summary.txt"))
        file.rename(from=file.path(tophatOutDir, "align_summary.txt"), to=alignStatFile)
        return(list(bam = bamFile, call = call))
    }
    if (FALSE %in% file.exists(pairedFiles))
        warning(paste0(
            "One or more files do not exist:\n",
            paste(pairedFiles[!file.exists(pairedFiles)], collapse="\n")
        ))
    # Resolve the TopHat2 executable. BUGFIX: NULL/NA must be tested before
    # the string comparison (NULL == "" is logical(0) and breaks '||').
    if (is.null(tophat_exec) || is.na(tophat_exec) || tophat_exec == "")
        tophat_exec <- .get_executable("tophat2", "tophat", "TOPHAT2_PATH")
    if (!file.exists(tophat_exec))
        stop(paste(
            "Cannot find TopHat2 binary: user given file does not exist at",
            "path:", tophat_exec
        ))
    version <- system(paste(tophat_exec, "--version"), intern = TRUE)
    # Accept pairs given as a flat vector/list and fold them into a matrix.
    if (is.vector(pairedFiles) || is.list(pairedFiles)) pairedFiles <- asPaired(pairedFiles)
    bamdir <- file.path(outDir, "bamfiles")
    dir.create(bamdir, recursive = TRUE, showWarnings = FALSE)
    # check if transcriptome is valid and exists
    if (!is.null(transcriptome) && !is.na(transcriptome)) {
        if (!file.exists(paste0(transcriptome, ".fa"))) {
            # BUGFIX: the two branches were swapped. If stripping the file
            # extension yields an existing FASTA, the user supplied a file
            # name instead of a prefix -> warn and strip. Otherwise the
            # transcriptome cannot be found at all -> stop.
            if (file.exists(paste0(tools::file_path_sans_ext(transcriptome), ".fa"))) {
                warning("Transcriptome path to long. Remove file endings for next run!")
                transcriptome <- tools::file_path_sans_ext(transcriptome)
            } else
                stop(paste(
                    "Cannot find FASTA of transcriptome. Supply only the prefix of files!",
                    transcriptome
                ))
        }
    } else if (!is.null(anno) && !is.na(anno) && file.exists(anno) && share) {
        # create transcriptome index beforehand.
        # This will speed up mapping on large genomes!
        transcriptome_dir <- file.path(outDir, "transcriptome")
        transcriptome <- file.path(
            transcriptome_dir,
            tools::file_path_sans_ext(basename(anno))
        )
        call <- paste(
            tophat_exec,
            "-G",
            anno,
            paste0("--transcriptome-index=", transcriptome),
            "-o",
            transcriptome_dir, # also store transcriptome index logfiles here
            index
        )
        system(call)
    }
    # Distribute the available CPUs over parallel workers and per-call threads.
    allocCPUS <- allocate_cpus(cpus, nrow(pairedFiles), workers)
    bio_par <- BiocParallel::MulticoreParam(
        workers = allocCPUS$in_parallel,
        tasks = nrow(pairedFiles),
        progressbar = TRUE
    )
    allRes <- tryCatch({
        BiocParallel::bplapply(1:nrow(pairedFiles), driver, BPPARAM = bio_par)
    }, error = identity)
    .stop_at_error(allRes, from = match.call()[[1]])
    return(list(
        files = sapply(allRes, function(x) x[["bam"]]),
        calls = sapply(allRes, function(x) x[["call"]]),
        version = version,
        tool = "tophat"
    ))
}
#' Make HISAT2 genome index
#'
#' Wrapper function for creating HISAT2 index files.
#'
#' If you want to use exon and splice site information, you need to use the
#' two python scripts (see arguments) which can be found in your HISAT2
#' installation directory.
#'
#' @export
#' @inheritParams make_Tophat_index
#' @inheritParams parallelizationParameters
#' @inherit make_Tophat_index return return
#' @param exonFile Tabular files as generated by 'hisat2_extract_exons.py'.
#' @param spliceFile Tabular file as generated by 'hisat2_extract_splice_sites.py'.
#' @examples
#' \dontrun{
#' make_HiSat2_index("path/to/genome.fasta")
#' make_HiSat2_index("path/to/genome.fasta",
#'                   exonFile = "exons.tab",
#'                   spliceFile = "splice.tab")
#' }
#' make_HiSat2_index
make_HiSat2_index <- function(genomeFile, customOut = NULL, exonFile = "",
                              spliceFile = "", cpus = 1) {
    # Locate the hisat2-build binary (PATH lookup or HISAT2_EXEC override).
    exec <- .get_executable("hisat2-build", "hisat2", "HISAT2_EXEC")
    writeLines(c("Make HISAT2 index using:", exec))
    # Assemble builder options: thread count plus optional exon and splice
    # site tables (each must exist if given).
    opts <- paste("-p", cpus)
    if (exonFile != "") {
        if (!file.exists(exonFile))
            stop(paste("Exon file doesn't exist at path: ", exonFile))
        opts <- paste(opts, "--exon", exonFile)
    }
    if (spliceFile != "") {
        if (!file.exists(spliceFile))
            stop(paste("Splice site files doesn't exist at path:", spliceFile))
        opts <- paste(opts, "--ss", spliceFile)
    }
    # Default index prefix: genome file path without its extension.
    prefix <- if (is.null(customOut)) tools::file_path_sans_ext(genomeFile) else customOut
    cmd <- paste(exec, opts, genomeFile, prefix)
    system(cmd)
    list(outpref = prefix, call = cmd)
}
#' List Additional HISAT2 Arguments
#'
#' Return the default arguments used to run HISAT2.
#'
#' Input and output arguments are not included. They must not be
#' overwritten directly. See \code{\link{args_HiSat2_locked}} for further
#' information.
#'
#' @export
#' @param paired Show arguments for paired-end or single-end reads.
#' @return Character(1).
#' @examples args_HiSat2_default()
args_HiSat2_default <- function(paired = FALSE) {
    # Single-end runs use the common tuning flags only; paired-end runs
    # additionally forbid mixed and discordant alignments.
    if (!paired) {
        return("-k 2 --no-unal")
    }
    "-k 2 --no-unal --no-mixed --no-discordant"
}
#' List HISAT2 Arguments
#'
#' List the arguments used for running HISAT2 by GEO2RNA-seq. More precisely,
#' these arguments are used for the function \code{\link{run_Hisat2}}.
#'
#' @export
#' @param paired Logical indicating whether paired-end files are used.
#' Defaults to FALSE.
#' @return A list with keyword arguments. Keywords are: 'default' and 'locked'.
#' @seealso \code{\link{run_Hisat2}}
#' @examples args_HiSat2()
args_HiSat2 <- function(paired = FALSE) {
    # Gather both argument sets once, print them for the user, then return
    # them as a named list.
    locked <- args_HiSat2_locked()
    default <- args_HiSat2_default(paired)
    writeLines(c(
        "We need these arguments for input, output and multi-threading:",
        paste(locked, collapse = " "),
        "And those arguments in addition:",
        paste(default, collapse = " ")
    ))
    list(default = default, locked = locked)
}
#' List Locked HISAT2 Arguments
#'
#' HISAT2 requires certain arguments to define input and output files, as well
#' as certain modes. These arguments are set up by the wrapper function and
#' should not be supplied using the 'addArgs' argument of
# \code{\link{run_Hisat2}}.
#'
#' @export
#' @seealso \code{\link{run_Hisat2}}, \code{\link{args_HiSat2}}
#' @return Character(n).
#' @examples args_HiSat2_locked()
args_HiSat2_locked <- function() {
    c("hisat2", "-p", "--summary-file", "-x", "-U")
}
# NOTES:
# only SAM output
# -U for unpaired; -1 <m1> -2 <m2> for paired;
# for unpaired: --un, --al
# for paired: --no-mixed, --no-discordant, --un-conc & --al-conc
#' Run HISAT2
#'
#' Wrapper function for HISAT2, a very fast tool for aligning reads in FASTQ
#' format to a reference genome in FASTA format.
#'
#' The reference genome must be indexed once before it can be used. See
#' \code{\link{make_HiSat2_index}} to index genome reference files. Exon and
#' splice site information must be generated from outside the R environment.
#' See the description of \code{\link{make_HiSat2_index}} for more details.
#' \cr\cr
#' See \code{\link{args_HiSat2_default}} for default HISAT2 arguments. It is
#' not allowed to set locked arguments defined by \code{\link{args_HiSat2_locked}}
#' via 'addArgs' .
#'
#' @export
#' @inheritParams run_Tophat
#' @param splice Additional HISAT2 specific argument.
#' @param as.bam If TRUE, HISAT2 output will be converted to BAM files.
#' If SAMtools is installed on your system, this will be done in memory (no
#' SAM files). Defaults to TRUE.
#' @param keepSam Only has an effect if 'as.bam=TRUE'. If FLASE, intermediate
#' SAM files are removed. If TRUE, they are kept. Defaults to FALSE.
#' @param hisat_exec Path to HISAT2 binary.
#' @return A list with keywords:\cr
#' \describe{
#' \item{files }{Character vector. Path to alignment output files.}
#' \item{summaryFiles}{Character vector. Paths to output summary files.}
#' \item{calls }{Character vector. System calls to HISAT2.}
#' \item{version }{Version of HISAT2.}
#' \item{tool }{The name of the tool.}
#' }
#' @references Kim, Daehwan, Ben Langmead, and Steven L. Salzberg. "HISAT: a
#' fast spliced aligner with low memory requirements." Nature methods 12.4
#' (2015): 357-360.
#' Available online at: \url{http://www.ccb.jhu.edu/software/hisat/index.shtml}
#' @examples
#' \dontrun{
#' files <- c("f1.fastq", "f2.fastq")
#' genome <- "genome.fa"
#' index <- "genome"
#' run_Hisat2(files = files, index = index)
#'
#' files <- c("f1_1.fastq", "f1_2.fastq", "f2_1.fastq", "f2_2.fastq")
#' newArgs <- "-k 3"
#' run_Hisat2(files = files, index = index, is.paired = TRUE,
#' addArgs = newArgs, no.default = TRUE)
#' }
#' run_Hisat2
run_Hisat2 <- function
(
  files,
  index,
  outDir = "./mapping",
  is.paired = FALSE,
  addArgs = "",
  splice = TRUE,
  no.default = FALSE,
  as.bam = TRUE,
  keepSam = FALSE,
  cpus = 1,
  workers = 5,
  overwrite = FALSE,
  use.existing = TRUE,
  hisat_exec = ""
){
  # Front end for HISAT2: validates inputs, optionally refuses to reuse
  # pre-existing output files, then dispatches to the single-end or
  # paired-end worker.  Both workers return the same list structure
  # (files / summaryFiles / calls / version / tool).
  # Check that every input FASTQ file exists
  # (any(!...) is the idiomatic form of the previous 'FALSE %in% ...').
  all.files <- asPairVector(files)
  if (any(!file.exists(all.files))) {
    stop(paste(
      "Files do not exist:\n ",
      paste(shQuote(all.files[!file.exists(all.files)]), collapse = "\n ")
    ))
  }
  if (!use.existing) {
    # Caller asked us NOT to reuse existing results: abort if any expected
    # SAM/BAM output file is already present.
    bamDir <- file.path(outDir, "bamfiles")
    samDir <- file.path(outDir, "samfiles")
    # For paired data the output name is derived from the first mate only.
    if (is.paired)
      f <- if (is.matrix(files)) files[,1] else asPaired(files)[,1]
    else
      f <- files
    base_names <- tools::file_path_sans_ext(basename(f))
    bam_files <- file.path(bamDir, paste0(base_names, ".bam"))
    if (any(file.exists(bam_files)))
      stop("One or more files already exist in BAM directory and use.existing is FALSE!")
    sam_files <- file.path(samDir, paste0(base_names, ".sam"))
    if (any(file.exists(sam_files)))
      stop("One or more files already exist in sam directory and use.existing is FALSE!")
  }
  writeLines("HISAT2 - mapping ...")
  # Dispatch to the appropriate worker; note 'no.default' is inverted into
  # the workers' 'use_default' argument.
  if (is.paired) {
    res <- .run_HiSat2_paired(
      files = files,
      outDir = outDir,
      index = index,
      args = addArgs,
      use_default = !no.default,
      splice = splice,
      as.bam = as.bam,
      keepSam = keepSam,
      cpus = cpus,
      workers = workers,
      overwrite = overwrite,
      hisat_exec = hisat_exec
    )
  } else {
    res <- .run_HiSat2_unpaired(
      files = files,
      outDir = outDir,
      index = index,
      args = addArgs,
      use_default = !no.default,
      splice = splice,
      as.bam = as.bam,
      keepSam = keepSam,
      cpus = cpus,
      workers = workers,
      overwrite = overwrite,
      hisat_exec = hisat_exec
    )
  }
  return(res)
}
# Run HISAT2 Single-End
#
# This wrapper function is for single-end files only.
#
# @rdname run_HiSat2_unpaired
# @inherit run_Hisat2
.run_HiSat2_unpaired <- function
(
  files,
  outDir,
  index,
  args = "",
  use_default=TRUE,
  splice = TRUE,
  as.bam = TRUE,
  keepSam = FALSE,
  cpus = 1,
  workers = 10,
  overwrite = FALSE,
  hisat_exec = NA
) {
  # Aligns single-end FASTQ files with HISAT2, one system() call per file.
  # BAM output is produced either in memory (piping through "samtools view"
  # when a samtools binary is found) or afterwards via Rsamtools::asBam.
  #
  # NOTE: driver() is defined first but only executed by bplapply() at the
  # bottom of the function, AFTER samDir/bamDir/version/samtools_exec/
  # allocCPUS have been assigned; it reads them through its enclosing
  # environment (lexical scoping).
  driver <- function(i) {
    # Maps the i-th input file and returns list(map, summary, call).
    fileName <- tools::file_path_sans_ext(basename(files[i]))
    samOut <- file.path(samDir, paste0(fileName, ".sam"))
    bamOut <- file.path(bamDir, paste0(fileName, ".bam"))
    summaryOut <- file.path(outDir, paste0(fileName, ".summary.txt"))
    # SAM or BAM files may already exist. SAM files may just be converted to BAM files
    if (!overwrite && as.bam && file.exists(bamOut)) {
      message("\nBAM file already exists:", shQuote(bamOut), " . Skipped!\n")
      return(list(map = bamOut, summary=summaryOut, call="NOT USED"))
    }
    if (!overwrite && file.exists(samOut)) {
      # Alignment already done; at most convert the existing SAM to BAM.
      message("\nSAM file already exists:", shQuote(samOut), " . Skipped!\n")
      if (as.bam) {
        writeLines("\nConverting to BAM...")
        Rsamtools::asBam(samOut, sub("\\.bam$", "", bamOut), indexDestination=FALSE)
        if (!keepSam)
          file.remove(samOut)
        return(list(map = bamOut, summary=summaryOut, call="NOT USED"))
      } else {
        return(list(map = samOut, summary=summaryOut, call="NOT USED"))
      }
    }
    # Assemble the HISAT2 command line.  'args' is modified only inside this
    # call's local copy, so per-file additions do not accumulate.
    if (use_default)
      args <- paste(args, args_HiSat2_default())
    if (!splice)
      args <- paste(args, "--no-spliced-alignment")
    # --new-summary / --summary-file only exist from HISAT2 2.1.0 on.
    if (compareVersion(version, "2.1.0") >= 0)
      args <- paste(args, "--new-summary --summary-file", summaryOut)
    call <- paste(
      hisat_exec,
      args,
      "-p", allocCPUS$for_call[i],
      "-x", index,
      "-U", shQuote(files[i])
      #"-S", samOut
    )
    # Prefer streaming SAM straight into "samtools view" (no SAM on disk);
    # fall back to writing a SAM file when samtools is unavailable.
    if (as.bam && !is.na(samtools_exec))
      call <- paste(call, "|", samtools_exec, "view -bS - >", bamOut)
    else
      call <- paste(call, "-S", samOut)
    # run HISAT2
    msg <- system(call, intern = TRUE)
    message("\n", msg, "\n")
    # if SAMtools is not available, use Rsamtools for BAM conversion
    if (as.bam) {
      if (!is.na(samtools_exec)) {
        return(list(map = bamOut, summary=summaryOut, call=call))
      } else {
        writeLines(paste("\nConverting:", samOut))
        Rsamtools::asBam(samOut, sub("\\.bam$", "", bamOut), indexDestination=FALSE)
        if (!keepSam)
          file.remove(samOut)
        return(list(map = bamOut, summary=summaryOut, call=call))
      }
    } else
      return(list(map = samOut, summary=summaryOut, call=call))
  }
  # Locate the HISAT2 binary: either from the argument or via the
  # HISAT2_EXEC environment variable / PATH lookup.
  if (hisat_exec == "" || is.null(hisat_exec) || is.na(hisat_exec))
    hisat_exec <- .get_executable("hisat2", sysVar = "HISAT2_EXEC")
  else if (!file.exists(hisat_exec))
    stop(paste(
      "Cannot find HISAT2 binary: user given file does not exist at",
      "path:", hisat_exec
    ))
  # NOTE: do NOT add 'hisat' as prefix. Version is needed as arguments!
  # Extract the version number (last whitespace-separated token of the
  # "... version x.y.z" line of `hisat2 --version`).
  version <- grep(
    "hisat.*version",
    system(paste(hisat_exec, "--version"), intern = TRUE),
    value = TRUE
  )
  version <- gdata::last(unlist(strsplit(version, split = " ")))
  # samtools is optional; NA means "not found" and disables in-memory
  # BAM conversion.
  samtools_exec <- tryCatch({
    .get_executable("samtools", sysVar = "SAMTOOLS_EXEC")
  }, error = function(e) {
    return(NA)
  })
  if (is.na(samtools_exec) && as.bam)
    message("Could not find SAMtools on your system. In-memory BAM conversion disabled.")
  # Create the output directory layout used by driver().
  samDir <- file.path(outDir, "samfiles")
  if (!file.exists(samDir))
    dir.create(samDir, recursive = TRUE, showWarnings = FALSE)
  bamDir <- file.path(outDir, "bamfiles")
  if (!file.exists(bamDir))
    dir.create(bamDir, recursive = TRUE, showWarnings = FALSE)
  # Split 'cpus' between parallel workers and threads per HISAT2 call.
  allocCPUS <- allocate_cpus(cpus, length(files), if (!as.bam) 2 else workers)
  bio_par <- BiocParallel::MulticoreParam(
    workers = allocCPUS$in_parallel,
    tasks = length(files),
    progressbar = TRUE
  )
  # Run all files; .stop_at_error() re-raises any failure with context.
  allRes <- tryCatch({
    BiocParallel::bplapply(1:length(files), driver, BPPARAM = bio_par)
  }, error = identity)
  .stop_at_error(allRes, from = match.call()[[1]])
  return(list(
    files = sapply(allRes, function(x) x[["map"]]),
    summaryFiles = sapply(allRes, function(x) x[["summary"]]),
    calls = sapply(allRes, function(x) x[["call"]]),
    version = version,
    tool = "hisat"
  ))
}
# Run Paired HISAT2
#
# This wrapper function is for paired-end files only.
#
# @rdname run_HiSat2_paired
# @inherit run_Hisat2
.run_HiSat2_paired <- function
(
  files,
  outDir,
  index,
  args = "",
  use_default=TRUE,
  splice = TRUE,
  as.bam = TRUE,
  keepSam = FALSE,
  cpus = 1,
  workers = 5,
  overwrite = FALSE,
  hisat_exec = NA
) {
  # Aligns paired-end FASTQ files with HISAT2 (one row of the 2-column
  # 'files' matrix per sample).  Mirrors .run_HiSat2_unpaired: driver() is
  # defined first but only runs via bplapply() after samDir/bamDir/version/
  # samtools_exec/allocCPUS are assigned below (lexical scoping).
  driver <- function(i) {
    # Output names are derived from the first mate of pair i.
    fileName <- tools::file_path_sans_ext(basename(files[i,1]))
    samOut <- file.path(samDir, paste0(fileName, ".sam"))
    bamOut <- file.path(bamDir, paste0(fileName, ".bam"))
    summaryOut <- file.path(outDir, paste0(fileName, ".summary.txt"))
    # SAM or BAM files may already exist. SAM files may just be converted to BAM files
    if (!overwrite && as.bam && file.exists(bamOut)) {
      message("\nBAM file already exists:", shQuote(bamOut), " . Skipped!\n")
      return(list(map = bamOut, summary=summaryOut, call="NOT USED"))
    }
    if (!overwrite && file.exists(samOut)) {
      message("\nSAM file already exists:", shQuote(samOut), " . Skipped!\n")
      if (as.bam) {
        writeLines("\nConverting to BAM...\n")
        Rsamtools::asBam(samOut, sub("\\.bam$", "", bamOut), indexDestination=FALSE)
        if (!keepSam)
          file.remove(samOut)
        return(list(map = bamOut, summary=summaryOut, call="NOT USED"))
      } else {
        return(list(map = samOut, summary=summaryOut, call="NOT USED"))
      }
    }
    # Assemble the HISAT2 command line (local copy of 'args', so per-file
    # additions do not accumulate across calls).
    if (use_default)
      args <- paste(args, args_HiSat2_default(paired = TRUE))
    if (!splice)
      args <- paste(args, "--no-spliced-alignment")
    # --new-summary / --summary-file only exist from HISAT2 2.1.0 on.
    if (compareVersion(version, "2.1.0") >= 0)
      args <- paste(args, "--new-summary --summary-file", summaryOut)
    call <- paste(
      hisat_exec,
      args,
      "-p", allocCPUS$for_call[i],
      "-x", index,
      "-1", shQuote(files[i,1]),
      "-2", shQuote(files[i,2])
      #"-S", samOut
    )
    # Prefer streaming SAM straight into "samtools view"; otherwise write a
    # SAM file and convert afterwards.
    if (as.bam && !is.na(samtools_exec))
      call <- paste(call, "|", samtools_exec, "view -bS - >", bamOut)
    else
      call <- paste(call, "-S", samOut)
    # run HISAT2
    msg <- system(call, intern = TRUE)
    message("\n", msg, "\n")
    # if SAMtools is not available, use Rsamtools for BAM conversion
    if (as.bam) {
      if (!is.na(samtools_exec)) {
        return(list(map = bamOut, summary=summaryOut, call=call))
      } else {
        writeLines(paste("\nConverting:", samOut))
        # BUG FIX: the source SAM file argument was missing here, so asBam()
        # was called with the destination as its input file (cf. the
        # single-end variant, which passes samOut first).
        Rsamtools::asBam(samOut, sub("\\.bam$", "", bamOut), indexDestination=FALSE)
        if (!keepSam)
          file.remove(samOut)
        return(list(map = bamOut, summary=summaryOut, call=call))
      }
    } else
      return(list(map = samOut, summary=summaryOut, call=call))
  }
  # nothing exists or overwrite == TRUE
  # Locate the HISAT2 binary.
  # NOTE(review): the single-end variant looks up sysVar = "HISAT2_EXEC"
  # here; one of the two environment-variable names is probably wrong --
  # confirm and unify.
  if (hisat_exec == "" || is.null(hisat_exec) || is.na(hisat_exec))
    hisat_exec <- .get_executable("hisat2", sysVar = "HISAT2_PATH")
  else if (!file.exists(hisat_exec))
    stop(paste(
      "Cannot find HISAT2 binary: user given file does not exist at",
      "path:",
      hisat_exec
    ))
  # Extract the version number (last token of the "... version x.y.z" line).
  version <- grep(
    "hisat.*version",
    system(paste(hisat_exec, "--version"), intern = TRUE),
    value = TRUE
  )
  version <- gdata::last(unlist(strsplit(version, split = " ")))
  # convert to 2 column matrix, if neccessary
  if (is.vector(files) || is.list(files))
    files <- asPaired(files)
  # samtools is optional; NA disables in-memory BAM conversion.
  samtools_exec <- tryCatch({
    .get_executable("samtools", sysVar = "SAMTOOLS_EXEC")
  }, error = function(e) {
    return(NA)
  })
  if (is.na(samtools_exec) && as.bam)
    message("Could not find SAMtools on your system. In-memory BAM conversion disabled.")
  # Create the output directory layout used by driver().
  samDir <- file.path(outDir, "samfiles")
  dir.create(samDir, recursive = TRUE, showWarnings = FALSE)
  bamDir <- file.path(outDir, "bamfiles")
  dir.create(bamDir, recursive = TRUE, showWarnings = FALSE)
  # Split 'cpus' between parallel workers and threads per HISAT2 call.
  allocCPUS <- allocate_cpus(cpus, nrow(files), if (!as.bam) 2 else workers)
  bio_par <- BiocParallel::MulticoreParam(
    workers = allocCPUS$in_parallel,
    tasks = nrow(files),
    progressbar = FALSE
  )
  # Run all pairs; .stop_at_error() re-raises any failure with context.
  allRes <- tryCatch({
    BiocParallel::bplapply(1:nrow(files), driver, BPPARAM = bio_par)
  }, error = identity)
  .stop_at_error(allRes, from = match.call()[[1]])
  return(list(
    files = sapply(allRes, function(x) x[["map"]]),
    summaryFiles = sapply(allRes, function(x) x[["summary"]]),
    calls = sapply(allRes, function(x) x[["call"]]),
    version = version,
    tool = "hisat"
  ))
}
|
9bda910173fc98df1ab13e8345081338937efafe | 384c3dbc571be91c6f743d1427dec00f13e0d8ae | /r/kernels/graf10a-titanic-alexey-pronin-1/script/titanic-alexey-pronin-1.R | 9198ebfe6e8e03a755d5160a12a1347b11c4f8b0 | [] | no_license | helenaK/trustworthy-titanic | b9acdd8ca94f2fa3f7eb965596eed4a62821b21e | ade0e487820cf38974561da2403ebe0da9de8bc6 | refs/heads/master | 2022-12-09T20:56:30.700809 | 2020-09-10T14:22:24 | 2020-09-10T14:22:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,427 | r | titanic-alexey-pronin-1.R | # Reading the train and test data.
# Read the Kaggle Titanic training and test sets.
train <- read.csv('../input/train.csv')
test <- read.csv('../input/test.csv')
# Adding the 'Survived' column to the test data
# to make it compatible with the training data
# for rbind-ing.
test$Survived <- rep(NA, nrow(test))
# Combining the data
data <- rbind(train, test)
# Inspecting the data
summary(data)
# There are missing values: 418 in the 'Survived' column
# (expected -- we put them there), 263 in the 'Age' column, and 1 in
# the Fare column. Replace the Age/Fare gaps with median values.
library(Hmisc)
data$Age <- impute(data$Age, fun = median)
data$Fare <- impute(data$Fare, fun = median)
# How many unique entries do we have in each column?
sapply(apply(data, 2, unique), length)
# We will not be using the 'PassengerId', 'Name', 'Ticket',
# and 'Cabin' columns in our modeling. Creating the formula
# for future modeling.
fmla <- as.formula('Survived ~ Pclass + Sex + Age + SibSp +
                   Parch + Fare + Embarked')
# Separating the training and test data from 'data'
n <- nrow(train)
train <- data[1:n, ]
# Splitting the training data 80/20 into a fitting and a validation part.
set.seed(123)
split <- sample(1:n, round(0.8*n))
train_train <- train[split, ]
train_test <- train[-split, ]
# Baseline model: logistic regression.
model_short <- glm(fmla, train_train,
                   family = 'binomial')
summary(model_short)
prob_short <- predict(model_short, train_test, type = 'response')
pred_short <- ifelse(prob_short > 0.5, 1, 0)
table(train_test$Survived, pred_short)
mean(train_test$Survived == pred_short)
# Random forest fit
# randomForest package
library(randomForest)
# BUG FIX: the argument was misspelled 'nrees' (silently swallowed via '...');
# the correct randomForest() argument name is 'ntree'.
model_rf <- randomForest(fmla, train_train, ntree = 500, mtry = 2)
summary(model_rf)
prob_rf <- predict(model_rf, train_test, type = 'response')
pred_rf <- ifelse(prob_rf > 0.5, 1, 0)
table(train_test$Survived, pred_rf)
mean(train_test$Survived == pred_rf)
# ranger package
library(ranger)
model_ranger <- ranger(fmla, train_train, num.trees = 500,
                       respect.unordered.factors = 'order')
prob_ranger <- predict(model_ranger, train_test)$predictions
pred_ranger <- ifelse(prob_ranger > 0.5, 1, 0)
table(train_test$Survived, pred_ranger)
mean(train_test$Survived == pred_ranger)
# Now let's try the 'caret' package:
library(caret)
set.seed(42)
myFolds <- createFolds(train$Survived, k = 5)
myControl <- trainControl(
  method = 'cv',
  number = 5,
  summaryFunction = twoClassSummary,
  classProbs = TRUE,
  verboseIter = TRUE,
  savePredictions = TRUE,
  index = myFolds
)
# caret needs a factor outcome with syntactically valid level names
# ('No'/'Yes') to compute class probabilities and ROC.
train$Survived <- as.factor(train$Survived)
levels(train$Survived) <- c('No', 'Yes')
model_rf_1 <- train(
  fmla, train,
  metric = "ROC",
  method = "ranger",
  trControl = myControl
)
pred_rf_1 <- predict(model_rf_1)
confusionMatrix(pred_rf_1, train$Survived)
model_glmnet <- train(
  fmla, train,
  metric = "ROC",
  method = "glmnet",
  tuneGrid = expand.grid(
    alpha = 0:1,
    lambda = 0:10/10
  ),
  trControl = myControl
)
pred_glmnet <- predict(model_glmnet)
confusionMatrix(pred_glmnet, train$Survived)
# We will use model_rf_1 for our final submission.
# Separating the test data (corrected).
test <- data[(n + 1):nrow(data), ]
# Making predictions.
pred <- predict(model_rf_1, test, type = 'raw')
test$Survived <- ifelse(pred == 'No', 0, 1)
write.csv(test[c('PassengerId', 'Survived')],
'Titanic_submission.csv',
row.names = FALSE) |
9af127fb573c2aaed2f9f375044dbb55e6f62a90 | 8a043e5910c0b145c9a939d28d4f41275e1a0ff4 | /plot4.R | 552f287f176f9ce8cdb0acec772b617382f8424f | [] | no_license | crusainte/Exploratory-Data-Project | a4e21d35f40c1bd072b88273e7e5507ddd86ccbe | d261cdca7048993b7efcd404035e587c9d726fc8 | refs/heads/master | 2021-03-12T22:03:42.875062 | 2015-02-20T16:19:50 | 2015-02-20T16:19:50 | 31,066,655 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,401 | r | plot4.R | ## Plot addresses the following question:
## Across the United States, how have emissions from coal
## combustion-related sources changed from 1999–2008?
## Read in the files
NEI <- readRDS("./exdata-data-NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("./exdata-data-NEI_data/Source_Classification_Code.rds")
library(dplyr)
## Filter coal related sources.
## BUG FIX: the original selected SCC/Short.Name first and then referenced
## SCC$EI.Sector inside filter(); under dplyr's data masking 'SCC' resolves
## to the (atomic) SCC column of the piped data, so '$' fails.  Filtering on
## EI.Sector before selecting uses the column directly and is robust.
SCC.filter <- SCC %>%
    filter(grepl("coal", EI.Sector, ignore.case = TRUE)) %>%
    select(SCC, Short.Name)
## Merge and filter NEI for coal related sources
NEI.filter <- merge(NEI, SCC.filter, by.x = "SCC", by.y = "SCC")
## Perform group by year, then summarize the coal related Emissions.
## Lastly, divide the results by 10^3 so that the plot is easier to read
## (kilotons instead of tons).
NEI.output <- NEI.filter %>%
    group_by(year) %>%
    summarize(emit.sum = sum(Emissions)) %>%
    mutate(emit.sum = emit.sum / 10^3)
## Create png image to plot on
png("plot4.png")
## Plot the output using the base plotting system with line width of 3
## and line histogram type
plot(NEI.output$year, NEI.output$emit.sum, lwd = 3, type = "h",
     xlab = "Year",
     ylab = "PM2.5 Emissions (Kilotons)",
     main = "PM2.5 Emissions from Coal-Combustion Sources Across US"
     )
## Add a line plot across the existing plot to better reveal the
## relationship between each year
lines(NEI.output$year, NEI.output$emit.sum)
## Output plot to png file
dev.off()
## Perform clean up
rm(NEI.output)
rm(NEI.filter)
rm(SCC.filter) |
0155e2e3ef48e9a49bcfe114046ed6c8988f1e1f | 03d819fd2b7fc35f9221310634d6c47693fd69f0 | /man/distance.moved.Rd | c958f80fff2e5b2955178fdb5c1fd9f51cb09fe5 | [] | no_license | pydemull/modeid | 4ccce0b9653e9ca7385d414ffe89001437028829 | 8e6be89501f5df94a10f7e434f22dde81dfbb913 | refs/heads/master | 2023-03-22T23:20:52.443700 | 2019-03-11T06:14:23 | 2019-03-11T06:14:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,048 | rd | distance.moved.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distance.moved.R
\name{distance.moved}
\alias{distance.moved}
\title{Distance moved}
\usage{
distance.moved(dataset, last, time.window, epoch.length)
}
\arguments{
\item{dataset}{The dataset you want to apply this to; we assume it has been created by processing a file using this package}
\item{last}{whether you want the time window before (last=TRUE) or the following time window (last=FALSE)}
\item{time.window}{The window across which to measure distance in seconds}
\item{epoch.length}{The epoch.length you are working with, in seconds}
}
\description{
A function that measures the distance moved between points separated by a given
time window. It currently only works with coordinates expressed as easting and
northing; it can be modified as needed -- contact the author if this is required.
}
\details{
Given a dataset as input, this will output a variable of the same length as the dataset's variables.
Each element will contain the distance from a point to the point the specified time window away.
}
|
a8b46089156740b24f2aeeccd080e67f02dc9cec | 5b87ee9403a3c1275c1dbfcc87f7c18b6d0f77f5 | /Course Project 2/plot5.R | cb3e3b7d923870c19e5a00258f7d61838beca314 | [] | no_license | ben-liu/Exploratory-Data-Analysis | 6f882b65299bd5bff245b12f1c71c8251f0ffda8 | c9d460131bcbe60af9c0c550f8f4ccf3f54e1d68 | refs/heads/master | 2021-01-25T06:37:09.326604 | 2014-10-26T08:12:47 | 2014-10-26T08:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 786 | r | plot5.R | # setwd("~/Google Drive/Coursera/Exploratory Data Analysis/Course Project 2")
# Read the NEI and SCC data sets.
NEI <- readRDS("./exdata-data-NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("./exdata-data-NEI_data/Source_Classification_Code.rds")
# 5. How have emissions from motor vehicle sources changed from 1999–2008
# in Baltimore City?
# Source classification codes whose EI.Sector mentions vehicles/motors.
mv.code <- SCC[grep(".*Vehicle|.*Motor", SCC$EI.Sector), ]
# Baltimore City records (fips == "24510") for those sources.
mv <- NEI[NEI$SCC %in% mv.code$SCC & NEI$fips %in% "24510", ]
# Total emissions per year.
x <- with(mv, aggregate(Emissions, by = list(year), sum))
names(x) <- c("Year", "Emissions")
png(file = "plot5.png")
par(mar = c(5, 5, 4, 2) + 0.1)
# BUG FIX: the original had an extra closing parenthesis after this call,
# which made the whole script fail to parse; it has been removed.
with(x, plot(Year, Emissions, type = "l",
             ylab = expression('Total PM'[2.5]*" Emission"),
             main = "Total PM2.5 emission from motor vehicle \n from 1999 to 2008 in Baltimore City")
)
dev.off()
|
bb0e07384149ed24012464b8f8b18757fb4f2737 | 1d90318c97024bd3821b2ed160012682711d5772 | /R_projects_6414/2_Multiple_linear_regression/fram.R | 57f5923873e3a3aa98983ac013f2d5fe0c9930e5 | [] | no_license | adrihernand/Projects_6414 | 194b0a93533dd766a5c606a01aa2f1dceb0a20e5 | 136e8a41968e3b26b331d1be0edd07d1d7c38c8b | refs/heads/main | 2023-07-27T01:20:07.252283 | 2021-09-06T06:41:48 | 2021-09-06T06:41:48 | 403,512,044 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 879 | r | fram.R | # Quizz 2
# Importing the data
data <- read.csv("C:\\Users\\adri_\\Documents\\Gatech\\ISYE6414\\Homeworks\\2_Multiple_linear_regression\\fram.csv", head = TRUE)
# Converting SEX and CURSMOKE to factors so lm() treats them as categorical
data$SEX <- as.factor(data$SEX)
data$CURSMOKE <- as.factor(data$CURSMOKE)
# Fitting the linear model.
# NOTE: 'level = 0.99' was removed from all lm() calls -- 'level' is not an
# lm() argument (it was silently disregarded); a confidence level belongs in
# confint()/predict() instead.
multiple_model <- lm(SYSBP ~ SEX + AGE + CURSMOKE + BMI, data = data)
summary(multiple_model)
# Fitting the linear model only on the last (100th) observation.
# This is a degenerate fit: with a single row there are no residual
# degrees of freedom.
data2 <- data[100, ]
data2
multiple_model_last_obs <- lm(SYSBP ~ SEX + AGE + CURSMOKE + BMI, data = data2)
summary(multiple_model_last_obs)
# Filtering observations with BMI >= 30 and fitting the linear model
data3 <- data[data$BMI >= 30, ]
multiple_model_BMI30 <- lm(SYSBP ~ SEX + AGE + CURSMOKE + BMI, data = data3)
summary(multiple_model_BMI30)
|
5d044f35f9db9bac251a4dba698ef3aba7a494d2 | 21960cbad6a8d83b8e394513cfed63a96093f7fb | /R/convert_density.R | 21c8a84b986ca6ca217af028153fc7af5231c41f | [] | no_license | mattreusswig/convertUnits | fbbff61c7fba579862b5b733fa2bfb8ea2401b57 | e8764a0e7fe94bade7941294dd69bc8fa91a1f90 | refs/heads/master | 2021-03-06T05:34:10.347027 | 2020-03-18T01:46:01 | 2020-03-18T01:46:01 | 246,182,675 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,242 | r | convert_density.R | #' Convert between density units -- kg/m3, g/L, mg/L, lb/gal, lb/ft3
#'
#' @param x A vector of numbers to be converted.
#' @param from A character vector of the units x is in. Must be length 1 or same length as x.
#' @param to A character vector of the units into which x will be converted. Must be length 1 or same length as x.
#'
#' @return A vector of numbers converted FROM old units TO new units.
#' @export
#'
#' @examples
#' convert_density(1, "kg/m3", "lb/ft3")
#' convert_density(c(1, 10, 100), "kg/m3", "lb/ft3")
#' convert_density(c(1, 10, 100), c("g/l", "lb/gal", "lb/ft3"), c("lb/ft3", "g/l", "lb/gal"))
convert_density <- function(x, from, to) {
  ## Converts density values by pivoting through kg/m3: x is first
  ## normalised to kg/m3, then scaled to the target unit.
  ## List of allowable units (lowercase only).
  legal_units <- c("kg/m3", "g/l", "mg/l", "lb/gal", "lb/ft3")
  legal_units_message <- paste(legal_units, collapse = ", ")
  ## unit_div[i] is the value of 1 kg/m3 expressed in legal_units[i].
  unit_div <- c(1, 1, 1000, 0.008345406, 0.062428)
  ## Reject unrecognised units in 'from' or 'to'.
  if (!all(from %in% legal_units)) stop(paste("Input variable 'from' must be", legal_units_message, "and must be lowercase."))
  if (!all(to %in% legal_units)) stop(paste("Input variable 'to' must be", legal_units_message, "and must be lowercase."))
  ## 'from' and 'to' must be scalar (recycled over x) or match x in length.
  ## (Typo fix: the 'to' message previously said "lengtb".)
  if (!(length(from) == 1L || length(from) == length(x))) stop("Input variable 'from' must have a single element or be of equal length to 'x'.")
  if (!(length(to) == 1L || length(to) == length(x))) stop("Input variable 'to' must have a single element or be of equal length to 'x'.")
  ## Vectorised conversion: match() looks every unit up once, replacing the
  ## per-element mapply() loop; R's recycling handles scalar 'from'/'to'.
  result <- x / unit_div[match(from, legal_units)] * unit_div[match(to, legal_units)]
  return(result)
}
|
6fec6dcb288739f24f5a0708d92ff37235ca11b5 | 370a1098faf08af78a30c0ee7a65ac42091d0705 | /Clustering/kmeans.R | 7f8303f9670e85cb5addd7a9a5b0049a55da85db | [] | no_license | Mando75/CS450 | 81aa9f9569be3ccfbef3e8ff1827f176a419b1c2 | 105566d1faea1ae7bac97f8b54703d0947a65276 | refs/heads/master | 2020-04-15T19:21:53.032291 | 2019-05-10T01:31:37 | 2019-05-10T01:31:37 | 164,947,197 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,616 | r | kmeans.R | # Title : TODO
# Objective : k-means clustering of the US states data set (state.x77)
# Created by: bryanmuller
# Created on: 2019-03-13
###########################
# KMeans
########################
# Step 1
##########
# Standardise every column so each variable contributes equally to the
# Euclidean distances used by k-means.
data <- scale(state.x77)
# Step 2
#########
# Fit k-means with 3 clusters ('<-' instead of '=' for assignment).
# NOTE(review): kmeans() uses random starting centres, so without set.seed()
# the captured output below is not exactly reproducible.
myClusters <- kmeans(data, 3)
# Summary of the clusters
summary(myClusters)
# Length Class Mode
# cluster 50 -none- numeric
# centers 24 -none- numeric
# totss 1 -none- numeric
# withinss 3 -none- numeric
# tot.withinss 1 -none- numeric
# betweenss 1 -none- numeric
# size 3 -none- numeric
# iter 1 -none- numeric
# ifault 1 -none- numeric
# Centers (mean values) of the clusters
print(myClusters$centers)
# Population     Income  Illiteracy    Life Exp     Murder    HS Grad      Frost       Area
# 1 -0.2269956 -1.3014617 1.391527063 -1.1773136 1.0919809 -1.4157826 -0.7206500 -0.2340290
# 2 -0.4873370 0.1329601 -0.641201154 0.7422562 -0.8552439 0.5515044 0.4528591 -0.1729366
# 3 0.9462026 0.7416690 0.005468667 -0.3242467 0.5676042 0.1558335 -0.1960979 0.4483198
# Cluster assignments
print(myClusters$cluster)
# Alabama Alaska Arizona Arkansas California
# 3 1 1 3 1
# Colorado Connecticut Delaware Florida Georgia
# 2 2 2 1 3
# Hawaii Idaho Illinois Indiana Iowa
# 2 2 1 2 2
# Kansas Kentucky Louisiana Maine Maryland
# 2 3 3 2 1
# Massachusetts Michigan Minnesota Mississippi Missouri
# 2 1 2 3 1
# Montana Nebraska Nevada New Hampshire New Jersey
# 2 2 1 2 1
# New Mexico New York North Carolina North Dakota Ohio
# 3 1 3 2 1
# Oklahoma Oregon Pennsylvania Rhode Island South Carolina
# 2 2 1 2 3
# South Dakota Tennessee Texas Utah Vermont
# 2 3 1 2 2
# Virginia Washington West Virginia Wisconsin Wyoming
# 1 2 3 2 2
# Plotting a visual representation of k-means clusters
library(cluster)
png("k-3-clusters.png")
clusplot(data, myClusters$cluster, color=TRUE, shade=TRUE, labels=2, lines=0)
# BUG FIX: close the graphics device so the PNG is actually flushed to disk
# (the original script never called dev.off() for any of its plots).
dev.off()
# Step 3
###############
# Elbow method: total within-cluster sum of squares for k = 1..25.
# Preallocate the result vector instead of growing it inside the loop.
errors <- numeric(25)
for (k in seq_len(25)) {
  errors[k] <- kmeans(data, k)$tot.withinss
}
png("kmeans-sum-squares-error.png")
plot(errors, xlab = "k", ylab = "within-cluster sum of squares error")
# BUG FIX: close the device so the PNG file is written to disk.
dev.off()
# Step 4
################
# From elbow method
k <- 10
# '<-' instead of '=' for assignment.  NOTE(review): the name 'cluster'
# shadows the 'cluster' package attached above; kept unchanged because the
# Step 7 code below refers to it.
cluster <- kmeans(data, k)
# Step 5
###############
print(cluster$cluster)
# Alabama Alaska Arizona Arkansas California
# 4 2 3 4 10
# Colorado Connecticut Delaware Florida Georgia
# 7 1 8 8 4
# Hawaii Idaho Illinois Indiana Iowa
# 5 7 8 8 7
# Kansas Kentucky Louisiana Maine Maryland
# 7 4 4 9 8
# Massachusetts Michigan Minnesota Mississippi Missouri
# 1 8 7 4 8
# Montana Nebraska Nevada New Hampshire New Jersey
# 6 7 6 9 8
# New Mexico New York North Carolina North Dakota Ohio
# 3 10 4 1 8
# Oklahoma Oregon Pennsylvania Rhode Island South Carolina
# 3 5 8 1 4
# South Dakota Tennessee Texas Utah Vermont
# 9 4 10 7 9
# Virginia Washington West Virginia Wisconsin Wyoming
# 8 5 4 7 6
# Step 6
###############
png("kmeans-10.png")
clusplot(data, cluster$cluster, color=TRUE, shade=TRUE, labels=2, lines=0)
# BUG FIX: close the device so the PNG file is written to disk.
dev.off()
# Step 7
##############
# Inspect the 10 cluster centroids (coordinates are in scaled units,
# because 'data' was standardised in Step 1).
print(cluster$centers)
# Population Income Illiteracy Life Exp Murder HS Grad Frost Area
# 1 -0.3499380 0.5656501 -0.7710820 1.2544011 -1.1080742 0.55150442 0.859258777 -0.058630181
# 2 -0.4514893 0.5182516 0.0902330 0.8353735 -0.4748696 0.96161967 -1.571925102 -0.001018197
# 3 -0.7660428 -0.5843829 -0.9117048 0.4809958 -0.9150653 0.65342525 0.994267293 -0.099820942
# 4 -0.1667872 -1.3624751 1.8866900 -1.7868083 1.5933731 -1.55107136 -1.213139113 -0.287006387
# 5 -0.8429672 0.6862826 -1.0171720 -0.9077815 0.4935610 1.35471127 1.462846466 0.384520782
# 6 0.7891560 0.5328170 -0.3117140 -0.2462765 0.3093560 -0.19041729 -0.001154271 -0.342772830
# 7 2.8948232 0.4869237 0.6507713 0.1301655 1.0172810 0.13932569 -1.131057600 0.992720037
# 8 -0.2771693 -1.2506172 0.9788913 -0.6694013 0.6741541 -1.30304191 -0.310242469 -0.189881160
# 9 -0.8693980 3.0582456 0.5413980 -1.1685098 1.0624293 1.68280347 0.914567609 5.809349671
# 10 -0.3889962 0.1472000 -0.1148420 0.3157792 -0.7593038 -0.04122819 -0.013658877 -0.595660829
|
9e600e1db9c2f0016f34ecf5af66aee8871dbed5 | 5f1a9e4340ee17f4dcdf5c07f9abcf7c048e2e90 | /R/OandaPricing.R | eef8ad2cef43e92dd86f31a7158e72a3f1789ca8 | [
"MIT"
] | permissive | ivanliu1989/RQuantAPI | 764b5919443d8560e2e796e96616ea851ebdd96f | 00881560c93318e10596b6e0b324193b61268364 | refs/heads/master | 2020-07-21T20:35:59.982809 | 2019-09-07T13:11:59 | 2019-09-07T13:11:59 | 75,069,525 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,585 | r | OandaPricing.R | #' Get Priceing Information for Oanda Instruments
#'
#' @description
#' Get pricing information for a specified list of Instruments within an Account
#'
#' @param ACCOUNT_TYPE "practice", "real" or "sandbox"
#' @param ACCESS_TOKEN The authorization bearer token previously obtained by the client, can be found in oanda environment
#' @param ACCOUNT_ID ID of the Account to create the Order for.
#' @param INSTRUMENTS Character vector of instruments to get pricing for, e.g. "AUD_USD"
#'
#' @return A \code{data.frame} of current prices
#'
#' @examples
#' getOandaCurPricing(.oandaEnv$ACCOUNT_TYPE, .oandaEnv$ACCESS_TOKEN, .oandaEnv$ACCOUNT_ID, INSTRUMENTS = c('AUD_USD'))
#'
#' @export
getOandaCurPricing <- function(ACCOUNT_TYPE, ACCESS_TOKEN, ACCOUNT_ID, INSTRUMENTS = c('AUD_USD')){
  # Oanda expects a comma-separated instrument list; "%2C" is the
  # URL-encoded comma.
  INSTRUMENTS <- paste(INSTRUMENTS, collapse = "%2C")
  # Build the v3 pricing endpoint on the host matching the account type
  # (practice / real / sandbox).
  URL <- paste0("https://", .oandaEnv$ENVIRONMENTS$api[ACCOUNT_TYPE])
  URL <- paste0(URL, "/v3/accounts/", ACCOUNT_ID, "/pricing?instruments=")
  URL <- paste0(URL, INSTRUMENTS)
  HEADERS <- c(Authorization = paste("Bearer",ACCESS_TOKEN,sep=" "))
  json.data <- getURL(URL,cainfo=system.file("CurlSSL","cacert.pem",
                                             package="RCurl"),httpheader=HEADERS)
  # BUG FIX: the original tryCatch() discarded its value and assigned only
  # inside the expression, so any parsing error left 'parsed.data' undefined
  # and return(parsed.data) failed with "object not found".  Assign the
  # tryCatch() result instead; on error, warn and return NULL.
  parsed.data <- tryCatch({
    prices <- fromJSON(json.data, simplifyDataFrame = TRUE, flatten = TRUE)$prices
    prices <- prices[, c("instrument", "closeoutAsk", "closeoutBid", "status", "time")]
    colnames(prices) <- c('instrument', 'ask', 'bid', 'status', 'time')
    prices
  }, error = function(e) {
    warning("Failed to parse Oanda pricing response: ", conditionMessage(e), call. = FALSE)
    NULL
  })
  return(parsed.data)
}
# getCurrentPricingStream
|
e9806adbaa3cbb0e621fe7198e813a4c20863371 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/bsts/examples/plot.holiday.Rd.R | b29f91fb1b3e12188772b5b07a4c97b7d029cada | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,995 | r | plot.holiday.Rd.R | library(bsts)
### Name: plot.holiday
### Title: Plot Holiday Effects
### Aliases: PlotHoliday
### ** Examples
# Simulate two years (730 days) of daily data: a random-walk trend plus
# white noise, indexed by calendar dates as a zoo series.
trend <- cumsum(rnorm(730, 0, .1))
dates <- seq.Date(from = as.Date("2014-01-01"), length = length(trend),
                  by = "day")
y <- zoo(trend + rnorm(length(trend), 0, .2), dates)
AddHolidayEffect <- function(y, dates, effect) {
  ## Adds a holiday effect to simulated data.
  ## Args:
  ##   y: A zoo time series, with Dates for indices.
  ##   dates: The dates of the holidays.
  ##   effect: A vector of holiday effects of odd length.  The central effect is
  ##     the main holiday, with a symmetric influence window on either side.
  ## Returns:
  ##   y, with the holiday effects added.
  ## Start at the left edge of the influence window and slide forward one
  ## day at a time, adding the matching effect to every holiday occurrence.
  time <- dates - (length(effect) - 1) / 2
  for (i in seq_along(effect)) {  # seq_along is safe even for empty 'effect'
    y[time] <- y[time] + effect[i]
    time <- time + 1
  }
  return(y)
}
## Define some holidays.
## Each holiday gets a 3-element effect vector (day before / holiday /
## day after) and the dates of its occurrence in each simulated year.
memorial.day <- NamedHoliday("MemorialDay")
memorial.day.effect <- c(.3, 3, .5)
memorial.day.dates <- as.Date(c("2014-05-26", "2015-05-25"))
y <- AddHolidayEffect(y, memorial.day.dates, memorial.day.effect)
presidents.day <- NamedHoliday("PresidentsDay")
presidents.day.effect <- c(.5, 2, .25)
presidents.day.dates <- as.Date(c("2014-02-17", "2015-02-16"))
y <- AddHolidayEffect(y, presidents.day.dates, presidents.day.effect)
labor.day <- NamedHoliday("LaborDay")
labor.day.effect <- c(1, 2, 1)
labor.day.dates <- as.Date(c("2014-09-01", "2015-09-07"))
y <- AddHolidayEffect(y, labor.day.dates, labor.day.effect)
## The holidays can be in any order.
holiday.list <- list(memorial.day, labor.day, presidents.day)
number.of.holidays <- length(holiday.list)
## In a real example you'd want more than 100 MCMC iterations.
niter <- 100
## State specification: local-level trend plus regression-based holiday
## effects; then fit the BSTS model and plot Memorial Day's estimated effect.
ss <- AddLocalLevel(list(), y)
ss <- AddRegressionHoliday(ss, y, holiday.list = holiday.list)
model <- bsts(y, state.specification = ss, niter = niter)
PlotHoliday(memorial.day, model)
|
2260f58fe05c384a165c2525f7ee953dd72c1524 | 396df2552224ffcb0294fe6e297b231aa2e59e68 | /_working/0111-nz-election-results.R | 27f2b9383163c5455e862966a84de6fb2773f18f | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | ellisp/blog-source | d072bed980a5074d6c7fac03be3635f70ab5f098 | 1227f83df23af06da5280214ac7f2e0182be5707 | refs/heads/master | 2023-09-05T07:04:53.114901 | 2023-08-27T21:27:55 | 2023-08-27T21:27:55 | 122,695,494 | 17 | 8 | null | 2023-08-27T21:15:33 | 2018-02-24T02:36:45 | HTML | UTF-8 | R | false | false | 4,012 | r | 0111-nz-election-results.R | #-------------functionality and data----------------
library(tidyverse)
library(scales)
library(nzelect)
library(grid)
library(forcats)
library(testthat)
# load up the last prediction data, which is a data frame of 4,800 rows
# (one for each simulated result) and 10 columns of party results
# (the .rda is fetched to a temp file and deleted once loaded; it is
# expected to create `sims`, which is used below)
download.file("https://github.com/ellisp/ellisp.github.io/raw/source/data/ellis-final-nz-election-forecasts-2017.rda",
              destfile = "tmp.rda", mode = "wb")
load("tmp.rda")
unlink("tmp.rda")
#------------electorate seats that matter---------
# probability of Labour win in each of the seven Maori seats:
maori_probs <- data.frame(Labour = c(0.49, 0.52, 0.55, 0.58, 0.48, 0.64, 0.3)) %>%
  mutate(Other = 1 - Labour)
n <- nrow(sims) # number of simulations ie 4800
# filler rows guarantee every party appears in the electorate-seat table,
# even parties that win no simulated electorate seat (NZ First assumed to
# hold exactly one electorate seat in every simulation).
filler <- data.frame(
  party = c("Conservative", "Green", "NZ First", "United Future"),
  seats = c(0, 0, 1,0),
  sim = rep(1:n, each = 4)
)
# probability of ACT win in Epsom
epsom <- 0.8
# simulate electorate seat results:
# one Bernoulli draw per simulation for Epsom and for each of the seven
# Maori seats, then reshaped to one row per simulation with seat counts
# per party. NOTE(review): data_frame() is deprecated in favour of tibble().
electorate_sims <- data_frame(
  epsom = sample(c("ACT", "National"), prob = c(epsom, 1 - epsom), size = n, replace = TRUE),
  m1 = sample(c("Labour", "Maori"), prob = maori_probs[1, 1:2], size = n, replace = TRUE),
  m2 = sample(c("Labour", "Maori"), prob = maori_probs[2, 1:2], size = n, replace = TRUE),
  m3 = sample(c("Labour", "Maori"), prob = maori_probs[3, 1:2], size = n, replace = TRUE),
  m4 = sample(c("Labour", "Maori"), prob = maori_probs[4, 1:2], size = n, replace = TRUE),
  m5 = sample(c("Labour", "Mana"), prob = maori_probs[5, 1:2], size = n, replace = TRUE),
  m6 = sample(c("Labour", "Maori"), prob = maori_probs[6, 1:2], size = n, replace = TRUE),
  m7 = sample(c("Labour", "Maori"), prob = maori_probs[7, 1:2], size = n, replace = TRUE)
) %>%
  mutate(sim = 1:n()) %>%
  gather(seat, party, -sim) %>%
  group_by(party, sim) %>%
  summarise(seats = n()) %>%
  ungroup() %>%
  rbind(filler) %>%
  spread(party, seats, fill = 0)
#-------------convert to total seats-------------------------
# Apply the MMP seat-allocation rule (nzelect::allocate_seats) to every
# simulated party-vote draw, given the simulated electorate seats above.
seats <- t(sapply(1:n, function(i){
  allocate_seats(votes = as.numeric(sims[i, 1:9]),
                 electorate = as.numeric(electorate_sims[i, -1]),
                 parties = gsub("M.ori", "Maori", names(sims)[1:9]))$seats_v
})) %>%
  as_tibble()
#-------------compare to actual results----------------------
actual_results <- data_frame(
  party = c("ACT", "Green", "Labour", "Mana", "Maori", "National", "NZ First"),
  final_seats = c(1, 8, 46, 0, 0, 56, 9)
)
# Inline sanity check: the actual Parliament had 120 seats.
expect_equal(sum(actual_results$final_seats), 120)
d <- seats %>%
  gather(party, n_seats) %>%
  filter(!party %in% c("Conservative", "United Future", "Mana")) %>%
  left_join(actual_results) %>%
  mutate(success = ifelse(n_seats == final_seats, "Actual result", "Probability of other results")) %>%
  mutate(party = fct_reorder(party, desc(n_seats)))
# see https://stackoverflow.com/questions/4646020/ggplot2-axis-transformation-by-constant-factor
# for this idea to do a linear transformation of the y axis so it is probabilities rather than
# counts of the 4800 simulations:
# Turn a count of simulations on the y axis into a probability label:
# divide by the total number of simulations and keep 2 significant figures.
formatter <- function(x, n = 4800){
  proportion <- signif(x / n, 2)
  format(proportion, digits = 3)
}
# Swap in the macron spelling for display purposes only.
levels(d$party)[levels(d$party) == "Maori"] <- "M\U0101ori"
# One facet per party; the dark overlaid bar marks the seat count that
# actually happened, translucent bars show the forecast distribution.
svg("../img/0111-histograms.svg", 9,5)
d %>%
  ggplot(aes(x = n_seats, fill = party, alpha = success)) +
  facet_wrap(~party, scales = "free") +
  geom_histogram(colour = "white", binwidth = 1) +
  geom_histogram(data = filter(d, success == "Actual result"), colour = "white", fill = "grey10", binwidth = 1) +
  scale_alpha_manual("", values = c(`Actual result` = 0.9, `Probability of other results` = 0.3)) +
  labs(x = "Number of seats") +
  scale_y_continuous("Probability of outcome\n", labels = formatter) +
  ggtitle("Comparison of forecast and actual number of seats in the 2017 New Zealand election",
          "Forecasts are the combination of Peter's Stats' Stuff Models A and B") +
  scale_fill_manual(values = parties_v, guide = FALSE)
dev.off()
convert_pngs("0111")
|
cdc2d575a5e8f27fa1a6bf8844f5e1375a00b85a | b8723b94da48a1e39bb7b9b2495f6eb68c9dd7e8 | /R/kmbasis.R | 211d2c1f8afa012c89bceb0b39a364ef5ba79063 | [] | no_license | cran/kstMatrix | 50d9bdc9f8ebfb0b3fddb135a04e8fbba39df578 | b5fbab8ae6e34ce0461bd1b0eeb47b068ce62347 | refs/heads/master | 2023-01-28T00:44:11.723822 | 2023-01-23T14:30:02 | 2023-01-23T14:30:02 | 148,498,260 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,215 | r | kmbasis.R | #' Compute the basis of a knowledge space
#'
#' \code{kmbasis} returns a matrix representing the basis of a
#' knowledge space. If \code{x} is a knowledge structure or an
#' arbitrary family of sets \code{kmbasis} returns the basis of
#' the smallest knowledge space containing \code{x}.
#'
#' @param x Binary matrix representing a knowledge space
#' @return Binary matrix representing the basis of the knowledge space.
#'
#' @examples
#' kmbasis(xpl$space)
#'
#' @keywords math
#'
#' @export
kmbasis <- function(x) {
  # Validate input: a binary (0/1) matrix with one row per knowledge state.
  if (!inherits(x, "matrix")) {
    stop(sprintf("%s must be of class %s.", dQuote("x"), dQuote("matrix")))
  }
  if (any(x != 1 * as.logical(x))) {
    stop(sprintf("%s must be a binary matrix.", dQuote("x")))
  }
  n_states <- nrow(x)
  # Robustness: an empty family of states has an empty basis
  # (the original code would have errored on a 0-row matrix).
  if (n_states == 0) {
    return(x)
  }
  # A single state is its own basis unless it is the empty state.
  if (n_states == 1) {
    if (sum(x) > 0) {
      return(x)
    }
    return(NULL)
  }
  # For every comparable pair of states, add the subset's indicator vector to
  # the superset's accumulator row. Afterwards y[k, q] == 1 means item q
  # belongs to state k but to none of the other states contained in it.
  y <- x
  for (i in 2:n_states) {
    for (j in seq_len(i - 1)) {
      if (all(x[i, ] <= x[j, ])) {
        y[j, ] <- y[j, ] + x[i, ]
      } else if (all(x[j, ] <= x[i, ])) {
        y[i, ] <- y[i, ] + x[j, ]
      }
    }
  }
  # A state belongs to the basis iff it contributes at least one item that
  # none of its substates covers (some accumulator entry is exactly 1).
  keep <- apply(y, 1, function(counts) any(counts == 1))
  # BUG FIX: use drop = FALSE so the documented contract (a binary matrix) is
  # honoured even when the basis has a single state; the original in-place
  # deletion collapsed x to a plain vector in that case and then indexed it
  # incorrectly on subsequent deletions.
  x[keep, , drop = FALSE]
}
|
427ff4dfb11eb142b649df2713165b0a0d9d50cb | 0563103cc766f0cb981ccb8103594357e277dc74 | /man/total_reads.Rd | 8fd18a7c007ccba2bc3d8ad4aa4f24da3f2014a8 | [] | no_license | pblischak/polyfreqs | a140631ae572753ca6123180474f2b067508c27f | b03ec79562842f259ff89b368b6f8a1bf983c6bc | refs/heads/master | 2021-01-21T03:33:59.306182 | 2016-12-16T22:05:25 | 2016-12-16T22:05:25 | 34,615,921 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 373 | rd | total_reads.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_desc.R
\docType{data}
\name{total_reads}
\alias{total_reads}
\title{Total reads matrix}
\format{A 10 x 2 matrix.}
\usage{
data(total_reads)
}
\description{
A dataset of 10 individuals sampled at 2 loci with 20 reads per individual per locus. Used for package testing.
}
\keyword{datasets}
|
932795a5d1f0deb9b226a66bca089b9b6939670e | aa7bd0ecc5cd61e1115e874eb69bc78b682e88b0 | /plot1.R | 6119ae6569ffff704e490bdc5eb92b5deedfc8ab | [] | no_license | Chinasa1/ExData_Plotting1 | d82274a8e91e91de8be47f03c2102c9b17db6e7f | f461d4e61476f48beb44ee606aca2e47dd85a543 | refs/heads/master | 2020-05-17T16:06:47.624070 | 2019-04-28T07:04:44 | 2019-04-28T07:04:44 | 183,810,191 | 0 | 0 | null | 2019-04-27T18:48:35 | 2019-04-27T18:48:35 | null | UTF-8 | R | false | false | 908 | r | plot1.R | ##Plot 1
# Plot 1: histogram of Global Active Power for 1-2 Feb 2007
# (UCI "Individual household electric power consumption" dataset).

# Download and extract the dataset only when it is not already present.
dataFile <- "household_power_consumption.txt"
if (!file.exists(dataFile)) {  # BUG FIX: original called file.exists() with no argument
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(url = fileUrl, destfile = "power_consumption.zip", method = "curl")
  unzip("power_consumption.zip")
  file.remove("power_consumption.zip")
}

# Read the data; '?' marks missing values in this dataset.
powerCons <- read.table(file = dataFile, na.strings = "?", sep = ";", header = TRUE)

# Keep only the two days of interest (Date is a d/m/Y character string).
powerCons <- powerCons[powerCons$Date == "1/2/2007" | powerCons$Date == "2/2/2007", ]

# Histogram of global active power, then save a copy of the screen device
# as a PNG. (Label typo fixed: "killowatts" -> "kilowatts".)
hist(powerCons$Global_active_power, main = "Global Active Power", col = "red",
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency")
dev.copy(png, "plot1.png")
dev.off()
|
# Clean the raw LAX passenger-traffic table:
#   * parse ReportPeriod (m/d/Y strings) into Date,
#   * keep columns 2:6 (drops the leading id column),
#   * add a character Year column derived from ReportPeriod,
#   * drop the "Imperial Terminal" and "Misc. Terminal" rows.
dataclean <- function(df) {
  df$ReportPeriod <- as.Date(df$ReportPeriod, '%m/%d/%Y')
  df <- df[, 2:6]
  df$Year <- format(df$ReportPeriod, '%Y')
  # BUG FIX: the original used df[-which(df$Terminal == ...), ], which
  # removes ALL rows when no row matches (because -integer(0) selects
  # nothing). Logical subsetting handles the no-match case correctly.
  df <- df[!(df$Terminal %in% c('Imperial Terminal', 'Misc. Terminal')), ]
  return(df)
}
# Build a polar "rose" bar chart of total passenger traffic (in millions)
# per terminal for the requested year.
#   df   - data frame as produced by dataclean() (needs Year, Terminal,
#          Passenger_Count columns)
#   year - year to plot, matched against the character Year column
# Returns the ggplot object (callers print/save it).
roseGraph <- function(df, year) {
  yearly <- df[df$Year == year, ]
  # Total passengers per terminal; aggregate() names the grouping column
  # "yearly.Terminal" and the sum "x", which the aes() below relies on.
  terminal <- with(yearly, aggregate(Passenger_Count, data.frame(yearly$Terminal), sum))
  terminal$x <- terminal$x / 1000000
  ymax <- max(terminal$x)
  library(ggplot2)
  ps1 <- ggplot(terminal, aes(as.factor(yearly.Terminal), x, fill = as.factor(yearly.Terminal))) +
    geom_bar(stat = 'identity')
  ps1 <- ps1 + coord_polar() +
    theme_bw() +
    # FIX: axis-label typo ('Termianl' -> 'Terminal').
    labs(x = 'Terminal', y = 'Passenger Traffic (in million)') +
    # BUG FIX: title was hard-coded to "2013", ignoring the year argument.
    ylim(0, ymax) + ggtitle(as.character(year)) +
    theme(legend.position = 'none', legend.title = element_blank(),
          panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          panel.background = element_blank(), panel.border = element_blank())
  return(ps1)
}
|
edba3f40bb0747cb25c7dd1085e6cde668372058 | 8c7edf1335041a5073df672cbd99f08fcd1439c6 | /makeovermonday/2019w15/mm-2019-w15.R | ea84e8e296556c5d58f04d70b6b47c60ad401f3e | [] | no_license | dmitrijsk/data_viz | 69beeda3fb953bbe1c596a871fabd3c619404056 | 74d3b3680b1bfd931321bd32ae0f618275bb028b | refs/heads/master | 2020-04-29T17:18:36.122450 | 2019-04-15T01:02:21 | 2019-04-15T01:02:21 | 176,293,042 | 0 | 0 | null | 2019-04-08T19:46:05 | 2019-03-18T13:33:11 | R | UTF-8 | R | false | false | 5,485 | r | mm-2019-w15.R |
# MakeoverMonday, Week 15, 2019.
# Original plot: https://www.mercatus.org/statefiscalrankings
# MakeoverMonday Tweet: https://twitter.com/TriMyData/status/1114842772295622658
# Data: http://www.makeovermonday.co.uk/data/
# Packages and functions ----
library(tidyverse)
library(httr)
library(readxl)
library(ggrepel)
# Function provided by Erwan Le Pennec for the radar coord.
# Radar/spider-chart coordinate system built on top of ggplot2's CoordPolar.
#   theta     - which aesthetic maps to the angle ("x" or "y"); the other
#               aesthetic becomes the radius
#   start     - angle offset of the first axis, in radians
#   direction - 1 for clockwise-negative convention, -1 to reverse
# The only difference from coord_polar() is is_linear = TRUE, which keeps
# polygon edges straight between axes (the classic radar look) instead of
# curving them around the origin.
coord_radar <- function (theta = "x", start = 0, direction = 1) {
  theta <- match.arg(theta, c("x", "y"))
  r <- if (theta == "x") "y" else "x"
  ggproto("CordRadar", CoordPolar, theta = theta, r = r, start = start,
          direction = sign(direction),
          is_linear = function(coord) TRUE)
}
# Named palette keyed by the `stability` labels computed below.
COLOURS_STABILITY <- c("stable" = "#1DA7AA",
                       "volatile" = "#EC9247",
                       "other" = "grey90")
# Get data ----
# Download the state fiscal rankings workbook to a temp .xlsx and keep a
# semicolon-delimited copy under data/.
GET("https://query.data.world/s/43zkfzcdmvcgmp6bl3ju5kgwmxhnv6", write_disk(tf <- tempfile(fileext = ".xlsx")))
states_orig <- read_excel(tf)
write_delim(states_orig, path = "data/states.csv", delim = ";")
# Explore data ----
glimpse(states_orig)
skimr::skim(states_orig)
# Data transformation ----
# Select columns of interest.
# Keep the latest year, drop the "Average" pseudo-state, and shorten the
# five component ranking columns to single letters (C, L, B, S, T).
rankings <- states_orig %>%
  filter(Year == max(Year) & State != "Average") %>%
  mutate(State_with_rank = paste(overallrank, State, sep = ". ") %>% reorder(., overallrank),
         State = reorder(State, overallrank)) %>%
  select(State, C = cashrank, L = Lrrank, B = budgetrank, S = servicelvlrank, T = trustrank, overallrank, State_with_rank)
# Calculate volatility.
# Volatility = mean absolute deviation of the five component ranks from the
# overall rank; states in the top 10 are labelled stable/volatile, the rest
# "other" for the plots below.
rankings_with_mad <- rankings %>%
  left_join(rankings %>%
              select(State:T, overallrank) %>%
              gather(key, value, -State, -overallrank) %>%
              group_by(State) %>%
              summarize(mean_abs_dev = mean(abs(value - overallrank))),
            by = "State") %>%
  arrange(overallrank) %>%
  mutate(stability = case_when(overallrank > 10 ~ "other",
                               mean_abs_dev < 10 ~ "stable",
                               TRUE ~ "volatile"),
         stability_rank = min_rank(mean_abs_dev))
# Scatterplot of overall rank vs volatility, with state labels.
rankings_with_mad %>%
  ggplot(aes(x = overallrank, y = mean_abs_dev, label = State, color = mean_abs_dev)) +
  geom_point(show.legend = FALSE) +
  ggrepel::geom_text_repel(show.legend = FALSE) +
  scale_y_continuous(limits = c(0, 25)) +
  scale_color_gradient(low = "#f768a1", high = "#7a0177") +
  theme_classic() +
  labs(title = "Wyoming, ranked 6th of 50, has the most volatile overall fiscal rating",
       subtitle = "Volatily measured by mean absolute deviation of five components from the overall rating",
       x = "Overall fiscal rating",
       y = "Volatility in components of the overall rating",
       caption = " Makeover in R by Dmitrijs Kass @dmitrijsk | Data source: Ranking the States by Fiscal, 2018 edition, Condition Mercatus Research") +
  theme(plot.caption = element_text(hjust = 0, size = 8),
        text = element_text(colour = "grey30"))
ggsave(filename = "images-raw/scatterplot.png", height = 7, width = 14)
# Plot volatility of all states ----
# Horizontal bar chart of volatility for all 50 states, filled by the
# stable/volatile/other grouping defined above.
rankings_with_mad %>%
  mutate(State = reorder(State, -mean_abs_dev)) %>%
  ggplot(aes(x = State, y = mean_abs_dev, fill = stability)) +
  geom_col() +
  scale_y_continuous(breaks = seq(0, 20, by = 10), expand = c(0, 0, 0, 20), position = "right") +
  scale_fill_manual(limits = names(COLOURS_STABILITY), values = COLOURS_STABILITY) +
  coord_flip() +
  theme_classic() +
  theme(legend.position = "top") +
  theme(panel.grid.major.x = element_line(colour = "grey70", linetype = "dashed"),
        axis.ticks.y = element_blank(),
        axis.text.y = element_text(hjust = 0),
        axis.text.x = element_text(colour = "grey30"),
        axis.title.x = element_text(colour = "grey30", hjust = 0)) +
  labs(fill = "Overall fiscal ranking of the states:",
       y = "Volatility in five solvency rankings",
       x = NULL) +
  annotate(y = 23, x = 48, geom = "text", label = "Uniform fiscal condition", hjust = 0, size = 5, colour = "grey30") +
  annotate(y = 23, x = 3, geom = "text", label = "Volatile fiscal condition", hjust = 0, size = 5, colour = "grey30")
ggsave(filename = "images-raw/volatility_of_five_dimensions.png", height = 35, width = 25, units = "cm", dpi = 150)
# Radar plot for top 10 states ----
# One radar (via coord_radar defined above) per top-10 state, showing the
# five component ranks C/L/B/S/T.
rankings_with_mad %>%
  filter(overallrank %in% 1:10) %>%
  select(State_with_rank, C:T, stability) %>%
  gather(key, value, -State_with_rank, -stability, factor_key = TRUE) %>%
  arrange(key) %>%
  ggplot(aes(x = key, y = value, group = State_with_rank, label = value, fill = stability)) +
  geom_polygon(show.legend = FALSE) +
  scale_fill_manual(values = COLOURS_STABILITY) +
  scale_y_continuous(breaks = NULL) +
  coord_radar() +
  geom_text_repel(size = 3, color = "grey30") +
  facet_wrap(~State_with_rank, ncol = 2) +
  theme_light() +
  theme(axis.ticks.y = element_blank(),
        axis.text.y = element_blank(),
        axis.text.x = element_text(face = "bold", size = 11),
        panel.grid.major.x = element_line(linetype = "dashed"),
        strip.background = element_rect(fill = "white"),
        panel.border = element_blank(),
        panel.spacing.x = unit(2,"line"),
        panel.spacing.y = unit(2,"line"),
        strip.text = element_text(face = "bold", size = 12, color = "grey30")) +
  labs(x = NULL,
       y = NULL)
ggsave(filename = "images-raw/radar_top_ten.png", height = 33, width = 15, units = "cm", dpi = 150)
|
6e089b8115d8a79c44276110b0652c0b39c58e37 | 4aab4f50e67e46fa372b2149cec54080b286c362 | /R/templates.R | d5e3a6201bd079b1a7d8e7ed3409826be1adffb3 | [] | no_license | lwjohnst86/fost | 477b64bb9af227d030add0b60a5922c7fb9af0cf | 04568a4afb77a211b145421566d40081e36031fd | refs/heads/master | 2021-01-19T11:09:43.419715 | 2018-02-20T18:58:39 | 2018-02-20T18:58:39 | 63,644,853 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,717 | r | templates.R | #' Convert Rmd to a journal-formatted manuscript.
#'
#' @inheritParams rmarkdown::word_document
#'
#' @return Creates a Word document (or whatever is required) that is formatted
#' according to the requirements of a journal.
#' @seealso See \code{\link[rmarkdown]{word_document}} for a more detailed
#' explanation of what is going on and how to use these templates.
#' @export
templates <- function() {
    # Each immediate subdirectory of the installed rmarkdown/templates
    # folder is one template shipped with this package; return their names.
    template_root <- system.file('rmarkdown', 'templates', package = 'fost')
    list.dirs(template_root, full.names = FALSE, recursive = FALSE)
}
#' @rdname templates
#' @export
default_manuscript <- function(fig_width = 7, fig_height = 6, keep_md = TRUE) {
    # Word output using the package's default journal reference document.
    ref_docx <- template_reference('default', 'reference.docx')
    rmarkdown::word_document(
        fig_width = fig_width,
        fig_height = fig_height,
        keep_md = keep_md,
        fig_caption = TRUE,
        reference_docx = ref_docx
    )
}
#' @rdname templates
#' @export
diabetologia_manuscript <- function(fig_width = 7, fig_height = 6, keep_md = TRUE) {
    # Word output styled for Diabetologia; also applies the journal's CSL
    # citation style via pandoc's --csl flag.
    ref_docx <- template_reference('diabetologia', 'reference.docx')
    csl_file <- template_reference('diabetologia', 'diabetologia.csl')
    rmarkdown::word_document(
        fig_width = fig_width,
        fig_height = fig_height,
        keep_md = keep_md,
        fig_caption = TRUE,
        reference_docx = ref_docx,
        pandoc_args = c('--csl', csl_file)
    )
}
#' @rdname templates
#' @export
diabetes_manuscript <- function(fig_width = 7, fig_height = 6, keep_md = TRUE) {
    # Word output styled for the journal Diabetes.
    ref_docx <- template_reference('diabetes', 'reference.docx')
    rmarkdown::word_document(
        fig_width = fig_width,
        fig_height = fig_height,
        keep_md = keep_md,
        fig_caption = TRUE,
        reference_docx = ref_docx
    )
}
#' @rdname templates
#' @export
diabetes_care_manuscript <- function(fig_width = 7, fig_height = 6, keep_md = TRUE) {
    # Word output styled for Diabetes Care.
    ref_docx <- template_reference('diabetes_care', 'reference.docx')
    rmarkdown::word_document(
        fig_width = fig_width,
        fig_height = fig_height,
        keep_md = keep_md,
        fig_caption = TRUE,
        reference_docx = ref_docx
    )
}
#' @rdname templates
#' @export
# NOTE(review): diabetes_manuscript is defined twice in this file with an
# identical body; this later definition silently overrides the earlier one.
# One of the two copies should be removed.
diabetes_manuscript <- function(fig_width = 7, fig_height = 6, keep_md = TRUE) {
    # Word output styled for the journal Diabetes.
    ref_docx <- template_reference('diabetes', 'reference.docx')
    rmarkdown::word_document(
        fig_width = fig_width,
        fig_height = fig_height,
        keep_md = keep_md,
        fig_caption = TRUE,
        reference_docx = ref_docx
    )
}
#' @rdname templates
#' @export
jlr_manuscript <- function(fig_width = 7, fig_height = 6, keep_md = TRUE) {
    # Word output styled for the Journal of Lipid Research.
    ref_docx <- template_reference('jlr', 'reference.docx')
    rmarkdown::word_document(
        fig_width = fig_width,
        fig_height = fig_height,
        keep_md = keep_md,
        fig_caption = TRUE,
        reference_docx = ref_docx
    )
}
|
9df80db54f587b8e9ac5321342d01b471123f435 | c7e4a8d83741e3cad294dca0bf36b0d1f9563eac | /Simulation/Plot_Group_Contents.R | 1e84ad312e77cfc0150ccacc21e2e6c546b72194 | [] | no_license | Hillna-IMAS/Bioregion_Methods | 189e82cf4aa189e12d3a8b5b9b9569869c9a9ab6 | a7b10b2544041af9bf260f47a082c1d0d78ac385 | refs/heads/master | 2022-11-14T03:37:15.951593 | 2020-07-09T04:46:28 | 2020-07-09T04:46:28 | 161,733,339 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,542 | r | Plot_Group_Contents.R | #############################################################################################
## Compare community modelling methods for bioregionalisation of simulated species dataset
## March 2018. N.Hill with input from S. Woolley
#############################################################################################
# Modelling process
# 1) Run models,diagnostics, determine number of groups or set number of groups to 3
# 2) Plot Predicted Distribution of groups across simulation region
# 3) DESCRIBE CONTENTS OF GROUPS
# 4) Describe environment of groups
#######################
## Set up---
#######################
# Get required libraries
library(plyr) #data manipulation
library(tidyr) #data manipulation
library(ggplot2) #plotting
library(SDMTools) #weighted means and sd
library(rasterVis) #plotting rasters
setwd("C:\\Users\\hillna\\UTAS_work\\Projects\\Antarctic_BioModelling\\Analysis\\Community_modelling\\Comm_Analysis_Methods\\Simulation\\")
source("Simulation_Additional_Funcs.R")
#Load required files
#load("Sim_Setup/Many_covars_sim.RData")
load("Sim_Setup/Many_covars_sim_fin.RData")
#sim_data= simulated species probabilities, occurrences, species' groups for entire region
#sp_200 = matrix of occurrences of 30 species at 200 sites to use as species dataset for analysis
#env_200= matrix of corresponding environmental conditions at 200 site to use as environmental dataset for analysis
load("Sim_setup/sim_env_070518.RData")
#load("sim_env.RData")
# env= raster brick of environmental data for entire region
# env_dat= raster brick of environmental data for entire region converted to matrix
load("Results/models.RData")
load("Results/pred_clusters.RData")
species=paste0("Sp", 1:30)
##########################################################################################
## Take note of label switching from plot_pred_clusters
## Tabulate or get directly from models contents of groups- Limit to case where groups =3
## generate dotchart of average and se of species' occurence in each group
##########################################################################################
# A) Cluster environment only- can't do
# 2 Stage Models- cluster then predict:
# B) cluster bioloigcal data, predict clusters with random forests- tabulate from bio clusters
# 2 Stage Models- predict then cluster
# C) predict species using random forests, then cluster predictions- spatially match cluster prediction to sample & tabulate
# D) predict species using Mistnet, then cluster predictions- as above
# E) predict species using HMSC, then cluster predictions- as above
# F) predict dissimilarities using GDM, cluster predicted dissimilarities- as above
# G) predict biologically transformed environment with GF, cluster predictions- as above
# 1 Stage model-based: cluster and predict
# H) Species Archetype Models- direct from model
# I) Regions of Common Profile- direct from model and tabluating hard class results
## Account for label switching---
# correct previously identified label switching in hard class methods
hard_cluster3$env<-mapvalues(hard_cluster3$env ,from=c(1,3), to=c(3,1))
hard_cluster3$Sp_RF<-mapvalues(hard_cluster3$Sp_RF ,from=c(1,2), to=c(2,1))
hard_cluster3$HMSC<-mapvalues(hard_cluster3$HMSC ,from=c(1,2,3), to=c(2,1,3))
hard_cluster3$MNet<-mapvalues(hard_cluster3$MNet ,from=c(1,3), to=c(3,1))
hard_cluster3$GDM_Dissim_HC<-mapvalues(hard_cluster3$GDM_Dissim_HC ,from=c(1,2), to=c(2,1))
hard_cluster3$GDM_TransEnv_HC<-mapvalues(hard_cluster3$GDM_TransEnv_HC ,from=c(1,2), to=c(2,1))
hard_cluster3$bbGDM_Dissim_HC<-mapvalues(hard_cluster3$bbGDM_Dissim_HC ,from=c(1,2), to=c(2,1))
hard_cluster3$bbGDM_TransEnv_HC<-mapvalues(hard_cluster3$bbGDM_TransEnv_HC ,from=c(1,2), to=c(2,1))
hard_cluster3$GF<-mapvalues(hard_cluster3$GF ,from=c(1,2), to=c(2,1))
#check labelling indeed fixed
class_pal<-c("darkolivegreen4","grey", "orange1")
clust3<-stack()
mods<-c( "Sp_RF","HMSC" , "MNet" , "GDM_Dissim_HC","GDM_TransEnv_HC",
"bbGDM_Dissim_HC" , "bbGDM_TransEnv_HC", "GF", "env")
#create factor attribute layer with as many levels as number of clusters
rat<-data.frame(ID=1:3, Group=paste0("Bioregion", 1:3))
#ignore warning in following loop
for (i in 1:length(mods)){
hc_rast<-rasterize(env_dat[,1:2], env, field=hard_cluster3[,mods[i]])
hc_rast<-as.factor(hc_rast)
levels(hc_rast) <- rat
clust3<-stack(clust3, hc_rast)
}
names(clust3)<-mods
x11()
levelplot(clust3, col.regions=class_pal)
## 'True' Distribution
set.seed(42)
#simulation alphas
betamean <- 0.3
betabeta <- 15
betaalpha <- betamean/(1-betamean) * betabeta
prevalences <- rbeta( 30, betaalpha, betabeta)
alphas <- log( prevalences / ( 1-prevalences))
# simulation betas
means<-as.matrix(data.frame(temp=c(0.75,0,-0.5), O2=c(0,-0.5,0), NO3=c(0,0,0), sal=c(0,0,0),
depth=c(0, 0,0), chla=c(0,0,0), ssh=c(0,0,0), curr=c(0,0,0)))
#calculate average probability of group occurrence for each cell across region
mean_alpha<-mean(alphas)
true_lps<-mean_alpha+ as.matrix(sim_dat[,2:9])%*% t(means)
true_grps<-exp(true_lps)/(1 +exp(true_lps))
# from probability of group occurrence extract probabilities at sampling sites
#sites in simulation data= index of sites used in model building
site_true_probs<-true_grps[sites,]
site_true_HC<-apply(site_true_probs, 1, which.max)
## Hard class version of "true'
true_HC_vals<-get_match_vals( site_data= sp_200,
pred_cluster_vals=site_true_HC ,
site_index=1:200)
true_HC_contents_SE<-dotplot_sp_tab(mean_df = true_HC_vals[[1]],
error_df = true_HC_vals[[3]],
nGrp=3, species=species, method="Truth (Hard Class)")
### 2 stage methods: cluster biology then predict. Using RF probabalistic output
bio3_clust<-mapvalues(bio3_clust,from=c(1,3), to=c(3,1))
bio_clust_vals<-get_match_vals( site_data= sp_200,
pred_cluster_vals=bio3_clust ,
site_index=1:200)
bio_clust_contents_SE<-dotplot_sp_tab(mean_df = bio_clust_vals[[1]],
error_df = bio_clust_vals[[3]],
nGrp=3, species=species, method="BioHC_RF (Hard Class)")
### 2 stage methods: predict then heirarchical cluster
names(hard_cluster3)[4:7]<-c("SpRF_HC", "GF_HC", "MNet_HC", "HMSC_HC")
clusts<-names(hard_cluster3)[4:11]
hclust_contents_SE<-list()
for( i in seq_along(clusts)){
clust_vals<-get_match_vals( site_data= sp_200,
pred_cluster_vals=hard_cluster3[,clusts[i]] ,
site_index=sites)
hclust_contents_SE[[i]]<-dotplot_sp_tab(mean_df = clust_vals[[1]],
error_df = clust_vals[[3]],
nGrp=3, species=species, method=clusts[i])
}
## Species Archetype Models----
# get spatial group predictions and match initial sites
sam_probs<-sam3_pred$ptPreds[sites,]
sam_HC <- apply(sam_probs, 1, which.max)
sam_HC_vals<-get_match_vals( site_data= sp_200,
pred_cluster_vals=sam_HC ,
site_index=1:200)
sam_HC_contents_SE<-dotplot_sp_tab(mean_df = sam_HC_vals[[1]],
error_df = sam_HC_vals[[3]],
nGrp=3, species=species, method="SAM (Hard Class)")
sam_HC_contents_SE$Group<-mapvalues(sam_HC_contents_SE$Group, from=c(1,2,3), to=c(2,3,1))
## Region of Common Profile Models----
#from model parameters
rcp_calc_contents<-calc_prev(boot_obj = rcp3_boot, mod_obj=rcp3_mod)
rcp_contents_SD<-dotplot_sp_tab (mean_df=t(rcp_calc_contents$mean),
error_df= t(rcp_calc_contents$sd),
nGrp=3, species= species, method="RCP Coeficients")
#take account of label switching
rcp_contents_SD$Group<-mapvalues(rcp_contents_SD$Group ,from=c(1,2,3), to=c(3,1,2))
rcp_contents_SE<-rcp_contents_SD
#hard classes
#rcp_HC<-apply(rcp3_pred$ptPreds[site_ind,], 1, which.max)
rcp_HC<-apply(rcp3_pred$ptPreds[sites,], 1, which.max)
rcp_HC<-mapvalues(rcp_HC ,from=c(1,2,3), to=c(3,1,2))
rcp_HC_vals<-get_match_vals( site_data= sp_200,
pred_cluster_vals=rcp_HC ,
site_index=1:200)
rcp_HC_contents_SE<-dotplot_sp_tab(mean_df = rcp_HC_vals[[1]],
error_df = rcp_HC_vals[[3]],
nGrp=3, species=species, method="RCP (Hard Class)")
#####################################################
## Generate dotplot to compare contents of groups----
#####################################################
#dotplot of contents
#create blank row/separator for each approach (clunky way of getting ggplot2 to have the legend I want!)
gtp<-dotplot_sp_tab(mean_df = matrix(NA,nrow=30, ncol=3),
error_df = matrix(0,nrow=30, ncol=3),
nGrp=3, species=species, method="Two-stage: Group then Predict")
ptg<-dotplot_sp_tab(mean_df = matrix(NA,nrow=30, ncol=3),
error_df = matrix(0,nrow=30, ncol=3),
nGrp=3, species=species, method="Two-stage: Predict then Group")
os<-dotplot_sp_tab(mean_df = matrix(NA,nrow=30, ncol=3),
error_df = matrix(0,nrow=30, ncol=3),
nGrp=3, species=species, method="One-stage")
contents_SE<-rbind(true_HC_contents_SE, gtp, bio_clust_contents_SE, ptg, do.call(rbind, hclust_contents_SE),
os, rcp_contents_SE, rcp_HC_contents_SE, sam_HC_contents_SE)
#Tidy up names etc
contents_SE$Method<-factor(contents_SE$Method,
levels=c("RCP (Hard Class)","RCP Coeficients", "SAM (Hard Class)", "One-stage",
"GF_HC", "bbGDM_Dissim_HC", "bbGDM_TransEnv_HC", "GDM_TransEnv_HC", "GDM_Dissim_HC",
"MNet_HC", "HMSC_HC", "SpRF_HC", "Two-stage: Predict then Group",
"BioHC_RF (Hard Class)", "Two-stage: Group then Predict",
"Truth (Hard Class)"))
contents_SE$species<-factor(contents_SE$species,
levels=paste0("Sp", 1:30))
names(contents_SE)[2]<-"Prevalence"
names(contents_SE)[5]<-"Species"
contents_SE$Bioregion<-factor(contents_SE$Group, levels=c('1','2','3'),
labels=c("Bioregion_1", "Bioregion_2", "Bioregion_3"))
#truncate values to 0,1
contents_SE$lower<-ifelse(contents_SE$lower <0, 0,contents_SE$lower)
contents_SE$upper<-ifelse(contents_SE$upper >1, 1,contents_SE$upper)
p<-ggplot(data = contents_SE[contents_SE$Species %in% species[1:5],],
#p<-ggplot(data = contents_SE,
aes(x = Species, y = Prevalence, ymin = lower, ymax = upper, colour = Method)) +
scale_y_continuous(limits = c(-0.1,1.1)) +
geom_point(position = position_dodge(0.6), size=0.6) +
geom_errorbar(position = position_dodge(0.6), width = 0.5) +
coord_flip() +
scale_colour_manual(name="Method",
values = c( "darkblue", "cornflowerblue","cyan", "white",
"darkslategray", "darkgreen", "darkseagreen2","chartreuse3", "chartreuse1",
"orange", "yellow" , "pink", "white",
"purple" , "white", "red"))+ #need to add enough colour for sampling levels here!
theme_bw() +
theme(panel.grid.major.y = element_line(colour = "grey", linetype = "dashed"),
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
axis.text.y = element_text(face="italic"),
legend.text=element_text(size=rel(0.8)),
legend.key = element_blank()) +
facet_wrap( ~Bioregion, ncol=4, scales="free")
tiff(file="Results/Plots/Fig5.tiff", height=12, width=20, units="cm", res=600)
#tiff(file="Results/Plots/Grp_sp_SE.tiff", height=18, width=20, units="cm", res=1000)
p + guides(color = guide_legend(reverse = TRUE))
dev.off()
|
19d57c67ba913a66ec2155807faff2079cd6b1ea | 2e0b18721959cf04addbc1b9f07188b5ce352ebc | /man/futurize.Rd | 95397e779d2b56b9d2eb2dab4df5ab787ef2d26b | [
"MIT"
] | permissive | aclemen1/modulr | cde8ed13e708d8207362006c0dc38f4cc81edb65 | 0162dde8a7281380b82d2446841520f3299f87df | refs/heads/master | 2023-05-25T19:41:37.659195 | 2023-05-18T10:42:45 | 2023-05-18T10:43:37 | 41,587,794 | 9 | 0 | null | null | null | null | UTF-8 | R | false | true | 819 | rd | futurize.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/future.R
\name{futurize}
\alias{futurize}
\title{Tweak a module to a future.}
\usage{
futurize(original_name, name = paste(original_name, "future", sep = "/"),
dependencies = get_dependencies(original_name), strategy = NULL,
lazy = FALSE, ...)
}
\arguments{
\item{original_name}{A module name.}
\item{name}{A new module name.}
\item{dependencies}{A list of dependencies.}
\item{strategy}{The evaluation function (or name of it) to use for resolving
a future. If NULL, then the current strategy is returned.}
\item{lazy}{Is the strategy lazy?}
\item{...}{Further arguments passed to \code{\link{get_provider}}.}
}
\description{
Tweak a module to a future.
}
\section{Warning}{
This is an experimental feature subject to changes.
}
|
05401434d9272cb3990c022b83a1c0daffd2a4e4 | ead6d5613c2c5e53c31e73de65bf384368728597 | /02 Exploratory Data Analysis/Final Project/plot4.R | a7d6e806e1847ee56c8c9d070fd53bf41581a4a3 | [] | no_license | GRawhideMart/DataScience | 21efd5068b9ea5a32d62fd28effb76cf084002e3 | b8e7d1333a8840a4be0a59fea1a92d5af627ab43 | refs/heads/master | 2022-11-22T21:13:11.426114 | 2020-07-27T22:41:30 | 2020-07-27T22:41:30 | 262,305,064 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,161 | r | plot4.R | if(!file.exists('./Data.zip') & !file.exists('./Source_Classification_Code.rds') & !file.exists('./summarySCC_PM25.rds')) {
download.file('https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip', './Data.zip', method = 'curl')
unzip('./Data.zip')
file.remove('./Data.zip')
}
library(ggplot2)
# BUG FIX: the pipeline below uses as_tibble(), %>% and the dplyr verbs
# (select/filter/inner_join/group_by/summarize), none of which are available
# with only ggplot2 attached; the script errored without dplyr.
library(dplyr)
points <- as_tibble(readRDS('./Source_Classification_Code.rds')) %>% #Read the SCC table
  select('SCC', 'Short.Name') %>% # Select the columns about name and the foreign key
  filter(grepl('[Cc]oal',Short.Name)) %>% # Filter the rows containing coal
  inner_join(as_tibble(readRDS('./summarySCC_PM25.rds')), by='SCC') %>% # Join table by SCC
  group_by(year, type) %>% # Group by year and type
  summarize(totalEmissions = sum(Emissions / 10^6)) # Find total emissions by year and type
# Dodged bar chart of coal-combustion emissions by year and source type.
png('plot4.png', width = 640,height = 480)
g <- ggplot(data=points, aes(x=year, y=totalEmissions, fill=type))
print(g + geom_col(position = position_dodge(), width = 1) + ggtitle('Coal Combustion Emissions in U.S.: 1999-2008') + xlab('Year') + ylab('PM2.5 Emissions[MTons]') + labs(fill='Type') + theme_light(base_family = 'Cantarell'))
dev.off()
082be642e94984c82a332d59ae0b38b8c8431c20 | 872a1187f747a190c3b2cf716b3fcc2332c8c53d | /man/testInteger.Rd | f068c327284c5b9e229fd0c587c9af16c7aad423 | [
"MIT"
] | permissive | johnerichumphries/datasimr | 7843ca3f75cc56d5d989f77892c9f1acd7abf910 | 10733c82fc1e8a4664ec1db603b2ddd150dffc8c | refs/heads/master | 2020-05-14T23:39:43.056644 | 2015-05-04T22:49:18 | 2015-05-04T22:49:18 | 33,015,138 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 462 | rd | testInteger.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{testInteger}
\alias{testInteger}
\title{Check if everything in a vector of numbers are integers}
\usage{
testInteger(x)
}
\arguments{
\item{x}{a numbe or collection of numbers}
}
\value{
Returns \code{TRUE} if all values in vector ar integers and otherwise returns \code{FALSE}
}
\description{
Check if everything in a vector of numbers are integers
}
\examples{
testInteger(3)
testInteger(c(10, 1))
}
|
c3c4ffd5eef8d3b42f78ffaa110e07346cd26ab3 | d5c066858e325da60e6ec9703787d2a92ed6f69b | /FD simulation TIMER.R | ed5d81873b52c7c589dc2bf84979bc9cf025f3fb | [] | no_license | AndrewTungYep/Models-for-the-within-host-dynamics-of-bacterial-infections | f611b6e825895f19db05fc5b503dfe60d40d6b72 | 2021c144c56b341f5c3ea557f2699ea60e259751 | refs/heads/master | 2021-01-22T19:54:26.211863 | 2017-09-04T09:11:21 | 2017-09-04T09:11:21 | 85,257,507 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,061 | r | FD simulation TIMER.R | initial.pop<-1000
gen.number<-400
r.num<-64000
g.num<-80000
g.prod<-1000
p.change<-0.005
partition<-0.013
#initial population setup
pop.table<-as.data.frame(matrix(,initial.pop,7))
colnames(pop.table)<-c("green","red","ratio","division time","divisions","bacteria number","time since division")
pop.table[,1]<-rpois(initial.pop,g.num)
pop.table[,2]<-rpois(initial.pop,r.num)
pop.table[,3]<-pop.table[,2]/pop.table[,1]
pop.table[,4]<- #setup of initial populations and persistors
#sample(1:50,initial.pop,TRUE) #change for different division time functions
runif(initial.pop,56,600)
pop.table[,5]<-0
pop.table[,6]<-c(1:initial.pop)
pop.table[,7]<-as.integer(pop.table[,4]*runif(initial.pop,0,1))
#generation history setup
history<- data.frame(matrix(,dim(pop.table)[1],8))
colnames(history)<-c("green","red","ratio","persistor?","divisions","bacteria number","time since division","timestep number")
history[1:dim(pop.table)[1],1:7]<-pop.table
history[,8]<-1 #recording inital generation in history data frame
#loop representing a discrete timestep
for (j in 1:gen.number) {
f<-rbinom(dim(pop.table)[1],pop.table[,1],p.change)
pop.table[,1]<-pop.table[,1]+rpois(dim(pop.table)[1],g.prod)-f
pop.table[,2]<-pop.table[,2]+f
pop.table[,7]<-pop.table[,7]+1
dividers<-pop.table[pop.table[,7]>=pop.table[,4],] #data frame for bacteria which will divide
#operations for dividing bacteria
if (dim(dividers)[1]>0) {
next.gen<-data.frame(matrix(,dim(dividers)[1]+dim(pop.table)[1],7))
dividers[,8]<-rnorm(dim(dividers)[1],0.5,partition)
dividers[,9]<-rbinom(dim(dividers)[1],dividers[,1],dividers[,8])
dividers[,10]<-rbinom(dim(dividers)[1],dividers[,2],dividers[,8])
#assigning red and green values for the next generation
next.gen[1:(dim(dividers)[1]*2),1:2]<-matrix(c(dividers[,9],(dividers[,1]-dividers[,9]),
dividers[,10],(dividers[,2]-dividers[,10])),(dim(dividers)[1]*2),2,byrow=FALSE)
next.gen[1:(dim(dividers)[1]*2),3]<-next.gen[1:(dim(dividers)[1]*2),2]/next.gen[1:(dim(dividers)[1]*2),1]
next.gen[1:(dim(dividers)[1]*2),4]<-rep(dividers[1:(dim(dividers)[1]),4],2)
next.gen[1:(dim(dividers)[1]*2),6]<-rep(dividers[1:(dim(dividers)[1]),6],2)
next.gen[1:(dim(dividers)[1]*2),5]<-rep((dividers[1:(dim(dividers)[1]),5]+1),2)
next.gen[1:(dim(dividers)[1]*2),7]<-0
next.gen[(dim(dividers)[1]*2+1):(dim(dividers)[1]+dim(pop.table)[1]),c(1:7)]<-pop.table[pop.table[,7]<pop.table[,4],c(1:7)]
pop.table<-next.gen
}
#updating of history table
colnames(pop.table)<-c("green","red","ratio","persistor?","divisions","bacteria number","time since division")
pop.table[,3]<-pop.table[,2]/pop.table[,1]
h<-history
history<-data.frame(matrix(,(dim(history)[1]+dim(pop.table)[1]),8))
history[1:length(h[,1]),1:8]<-h
history[(length(h[,1])+1):length(history[,1]),1:7]<-pop.table
history[(length(h[,1])+1):length(history[,1]),8]<-j+1 #updating history with the new generation
} |
4b88dd02d21733e1c53684b8444e4d92213933e3 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/soc.ca/examples/balance.Rd.R | ab97bc6ef8c7dc51cf28ac302fc1ed85c9aa5f60 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 171 | r | balance.Rd.R | library(soc.ca)
### Name: balance
### Title: Contribution balance
### Aliases: balance
### ** Examples
# Run the soc.ca package's example code; it creates the `result` object
# (a correspondence-analysis result) used by the calls below.
# NOTE(review): soc.ca is a third-party package and must be attached first.
example(soc.ca)
# Contribution balance of `result` with default settings.
balance(result)
# Same call with act.dim = 3 — presumably restricts to the first 3 active
# dimensions; verify against soc.ca::balance() documentation.
balance(result, act.dim = 3)
|
139d5112c134815060a06cc521c4c527e9d9d678 | 608697bb4f083f6ae21e850984d3b99711ecc8d8 | /man/execute.Rd | 6a7d3e8e6c0993cb1040a71244d4db42bf4771a6 | [
"Apache-2.0"
] | permissive | Yovoss/Iris | 172b18ac245962c69a121f5fe7e4d965599fb91c | 6bee976057dec90ee2d8bb8b68bfcb598aa4ea61 | refs/heads/master | 2020-07-02T08:25:23.510976 | 2016-07-26T18:13:54 | 2016-07-26T18:13:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,998 | rd | execute.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StudySpecific.R
\name{execute}
\alias{execute}
\title{Execute OHDSI Iris}
\usage{
execute(dbms, user = NULL, domain = NULL, password = NULL, server,
port = NULL, cdmSchema, cdmVersion = 5, file)
}
\arguments{
\item{dbms}{The type of DBMS running on the server. Valid values are
\itemize{
\item{"mysql" for MySQL}
\item{"oracle" for Oracle}
\item{"postgresql" for PostgreSQL}
\item{"redshift" for Amazon Redshift}
\item{"sql server" for Microsoft SQL Server}
\item{"pdw" for Microsoft Parallel Data Warehouse (PDW)}
\item{"netezza" for IBM Netezza}
}}
\item{user}{The user name used to access the server. If the user is not specified for SQL Server,
Windows Integrated Security will be used, which requires the SQL Server JDBC drivers
to be installed.}
\item{domain}{(optional) The Windows domain for SQL Server only.}
\item{password}{The password for that user}
\item{server}{The name of the server}
\item{port}{(optional) The port on the server to connect to}
\item{cdmSchema}{Schema name where your patient-level data in OMOP CDM format resides}
\item{cdmVersion}{Define the OMOP CDM version used: currently supports 4 and 5. Default = 5}
\item{file}{(Optional) Name of local file to place results; make sure to use forward slashes (/)}
}
\value{
Study results are placed in CSV format files in specified local folder and returned
as an R object class \code{OhdsiStudy} when sufficiently small. The properties of an
\code{OhdsiStudy} may differ from study to study.
}
\details{
This function executes OHDSI Iris.
Iris computes some basic parameters about a dataset.
}
\examples{
\dontrun{
# Run study
execute(dbms = "postgresql",
user = "joebruin",
password = "supersecret",
server = "myserver",
cdmSchema = "cdm_schema",
cdmVersion = 4)
# Email result file
email(from = "collaborator@ohdsi.org",
dataDescription = "CDM4 Simulated Data")
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.