blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
13fae0e17fc506c2cafea74ab03efa0936703d03 | bc57ef0e4eda24fbac74a6b99221a41205768493 | /R/support.R | 87860f0350d828a94da22e4a1b99841b41b6ef97 | [] | permissive | jongbinjung/RMetalog | b7c746559901d93ffa7b1621939d6fb7aa6cedfd | b7337bad527a0f3044427f311d1a5bad0a1fc4f0 | refs/heads/master | 2021-04-27T00:44:56.246401 | 2018-09-19T19:39:20 | 2018-09-19T19:39:20 | 122,658,815 | 0 | 0 | MIT | 2018-02-23T18:36:44 | 2018-02-23T18:36:44 | null | UTF-8 | R | false | false | 6,656 | r | support.R | # Supporting functions called inside the metalog function call
# Build the quantiles through a base function
MLprobs <- function(x, step_len) {
  # Compute empirical cumulative probabilities for a numeric sample and,
  # for long samples, down-convert to a representative quantile grid.
  #
  # Args:
  #   x: numeric vector of observations (integer vectors are accepted too;
  #      the old `class(x) != 'numeric'` test wrongly rejected them).
  #   step_len: grid spacing used when down-sampling long inputs; 0.01
  #     gives a 109-element grid with finer resolution in the tails.
  #
  # Returns:
  #   A data.frame whose first column holds the sorted values (possibly
  #   reduced to quantiles) and whose `probs` column holds the matching
  #   cumulative probabilities.
  if (!is.numeric(x)) {
    # NOTE(review): returning print(...) hands callers a character string on
    # bad input instead of raising an error; kept for backward compatibility.
    return(print('Error: input must be a numeric vector!'))
  }
  l <- length(x)
  # Need to sort the values for the quantiles to work out; x[order(x)]
  # keeps any NAs at the end, matching the previous order()-based code.
  x <- data.frame(x = x[order(x)])
  # Midpoint plotting positions: probs[i] = (i - 0.5) / l.
  # Vectorized equivalent of the old incremental loop
  # (probs[1] = 0.5/l, probs[i] = probs[i-1] + 1/l).
  x$probs <- (seq_len(l) - 0.5) / l
  # If the data is very long we down convert to a smaller but representative
  # vector using the step_len; default is 0.01 which is a 109 element vector
  # with fine values in the tail (tailstep)
  if (nrow(x) > 100) {
    y <- seq(step_len, (1 - step_len), step_len)
    tailstep <- (step_len / 10)
    y <- c(seq(tailstep, (min(y) - tailstep), tailstep),
           y,
           seq((max(y) + tailstep), (max(y) + tailstep * 9), tailstep))
    x_new <- stats::quantile(x[, 1], probs = y)
    x <- as.data.frame(x_new)
    x$probs <- y
  }
  return(x)
}
pdfMetalog <- function(a,
                       y,
                       t,
                       bounds = c(),
                       boundedness = 'u') {
  # Metalog probability density evaluated at cumulative probability y.
  #
  # Args:
  #   a           - vector of metalog coefficients a[1], a[2], ...
  #   t           - number of metalog terms to use
  #   bounds      - lower/upper bounds; only read for bounded variants
  #   boundedness - 'u' unbounded, 'sl' semi-bounded lower,
  #                 'su' semi-bounded upper, 'b' bounded
  #
  # Returns: the density at y, i.e. the reciprocal of dQ/dy (Q = quantile
  # function), rescaled by the Jacobian of the boundedness transform.
  #
  # NOTE(review): this body is duplicated line for line in
  # pdfMetalog_density(); keep the two in sync if either changes.
  d <- y * (1 - y)       # y(1-y), denominator of d(logit)/dy
  f <- (y - 0.5)
  l <- log(y / (1 - y))  # logit(y)
  # Initiate pdf
  # For the first three terms: accumulate dQ/dy term by term.
  # (The a[1] term is constant in y, so it contributes nothing here.)
  x <- (a[2] / d)
  if (a[3] != 0) {
    x <- x + a[3] * ((f / d) + l)
  }
  # For the fourth term: d/dy of a[4]*f is just a[4]
  if (t > 3) {
    x <- x + a[4]
  }
  # Initalize some counting variables: o tracks the power used by
  # odd-indexed terms, e the power used by even-indexed terms
  e <- 1
  o <- 1
  # For all other terms greater than 4
  if (t > 4) {
    for (i in 5:t) {
      if (i %% 2 != 0) {
        # iff odd: derivative of a[i] * f^(o+1)
        x <- x + ((o + 1) * a[i] * f^o)
        o <- o + 1
      }
      if (i %% 2 == 0) {
        # iff even: derivative of a[i] * f^(e+1) * l
        x <- x + a[i] * (((f^(e + 1)) / d) + (e + 1) * (f^e) * l)
        e <- e + 1
      }
    }
  }
  # Some change of variables here for boundedness:
  # density = 1 / (dQ/dy), then rescaled for the chosen support.
  x <- (x^(-1))
  if (boundedness != 'u') {
    # M is the *unbounded* quantile at y, needed by the Jacobians below
    M <- quantileMetalog(a, y, t, bounds = bounds, boundedness = 'u')
  }
  if (boundedness == 'sl') {
    x <- x * exp(-M)
  }
  if (boundedness == 'su') {
    x <- x * exp(M)
  }
  if (boundedness == 'b') {
    x <- (x * (1 + exp(M))^2) / ((bounds[2] - bounds[1]) * exp(M))
  }
  return(x)
}
# Quantile function
quantileMetalog <- function(a,
                            y,
                            t,
                            bounds = c(),
                            boundedness = 'u') {
  # Evaluate the metalog quantile series with t terms at cumulative
  # probability y, then map the result onto the requested support.
  centered <- y - 0.5        # recurring (y - 0.5) basis term
  logit <- log(y / (1 - y))  # logit of y

  # First three series terms
  q <- a[1] + a[2] * logit + a[3] * centered * logit
  # Fourth term
  if (t > 3) {
    q <- q + a[4] * centered
  }
  # Remaining terms: even-indexed coefficients multiply f^e * l,
  # odd-indexed coefficients multiply f^o, with powers starting at 2
  if (t > 4) {
    odd_pow <- 2
    even_pow <- 2
    for (term in 5:t) {
      if (term %% 2 == 0) {
        q <- q + a[term] * centered^even_pow * logit
        even_pow <- even_pow + 1
      } else {
        q <- q + a[term] * centered^odd_pow
        odd_pow <- odd_pow + 1
      }
    }
  }

  # Map from the unbounded metalog onto the requested support
  if (boundedness == 'sl') {
    q <- bounds[1] + exp(q)
  } else if (boundedness == 'su') {
    q <- bounds[2] - exp(-q)
  } else if (boundedness == 'b') {
    q <- (bounds[1] + bounds[2] * exp(q)) / (1 + exp(q))
  }
  q
}
# Function for returning the matrix of differentiation terms
diffMatMetalog <- function(term_limit, step_len) {
  # Build the matrix of dQ/dy basis terms of the metalog quantile function,
  # evaluated on the probability grid y = step_len, 2*step_len, ..., 1 - step_len.
  # Columns are interleaved as (+term1, -term1, +term2, -term2, ...), matching
  # the layout produced by the previous cbind loop.
  #
  # Args:
  #   term_limit: number of metalog terms.
  #   step_len:   grid spacing for the y values.
  #
  # Returns: a length(y) x (2 * n_terms) numeric matrix (without dimnames).
  #
  # Rewritten to preallocate via vapply() instead of growing a data.frame
  # with rbind() inside the loop (which was O(n^2)), and to avoid reusing
  # the outer loop index `i` for the inner term loop.
  y <- seq(step_len, (1 - step_len), step_len)

  # Derivative basis terms of the quantile series at a single grid point y0.
  deriv_row <- function(y0) {
    d <- y0 * (1 - y0)
    f <- (y0 - 0.5)
    l <- log(y0 / (1 - y0))
    row <- c(0, 1 / d)            # a1 term is constant; a2 term -> 1/d
    if (term_limit > 2) {
      row <- c(row, (f / d) + l)  # a3 term
    }
    if (term_limit > 3) {
      row <- c(row, 1)            # a4 term
    }
    if (term_limit > 4) {
      e <- 1
      o <- 1
      for (j in 5:term_limit) {
        if (j %% 2 != 0) {
          # odd-indexed term: derivative of f^(o+1)
          row <- c(row, (o + 1) * f^o)
          o <- o + 1
        } else {
          # even-indexed term: derivative of f^(e+1) * l
          row <- c(row, ((f^(e + 1)) / d) + (e + 1) * (f^e) * l)
          e <- e + 1
        }
      }
    }
    row
  }

  # Rows always contain at least the a1 and a2 entries, hence the max().
  n_terms <- max(term_limit, 2)
  Diff <- t(vapply(y, deriv_row, numeric(n_terms)))

  # Interleave positive and negated columns: (+c1, -c1, +c2, -c2, ...)
  new_Diff <- matrix(0, nrow = nrow(Diff), ncol = 2 * ncol(Diff))
  new_Diff[, seq(1, 2 * ncol(Diff), by = 2)] <- Diff
  new_Diff[, seq(2, 2 * ncol(Diff), by = 2)] <- -Diff
  return(new_Diff)
}
newtons_method_metalog <- function(m, q, term) {
  # Iteratively solve Q(y) = q for y, where Q is the metalog quantile
  # function of fitted object `m` using `term` terms.  Despite the name,
  # the update is a damped gradient-style step
  #   y <- y - alpha * f(y) * f'(y)
  # rather than a textbook Newton step f(y) / f'(y).
  step_size <- 0.01
  tolerance <- 0.0000001
  change <- 0.1
  y_current <- 0.5          # start the search at the median
  coef_col <- paste0('a', term)
  coefs <- m$A[, coef_col]  # coefficient column for this term count

  iteration <- 1
  while (change > tolerance) {
    residual <- (quantileMetalog(coefs, y_current, term,
                                 m$params$bounds, m$params$boundedness) - q)
    slope <- pdfMetalog(coefs, y_current, term,
                        m$params$bounds, m$params$boundedness)
    y_proposed <- y_current - step_size * (residual * slope)
    # Convergence is measured before the iterate is clamped, as before.
    change <- abs(y_proposed - y_current)
    # Keep the iterate strictly inside (0, 1)
    if (y_proposed > 1) {
      y_proposed <- 0.99999
    }
    if (y_proposed < 0) {
      y_proposed <- 0.000001
    }
    y_current <- y_proposed
    iteration <- iteration + 1
    if (iteration > 10000) {
      stop(paste0('Approximation taking too long, quantile value: ', q,
                  ' is to far from distribution median. Try plot() to see distribution.'))
    }
  }
  return(y_current)
}
pdfMetalog_density <- function(m, t, y) {
  # Metalog density for a fitted metalog object.
  #
  # Args:
  #   m: fitted metalog object holding the coefficient matrix m$A (columns
  #      named 'a2', 'a3', ... by term count) and m$params with the
  #      bounds/boundedness used for the fit.
  #   t: number of metalog terms to use (selects column paste0('a', t)).
  #   y: cumulative probability at which to evaluate the density.
  #
  # Returns: the density at y.
  #
  # The previous body duplicated pdfMetalog() line for line; delegating to
  # it keeps the two implementations from drifting apart.
  avec <- paste0('a', t)
  a <- m$A[, avec]
  pdfMetalog(a, y, t,
             bounds = m$params$bounds,
             boundedness = m$params$boundedness)
}
|
883c5374d2e6c21e17f9e9cc1b3f049ed265f392 | a1406bb2d1eb0d07b8830c27a8698d637c962a33 | /other_codes/display_name.R | 074608845a9ba3139f11808530c27aced2158071 | [] | no_license | yashg1/r_practice | 093111ae1c641ad5bbe0bc8634f15c5c7e8d28ce | 6cc087f6fc236e9cb289a7925b1b8662b206ffda | refs/heads/master | 2021-05-14T16:33:50.355498 | 2018-11-26T07:09:12 | 2018-11-26T07:09:12 | 116,023,587 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,031 | r | display_name.R | #Function to combine first and last names
#Function to combine first and last names
display_name <- function(p){
  # Build a normalized display name ("firstnamelastname": lower case, no
  # spaces, no punctuation) for each person and prepend it as a column.
  #
  # Args:
  #   p: data.frame (or list) with columns FirstName and Surname.
  #
  # Returns: data.frame with a new first column `dis_name` followed by the
  #   original columns of p.
  #
  # Cleaned up relative to the original: the pre-allocated dis_name vector
  # that was immediately overwritten, and the unused duplicated_index /
  # first_charac values (only referenced by commented-out old code), have
  # been removed.  The stringr calls were replaced with their exact base-R
  # equivalents, so the function no longer needs an external package.
  stopifnot(is.list(p))
  first_name <- as.character(p$FirstName)
  last_name <- as.character(p$Surname)
  # Concatenate, strip spaces, strip punctuation, then lower-case.
  dis_name <- paste(first_name, last_name, sep = "")
  dis_name <- gsub(" ", "", dis_name, fixed = TRUE)
  dis_name <- gsub("[[:punct:]]", "", dis_name)
  dis_name <- tolower(dis_name)
  new <- cbind(dis_name, p)
  return(new)
}
## Old Code
#dis_name[first_name %in% ""] <- last_name[first_name %in% ""]
##
# dis_name[duplicated_index] <- paste(first_charac[duplicated_index],last_name[duplicated_index], sep = ".")
# next_duplicate <- duplicated_index-1
# dis_name[next_duplicate] <- paste(first_charac[next_duplicate],last_name[next_duplicate], sep = ".")
##
#dis_name[dis_name %in% ""] <- last_name[dis_name %in% ""]
|
091c1415097da0327c9529f8895b428fc125d802 | 62591e0ce9e488fe5faf0279c54b13284685b2b5 | /RPackage/R/sequentialImportanceSampling.R | bebafa51d053cce4cba6a375ca584324e0f3b233 | [] | no_license | rohan-shah/largeComponent | 71ba8e2d88963d1320155c94fda39ca4a8a2d548 | 02d5c5eca7013f2fcc8cfceff920dea0f7d2bd46 | refs/heads/master | 2021-01-12T15:41:32.264551 | 2016-11-21T07:15:03 | 2016-11-21T07:15:03 | 71,853,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,084 | r | sequentialImportanceSampling.R | #' @export
sequentialImportanceSampling <- function(probabilities, n, seed, graph, componentSize, initialRadius, vertexPositions = matrix(data=vector(length=0),nrow=0))
{
  # Run the compiled sequential importance sampling routine from the
  # largeComponent package.  This wrapper only validates arguments, times
  # the .Call, and packages the result as an S4 object; all real work
  # happens in the C++ "sequentialMethod" entry point.
  #
  # Arguments:
  #   probabilities   numeric vector, each entry in [0, 1]
  #   n               number of samples/particles (single integer)
  #   seed            RNG seed forwarded to the compiled code
  #   graph           an "igraph", "graphNEL" or "graphAM" object
  #   componentSize   target component size (single integer)
  #   initialRadius   initial radius parameter (single integer)
  #   vertexPositions optional numeric matrix of vertex coordinates
  #                   (defaults to an empty matrix)
  #
  # Returns a "sequentialImportanceResult" S4 object holding the start/end
  # timestamps, the matched call, the estimate (as an Rmpfr mpfr number),
  # and the diagnostics returned by the compiled routine.
  #
  # Required arguments are checked for missingness first so the user gets a
  # specific message instead of R's generic "argument ... is missing" error.
  if(missing(graph))
  {
    stop("Input graph cannot be missing")
  }
  if(missing(probabilities))
  {
    stop("Input probabilities cannot be missing")
  }
  if(missing(n))
  {
    stop("Input n cannot be missing")
  }
  if(missing(seed))
  {
    stop("Input seed cannot be missing")
  }
  if(missing(componentSize))
  {
    stop("Input componentSize cannot be missing")
  }
  if(missing(initialRadius))
  {
    stop("Input initialRadius cannot be missing")
  }
  # Scalar/type checks.  The integer checks below accept values within
  # 1e-3 of a whole number -- presumably to tolerate integers that arrive
  # as doubles (e.g. 5 stored as 5.0).
  if(length(initialRadius) != 1 || mode(initialRadius) != "numeric")
  {
    stop("Input `initialRadius' must be a single number")
  }
  if(length(componentSize) != 1 || mode(componentSize) != "numeric")
  {
    stop("Input `componentSize' must be a single number")
  }
  if(abs(componentSize - round(componentSize)) > 1e-3)
  {
    stop("Input `componentSize' must be an integer")
  }
  if(abs(initialRadius - round(initialRadius)) > 1e-3)
  {
    stop("Input `initialRadius' must be an integer")
  }
  if(length(n) != 1 || mode(n) != "numeric")
  {
    stop("Input `n' must be a single number")
  }
  if(abs(n - round(n)) > 1e-3)
  {
    stop("Input `n' must be an integer")
  }
  if(mode(probabilities) != "numeric")
  {
    stop("Input `probabilities' must be a numeric vector")
  }
  if(any(probabilities < 0 | probabilities > 1))
  {
    stop("Input `probabilities' must be between 0 and 1")
  }
  if(class(graph) %in% c("igraph", "graphNEL", "graphAM"))
  {
    # Time the compiled sampler; all inputs are forwarded unchanged.
    start <- Sys.time()
    result <- .Call("sequentialMethod", graph, probabilities, n, seed, componentSize, initialRadius, vertexPositions, PACKAGE="largeComponent")
    end <- Sys.time()
  }
  else
  {
    stop("Input graph must have class \"igraph\", \"graphAM\" or \"graphNEL\"")
  }
  call <- match.call()
  # mpfr() promotes the estimate to arbitrary precision (Rmpfr package).
  return(new("sequentialImportanceResult", start = start, end = end, call = call, estimate = mpfr(result$estimate), distinctParticles = result$distinctParticles, levelProbabilities = result$levelProbabilities, resamplingCounts = result$resamplingCounts))
}
|
f67c60303bcf97e858127db336958282040abea5 | 9c41c04c95796505e14ad87a1a4b86332957c43c | /Documents/networks-pres/bandwidth.r | 24711c402ca0d28941882b115a183368fd6d1fe6 | [] | no_license | fakedrake/Configurations-and-Scripts | a151fb128f60d36bd3e76797788a1cfaf932b6e2 | 797aa474d6396ec37e3ad80eccf49770b05593bb | refs/heads/master | 2020-06-05T04:10:30.473911 | 2011-05-12T17:00:52 | 2011-05-12T17:00:52 | 1,609,805 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 161 | r | bandwidth.r | pdf(file="bandwidth.pdf")
# Yearly bandwidth figures, labelled by year.
data = c(1.3, 2.5, 3.6, 5.1, 7.05, 11.5, 19.1, 31)
names(data) = c(2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009)
# Plot against the years themselves; `plot(data)` alone ignores the names
# and puts the vector index (1..8) on the x-axis.
plot(as.numeric(names(data)), data, xlab = "Year", ylab = "Bandwidth")
dev.off() |
0df9828e77c18982265f4488bab276ac489ea255 | ddcd6c63b01441d8d27c01c3f5fee827dcb6f708 | /man/Funkcja1.Rd | 32ea80be04a239026b7d20ad3039d8b65ff6453a | [
"MIT"
] | permissive | K4krasin/ProjektKK | bda9ca1957411f7327b0e2cafd9fdf71cc2bb52b | bd0d843bc3042a2038b4d564c1dd59a18897b126 | refs/heads/main | 2023-01-29T06:19:31.585611 | 2020-12-13T19:11:54 | 2020-12-13T19:11:54 | 320,668,188 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 784 | rd | Funkcja1.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Funkcja1.R
\name{Funkcja1}
\alias{Funkcja1}
\title{Funkcja oblicza emisje dla aut osobowych poruszajacych sie na diesel, z technologia DPF.
Pod uwage brane sa zanieczyszczenia EC oraz CO.}
\usage{
Funkcja1(
dane = input,
kategoria = "Passenger Cars",
paliwo = "Diesel",
euro = "Euro 4",
technologia = "DPF",
mode = "",
substancja = c("EC", "CO")
)
}
\arguments{
\item{dane}{dane wejsciowe}
\item{kategoria}{rodzaj auta}
\item{paliwo}{rodzaj paliwa}
\item{euro}{euro}
\item{mode}{mode}
\item{substancja}{zanieczyszczenie}
}
\value{
}
\description{
Funkcja oblicza emisje dla aut osobowych poruszajacych sie na diesel, z technologia DPF.
Pod uwage brane sa zanieczyszczenia EC oraz CO.
}
|
439e228bf0fff2396e77b17721d353f2c2aacc5a | a0719512d0d83ec36757f1e6b05aafa82cde148d | /R/retile.R | 166f145ed32b53b0cdb40579c848bf9b222cf01f | [] | no_license | adamkc/DIMEChelper | 25f9ce073a8a6c9cee3e0bf1f18048fbdb881846 | 2cbaacff8f45570fef637ea804b95aa0244e7642 | refs/heads/master | 2020-04-07T00:22:39.157626 | 2020-01-30T19:33:05 | 2020-01-30T19:33:05 | 157,901,336 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,197 | r | retile.R | #' retile
#'
#' This takes a raster of the class SpatialGridDataFrame and converts it into jpg
#' chips of the specified dimensions and overlap. Band color order can be
#' specified with arguments
#'
#' @param imageName Name of Tif to be chipped.
#' @param flightName name of flight to be chipped
#' @param red Dimension number for red band
#' @param green Dimension number for green band
#' @param blue Dimension number for blue band
#' @param dim Size of output jpg chip
#' @param overlap number of pixels to overlap the chips
#' @param outputFolder location that new chips are copied
#' @param exportChipKey Logical, export the chipkey csv or not.
#' @param outputJPG Logical. Export jpg?
#' @param returnData Logical. return jpgs? For future use when chips not saved.
#'
#' @return Optionally exports a list of images that were created.
#' @return Chips jpg files into the outputFolder directory.
#' @return chipList csv file listing the filenames and coordinates for the exported jpgs.
#'
#'
#' @examples
#' \dontrun{
#' ###List of Tiffs to Crop:-----------------------------------------------
#' files <- list.files("G:/GoogleImagery/Purchase2/Mosaics",recursive=TRUE,pattern="*.tif",full.names = TRUE)
#' files <- files[grep(pattern = "crescentcity2015",x = files)]
#'
#' ###Crop one tiff:--------------------------------------------
#' retile(imageName = files[1],dim=299,overlap=60,outputFolder="Chips")
#'
#' ###Crop a list of tiffs:-------------------------------------------
#' setwd("G:/GoogleImagery/Purchase2")
#' sapply(files,FUN=retile,outputFolder="Chips",flightName = "ca_crescentcity_20150715_rgb")
#'
#' }
#' @export
retile <- function(imageName,
                   flightName="ca_hayfork_20160529_rgb",
                   red=1,green=2,blue=3,
                   dim=299,overlap=60,
                   outputFolder="Chips",
                   outputJPG = TRUE,
                   returnData = FALSE,
                   exportChipKey = FALSE){
  ## Derive the output path from the input tif name
  csvName <- substr(basename(imageName), start = 1,
                    stop = nchar(basename(imageName)) - 4)
  outputFolderFull <- file.path(outputFolder, flightName, csvName, "Unclassified")
  ## Bail early to avoid redundant work:
  if (dir.exists(outputFolderFull))
    return(print(paste0(imageName, " is already clipped.")))
  if (!dir.exists(outputFolder))
    return(print(paste0("outputFolder '", outputFolder,
                        "' doesnt exist. Setwd to correct Dir?.")))
  if (!file.exists(imageName))
    return(print(paste0("Cannot locate file ", imageName)))
  dir.create(outputFolderFull, recursive = TRUE)
  image <- rgdal::readGDAL(imageName)
  ## Extract variables from the SpatialGridDataFrame
  pixelrows <- image@grid@cells.dim[2]
  pixelcols <- image@grid@cells.dim[1]
  df <- image@data
  startLat <- image@bbox[2, 2]
  startLong <- image@bbox[1, 1]
  cellsize <- image@grid@cellsize[1]
  ## Reproject the bounding box to WGS84 (EPSG:4326) so chip names carry
  ## lat/long; skipped when the image already is WGS84 or has no CRS.
  if (!raster::compareCRS(image, raster::crs("+init=epsg:4326")) &
      !is.na(raster::crs(image))) {
    xy <- sp::SpatialPoints(coords = data.frame(t(image@bbox)),
                            proj4string = raster::crs(image))
    wgs.xy <- sp::spTransform(xy, CRSobj = sp::CRS("+init=epsg:4326"))
  } else {
    wgs.xy <- sp::SpatialPoints(coords = data.frame(t(image@bbox)),
                                proj4string = raster::crs(image))
  }
  rm(image)
  ## Stack the three requested bands into a rows x cols x 3 array
  m <- array(c(matrix(df[, red], nrow = pixelrows, byrow = TRUE),
               matrix(df[, green], nrow = pixelrows, byrow = TRUE),
               matrix(df[, blue], nrow = pixelrows, byrow = TRUE)),
             dim = c(pixelrows, pixelcols, 3))
  rm(df)
  ## Calculate number of (possibly overlapping) chips to be generated:
  nimagerows <- ceiling(pixelrows / (dim - overlap))
  nimagecols <- ceiling(pixelcols / (dim - overlap))
  print(sprintf("%s columns and %s rows gives %s expected images", nimagecols,
                nimagerows, nimagerows * nimagecols))
  ## Generate and export images:
  gridkey <- data.frame(Row = NULL, Col = NULL, File = NULL, Lat = NULL, Long = NULL)
  if (returnData) imageList <- list()
  for (r in seq_len(nimagerows)) {
    for (c in seq_len(nimagecols)) {
      ## Pixel window of the current chip; chips at the right or bottom
      ## edge are shifted back so that every chip is exactly dim x dim.
      rowstart <- (((r - 1) * dim) + 1) - ((r - 1) * overlap)
      rowend <- rowstart + dim - 1
      if (rowend > pixelrows) {
        rows <- (pixelrows - dim + 1):pixelrows
      } else {
        rows <- rowstart:rowend
      }
      colstart <- (((c - 1) * dim) + 1) - ((c - 1) * overlap)
      colend <- colstart + dim - 1
      if (colend > pixelcols) {
        cols <- (pixelcols - dim + 1):pixelcols
      } else {
        cols <- colstart:colend
      }
      output <- m[rows, cols, c(1:3)]
      ## Skip chips that are mostly nodata (black)
      proportionBlack <- sum(output == 0) / (dim * dim * 3)
      if (proportionBlack < 0.20) {
        ## Scale pixel values into [0, 1] for writeJPEG
        output <- output / 256
        ## Chip centre in WGS84, interpolated along the bounding box
        chipLat <- wgs.xy@coords[2, 2] - ((wgs.xy@coords[2, 2] -
                                             wgs.xy@coords[1, 2]) *
                                            (mean(rows) / pixelrows))
        chipLong <- wgs.xy@coords[1, 1] + ((wgs.xy@coords[2, 1] -
                                              wgs.xy@coords[1, 1]) *
                                             (mean(cols) / pixelcols))
        chipName <- paste0(csvName, "_", round(chipLat, 5), "_",
                           round(chipLong, 5), ".jpg")
        fileName <- paste0(outputFolderFull, "/", chipName)
        if (outputJPG) jpeg::writeJPEG(output, target = fileName, quality = 0.95)
        if (returnData) {
          imageNumber <- ((r - 1) * nimagecols) + c
          imageList[[imageNumber]] <- output
        }
        ## Record chip metadata in the grid key
        temp <- data.frame(Row = r, Col = c, File = chipName, Lat = chipLat, Long = chipLong)
        gridkey <- rbind(gridkey, temp)
      }
    }
  }
  ## BUG FIX: the chip key used to be rewritten once per outer-loop row
  ## iteration (write.csv sat inside the r loop); write it once, here.
  if (exportChipKey)
    write.csv(gridkey,
              file = paste0(csvName, "_ChipKey.csv"), row.names = FALSE)
  ## Display count of chips and delete empty directory if no chips:
  if (outputJPG) {
    exportedChipCount <- nrow(gridkey)
    message(paste0(exportedChipCount, " chips created."))
    if (exportedChipCount == 0) {
      message("No valid chips exported. Deleting folder.")
      unlink(dirname(outputFolderFull), recursive = TRUE)
    }
  }
  if (returnData) {
    ## BUG FIX: the original used `[[` with a which() vector, which performs
    ## recursive indexing and fails whenever more than one chip was kept;
    ## `[` with a logical mask keeps every non-NULL chip.
    imageList <- imageList[!vapply(imageList, is.null, logical(1))]
    return(imageList)
  }
}
|
83d956dd71c2633b5634f94193d158a1da4d097b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/symbolicDA/tests/DClust.R | 3613decf2d7dbdd16bed25198490e13da5bcb661 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 151 | r | DClust.R | require(symbolicDA)
# Example/regression run of DClust on the symbolicDA "cars" data set.
data("cars",package="symbolicDA")
sdt<-cars
# Pairwise dissimilarity matrix between the symbolic objects (type "U_3";
# see ?dist.SDA for the available measures).
dist<-dist.SDA(sdt, type="U_3")
# Partition into 5 clusters, capped at 100 iterations.
clust<-DClust(dist, cl=5, iter=100)
print(clust)
|
19c051eb8ddf4951f329abe87b6fd02e8b53d4da | 98fd03ebd9de52038f06cd89200a460432f9cc5c | /man/roxygen_score_family.Rd | 8e27b242c5da9f65716ee607490920a4cf85245f | [
"MIT"
] | permissive | pharmaR/riskmetric | 51d3b067da6db6ad1252f3ba706db1d922b5df64 | 3d1501880edc07cff5cd72129c0df0899db83029 | refs/heads/master | 2023-07-26T07:33:56.471690 | 2023-05-31T14:58:21 | 2023-05-31T14:58:21 | 173,354,970 | 148 | 32 | NOASSERTION | 2023-09-12T20:41:31 | 2019-03-01T19:11:16 | R | UTF-8 | R | false | true | 797 | rd | roxygen_score_family.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pkg_score.R
\name{roxygen_score_family}
\alias{roxygen_score_family}
\title{Helper for creating a roxygen header from template for score.* functions}
\usage{
roxygen_score_family(name, dontrun = TRUE)
}
\arguments{
\item{name}{the name of the scoring function, assuming naming conventions are
followed}
\item{dontrun}{logical indicating whether examples should be wrapped in
a dontrun block. This is particularly useful for assessments which may
require an internet connection.}
}
\value{
roxygen section template for score family functions
}
\description{
Helper for creating a roxygen header from template for score.* functions
}
\examples{
\dontrun{
#' @eval roxygen_score_family("has_news")
}
}
\keyword{internal}
|
c3f0e7bb361deaac65cce44d70004ef5de24f43c | e82f18ca5fcb536fac2d7a702f18baf5ec4345aa | /Rpackage/focus/man/focus.projections.setup.Rd | f9063f3a83d572a3aeb2f0703cc2d3718b0bca64 | [] | no_license | suppechasper/focus | 88b570a143909170dd33b710a743323b185c4bce | 938a182a3cf48e4954057d4badfcf14aa662d700 | refs/heads/master | 2021-01-18T19:43:02.212560 | 2016-02-17T21:57:59 | 2016-02-17T21:57:59 | 100,535,516 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,599 | rd | focus.projections.setup.Rd | \name{focus.projections.setup}
\alias{focus.projections.setup}
\alias{focus.create.projection.group}
\alias{focus.set.tangent.rotation.animation}
\alias{focus.set.rotation.animation}
\alias{focus.set.no.animation}
\title{Focus - Interactive exploratory visualization for high-dimensional data}
\description{
Methods to change the behaviour of projections created with any of the methods
in \link{focus.projections}.
In particular changing the animations between different projections:
\enumerate{
\item no animation.
\item Standard rotation animation.
\item Tangent rotation animation. Uses local PCA when the origin is moved. The
local PCA is done on all points within unit distance as defined by the
current scale.
}
Finally, projections can be grouped such that if the projection is changed in any
of the projections in the group, all projections are changed to the same focus
points.
}
\usage{
focus.create.projection.group(projections)
focus.set.tangent.rotation.animation(fprojector, t1=1, t2=2)
focus.set.rotation.animation(fprojector)
focus.set.no.animation(fprojector)
}
\arguments{
\item{projections}{A lsit of projections handles created with the methods in
\link{focus.projections} }
\item{fprojector}{A projection handle created with the methods in
\link{focus.projections} }
\item{t1}{Which principal component to use for the first tangent direction}
\item{t2}{Which principal component to use for the second tangent direction}
}
\value{
}
\author{
Samuel Gerber
}
\seealso{
focus,focus.projections
}
\references{
Focused Maps - Dimensionality Reduction for Interactive Visualization
Samuel Gerber, IEEE TVCG 2015
}
\examples{
library(focus)
#setup a correlation display for the iris data
X = t(scale(t( scale(iris[, 1:4]))))
X = X/sqrt(sum(X[1,]^2))
data <- focus.create.matrix.data( X )
proj <- focus.create.planar.projection( ncol(X) )
focus.start()
focus.add.correlation.display(data, proj, 0.5, 0, 0.5, 05)
X = scale( iris[, 1:4] )
data2 <- focus.create.matrix.data( X )
proj2 <- focus.create.hyperbolic.line.projection( ncol(X) )
focus.add.line.projection.display(data2, proj2, 0, 0, 0.5, 0.5)
proj3 <- focus.create.hyperbolic.voronoi.projection( ncol(X) )
focus.add.hyperbolic.voronoi.projection.display(data2, proj3, 0, 0.5, 0.5, 1)
species = levels(iris$Species)
for(i in 1:length(species)){
group = which(iris$Species == species[i])
focus.set.group(data, i-1, group)
focus.set.group(data2, i-1, group)
}
}
\keyword{visualization, interaction, exploratory data analysis, high dimensional data}
|
1ec3fd08a1a67f989abb5f4cbb43ec15c6ad2460 | 2327d0bc2cc45a5504c39109846e0f4cba266606 | /QID-3397-SFEchooser/SFEchooser.R | cad55619c357062ea6d0520c3370ca992eb20446 | [] | no_license | QuantLet/SFE | 3d98a33cfcdc533210856c7618c32a78e111a6ce | d25a728a4371538eae982f44ea811b5b93328828 | refs/heads/master | 2022-06-15T13:35:17.387252 | 2022-06-08T01:22:00 | 2022-06-08T01:22:00 | 72,103,182 | 12 | 32 | null | 2022-01-30T18:58:21 | 2016-10-27T11:50:43 | R | UTF-8 | R | false | false | 3,852 | r | SFEchooser.R | # clear variables and close windows
# SFEchooser: prices a European chooser-style option on a binomial
# (CRR-type) tree.  The underlying call and put are priced first; their
# values at the choice date T1 then feed the chooser payoff.
rm(list = ls(all = TRUE))
graphics.off()
# parameter settings
K1 = 20 # Exercise price compound
K2 = 210 # Exercise price option
St1 = 230 # Price of underlying asset
r = 0.04545 # dom. interest rate
sigma = 0.25 # volatility per year
T2 = 1 # time to maturity option
T1 = 0.5 # time to maturity compound
b = r # cost of carry out
dt = 0.2 # Interval of step
n = floor(T2/dt) # number of steps
# from equation (7.2)
u = exp(sigma * sqrt(dt)) # upward proportion: approx 1.1183
d = 1/u # downward proportion approx. 0.89422
p = 0.5 + 0.5 * (b - 0.5 * sigma^2) * sqrt(dt)/sigma # Pseudo probability of up movement approx 0.5127
# Build cumulative up/down factor matrices: after transposition, entry
# (i, j) of um * dm carries the product u^a * d^c for the tree node
# reachable at step j; zeros pad unreachable nodes.
un = matrix(0, n + 1, 1)
un[n + 1, 1] = 1
dm = t(un) # down movement
um = dm # up movement
j = 1
while (j < n + 1) {
    # Down movement dynamics
    d1 = c(matrix(0, 1, n - j), (matrix(1, 1, j + 1) * d)^(seq(0:j) - 1))
    dm = rbind(dm, d1)
    # Up movement dynamics
    u1 = c(matrix(0, 1, n - j), matrix(1, 1, j + 1) * u^seq(j, 0, -1))
    um = rbind(um, u1)
    j = j + 1
}
um = t(um)
dm = t(dm)
# Stock price development over the tree
s = St1 * um * dm
colnames(s) = c()
print("Stock price development")
print(s)
# Rearrangement (flip rows so the highest prices come first)
s = s[ncol(s):1, ]
# Call option
opt = matrix(0, ncol(s), ncol(s))
# Determine call option values from terminal prices: payoff max(S - K2, 0)
opt[, n + 1] = apply(cbind(as.matrix(s[, ncol(s)] - K2), matrix(0, nrow(as.matrix(s[,
    ncol(s)] - K2)), 1)), 1, max)
# Backward induction through the tree, column by column
for (j in n:1) {
    l = 1:j
    # Probable option values discounted back one time step
    discopt = ((1 - p) * opt[l, j + 1] + p * opt[l + 1, j + 1]) * exp(-b * dt)
    # Option value is max of current stock price - strike or discopt
    opt[, j] = c(discopt, matrix(0, n + 1 - j, 1))
}
European_Call_Price = opt[ncol(opt):1, ]
print("Call option price development")
print(European_Call_Price)
print("The price of the European call option at time t_0 is")
print(European_Call_Price[n + 1, 1])
# Call-value sub-trees: C up to the choice date T1, CC one step beyond
# (with its last column zeroed); both are reused for the chooser payoff.
C = opt[1:(floor(T1/dt) + 1), 1:(floor(T1/dt) + 1)]
CC = opt[1:(floor(T1/dt) + 2), 1:(floor(T1/dt) + 2)]
CC[, (floor(T1/dt) + 2)] = 0
# Put option
opt = matrix(0, ncol(s), ncol(s))
# Determine put option values from terminal prices: payoff max(K2 - S, 0)
opt[, n + 1] = apply(cbind(as.matrix(K2 - s[, ncol(s)]), matrix(0, nrow(as.matrix(K2 -
    s[, ncol(s)])), 1)), 1, max)
for (j in n:1) {
    l = 1:j
    # Probable option values discounted back one time step
    discopt = ((1 - p) * opt[l, j + 1] + p * opt[l + 1, j + 1]) * exp(-b * dt)
    # print(discopt)
    # Option value is max of current stock price - strike or discopt
    opt[, j] = c(discopt, matrix(0, n + 1 - j, 1))
}
European_Put_Price = opt[ncol(opt):1, ]
print("Put option price development")
print(European_Put_Price)
print("The price of the European put option at time t_0 is")
print(European_Put_Price[n + 1, 1])
# Put-value sub-trees analogous to C and CC above
Cp = opt[1:(floor(T1/dt) + 1), 1:(floor(T1/dt) + 1)]
CP = opt[1:(floor(T1/dt) + 2), 1:(floor(T1/dt) + 2)]
CP[, (floor(T1/dt) + 2)] = 0
# Value at the choice-date column: max(call - K1, put - K1, 0) at each
# node, plus the analogous term taken from the node one step beyond T1
# (the CC/CP corner entries prepared above).
CH = matrix(0, ncol(C), ncol(C))
CH[, (floor(T1/dt) + 1)] = apply(cbind(as.matrix(C[, (floor(T1/dt) + 1)] -
    K1), as.matrix(Cp[, (floor(T1/dt) + 1)] - K1), 0), 1, max) + apply(cbind(as.matrix(CC[(floor(T1/dt) +
    2), (floor(T1/dt) + 2)] - K1), as.matrix(CP[(floor(T1/dt) + 2), (floor(T1/dt) +
    2)] - K1), 0), 1, max)
k = floor(T1/dt)
# employing equation (7.5): discount the chooser values back to t_0
for (i in k:1) {
    j = i:1
    CH[j, i] = exp(-r * dt) * (p * CH[j + 1, i + 1] + (1 - p) * CH[j, i + 1])
}
European_Chooser_Price = CH[ncol(CH):1, ]
print("Chooser Option price development")
print(European_Chooser_Price)
print("The price of the European chooser option at time t_0 is")
print(European_Chooser_Price[(floor(T1/dt) + 1), 1]) |
cc0933189038daf325fd22ef2b1102731959e1fb | 5d1a7aef7950e4c508f9af3341ab2e03b04960a6 | /tests/testthat/test-mlog10_trans.R | 705de751fa6cde07acb9f532ecc56a7d47c56e60 | [
"MIT"
] | permissive | quanrd/ggGWAS | c981a11650b116e3e990175726878752102187e6 | cb68936dff01fc3e06ba683b918e640aa3ecdc09 | refs/heads/master | 2020-09-06T00:18:28.412842 | 2019-07-31T16:04:49 | 2019-07-31T16:04:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 381 | r | test-mlog10_trans.R | context("mlog10_trans")
test_that("multiplication works", {
  # Placeholder expectation left over from the package skeleton; it does
  # not exercise mlog10_trans(). TODO: replace with a real round-trip test
  # of the transformation (see the sketched ggplot/qqman comparison below).
  expect_equal(2 * 2, 4)
})
#' df <- data.frame(y = runif(1000))
#'
#' ggplot(df, aes(sample = y)) +
#' stat_qq(distribution = stats::qunif) +
#' scale_y_continuous(trans = mlog10_trans()) +
#' scale_x_continuous(trans = mlog10_trans()) +
#' geom_abline(intercept = 0, slope = 1)
## test that equal to qqman::qq(df$y)
|
e0fe6e3aeea0b89c6f8963039515e40e95249fb9 | cba10b84d2cc708dd66148a4511451d77a92a7c5 | /man/SSplotPars.Rd | 8ea4f7f3ce1062e45a59fbaf1784c3776b0b942c | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | r4ss/r4ss | 03e626ae535ab959ff8109a1de37e3e8b44fe7ad | 0ef80c1a57e4a05e6172338ddcb0cda49530fa93 | refs/heads/main | 2023-08-17T08:36:58.041402 | 2023-08-15T21:42:05 | 2023-08-15T21:42:05 | 19,840,143 | 35 | 57 | null | 2023-07-24T20:28:49 | 2014-05-16T00:51:48 | R | UTF-8 | R | false | true | 4,622 | rd | SSplotPars.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SSplotPars.R
\name{SSplotPars}
\alias{SSplotPars}
\title{Plot distributions of priors, posteriors, and estimates.}
\usage{
SSplotPars(
replist,
plotdir = NULL,
xlab = "Parameter value",
ylab = "Density",
showmle = TRUE,
showpost = TRUE,
showprior = TRUE,
showinit = TRUE,
showdev = FALSE,
showlegend = TRUE,
fitrange = FALSE,
xaxs = "i",
xlim = NULL,
ylim = NULL,
verbose = TRUE,
debug = FALSE,
nrows = 4,
ncols = 2,
ltyvec = c(1, 1, 3, 4),
colvec = c("blue", "red", "black", "gray60", rgb(0, 0, 0, 0.5)),
add = FALSE,
plot = TRUE,
print = FALSE,
pwidth = 6.5,
pheight = 6.5,
punits = "in",
ptsize = 10,
res = 300,
strings = NULL,
exact = FALSE,
newheaders = NULL
)
}
\arguments{
\item{replist}{A list object created by \code{\link{SS_output}()}.}
\item{plotdir}{Directory where PNG files will be written.}
\item{xlab}{Label on horizontal axis.}
\item{ylab}{Label on vertical axis.}
\item{showmle}{Show MLE estimate and asymptotic variance estimate with blue
lines?}
\item{showpost}{Show posterior distribution as bar graph if MCMC results
are available in \code{replist}?}
\item{showprior}{Show prior distribution as black line?}
\item{showinit}{Show initial value as red triangle?}
\item{showdev}{Include devs in the plot?}
\item{showlegend}{Show the legend?}
\item{fitrange}{Fit range tightly around MLE & posterior distributions,
instead of full parameter range?}
\item{xaxs}{Parameter input for x-axis. See \code{?par} for more info.}
\item{xlim}{Optional x-axis limits to be applied to all plots.
Otherwise, limits are based on the model results.}
\item{ylim}{Optional y-axis limits to be applied to all plots.
Otherwise, limits are based on the model results.}
\item{verbose}{A logical value specifying if output should be printed
to the screen.}
\item{debug}{Provide additional messages to help with debugging when the
function fails.}
\item{nrows}{How many rows in multi-figure plot.}
\item{ncols}{How many columns in multi-figure plot.}
\item{ltyvec}{Vector of line types used for lines showing MLE and prior
distributions and the median of the posterior distribution.}
\item{colvec}{Vector of colors used for lines and polygons showing MLE,
initial value, prior, posterior, and median of the posterior.}
\item{add}{Add to existing plot?}
\item{plot}{Plot to active plot device?}
\item{print}{Print to PNG files?}
\item{pwidth}{Default width of plots printed to files in units of
\code{punits}.}
\item{pheight}{Height of plots printed to png files in units of \code{punits}.
Default is designed to allow two plots per page, with \code{pheight_tall} used
for plots that work best with a taller format and a single plot per page.}
\item{punits}{Units for \code{pwidth} and \code{pheight}. Can be "px"
(pixels), "in" (inches), "cm" (centimeters), or "mm" (millimeters).
The default is \code{punits="in"}.}
\item{ptsize}{Point size for plotted text in plots printed to files (see
\code{help("png")} in R for details).}
\item{res}{Resolution of plots printed to files.
The default is \code{res = 300}.}
\item{strings}{Subset parameters included in the plot using substring from
parameter names (i.e. "SR" will get "SR_LN(R0)" and "SR_steep" if they are both
estimated quantities in this model).}
\item{exact}{Should strings input match parameter names exactly? Otherwise
substrings are allowed.}
\item{newheaders}{Optional vector of headers for each panel to replace the
parameter names.}
}
\description{
Make multi-figure plots of prior, posterior, and estimated asymptotic
parameter distributions. MCMC not required to make function work.
}
\examples{
\dontrun{
# read model results
model <- SS_output(dir = "c:/SS/Simple/")
# make default plots where parameter distribution plots will appear
# in the "pars" tab
SS_plots(model)
# create just the "pars" tab with control of the inputs that are
# passed to SSplotPars
SS_plots(model,
plot = 25, showmle = TRUE, showpost = TRUE,
showprior = TRUE, showinit = TRUE, showdev = FALSE, fitrange = FALSE
)
# call SSplotPars directly
SSplotPars(replist = model)
# Create plot in custom location. Note that strings can be partial match.
# File name will be "parameter_distributions.png"
# or "parameter_distributions_pageX.png" when they don't all fit on one page
SSplotPars(
replist = model, strings = c("steep", "R0"),
nrows = 2, ncols = 1, plot = FALSE, print = TRUE,
plotdir = file.path(model[["inputs"]][["dir"]], "distribution_plots")
)
}
}
\author{
Ian G. Taylor, Cole C. Monnahan
}
|
286d9932112c8a6be43ca3bf05ed53206e9e5908 | eee5f4bfeba72f55c603eb8fbaa04d50af0165a8 | /R/xmashealth.R | 34d8978aa0df9c41b780abe7ddcfcc45453ce9fe | [] | no_license | cran/christmas | a5a1b761fd27fb25e9dd813ac8b18d2e73b04787 | 9e9d13c9e15639ec71a657f52f36844a14692ce5 | refs/heads/master | 2022-12-27T17:02:41.158735 | 2022-12-18T16:50:02 | 2022-12-18T16:50:02 | 236,570,863 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,402 | r | xmashealth.R | #' @title Christmas risks.
#'
#' @description Christmas risks (2011 card). Some epidemiological jokes about
#' potential effects of Christmas on health.
#'
#' @param year Year to be printed. Default is \code{2012}.
#' @param seed Seed for reproducibility of the card. Default is \code{NULL} (no
#' seed).
#' @return A Christmas card plot including boxplots and OR estimates.
#' @author Jose Barrera-Gomez.
#' @examples
#' \donttest{
#' xmashealth()
#' }
#' @export
xmashealth <- function(year = 2012, seed = NULL) {
# "year":
if (!inherits(year, c("numeric", "integer")) || length(year) != 1L)
stop("'year' must be a number")
# "seed":
if(!is.null(seed) & (is.na(seed) || !is(seed, "numeric")))
stop("'seed' must be numeric or NULL")
if (!is.null(seed)) set.seed(seed)
nDaysX <- 20
nDaysNoX <- 365 - nDaysX
nYears <- 100
christmas <- c(rep(1, nYears * nDaysX), rep(0, nYears * nDaysNoX))
X <- data.frame(christmas)
X$inlaws <- simulateBinary(p0 = 0.2, or = 1.8, christmas = christmas)
X$financial <- simulateBinary(p0 = 0.1, or = 1.4, christmas = christmas)
X$noise <- simulateBinary(p0 = 0.1, or = 1.2, christmas = christmas)
labs <- c("In-laws\n visits", "Domestic\n financial\n crisis", "Harmful levels\n of children's\n noise at home")
ORs <- t(sapply(names(X)[-1], FUN = function(y) getOR(x = y, data = X)))
newwindow()
op <- par(mfrow = c(1, 2), las = 1, oma = c(0, 0, 2, 0))
on.exit(par(op))
op
mu0 <- 115
mu1 <- 165
mu <- data.frame(mu = mu0 + (mu1 - mu0) * christmas)
X$ldl <- apply(mu, 1, FUN = function(x) rnorm(n = 1, mean = x, sd = 10))
boxplot(ldl ~ christmas, data = X, xlab = "Christmas", ylab = "", ylim = c(0, 2 * mu1), col = c("forestgreen", "red"), main = "LDL cholesterol (mg/dl)", xaxt = "n")
axis(1, at = c(1, 2), labels = c("Before", "After"), cex = 0.5)
xmin <- 0.8
xmax <- max(ORs[, 3])
plot(ORs[, 1], 1:3, type = "p", xlim = c(xmin, xmax), ylim = c(0, 4), pch = 19, xlab = "OR", ylab = "", yaxt = "n", main = "Other Christmas risks", cex = 1.3, col = "blue")
axis(2, at = 1:3, labels = labs, cex.axis = 0.8)
abline(v = 1, lty = 2, col = "blue", lwd = 2)
segments(ORs[, 2], 1:3, ORs[, 3], 1:3, lwd = 3, col = "blue")
tit <- paste0("I wish you a statistically non significant chRistmas and a happy ",
year, "!")
title(tit, outer = TRUE, cex.main = 1.3, col.main = "forestgreen")
}
|
98fe49b92b3beb44b00059fa8c2708f6faaa1efd | dbe6347de9c2d0bf869f729161a47731daf1cdf9 | /Quiz-3/Q-3.R | c420993db6ca1e574ec10041f3bce5ca01247d43 | [] | no_license | tsloan/GettingCleaningData | ef9e4e5d36953ef534df6166ac8c76d422c06904 | 97731917c32fe7beedf0c9d494d7aaaa7c57ff69 | refs/heads/master | 2021-01-16T19:14:02.217027 | 2014-07-01T16:27:30 | 2014-07-01T16:27:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,309 | r | Q-3.R | #############################################################################
## Question 3
## Load the Gross Domestic Product data for the 190 ranked countries in
## this data set:
##
## https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv
##
## Load the educational data from this data set:
##
## https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv
##
## Match the data based on the country shortcode.
## How many of the IDs match? Sort the data frame in descending order by
## GDP rank (so United States is last). What is the 13th country in the
## resulting data frame?
##
## Original data sources:
## http://data.worldbank.org/data-catalog/GDP-ranking-table
## http://data.worldbank.org/data-catalog/ed-stats
##
## 189, Spain
## 234, St. Kitts and Nevis
## 190, St. Kitts and Nevis
## 234, Spain
## 189, St. Kitts and Nevis
## 190, Spain
##############################################################################
###############################################################################
## Set path on home PC or laptop
###############################################################################
#setwd(
# "C://Terry-R-stuff//Coursera-R//GettingAndCleaningData//GettingCleaningData//Quiz-3")
setwd(
"C://Terry-Programming//Coursera//GettingCleaningData//Quiz-3")
###############################################################################
## Create directory to hold the data
###############################################################################
if (!file.exists("data-Q3")){
dir.create("data-Q3")
###########################################################################
## Download the file into the Create directory
###########################################################################
fileUrl<-"https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
download.file(fileUrl, destfile = "./data-Q3/FGDP.csv")
fileUrl<-"https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv"
download.file(fileUrl, destfile = "./data-Q3/FEDSTATS_Country.csv")
list.files("./data-Q3")
dateDownloaded <- date()
dateDownloaded
}
# need to skip the first four lines of this
gdp<-read.csv("./data-Q3/FGDP.csv", skip=4, header=TRUE,
stringsAsFactors=FALSE)
#need to skip the blank lines
edstats <-read.csv("./data-Q3/FEDSTATS_Country.csv",
blank.lines.skip=TRUE,
header=TRUE,
stringsAsFactors=FALSE)
##############################################################################
## Match the data based on the country shortcode.
##
## the edstats$countrycode and gdp$X are the fields
## containing country shortcode
## the all.x = FALSE removes any rows where there is no matching output
##############################################################################
mergeData<-merge(edstats,gdp, by.x="CountryCode", by.y="X",all.x=FALSE)
# extract just the relevant fields
sdf <- data.frame(Code=mergeData$CountryCode,
CountryName=mergeData$Long.Name,
Ranking=mergeData$X.1,
IncomeGroup=mergeData$Income.Group,
stringsAsFactors=FALSE)
#############################################################################
## How many of the IDs match?
#############################################################################
print(paste("number of ID matches is ",nrow(sdf),
" but not all have gdp rank data") )
print("after removing countries where there is no gdp rank data")
sdf<-sdf[(sdf$Ranking != ""),]
print(paste("number of ID matches is ", nrow(sdf) ) )
#############################################################################
## Sort the data frame in descending order by GDP rank
## (so United States is last).
## What is the 13th country in the resulting data frame?
#############################################################################
sdfrank<-sdf[order(- as.numeric(sdf$Ranking)),]
print(paste("13th rank nation is ", sdfrank[13,2]))
#############################################################################
## Question 4
##
## What is the average GDP ranking for the
## "High income: OECD" and
## "High income: nonOECD" group?
## See Q-3.R
############################################################################
spIns<-split(as.numeric(sdf$Ranking),sdf$IncomeGroup)
print(lapply(spIns,mean))
#############################################################################
## Question 5
##
## Cut the GDP ranking into 5 separate quantile groups.
## Make a table versus Income.Group.
## How many countries are Lower middle income but among the 38 nations
## with highest GDP?
##
## See Q-3.R
#############################################################################
# coerce Ranking into a numeric
sdf$Ranking<-as.numeric(sdf$Ranking)
# cut the GDP ranking into 5 seperate groups with a
# boundary for the top 38
sdf$gdpquants = cut(sdf$Ranking,
breaks = c(1,38, 100, 150, 190) )
# create a logical indicating if a country is in the lower middle income
# group
sdf$LMI <- sdf$IncomeGroup %in% c("Lower middle income")
# create a table to get the answer
print(table(sdf$LMI,sdf$gdpquants))
|
6958629684e3911234ff5b26599653d939dba6c8 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/neuRosim/examples/stimfunction.Rd.R | 9dd7110c203eafb0774e7f72e34e170e3879ca13 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 303 | r | stimfunction.Rd.R | library(neuRosim)
### Name: stimfunction
### Title: Generate a stimulus boxcar function.
### Aliases: stimfunction
### Keywords: low-level
### ** Examples
total <- 100
os <- c(1, 21, 41, 61, 81)
d <- 10
out <- stimfunction(total, os, d, 0.1)
## Don't show:
rm(total,os,d,out)
## End(Don't show)
|
19af40deb5b88416f8b1f81bc598861d70fbbefa | af0c04fd6c294d59ce2c137d5d1a377f9f5f6bfe | /DataProcessing/abm_rs_abundance_to_raster.R | b62c7a8c64eae1d247d292a57748009b3d4b5657 | [] | no_license | Grimmel/VirtualSpecies-Supplementary3 | b2da41cf413c367087844271f86975b3e857580c | 01a3881e03aaefac579825ed6783a6110e5e6f39 | refs/heads/master | 2023-02-10T07:33:32.596157 | 2020-12-30T11:34:41 | 2020-12-30T11:34:41 | 307,981,301 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,531 | r | abm_rs_abundance_to_raster.R | library(raster)
exptFolder <- "D:\\PHDExperimentOutputs\\MainSims\\Pop_1\\"
landscapeCategories <- c('LLM','LLS','LMM','LMS','LSS')
landscapeReplicates <- 10
replicates <- 9
nSims <- 2
sp.raster <- raster(paste("D:\\PHDExperimentOutputs\\MainSims\\Species5\\Inputs\\LLM1_suitability.txt",sep=""))
for (i in 2:nSims){
counter <- 1
for(lc in landscapeCategories){
for (lr in 1:landscapeReplicates){
ls <- paste(lc,lr,sep='')
occupancyfileName <- paste(exptFolder,'Outputs\\','Sim',i,'_land',counter,'_Occupancy.txt',sep='')
occupancy <- read.table(occupancyfileName,h = T, sep = "\t")
occ <- rasterize(occupancy[, c("x", "y")], y = sp.raster, field = occupancy[, "Year_600"])
writeRaster(occ,paste(exptFolder,'Output_Maps\\occupancy\\occupancy_s',i,'_',ls,'.tif',sep=''),overwrite=TRUE)
for (j in 0:replicates){
populationfileName <- paste(exptFolder,'Outputs\\','Sim',i,'_land',counter,'_Pop.txt',sep='')
population <- read.table(populationfileName,h = T, sep = "\t")
population <- population[population$Rep==j & population$Year ==600,]
if(nrow(population)>0){
abundance <- rasterize(population[, c("x", "y")], y = sp.raster, field = population[, "NInd"])
pa <- abundance
pa[pa>0] <- 1
writeRaster(abundance,paste(exptFolder,'Output_Maps\\abundance\\abundance_s',i,'_',ls,'_r',j,'.tif',sep=''),overwrite=TRUE)
}
}
counter <- counter + 1
}
}
}
|
e3b132e515d1758447d886466cb26b15326c1e9b | b49dfd7e166ef072681a3abff54ffa2aa535db57 | /code/04f_plot_figure9_apc_statebins.R | c4af403291703cfd9f5b8f8af225bfdb0797a102 | [] | no_license | mkiang/opioid_hotspots | 66da7dd0e664abbd150f02dedc4d3ba433c5d9ac | b6125de0b0ec20098e4aa1d47717ca5688b98f75 | refs/heads/master | 2020-03-12T23:59:39.027041 | 2018-06-07T19:29:56 | 2018-06-07T19:29:56 | 105,161,859 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,830 | r | 04f_plot_figure9_apc_statebins.R |
## Imports
library(tidyverse)
library(viridis)
library(statebins)
source("./code/mk_nytimes.R")
## Pull yaml ----
cfig <- config::get()
PVAL <- cfig$sig_p_value
MIN_RATE <- cfig$min_rate
plot_folder <- cfig$plot_dir
data_folder <- cfig$working_data
## Data ----
est_df <- readRDS(sprintf("%s/joinpoint_results_dupe_rows_2016.RDS",
data_folder)) %>%
filter(dupe_row == 0)
## Better APC categories ----
est_df <- est_df %>%
mutate(apc_sig = ifelse(slope_pval < PVAL & rate > MIN_RATE,
apc, NA),
apc_cat = cut(apc_sig,
breaks = c(-Inf, 0, 5, 10, 15, 20,
30, 50, 75, 100, Inf),
ordered_result = TRUE))
est_df$apc_cat <- addNA(est_df$apc_cat)
apc_range <- range(est_df$apc_sig, na.rm = TRUE)
n_cats <- n_distinct(est_df$apc_cat)
levels(est_df$apc_cat)[1] <- sprintf("(%i,0]", round(apc_range[1]))
levels(est_df$apc_cat)[n_cats - 1] <- sprintf("(100,%i]", round(apc_range[2]))
levels(est_df$apc_cat)[n_cats] <- "NS"
## Plot APCs of current segment
apc_cat_df <- est_df %>%
group_by(opioid_type, race_cat, abbrev, race, opioid_cat) %>%
filter(year == max(year)) %>%
select(apc_cat, apc_sig) %>%
ungroup()
p1 <- ggplot(apc_cat_df,
aes(state = abbrev, fill = apc_cat)) +
scale_fill_viridis(
name = "APC (%)", direction = -1,
discrete = TRUE, na.value = "grey50") +
geom_statebins() +
coord_equal() +
scale_x_continuous(expand = c(0, 0)) +
scale_y_continuous(expand = c(0, 0)) +
facet_grid(opioid_cat ~ race_cat) +
mk_nytimes(legend.position = "bottom",
panel.grid.major = element_blank(),
axis.text = element_blank(),
panel.border = element_rect(linetype = "solid",
fill = NA,
color = "grey75")) +
guides(fill =
guide_legend(
title.position = "left",
keywidth = 2.5,
keyheight = .5,
label.position = "bottom",
nrow = 1,
label.hjust = .5
)
)
p2 <- ggplot(apc_cat_df %>% filter(!is.na(apc_cat)),
aes(fill = apc_cat, x = opioid_cat)) +
scale_fill_viridis(
name = "APC (%)", direction = -1, discrete = TRUE,
na.value = "grey50") +
scale_x_discrete(NULL) +
scale_y_continuous("Number of states (#)", expand = c(0, .5)) +
geom_histogram(stat = "count", position = "stack", color = "white") +
facet_grid(~ race_cat) +
mk_nytimes(legend.position = "bottom",
panel.grid.major = element_blank(),
panel.border = element_rect(linetype = "solid",
fill = NA,
color = "grey75")) +
guides(fill =
guide_legend(
title.position = "left",
keywidth = 2.5,
keyheight = .5,
label.position = "bottom",
nrow = 1,
label.hjust = .5
))
ggsave(sprintf('%s/fig_current_trajectory.pdf', plot_folder),
p1, width = 6, height = 8, scale = 1.25, device = cairo_pdf)
ggsave(sprintf('%s/fig_current_trajectory.png', plot_folder),
p1, width = 6, height = 8, scale = 1.25, dpi = 300)
ggsave(sprintf('%s/fig_current_trajectory_barchart.pdf', plot_folder),
p2, width = 7, height = 4, scale = 1.25, device = cairo_pdf)
ggsave(sprintf('%s/fig_current_trajectory_barchart.png', plot_folder),
p2, width = 7, height = 4, scale = 1.25, dpi = 300)
|
cd5460827fe894e354ca2a1942e244281aec5299 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/BioGeoBEARS/examples/calcZ_part.Rd.R | 03b274982a37e4c7d46ede1b36358861bf7b5813 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 192 | r | calcZ_part.Rd.R | library(BioGeoBEARS)
### Name: calcZ_part
### Title: Calculate Z (equation 6.3 of Harte 2011)
### Aliases: calcZ_part
### ** Examples
testval=1
n=6
lambda1 = 0.5
calcZ_part(n, lambda1)
|
6c56e3dbabe4a56b0b72643783766a605b9ddf29 | 45224ab73092fb8e7f13da6f714b3a86e6a6f6ee | /extras/CoordinatingCenterCode.R | 5f234bdea7238a22307e3c4cb3c42bdeae75e341 | [] | no_license | ohdsi-studies/EumaeusCovid19Vaccines | 1bca11b5a884db7253d07e551067551a73c2658e | 1bca22aacc166ca67a6a463c2f2452e7d5fedd0d | refs/heads/main | 2023-06-14T18:53:02.451320 | 2021-07-06T10:08:56 | 2021-07-06T10:08:56 | 383,422,950 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,425 | r | CoordinatingCenterCode.R | # This file contains code to be used by the study coordinator to download the files from the SFTP server, and to upload them to the results database.
library(Eumaeus)
library(OhdsiSharing)
allDbsFolder <- "r:/Eumaeus/AllDbs"
# dir.create(allDbsFolder)
# Download files from SFTP server -----------------------------------------------------------------
connection <- sftpConnect(privateKeyFileName = "c:/home/keyfiles/study-coordinator-covid19.dat",
userName = "study-coordinator-covid19")
# sftpMkdir(connection, "eumaeus")
sftpCd(connection, "eumaeus")
files <- sftpLs(connection)
files
sftpGetFiles(connection, files$fileName, localFolder = allDbsFolder)
# DANGER!!! Remove files from server:
sftpRm(connection, files$fileName)
sftpDisconnect(connection)
# Upload results to database -----------------------------------------------------------------------
library(DatabaseConnector)
connectionDetails <- createConnectionDetails(dbms = "postgresql",
server = paste(keyring::key_get("ohdsiPostgresServer"),
keyring::key_get("ohdsiPostgresShinyDatabase"),
sep = "/"),
user = keyring::key_get("ohdsiPostgresUser"),
password = keyring::key_get("ohdsiPostgresPassword"))
schema <- "eumaeus"
# Do this only once!
# createResultsDataModel(connectionDetails, schema)
# # After the tables have been created:
# sql <- "grant select on all tables in schema eumaeus to eumaeus_app_readonly;"
# sql <- "grant select on all tables in schema eumaeus to eumaeus_readonly;"
#
# # Next time, before creating tables:
# sql <- "grant usage on schema eumaeus to eumaeus_app_readonly;"
# sql <- "grant usage on schema eumaeus to eumaeus_readonly;"
# sql <- "alter default privileges in schema eumaeus grant select on tables to eumaeus_app_readonly;"
# sql <- "alter default privileges in schema eumaeus grant all on tables to eumaeus_readonly;"
# connection <- connect(connectionDetails)
# executeSql(connection, sql)
# disconnect(connection)
#
# Upload data
uploadedFolder <- file.path(allDbsFolder, "uploaded")
zipFilesToUpload <- list.files(path = allDbsFolder,
pattern = ".zip",
recursive = FALSE)
i = 1
for (i in (1:length(zipFilesToUpload))) {
uploadResultsToDatabase(connectionDetails = connectionDetails,
schema = schema,
zipFileName = file.path(allDbsFolder, zipFilesToUpload[i]),
purgeSiteDataBeforeUploading = T)
# Move to uploaded folder:
file.rename(file.path(allDbsFolder, zipFilesToUpload[i]), file.path(uploadedFolder, zipFilesToUpload[i]))
}
# Add imputed positive controls -------------------------------------
source("extras/AddImputedPositiveControls.R")
addImputedPositiveControls(connectionDetails = connectionDetails, schema = schema, databaseId = "IBM_MDCD")
addImputedPositiveControls(connectionDetails = connectionDetails, schema = schema, databaseId = "IBM_MDCR")
addImputedPositiveControls(connectionDetails = connectionDetails, schema = schema, databaseId = "CCAE")
addImputedPositiveControls(connectionDetails = connectionDetails, schema = schema, databaseId = "OptumEhr")
|
02f3936702d789e3bd08a5b692fda5e77a769aa8 | 12c5fe203a70038f7dbb724cd101239d689fc4bf | /필기/example5.R | 0f2691bd83aee79da283ff4c12645d46f1512651 | [] | no_license | choisawyou/learnR | d5479280b5fa5011d37df487a12647cab6da310e | 1fea4f39f18a3878ba0dee12f29a31fc5586000e | refs/heads/master | 2020-09-21T11:47:44.983163 | 2020-04-21T14:48:03 | 2020-04-21T14:48:03 | 224,779,452 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,963 | r | example5.R | #
# 5일차
#
setwd( "D:/WorkR" )
df <- read.table( file = "airquality.txt", header = T )
df
class( df )
dim( df )
str( df )
head( df, 3 )
tail( df, 3 )
install.packages( "xlsx" )
install.packages( "rJava" )
library( rJava )
library( xlsx )
df.xlsx <- read.xlsx( file = "airquality.xlsx",
sheetIndex = 1,
encoding = "UTF-8" )
df.xlsx
class( df.xlsx )
str( df.xlsx )
head( df.xlsx, 3 )
tail( df.xlsx, 3 )
score <- c( 76, 84, 69, 50, 95, 6, 82, 71, 88, 84 )
which( score == 69 )
which( score >= 85 )
max( score )
which.max( score )
min( score )
which.min( score )
idx <- which( score >= 60 )
score[ idx ] <- 61
score
idx <- which( iris[ , 1:4 ] > 5.0, arr.ind = TRUE )
idx
#
# 단일변수(일변량) 범주형 자료 탐색
#
favorite <- c( 'WINTER', 'SUMMER', 'SPRING',
'SUMMER', 'SUMMER', 'FALL',
'FALL', 'SUMMER', 'SPRING', 'SPRING' )
favorite
class( favorite )
table( favorite )
table( favorite ) / length( favorite )
ds <- table( favorite )
ds
barplot( ds, main = 'favorite season' )
ds.new <- ds[ c( 2, 3, 1, 4 ) ]
ds.new
barplot( ds.new, main = 'favorite season' )
pie( ds, main = 'favorite season' )
pie( ds.new, main = 'favorite season' )
favorite.color <- c( 2, 3, 2, 1, 1, 2, 2,
1, 3, 2, 1, 3, 2, 1, 2 )
ds <- table( favorite.color ); ds
barplot( ds, main = "favorite season" )
colors <- c( 'green', 'red', 'blue' )
names( ds ) <- colors; ds
barplot( ds, main = 'favorite season',
col = colors )
pie( ds, main = 'favorite season',
col = colors )
#
# 단일변수(일변량) 연속형 자료 탐색
#
weight <- c( 60, 62, 64, 65, 68, 69 ); weight
weight.heavy <- c( weight, 120 ); weight.heavy
#평균
mean( weight ); mean( weight.heavy )
#중앙값
median( weight ); median( weight.heavy )
#절사평균
mean( weight, trim = 0.2 )
mean( weight.heavy, trim =0.2 )
#사분위수
quantile( weight.heavy )
quantile( weight.heavy, ( 0:10 ) / 10 )
summary( weight.heavy )
#산포(distribution) - 값이 퍼져있는 정도 파악
#분산
var( weight )
#표준편차
sd( weight )
#값의범위(최소값과 최대값)
range( weight )
#최대값과 최소값의 차이
diff( range( weight ) )
#histogram : 연속형 자료의 분포를 시각화
# 연속형 자료에서는 구간을 나누고 구간에 속한
# 값들의 개수를 세는 방법으로 사용
str( cars )
dist <- cars[ , 2 ]
boxplot.stats( dist )
hist( dist, main = 'Histogram for 제동거리',
xlab = '제동거리', ylab = '빈도수',
border = 'blue', col = 'green',
las = 2, breaks = 5 )
#상자그림(boxplot, 상자수염그림)
# 사분위수를 시각화하여 그래프 형태로 표시
# 상자그림은 하나의 그래프로 데이터의 분포
# 형태를 포함한 다양한 정보를 전달
# 자료의 전반적인 분포를 이해하는데 도움
# 구체적인 최소/최대/중앙값을 알기는 어렵다
boxplot( dist, main = "자동차 제동거리" )
boxplot.stats( dist )
boxplot.stats( dist )$stats # 정상범위 사분위수
boxplot.stats( dist )$n # 관측치 개수
boxplot.stats( dist )$conf # 중앙값 신뢰구간
boxplot.stats( dist )$out # 이상치(특이값)목록
# 일변량중 그룹으로 구성된 자료의 상자그림
boxplot( Petal.Length~Species,
data = iris,
main = '품종별 꽃잎의 길이' )
boxplot( iris$Petal.Length~iris$Species,
main = '품종별 꽃잎의 길이' )
# 한 화면에 여러 그래프 작성
par( mfrow = c( 1, 3 ) )# 1 X 3 가상화면 분할
barplot( table( mtcars$carb ), main ="C",
xlab = "carburetors", ylab = "freq",
col = "blue" )
barplot( table( mtcars$cyl ), main ="Cyl",
xlab = "cyl", ylab = "freq",
col = "red" )
barplot( table( mtcars$gear ), main ="g",
xlab = "gear", ylab = "freq",
col = "green" )
par( mfrow = c( 1, 1 ) )# 가상화면 분할 해제
|
d4bd7694495042e4363ef78a2c603f968cfc7b10 | aeb7ac92f9f84298fc7d93dd133499b9d972aa86 | /election2016.R | 883ac7211f3c001b29324bf914dae83f40af7867 | [] | no_license | yalotfi/twitter-election-2016 | 9e6599a57fb2d3eb1f3841f0af83e711dc0f742d | e5deb86434b7130e9a37a9e40184a9851694d8d3 | refs/heads/master | 2020-07-02T11:30:23.035241 | 2016-12-01T21:51:54 | 2016-12-01T21:51:54 | 74,310,207 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,148 | r | election2016.R | #####################
### Load Packages ###
#####################
## Call script
source("loadPackages.R")
###########################
### Organize Shapefiles ###
###########################
## Load state shapefiles
states <- readOGR(
dsn = "shapefiles",
layer = "cb_2015_us_state_500k",
stringsAsFactors = FALSE,
verbose = FALSE
)
## Calculate state center points
center_pts <- gCentroid(states, byid = T)
states@data$lon <- center_pts@coords[, 1]
states@data$lat <- center_pts@coords[, 2]
## Define UTM Zone for each State
utm <- matrix(
data = c(seq(-66, -126, -6), # degree range
seq(19, 9, -1) # UTM Zones
)
, nrow = 11, ncol = 2 # 10 zones = 10 rows, 2 variables
, byrow = FALSE # Fill matrix column-wise
, dimnames = list(NULL, c("lon_range", "zones")) # Null row names, define col names
)
utm <- as.data.frame(utm) # coerce matrix to df
## Assign UTM Zone for each State
states@data$zone <- NA
for (i in 0:nrow(states@data)-1) {
if (states@data$lon_center[i] > min(utm[ ,1]) && states@data$lon_center[i] < max(utm[ ,1])) {
for (j in 1:nrow(utm)) {
if (states@data$lon_center[i] < utm$lon_range[j] && states@data$lon_center[i] > utm$lon_range[j+1]) {
} else {
next
}
}
} else {
next
}
}
## Filter States (Minus non-Contintental US, Plus DC)
states_f <- fortify(states)
states@data$id <- as.character(0:(nrow(states@data)-1))
states.data <- left_join(states_f, states@data, by = c("id"= "id"))
states.data <- states@data %>% filter(STUSPS %in%
c(state.abb[-which(state.abb == "AK" | state.abb == "HI")], "DC")) # state.abb is a R dataset
##########################
### Set-Up Twitter API ###
##########################
api_key = "INSERT HERE"
api_secret = "INSERT HERE"
token_key = "INSERT HERE"
token_secret = "INSERT HERE"
setup_twitter_oauth(api_key, api_secret, token_key, token_secret)
rm(api_key, api_secret, token_key, token_secret)
########################
### Mine Twitter API ###
########################
|
b59f160552e40b75a4d36c00eaa390be431ddbfe | 51e8db3547cc6a72863e889bd2a1c04b120c0e77 | /man/leafletR-package.Rd | 020b8776a0f3130e4b99012b56e461cd60794bbf | [] | no_license | cran/leafletR | 4aeae3155b590632d98b47db3967fcf85fc8072a | fd481b9a16b196fe333a680e79bcf91dd5377ef2 | refs/heads/master | 2021-01-23T19:14:01.134188 | 2016-04-01T16:24:37 | 2016-04-01T16:24:37 | 17,697,032 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,525 | rd | leafletR-package.Rd | \encoding{UTF-8}
\name{leafletR-package}
\alias{leafletR-package}
\alias{leafletR}
\docType{package}
\title{Interactive Web-Maps Based on the Leaflet JavaScript Library}
\description{Display your spatial data on interactive web-maps using the open-source JavaScript library Leaflet. The package provides basic web-mapping functionality to combine vector data and online map tiles from different sources.
}
\details{
\code{leafletR} lets you display spatial data on interactive maps in web browsers (a.k.a. slippy maps). It takes advantage of the open-source JavaScript library Leaflet.js, developed by Vladimir Agafonkin. Focusing simplicity, \code{leafletR} provides basic web-mapping functionality and styling options only. For map display an internet connection is required to load the Leaflet library, stylesheets and base map tiles. The ready to use html file output can be viewed locally or uploaded to a web-server.
\code{leafletR} supports GeoJSON and TopoJSON files directly. Additionally it contains conversion tools for sp spatial objects, several popular spatial vector data formats and R data frames containing point coordinates.
\code{leafletR} features open base map tiles. Map data is provided by the \href{http://www.openstreetmap.org}{OpenStreetMap} project and satellite images are provided by courtesy of NASA/ JPL-Caltech and U.S. Department of Agriculture, Farm Service Agency. Other tile sources may be added manually.
Try the example below to check if \code{leafletR} has been correctly installed. Any question and feedback is welcome via email to <christian.graul@gmail.com> or on \href{https://github.com/chgrl/leafletR}{GitHub}.
Example output:
\figure{quakes.png}{options: width=350px}
}
\author{
Christian Graul, with contributions from Francois Guillem
Maintainer: Christian Graul <christian.graul@gmail.com>
}
\references{
\url{http://leafletjs.com}
\url{http://geojson.org}
\url{https://github.com/topojson/topojson-specification}
}
\keyword{package}
\examples{
# load example data (Fiji Earthquakes)
data(quakes)
# store data in GeoJSON file (just a subset here)
q.dat <- toGeoJSON(data=quakes[1:99,], dest=tempdir(), name="quakes")
# make style based on quake magnitude
q.style <- styleGrad(prop="mag", breaks=seq(4, 6.5, by=0.5),
style.val=rev(heat.colors(5)), leg="Richter Magnitude",
fill.alpha=0.7, rad=8)
# create map
q.map <- leaflet(data=q.dat, dest=tempdir(), title="Fiji Earthquakes",
base.map="mqsat", style=q.style, popup="mag")
# view map in browser
#q.map
}
|
4b280341342b8aa9fff00e97df5c45d596ca58e7 | 203053106dccd4b971fdef22956df0281dfc6f0e | /EM_binomial.R | 3046ada08c6ccf54d7b18e9979412c98fdc5b2f5 | [] | no_license | JoonsukPark/examples | 31918dcbdcd98be4796eb96e84990852b802b915 | 91e1f3a1c1396762530269e1338728783c981235 | refs/heads/master | 2020-06-06T13:52:01.439264 | 2019-08-11T17:45:56 | 2019-08-11T17:45:56 | 192,757,363 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 753 | r | EM_binomial.R | n <- 10
# --- EM for a two-component binomial mixture --------------------------------
# Simulated truth: a fair coin picks component 1 (success prob 0.8) or
# component 2 (success prob 0.45); each y ~ Binomial(n, p_component).
# (`n` is defined above as the number of trials per observation.)
p <- c(0.6, 0.5)  # initial guesses for the two success probabilities
n_data <- 10000
coin <- rbinom(n_data, 1, 0.5)
ps <- coin*0.8 + (1-coin)*0.45
y <- rbinom(n_data, n, ps)
iter <- 0
dif <- 1000
eps <- 1e-10
# Expected complete-data negative log-likelihood (the Q-function, up to the
# constant mixing-weight terms); minimised in the M-step.
E <- function(p, weights) -sum(weights*dbinom(y, n, p[1], log=TRUE) + (1-weights)*dbinom(y, n, p[2], log=TRUE))
# M-step: minimise E over p, with both components constrained to (0, 1).
M <- function(p, weights) optim(p, E, weights=weights, lower=c(1e-10, 1e-10), upper=c(1-(1e-10), 1-(1e-10)),
                                method='L-BFGS-B')$par
while(dif > eps){
  # E-step: posterior probability each observation came from component 1
  # (the equal 0.5 mixing weights cancel from the ratio).
  weights <- dbinom(y, n, p[1]) / (dbinom(y, n, p[1]) + dbinom(y, n, p[2]))
  temp <- M(p, weights)
  # BUG FIX: the denominator previously mixed old and new parameters
  # (dbinom(y, n, p[1]) + dbinom(y, n, temp[2])). Both terms must use the
  # updated estimates `temp` for a valid posterior at the new parameters.
  weights_temp <- dbinom(y, n, temp[1]) / (dbinom(y, n, temp[1]) + dbinom(y, n, temp[2]))
  # Stop when the Q-function change between successive iterates is tiny.
  dif <- abs(E(p, weights)-E(temp, weights_temp))
  p <- temp
  iter <- iter + 1
}
p              # fitted success probabilities for the two components
iter           # number of EM iterations until convergence
mean(weights)  # average posterior membership of component 1
|
891e96610ea190411f26f571302cd3118ea4afc4 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/10066_3/rinput.R | e3c0f49f77731107f39b7112a352d74ed7646bb2 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | rinput.R | library(ape)
# Read the Newick tree for alignment 10066_3, remove its root, and write the
# unrooted tree back out (input for downstream codeml/PAML processing).
testtree <- read.tree("10066_3.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10066_3_unrooted.txt") |
58756df103fb77d56ec7a55780edd6e8746b717c | 738da1122b2097ac18722a7997cf8c64c0d349a8 | /R/linear_LDA.R | e14cfb7517e4997c18c1049e540d761454043ffc | [] | no_license | ZhHuiJun/Rdimtools | 21b1242f0765806fbc811ea01de3ab634543011e | 6e8f099d7f9952f1c2984a733461bf30cfe0a208 | refs/heads/master | 2020-06-17T13:38:55.693909 | 2018-12-21T13:00:06 | 2018-12-21T13:00:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,039 | r | linear_LDA.R | #' Linear Discriminant Analysis
#'
#' Linear Discriminant Analysis (LDA) originally aims to find a set of features
#' that best separate groups of data. Since we need \emph{label} information,
#' LDA belongs to a class of supervised methods of performing classification.
#' However, since it is based on finding \emph{suitable} projections, it can still
#' be used to do dimension reduction. We support both binary and multiple-class cases.
#' Note that the target dimension \code{ndim} should be \emph{less than or equal to} \code{K-1},
#' where \code{K} is the number of classes, or \code{K=length(unique(label))}. Our code
#' automatically gives bounds on user's choice to correspond to what theory has shown. See
#' the comments section for more details.
#'
#' @section Limit of Target Dimension Selection:
#' In unsupervised algorithms, selection of \code{ndim} is arbitrary as long as
#' the target dimension is lower-dimensional than original data dimension, i.e., \code{ndim < p}.
#' In LDA, it is \emph{not allowed}. Suppose we have \code{K} classes, then its formulation on
#' \eqn{S_B}, between-group variance, has maximum rank of \code{K-1}. Therefore, the maximal
#' subspace can only be spanned by at most \code{K-1} orthogonal vectors.
#'
#' @param X an \eqn{(n\times p)} matrix or data frame whose rows are observations
#' and columns represent independent variables.
#' @param label a length-\eqn{n} vector of data class labels.
#' @param ndim an integer-valued target dimension.
#'
#' @return a named list containing
#' \describe{
#' \item{Y}{an \eqn{(n\times ndim)} matrix whose rows are embedded observations.}
#' \item{trfinfo}{a list containing information for out-of-sample prediction.}
#' \item{projection}{a \eqn{(p\times ndim)} whose columns are basis for projection.}
#' }
#'
#' @examples
#' \dontrun{
#' ## generate data of 3 types with clear difference
#' dt1 = aux.gensamples(n=33)-100
#' dt2 = aux.gensamples(n=33)
#' dt3 = aux.gensamples(n=33)+100
#'
#' ## merge the data and create a label correspondingly
#' Y = rbind(dt1,dt2,dt3)
#' label = c(rep(1,33), rep(2,33), rep(3,33))
#'
#' ## perform onto 2-dimensional space
#' output = do.lda(Y, label, ndim=2)
#'
#' ## visualize
#' plot(output$Y[,1], output$Y[,2], main="3 groups on 2d plane")
#' }
#'
#' @references
#' \insertRef{fisher_use_1936}{Rdimtools}
#'
#' \insertRef{fukunaga_introduction_1990}{Rdimtools}
#'
#' @author Changhee Suh
#' @rdname linear_LDA
#' @export
do.lda <- function(X, label, ndim=2){
  ## Preprocessing
  # 1. data matrix: validate X and record its dimensions
  aux.typecheck(X)
  n = nrow(X)
  p = ncol(X)
  # 2. label vector: normalise labels and count distinct classes K
  label = check_label(label, n)
  ulabel = unique(label)
  K = length(ulabel)
  if (K==1){
    # LDA needs at least two classes to define between-group separation
    stop("* do.lda : 'label' should have at least 2 unique labelings.")
  }
  if (K==n){
    # every observation its own class: S_W degenerates; warn but proceed
    warning("* do.lda : given 'label' has all unique elements.")
  }
  if (any(is.na(label))||(any(is.infinite(label)))){
    stop("* Supervised Learning : any element of 'label' as NA or Inf will simply be considered as a class, not missing entries.")
  }
  # 3. ndim: must be a positive integer no larger than p
  if (!check_ndim(ndim,p)){
    stop("* do.lda : 'ndim' is a positive integer in [1,#(covariates)].")
  }
  ndim = as.integer(ndim)
  if (ndim>=K){
    # rank(S_B) <= K-1, so at most K-1 discriminant directions exist;
    # silently clamping would hide the change, so warn first
    warning("* do.lda : by the nature of LDA, target dimension 'ndim' is adjusted to match maximally permissible subspace.")
    ndim = (K-1)
  }
  # 4. perform CENTERING (column means removed; transform recorded in trfinfo
  #    so out-of-sample points can be mapped the same way)
  tmplist = aux.preprocess(X,type="center")
  trfinfo = tmplist$info
  trfinfo$algtype = "linear"
  pX = tmplist$pX
  ## Main Computation
  result = list()
  if (K==2){ ## 2-class case: single discriminant direction w ~ S_W^{-1}(m2 - m1)
    idx1 = which(label==ulabel[1])
    idx2 = which(label==ulabel[2])
    # within-class scatter (unnormalised; scaling does not change the direction)
    SW = lda_outer(pX[idx1,]) + lda_outer(pX[idx2,])
    # difference of class means, as a column vector
    mdiff = matrix(colMeans(pX[idx2,])-colMeans(pX[idx1,]))
    # solve S_W w = (m2 - m1) iteratively instead of forming S_W^{-1}
    RLIN = aux.bicgstab(SW, mdiff, verbose=FALSE)
    w = aux.adjprojection(as.matrix(RLIN$x))
    Y = pX%*%w
    result$Y = Y
    result$trfinfo = trfinfo
    result$projection = w
  } else { ## K-class case: at most (K-1) discriminant directions are available
    # 1. compute S_W : within-group scatter for the multiclass problem
    SW = array(0,c(p,p))
    for (i in 1:K){
      idxnow = which(label==ulabel[i])
      SW = SW + lda_outer(pX[idxnow,])
    }
    # 2. compute S_B : between-group scatter, each class weighted by its size
    SB = array(0,c(p,p))
    m = colMeans(pX)
    for (i in 1:K){
      idxnow = which(label==ulabel[i])
      Nk = length(idxnow)
      mdiff = (colMeans(pX[idxnow,])-m)
      SB = SB + Nk*outer(mdiff,mdiff)
    }
    # solve S_W W = S_B, then take the top-ndim eigenvectors of W = S_W^{-1} S_B
    RLIN = aux.bicgstab(SW, SB, verbose=FALSE)
    W = RLIN$x
    topW = aux.adjprojection(RSpectra::eigs(W, ndim)$vectors)
    Y = pX%*%topW
    result$Y = Y
    result$trfinfo = trfinfo
    result$projection = topW
  }
  ## Return results
  return(result)
}
## sum of outer products to calculate S_B and S_W
#' @keywords internal
#' @noRd
lda_outer <- function(X){
  # Sum of outer products of the rows of X, i.e. t(X) %*% X.
  # Replaces the original O(n) loop of rank-1 `outer()` updates with a single
  # BLAS call; `unname()` keeps the result free of dimnames, matching the
  # original accumulator built from array(0, c(p, p)).
  unname(crossprod(X))
}
|
b2aa4824fe77fb9f30f0dcd94fa94463aa84b752 | 65e296e32ea26aad460312d7f4db5150b8fb402f | /plot2.R | 162cb08e16554dbe11fac7fa1b71ed941c6fa08a | [] | no_license | rfschtkt/ExData_Plotting1 | ffaf50dea24633d2df0cb375eeead70ccce39631 | 2143588e702c33302071828266afe178deadc833 | refs/heads/master | 2021-01-18T10:08:52.973110 | 2014-08-10T13:09:57 | 2014-08-10T13:09:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 870 | r | plot2.R | url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# Download the household power consumption archive once; both the zip and the
# extracted text file are cached on disk so reruns skip the download.
zipfile <- "exdata_data_household_power_consumption.zip"
if (!file.exists(zipfile)) download.file(url, zipfile, method="curl")
file <- "household_power_consumption.txt"
if (!file.exists(file)) unzip(zipfile)
# TODO: could we read directly from the zipped file?
# how to select rows during loading?
# and without being particular about Date formatting?
# Read the full file ("?" marks missing values), then keep only the rows for
# 2007-02-01 and 2007-02-02.
data <- read.csv(file, sep=";", na.strings="?")
data <- data[as.Date(data$Date, "%d/%m/%Y")==as.Date("2007-02-01") |
             as.Date(data$Date, "%d/%m/%Y")==as.Date("2007-02-02") ,]
# Plot Global Active Power as a time series into plot2.png; the device is
# opened before drawing and closed afterwards.
png(file = "plot2.png")
plot(strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S"),
     as.numeric(data$Global_active_power),
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
671ac54727a5244375b480e5c07dbbe847e67481 | 29e6954af28c27891bca4270b5d7dc00ed3f1d2c | /plot1.R | 907b19419b2fd156667e328e22bf6a2a1b31bd0a | [] | no_license | manastiwari/Exp_Data_Analysis_Assignment1 | 00277a51d8f7fbee137e5e5856eb5653a493c7e0 | a1bf24c3b9b455432718a8a6d0354667bfdb23bb | refs/heads/master | 2020-03-09T07:00:48.777562 | 2018-04-08T15:43:29 | 2018-04-08T15:43:29 | 128,654,643 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 568 | r | plot1.R | setwd('C:/Users/hp/Desktop/6th sem/Data Science/Exploratory Data Analysis')
# Read the full power-consumption extract ("?" encodes missing values).
dat1 <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";",
                 na.strings = "?", nrows = 2075259, check.names = FALSE,
                 stringsAsFactors = FALSE, comment.char = "", quote = '\"')
# Keep only the two target days (Date is still a character column here).
subdat1 <- subset(dat1, Date %in% c("1/2/2007","2/2/2007"))
subdat1$Date <- as.Date(subdat1$Date, format="%d/%m/%Y")
# BUG FIX: the PNG device must be opened BEFORE drawing. The original script
# drew the histogram on the default screen device and only then opened and
# immediately closed the PNG device, writing an empty plot1.png.
png("plot1.png", width=480, height=480)
hist(subdat1$Global_active_power, main="Global Active Power",
     xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
dev.off() |
aae5353177647f4c62b9c155f12f26c7fbb128f1 | eb08ab7f3a97936b26ebacfe098e77e9a1754c8d | /man/init.Rd | 9c5de84ad6dec09db0032f52575c2ddb59d32daa | [] | no_license | marcalva/diem | 2bc0cd6ba3059984ad1ef17ca42d3399a7acef63 | a9a4d9d5f4d2a72a553e8b41afe10785c19504e8 | refs/heads/master | 2022-12-23T19:42:53.503040 | 2022-12-20T04:35:28 | 2022-12-20T04:35:28 | 184,798,028 | 9 | 6 | null | null | null | null | UTF-8 | R | false | true | 2,267 | rd | init.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/init_clust.R
\name{init}
\alias{init}
\title{Initialize parameters}
\usage{
init(x, k_init = 20, use = "kmeans", droplets.use = NULL,
n_sample = NULL, nstart_init = 30, min_size_init = 10,
fixed = NULL, model = "mltn", psc = 1e-10, seedn = 1,
threads = threads, verbose = TRUE, ...)
}
\arguments{
\item{x}{An SCE object.}
\item{k_init}{The number of clusters to initialize, not including
fixed background clusters.}
\item{use}{The method to use for clustering, one of either
"kmeans" or "hclust."}
\item{droplets.use}{Specify droplet IDs to use for initializing
parameters.}
\item{n_sample}{The number of droplets to sample from for initializing
parameters.}
\item{nstart_init}{The number of starts to use in k-means for
for the initialization.}
\item{min_size_init}{The minimum number of droplets that must belong
to an initialized cluster.}
\item{fixed}{A named integer vector that specifies which droplets to
fix to which clusters. If \code{NULL}, then sets background droplets
to 1.}
\item{model}{The mixture model to assume. Can be either "DM" for
a Dirichlet-multinomial or "mltn" for a multinomial.}
\item{psc}{Pseudocount to add to estimation of gene probabilities.}
\item{seedn}{Random seed.}
\item{threads}{Number of threads.}
\item{verbose}{Verbosity.}
\item{...}{Additional parameters to pass to hclust or kmeans.}
}
\value{
An SCE object
}
\description{
Initialize the parameters of the mixture model using
k-means or hierarchical clustering on the PCs.
}
\examples{
\donttest{
# Initialize parameters with default values and multiple threads
sce <- init(sce, threads = 8)
# Specify initial k of 10 cell type clusters
sce <- init(sce, k_init = 10, threads = 8)
# Initialize parameters for Dirichlet-multinomial
sce <- init(sce, k_init = 10, model = "DM", threads = 8)
# Set seedn to NULL for random starts
sce <- init(sce, k_init = 10, seedn = NULL, threads = 8)
sce <- init(sce, k_init = 10, seedn = NULL, threads = 8)
sce <- init(sce, k_init = 10, seedn = NULL, threads = 8)
# Specify initial k of 30 cell type clusters and
# Only allow clusters with at least 30 droplets
sce <- init(sce, k_init = 30, min_size_init = 30, threads = 8)
}
}
|
109ce3cbbd014b6774817c9cb154ec39959b661e | 79aa1613f924627f22e4bc60238bd65b3566d61e | /R/data_NYSE.R | 74b7eb3f82716fa886947e68cfc589fc3b61dc85 | [
"MIT"
] | permissive | tsurf101/olpsR | 4d027e7e57bb7650ccd32cd99178464aa6fe6ac4 | 56cdb47725eb1348223355b0c83a42d9f229c1af | refs/heads/master | 2021-05-17T17:33:51.727673 | 2020-03-28T21:36:42 | 2020-03-28T21:36:42 | 250,898,378 | 0 | 0 | NOASSERTION | 2020-03-28T21:36:09 | 2020-03-28T21:36:09 | null | UTF-8 | R | false | false | 1,832 | r | data_NYSE.R | # =============================================#
# Documentation for NYSE dataset with roxygen2 #
# =============================================#
#' NYSE daily returns
#'
#' The dataset contains daily returns of 36 stocks listed in the
#' New York Stock Exchange from 1962-07-03 until 1984-12-31, that is 5651 trading days.
#' Returns are calculated as closing price divided by the closing price
#' of the privious day (price relative).
#' The dataset was used for the analysis of Cover's \code{Universal Portfolio} algorithm for example
#'
#' @format A data frame with 5651 observations on the following 36 stocks.
#'
#' @details The following stocks are included:
#' \itemize{
#' \item{\code{ahp}}
#' \item{\code{alco}}
#' \item{\code{amerb}}
#' \item{\code{arco}}
#' \item{\code{coke}}
#' \item{\code{comme}}
#' \item{\code{dow}}
#' \item{\code{dupont}}
#' \item{\code{espey}}
#' \item{\code{exxon}}
#' \item{\code{fisch}}
#' \item{\code{ford}}
#' \item{\code{ge}}
#' \item{\code{gm}}
#' \item{\code{gte}}
#' \item{\code{gulf}}
#' \item{\code{hp}}
#' \item{\code{ibm}}
#' \item{\code{inger}}
#' \item{\code{iroqu}}
#' \item{\code{jnj}}
#' \item{\code{kimbc}}
#' \item{\code{kinar}}
#' \item{\code{kodak}}
#' \item{\code{luken}}
#' \item{\code{meico}}
#' \item{\code{merck}}
#' \item{\code{mmm}}
#' \item{\code{mobil}}
#' \item{\code{morris}}
#' \item{\code{pandg}}
#' \item{\code{pills}}
#' \item{\code{schlum}}
#' \item{\code{sears}}
#' \item{\code{sherw}}
#' \item{\code{tex}}
#' }
#'
#' @usage data(NYSE)
#'
#' @source Originally collected by Hal Stern the data here is provided by
#' Yoram Singer \url{http://www.cs.bme.hu/~oti/portfolio/data.html}
#'
#' @references
#' Cover, T. M.
#' Universal Portfolios, 1991
#'
#' @docType data
#' @keywords datasets
#' @name NYSE
NULL
|
6e7056e472f0fa7063e75699ba54c236ec7bdb1e | 9ab62e21cc4ac8da5b938c9e720a19dae68f1f88 | /ScalarCoupling/explore1.R | f32d7895efa2df3b5612fe9365f0f0255379fc0d | [] | no_license | CollinErickson/Kaggle | 28d9efbc6c96f469c98b7ad512ec6046fd219e49 | a2c6e50f59850afa9d0fbe0f14ea63016560f65d | refs/heads/master | 2020-05-29T08:48:53.828236 | 2019-07-16T13:26:46 | 2019-07-16T13:26:46 | 69,776,048 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 768 | r | explore1.R | library(magrittr); library(dplyr)
# Exploratory look at the Kaggle "Predicting Molecular Properties" auxiliary
# files: for each table, inspect structure/summary and the pairwise
# relationships (scatterplot matrix + correlations) of its numeric columns.
# Column 1 is the molecule name in each file, hence the `2:k` selections.
dipole_moments <- read.csv('./ScalarCoupling/data/dipole_moments.csv', stringsAsFactors = F)
dipole_moments %>% str
dipole_moments %>% summary
dipole_moments[,2:4] %>% pairs
dipole_moments[,2:4] %>% cor
magnetic <- read.csv('./ScalarCoupling/data/magnetic_shielding_tensors.csv', stringsAsFactors = F)
magnetic %>% str
magnetic %>% summary
magnetic[,2:11] %>% pairs
magnetic[,2:11] %>% cor
mulliken <- read.csv('./ScalarCoupling/data/mulliken_charges.csv', stringsAsFactors = F)
mulliken %>% str
mulliken %>% summary
mulliken[,2:3] %>% pairs
mulliken[,2:3] %>% cor
potential <- read.csv('./ScalarCoupling/data/potential_energy.csv', stringsAsFactors = F)
potential %>% str
potential %>% summary
# single numeric column: just look at its distribution
potential$potential_energy %>% hist
|
92e538173697ad354c82d88bd618498f58689db8 | 774d499250bfa17fa9d4b4058af97ed5d91b9709 | /man/dget_l_ISO.Rd | f029e533603e88491bcbb2682ba7d7b385073cee | [
"0BSD",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | pboesu/DEButilities | 495a20dea2ff373df32472a23d924052b13a535b | e2abb26d428207893dbb68212f43c994803a4659 | refs/heads/master | 2021-09-05T16:43:20.687795 | 2018-01-29T18:01:57 | 2018-01-29T18:01:57 | 114,688,190 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 390 | rd | dget_l_ISO.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dget_l_ISO.R
\name{dget_l_ISO}
\alias{dget_l_ISO}
\title{Differential equation}
\usage{
dget_l_ISO(vH, l, pars)
}
\arguments{
\item{vH}{scalar with scaled maturity}
\item{l}{scalar with scaled length}
\item{pars}{5-vector k, lT g f sM}
}
\value{
dl: scalar with d/dvH l
}
\description{
Differential equation
}
|
e52f05c499e8adcbdc123a055ee6c01a17a632bf | bae0c518f1e2c8cec2eff5b2d2c64e39427543b8 | /kubobook_2012/distribution/plot.randest.R | 7e33ecf56cf506b0757f65c2d3b23bd16c6dc686 | [] | no_license | yoshiki146/Stat_Modelling_for_Data_Analysis | 9844e4424731d5f7584b1d445f555b892df48e35 | 8e7383f42b0f0af03f781618501f144264a4929d | refs/heads/master | 2020-03-19T09:25:30.982914 | 2018-06-12T11:46:46 | 2018-06-12T11:46:46 | 136,287,546 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,050 | r | plot.randest.R | # R --vanilla!
# COMMON.R is expected to define the observed count vector `data` and the
# device helper `dev.on()` (opens a figure file) -- TODO confirm against COMMON.R.
source("COMMON.R")
N <- length(data)
mean.y <- mean(data)       # sample mean (ML estimate of the Poisson mean)
mean.true <- 3.5           # the "true" mean used to simulate comparison data
y <- 0:9                   # support shown on the x-axis
ymax <- 11
# Open a figure file and draw an empty frame with fixed x-range `y` and
# y-range [0, ymax]; subsequent calls add content to this frame.
plot.frame <- function(file, ymax = 14, width = 2.0, height = 2.0, xlab = "y")
{
  dev.on(file, width = width, height = height)
  par(mar = c(2.4, 1.5, 0.1, 0.1), mgp = c(1.3, 0.6, 0), cex = 1.2)
  plot(
    c(), c(),
    type = "n",
    xlab = xlab,
    ylab = "",
    yaxs = "i",
    axes = FALSE,
    xlim = range(y),      # note: reads the global `y` defined above
    ylim = c(0, ymax),
  )
  axis(1)
  box()
}
# Overlay the outline of a histogram of counts vy (integer-width bins
# centred on 0..12) on the current frame.
add.hist <- function(vy)
{
  hs <-hist(vy, breaks = seq(-0.5, 12.5, 1), plot = FALSE)
  lines(hs)
}
# histogram of the observed data
plot.frame("randestH0")
add.hist(data)
dev.off()
# expected counts under the true Poisson mean
prob <- dpois(y, lambda = mean.true) # !!!
plot.frame("randestD0")
lines(y, prob * N)
points(y, prob * N, col = "#808080", pch = 19)
dev.off()
# expected counts under the estimated (sample-mean) Poisson mean
prob <- dpois(y, lambda = mean.y) # !!!
plot.frame("randestE0")
lines(y, prob * N, type = "b", lty = 3)
dev.off()
# new data: three fresh simulated samples, each overlaid with the fitted curve
for (i in 1:3) {
  plot.frame(sprintf("randestH%i", i), xlab = "")
  add.hist(rpois(N, mean.true))
  lines(y, prob * N, type = "b", lty = 3)
  dev.off()
}
|
ef4ec72662950dfb6d6666cd792f3e51089a2056 | c90d02d3e5f865f613b9646904ca99f7e0412b01 | /polycharts_versions/figure5_12.R | 678e8327383ac690544be04e4d477d07a58ca510 | [] | no_license | arturochian/rCharts_lattice_book | b7fd91fe68b0721e6fced72b90c62990332780c5 | 367ca3f18cf2f7696d4b5fb5e8b82133a1817aa6 | refs/heads/master | 2021-01-23T00:15:59.741297 | 2013-05-04T03:51:32 | 2013-05-04T03:51:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 679 | r | figure5_12.R | #Figure 5.12
# Reproduces Figure 5.12 of Sarkar's "Lattice" book with rCharts/polycharts.
#http://www.amazon.com/Lattice-Multivariate-Data-Visualization-Use/dp/0387759689/ref=cm_cr_pr_product_top
#not sure if this is possible since no type = "h" in polycharts
require(rCharts)
require(reshape2)
data(SeatacWeather, package = "latticeExtra")
# reshape to long format: one row per (month, day, year, variable, value)
SeatacWeather.melt <- melt( SeatacWeather, id.var = c( "month", "day", "year" ) )
# remove the troublesome "." from variable names (e.g. "max.temp" -> "maxtemp"),
# since dots in field names confuse the JavaScript layer
SeatacWeather.melt$variable <- gsub("[.]", "", SeatacWeather.melt$variable)
# line chart of temperature extremes and precipitation by day, one panel per month
chart5_12 <- rPlot(
  value ~ day | month,
  color = "variable",
  data = SeatacWeather.melt[
    which( SeatacWeather.melt$variable %in% c( "maxtemp", "mintemp", "precip" ) ),
  ],
  type = 'line'
)
chart5_12
|
c8c835b8bc53d96b1cfe40267a4a4ab0a7d749cc | df0b75acae3011ad458de0f80a99b92d1e0c30e3 | /q54.R | fd348901130622bee5cdb047455b2832f2a974fb | [] | no_license | agamat/portanalysis-approblems | f3dfe571aa35a5777a9b174e7eb640748445fd24 | 26c5b1011df882fbbcb50faeada9d305df8bfba4 | refs/heads/master | 2020-12-27T14:46:32.398423 | 2013-11-12T21:05:57 | 2013-11-12T21:05:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 748 | r | q54.R | source("q53.R")
# efficientp.R supplies the portfolio helpers used below
# (findMVP, mv.coords.gen); q53.R (sourced above) supplies the weekly data
# (weekly.stocks, weekly.sp.returns) and fapply/returns -- TODO confirm.
source("efficientp.R")
# weekly simple returns for the individual stocks, complete cases only
weekly.stocks.returns <- na.omit(fapply(weekly.stocks, FUN=returns))
# same universe augmented with the S&P index returns
weekly.combined.returns <- cbind(weekly.stocks.returns,
                                weekly.sp.returns)
# annualise weekly means and covariances (52 trading weeks per year)
ws.means <- colMeans(weekly.stocks.returns) * 52
wc.means <- colMeans(weekly.combined.returns) * 52
ws.sigma <- cov(weekly.stocks.returns) * 52
wc.sigma <- cov(weekly.combined.returns) * 52
# minimum-variance portfolios targeting a 15% annual return,
# with and without the index asset
ws.weights <- findMVP(ws.sigma, ret=ws.means, target.return=.15)
wc.weights <- findMVP(wc.sigma, ret=wc.means, target.return=.15)
# (risk, return) coordinates of each portfolio; element 1 is the risk
ws.rr <- mv.coords.gen(ws.sigma, ws.means)(ws.weights)
wc.rr <- mv.coords.gen(wc.sigma, wc.means)(wc.weights)
# risk reduction achieved by adding the index to the opportunity set
cat(sprintf("\nAmount of risk eliminated: %.2f%%\n",
           100 * (ws.rr[1] - wc.rr[1])))
|
2ff9f7dc357ef1c71fe7dc6fae90fcce6bd9f43c | a8447e92df267b33bd5da90668db75b4677e552c | /R/hist.data.frame.R | 36a28516938afa13ed3f7d6bff632fec8f55bce3 | [] | no_license | tianwei-zhang/tianweiR | 9a906e56ef0c55b21e612776d776fe1295a3e496 | de89656bce58b6cb241c85b83182ebc65c720e3e | refs/heads/master | 2021-01-20T05:08:37.740307 | 2017-10-05T14:33:18 | 2017-10-05T14:33:18 | 101,416,744 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 372 | r | hist.data.frame.R | ### need to test and debug later
# Draw a histogram for every numeric column of a data frame.
#
# NOTE: the name makes this an S3 method for hist() on data frames, but the
# signature (data, n) does not match the generic hist(x, ...) -- kept as-is
# for backward compatibility with existing callers.
#
# Args:
#   data: a data frame (or an object coercible to one); non-numeric columns
#         are skipped.
#   n:    suggested number of breaks, forwarded to hist()'s `breaks` argument
#         (it was passed positionally in the original).
#
# Returns: invisible(NULL); called for its plotting side effect.
hist.data.frame <- function(data, n) {
  data <- as.data.frame(data)
  # which() + vapply() replaces seq(1:ncol(data))[sapply(...)]: type-stable
  # and correct even when there are no numeric columns (empty loop).
  num_cols <- which(vapply(data, is.numeric, logical(1)))
  for (i in num_cols) {
    hist(data[, i], breaks = n,
         main = paste("Distribution of", colnames(data)[i]),
         xlab = colnames(data)[i])
  }
  invisible(NULL)
}
cbe096922cd532520fe15c4e8995cc3435413afb | ea273dda5dbf981c3db326d913a554f95888d399 | /data/espn_games.R | 2ce8cc59df6f53930a3984793de1766e9e0096a7 | [] | no_license | johnson-shuffle/fantasy | b54d620f3afe7d160c3c4e392a77ec5e30fdfc98 | ef6d0511e34974ea02515cfbf3f92053fbf43f3d | refs/heads/master | 2020-03-31T05:47:49.483326 | 2018-10-07T22:00:59 | 2018-10-07T22:00:59 | 151,959,104 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,038 | r | espn_games.R | #---------------------------------------------------------------------------------
# Preample
#---------------------------------------------------------------------------------
rm(list = ls())
source("~/GoogleDrive/Research/Fantasy/fantasy.Rprofile")
#---------------------------------------------------------------------------------
# Game info
#---------------------------------------------------------------------------------
#url<-"http://scores.espn.go.com/nfl/scoreboard?"
#weeks <- c(1:17)
#years <- c(2006:2014)
#game.info <- NULL
#for (y in years) {
# for (w in weeks) {
# info <- get.game(w,y)
# game.info <- rbind(game.info,info)
# Sys.sleep(3)
# print(paste("Week",w,"in",y,"done"))
# }
#}
# Postponed game: 2014 - Wk 12
#game.info <- subset(game.info,GAME.ID=="400554331")
# Save
#save(game.info, file = paste0(dsource, "espn_info_games.Rda"))
#---------------------------------------------------------------------------------
# Game Stats
#---------------------------------------------------------------------------------
# Load the game metadata scraped above (restores `game.info`).
load(file = paste0(dsource, "espn_info_games.Rda"))
team <- NULL # Team stats
indiv <- NULL # Individual stats
# BUG FIX: the loop previously ran over 1:nrow(missing), but `missing` is
# never defined in this script (it resolves to the base R function), while
# the loop body indexes game.info. Iterate over the rows of game.info, and
# use seq_len() so an empty table yields zero iterations rather than 1:0.
for (i in seq_len(nrow(game.info))) {
  # Pause between iterations to avoid hammering the server
  Sys.sleep(2)
  # Fetch the box-score tables for this game; pages with fewer than 21
  # tables are incomplete (e.g. postponed/unplayed) and are skipped.
  check <- game.info$GAME.LINK[i] %>%
    html_session %>%
    html_table(fill = TRUE, header = FALSE)
  if (length(check) < 21) {
    next
  }
  foo <- get.stats(game.info[i,])
  # Bind (rbind.fill pads columns missing from either side with NA)
  team <- plyr::rbind.fill(team,foo$Team)
  indiv <- plyr::rbind.fill(indiv,foo$Individual)
  # Clean up (the unused `foo.names` local from the original was dropped)
  rm(foo)
  # Checkpoint after every game so a crash loses at most one iteration
  save(list = c("team", "indiv"), file = paste0(dsource,"espn_stats_games.Rda"))
  # Status
  print(paste0(i, " / ", nrow(game.info)))
}
#---------------------------------------------------------------------------------
# Save
#---------------------------------------------------------------------------------
save(list = c("team", "indiv"), file = paste0(dsource, "espn_stats_games.Rda"))
|
242d5f4bcaeebd2ff6649480d5d6a55b6ae60b66 | 8fe38811d463b585c25fbc86678e2a84d22e1af9 | /cchic_demographic_extract.R | 9cc7be5b87e13c31d1139db6a4375d717e0b2a6e | [] | no_license | ha05069/msc_dissertation | 730d2174f9da97475d554f6b1a3aa13adee128fa | 9ee3c4334a40c7390edada1b7c127e7bb13f5f94 | refs/heads/master | 2020-07-02T21:52:55.764823 | 2019-08-12T10:13:39 | 2019-08-12T10:13:39 | 201,678,254 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,104 | r | cchic_demographic_extract.R | # Extract and join CCHIC demographic data
# establish connection to HIC database and retrieve tables
# NOTE(review): the right-hand sides below appear to have been redacted
# (credentials/connection details removed before publishing) -- as written,
# R parses these as a chained assignment into the next expression. Restore
# the real connection and table-retrieval calls before running.
ctn <-
tbls <-
# extract demographic codes: map NIHR HIC item codes to readable column names
demographic_codes <-
  tribble(
    ~hic_codes, ~ short_name,
    "NIHR_HIC_ICU_0001", "pas_number",
    "NIHR_HIC_ICU_0411", "admission_dt",
    "NIHR_HIC_ICU_0412", "discharge_dt",
    "NIHR_HIC_ICU_0033", "dob",
    "NIHR_HIC_ICU_0093", "sex",
    "NIHR_HIC_ICU_0398", "admission_type",
    "NIHR_HIC_ICU_0399", "primary_admission_reason",
    "NIHR_HIC_ICU_0409", "apache_score",
    "NIHR_HIC_ICU_0410", "apache_prob")
# build the core/reference tables needed for episode validation
cor_tbl <- make_core(ctn)
ref_tbl <- make_reference(ctn)
# extract all HIC episodes with their lengths
episodes <- epi_length(cor_tbl, ref_tbl, tbls[["events"]])
# extract valid episodes for the study site only (site code anonymised here)
episodes_site<-
  episodes %>%
  filter(site == "xxx") %>%
  filter(validity == 0)
# extract the required demographic variable data for the site,
# one row per episode with the renamed columns from demographic_codes
demographic_data <-
  tbls$provenance %>%
  filter(site == "xxx") %>%
  select(file_id) %>%
  inner_join(tbls$episodes, by = c("file_id" = "provenance")) %>%
  inner_join(tbls$events, by = "episode_id") %>%
  extract_demographics(events = .,
                       metadata = tbls$variables,
                       code_names = demographic_codes$hic_codes,
                       rename = demographic_codes$short_name)
# select only the demographic data for the valid episodes
valid_demographics <-
  left_join(episodes_site, demographic_data, by = "episode_id")
# join transfusion events (from `transfused`, built earlier) to demographics
transfusion <-
  left_join(transfused, valid_demographics, by = c("patient_id" = "pas_number"))
# drop unmatched transfusions and tidy the suffixed join columns
transfusion <-
  transfusion %>%
  filter(!is.na(episode_id)) %>%
  rename(transfusion_dttm = "chart_datetime.x",
         transfusion_chartday = "chart_day",
         cons_dttm = "chart_datetime.y") %>%
  select(-encounter_id, - site.y, - validity, - discharge_dt, - admission_dt)
# filter for valid transfusion episodes
# (ensure transfusion occurs during admission window)
transfusion <-
  transfusion %>%
  mutate(valid_transfusion =
           transfusion_dttm <= epi_end_dttm &
           transfusion_dttm >= epi_start_dttm) %>%
  filter(valid_transfusion) # 4175 observations
transfusion <-
  transfusion %>%
  select(-site.x)
# tidy demographic variables
transfusion <- # derive age (whole years) at transfusion from dob
  transfusion %>%
  mutate(age = floor(difftime(transfusion_dttm, dob, units = "weeks"))) %>%
  mutate(age = floor(age / 52)) %>%
  select(-dob)
transfusion <- # truncate length of stay to whole days (floor rounds down)
  transfusion %>%
  mutate(los = floor(los))
# join to previously hand cleaned consultant IDs:
cons_clean <-
  read.csv("cons_clean.csv")
# replace the raw consultant field with the hand-cleaned identifier
transfusion <-
  transfusion %>%
  left_join(cons_clean, by = "consultant") %>%
  select(-consultant) %>%
  rename(consultant = "clean_consultant")
|
40d74c4d23f8205fda5c897a6c6be0f170d16018 | 366ec1e0e81f9d8c40e2fde01efa44d640c67daa | /man/anlvm.fit.Rd | af62b61c865f07cf8158d27e945ac94a86841ab2 | [] | no_license | tjfarrar/skedastic | 20194324833b8f2f20e5666b642cff617159588c | 050e6a177a28fb0cc2054b506a53b09d6859e3c7 | refs/heads/master | 2022-11-17T22:41:43.930246 | 2022-11-06T06:39:13 | 2022-11-06T06:39:13 | 219,455,416 | 6 | 0 | null | null | null | null | UTF-8 | R | false | true | 11,881 | rd | anlvm.fit.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/anlvm.fit.R
\name{anlvm.fit}
\alias{anlvm.fit}
\title{Auxiliary Nonlinear Variance Model}
\usage{
anlvm.fit(
mainlm,
g,
M = NULL,
cluster = FALSE,
varselect = c("none", "hettest", "cv.linear", "cv.cluster", "qgcv.linear",
"qgcv.cluster"),
nclust = c("elbow.swd", "elbow.mwd", "elbow.both"),
clustering = NULL,
param.init = function(q) stats::runif(n = q, min = -5, max = 5),
maxgridrows = 20L,
nconvstop = 3L,
zerosallowed = FALSE,
maxitql = 100L,
tolql = 1e-08,
nestedql = FALSE,
reduce2homosked = TRUE,
cvoption = c("testsetols", "partitionres"),
nfolds = 5L,
...
)
}
\arguments{
\item{mainlm}{Either an object of \code{\link[base]{class}} \code{"lm"}
(e.g., generated by \code{\link[stats]{lm}}), or
a list of two objects: a response vector and a design matrix. The objects
are assumed to be in that order, unless they are given the names
\code{"X"} and \code{"y"} to distinguish them. The design matrix passed
in a list must begin with a column of ones if an intercept is to be
included in the linear model. The design matrix passed in a list should
not contain factors, as all columns are treated 'as is'. For tests that
use ordinary least squares residuals, one can also pass a vector of
residuals in the list, which should either be the third object or be
named \code{"e"}.}
\item{g}{A numeric-valued function of one variable, or a character denoting
the name of such a function. \code{"sq"} is allowed as a way of denoting
\code{function(x) x ^ 2}.}
\item{M}{An \eqn{n\times n} annihilator matrix. If \code{NULL}
(the default), this will be calculated from the \code{mainlm} object}
\item{cluster}{A logical; should the design matrix X be replaced with an
\eqn{n\times n_c} matrix of ones and zeroes, with a single one in each
row, indicating assignments of the \eqn{n} observations to \eqn{n_c}
clusters using an agglomerative hierarchical clustering algorithm. In
this case, the dimensionality of \eqn{\gamma} is \eqn{n_c} and not
\eqn{p}. Defaults to \code{FALSE}}
\item{varselect}{Either a character indicating how variable selection should
be conducted, or an integer vector giving indices of columns of the
predictor matrix (\code{\link[stats]{model.matrix}} of \code{mainlm})
to select. The vector must include \code{1L} for the intercept to be
selected. If a character, it must be one of the following:
\itemize{
\item \code{"none"}: No variable selection is conducted;
\item \code{"hettest"}: Variable selection is conducted by applying a
heteroskedasticity test with each feature in turn serving as the
`deflator' variable
\item \code{"cv.linear"}: Variable selection is conducted by best subset
selection on the auxiliary linear variance model (linear specification),
using squared-error loss computed under \eqn{K}-fold cross-validation
\item \code{"cv.cluster"}: Variable selection is conducted by best subset
selection on the auxiliary linear variance model (clustering
specification), using squared-error loss computed under \eqn{K}-fold
cross-validation
\item \code{"qgcv.linear"}: Variable selection is conducted by best subset
selection on the auxiliary linear variance model (linear specification),
using squared-error loss computed under quasi-generalised
cross-validation
\item \code{"qgcv.cluster"}: Variable selection is conducted by best subset
selection on the auxiliary linear variance model (clustering
specification), using squared-error loss computed under
quasi-generalised cross-validation
}}
\item{nclust}{A character indicating which elbow method to use to select
the number of clusters (ignored if \code{cluster} is \code{FALSE}).
Alternatively, an integer specifying the number of clusters}
\item{clustering}{A list object of class \code{"doclust"}. If set to
\code{NULL} (the default), such an object is generated (ignored if
\code{cluster} is \code{FALSE})}
\item{param.init}{Specifies the initial values of the parameter vector to
use in the Gauss-Newton fitting algorithm. This can either be a function
for generating the initial values from a probability distribution, a
list containing named objects corresponding to the arguments of
\code{\link[base]{seq}} (specifying a sequence of scalar values that
will be passed to \code{\link[base]{expand.grid}}), or a numeric vector
specifying a single initial parameter vector}
\item{maxgridrows}{An integer indicating the maximum number of initial
values of the parameter vector to try, in case of \code{param.init}
being a function or a list used to generate a grid. Defaults to
\code{20L}.}
\item{nconvstop}{An integer indicating how many times the quasi-likelihood
estimation algorithm should converge before the grid search across
different initial parameter values is truncated. Defaults to \code{3L}.
If \code{nconvstop >= maxgridrows}, no early stopping rule will be used.}
\item{zerosallowed}{A logical indicating whether 0 values are acceptable
in the initial values of the parameter vector. Defaults to \code{FALSE}.}
\item{maxitql}{An integer specifying the maximum number of iterations to
run in the Gauss-Newton algorithm for quasi-likelihood estimation.
Defaults to \code{100L}.}
\item{tolql}{A double specifying the convergence criterion for the
Gauss-Newton algorithm; defaults to \code{1e-8}. The criterion is applied
to the \code{L_2} norm of the difference between parameter vectors in
successive iterations.}
\item{nestedql}{A logical indicating whether to use the nested updating step
suggested in \insertCite{Seber03;textual}{skedastic}. Defaults to
\code{FALSE} due to the large computation time required.}
\item{reduce2homosked}{A logical indicating whether the homoskedastic
error variance estimator \eqn{e'e/(n-p)} should be used if the
variable selection procedure does not select any variables. Defaults to
\code{TRUE}.}
\item{cvoption}{A character, either \code{"testsetols"} or
\code{"partitionres"}, indicating how to obtain the observed response
values for each test fold when performing \eqn{K}-fold cross-validation
on an ALVM. The default technique, \code{"testsetols"}, entails fitting
a linear regression model to the test fold of observations from the
original response vector \eqn{y} and predictor matrix \eqn{X}. The
squared residuals from this regression are the observed
responses that are predicted from the trained model to compute the
cross-validated squared error loss function. Under the other technique,
\code{"partitionres"}, the squared residuals from the full
linear regression model are partitioned into training and test folds and
the squared residuals in the test fold are the observed responses that
are predicted for computation of the cross-validated loss.}
\item{nfolds}{An integer specifying the number of folds \eqn{K} to use for
cross-validation, if the \eqn{\lambda} and/or \eqn{n_c} hyperparameters
are to be tuned using cross-validation. Defaults to \code{5L}. One must
ensure that each test fold contains at least \eqn{p+1} observations if
the \code{"testsetols"} technique is used with cross-validation, so that
there are enough degrees of freedom to fit a linear model to the test
fold.}
\item{...}{Other arguments that can be passed to (non-exported) helper
functions, namely:
\itemize{
\item \code{greedy}, a logical passed to the functions implementing best subset
selection, indicating whether or not to use a greedy search rather than
exhaustive search for the best subset. Defaults to \code{FALSE}, but
coerced to \code{TRUE} unconditionally if \eqn{p>9}.
\item \code{distmetric}, a character specifying the distance metric to use in
computing distance for the clustering algorithm. Corresponds to the
\code{method} argument of \code{\link[stats]{dist}} and defaults to
\code{"euclidean"}
\item \code{linkage}, a character specifying the linkage rule to use in
agglomerative hierarchical clustering. Corresponds to the \code{method}
argument of \code{\link[stats]{hclust}} and defaults to
\code{"complete"}
\item \code{alpha}, a double specifying the significance level threshold to use
when applying heteroskedasticity test for the purpose of feature
selection in an ALVM; defaults to \code{0.1}
\item \code{testname}, a character corresponding to the name of a function that
performs a heteroskedasticity test. The function must either be one that
takes a \code{deflator} argument or \code{\link{breusch_pagan}}. Defaults
to \code{evans_king}
}}
}
\value{
An object of class \code{"anlvm.fit"}, containing the following:
\itemize{
\item \code{coef.est}, a vector of parameter estimates, \eqn{\hat{\gamma}}
\item \code{var.est}, a vector of estimates \eqn{\hat{\omega}} of the error
variances for all observations
\item \code{method}, either \code{"cluster"} or \code{"functionalform"},
depending on whether \code{cluster} was set to \code{TRUE}
\item \code{ols}, the \code{lm} object corresponding to the original linear
regression model
\item \code{fitinfo}, a list containing three named objects, \code{g} (the
heteroskedastic function), \code{Msq} (the elementwise-square of the
annihilator matrix \eqn{M}), \code{Z} (the design matrix used in the
ANLVM, after feature selection if applicable), and \code{clustering}
(a list object with results of the clustering procedure, if applicable).
\item \code{selectinfo}, a list containing two named objects,
\code{varselect} (the value of the eponymous argument), and
\code{selectedcols} (a numeric vector with column indices of \eqn{X}
that were selected, with \code{1} denoting the intercept column)
\item \code{qlinfo}, a list containing nine named objects: \code{converged}
(a logical, indicating whether the Gauss-Newton algorithm converged
for at least one initial value of the parameter vector),
\code{iterations} (the number of Gauss-Newton iterations used to
obtain the parameter estimates returned), \code{Smin} (the minimum
achieved value of the objective function used in the Gauss-Newton
routine), and six arguments passed to the function (\code{nested},
\code{param.init}, \code{maxgridrows}, \code{nconvstop},
\code{maxitql}, and \code{tolql})
}
}
\description{
Fits an Auxiliary Nonlinear Variance Model (ANLVM) to estimate the error
variances of a heteroskedastic linear regression model.
}
\details{
The ANLVM model equation is
\deqn{e_i^2=\displaystyle\sum_{k=1}^{n} g(X_{k\cdot}'\gamma) m_{ik}^2+u_i,}
where \eqn{e_i} is the \eqn{i}th Ordinary Least Squares residual,
\eqn{X_{k\cdot}} is a vector corresponding to the \eqn{k}th row of the
\eqn{n\times p} design matrix \eqn{X}, \eqn{m_{ik}^2} is the
\eqn{(i,k)}th element of the annihilator matrix \eqn{M=I-X(X'X)^{-1}X'},
\eqn{u_i} is a random error term, \eqn{\gamma} is a \eqn{p}-vector of
unknown parameters, and \eqn{g(\cdot)} is a continuous, differentiable
function that need not be linear in \eqn{\gamma}, but must be expressible
as a function of the linear predictor \eqn{X_{k\cdot}'\gamma}.
This method has been developed as part of the author's doctoral research
project.
The parameter vector \eqn{\gamma} is estimated using the maximum
quasi-likelihood method as described in section 2.3 of
\insertCite{Seber03;textual}{skedastic}. The optimisation problem is
solved numerically using a Gauss-Newton algorithm.
For further discussion of feature selection and the methods for choosing the
number of clusters to use with the clustering version of the model, see
\code{\link{alvm.fit}}.
}
\examples{
mtcars_lm <- lm(mpg ~ wt + qsec + am, data = mtcars)
myanlvm <- anlvm.fit(mtcars_lm, g = function(x) x ^ 2,
varselect = "qgcv.linear")
}
\references{
{\insertAllCited{}}
}
\seealso{
\code{\link{alvm.fit}}, \code{\link{avm.ci}}
}
|
0b234bca9a1ebbd2840e2bc2ac3bc78d5e9bc148 | 9b5aaacd59506bae1ab810c6ae1695a968903476 | /server.R | d17ea55ba35279a98d07d84e72a4dfd0ab097506 | [] | no_license | darrendonhardt/DataProducts-Assignment | 05cae6e79eb38c4b91c2246795fa4427abd9349f | 97486a9d43b3afb057f9dac6e9be838ba05cf82c | refs/heads/master | 2016-09-08T07:23:42.197993 | 2014-08-24T14:02:05 | 2014-08-24T14:02:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 587 | r | server.R | # server.R
library(datasets)
library(ggplot2)
# Working copy of the built-in mtcars data set; subset reactively below
data <- mtcars
# Shiny server: renders a single mpg-vs-hp scatter plot for the cars whose
# cyl/disp/wt/qsec values fall inside the ranges selected in the UI
# (assumes the UI defines slider inputs named *.range -- confirm in ui.R)
shinyServer(function(input, output) {
  output$plot <- renderPlot({
    # Each *.range input is a two-element vector c(min, max); keep only the
    # rows of mtcars lying inside every selected range
    data <- subset(data,
                   cyl>=input$cyl.range[1] & cyl<=input$cyl.range[2] &
                   disp>=input$disp.range[1] & disp<=input$disp.range[2] &
                   wt>=input$wt.range[1] & wt<=input$wt.range[2] &
                   qsec>=input$qsec.range[1] & qsec<=input$qsec.range[2]
    )
    # Scatter plot of mpg vs hp with a loess smoother over the filtered rows
    qplot(mpg, hp, data=data,
          geom=c("point","smooth"), span=3, method="loess")
  })
})
|
ba7c4f88a19b0b3dd2dbcc1a75b1611713209d03 | 3ede1e837093ef53ddc9b25f9332fb80d2ebefb9 | /R/fun_recovery.R | b976e130b65e51bdb4ac036b747192462842095e | [
"Apache-2.0"
] | permissive | RETURN-project/UpscaleRecovery | 2be5e40e9ef60bc89823b153b0b5411daf6612f2 | c77ae3510b3056fe842621b34d59f25dfe4d7e04 | refs/heads/master | 2021-03-11T22:37:29.327516 | 2020-09-28T13:07:34 | 2020-09-28T13:07:34 | 246,566,448 | 0 | 0 | Apache-2.0 | 2020-09-28T13:07:35 | 2020-03-11T12:35:39 | R | UTF-8 | R | false | false | 16,603 | r | fun_recovery.R | #' Calculate recovery metrics from a time series with known disturbance date. The calcFrazier function derives the RRI, R80P and YrYr recovery indicators, defined by Frazier et al. (2018). The indicators are originally developped for annual long-term time series of optical vegetation indices (the indicators are shown in the figures below). Yet, in order to be able to derive the indicators as well for dense and/or short time series, a modified version is suggested. Here, the user can define the time period before, during and after the disturbance that is used to derive the indicators. To reduce the interference of the seasonal pattern of dense time series, the chosen time period should cover blocks of n years. Moreover, given the potentially high noise levels of dense time series, the mean value instead of the maximum value was used in the formulas. (Frazier, R. J., Coops, N. C., Wulder, M. A., Hermosilla, T., & White, J. C. (2018). Analyzing spatial and temporal variability in short-term rates of post-fire vegetation return from Landsat time series. Remote Sensing of Environment, 205, 32-45.)
#'
#' @param tsio vector of observations (time series with a fixed observation frequency)
#' @param tdist observation number of disturbance, indicating the timing of the disturbance
#' @param obspyr number of observations per year
#' @param shortDenseTS TRUE or FALSE. In case TRUE, the metrics are adjusted to be compatible with short, dense time series
#' @param nPre If shortDenseTS is TRUE, number of years prior to the disturbance used to calculate the pre-disturbance value
#' @param nDist If shortDenseTS is TRUE, number of years used to quantify the time series value during the disturbance
#' @param nPostMin If shortDenseTS is TRUE, the post-disturbance condition is quantified starting from nPostMin years after the disturbance
#' @param nPostMax If shortDenseTS is TRUE, max number of years after the disturbance used to quantify the post-disturbance condition
#'
#' @return a list containing the RRI recovery indicator, R80p recovery indicator and YrYr recovery indicator
#' @export
#'
calcFrazier <- function(tsio, tdist, obspyr, shortDenseTS, nPre, nDist, nPostMin, nPostMax){
  # check if there are enough observations before and after the disturbance to calculate the metrics:
  # at least nPre years before tdist, nPostMax years after it, and more than 2 non-NA values overall
  if((tdist>((nPre*obspyr))) & (tdist < (length(tsio)-(nPostMax*obspyr)+1)) & (sum(!is.na(tsio))>2)){
    # translate parameters to those needed for the recovery functions
    ys <- tsio # response
    ts <- seq(1,length(tsio))# time
    # disturbance dates: observation indices covering the disturbance period.
    # For annual series (obspyr == 1) or nDist == 0 the end point is included,
    # otherwise the window spans exactly nDist*obspyr observations starting at tdist
    if (obspyr == 1 | nDist == 0){
      tpert <- seq(tdist,tdist+(nDist*obspyr))
    }else{
      tpert <- seq(tdist,tdist+(nDist*obspyr)-1)
    }
    # pre-disturbance period: the nPre*obspyr observations directly before tdist
    ts_pre <- seq(tdist-(nPre*obspyr),tdist-1)
    # post-disturbance period, between nPostMin and nPostMax years after tdist
    # (same end-point convention as tpert above)
    if (obspyr == 1 | nPostMin == nPostMax){
      ts_post <- seq(tdist +(nPostMin*obspyr), tdist +(nPostMax*obspyr))
    }else{
      ts_post <- seq(tdist +(nPostMin*obspyr), tdist +(nPostMax*obspyr)-1)
    }
    # timing between disturbance and post-disturbance state:
    # switch(shortDenseTS + 1, a, b) picks a when shortDenseTS is FALSE and b when TRUE,
    # i.e. a scalar offset for the original Frazier definition, elementwise offsets otherwise
    deltat <- switch(shortDenseTS + 1, (nPostMax*obspyr), ts_post-tdist)
    # derive recovery indicators
    # (rri, r80p and yryr are recovery-metric helpers defined elsewhere in this package)
    RRI <- rri(ts,ys,tpert,ts_pre, ts_post)
    R80P <- r80p(ts,ys,r = 0.8,ts_pre, ts_post)
    YrYr <- yryr(ts,ys,tpert, deltat)
    # make list of recovery indicators as output of the function
    lst <- list(RRI, R80P, YrYr)
    names(lst) <- c('RRI', 'R80P', 'YrYr')
    # give NA as output if not able to calculate the recovery indicators
  }else{
    lst <- list(NA, NA, NA)
    names(lst) <- c('RRI', 'R80P', 'YrYr')
  }
  lst
}
#' Post-disturbance slope and recovery metrics derived from BFAST0n trend segments. The calcBFASTrec function derives a set of recovery indicators after fitting a segmented trend in the time series. Using the breakpoints function of the strucchange package, a segmented trend is fitted (hereafter called BFAST0n trend segments). The detected break showing the largest change (in absolute values) is assumed to represent the disturbance. Using the segmented trend and detected disturbance date, the RRI, R80p, YrYr and the slope of the post-disturbance trend segment are derived as recovery indicators.
#'
#' @param tsio vector of observations (time series)
#' @param obspyr number of observations in one year
#' @param h This parameter defines the minimal segment size either given as fraction relative to the sample size or as an integer giving the minimal number of observations in each segment.
#' @param shortDenseTS TRUE or FALSE. In case FALSE, the metrics follow closely the definitions given by Frazier et al
#' @param nPre number of years prior to the disturbance used to calculate the pre-disturbance value
#' @param nDist number of years used to quantify the time series value during the disturbance
#' @param nPostMin min number of years after the disturbance used to quantify the recovery
#' @param nPostMax max number of years after the disturbance used to quantify the recovery
#' @param tdist the timing of the disturbance [observation number]
#' @param maxBreak only for recovery indicators derived from piecewise regression: if maxbreak is true, the maximum break in the segmented series is used as disturbance date to calculate the recovery indicators. If maxbreak is false, the break closest to the provided disturbance timing is used to calculate recovery.
#' @param seas should a seasonal comonent be used in the piecewise regression?
#' @param timeThres threshold on the duration between the disturbance date and date of the detected break [years]
#'
#' @return a list containing the RRI, R80p, YrYr recovery indicator derived from the BFAST0n trend segments and slope of the trend segment after the disturbance (sl).
#' @export
#' @import strucchange
#' @import stats
#' @import bfast
calcSegRec <- function(tsio, tdist, maxBreak, obspyr, h, shortDenseTS, nPre, nDist, nPostMin, nPostMax, timeThres, seas = F){
  # Named NA result, returned whenever no valid recovery estimate can be derived.
  # FIX: the "too few observations" fallback previously returned 6 elements
  # (including an unused 'Sl' slot) while every other branch returned 5;
  # the output structure is now consistent across all branches. Callers
  # (calcRecoveryTS) only read RRI, R80P, YrYr, loglik and AIC.
  naRes <- list(RRI = NA, R80P = NA, YrYr = NA, loglik = NA, AIC = NA)
  # Create time series object, needed as input for BFAST
  tsi <- ts(tsio, frequency = obspyr)
  # Convert the time series object into a dataframe, needed for the breakpoints function
  if(obspyr>1){
    datapp <- bfastpp(tsi, order = 1, lag = NULL, slag = NULL,
                      na.action = na.omit, stl = 'none')
  }else if(!seas){
    datapp <- data.frame(response = tsio, trend = seq_along(tsio))
  }else{
    stop('No seasonal term allowed for time series with one observation per year or less.')
  }
  # Number of regressors of the piecewise model
  # (intercept + trend, plus harmonic terms when a seasonal component is used)
  nreg <- switch(seas+1, 2, 5)
  # Test if enough observations are available to fit the piecewise model
  if(floor(length(tsio[is.na(tsio)==F]) * h) <= nreg){
    return(naRes)
  }
  # Apply BFAST0n on time series: find breaks in the regression
  if (seas){
    bp <- breakpoints(response ~ trend + harmon, data = datapp, h = h)
  } else{
    bp <- breakpoints(response ~ trend, data = datapp, h = h)
  }
  # No breakpoint found: recovery indicators cannot be derived
  if(is.na(bp$breakpoints[1])){
    return(naRes)
  }
  # At least one breakpoint found: extract the segment coefficients
  cf <- coef(bp)
  # Observation numbers of the breaks, corrected for missing values
  tbp <- bp$breakpoints
  indna <- which(is.na(tsi)==F)
  tbp <- indna[tbp]
  totbp <- tbp # keep the full set of breaks for the post-break screening below
  # Reconstruct the fitted piecewise trend over the full series (incl. gaps)
  bpf <- c(0, tbp, length(tsi))
  trf <- rep(NA,length(tsi))
  for(ti in 1:(length(bpf)-1)){
    trf[(bpf[ti]+1):bpf[ti+1]] <- cf[ti,1] + ((cf[ti,2]*((bpf[ti]+1):bpf[ti+1])))
  }
  # Information criteria of the fitted model (AIC of the model with length(tbp) breaks)
  bp_loglik <- logLik(bp)
  bp_aic <- AIC(bp)[length(tbp) + 1]
  if(maxBreak){
    # Use the break with the largest jump of the fitted trend as the disturbance
    dbr <- trf[tbp+1]-trf[tbp]
    tbp <- tbp[which(abs(dbr) == max(abs(dbr)))]
  }else{
    # Use the break closest to the provided disturbance date
    dbr <- tbp-tdist
    tbp <- tbp[which(abs(dbr) == min(abs(dbr)))]
  }
  # check the time period between the break and the disturbance
  # NOTE(review): when maxBreak is TRUE, dbr holds trend-VALUE differences
  # (a magnitude, not a duration), so comparing min(abs(dbr))/obspyr against
  # timeThres may not behave as intended in that mode -- confirm with authors.
  timeChck <- ((min(abs(dbr))/obspyr) < timeThres)
  # positive slope of the post-disturbance segment
  postChck <- ((trf[tbp+3] - trf[tbp+2]) > 0)
  # negative break (the disturbance lowers the fitted trend)
  distChck <- ((trf[tbp+1] - trf[tbp]) < 0)
  # no negative break within the recovery period
  brkthres <- 1+(nPostMax*obspyr) #post-disturbance period used to assess recovery
  if(any((totbp>tbp) & (totbp<(brkthres+tbp)))){
    postbr <- totbp[(totbp>tbp) & (totbp<(brkthres+tbp))]
    postdbr <- trf[postbr+1]-trf[postbr]
    brkChck <- !any((postdbr<0))
  }else{
    brkChck <- TRUE
  }
  if(timeChck & postChck & distChck & brkChck){
    # Calculate the Frazier recovery metrics on the BFAST trend component.
    # FIX: the original computed this block twice in a row (copy-paste
    # duplication) and derived a post-disturbance slope 'sl' that was never
    # returned; both redundancies were removed without changing the result.
    frz <- calcFrazier(as.numeric(trf), (tbp+1), floor(obspyr), shortDenseTS, nPre, nDist, nPostMin, nPostMax)
    frz <- c(frz, bp_loglik, bp_aic)
    names(frz) <- c('RRI', 'R80P', 'YrYr', 'loglik', 'AIC')
  }else{
    frz <- naRes
  }
  frz
}
#' Calculate recovery for a single time series
#'
#' @param tsi vector: the first n values contain the timing of the disturbances and the next n values the observations for which the recovery indicators should be computed
#' @param maxBreak (only for recovery indicators derived after piecewise regression): if maxbreak is true, the maximum break in the segmented series is used as disturbance date to calculate the recovery indicators. If maxbreak is false, the break closest to the provided disturbance timing is used to calculate recovery.
#' @param obspyr the number of observations per year
#' @param inp the preprocessing applied to the time series before computing the recovery indicators: segmented (for piecewise regression), smooth (time series smoothing using loess), or raw (no preprocessing)
#' @param shortDenseTS In case FALSE, the metrics follow closely the definitions given by Frazier et al
#' @param nPre number of years prior to the disturbance that are used to derive the pre-disturbance condition
#' @param nDist number of observations used to derive the disturbance state
#' @param nPostMin start of the post-disturbance period: number of years after the disturbance
#' @param nPostMax end of the post-disturbance period: number of years after the disturbance
#' @param h h parameter of the breakpoints function in the strucchange package
#' @param seas should a seasonal comonent be used in the piecewise regression?
#' @param timeThres only relevant for piecewise regression: threshold on the duration between the disturbance date and date of the detected break [years]
#'
#' @return the RRI, R80P, YrYr and the slope of the pos-disturbance segment
#' @export
#' @import stats
calcRecoveryTS <- function(tsi, maxBreak, obspyr, inp = 'segmented', shortDenseTS = TRUE,
                           nPre = 2, nDist = 12, nPostMin = 4, nPostMax = 6, h = 0.15, timeThres, seas){
  # The input vector holds two stacked series of equal length:
  # first half  = disturbance indicator (1 = disturbance, 0 = none),
  # second half = vegetation response observations
  len <- length(tsi)
  tdist <- tsi[1:(len/2)]
  tsi <- tsi[(1+(len/2)):len]
  # Observation numbers at which a disturbance took place
  tdist <- which(tdist == 1)
  if(length(tdist) == 0){
    # No disturbance in the series: nothing to compute
    return(c(NA,NA,NA,NA,NA,NA))
  }
  # Magnitude of each disturbance: mean of the first year after minus mean of
  # the two years before (0 when too close to the series edges)
  dbr <- rep(NA,length(tdist))
  for(i in seq_along(tdist)){
    if((tdist[i] > (2*obspyr)) & (tdist[i] < (length(tsi)-obspyr+1))){
      # BUGFIX: the pre-disturbance window was previously written as
      # (tdist[i]-2*obspyr):tdist[i]-1, which R parses as
      # ((tdist[i]-2*obspyr):tdist[i]) - 1 because ':' binds tighter than '-',
      # so one extra observation before the intended two-year window was
      # included. The end of the range is now parenthesised explicitly.
      dbr[i] <- mean(tsi[(tdist[i]+1):(tdist[i]+(1*obspyr))], na.rm = TRUE) -
        mean(tsi[(tdist[i]-2*obspyr):(tdist[i]-1)], na.rm = TRUE)
    }else{
      dbr[i] <- 0
    }
  }
  # Keep the disturbance with the largest absolute magnitude
  tdist <- tdist[which(abs(dbr) == max(abs(dbr)))]
  mval <- sum(is.na(tsi))/length(tsi) # fraction of missing values
  if (inp == 'smoothed'){
    # Smooth the time series using a loess filter
    # (local variable renamed from 'ls', which shadowed base::ls)
    df <- data.frame(dat = tsi, tm = 1:length(tsi))
    loessFit <- loess(dat ~ tm, df, span = 0.2)
    tmps <- predict(loessFit, data.frame(tm = 1:length(tsi)), se = TRUE)
    tsi <- tmps$fit
  }
  if((inp == 'smoothed') | (inp == 'raw')){
    # Recovery indicators computed on the (optionally smoothed) series itself
    tmp <- calcFrazier(tsi, tdist, obspyr, shortDenseTS, nPre, nDist, nPostMin, nPostMax)
    outp <- c(tmp$RRI, tmp$R80P, tmp$YrYr, mval, NA, NA)
  }
  if(inp == 'segmented'){
    # Recovery indicators computed after piecewise (segmented) regression
    tmp <- calcSegRec(tsi, tdist, maxBreak, obspyr, h, shortDenseTS, nPre, nDist, nPostMin, nPostMax, timeThres, seas)
    outp <- c(tmp$RRI, tmp$R80P, tmp$YrYr, mval, tmp$loglik, tmp$AIC)
  }
  outp
}
#' Calculate recovery indicators from a time series stack
#'
#' @param st raster stack, the first raster is a mask (pixels with value 0 are not considered, pixels with value 1 are included), the next n rasters represent disturbances (0 if no disturbance occours, 1 if a disturbance occurs) and the last n rasters contain the time series observations
#' @param maxBreak (only for recovery indicators derived after piecewise regression): if maxbreak is true, the maximum break in the segmented series is used as disturbance date to calculate the recovery indicators. If maxbreak is false, the break closest to the provided disturbance timing is used to calculate recovery.
#' @param obspyr the number of observations per year
#' @param inp the preprocessing applied to the time series before computing the recovery indicators: segmented (for piecewise regression), smooth (time series smoothing using loess), or raw (no preprocessing)
#' @param shortDenseTS In case FALSE, the metrics follow closely the definitions given by Frazier et al
#' @param nPre number of years prior to the disturbance that are used to derive the pre-disturbance condition
#' @param nDist number of observations used to derive the disturbance state
#' @param nPostMin start of the post-disturbance period: number of years after the disturbance
#' @param nPostMax end of the post-disturbance period: number of years after the disturbance
#' @param h h parameter of the breakpoints function in the strucchange package
#' @param timeThres only relevant for piecewise regression: threshold on the duration between the disturbance date and date of the detected break [years]
#' @param seas only relevant for piecewise regression: should a seasonality term be used?
#'
#' @return a raster with the RRI, R80P, YrYr and the slope of the pos-disturbance segment
#' @export
#'
calcRecoveryStack <- function(st, maxBreak, obspyr, inp = 'segmented', shortDenseTS = TRUE,
                              nPre = 2, nDist = 12, nPostMin = 4, nPostMax = 6, h = 0.15, timeThres, seas) {
  # First element of the stacked pixel vector is the mask value;
  # an NA mask is treated the same as a masked-out (0) pixel
  maskVal <- st[1]
  if (is.na(maskVal)) {
    maskVal <- 0
  }
  # The remaining elements hold the disturbance indicators and the observations
  pixelSeries <- st[-1]
  if (maskVal == 1) {
    # Pixel is inside the mask: derive the recovery indicators
    calcRecoveryTS(pixelSeries, maxBreak, obspyr, inp, shortDenseTS,
                   nPre, nDist, nPostMin, nPostMax, h, timeThres, seas)
  } else {
    # Masked-out pixel: return the six-element NA placeholder
    rep(NA, 6)
  }
}
# toAnnualStack <- function(){
#
# }
|
2b173b7a34155f5b988239499b66334fdd20d3ef | 0b9725208a5f98fdea10d1cf84d3f62f94d4a00d | /man/preCluster.Rd | 023e6f632301076f8202f62457c08c455650dbca | [
"MIT"
] | permissive | alienzj/endoR | 09411c1a2ba27a3a24f350a878b4b14d3c59553d | 138d31e8e1e0cf1ca330356caa420e0803ff4488 | refs/heads/main | 2023-08-26T02:06:28.235858 | 2021-10-27T09:35:54 | 2021-10-27T09:35:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,267 | rd | preCluster.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preCluster.R
\name{preCluster}
\alias{preCluster}
\title{Extract decisions from a model and create bootstrap resamples.}
\usage{
preCluster(
model,
model_type,
data,
target,
times = 10,
p = 0.5,
sample_weight = NULL,
classPos = NULL,
ntree = "all",
maxdepth = Inf,
dummy_var = NULL,
discretize = FALSE,
K = 2,
features_ctg = NULL,
seed = 0,
in_parallel = FALSE,
n_cores = detectCores() - 1
)
}
\arguments{
\item{model}{model to extract rules from.}
\item{model_type}{character string: 'RF', 'random forest', 'rf', 'xgboost', 'XGBOOST', 'xgb', 'XGB', 'ranger', 'Ranger', 'gbm' or 'GBM'.}
\item{data}{data with the same columns than data used to fit the model.}
\item{target}{response variable.}
\item{times}{number of bootstraps}
\item{p}{fraction of data to resample.}
\item{sample_weight}{numeric vector with the weights of samples for bootstrap resampling. For classification, if 2 values are given, the 1st one is assumed to be for the positive class (classpos argument).}
\item{classPos}{the positive class predicted by decisions}
\item{ntree}{number of trees to use from the model (default = all)}
\item{maxdepth}{maximal node depth to use for extracting rules (by default, full branches are used).}
\item{dummy_var}{if multiclass variables were transformed into dummy variables before fitting the model, one can pass their names in a vector here to avoid multiple levels to be used in a same rule (recommended).}
\item{discretize}{if TRUE, discretization is performed with the K and features_ctg parameters (discretizeDecisions, by default = FALSE).}
\item{in_parallel}{if TRUE, the function is run in parallel.}
\item{n_cores}{if in_parallel = TRUE, and no cluster has been passed: number of cores to use.}
\item{cluster}{the cluster to use to run the function in parallel.}
}
\value{
A list with the row numbers of partitioned data, the rules originally extracted from the model and new data if discretization was performed.
}
\description{
to run before bootstrapping on parallel with the clustermq package and model2DE_cluster function. Extracts decisions, optionally discretizes them. Creates data partitions for bootstrapping.
}
|
1d351aa3c4ea14c44c3268c5f297172e5cdf5f98 | 6f490aab522bae82056a9d7afbf484835ce772db | /Homework #1.R | f919f3ae3cc0d29adc46b425e2305bfcb3048ae4 | [] | no_license | Acelhaka/RStudioProjects | ced8aa68e6b948d796badec622374964ec0ead74 | ac32c18eeadfd6439e5713d9a9dd69d809a990cd | refs/heads/master | 2020-04-27T08:49:10.589255 | 2019-03-06T17:11:36 | 2019-03-06T17:11:36 | 174,187,349 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,452 | r | Homework #1.R | ##Homework #1
##Amarilda Celhaka
##9/4/2018
## Write a function p=proportion(x,a)
##that returns the proportion of elements
##in a vector x with values less than or equal
##to a given number a.
# Returns the proportion of elements of x with values less than or equal to a.
# FIX: the original subsetting x[x <= a] silently counted NA comparisons
# towards the numerator; NAs in x are now excluded from the numerator while
# still counting in the total length.
p <- function(x, a) {
  sum(x <= a, na.rm = TRUE) / length(x)
}
##to test the function
p(c(1,1,1,6,6,6),5)
##Write a function that takes a matrix
##as an input and returns the columns of
##the matrix that has the largest and the
##smallest column sums among all the columns.
# Returns the columns of matrix X with the largest and the smallest column
# sums, bound together as cbind(maxCol, minCol). With ties, all tied columns
# are kept (same as the original).
# FIX: the original accumulated the column sums in an explicit row loop and
# then recomputed colSums(X) twice more; a single colSums() call is equivalent.
columnSums <- function(X){
  s <- colSums(X)
  maxCol <- X[, which(s == max(s))]
  minCol <- X[, which(s == min(s))]
  cbind(maxCol, minCol)
}
##to test the function
X <- matrix(1:18, ncol = 3)
columnSums(X)
##Write your own function that calculates the
##product of two matrices (do not use the R
##operation %*% in your function).

# Computes the matrix product of x and y without using %*%:
# res[j, i] is the dot product of row j of x with column i of y.
matrixProd <- function(x, y){
  stopifnot(ncol(x) == nrow(y)) # matrices must be conformable
  res <- matrix(NA, nrow(x), ncol(y))
  for(i in seq_len(ncol(y))){
    for(j in seq_len(nrow(x))){
      res[j, i] <- sum(x[j, ] * y[, i])
    }
  }
  res
}
# to test the function
# FIX: A and B were previously undefined here, so the call below failed with
# "object 'A' not found"; example matrices are now defined first.
A <- matrix(1:6, nrow = 2)
B <- matrix(1:6, nrow = 3)
matrixProd(A,B)
|
7b132926defff26bcf5eabee950a9ddc644c0504 | a4398c997dfb62b9b7383823a54be62997aa59c8 | /script/functions_script.R | 62567f1931c67c9e28ac0a658b9c3106ce4aee75 | [] | no_license | amanda-mari/nyc-high-school-grad-rates | 202441ba6cef007c2f44e8bf3e02ab1f0af8f1fd | 2fc4d9d3940b7acb99763d6ad2e25141fcd5772c | refs/heads/main | 2023-03-26T23:22:24.109357 | 2021-03-17T00:31:40 | 2021-03-17T00:31:40 | 333,556,658 | 0 | 0 | null | 2021-03-17T00:31:40 | 2021-01-27T20:51:18 | R | UTF-8 | R | false | false | 390 | r | functions_script.R | # Functions ---------------------------
# Read one sheet (by name) from the Excel workbook at `path` and return it.
# FIX: the original assigned the result to an unused local `x`; because an
# assignment is invisible in R, the function's return value was invisible too.
# The expression is now returned directly.
read_excel_sheets <- function(sheet_name, path) {
  readxl::read_excel(path = path, sheet = sheet_name)
}
# Extract the word-level text data (with page coordinates) from each page of
# the given PDF file; returns a list with one data frame per page.
# See pdftools::pdf_data for the exact columns returned.
read_pdf_data <-function(high_school_directory_pdfs){
  pdftools::pdf_data(high_school_directory_pdfs)
}
# Drop the list elements (PDF pages) whose data frame has zero rows,
# i.e. pages from which no text data could be extracted.
remove_blank_pages <- function(nested_list_name) {
  Filter(function(page) nrow(page) != 0, nested_list_name)
}
|
6567e1719e480b40da63e9a6d43391acda6a5e93 | 727e96e85a03cf01d46c132225e171218c8dd1e5 | /man/applyScreenTable.Rd | ea77f79fa3bf6af7d52ac8ed8977607c14abd4d5 | [
"MIT"
] | permissive | utah-dwq/irTools | 6faf9da88514cf72b2166f48074348c00a6f2137 | da34c77f363a00767563d76112ea87004c3be0d4 | refs/heads/master | 2023-08-31T23:58:52.981989 | 2023-08-25T15:31:22 | 2023-08-25T15:31:22 | 147,577,302 | 2 | 0 | MIT | 2022-08-30T21:54:40 | 2018-09-05T20:44:04 | R | UTF-8 | R | false | true | 1,674 | rd | applyScreenTable.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/applyScreenTable.r
\name{applyScreenTable}
\alias{applyScreenTable}
\title{Apply screens to WQP data by selected table}
\usage{
applyScreenTable(
data,
wb,
sheetname,
flag_col_name,
com_col_name,
startRow = 1,
na_dup_err = TRUE
)
}
\arguments{
\item{data}{A merged WQP result object. Must include both narrowresult & activity files. May also be a post-fillMaskedValues() results object. Note: re-application of edited domain tables to an already screened dataset is not advised, and changes to the domain table likely will not be reflected in a re-screened dataset due to merging conventions.}
\item{wb}{Full path and filename for Excel workbook containing screen table to be applied}
\item{sheetname}{Name of sheet in workbook holding desired screening decisions}
\item{flag_col_name}{Name to rename IR_FLAG column to.}
\item{com_col_name}{Name to rename IR_COMMENT column to.}
\item{startRow}{Row to start reading excel sheet from (in case additional headers have been added). Defaults to 1.}
\item{na_dup_err}{Logical. If TRUE (default), exit function with error if IR_FLAG values are NA or if duplicates detected in combinations in the domain table for which InData=="Y". Set to FALSE to apply a screen table without checking for NA values in IR_FLAG.}
}
\value{
A data.frame object of WQP data with merged columns from input screening tables.
}
\description{
Joins activity review inputs (detConditionTable, labNameActivityTable, activityMediaNameTable, masterSiteTable, paramTransTable, & activityCommentTable) to WQP data to apply decisions from input files to data.
}
|
c559204e08d88121184e9959d790177a89a5316a | 1eb6f2998035f9884a86b54c61641157b8fa6d2f | /3-2_hashChain/modulo.R | 4f5a31972ba9a3412a0e6d0d1d5ab3c02dd77a85 | [] | no_license | markculp0/dataStruct | 90de9f0ab70bf9f8d93253dd5ffa0ce9e093bef7 | 5c82fc8c70e3f10b384206f1eb9b420385ea1bef | refs/heads/master | 2020-05-28T08:26:25.606863 | 2019-07-05T15:20:49 | 2019-07-05T15:20:49 | 188,937,959 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 798 | r | modulo.R |
# ========================================
# Polynomial string hash of "world", worked out by hand:
# h(s) = (sum over i of code(s[i]) * x^i) mod p, then mod m buckets,
# with x = 263, p = 1000000007, m = 5
# h("world")
119 + (111 * 263) + (114 * 263^2) + (108 * 263^3) + (100 * 263^4)
# [1] 480407646954
480407646954 %% 1000000007
# [1] 407643594
407643594 %% 5
# [1] 4
# ========================================
# java: obtaining a character's code point
# char character = 'a';
# int ascii = (int) character;
# char character = name.charAt(0); // This gives the character 'a'
# int ascii = (int) character; // ascii is now 97.
# R computation
utf8ToInt("w")
# [1] 119
# Code points of the individual letters of "world" (named numeric vector)
world <- sapply(strsplit("world", NULL)[[1L]], utf8ToInt)
# w o r l d
# 119 111 114 108 100
m <- 5
# Weight each code point by 263^i (vectorised; replaces the original
# counter-driven for loop, which ignored its loop variable `d`)
world <- world * 263^(seq_along(world) - 1)
t1 <- sum(world)
# [1] 480407646954
t1 <- t1 %% 1000000007
t1 <- t1 %% m
# ========================================
|
ff335529eed3cb23250891d0f5eb76e5dd197ac2 | 8a8002f3b98c1c841e7c1cacc5536e4f9ffb8a8d | /man/vectorToUniformDist.Rd | 843be687954886ca4afc24a020c6039e89578e2f | [] | no_license | alesaccoia/alexr | f3c537ff565a0a9e3b911ea227cb5d1eb708641e | 8750c581656ae695facc36126610dd3e0c0cc7ae | refs/heads/main | 2023-03-29T13:48:36.982005 | 2021-04-10T07:28:41 | 2021-04-10T07:28:41 | 356,506,467 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 609 | rd | vectorToUniformDist.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{vectorToUniformDist}
\alias{vectorToUniformDist}
\title{Samples a vector from a uniform distribution; the sum adds to the mean}
\usage{
vectorToUniformDist(N, M, sd = 1)
}
\arguments{
\item{N}{the number of samples}
\item{M}{the mean of the samples}
\item{sd}{the standard deviation of the samples}
}
\value{
a vector of N numbers adding to M (can be negative)
}
\description{
Samples a vector from a uniform distribution; the sum adds to the mean
}
\examples{
vectorToUniformDist(6,10, sd = 1) / 10
}
|
9aa598261d464db3f7e06b87b456f3622aa5325c | 4906e66db6ada21b9c6da45ee03cb977df5ffd16 | /demo/deldir.R | dda02e2d56ef1947fdfec614f0d73723f7830edd | [] | no_license | jsta/spnetwork | 482facd8c997c52d329ed26ac0cf15c5c422960c | d0e9d78a411dee9ba3c0b3ee681f5dbae7183221 | refs/heads/master | 2021-06-11T23:09:37.895782 | 2019-01-23T01:59:56 | 2019-01-23T01:59:56 | 96,037,556 | 0 | 0 | null | 2017-07-02T17:04:27 | 2017-07-02T17:04:27 | null | UTF-8 | R | false | false | 38 | r | deldir.R | # content has move to
# This demo's content now lives in the package vignette; open it with:
vignette("spn")
|
200dc5259438ede9ed345d6bc82a11fcbe9652ea | 4a43a15968f3059ee1eae5d1bdee5b67ffa7f28a | /man/node_info.Rd | 6398f4f8e33791f0bd05cec3a038a5bee4b5334f | [
"MIT"
] | permissive | carlosal1015/DiagrammeR | b438f4e9d9be89742f3ba2ab4d0470f6a31643a7 | b3a96de1e6e65b0a0927cc349d18f6a34bd1a8ef | refs/heads/master | 2021-05-14T18:19:43.104026 | 2018-01-02T20:03:29 | 2018-01-02T20:03:29 | 116,067,939 | 0 | 0 | NOASSERTION | 2019-06-24T01:01:51 | 2018-01-02T23:27:26 | R | UTF-8 | R | false | true | 1,462 | rd | node_info.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/node_info.R
\name{node_info}
\alias{node_info}
\title{Get detailed information on nodes}
\usage{
node_info(graph)
}
\arguments{
\item{graph}{a graph object of class
\code{dgr_graph}.}
}
\value{
a data frame containing information specific
to each node within the graph.
}
\description{
Obtain a data frame with detailed
information on nodes and their interrelationships
within a graph.
}
\examples{
# Set a seed
set.seed(23)
# Create a node data frame (ndf)
ndf <-
create_node_df(
n = 26,
label = TRUE,
type = c(rep("a", 7),
rep("b", 9),
rep("c", 8),
rep("d", 2)))
# Create an edge data frame (edf)
edf <-
create_edge_df(
from = sample(1:26, replace = TRUE),
to = sample(1:26, replace = TRUE),
rel = c(rep("rel_a", 7),
rep("rel_b", 9),
rep("rel_c", 8),
rep("rel_d", 2)))
# Create a graph using the ndf and edf
graph <-
create_graph(
nodes_df = ndf,
edges_df = edf)
# Get information on the graph's nodes
node_info(graph)
#> id type label deg indeg outdeg loops
#> 1 1 a 1 0 0 0 0
#> 2 2 a 2 0 0 0 0
#> 3 3 a 3 2 2 0 0
#> 4 4 a 4 3 1 2 0
#> 5 5 a 5 1 1 0 0
#> 6 6 a 6 1 0 1 0
#>.. ... ... ... ... ... ... ...
}
|
76b3bf93b85716f9bbc88bf928ac6243d8e0ec68 | 745295046a9499a459f379c2931db70e21cd39e3 | /components-graph_aggregator/src/main/resources/mergeGraphs.R | cae4e54b61727118442436ef2090ff4dfbaf93ec | [] | no_license | AnalyticsWorkbench/Components | bc109595c5707388ffe2c319cdbbfdadedabfde6 | cd7ab7eeaed7480712a3270cc9f81af0a7041634 | refs/heads/master | 2022-12-09T15:05:38.538578 | 2019-05-23T15:23:48 | 2019-05-23T15:23:48 | 136,925,042 | 2 | 1 | null | 2022-12-05T23:55:36 | 2018-06-11T12:44:40 | JavaScript | UTF-8 | R | false | false | 2,303 | r | mergeGraphs.R | require(igraph)
require(plyr)
# Collapse duplicate node rows into one row per node. Rows are grouped by
# "label" or "id", depending on the script-level flag `byLabel`. Within a
# group, numeric attributes are summed (NAs removed) and all other
# attributes are collapsed into a comma-separated string of unique values.
combineNodeAttributes <- function(nodeData) {
  groupCol <- if (byLabel) "label" else "id"
  ddply(nodeData, groupCol, function(rows) {
    collapsed <- lapply(rows, function(column) {
      if (is.numeric(column)) {
        sum(column, na.rm = TRUE)
      } else {
        paste(unique(column), collapse = ",")
      }
    })
    as.data.frame(collapsed)
  })
}
# Merge two igraph objects into one. Vertices are matched on `label` or
# `id` (controlled by the script-level flag `byLabel`); duplicated nodes
# and edges are collapsed, with numeric attributes summed and character
# attributes joined into comma-separated strings.
# Depends on script-level globals: `byLabel` (matching key) and `doUnion`
# (TRUE -> keep every edge, i.e. union; FALSE -> keep only edges occurring
# in both graphs, via the is.multiple() filter below).
mergeGraphs <- function(g1, g2) {
  # Expose the chosen identifier as the igraph `name` attribute so that
  # graph_from_data_frame() can align vertices across the two graphs.
  if (byLabel) {
    if (is.null(V(g1)$label) || is.null(V(g2)$label)) {
      stop("Not every graph has node labels")
    }
    V(g1)$name <- V(g1)$label
    V(g2)$name <- V(g2)$label
  } else {
    V(g1)$name <- V(g1)$id
    V(g2)$name <- V(g2)$id
  }
  # This is necessary since node and edge attributes might not match completely
  # (merge() with all=TRUE fills missing attribute columns with NA). The
  # temporary gId column records which input each row came from.
  edgeDf1 <- as_data_frame(g1, what="edges")
  edgeDf1$gId <- 1
  edgeDf2 <- as_data_frame(g2, what="edges")
  edgeDf2$gId <- 2
  nodeDf1 <- as_data_frame(g1, what="vertices")
  nodeDf1$gId <- 1
  nodeDf2 <- as_data_frame(g2, what="vertices")
  nodeDf2$gId <- 2
  edges <- merge(edgeDf1, edgeDf2, all=TRUE)
  # Collapse duplicated vertex rows (numeric attrs summed, others joined).
  nodes <- combineNodeAttributes(
    merge(nodeDf1, nodeDf2, all=TRUE)
  )
  # node identifier must be the first column in node data frame.
  # NOTE(review): in byLabel mode combineNodeAttributes() groups by "label"
  # while the reorder below uses "name" -- verify both columns exist in the
  # merged node data frame as expected.
  if (byLabel) {
    nodes <- nodes[,c("name", setdiff(names(nodes), "name"))]
  } else {
    nodes <- nodes[,c("id", setdiff(names(nodes), "id"))]
  }
  res <- graph_from_data_frame(edges, directed=(is.directed(g1) || is.directed(g2)), vertices = nodes)
  if (!doUnion) {
    # Intersection mode: delete edges that are not duplicated, i.e. edges
    # present in only one of the two inputs.
    res <- delete_edges(res, !is.multiple(res))
  }
  # Collapse parallel edges; numeric edge attributes are summed, character
  # ones comma-joined. Self-loops are kept.
  res <- simplify(res, remove.loops = FALSE, edge.attr.comb = function(attribute) {
    if (is.numeric(attribute)) {
      sum(attribute)
    } else {
      paste(unique(attribute), collapse = ",")
    }
  })
  # Renumber ids and strip the temporary gId bookkeeping attributes; the
  # final vertex-attribute deletion is the returned graph.
  V(res)$id <- 1:vcount(res)
  res <- delete_edge_attr(res, "gId")
  delete_vertex_attr(res, "gId")
}
# set occurrences attributes
# Every node/edge starts with occurrences = 1. mergeGraphs() sums numeric
# attributes across duplicates, so after merging this attribute counts how
# many input graphs contained each node/edge.
# NOTE(review): `graphs` is assumed to be a list of igraph objects supplied
# by the surrounding workbench component; it is not created in this file.
graphs <- lapply(graphs, function(g) {
  E(g)$occurrences <- 1
  V(g)$occurrences <- 1
  g
})
# Fold the whole list into one merged graph and write it out as GML.
res <- Reduce(mergeGraphs, graphs)
filename <- "merged_graph.gml"
write.graph(res, filename, "gml")
# Result descriptor consumed by the workbench: the metadata string declares
# the occurrences attribute for nodes and edges.
# NOTE(review): the "Occurences"/"number_of_occurences" misspellings are
# part of the runtime metadata string and are intentionally left as-is.
resultData <- list(dataUrl=filename, metadata="Occurences,oc,double,node,number_of_occurences,Occurences,oc,double,edge,none")
881c2bb525614626beb896e5e632d1a4709b25fe | d59fbaae55043f9249a4bcddcb919ca9b3f0fbc6 | /plot2.R | 72966c0c960058259a2fc9bae087cb51e60982f1 | [] | no_license | iampawansingh/ExploratoryDA | c3a44dfa0799e7111e32d3c1fdccca8095f1bf6c | cca33cb1f004312f6c2872c166ad5425caaee944 | refs/heads/master | 2021-07-31T01:54:47.358817 | 2014-09-07T19:09:55 | 2014-09-07T19:09:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 235 | r | plot2.R | ### Making Second Plot
# Plot global active power (kW) over time as a line chart and save it to
# "plot2.png" as a 480x480 PNG.
# NOTE(review): assumes `power_data_sub` (with columns `time` and
# `Global_active_power`) was created earlier in the analysis pipeline; it
# is not defined in this file -- confirm against the calling script.
png(file = "plot2.png", width = 480, height = 480)
with(power_data_sub, plot(time,Global_active_power, pch ="",type ="l",
                          ylab = "Global Active Power (Kilowatts)", xlab = ""))
dev.off()
3bf5508836ababaea2c8d57a56b90ec847542f4f | 504f8c9876bf93ff2755d0e1c142d42e7600d129 | /man/ffModelLM.Rd | 15bd450522c98eda8fdbd89493f2d5db81f710b9 | [] | no_license | jeff100383/fundAnalysis | ffa38eade5ba052bd0be9ef4dd70b018eb24de0c | 8ed78664e8c1a912ea6544bb43d616e7ce90b8b0 | refs/heads/master | 2022-04-02T19:55:29.788182 | 2019-12-02T01:11:44 | 2019-12-02T01:11:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 685 | rd | ffModelLM.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fa_utilities.R
\name{ffModelLM}
\alias{ffModelLM}
\title{Generates lm models for one or more funds using Fama-French data as independent variables}
\usage{
ffModelLM(rets, ff_data, s = NULL, e = NULL, n = NULL)
}
\arguments{
\item{rets}{List of returns such as generated by convertPricesToReturns()}
\item{ff_data}{Fama-French data in the same frequency as the returns in \code{rets}}
\item{s}{Start date}
\item{e}{End date}
\item{n}{Number of observations}
}
\value{
List of lm models
}
\description{
Generates lm models for one or more funds using Fama-French data as independent variables
}
\examples{
ffModelLM(rets, ff_data)
}
|
62e79c20d1490d6cef9aa7aab13065a9bc642df1 | edee17c088ce50d56ed265ccfa1b2b7c4fc7182c | /Lab Sample Scripts/pca_sample.R | 96d0531883cc18d2a33396a42d673930406812d6 | [] | no_license | rashidch/Advanced-Data-Analysis-with-R | 21a947681850a3a665658d8d31eb616ea00bd0b8 | f933da7e647990f39f1d6999e12ae7e8e65f9bde | refs/heads/master | 2021-06-28T01:55:34.644043 | 2017-09-13T19:05:00 | 2017-09-13T19:05:00 | 103,429,890 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 306 | r | pca_sample.R |
data(crimtab)
head(crimtab)
colnames(crimtab)
dim(crimtab)
str(crimtab)
apply(crimtab,2,var)
pcacrim= prcomp(crimtab)
pcacrim
par(mar=rep(2,4))
plot(pcacrim, type = "l")
#better visuliziling
pca$rotation= -pca$rotation
pca$x= -pca$x
biplot(pca, scale = 0)
crimtab[16:17,9]
crimtab[17:18,9]
|
5f35630f90b029c4c5f7150ed32d29ae3a1939b0 | 5542631869b0e4ce30fd12ae29ff025e9ce877f0 | /libs/lib_fnames.R | c4bd939b3474cd3c87b036d3ee465e47202a45d3 | [] | no_license | rbstern/analise_goleiro | f2870e3c31c3299f961ab1b0ae4d44ed9c575b7e | 6d395128e85a1e1142b31b07e12a03138cfe05fd | refs/heads/master | 2021-01-25T08:03:15.820418 | 2020-06-04T16:34:21 | 2020-06-04T16:34:21 | 93,708,349 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 512 | r | lib_fnames.R | dir_analysis = "./code/analysis/"
# Project path helpers: each function returns the relative path of a file
# in the corresponding project directory. (`dir_analysis` is defined just
# above this block; the remaining directory constants live here.)
dir_data <- "./data/"
dir_data_raw <- "./data_raw/"
dir_plot <- "./plots/"
dir_proj <- "amparo_2018/"

# Path of an analysis-script file.
fname_analysis <- function(fname)
  paste0(dir_analysis, fname)

# Path of a processed-data file for the current project.
fname_data <- function(fname)
  paste0(dir_data, dir_proj, fname)

# Path of a raw-data file for the current project.
fname_data_raw <- function(fname)
  paste0(dir_data_raw, dir_proj, fname)

# Path of a plot file for the current project.
fname_plot <- function(fname)
  paste0(dir_plot, dir_proj, fname)

# Raw-data directory (with trailing slash) for the current project.
get_data_raw_dir <- function()
  paste0(dir_data_raw, dir_proj)
|
5fccad1340a2ca8046f94e56d1af2208b1768cf1 | 2edd6b712b94a8146853f988d14198116ab92b94 | /ExplorationUB.R | 540426f4c5fca8b79b5b3c93e8c036ad6ba0aca6 | [] | no_license | jazippay/portfolio | d871a850ee2a05ec1a1c9703f5d59f859d1c9890 | a778f196412adf574181e32d9ecc89ed28d68160 | refs/heads/master | 2023-01-08T10:24:12.639273 | 2020-11-10T19:46:10 | 2020-11-10T19:46:10 | 263,468,723 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,555 | r | ExplorationUB.R | #Upward Bound Data Exploration and Analysis
# Research questions:
# - Does participation in our Upward Bound activities improve academic
#   performance? How can I know who participated most?
# - What factors mostly contribute to the improvement of academic performance?
# - Other data questions: what are "student control number" vs "control number"?
# - The datasets lack rows for students that have 0 contacts?
# - Is there a significant difference between GPA at entry and end of year?
# Planned analysis: test assumptions.
# Possible data wrangling: recode first generation and low income, convert
# contact date data type, remove NA.
library("rcompanion")
library("car")
library("fastR")
library(dbplyr)
library(ggplot2)
# Quick summaries of the two raw data frames.
# NOTE(review): `UB_Contact_Data_for_Research` / `UB_Student_Data_for_Research`
# are assumed to be loaded into the session before this script runs; they
# are not read anywhere in this file.
summary(UB_Contact_Data_for_Research)
summary(UB_Student_Data_for_Research)
describe(UB_Contact_Data_for_Research)
# Convert fields to numeric and drop unwanted columns (TODO).
# Correlation matrices.
# NOTE(review): cor() errors on non-numeric columns, so these calls only
# run if the data frames are all-numeric -- confirm.
UBcontact.cor = cor(UB_Contact_Data_for_Research)
UBstudent.cor = cor(UB_Student_Data_for_Research)
# Regression analysis of GPA: keep only entry and end-of-year GPA columns.
names(UB_Student_Data_for_Research)
keeps <- c("Entry GPA", "H.S. GPA at end of the year")
gpaData <- UB_Student_Data_for_Research[keeps]
View(gpaData)
gpaData1 <- na.omit(gpaData)
# Is there a linear relationship between GPA at entry (x) and exit (y)?
# NOTE(review): this car::scatterplot() call looks malformed -- it passes
# quoted column *names* as x/y (so it plots string constants) and names the
# data argument `gpaData1=`; a formula plus `data = gpaData1` was probably
# intended. Verify before relying on this plot.
scatterplot("Entry GPA", "H.S. GPA at end of the year", gpaData1="Scatterplot Example",
            xlab="GPA at Entry", ylab="GPA After Year", pch=19)
# NOTE(review): same issue here -- aes() with quoted strings maps constant
# values rather than columns; backtick-quoted column names were probably
# intended.
d <- ggplot(gpaData1, aes(x = "H.S. GPA at end of the year", y = "Entry GPA"))
d + geom_point() + geom_smooth(method=lm, se=FALSE)
# Convert contact method and type from character to factor.
# NOTE(review): `contactDF` is only created further down in this script, so
# these lines fail unless the later section already ran in this session.
contactDF$`Contact Method` <- as.factor(contactDF$`Contact Method`)
# Linear model: hours explained by contact method.
linreg_Contact <- lm(Hours ~ `Contact Method`, contactDF)
summary(linreg_Contact)
# Observation: the "Group" method had a significant p-value, i.e. hours
# relate more strongly to that method.
linreg_Contact2 <- lm(Hours ~ `Contact Type`, contactDF)
summary(linreg_Contact2)
# Observation: hours significant for contact types involving field trips.
# To run a correlation matrix everything must be numeric; could subset the
# data to the numeric columns of interest (Hours, Entry GPA, HS GPA at end
# of year) and recode contact type and high risk of academic failure.
cor(contactDF, method = "pearson", use = "complete.obs")
# NOTE(review): `my_data` is never defined in this file -- confirm source.
res <- cor(my_data)
round(res, 2)
# Getting back to the performance-metrics question: wrangle the data,
# combine the two datasets, and analyze change of GPA from entry to end of
# the year against contact type.
# Column-selection vectors for the derived data frames.
# NOTE(review): `keepsC` is defined but never used below.
keepsB <- c('Contact Method', 'Hours', 'Contact Type', 'control.number')
contactDF <- UB_Contact_Data_for_Research[keepsB]
keepsC <- c('Hours', 'Student Control Number')
KeepsD <- c('Entry GPA', 'H.S. GPA at end of the year', 'Contact Type', 'Hours', 'High School Graduation Status', 'APR::FirstEnrollDT', 'APR::RigorousStudy')
# Rename the ID columns so both data frames share the key 'control.number'.
names(UB_Student_Data_for_Research)[names(UB_Student_Data_for_Research) == 'Control Number'] <- 'control.number'
names(UB_Contact_Data_for_Research)[names(UB_Contact_Data_for_Research) == 'Student Control Number'] <- 'control.number'
# Fix the control-number column displaying in scientific notation.
UB_Student_Data_for_Research$control.number <- as.integer(UB_Student_Data_for_Research$control.number)
UB_Contact_Data_for_Research$control.number <- as.integer(UB_Contact_Data_for_Research$control.number)
# Merge student data with contact data on control.number.
# NOTE(review): merge() defaults to an inner join, so students with zero
# contact rows drop out here -- this matches the open question above.
StuContact_df1 <- merge(UB_Student_Data_for_Research, UB_Contact_Data_for_Research,by="control.number")
gpaContact_df1 <- StuContact_df1[KeepsD]
gpaContact_df1$`Contact Type` <- as.factor(gpaContact_df1$`Contact Type`)
library(dplyr)
# Inspect the distinct values of each column about to be recoded.
unique(gpaContact_df1$`APR::FirstEnrollDT`)
unique(gpaContact_df1$`Contact Type`)
unique(gpaContact_df1$`High School Graduation Status`)
# Recode APR::FirstEnrollDT: sentinel dates ("99/99/999", "88/88/8888")
# become 0; each real enrollment date becomes 1.
gpaContact_df2 <- gpaContact_df1 %>%
  mutate(`APR::FirstEnrollDT` = recode(`APR::FirstEnrollDT`, "99/99/999" = 0, "88/88/8888" = 0, "09/21/2019" = 1, "08/27/2018" = 1, "08/26/2019" = 1, "01/19/2019" = 1, "09/01/2019" = 1, "08/30/2019" = 1, "08/19/2019" = 1, "08/14/2018" = 1, "09/09/2019" = 1, "09/19/2019" = 1, "09/16/2019" = 1, "07/26/2019" = 1, "08/09/2019" = 1, "09/10/2019" = 1, "10/01/2019" = 1, "09/29/2019" = 1, "06/04/2019" = 1))
gpaContact_df2
# Recode High School Graduation Status: diploma received -> 1, still
# enrolled -> 0 (any other status stays NA).
gpaContact_df2$HighSchoolGraduationStatusR[gpaContact_df2$`High School Graduation Status`=="Received high school diploma"] <- 1
gpaContact_df2$HighSchoolGraduationStatusR[gpaContact_df2$`High School Graduation Status`=="Currently enrolled in high school"] <- 0
# Recode contact type ------------------------------------------------------
# Numeric code for each raw `Contact Type` label. A single named lookup
# vector replaces the original wall of 34 repeated element-wise
# assignments; labels not listed here become NA, exactly as before.
# NOTE(review): several labels look like data-entry variants of others
# ("academic Prep", "college knowledge", "FaFSA Info", "Academic Prep\r");
# they keep the codes the original script assigned them.
contact_type_codes <- c(
  "College Knowledge" = 1,
  "College Level Course" = 2,
  "SAT/ACT Info" = 3,
  "Student Development" = 4,
  "Academic Prep" = 5,
  "Financial Literacy" = 6,
  "College Visit" = 7,
  "College Application" = 8,
  "Pre Field Trip" = 9,
  "FAFSA Info" = 10,
  "Senior Exit" = 11,
  "College Essay / Personal Statement" = 12,
  "Course Selection / A-G" = 13,
  "Tutoring Services / Referral" = 14,
  "Career Exploration" = 15,
  "Senior Planning" = 16,
  "Educational Field Trip" = 17,
  "Upward Bound Course" = 18,
  "Cultural Activities" = 19,
  "IAP Development" = 20,
  "Community Service" = 21,
  "FaFSA Info" = 22,
  "Academic Prep\r" = 23,
  "STEM" = 24,
  "English Workshop" = 25,
  "Foreign Language Workshop" = 26,
  "STU1" = 27,
  "College Course STU1" = 28,
  "Math / Science" = 29,
  "English" = 30,
  "Summer Program (Foreign Language)" = 31,
  "academic Prep" = 5,
  "Workshop" = 33,
  "college knowledge" = 1
)
# as.character() handles the factor column; unname() drops lookup names so
# the new column is a plain numeric vector.
gpaContact_df2$ContactTypeR <-
  unname(contact_type_codes[as.character(gpaContact_df2$`Contact Type`)])
# Drop NA rows, keep only the analysis columns, and run the analysis.
gpaContact_df3 <- na.omit(gpaContact_df2)
keepsN <- c('Entry GPA', 'H.S. GPA at end of the year', 'ContactTypeR', 'Hours', 'HighSchoolGraduationStatusR', 'APR::FirstEnrollDT', 'APR::RigorousStudy')
gpaContact_df4 <- gpaContact_df3[keepsN]
# Rename columns to analysis-friendly names.
# NOTE(review): the last two renames target gpaContact_df3, not df4 --
# confirm that is intentional.
names(gpaContact_df4)[names(gpaContact_df4) == 'APR::FirstEnrollDT'] <- 'PostSecondaryEnroll'
names(gpaContact_df4)[names(gpaContact_df4) == 'APR::RigorousStudy'] <- 'RigorousStudy'
names(gpaContact_df3)[names(gpaContact_df3) == 'Contact Type'] <- 'ContactType'
names(gpaContact_df3)[names(gpaContact_df3) == 'H.S. GPA at end of the year'] <- 'EndofYearGPA'
gpaContact_df2$`APR::RigorousStudy` <- as.numeric(gpaContact_df2$`APR::RigorousStudy`)
summary(gpaContact_df2)
# Correlation matrix of the analysis columns.
# NOTE(review): cor() requires every column of gpaContact_df4 to be
# numeric -- confirm 'Entry GPA' etc. are numeric at this point. The
# corrplot package is loaded but not yet used below.
library(corrplot)
gpaContact_df5 = cor(gpaContact_df4)
|
a90b2258e41d86833c10105c2f247c2b82a7b77a | 29585dff702209dd446c0ab52ceea046c58e384e | /CNOGpro/R/solvePR.R | 6e0d4daebedc1e5618b900ecd2ad845ea4df8d8b | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 115 | r | solvePR.R | solvePR <-
function(mean, variance){
  # Method-of-moments solve for the (p, r) parameter pair from a mean and
  # variance:  p = 1 - mean/variance,  r = mean^2 / (variance - mean).
  # Meaningful values require variance > mean (overdispersion).
  # NOTE(review): these formulas match a negative-binomial
  # parameterization -- confirm against the package documentation.
  p <- 1 - mean / variance
  r <- (mean * mean) / (variance - mean)
  c(p, r)
}
|
1e354aad807dab8b32b0f95561c235dc3d0b493c | 9c98b38d37cf5b4ddd3a4bf8d7f8770bb6763bbe | /man/gather_producer_autoproducer.Rd | cb8d5a18e19ecc88dd43d68f8ad274e009e7683c | [] | no_license | MatthewHeun/IEATools | 2aca274eff0a51b8d31751457d45a7a43b1a654f | ed3d1c13894ea71b533d6c827b80adedfcc8808e | refs/heads/master | 2023-08-10T05:32:48.813023 | 2023-05-16T20:49:04 | 2023-05-16T20:49:04 | 176,147,859 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 4,075 | rd | gather_producer_autoproducer.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/specify_tp_eiou.R
\name{gather_producer_autoproducer}
\alias{gather_producer_autoproducer}
\title{Gather main activity producer and autoproducer industries}
\usage{
gather_producer_autoproducer(
.tidy_iea_df,
flow_aggregation_point = IEATools::iea_cols$flow_aggregation_point,
flow = IEATools::iea_cols$flow,
e_dot = IEATools::iea_cols$e_dot,
transformation_processes = IEATools::aggregation_flows$transformation_processes,
negzeropos = ".negzeropos",
autoproducer_elect = IEATools::main_act_plants$autoprod_elect_plants,
autoproducer_chp = IEATools::transformation_processes$autoproducer_CHP_plants,
autoproducer_heat = IEATools::transformation_processes$autoproducer_heat_plants,
main_act_producer_elect = IEATools::main_act_plants$main_act_prod_elect_plants,
main_act_producer_heat = IEATools::main_act_plants$main_act_prod_heat_plants,
main_act_producer_chp = IEATools::main_act_plants$main_act_prod_chp_plants
)
}
\arguments{
\item{.tidy_iea_df}{The \code{.tidy_iea_df} which flows need to be specified.}
\item{flow_aggregation_point}{The name of the flow aggregation point column in the \code{.tidy_iea_df}.
Default is \code{IEATools::iea_cols$flow_aggregation_point}.}
\item{flow}{The name of the flow column in the \code{.tidy_iea_df}.
Default is \code{IEATools::iea_cols$flow}.}
\item{e_dot}{The name of the energy column in the \code{.tidy_iea_df}.
Default is \code{IEATools::iea_cols$flow}.}
\item{transformation_processes}{A string identifying transformation processes in the \code{flow_aggregation_point} column of the \code{.tidy_iea_df}
Default is \code{IEATools::aggregation_flows$flow_aggregation_point}.}
\item{negzeropos}{The name of a temporary column created in \code{.tidy_iea_df}.
Default is ".negzeropos".}
\item{autoproducer_elect}{A string identifying "Autoproducer electricity plants" in the \code{flow} column of the \code{.tidy_iea_df}.
Default is \code{IEATools::main_act_plants$autoprod_elect_plants}.}
\item{autoproducer_chp}{A string identifying "Autoproducer CHP plants" in the \code{flow} column of the \code{.tidy_iea_df}.
Default is \code{IEATools::transformation_processes$autoproducer_CHP_plants}.}
\item{autoproducer_heat}{A string identifying "Autoproducer heat plants" in the \code{flow} column of the \code{.tidy_iea_df}.
Default is \code{IEATools::transformation_processes$autoproducer_heat_plants}.}
\item{main_act_producer_elect}{A string identifying "Main activity producer electricity plants" in the \code{flow} column of the \code{.tidy_iea_df}.
Default is \code{IEATools::main_act_plants$main_act_prod_elect_plants}.}
\item{main_act_producer_heat}{A string identifying "Main activity producer heat plants" in the \code{flow} column of the \code{.tidy_iea_df}.
Default is \code{IEATools::main_act_plants$main_act_prod_heat_plants}.}
\item{main_act_producer_chp}{A string identifying "Main activity producer CHP plants" in the \code{flow} column of the \code{.tidy_iea_df}.
Default is \code{IEATools::main_act_plants$main_act_prod_chp_plants}.}
}
\value{
The \code{tidy_iea_df} with autoproducer plants merged with main activity producer plants.
}
\description{
The IEA extended energy balances include both main activity producer
and autoproducer industries for electricity, heat, and CHP plants.
See details for an explication of each.
This function gathers main activity producer and autoproducer flows for each
of the three types of plants: electricity, heat, and CHP plants.
This function is called within the \code{specify_all()} function.
}
\details{
Autoproducer plants are those that consume in-situ the energy they produce.
For instance, an iron and steel plant that produces electricity
and directly consumes it would be classified as an autoproducer electricity plant.
Conversely, main activity producer plants are those that produce
a product, be it electricity, heat, or both (CHP plants) and sell it
to the market.
}
\examples{
library(dplyr)
load_tidy_iea_df() \%>\%
gather_producer_autoproducer()
}
|
a67f43e7cd3240b8b4a06ac4d9f77cae115f2cc4 | 5a7611075079f277e02c52aeb6cbef1ad189ea96 | /PD_Tracking_Charts.R | 878da45a9ca410617199ad459275d8ddf6474f04 | [] | no_license | scottmmiller1/P4H | 0138196ad227bb01a7296f21e1a7e292edc7cc2e | a4377572f4f88ada51a3df7874e41b5165e8d415 | refs/heads/master | 2021-06-29T17:06:28.557166 | 2020-09-29T13:57:11 | 2020-09-29T13:57:11 | 148,031,254 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,024 | r | PD_Tracking_Charts.R |
# ---------------------------------------------------------------------------
# PD tracking: pre/post training assessment charts.
# Reads the collapsed assessment data and writes one PNG per assessment
# (overall average "BA" plus items v6..v18 -> pp1..pp13) showing pre- vs
# post-training percent correct across the 15 trainings.
# NOTE(review): setwd() and the absolute paths are kept from the original
# script; they only resolve on the author's machine.
# ---------------------------------------------------------------------------
setwd("/Users/scottmiller/Desktop/P4H Global/Evaluation/PD/PD_Tacking/All/Rplots")
dta <- read.csv("/Users/scottmiller/Desktop/P4H Global/Evaluation/PD/PD_Tacking/All/PD_All_collapse.csv")

# Training dates used as x-axis labels, in school order (1..15).
train <- c("Jul. 16, 2018","Jul. 23, 2018","Aug. 2, 2018","Sep. 29, 2018","Oct. 5, 2018","Dec. 10, 2018","Dec. 13, 2018",
           "Jan. 22, 2019","Mar 6, 2019","Apr. 15, 2019","May 27, 2019","Jul. 8, 2019","Jul. 29, 2019","Aug. 20, 2019","Aug. 28, 2019")

# Overall average percent correct (pre/post) per school, hard-coded from
# the assessment summaries.
dta$Pre[dta$school=="Mission of Hope"] <- .42; dta$Post[dta$school=="Mission of Hope"] <- .83
dta$Pre[dta$school=="Ecole St Marc"] <- .36; dta$Post[dta$school=="Ecole St Marc"] <- .93
dta$Pre[dta$school=="Harvy"] <- .5; dta$Post[dta$school=="Harvy"] <- .87
dta$Pre[dta$school=="HOH"] <- .4; dta$Post[dta$school=="HOH"] <- .94
dta$Pre[dta$school=="CEFCAP"] <- .41; dta$Post[dta$school=="CEFCAP"] <- .88
dta$Pre[dta$school=="RTS"] <- .55; dta$Post[dta$school=="RTS"] <- .99
dta$Pre[dta$school=="JS"] <- .59; dta$Post[dta$school=="JS"] <- .96
dta$Pre[dta$school=="TRESOR"] <- .48; dta$Post[dta$school=="TRESOR"] <- .98
dta$Pre[dta$school=="Sonje"] <- .51; dta$Post[dta$school=="Sonje"] <- .91
dta$Pre[dta$school=="PD Dondon"] <- .42; dta$Post[dta$school=="PD Dondon"] <- .93
dta$Pre[dta$school=="OEDP"] <- .47; dta$Post[dta$school=="OEDP"] <- .94
dta$Pre[dta$school=="LABY"] <- .35; dta$Post[dta$school=="LABY"] <- .94
dta$Pre[dta$school=="JOUISSANT"] <- .49; dta$Post[dta$school=="JOUISSANT"] <- .90
dta$Pre[dta$school=="CFC - K"] <- .24; dta$Post[dta$school=="CFC - K"] <- .96
dta$Pre[dta$school=="CFC - G"] <- .36; dta$Post[dta$school=="CFC - G"] <- .87

# Draw one pre/post line chart and save it to `outfile` (replaces the 14
# near-identical copy-pasted plotting stanzas of the original script).
#   pre_col, post_col : names of the pre/post columns in `dta`
#   outfile           : PNG file name (written to the current directory)
#   legend_x, legend_y: legend() position -- a keyword such as
#                       "bottomleft" (with legend_y = NULL) or numeric
#                       coordinates
#   inset_x           : horizontal inset used with keyword positions
#   text_y            : y position of the "2018"/"2019" period labels
plot_pre_post <- function(pre_col, post_col, outfile,
                          legend_x = "bottomleft", legend_y = NULL,
                          inset_x = 0.1, text_y = 0.1) {
  png(outfile, width = 800)
  op <- par(mar = c(7, 4, 2, 1) + 1)
  ord <- order(dta$nschool)
  # Pre scores: red triangles.
  plot(dta$nschool[ord], dta[[pre_col]][ord], type = "b",
       bty = "l", xlab = "", ylab = "Percent Correct", col = "firebrick2",
       lwd = 3, pch = 17, ylim = c(0, 1), xaxt = "n")
  grid(NA, NULL, lwd = 2)
  axis(1, at = 1:15, labels = train, las = 2, cex.lab = 2)
  # Post scores: blue circles.
  lines(dta$nschool[ord], dta[[post_col]][ord],
        col = "royalblue3", lwd = 3, pch = 19, type = "b")
  # Dashed divider between the 2018 and 2019 trainings.
  abline(v = 7.5, lty = 2, lwd = 2)
  text(6, text_y, "2018", cex = 1.5)
  text(9, text_y, "2019", cex = 1.5)
  # Add a legend
  legend(legend_x, legend_y, legend = c("Pre", "Post"),
         col = c("firebrick2", "royalblue3"), pch = c(17, 19), bty = "n",
         pt.cex = 2, cex = 1, text.col = "black", horiz = FALSE,
         inset = c(inset_x, 0.1))
  par(op)  # reset graphics parameters
  dev.off()
}

# Overall average plus one chart per assessment item. The non-default
# legend/text placements below reproduce the originals exactly.
plot_pre_post("Pre",    "Post",    "BA.png")
plot_pre_post("v6Pre",  "v6Post",  "pp1.png",  legend_x = 1, legend_y = 0.6)
plot_pre_post("v7Pre",  "v7Post",  "pp2.png")
plot_pre_post("v8Pre",  "v8Post",  "pp3.png")
plot_pre_post("v9Pre",  "v9Post",  "pp4.png")
plot_pre_post("v10Pre", "v10Post", "pp5.png")
plot_pre_post("v11Pre", "v11Post", "pp6.png")
plot_pre_post("v12Pre", "v12Post", "pp7.png")
plot_pre_post("v13Pre", "v13Post", "pp8.png",  legend_x = 2, legend_y = 0.4, text_y = 0.3)
plot_pre_post("v14Pre", "v14Post", "pp9.png")
plot_pre_post("v15Pre", "v15Post", "pp10.png")
plot_pre_post("v16Pre", "v16Post", "pp11.png")
plot_pre_post("v17Pre", "v17Post", "pp12.png", legend_x = 2, legend_y = 0.15, inset_x = 0.15)
plot_pre_post("v18Pre", "v18Post", "pp13.png")
# 14
png("pp14.png", width = 800)
op <- par(mar = c(7,4,2,1) + 1)
plot(dta$nschool[order(dta$nschool)], dta$v19Pre[order(dta$nschool)], type="b",
bty="l", xlab="", ylab="Percent Correct" , col="firebrick2" , lwd=3,
pch=17 , ylim=c(0,1), xaxt = "n")
grid(NA, NULL, lwd = 2)
axis(1, at=1:15, labels=train, las = 2, cex.lab=2)
lines(dta$nschool[order(dta$nschool)], dta$v19Post[order(dta$nschool)],
col="royalblue3" , lwd=3 , pch=19 , type="b" )
abline(v=7.5,lty=2,lwd=2)
text(6,0.1,"2018",cex=1.5)
text(9,0.1,"2019",cex=1.5)
# Add a legend
legend("bottomleft", legend = c("Pre", "Post"), col = c("firebrick2", "royalblue3"),
pch = c(17,19), bty = "n", pt.cex = 2, cex = 1, text.col = "black", horiz = F,
inset = c(0.1, 0.1))
par(op) ## reset
dev.off()
# 15
png("pp15.png", width = 800)
op <- par(mar = c(7,4,2,1) + 1)
plot(dta$nschool[order(dta$nschool)], dta$v20Pre[order(dta$nschool)], type="b",
bty="l", xlab="", ylab="Percent Correct" , col="firebrick2" , lwd=3,
pch=17 , ylim=c(0,1), xaxt = "n")
grid(NA, NULL, lwd = 2)
axis(1, at=1:15, labels=train, las = 2, cex.lab=2)
lines(dta$nschool[order(dta$nschool)], dta$v20Post[order(dta$nschool)],
col="royalblue3" , lwd=3 , pch=19 , type="b" )
abline(v=7.5,lty=2,lwd=2)
text(6,0.1,"2018",cex=1.5)
text(9,0.1,"2019",cex=1.5)
# Add a legend
legend(2,.15, legend = c("Pre", "Post"), col = c("firebrick2", "royalblue3"),
pch = c(17,19), bty = "n", pt.cex = 2, cex = 1, text.col = "black", horiz = F,
inset = c(0.1, 0.1))
par(op) ## reset
dev.off()
# 16
png("pp16.png", width = 800)
op <- par(mar = c(7,4,2,1) + 1)
plot(dta$nschool[order(dta$nschool)], dta$v21Pre[order(dta$nschool)], type="b",
bty="l", xlab="", ylab="Percent Correct" , col="firebrick2" , lwd=3,
pch=17 , ylim=c(0,1), xaxt = "n")
grid(NA, NULL, lwd = 2)
axis(1, at=1:15, labels=train, las = 2, cex.lab=2)
lines(dta$nschool[order(dta$nschool)], dta$v21Post[order(dta$nschool)],
col="royalblue3" , lwd=3 , pch=19 , type="b" )
abline(v=7.5,lty=2,lwd=2)
text(6,0.1,"2018",cex=1.5)
text(9,0.1,"2019",cex=1.5)
# Add a legend
legend("bottomleft", legend = c("Pre", "Post"), col = c("firebrick2", "royalblue3"),
pch = c(17,19), bty = "n", pt.cex = 2, cex = 1, text.col = "black", horiz = F,
inset = c(0.1, 0.1))
par(op) ## reset
dev.off()
# Per-question means: first avg column from data columns 18:33, second from
# columns 2:17 (presumably post vs pre scores -- confirm against the layout).
avg <- matrix(0, 16, 2)
for (q in seq_len(16)) {
  avg[q, ] <- c(mean(dta[, q + 17]), mean(dta[, q + 1]))
}
# ------------------------------------------------------------------------------
# Evals
dta <- read.csv("/Users/scottmiller/Desktop/P4H Global/Evaluation/PD/PD_Tacking/All/Eval_All_collapse.csv")
eval <- c("Very Low","Low","Neutral","High","Very High")
# Overall pre/post averages across the seven evaluation items.
# BUG FIX: the original per-row loop called mean(dta$v3[i], dta$v5[i], ...),
# but mean() does NOT average its arguments -- the 2nd and 3rd arguments are
# interpreted as 'trim' and 'na.rm' by mean.default(), so each "average" was
# silently just the first item (v3 resp. v4).  rowMeans() over the item
# columns computes the intended per-respondent average.
pre_items  <- c("v3", "v5", "v7", "v9", "v11", "v13", "v15")
post_items <- c("v4", "v6", "v8", "v10", "v12", "v14", "v16")
dta$b.avg <- rowMeans(dta[, pre_items])
dta$a.avg <- rowMeans(dta[, post_items])
# Overall evaluation averages by school: b.avg (pre, red) vs a.avg (post, blue)
png("avg_eval.png", width = 800)
op <- par(mar = c(7,4,2,1) + 1)
plot(dta$nschool[order(dta$nschool)], dta$b.avg[order(dta$nschool)], type="b",
     bty="l", xlab="", ylab="", col="firebrick2" , lwd=3,
     pch=17 , ylim=c(1,5), xaxt = "n", yaxt = "n")
grid(NA, NULL, lwd = 2)
# custom axes: school labels ('train', defined earlier in the script) on x,
# the 5-point evaluation scale on y
axis(1, at=1:15, labels=train, las = 2, cex.lab=2)
axis(2, at=1:5, labels=eval, las = 1, cex.lab=2)
lines(dta$nschool[order(dta$nschool)], dta$a.avg[order(dta$nschool)],
      col="royalblue3" , lwd=3 , pch=19 , type="b" )
# dashed divider between the 2018 and 2019 cohorts
abline(v=7.5,lty=2,lwd=2)
text(6,1.5,"2018",cex=1.5)
text(9,1.5,"2019",cex=1.5)
# Add a legend
legend("bottomleft", legend = c("Pre", "Post"), col = c("firebrick2", "royalblue3"),
       pch = c(17,19), bty = "n", pt.cex = 2, cex = 1, text.col = "black", horiz = F,
       inset = c(0.1, 0.1))
par(op) ## reset margins
dev.off()
# Evaluation items 1-6 (eval1.png ... eval6.png): pre (odd columns v3..v13)
# vs post (even columns v4..v14) on the 5-point evaluation scale.  The six
# original copy-pasted blocks differed only in the column pair, so they are
# collapsed into a single loop (removing the copy-paste drift risk).
ord_eval <- order(dta$nschool)
for (k in 1:6) {
  pre  <- dta[[paste0("v", 2 * k + 1)]]   # v3, v5, ..., v13
  post <- dta[[paste0("v", 2 * k + 2)]]   # v4, v6, ..., v14
  png(paste0("eval", k, ".png"), width = 800)
  op <- par(mar = c(7, 4, 2, 1) + 1)
  plot(dta$nschool[ord_eval], pre[ord_eval], type = "b", bty = "l",
       xlab = "", ylab = "", col = "firebrick2", lwd = 3, pch = 17,
       ylim = c(1, 5), xaxt = "n", yaxt = "n")
  grid(NA, NULL, lwd = 2)
  # custom axes: school labels on x, the 5-point scale labels ('eval') on y
  axis(1, at = 1:15, labels = train, las = 2, cex.lab = 2)
  axis(2, at = 1:5, labels = eval, las = 1, cex.lab = 2)
  lines(dta$nschool[ord_eval], post[ord_eval], col = "royalblue3", lwd = 3,
        pch = 19, type = "b")
  # dashed divider between the 2018 and 2019 cohorts
  abline(v = 7.5, lty = 2, lwd = 2)
  text(6, 1.5, "2018", cex = 1.5)
  text(9, 1.5, "2019", cex = 1.5)
  legend("bottomleft", legend = c("Pre", "Post"),
         col = c("firebrick2", "royalblue3"), pch = c(17, 19), bty = "n",
         pt.cex = 2, cex = 1, text.col = "black", horiz = FALSE,
         inset = c(0.1, 0.1))
  par(op)  # restore margins
  dev.off()
}
# Training-evaluation items (data columns 14:17), one agreement-scale plot
# per item, saved as e1.png ... e4.png
t.eval <- c("Strongly Disagree","Disagree","Neutral","Agree","Strongly Agree")
school_order <- order(dta$nschool)
for (item in seq_len(4)) {
  png(paste0("e", item, ".png"), width = 800)
  op <- par(mar = c(7, 7, 2, 1) + 1)
  scores <- dta[, item + 13]
  plot(dta$nschool[school_order], scores[school_order], type = "b",
       bty = "l", xlab = "", ylab = "", col = "royalblue3", lwd = 3,
       pch = 17, ylim = c(1, 5), xaxt = "n", yaxt = "n")
  grid(NA, NULL, lwd = 2)
  axis(1, at = 1:15, labels = train, las = 2, cex.lab = 2)
  axis(2, at = 1:5, labels = t.eval, las = 1, cex.lab = 2)
  # dashed divider between the 2018 and 2019 cohorts
  abline(v = 7.5, lty = 2, lwd = 2)
  text(6, 1.5, "2018", cex = 1.5)
  text(9, 1.5, "2019", cex = 1.5)
  par(op)  # restore margins
  dev.off()
}
|
c0b9a6c9606020e7c42f343ed2ff1d255fdab98c | e9081eba897ce871ec51ebd27fe13c1afab01118 | /R Scripts/NaiveBayesSmote.R | 549a0311f06239cae35d313c3071967f6a4e7296 | [] | no_license | broccolihead/Academic-Probation-at-ISS | 47b30d964f03494bab25547f398ed518700457fb | 9db75e52024a20ee305fbd76bd054ba4a9eace91 | refs/heads/master | 2020-03-25T19:42:03.599881 | 2018-11-26T07:30:09 | 2018-11-26T07:30:09 | 144,095,699 | 0 | 1 | null | 2018-11-19T15:36:12 | 2018-08-09T03:18:40 | R | UTF-8 | R | false | false | 946 | r | NaiveBayesSmote.R | #Naive Bayes using K fold cross validation
# Naive Bayes classifier with SMOTE oversampling and 10-fold cross-validation.
library("klaR")
library("caret")
library("DMwR")
library("e1071")
# Load the labeled data
train1 <- read.csv("D:/Aiman US/OneDrive - University of Arizona/acads/Capstone/R/bucketlabeled.csv")
# (The original also assigned features/labels from the raw data here, but both
# were dead stores -- immediately overwritten after SMOTE below -- and have
# been removed.)
# To solve the class imbalance problem, oversample the minority class with SMOTE
newData <- SMOTE(PROBATIONFLAG ~ ., train1, perc.over = 400, perc.under = 100)
table(newData$PROBATIONFLAG)
# Features (columns 2:6) and labels of the SMOTE-balanced data
x1 <- newData[2:6]
y1 <- newData$PROBATIONFLAG
# Fit Naive Bayes ('nb') with 10-fold cross-validation.
# NOTE(review): results are stochastic (SMOTE + CV fold assignment); call
# set.seed() beforehand if reproducibility is required.
model <- train(x1, y1, 'nb', trControl = trainControl(method = 'cv', number = 10))
# Cross-validated confusion matrix for the trained model
result <- confusionMatrix(model)
result1 <- result$table
# Precision of the 2nd class: TP / (TP + FP), taking rows of the table as
# predictions (caret's confusionMatrix convention)
result1[2, 2] / (result1[2, 2] + result1[2, 1])
# Recall of the 2nd class: TP / (TP + FN)
result1[2, 2] / (result1[2, 2] + result1[1, 2])
|
93ec9554fdcb11de9bb1140d69bbaba506bcbc8a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/exactci/examples/poisson.exact.Rd.R | 921c62c73237711fdc411e497fa815f668972ed2 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,576 | r | poisson.exact.Rd.R | library(exactci)
### Name: poisson.exact
### Title: Exact Poisson tests with Matching Confidence Intervals
### Aliases: poisson.exact
### Keywords: htest
### ** Examples
### Suppose you have observed counts of 2 out of 17877 in group A
### and 10 out of 20000 in group B.
### stats::poisson.test gives non-matching results:
### the p-value uses the 'minlike' criterion but the confidence
### interval uses the 'central' criterion.
poisson.test(c(2,10),c(17877,20000))
### poisson.exact gives a CI that matches its p-value;
### it defaults to the 'central' two-sided method.
poisson.exact(c(2,10),c(17877,20000))
### other two-sided methods:
poisson.exact(c(2,10),c(17877,20000),tsmethod="minlike")
poisson.exact(c(2,10),c(17877,20000),tsmethod="blaker")
## Mid-p confidence intervals do not guarantee coverage,
## but are more likely to have on average closer nominal
## coverage than exact ones (sometimes erring on the
## too liberal side).
##
yCY<-c(0:20,20+(1:5)*2,30+(1:14)*5)
TableICohenYang<-matrix(NA,length(yCY),6,dimnames=list(yCY,
c("90pct LL","90pct UL","95pct LL","95pct UL","99pct LL","99pct UL")))
for (i in 1:length(yCY)){
TableICohenYang[i,1:2]<-poisson.exact(yCY[i],
midp=TRUE,conf.level=.9)$conf.int
TableICohenYang[i,3:4]<-poisson.exact(yCY[i],
midp=TRUE,conf.level=.95)$conf.int
TableICohenYang[i,5:6]<-poisson.exact(yCY[i],
midp=TRUE,conf.level=.99)$conf.int
}
TableICohenYang<-round(TableICohenYang,3)
TableICohenYang
|
e2ec3180df2339f4f266ac19acbcce5d61d4170c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/seplyr/examples/group_by_se.Rd.R | 8e2f404fad6d16f5be788f8c33dc1be910a6dadc | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 201 | r | group_by_se.Rd.R | library(seplyr)
### Name: group_by_se
### Title: group_by standard interface.
### Aliases: group_by_se
### ** Examples
# group_by_se() is the standard-evaluation version of dplyr::group_by():
# it takes the grouping columns as a character vector.  '%.>%' is the
# "dot pipe"; '.' marks where the left-hand value is inserted.
datasets::mtcars %.>%
  group_by_se(., c("cyl", "gear")) %.>%
  head(.)
|
198e227fc35b81dc9779af430173dbb0e3c9be8a | 6b57ed4964727602b75250c5dbcfa10653cf2ee0 | /R/step.R | b4f82ebde50ff7a46369050877d64eee474d5507 | [] | no_license | runehaubo/lmerTestR | 278b4b1a4f99387cc0216193a5236cae6e6a4f0c | 35dc5885205d709cdc395b369b08ca2b7273cb78 | refs/heads/master | 2021-06-05T16:38:46.427782 | 2020-10-23T06:59:55 | 2020-10-23T06:59:55 | 117,861,877 | 47 | 10 | null | 2020-10-19T13:52:50 | 2018-01-17T16:26:02 | HTML | UTF-8 | R | false | false | 15,055 | r | step.R | #############################################################################
# Copyright (c) 2013-2020 Alexandra Kuznetsova, Per Bruun Brockhoff, and
# Rune Haubo Bojesen Christensen
#
# This file is part of the lmerTest package for R (*lmerTest*)
#
# *lmerTest* is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# *lmerTest* is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# <https://www.r-project.org/Licenses/> and/or
# <http://www.gnu.org/licenses/>.
#############################################################################
#
# step.R - implementation of backward elimination for lmerModLmerTest objects
# ------- Contents: --------
#
# --- Generics: ---
#
# step
# get_model
#
# --- methods: ---
#
# step.lmerModLmerTest
# step.default
# get_model.step_list
# print.step_list
# plot.step_list
#
# --- other exported function: ---
#
# --- utility functions: ---
#
# ran_redTable
# fix_redTable
# reduce_random
# ranova_lm
# reduce_fixed
#
##############################################
######## step()
##############################################
#' Generic Step Function
#'
#' Generic step function with default method \code{stats::step}. This
#' construction ensures that \code{stats::step} still works on \code{lm}
#' objects etc. after loading the \pkg{lmerTest} package.
#'
#' @param object a model object.
#' @param ... currently not used.
#'
#' @author Rune Haubo B. Christensen
#' @seealso \code{\link[=step.lmerModLmerTest]{step}}
#' @export
#' @keywords internal
step <- function(object, ...) UseMethod("step")  # S3 dispatch on class(object)

##############################################
######## step.default()
##############################################
#' @rdname step
#' @export
#' @keywords internal
# Fall back to stats::step() so that lm objects etc. keep working after
# lmerTest masks the generic (see the roxygen note above).
step.default <- function(object, ...) stats::step(object, ...)
##############################################
######## step.lmerModLmerTest()
##############################################
#' Backward Elimination for Linear Mixed Models
#'
#' Backward elimination of random-effect terms followed by backward elimination
#' of fixed-effect terms in linear mixed models.
#'
#' Tests of random-effects are performed using \code{\link{ranova}} (using
#' \code{reduce.terms = TRUE}) and tests of fixed-effects are performed using
#' \code{\link[=drop1.lmerModLmerTest]{drop1}}.
#'
#' The step method for \code{\link{lmer}} fits has a print method.
#'
#' @param object a fitted model object. For the \code{lmerModLmerTest} method
#' an \code{\link{lmer}} model fit (of class \code{"lmerModLmerTest"}.)
#' @param ddf the method for computing the denominator degrees of freedom and
#' F-statistics. \code{ddf="Satterthwaite"} (default) uses Satterthwaite's method;
#' \code{ddf="Kenward-Roger"} uses Kenward-Roger's method.
#' @param alpha.random alpha for random effects elimination
#' @param alpha.fixed alpha for fixed effects elimination
#' @param reduce.fixed reduce fixed effect structure? \code{TRUE} by default.
#' @param reduce.random reduce random effect structure? \code{TRUE} by default.
#' @param keep an optional character vector of fixed effect terms which should
#' not be considered for eliminated. Valid terms are given by
#' \code{attr(terms(object), "term.labels")}. Terms that are marginal to terms
#' in keep will also not be considered for eliminations.
#' @param ... currently not used.
#'
#' @return \code{step} returns a list with elements \code{"random"} and
#' \code{"fixed"} each
#' containing anova-like elimination tables. The \code{"fixed"} table is
#' based on \code{drop1} and the \code{"random"} table is
#' based on \code{ranova} (a \code{drop1}-like table for random effects). Both
#' tables have a column \code{"Eliminated"} indicating the order in which terms
#' are eliminated from the model with zero (\code{0}) indicating that the term
#' is not eliminated from the model.
#'
#' The \code{step} object also contains the final model as an attribute which
#' is extractable with \code{get_model(<step_object>)}.
#' @seealso \code{\link[=drop1.lmerModLmerTest]{drop1}} for tests of marginal
#' fixed-effect terms and \code{\link{ranova}} for a
#' \code{\link[=drop1.lmerModLmerTest]{drop1}}-like table of reduction of
#' random-effect terms.
#' @author Rune Haubo B. Christensen and Alexandra Kuznetsova
#' @export
#' @examples
#'
#' # Fit a model to the ham dataset:
#' fm <- lmer(Informed.liking ~ Product*Information+
#' (1|Consumer) + (1|Product:Consumer)
#' + (1|Information:Consumer), data=ham)
#'
#' # Backward elimination using terms with default alpha-levels:
#' (step_res <- step(fm))
#' final <- get_model(step_res)
#' anova(final)
#'
#' \dontrun{
#' # Fit 'big' model:
#' fm <- lmer(Informed.liking ~ Product*Information*Gender*Age +
#' + (1|Consumer) + (1|Consumer:Product) +
#' (1|Consumer:Information), data=ham)
#' step_fm <- step(fm)
#' step_fm # Display elimination results
#' final_fm <- get_model(step_fm)
#' }
#'
step.lmerModLmerTest <- function(object, ddf=c("Satterthwaite", "Kenward-Roger"),
                                 alpha.random=0.1, alpha.fixed=0.05,
                                 reduce.fixed=TRUE, reduce.random=TRUE,
                                 keep, ...) {
  # Check for and warn about deprecated arguments:
  ignored <- c("type", "fixed.calc", "lsmeans.calc", "difflsmeans.calc",
               "test.effs")
  dots <- list(...)
  # pmatch() also catches abbreviated forms of the deprecated argument names.
  for(nm in ignored) if(any(pmatch(names(dots), nm, nomatch = 0)))
    warning(paste0("Argument '", nm, "' is deprecated and ignored."))
  if(any(pmatch(names(dots), "keep.effs", nomatch = 0)))
    warning("Argument 'keep.effs' is deprecated: use 'keep' instead")
  # reduce random and fixed parts?
  # Setting alpha to 1 disables elimination for that part: no p-value can
  # exceed the threshold, so no term is ever removed.
  if(!reduce.random) alpha.random <- 1
  if(!reduce.fixed) alpha.fixed <- 1
  if(missing(keep)) keep <- character(0L)
  # Reduce random and fixed parts:
  # NOTE: evaluation via eval.parent() so that the update()/lm() refits inside
  # the reduction helpers are evaluated where the original model's data is
  # visible.
  red_random <- eval.parent(reduce_random(object, alpha=alpha.random))
  model <- attr(red_random, "model")
  # 'model' may be 'lmerMod' rather than 'lmerModLmerTest', so we coerce to
  # 'lmerModLmerTest' if required:
  if(inherits(model, "lmerMod") && !inherits(model, "lmerModLmerTest"))
    model <- as_lmerModLmerTest(model)
  stopifnot(inherits(model, "lmerModLmerTest") || inherits(model, "lm"))
  red_fixed <- eval.parent(reduce_fixed(model, ddf=ddf,
                                        alpha=alpha.fixed, keep=keep))
  # get 'reduction' tables:
  step_random <- ran_redTable(red_random)
  step_fixed <- fix_redTable(red_fixed)
  # organize results and return:
  step_list <- list(random=step_random, fixed=step_fixed)
  class(step_list) <- "step_list"
  # Attach the final model and its drop1 table so get_model() and the print
  # method can retrieve them from the step result.
  attr(step_list, "model") <- attr(red_fixed, "model")
  attr(step_list, "drop1") <- attr(red_fixed, "drop1")
  step_list
}
##############################################
######## get_model()
##############################################
#' Extract Model from an Object
#'
#' @param x an object.
#' @param ... currently not used.
#'
#' @seealso \code{\link{get_model.step_list}}
#' @export
#' @keywords internal
get_model <- function(x, ...) UseMethod("get_model")  # S3 generic; see method below
##############################################
######## get_model.step_list()
##############################################
#' @rdname step.lmerModLmerTest
#' @param x a step object.
#' @export
get_model.step_list <- function(x, ...) {
  # The backward-reduced final model is carried on the step result as the
  # "model" attribute; unwrap and return it.
  final_model <- attr(x, "model")
  final_model
}
##############################################
######## print.step_list()
##############################################
#' @importFrom stats formula
#' @export
#' @keywords internal
print.step_list <- function(x, digits = max(getOption("digits") - 2L, 3L),
                            signif.stars = getOption("show.signif.stars"),
                            ...) {
  # Print the random-effect and fixed-effect elimination tables in turn,
  # then the formula of the final (fully reduced) model.
  # NOTE(review): 'digits' and 'signif.stars' are accepted for print-method
  # compatibility but are not currently forwarded to the table print calls.
  print(x[["random"]])
  cat("\n")
  print(x[["fixed"]])
  cat("\nModel found:", deparse2(formula(attr(x, "model"))), sep="\n")
  invisible(x)  # standard print-method contract: return input invisibly
}
##############################################
######## plot.step_list()
##############################################
#' Plot LS-means for Backward Reduced Model
#'
#' Computes the LS-means for the final backward reduced model and passes these
#' to \code{\link{plot.ls_means}}.
#'
#' Error bars are confidence intervals - the default is 95% CI but the confidence
#' level can be changed.
#'
#' @param x a \code{step_list} object; the result of running
#' \code{\link[=step.lmerModLmerTest]{step}}.
#' @param y not used and ignored with a warning.
#' @param which optional character vector naming factors for which LS-means should
#' be plotted. If \code{NULL} (default) plots for all LS-means are generated.
#' @param mult if \code{TRUE} and there is more than one term for which to plot
#' LS-means the plots are organized in panels with \code{facet_wrap}.
#' @param pairwise pairwise differences of LS-means?
#' @param level confidence level.
#' @param ddf denominator degree of freedom method.
#' @param ... currently not used.
#'
#' @export
#' @author Rune Haubo B. Christensen and Alexandra Kuznetsova
#' @seealso \code{\link[=ls_means.lmerModLmerTest]{ls_means}} and
#' \code{\link{plot.ls_means}}
#' @keywords internal
#' @examples
#'
#' \dontrun{
#' # Fit example model:
#' tv <- lmer(Sharpnessofmovement ~ TVset * Picture +
#' (1 | Assessor:TVset) + (1 | Assessor:Picture) +
#' (1 | Assessor:Picture:TVset) + (1 | Repeat) + (1 | Repeat:Picture) +
#' (1 | Repeat:TVset) + (1 | Repeat:TVset:Picture) + (1 | Assessor),
#' data = TVbo)
#'
#' # Backward reduce the model:
#' (st <- step(tv)) # takes ~10 sec to run
#'
#' # Pairwise comparisons of LS-means for Picture and TVset:
#' plot(st, which=c("Picture", "TVset"), pairwise = TRUE)
#' }
#'
plot.step_list <- function(x, y=NULL, which=NULL, pairwise=FALSE, mult=TRUE,
                           level=0.95, ddf=c("Satterthwaite", "Kenward-Roger"),
                           ...) {
  # Compute LS-means from the final backward-reduced model (via get_model())
  # and delegate all plotting -- including handling of 'y' and 'which' --
  # to plot.ls_means().
  plot(ls_means(get_model(x), pairwise=pairwise, level=level, ddf=ddf),
       y=y, which=which, mult=mult)
}
##############################################
######## step utility functions below
##############################################
ran_redTable <- function(table) {
  # Combine the elimination history ('table') with the final ranova table
  # (attached as the "ranova" attribute, whose first '<none>' row is dropped)
  # into one anova-like data frame with an 'Eliminated' order column:
  # NA for the starting row, 1, 2, ... for eliminated terms, 0 for retained.
  kept <- attr(table, "ranova")[-1, , drop = FALSE]
  stopifnot(nrow(table) >= 1)
  elim_order <- c(NA_real_, seq_len(nrow(table) - 1))
  eliminated <- cbind("Eliminated" = elim_order, table)
  retained <- cbind("Eliminated" = rep(0, nrow(kept)), kept)
  res <- rbind(eliminated, retained)
  class(res) <- c("anova", "data.frame")
  attr(res, "heading") <- "Backward reduced random-effect table:\n"
  res
}
fix_redTable <- function(table) {
  # Combine the fixed-effect elimination history ('table') with the drop1
  # table for the retained terms (attached as the "drop1" attribute) into one
  # anova-like data frame.  'Eliminated' gives the elimination order
  # (1, 2, ...); 0 marks terms retained in the final model.
  aov <- attr(table, "drop1")
  tab <- rbind(cbind("Eliminated" = seq_len(nrow(table)), table),
               cbind("Eliminated" = rep(0, nrow(aov)), aov))
  class(tab) <- c("anova", "data.frame")
  attr(tab, "heading") <- "Backward reduced fixed-effect table:"
  ddf <- attr(table, "ddf")
  if(!is.null(ddf)) {
    # FIX: the original mapped ddf through an identity switch()
    # ("Satterthwaite" -> "Satterthwaite", "Kenward-Roger" -> "Kenward-Roger"),
    # a no-op for valid values that silently dropped the method line for any
    # other value; use the value directly.
    attr(tab, "heading") <-
      c(attr(tab, "heading"), paste("Degrees of freedom method:", ddf, "\n"))
  }
  tab
}
#' @importFrom stats formula update
#' @importFrom lme4 getME
reduce_random <- function(model, alpha=0.1) {
  # Stepwise backward elimination of random-effect terms: repeatedly drop the
  # term with the largest ranova() p-value while that p-value exceeds 'alpha'.
  ran <- ranova(model)
  reduced <- ran[1L, ]            # start from the '<none>' row of the table
  newfit <- model
  newform <- formula(model)
  forms <- attr(ran, "formulae")  # candidate reduced formulae, one per term
  pvals <- ran[-1, "Pr(>Chisq)"]
  above <- (!is.na(pvals) & pvals > alpha)
  while(any(above)) {
    remove <- which.max(pvals)
    newform <- forms[[remove]]
    reduced <- rbind(reduced, ran[1 + remove, ])   # log the eliminated term
    if(!has_ranef(newform)) { # If no random effects: fit with lm
      # Refit with lm(), carrying the REML/ML flag along so ranova_lm() can
      # report a comparable log-likelihood; then stop reducing.
      reml <- getME(newfit, "is_REML")
      lm_call <- get_lm_call(newfit, formula=newform)
      newfit <- eval.parent(as.call(lm_call))
      ran <- ranova_lm(newfit, REML=reml)
      break
    }
    # eval.parent() so the refit resolves the model data in the caller's frame.
    newfit <- eval.parent(update(newfit, formula. = newform))
    # newfit <- update(newfit, formula = newform)
    ran <- ranova(newfit)
    forms <- attr(ran, "formulae")
    pvals <- ran[-1, "Pr(>Chisq)"]
    above <- (!is.na(pvals) & pvals > alpha)
  }
  # Final model, formula and last ranova table travel along as attributes.
  attr(reduced, "model") <- newfit
  attr(reduced, "formula") <- newform
  attr(reduced, "ranova") <- ran
  reduced
}
ranova_lm <- function(model, REML=TRUE) {
  # Compute a ranova table for an lm-object only containing a '<none>' row
  # and the right header.
  # Called from reduce_random() once all random terms have been eliminated
  # and the fit has been redone with lm().  'REML' is forwarded to
  # get_logLik() (defined elsewhere in the package), presumably so the
  # likelihood criterion matches the preceding mixed-model fits.
  aov <- mk_LRtab(get_logLik(model, REML=REML))
  rownames(aov) <- "<none>"
  head <- c("ANOVA-like table for random-effects: Single term deletions",
            "\nModel:", deparse2(formula(model)))
  # attr(aov, "formulae") <- new_forms
  structure(aov, heading = head, class = c("anova", "data.frame"))
}
#' @importFrom stats nobs formula
reduce_fixed <- function(model, ddf=c("Satterthwaite", "Kenward-Roger"), alpha=0.05,
                         keep) {
  # Stepwise backward elimination of fixed-effect terms: repeatedly remove the
  # non-'keep' term with the largest drop1 p-value while it exceeds 'alpha'.
  if(missing(keep)) keep <- character(0L)
  stopifnot(is.character(keep))
  term_names <- attr(terms(model), "term.labels")
  # Test validity of 'keep': warn (not error) about terms not in the model:
  if(!all(keep %in% term_names)) {
    offending <- paste(setdiff(keep, term_names), collapse = " ")
    txt1 <- sprintf("Invalid 'keep' ignored: %s.", offending)
    txt2 <- sprintf("Valid terms are: %s.", paste(term_names, collapse = " "))
    warning(paste(txt1, txt2, sep="\n"), call. = FALSE)
  }
  ddf <- match.arg(ddf)
  # drop1() on lm-fits puts a '<none>' row first; remove it so both model
  # classes yield comparable tables.
  aov <- if(inherits(model, "lmerMod")) drop1.lmerModLmerTest(model, ddf=ddf) else
    drop1(model, test="F")[-1L, , drop=FALSE]
  reduced <- aov[0L, ]               # empty table with the right columns
  newfit <- model
  newform <- orig_form <- formula(model)
  nobs_model <- nobs(model)
  terms <- rownames(aov)
  consider <- setdiff(terms, keep)   # terms eligible for elimination
  pvals <- aov[consider, "Pr(>F)"]
  above <- (!is.na(pvals) & pvals > alpha)
  # (the leading if() is redundant with while() but kept as-is)
  if(any(above)) while(any(above)) {
    remove <- consider[which.max(pvals)]
    remove <- consider[which.max(pvals)]
    newform <- rm_complete_terms(remove, orig_form, random = FALSE)[[1L]]
    reduced <- rbind(reduced, aov[remove, ])   # log the eliminated term
    # eval.parent() so the refit resolves the model data in the caller's frame.
    newfit <- eval.parent(update(newfit, formula = newform))
    # newfit <- update(newfit, formula = newform)
    nobs_newfit <- nobs(newfit)
    # Guard against the estimation sample silently changing (e.g. via na.omit)
    # when a term with missing values is removed.
    if(all(is.finite(c(nobs_model, nobs_newfit))) && nobs_newfit != nobs_model)
      stop("number of rows in use has changed: remove missing values?",
           call.=FALSE)
    aov <- if(inherits(newfit, "lmerMod")) drop1.lmerModLmerTest(newfit, ddf=ddf) else
      drop1(newfit, test="F")[-1L, , drop=FALSE]
    # aov <- drop1(newfit)
    orig_form <- formula(newfit)
    terms <- rownames(aov)
    consider <- setdiff(terms, keep)
    pvals <- aov[consider, "Pr(>F)"]
    above <- (!is.na(pvals) & pvals > alpha)
  }
  # Final model, formula, drop1 table and (for mixed models) the ddf method
  # travel along as attributes.
  attr(reduced, "model") <- newfit
  attr(reduced, "formula") <- newform
  attr(reduced, "drop1") <- aov
  attr(reduced, "ddf") <- if(inherits(model, "lmerMod")) ddf else NULL
  reduced
}
|
19c24d1a3d75b00d0bf2be1b4696055f925b90a5 | b5748165d674f3282517b1c936382032f89d6edc | /pdf_convert.R | d5449a65abac75026f4e78d0ec31793a9baef646 | [] | no_license | saudiwin/tunisia_firms | 9add45be350d3e0a6ae427d5d431c04df4689a32 | 0cbec833937795757bb2df460adbd8602bbb6d97 | refs/heads/master | 2021-01-23T08:20:12.420734 | 2017-04-25T17:57:59 | 2017-04-25T17:57:59 | 86,494,552 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,148 | r | pdf_convert.R | # convert PDFs to editable data using ABBY
# Convert PDFs to editable data using the ABBYY Cloud OCR service (abbyyR).
# FIX: use library() for loading -- require() only returns FALSE on failure,
# so a missing package would have surfaced later as cryptic errors.
library(abbyyR)
# NOTE(review): the application password is hardcoded below; move it to an
# environment variable (e.g. Sys.getenv("ABBYY_PWD")) before sharing this script.
setapp(c("Tunisia Firms", "m+w3xiZFDw0D/4BMQSORs0E6"))
getAppInfo()
# Submit the 2008 ranking PDF for OCR
processImage(file_path = 'C:\\Users\\bobku\\Box Sync\\Dissertation\\Quantitative\\archieve classement\\Classement 2008.pdf',
             imageSource = 'scanner', description = 'Classement2008')
# all 2010s individual files, matching '<digits>-<digit>.pdf'
all_2010s <- list.files(path = 'C:\\Users\\bobku\\Box Sync\\Dissertation\\Quantitative\\archieve classement\\',
                        pattern = '[0-9]+-[0-9].pdf')
# upload each matching PDF (lapply used purely for its side effects)
lapply(all_2010s, function(x)
  processImage(file_path = paste0('C:\\Users\\bobku\\Box Sync\\Dissertation\\Quantitative\\archieve classement\\', x),
               imageSource = 'scanner', description = x)
)
results <- getResults(output = 'C:\\Users\\bobku\\Box Sync\\Dissertation\\Quantitative\\archieve classement\\')
# Load OCR'ed text files and process them with regular expressions
class2014 <- readLines('data/classement2014.txt', encoding = 'UTF-8')
# keep only lines containing digits (the ranking rows), then trim whitespace
class2014 <- class2014[grepl('\\h?[0-9]+', class2014, perl = TRUE)]
class2014 <- trimws(class2014)
writeLines(class2014, 'data/class2014_firststep.txt')
#class2014 <- class2014[grepl('[0-9]+\\h+[0-9]+',class2014,perl=TRUE)]
|
332661988dda679f0f988613135d607e0c26b3ce | a56f6685719834f10e3bb3829f2431c51805c5a2 | /server.R | df3395ed58fad894062cd69e821fc506c63224f7 | [] | no_license | usfviz/backyardigans-final | da5ec8590826424f2fc3369d1545cbbe65c280be | e009c976d3ba541b907649a08089161580758b4e | refs/heads/master | 2021-01-20T09:57:10.809143 | 2017-05-10T02:11:43 | 2017-05-10T02:11:43 | 90,311,133 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,005 | r | server.R |
# Shiny server for the CitiBike/crime dashboard. Renders a leaflet map whose
# circles scale with the selected variable, a density plot, a 20-year crime
# dygraph, a D3 pairs plot, a per-state/city boxplot summary and a data table.
# Relies on data frames `df`, `df_20` and `df_v` defined outside this file
# (presumably in global.R) -- TODO confirm.
function(input, output, session) {
  # Rows of `df` restricted to the selected state; "All" keeps everything.
  filteredData <- reactive({
    if (input$geo_filter == "All"){
      df
    }else{
      df <- df[df$State == input$geo_filter, ]
    }
    df
  })
  # Base map only; the circles are (re)drawn by the observe() below
  # through leafletProxy, so the base tiles are not re-rendered on filter
  # changes.
  output$map <- renderLeaflet({
    leaflet() %>% addTiles() %>%
      setView(lng = -93.85, lat = 37.45, zoom = 4)
  })
  # Density plot of the selected variable over the filtered rows.
  output$hist_Centile <- renderPlot({
    df2 <- data.frame(filteredData()[,input$var_to_viz])
    p <- ggplot(df2, aes(x = df2[,1], fill = "red")) +
      geom_density()+xlab("")+ylab("")+ theme(legend.position="none")
    p
  })
  # 20-year time series (df_20) for the selected crime categories.
  output$dygraph <- renderDygraph({
    df3 <- df_20[,c('Year', input$Crime)]
    values <- input$Crime
    graph_plot <- dygraph(df3, main = "20 Year Crime Rate") %>%
      dyAxis("y", label = "Crime Rate (Per 100,000)") %>%
      dyLegend(labelsSeparateLines = TRUE) %>%
      dyAxis('x', label = 'Year') %>%
      dyOptions(colors = brewer.pal(length(values), "Dark2")) %>%
      dyRangeSelector() %>%
      dyHighlight(highlightSeriesOpts = list(strokeWidth = 3))})
  # NOTE(review): output$table is assigned again at the bottom of this
  # function; the later assignment wins, so this one is redundant.
  output$table <- renderDataTable(select_df())
  # Scatterplot matrix (columns 3:10 of df) for the chosen state.
  output$scatterplot <- renderPairsD3({
    new_df <- df[df$State == input$pairs_state,]
    pairsD3(new_df[,3:10],leftmar = 100)
  })
  # Redraw map circles whenever inputs change. Larceny/property-crime values
  # are much larger, so they get a smaller radius multiplier (x10 vs x100).
  observe({
    df <- df[, c(input$var_to_viz, "State","City","longitude","latitude")]
    radius_p <- df[,1]
    # larceny, propertycrime
    if(input$var_to_viz%in%c("Larceny_theft","Property_crime")){
      print("HOLA")  # NOTE(review): leftover debug print
      leafletProxy("map", data = filteredData()) %>%
        clearShapes() %>%
        addCircles(data = filteredData(), radius = ~radius_p*10, weight = 1, color = "red"
                   , fillOpacity = 0.5
        )
    }else{
      leafletProxy("map", data = filteredData()) %>%
        clearShapes() %>%
        addCircles(data = filteredData(), radius = ~radius_p*100, weight = 1, color = "red"
                   , fillOpacity = 0.5
        )
    }
  })
  # City dropdown is rebuilt from df_v whenever the state selection changes.
  output$cityControls<- renderUI({
    cities <- as.character(df_v[df_v$State == input$state, 2 ])
    selectInput("city", "Select City", c('All', cities), selected = 'All')
  })
  # df_v filtered down to the selected state and (optionally) city.
  select_df <- reactive({
    if (input$state == 'All'){
      df_v= df_v
    }else{
      if (input$city == 'All'){
        df_v = df_v[df_v$State == input$state, ]
      }else{
        df_v = df_v[df_v$State == input$state & df_v$City == input$city, ]
      }
    }
  })
  # Boxplot of the selected crime columns; the y-axis limit is fixed at 275
  # for state/city views and scaled from the data for the "All" view.
  output$summary <- renderPlot({
    if (input$state != 'All' & input$city != 'All'){
      df_4 <- df_v[df_v$State == input$state, c('Population', 'State', 'City', input$crime2)]
      df_4 <- subset(df_4, select = -c(Population))
      df_4 <- subset(df_4, select = c('City', 'State',input$crime2))
      p <- tidyr::gather(df_4, Crime, Count, -State, -City)
      plot1<-ggplot(p, aes(x=Crime, y=Count, color=Crime)) + geom_boxplot()
      plot1 <- plot1 + scale_y_continuous(limits = c(0, 275))
      plot1
    }else{
      df_v <- select_df()
      df_v <- subset(df_v, select = -c(Population))
      df_v <- subset(df_v, select = c('City', 'State',input$crime2))
      values <- input$crime2
      if (input$state == 'All'){
        max2 <- max(df_v[c(input$crime2)])
        max2 <- as.integer(max2*.002)
      }else{
        max2 <- 275
      }
      p <- tidyr::gather(df_v, Crime, Count, -State, -City)
      plot1<-ggplot(p, aes(x=Crime, y=Count, color=Crime)) + geom_boxplot()
      plot1 <- plot1 + scale_y_continuous(limits = c(0, max2))
      plot1
    }})
  # Data table of the currently filtered df_v rows.
  output$table <- renderDataTable(select_df())
}
|
a34e2be2fba3770919f6f1206d98b7eaa60fa7b5 | 7db5ba91d3835464346a6135e75ee37fb5ae2cb5 | /plot3.R | a7c8eec0622b737f8a71b308690492af77055a68 | [] | no_license | dblane/exdata-data-NEI_data | f086a9edf7259ae33459805fb15032cea5a7ae4e | fbaebe5c530d9feb25e78335df0d5f68c7f2112d | refs/heads/master | 2020-04-25T00:46:29.560854 | 2015-04-26T17:21:53 | 2015-04-26T17:21:53 | 34,578,732 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 942 | r | plot3.R | ## Each functions starts with the importation of data into variables.
# plot3: total PM2.5 emissions in Baltimore City (fips == "24510"),
# 1999-2008, faceted by source type so the trend of each type is visible.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## Subset the national emissions inventory to Baltimore City.
baltimorePM <- NEI[NEI$fips=="24510",]
# NOTE(review): `totals` is computed but never used in this plot.
totals <- aggregate(Emissions ~ year, baltimorePM, sum)
## We have to plot 4 different source types to see which of the 4 decreased from 1999-2008
## We'll have to use ggplot2 in order to represent this data best.
library(ggplot2)
plot <- ggplot(baltimorePM,aes(factor(year),Emissions,fill=type)) +
  geom_bar(stat="identity") +
  theme_bw() + guides(fill=FALSE) +
  facet_grid(.~type,scales = "free",space="free") +
  labs(x="year", y=expression("Total PM 2.5 Emission (Tons)")) +
  labs(title=expression("PM"[2.5]*" Emissions, Baltimore City 1999-2008 by Source Type"))
print(plot)
## Copy the on-screen plot to a 480x480 PNG file.
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off() |
f4620fd7c8d8799834a6732f0e08f2e7a7584103 | 7f72ac13d08fa64bfd8ac00f44784fef6060fec3 | /RGtk2/man/pangoAttrFallbackNew.Rd | b31e8c26374d754abd35a28d71da68f180b23082 | [] | no_license | lawremi/RGtk2 | d2412ccedf2d2bc12888618b42486f7e9cceee43 | eb315232f75c3bed73bae9584510018293ba6b83 | refs/heads/master | 2023-03-05T01:13:14.484107 | 2023-02-25T15:19:06 | 2023-02-25T15:20:41 | 2,554,865 | 14 | 9 | null | 2023-02-06T21:28:56 | 2011-10-11T11:50:22 | R | UTF-8 | R | false | false | 720 | rd | pangoAttrFallbackNew.Rd | \alias{pangoAttrFallbackNew}
\name{pangoAttrFallbackNew}
\title{pangoAttrFallbackNew}
\description{Create a new font fallback attribute.}
\usage{pangoAttrFallbackNew(fallback)}
\arguments{\item{\verb{fallback}}{[logical] \code{TRUE} if we should fall back on other fonts
for characters the active font is missing.}}
\details{If fallback is disabled, characters will only be used from the
closest matching font on the system. No fallback will be done to
other fonts on the system that might contain the characters in the
text.
Since 1.4}
\value{[\code{\link{PangoAttribute}}] the newly allocated \code{\link{PangoAttribute}}.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
092f94ab15497881cd960b63c86fbf65bf68bb6a | 642e217325ad840b6bec1f11a1e577f10dab4d4d | /citibike2018/ui.R | 2a0803961a3d989733c86b0a9e9733af06a2fa6a | [] | no_license | kevingilbert1003/CitiBike2018 | a5381150ea11ae6bc37a29759bebb71ed7d0bace | ee22fac4f32c793ccdaccfe6aea51b1b08510af0 | refs/heads/master | 2020-04-22T20:19:18.660193 | 2019-02-14T06:31:58 | 2019-02-14T06:31:58 | 170,637,375 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,742 | r | ui.R | header <- dashboardHeader(
title = "CitiBike Trips"
)
# Dashboard body: two stacked rows, each pairing a leaflet map (left, 9/12
# width) with a filter panel (right, 3/12 width). The first row's controls
# default to weekday commute hours for subscribers; the second row's
# (suffix "2") default to weekend afternoons for casual customers, so the
# two maps can be compared side by side.
body <- dashboardBody(
  fluidRow(
    column(width = 9,
           box(width = NULL, solidHeader = TRUE,
               leafletOutput("plot1", height = 550)
           )
    ),
    column(width = 3,
           # Filters for the first map ("Update Chart" applies them).
           box(width = NULL, status = "warning"
               , dateRangeInput('date', label = 'Trip Date',
                                start = '2018-01-01', end = '2018-12-31',
                                min = '2018-01-01', max = '2018-12-31'
               )
               , sliderInput('start_hour', label = 'Start Hour',
                             min = 0, max = 24,
                             value = c(6,10))
               , selectInput('day_of_week', label = 'Day of Week',
                             choices = c('Sun', 'Mon', 'Tue', 'Wed'
                                         , 'Thu', 'Fri', 'Sat'),
                             selected = c('Mon', 'Tue', 'Wed', 'Thu', 'Fri'),
                             multiple = T)
               # Gender codes follow the CitiBike data dictionary:
               # 0 = unknown, 1 = female, 2 = male.
               , selectInput('gender', label = 'Gender',
                             choices = list(Female = 1, Male = 2, Unknown = 0),
                             selected = c(1,2,0),
                             multiple = T)
               , selectInput('usertype', label = 'User Type',
                             choices = c('Subscriber', 'Customer'),
                             selected = c('Subscriber'),
                             multiple = T)
               , sliderInput('age', label = 'Age',
                             min = 0, max = 100,
                             value = c(18,50))
               # , p(
               #   class = "text-muted",
               #   paste("Press update to Render Map with your selected filters."
               #   )
               # )
               , actionButton("update", "Update Chart")
           )
    )
  ),
  fluidRow(
    column(width = 9,
           box(width = NULL, solidHeader = TRUE,
               leafletOutput("plot2", height = 550)
           )
    ),
    column(width = 3,
           # Same filter set as above, but for the second map (ids end in 2).
           box(width = NULL, status = "warning"
               , dateRangeInput('date2', label = 'Trip Date',
                                start = '2018-01-01', end = '2018-12-31',
                                min = '2018-01-01', max = '2018-12-31'
               )
               , sliderInput('start_hour2', label = 'Start Hour',
                             min = 0, max = 24,
                             value = c(14,18))
               , selectInput('day_of_week2', label = 'Day of Week',
                             choices = c('Sun', 'Mon', 'Tue', 'Wed'
                                         , 'Thu', 'Fri', 'Sat'),
                             selected = c('Sat', 'Sun'),
                             multiple = T)
               , selectInput('gender2', label = 'Gender',
                             choices = list(Female = 1, Male = 2, Unknown = 0),
                             selected = c(1,2,0),
                             multiple = T)
               , selectInput('usertype2', label = 'User Type',
                             choices = c('Subscriber', 'Customer'),
                             selected = c('Customer'),
                             multiple = T)
               , sliderInput('age2', label = 'Age',
                             min = 0, max = 100,
                             value = c(18,50))
               # , p(
               #   class = "text-muted",
               #   paste("Press update to Render Map with your selected filters."
               #   )
               # )
               , actionButton("update2", "Update Chart")
           )
    )
  )
)
# Assemble the page; the sidebar is disabled so only header + body show.
dashboardPage(
  header,
  dashboardSidebar(disable = TRUE),
  body
)
|
0d267a60279102f415900a2b2d6503b7f6e99586 | 72cf507d80c7c2ff4b45acde0e8252bbf4bc719b | /man/multilevelGPSMatch.Rd | e5ec1db923f93240f01ff550e25eda641f4d1883 | [] | no_license | Crazyoumashu/multilevelMatching | 4c459d082c98cdaa4f83ed2feb87ef2ff76eb4bf | 7ec8932c8cc0b99d0321cf526d225958c8a759d0 | refs/heads/master | 2022-03-26T09:50:11.969673 | 2019-12-01T13:02:17 | 2019-12-01T13:02:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,032 | rd | multilevelGPSMatch.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multilevelGPSMatch.R
\name{multilevelGPSMatch}
\alias{multilevelGPSMatch}
\title{Matching on GPS with multilevel treatments}
\usage{
multilevelGPSMatch(Y, W, X, Trimming, GPSM = "multinomiallogisticReg")
}
\arguments{
\item{Y}{A continuous response vector (1 x n)}
\item{W}{A treatment vector (1 x n) with numerical values indicating
treatment groups}
\item{X}{A covariate matrix (p x n) with no intercept. When
\code{GPSM="existing"}, then \code{X} must be a vector (1 x n) of
user-specified propensity scores.}
\item{Trimming}{An indicator of whether trimming the sample to ensure overlap}
\item{GPSM}{An indicator of the methods used for estimating GPS, options
include \code{"multinomiallogisticReg"}, \code{"ordinallogisticReg"} for
proportional odds or cumulative logit model, and \code{"existing"} for
user-specified propensity score via the parameter \code{X}. Defaults to
\code{"multinomiallogisticReg"}}
}
\value{
A list including:
\itemize{
\item \code{tauestimate}: A vector of estimates for pairwise treatment
effects
\item \code{varestimate}: A vector of variance estimates for
\code{tauestimate}, using Abadie & Imbens (2006)'s method
\item \code{varestimateAI2012}: A vector of variance estimates for
\code{tauestimate}, when matching on the generalized propensity score,
using Abadie & Imbens (2016)'s method. This variance estimate takes into account
of the uncertainty in estimating the GPS. This variable is named AI2012
(not AI2016) for backwards compatibility.
\item \code{analysis_idx}: a list containing the indices_kept (analyzed)
and indices_dropped (trimmed) based on Crump et al. (2009)'s method.
}
}
\description{
Matching on GPS with multilevel treatments
}
\examples{
X <- c(5.5,10.6,3.1,8.7,5.1,10.2,9.8,4.4,4.9)
Y <- c(102,105,120,130,100,80,94,108,96)
W <- c(1,1,1,3,2,3,2,1,2)
multilevelGPSMatch(Y,W,X,Trimming=0,GPSM="multinomiallogisticReg")
multilevelGPSMatch(Y,W,X,Trimming=1,GPSM="multinomiallogisticReg")
}
\references{
Yang, S., Imbens G. W., Cui, Z., Faries, D. E., & Kadziola, Z.
(2016) Propensity Score Matching and Subclassification in Observational
Studies with Multi-Level Treatments. Biometrics, 72, 1055-1065.
\url{https://doi.org/10.1111/biom.12505}
Abadie, A., & Imbens, G. W. (2006). Large sample properties of matching
estimators for average treatment effects. Econometrica, 74(1), 235-267.
\url{https://doi.org/10.1111/j.1468-0262.2006.00655.x}
Abadie, A., & Imbens, G. W. (2016). Matching on the estimated propensity
score. Econometrica, 84(2), 781-807.
\url{https://doi.org/10.3982/ECTA11293}
Crump, R. K., Hotz, V. J., Imbens, G. W., & Mitnik, O. A. (2009). Dealing
with limited overlap in estimation of average treatment effects.
Biometrika, 96(1), 187-199. \url{https://doi.org/10.1093/biomet/asn055}
}
\seealso{
\code{\link{multilevelMatchX}};
\code{\link{multilevelGPSStratification}}
}
|
396dd8dee77036cc789209bb7943ae4d43dc7a78 | 680008636753b98cb441f9e7f947d3604dd6b1af | /kakuma.r | 46d548613796bc398a9c43bf05e230738f3a0292 | [] | no_license | ngamita/ngamitaR | 205cb71bbec68737bd9b91dcc9e9d96c9d0a2edb | 861fc262e29cc4493d81816df3f3f439aba96f09 | refs/heads/master | 2020-04-01T10:12:32.586133 | 2014-11-21T15:14:14 | 2014-11-21T15:14:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,009 | r | kakuma.r |
# Kakuma usage report: restrict the activity log `resultDF` (created by an
# upstream script -- TODO confirm) to the Kakuma account IDs, then count
# events and unique users per action type.
# Action type codes used below: 104 = login, 200 = search, 300 = message sent.
#quick overview plot of how the data is behaving.
#table(resultDF$actionType)
#barplot(table(resultDF$actionType))
# kakuma.csv is a header-less CSV of profile IDs; flatten it to a vector.
accounts <- read.csv('kakuma.csv', header=FALSE, sep=',')
a <- as.vector(t(accounts))
kakuma <- resultDF[ resultDF$actingUserProfileId %in% a, ]
table(kakuma$actionType)
# Logins (action type 104): events per user, then the number of unique users.
kakuma_logins <- kakuma[kakuma$actionType %in% c(104), ]
unique_logins <- aggregate(kakuma_logins$actionType, by=list(kakuma_logins$actingUserProfileId), FUN=length)
unique_logins
nrow(unique_logins)
# Searches (action type 200): same per-user counts as above.
kakuma_search <- kakuma[kakuma$actionType %in% c(200), ]
unique_search <- aggregate(kakuma_search$actionType, by=list(kakuma_search$actingUserProfileId), FUN=length)
unique_search
nrow(unique_search)
# Messages sent (action type 300).
kakuma_msgs <- kakuma[kakuma$actionType %in% c(300), ]
unique_msgs <- aggregate(kakuma_msgs$actionType, by=list(kakuma_msgs$actingUserProfileId), FUN=length)
unique_msgs
nrow(unique_msgs) |
323daed3e1d91dcf92e90af678e80d2ecec808ed | e503542797f20b70f3b649a73e04db76dab70b16 | /R/aaa.R | e0afc90119c74bfda97010ed5c1d29fb425c6f06 | [] | no_license | regicid/panelPomp | 94d8ab17313af27c5dd5da7ddc5d2f91b43f38fd | 7c9846321ffe138ea2719b0b9d82c09d50196797 | refs/heads/master | 2022-04-13T17:23:53.245904 | 2020-04-10T16:10:17 | 2020-04-10T19:20:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,563 | r | aaa.R | #' @include package.R
NULL
# Package-attach hook: register this package's inst/examples directory in the
# "pomp.examples" and "panelPomp.examples" options so example lookups
# (pompExample below) can find panelPomp's examples alongside pomp's.
.onAttach <- function (...) {
  pompExDir <- getOption("pomp.examples")
  pPompExDir <- getOption("panelPomp.examples")
  newDir <- system.file("examples",package="panelPomp")
  # Append only if not already registered (attach can happen more than once).
  if (!newDir%in%pompExDir)
    options(pomp.examples=c(pomp=pompExDir,pomp=newDir))
  if (!newDir%in%pPompExDir)
    options(panelPomp.examples=c(panelPomp=pPompExDir,panelPomp=newDir))
}
# Package-detach hook: undo .onAttach by removing this package's examples
# directory from both options; an option emptied down to character() is
# reset to NULL so the options are left exactly as found.
.onDetach <- function (...) {
  pompExDir <- getOption("pomp.examples")
  pPompExDir <- getOption("panelPomp.examples")
  newDir <- system.file("examples",package="panelPomp")
  pompExDir <- pompExDir[pompExDir!=newDir]
  if (identical(unname(pompExDir),character())) pompExDir <- NULL
  pPompExDir <- pPompExDir[pPompExDir!=newDir]
  if (identical(unname(pPompExDir),character())) pPompExDir <- NULL
  options(pomp.examples=pompExDir)
  options(panelPomp.examples=pPompExDir)
}
## Uniform random draws in the transformed scale: give centers and widths
## Draw one uniform variate per center, uniform on the estimation
## (transformed) scale in a window of the given width centered at the
## transformed center, then map the draws back to the natural scale.
## Defaults give draws that are log-uniform around `centers`.
runif.EstimationScale <- function(centers, widths,
                                  toEstimationScale.fn = log,
                                  fromEstimationScale.fn = exp) {
  mids <- toEstimationScale.fn(centers)
  half.widths <- widths*0.5
  draws <- runif(n = length(centers),
                 min = mids - half.widths,
                 max = mids + half.widths)
  fromEstimationScale.fn(draws)
}
# Lightweight test harness used by this package's test scripts.
#
# expr1: expression to evaluate under try(, silent=TRUE); for a try-error,
#        only the first element (the error message) is kept. Pass a quote()d
#        expression to delay evaluation until inside this function.
# expr2: optional; evaluated the same way and compared to expr1 with
#        identical(). The logical outcome is appended to the accumulator
#        vector named by `all` in environment `env`.
# all:   name (string) of the vector accumulating logical test results.
# env:   environment in which `all` lives and is modified.
# verbose: if TRUE, return the result (expr1's value, or the comparison).
test <- function (expr1, expr2, all, env, verbose = TRUE) {
  # expr1: expression to be try(,sil=T)[1]d; can be a quote() to avoid
  # evaluation by the function
  # expr2: optional; compare to 'expr1' (via identical) and c() logical result
  # to object specified via arguments 'all' and 'env' (see below)
  # all: name of the vector to accumulate logical test results
  # env: name of the environment where 'all' should be modified
  # verbose: optional; should the result be returned?
  ep <- paste0("in ",sQuote("test"),": ")
  # The accumulator must exist before results can be appended to it.
  if (!exists(all,envir=env))
    stop(paste0(ep,"missing vector to accumulate logical test results."),
         call.=FALSE)
  tryexpr1 <- try(eval(expr1),silent=TRUE)
  if (is(tryexpr1,"try-error")) tryexpr1 <- tryexpr1[1]
  PASSES <- tryexpr1
  if (!missing(expr2)) {
    tryexpr2 <- try(eval(expr2),silent=TRUE)
    if (is(tryexpr2,"try-error")) tryexpr2 <- tryexpr2[1]
    PASSES <- identical(tryexpr1,tryexpr2)
    # Append the comparison outcome to the accumulator in the caller's env.
    assign(all,value=c(get(all,envir=env),PASSES),envir=env)
  }
  if (verbose) PASSES
}
#' @title Interpret shortcuts for \code{sQuote()}s and \code{dQuote()}s in
#' character objects
#' @description Concatenate character objects and replace singles quotes with
#' \code{sQuote()}s and asterisks with \code{dQuote()}s: \code{sQuote("x")} and
#' \code{dQuote("x")} can be written as just ''x'' and *x*.
#' @param ... objects to be passed to \code{strsplit}.
#' @keywords internal
#' @examples
#' wQuotes("in ''fn'': *object* is 'a' required argument")
#' paste0("in ",sQuote("fn"),": ",dQuote("object")," is 'a' required argument")
#' @export
wQuotes <- function (...) {
  ## Markers and the quoting function each one expands to, applied in order:
  ## text between paired '' becomes sQuote()d, between paired * dQuote()d.
  markers <- list(sQuote = "''", dQuote = "*")
  txt <- paste0(...)
  for (fn in names(markers)) {
    pieces <- strsplit(txt, split = markers[[fn]], fixed = TRUE)[[1]]
    if (length(pieces) > 1) {
      ## A leading empty piece means the text started with the marker, in
      ## which case the first remaining piece is the one inside the quotes;
      ## otherwise the quoted pieces are the even-indexed ones.
      starts.quoted <- pieces[1] == ""
      if (starts.quoted) pieces <- pieces[-1]
      first <- if (starts.quoted) 1 else 2
      for (i in seq(first, length(pieces), by = 2)) {
        pieces[i] <- do.call(fn, list(pieces[i]))
      }
    }
    ## Reassemble (drops nothing; paste0 over the list concatenates pieces).
    txt <- do.call(paste0, as.list(pieces))
  }
  txt
}
# The `pompExample` function has been eliminated in pomp version 2. For the sake
# of a more timely release of a pomp compatible version of panelPomp, the last
# version of `pompExample` (as written by Aaron A. King) is temporarily
# reproduced here. panelPomp will also soon move away from this approach of
# loading examples.
# Load a named example by sourcing "<example>.R" from the registered example
# directories (option "pomp.examples"; see .onAttach above).
#
# example: bare or quoted example name; "" lists available examples instead.
# ...:     variables made available to the example script while it is sourced.
# show:    if TRUE, display the example file instead of running it.
# envir:   environment receiving the created objects; NULL returns them as a
#          named list instead of assigning them.
pompExample <- function (example, ..., show = FALSE, envir = .GlobalEnv) {
  # Accept an unquoted example name via substitute().
  example <- as.character(substitute(example))
  ep <- paste0("in ",sQuote("panelPomp:::pompExample"),": ")
  pomp.dir <- system.file("examples",package="pomp")
  exampleDirs <- getOption("pomp.examples",default=pomp.dir)
  names(exampleDirs) <- exampleDirs
  show <- as.logical(show)
  if (example=="") {
    # No name given: print the example names found in each directory.
    avlbl <- lapply(exampleDirs,list.files,pattern=".+?R$")
    avlbl <- lapply(avlbl,function(x) gsub("\\.R$","",x))
    for (dir in exampleDirs) {
      cat("examples in ",dir,":\n",sep="")
      print(avlbl[[dir]])
    }
  } else {
    # Source the example inside its own environment seeded with ... values.
    evalEnv <- list2env(list(...))
    file <- c(lapply(exampleDirs,list.files,
                     pattern=paste0(example,".R"),
                     full.names=TRUE),
              recursive=TRUE)
    if (length(file)<1) {
      stop(ep,"cannot find file ",
           sQuote(paste0(example,".R")),call.=FALSE)
    }
    #if (length(file)>1) {
    #  warning(ep,"using ",sQuote(file[1])," from ",sQuote(names(file)[1]),call.=FALSE)
    #}
    if (show) {
      file.show(file[1])
      return(invisible(NULL))
    }
    # source() reports the names of the objects the script created.
    objs <- source(file[1],local=evalEnv)
    if (is.null(envir)) {
      # Return the created objects as a named list.
      obj <- setNames(lapply(objs$value,get,envir=evalEnv),objs$value)
    } else if (is.environment(envir)) {
      # Copy the created objects into the requested environment.
      for (i in seq_along(objs$value)) {
        assign(objs$value[i],
               get(objs$value[i],envir=evalEnv),
               envir=envir)
      }
      cat("newly created object(s):\n",objs$value,"\n")
      obj <- NULL
    } else {
      stop(ep,sQuote("envir")," must be an environment or NULL",call.=FALSE)
    }
    invisible(obj)
  }
}
c0ebaa5966f18b00162945ababc0e8a8bdc21df6 | 29585dff702209dd446c0ab52ceea046c58e384e | /wBoot/R/print.boot.regcor.R | 0d167491942fb66c61a853830d67df25a4bd11b7 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,513 | r | print.boot.regcor.R | print.boot.regcor <-
# S3 print method body for "boot.regcor" objects (assigned to
# print.boot.regcor on the preceding line). Side effects: draws a histogram
# of the bootstrap replicates with reference lines (observed value, mean of
# bootstraps, CI limits and, if present, the null value), then prints the
# summary, bootstrap, hypothesis-test and confidence-interval tables.
function(x, ...)
{
  # A non-NULL $Null means a hypothesis test was requested.
  test <- !is.null(x$Null)
  hist(x$Boot.values,breaks=20,xlab=paste("bootstrap",x$Statistic),
    main=paste("Histogram of bootstrap ",x$Statistic,"s",sep=""))
  # Colors 2:5 = observed, mean, CI limits, null (matching the legend).
  abline(v=x$Observed,col="2")
  abline(v=x$Mean,col="3")
  abline(v=c(x$Confidence.limits),col="4")
  if (test) abline(v=x$Null,col="5")
  leg.text <- if (test) expression(Observed,Mean.boots,Confidence.interval,Null.value)
    else expression(Observed,Mean.boots,Confidence.interval)
  legend("topright",leg.text,col=2:5,lwd=2,cex=.6)
  cat("\n\n",x$Header,"\n\n")
  # Correlation analyses label the two columns Variable.1/Variable.2;
  # regression analyses label them Predictor/Response.
  if (x$cor.ana)
    print(data.frame(SUMMARY="STATISTICS",Variable.1=x$Variable.1,
      Variable.2=x$Variable.2,n=x$n,Statistic=x$Statistic,Observed=x$Observed),
      row.names=FALSE)
  else
    print(data.frame(SUMMARY="STATISTICS",Predictor=x$Variable.1,
      Response=x$Variable.2,n=x$n,Statistic=x$Statistic,Observed=x$Observed),
      row.names=FALSE)
  cat("\n")
  print(data.frame(BOOTSTRAP="SUMMARY",Replications=x$Replications,Mean=x$Mean,
    SE=x$SE,Bias=x$Bias,Percent.bias=x$Percent.bias),row.names=FALSE)
  cat("\n")
  if (test) print(data.frame(HYPOTHESIS="TEST",Null=x$Null,
    Alternative=x$Alternative,P.value=x$P.value),row.names=FALSE)
  if (test) cat("\n")
  # The CI table includes a Type column only when the object carries one.
  if (!is.null(x$Type))
    print(data.frame(CONFIDENCE="INTERVAL",Level=x$Level, Type=x$Type,
      Confidence.interval=x$Confidence.interval),row.names=FALSE)
  else
    print(data.frame(CONFIDENCE="INTERVAL",Level=x$Level,
      Confidence.interval=x$Confidence.interval),row.names=FALSE)
  cat("\n\n")
}
|
551fd0ed6621d127215cb0942949339675c4c2e5 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/AMCP/examples/chapter_15_exercise_17.Rd.R | 55f934dc1830d3dfdd3434f44e117c1161d4c875 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 427 | r | chapter_15_exercise_17.Rd.R | library(AMCP)
### Name: chapter_15_exercise_17
### Title: The data used in Chapter 15, Exercise 17
### Aliases: chapter_15_exercise_17 Chapter_15_Exercise_17 C15E17 c15e17
### Keywords: datasets
### ** Examples
# Load the data (the long and short dataset names refer to the same data).
data(chapter_15_exercise_17)
# Or, alternatively load the data as
data(C15E17)
# View the structure
str(chapter_15_exercise_17)
# Brief summary of the data.
summary(chapter_15_exercise_17)
|
43b226fc4865cbd6d6fc571fadd038880b56e4eb | 25449f88edddc74beb261a934964d7d1ce358deb | /tests/testthat/test-get_quantity.R | f861faf424ced4d0b2709588ec081cdd8937238c | [
"MIT"
] | permissive | adokter/bioRad | 53de114ca6e2151743045db8556ffd7a45f90570 | d4935eddaa7cc1c3c50e47278e72967c8bbd980c | refs/heads/master | 2023-09-01T10:49:36.747974 | 2023-07-28T14:12:57 | 2023-07-28T14:12:57 | 59,586,835 | 29 | 21 | NOASSERTION | 2023-09-02T17:36:08 | 2016-05-24T15:49:06 | R | UTF-8 | R | false | false | 5,276 | r | test-get_quantity.R | vp <- example_vp
# Fixtures shared by the tests below (example_vp/example_vpts ship with
# bioRad; `vp` itself is created just above from example_vp).
vp_list_mixed <- list(example_vp, "not_a_vp")
vpts <- example_vpts
test_that("get_quantity() returns error on incorrect parameters", {
  # No S3 method exists for arbitrary objects.
  expect_error(
    get_quantity("not_a_vp", "dens"),
    regexp = "no applicable method for 'get_quantity' applied to an object of class")
  # A list must contain only vp objects.
  expect_error(
    get_quantity(vp_list_mixed, "dens"),
    "`x` must be list of `vp` objects.",
    fixed = TRUE
  )
  # Unknown quantity names are rejected for both vp and vpts input.
  expect_error(
    get_quantity(vp, "not_a_quantity"),
    "Can't find quantity `not_a_quantity` in `x`",
    fixed = TRUE
  )
  expect_error(
    get_quantity(vpts, "not_a_quantity"),
    "Can't find quantity `not_a_quantity` in `x`",
    fixed = TRUE
  )
  # Quantities are case sensitive
  expect_error(get_quantity(vp, "dbzh"),
    regexp = "Can't find quantity `dbzh` in `x`.",
    fixed = TRUE)
  expect_error(get_quantity(vp, "DENS"),
    regexp = "Can't find quantity `DENS` in `x`.",
    fixed = TRUE)
})
# Each expected vector is built by hand from vp$data, named by height, with
# the documented below-sd_vvp_threshold substitutions applied (eta -> 0,
# dbz -> -Inf, speeds -> NaN), then compared to get_quantity()'s output.
test_that("get_quantity.vp() returns correct quantity, processing eta, dbz, ff when below sd_vvp_threshold", {
  # Not tested for vp_list as that is a repetition of vp method
  # dens is returned as is
  dens <- vp$data$dens
  names(dens) <- vp$data$height # Add heights to make named vector
  expect_equal(get_quantity(vp, "dens"), dens)
  # height is returned as is
  height <- vp$data$height
  names(height) <- vp$data$height # Add heights to make named vector
  expect_equal(get_quantity(vp, "height"), height)
  # eta is set to 0 when below sd_vvp_threshold
  eta <- vp$data$eta
  eta[vp$data$sd_vvp < sd_vvp_threshold(vp)] <- 0
  names(eta) <- vp$data$height
  expect_equal(get_quantity(vp, "eta"), eta)
  # dbz is set to -Inf when below sd_vvp_threshold
  dbz <- vp$data$dbz
  dbz[vp$data$sd_vvp < sd_vvp_threshold(vp)] <- -Inf
  names(dbz) <- vp$data$height
  expect_equal(get_quantity(vp, "dbz"), dbz)
  # ff (not tested for u, v, w, dd) is set to NaN when below sd_vvp_threshold
  ff <- vp$data$ff
  ff[vp$data$sd_vvp < sd_vvp_threshold(vp)] <- NaN
  names(ff) <- vp$data$height
  expect_equal(get_quantity(vp, "ff"), ff)
})
# Same substitution rules as the vp test above, but on matrices: rows are
# heights, columns are timestamps.
test_that("get_quantity.vpts() returns correct quantity, processing eta, dbz, ff when below sd_vvp_threshold", {
  # dens is returned as is
  dens <- vpts$data$dens
  rownames(dens) <- vpts$height
  colnames(dens) <- as.character(vpts$datetime)
  expect_equal(get_quantity(vpts, "dens"), dens)
  # height is returned as a matrix repetition of vpts$height
  height <- matrix(rep(as.numeric(vpts$height),dim(vpts)[1]), ncol=dim(vpts)[1])
  rownames(height) <- vpts$height
  colnames(height) <- as.character(vpts$datetime)
  expect_equal(get_quantity(vpts, "height"), height)
  # eta is set to 0 when below sd_vvp_threshold
  eta <- vpts$data$eta
  rownames(eta) <- vpts$height
  colnames(eta) <- as.character(vpts$datetime)
  eta[vpts$data$sd_vvp < sd_vvp_threshold(vpts)] <- 0
  expect_equal(get_quantity(vpts, "eta"), eta)
  # dbz is set to -Inf when below sd_vvp_threshold
  dbz <- vpts$data$dbz
  rownames(dbz) <- vpts$height
  colnames(dbz) <- as.character(vpts$datetime)
  dbz[vpts$data$sd_vvp < sd_vvp_threshold(vpts)] <- -Inf
  expect_equal(get_quantity(vpts, "dbz"), dbz)
  # ff (not tested for u, v, w, dd) is set to NaN when below sd_vvp_threshold
  ff <- vpts$data$ff
  rownames(ff) <- vpts$height
  colnames(ff) <- as.character(vpts$datetime)
  ff[vpts$data$sd_vvp < sd_vvp_threshold(vpts)] <- NaN
  expect_equal(get_quantity(vpts, "ff"), ff)
})
# Shape check: every supported quantity of a vp comes back as a vector.
test_that("get_quantity.vp() returns vectors for all 17 quantities", {
  expect_vector(get_quantity(vp, "height"))
  expect_vector(get_quantity(vp, "dens"))
  expect_vector(get_quantity(vp, "u"))
  expect_vector(get_quantity(vp, "v"))
  expect_vector(get_quantity(vp, "w"))
  expect_vector(get_quantity(vp, "ff"))
  expect_vector(get_quantity(vp, "dd"))
  expect_vector(get_quantity(vp, "sd_vvp"))
  expect_vector(get_quantity(vp, "gap"))
  expect_vector(get_quantity(vp, "dbz"))
  expect_vector(get_quantity(vp, "eta"))
  expect_vector(get_quantity(vp, "dens"))
  expect_vector(get_quantity(vp, "DBZH"))
  expect_vector(get_quantity(vp, "n"))
  expect_vector(get_quantity(vp, "n_all"))
  expect_vector(get_quantity(vp, "n_dbz"))
  expect_vector(get_quantity(vp, "n_dbz_all"))
})
# Shape check: every supported quantity of a vpts comes back as a matrix.
test_that("get_quantity.vpts() return a matrix for all 17 quantities", {
  expect_true(is(get_quantity(vpts, "height"), "matrix"))
  expect_true(is(get_quantity(vpts, "dens"), "matrix"))
  expect_true(is(get_quantity(vpts, "u"), "matrix"))
  expect_true(is(get_quantity(vpts, "v"), "matrix"))
  expect_true(is(get_quantity(vpts, "w"), "matrix"))
  expect_true(is(get_quantity(vpts, "ff"), "matrix"))
  expect_true(is(get_quantity(vpts, "dd"), "matrix"))
  expect_true(is(get_quantity(vpts, "sd_vvp"), "matrix"))
  expect_true(is(get_quantity(vpts, "gap"), "matrix"))
  expect_true(is(get_quantity(vpts, "dbz"), "matrix"))
  expect_true(is(get_quantity(vpts, "eta"), "matrix"))
  expect_true(is(get_quantity(vpts, "dens"), "matrix"))
  expect_true(is(get_quantity(vpts, "DBZH"), "matrix"))
  expect_true(is(get_quantity(vpts, "n"), "matrix"))
  expect_true(is(get_quantity(vpts, "n_all"), "matrix"))
  expect_true(is(get_quantity(vpts, "n_dbz"), "matrix"))
  expect_true(is(get_quantity(vpts, "n_dbz_all"), "matrix"))
})
|
37d01e2a9c9e1f8de001c1bd16d3dc4e8e9f531c | e9a5a9e952a9ccac535efe64b96cc730b844677b | /man/cellstyle-class.Rd | d1766f96fa673ebdcee7d470e931a63d4cf2351c | [] | no_license | miraisolutions/xlconnect | 323c22258439616a4d4e0d66ddc62204094196c9 | ae73bfd5a368484abc36638e302b167bce79049e | refs/heads/master | 2023-09-04T05:27:42.744196 | 2023-08-30T07:10:44 | 2023-08-30T07:10:44 | 8,108,907 | 114 | 35 | null | 2023-08-30T07:10:46 | 2013-02-09T11:17:42 | R | UTF-8 | R | false | false | 3,642 | rd | cellstyle-class.Rd | \name{cellstyle-class}
\Rdversion{1.1}
\docType{class}
\alias{cellstyle-class}
\title{Class "cellstyle"}
\description{
This class represents a cell style in a Microsoft Excel \code{\linkS4class{workbook}}. S4 objects of this class
and corresponding methods are used to manipulate cell styles. This includes setting data formats,
borders, background- and foreground-colors, etc.
}
\section{Objects from the Class}{
Cell styles are created by calling the \code{\link[=createCellStyle-methods]{createCellStyle}} method on a
\code{\linkS4class{workbook}} object.
}
\section{Slots}{
\describe{
\item{\code{jobj}:}{Object of class \code{jobjRef} (see package \pkg{rJava}) which represents a Java object reference that is used
in the back-end to manipulate the underlying Excel cell style instance.}
}
}
\references{
Apply, create, or remove a cell style:\cr
\url{https://support.microsoft.com/en-us/office/apply-create-or-remove-a-cell-style-472213bf-66bd-40c8-815c-594f0f90cd22?ocmsassetid=hp001216732&correlationid=5691ac73-b7a2-40c3-99aa-a06e806bb566&ui=en-us&rs=en-us&ad=us}
}
\author{
Martin Studer\cr
Mirai Solutions GmbH \url{https://mirai-solutions.ch}
}
\note{
\pkg{XLConnect} generally makes use of custom (named) cell styles. This allows users to more easily manage cell styles via Excel's cell style
menu. For example, assuming you were using a specific custom cell style for your data table headers, you can change the header
styling with a few clicks in Excel's cell style menu across all tables.
}
\seealso{
\code{\linkS4class{workbook}}, \code{\link[=createCellStyle-methods]{createCellStyle}}, \code{\link[=setStyleAction-methods]{setStyleAction}},
\code{\link[=setCellStyle-methods]{setCellStyle}}
}
\examples{\dontrun{
# Load workbook (create if not existing)
wb <- loadWorkbook("cellstyles.xlsx", create = TRUE)
# We don't set a specific style action in this demo, so the
# default 'XLConnect' will be used (XLC$"STYLE_ACTION.XLCONNECT")
# Create a sheet named 'mtcars'
createSheet(wb, name = "mtcars")
# Create a named region called 'mtcars' referring to the sheet
# called 'mtcars'
createName(wb, name = "mtcars", formula = "mtcars!$C$4")
# Write built-in data set 'mtcars' to the above defined named region.
# This will use the default style action 'XLConnect'.
writeNamedRegion(wb, mtcars, name = "mtcars")
# Now let's color all weight cells of cars with a weight > 3.5 in red
# (mtcars$wt > 3.5)
# First, create a corresponding (named) cell style
heavyCar <- createCellStyle(wb, name = "HeavyCar")
# Specify the cell style to use a solid foreground color
setFillPattern(heavyCar, fill = XLC$"FILL.SOLID_FOREGROUND")
# Specify the foreground color to be used
setFillForegroundColor(heavyCar, color = XLC$"COLOR.RED")
# Which cars have a weight > 3.5 ?
rowIndex <- which(mtcars$wt > 3.5)
# NOTE: The mtcars data.frame has been written offset with top
# left cell C4 - and we have also written a header row!
# So, let's take that into account appropriately. Obviously,
# the two steps could be combined directly into one ...
rowIndex <- rowIndex + 4
# The same holds for the column index
colIndex <- which(names(mtcars) == "wt") + 2
# Set the 'HeavyCar' cell style for the corresponding cells.
# Note: the row and col arguments are vectorized!
setCellStyle(wb, sheet = "mtcars", row = rowIndex, col = colIndex,
cellstyle = heavyCar)
# Save workbook (this actually writes the file to disk)
saveWorkbook(wb)
# clean up
file.remove("cellstyles.xlsx")
}
}
\keyword{classes}
\keyword{utilities}
|
7a39c74feb74b81bf90686cfe6752602b0dd87e1 | 8ea38f5e226ad45ee7343e8abafdc8393cb0cd89 | /ExtendedDataFig5.constancy/Extended_Data_Fig5a.node_constancy.R | cb1cb29dd332578a035374a5c2f7b48154020513 | [] | no_license | Mengting-Maggie-Yuan/warming-network-complexity-stability | 79360ae41b4eba610ee94d80aee4f71af8a87874 | 4a2721365ff136f0e99acea798965d56bcefda28 | refs/heads/master | 2022-01-02T17:18:02.261894 | 2021-12-09T06:43:53 | 2021-12-09T06:43:53 | 219,168,891 | 26 | 32 | null | null | null | null | UTF-8 | R | false | false | 2,877 | r | Extended_Data_Fig5a.node_constancy.R | # contributor: Maggie Yuan
# Extended Data Fig. 5a: constancy (mean / sd over the 6 years 2009-2014) of
# each networked OTU, compared between warming and control plots.
library(ggplot2)
library(gridExtra)
setwd("/Users/maggieyuan/Documents/!annual_network/GitHub/ExtendedDataFig5.constancy/")
otu = read.table("input_file/OTUtable_NetworkedOTUs_AllSamples.txt", sep="\t", header=T, row.names=1)
map = read.table("input_file/SampleMap_AllSamples.txt", sep="\t", header=T)
# Column indices per year. Y09 indices are repeated (each=2) -- presumably to
# match the plot layout of the later years -- TODO confirm.
id_09 = rep(which(map$Year == "Y09"), each=2)
id_10 = which(map$Year == "Y10")
id_11 = which(map$Year == "Y11")
id_12 = which(map$Year == "Y12")
id_13 = which(map$Year == "Y13")
id_14 = which(map$Year == "Y14")
# check plot order: columns of each per-year table must line up plot-by-plot.
data.frame(map$Plot_full_name[id_09],
	map$Plot_full_name[id_10],
	map$Plot_full_name[id_11],
	map$Plot_full_name[id_12],
	map$Plot_full_name[id_13],
	map$Plot_full_name[id_14])
warming_trt = map$Warming[id_14]
# separate OTU table into one table per year (columns = plots).
otu_09 = as.data.frame(otu[, id_09])
otu_10 = as.data.frame(otu[, id_10])
otu_11 = as.data.frame(otu[, id_11])
otu_12 = as.data.frame(otu[, id_12])
otu_13 = as.data.frame(otu[, id_13])
otu_14 = as.data.frame(otu[, id_14])
# check OTU table order against the sample map (should all be 0 mismatches).
sum(names(otu_09) != map$Sample[id_09]) # 24 doesn't match because auto changed column names
sum(names(otu_10) != map$Sample[id_10])
sum(names(otu_11) != map$Sample[id_11])
sum(names(otu_12) != map$Sample[id_12])
sum(names(otu_13) != map$Sample[id_13])
sum(names(otu_14) != map$Sample[id_14])
# calculate constancy = mean / sd across the 6 years (sd uses n-1 = 5).
otu_mean = (otu_09+otu_10+otu_11+otu_12+otu_13+otu_14)/6
otu_sd = sqrt(((otu_09-otu_mean)^2 + (otu_10-otu_mean)^2 + (otu_11-otu_mean)^2 + (otu_12-otu_mean)^2 + (otu_13-otu_mean)^2 + (otu_14-otu_mean)^2)/5)
otu_constancy = otu_mean/otu_sd
write.table(otu_constancy, "observed_constancy_of_each_node.csv", sep=",")
# Average constancy per OTU within warming (W) and control (N) plots;
# keep only finite values (OTUs with sd == 0 give Inf/NaN).
otu_constancy_w = otu_constancy[,which(warming_trt == "W")]
otu_constancy_c = otu_constancy[,which(warming_trt == "N")]
otu_constancy_w_avg = rowMeans(otu_constancy_w)
otu_constancy_c_avg = rowMeans(otu_constancy_c)
con_w = otu_constancy_w_avg[is.finite(otu_constancy_w_avg)]
con_c = otu_constancy_c_avg[is.finite(otu_constancy_c_avg)]
# plot Extended Data Fig5a - node constancy (boxplot + jittered points).
nc_df = data.frame(warming = c(rep("control", length(con_c)), rep("warming", length(con_w))),
	nc = c(con_c, con_w))
ggplot(nc_df, aes(x=warming, y=nc, fill=warming)) +
	geom_boxplot(alpha=1, width=0.4, outlier.shape = NA) +
	geom_jitter(shape=16, size=0.5, position=position_jitterdodge(jitter.width = 0.01, dodge.width = 0.8)) +
	scale_fill_manual(values=c("#214da0", "#e7211f")) +
	labs(y = "Node constancy") +
	theme(axis.line = element_line(colour = "black"),
		panel.grid.major = element_blank(),
		panel.grid.minor = element_blank(),
		#panel.border = element_blank(),
		panel.background = element_blank(),
		panel.border = element_rect(colour = "black", fill=NA, size=0.6),
		legend.position="none")
|
a715248295d5026dde671a2eaa9ab098c0c93f79 | b7e291947b8eb7e6ba02883bca6b204b3e29d642 | /man/printDebug.Rd | 4931b9cbbd0739134e2df5f8404c5bef2e41300d | [] | no_license | hjanime/jamba | f725f643a7ccd015e3d12171802b261b499d8cad | e2b385b9fedba9afbaab961a49925c104f7bf2f3 | refs/heads/master | 2020-09-23T03:26:52.609306 | 2019-10-30T14:59:45 | 2019-10-30T14:59:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 6,824 | rd | printDebug.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jamba.r
\name{printDebug}
\alias{printDebug}
\title{print colorized output to R console}
\usage{
printDebug(..., fgText = NULL, bgText = NULL, fgTime = "cyan",
timeStamp = TRUE, comment = TRUE, formatNumbers = TRUE,
trim = TRUE, digits = NULL, nsmall = 0L, justify = "left",
big.mark = "", small.mark = "", zero.print = NULL, width = NULL,
doColor = NULL, splitComments = FALSE, collapse = "", sep = ",",
detectColors = TRUE, darkFactor = c(1, 1.5), sFactor = c(1, 1.5),
lightMode = checkLightMode(), Crange = NULL, Lrange = NULL,
removeNA = FALSE, replaceNULL = NULL,
adjustRgb = getOption("jam.adjustRgb"), byLine = FALSE,
verbose = FALSE, indent = "", keepNA = TRUE, file = "",
append = TRUE, invert = FALSE, htmlOut = FALSE, x)
}
\arguments{
\item{...}{text to be printed to the R console.}
\item{fgText}{vector of R compatible colors, or a list of vectors of
R compatible colors, to define the foreground colors. In the latter
case, each vector is applied to each list item from '...'}
\item{bgText}{vector of R compatible colors, or a list of vectors,
to define the background color.}
\item{fgTime}{character R color to colorize the time}
\item{timeStamp}{logical whether to include a time stamp in output}
\item{comment}{logical whether to prefix output with '##' as a comment}
\item{formatNumbers}{logical whether to format numbers using
\code{\link[base]{format}} which controls the number of digits displayed.}
\item{trim, digits, nsmall, justify, big.mark, small.mark, zero.print, width}{parameters sent to the \code{\link[base]{format}} function.}
\item{doColor}{NULL or logical indicating whether to colorize output. If
NULL it detects whether the crayon package is available and console
color is enabled.}
\item{splitComments}{logical whether to color each element independently
without light-dark alternating pattern.}
\item{collapse}{character collapse string used to separate list items,
by default "" so text separation is expected in the input data.}
\item{sep}{character separator used to separate vector elements, when
a list items contains a vector.}
\item{detectColors}{logical whether to detect and potentially try to
correct console color capabilities.}
\item{darkFactor}{numeric darkness to apply to alternative vector values
when using alternating light-dark color shading.}
\item{sFactor}{numeric color saturation to apply to alternative vector
values when using alternating light-dark color shading.}
\item{lightMode}{boolean or NULL, indicating whether the text background
color is light, thus imposing a maximum brightness for colors displayed.
It use lightMode if defined by the function caller, otherwise it will
use options("jam.lightMode") if defined, lastly it will attempt to detect
whether running inside Rstudio by checking the environment variable
"RSTUDIO", and if so it will assign lightMode TRUE.}
\item{Crange}{numeric range of chroma values, ranging
between 0 and 100. When NULL, default values will be
assigned to Crange by \code{setCLranges()}.}
\item{Lrange}{numeric range of luminance values, ranging
between 0 and 100. When NULL, default values will be
assigned to Lrange by \code{setCLranges()}.}
\item{removeNA}{logical whether to remove NA values and not print to
the console.}
\item{replaceNULL}{character or NULL, optionally replace NULL elements
with non-NULL character value.}
\item{adjustRgb}{numeric value adjustment used during the conversion of
RGB colors to ANSI colors, which is inherently lossy. If not defined,
it uses the default returned by \code{setCLranges()} which itself uses
\code{getOption("jam.adjustRgb")} with default=0. In order to boost
color contrast, an alternate value of -0.1 is suggested.}
\item{byLine}{logical whether to delimit lists by line instead of
using collapse to combine them onto one line.}
\item{verbose}{logical whether to print verbose output}
\item{indent}{character optional characters used as a prefix to indent
output.}
\item{file}{passed to \code{cat}, to allow sending output to
a specified file.}
\item{append}{logical whether to append output, relevant only when
\code{file} specifies a filename.}
\item{invert}{logical indicating whether foreground and background
colors should be switched.}
\item{htmlOut}{logical indicating whether to print HTML span
output, using format
\code{<span style="color:fg;background-color:bg">text</span>}.}
}
\value{
This function is called for the by-product of printing
debug output, it returns \code{invisible(NULL)}, no output.
}
\description{
print colorized output to R console
}
\details{
This function prints colorized output to the R console, with some
rules for colorizing the output to help visually distinguish items.
Its output also by default begins with comment '#' characters, a
datetimestamp, so that if the output is copied back into the R console it
will not cause a new command to be run.
The colorization uses a vector or list of colors for fgText (foreground)
and bgText (background), applied to each item in '...'. When an item in
'...' is a vector, each vector element is colored alternating light
and dark from that base color, to give visual indication of each element.
The next item in '...' receives the next color from fgText, and so on.
Colors in fgText are recycled to the length of '...'
}
\examples{
printDebug("Testing ", "default ", "printDebug().");
printDebug("List of vectors:", c("one", "two", "three"));
printDebug("List of vectors:", c("one", "two", "three"),
c("four", "five", "six"), collapse=" ");
# slightly different style, one entry per line, indented:
printDebug("List of vectors:", c("one", "two", "three"),
c("four", "five", "six"), collapse="\\n ");
# in an R console, or when writing to a log file, the
# following output text is colored
printDebug(c("red", "blue", "yellow"));
}
\seealso{
Other jam practical functions: \code{\link{applyCLrange}},
\code{\link{breakDensity}}, \code{\link{checkLightMode}},
\code{\link{colNum2excelName}}, \code{\link{exp2signed}},
\code{\link{fileInfo}}, \code{\link{fixYellowHue}},
\code{\link{fixYellow}}, \code{\link{getAxisLabel}},
\code{\link{handleArgsText}}, \code{\link{isFALSEV}},
\code{\link{isTRUEV}}, \code{\link{jamba}},
\code{\link{jargs}}, \code{\link{kable_coloring}},
\code{\link{log2signed}}, \code{\link{make_styles}},
\code{\link{mergeAllXY}}, \code{\link{minorLogTicks}},
\code{\link{newestFile}}, \code{\link{printDebugI}},
\code{\link{renameColumn}}, \code{\link{rmInfinite}},
\code{\link{rmNA}}, \code{\link{rmNULL}},
\code{\link{sclass}}, \code{\link{sdima}},
\code{\link{sdim}}, \code{\link{setCLranges}},
\code{\link{setPrompt}}, \code{\link{ssdima}},
\code{\link{ssdim}}
}
\concept{jam practical functions}
|
4da5f452ddcea5d45b0a03749c681b52ba8f6a1f | eafcbacd06361d83a8da71ae86968f723177570f | /code/2_Plot_CRSS_UBres_generic_annual.R | ffa519cbf5a47b6d560456c0251f1ea7f413aec3 | [
"LicenseRef-scancode-public-domain-disclaimer"
] | permissive | usbr/RW-RDF-Process-Plot | a6edd4a0172294424d90a1456907be2b9baffc8c | 943ff620ef5b96fa30f9c830cfc0853a1420ec62 | refs/heads/master | 2023-08-17T10:22:15.871190 | 2023-08-04T21:15:48 | 2023-08-04T21:15:48 | 144,192,835 | 0 | 1 | null | 2022-03-24T14:41:14 | 2018-08-09T19:04:22 | HTML | UTF-8 | R | false | false | 13,183 | r | 2_Plot_CRSS_UBres_generic_annual.R | # from crspopsdata.feather plot Powell & 1 generic UBres In/PE/Out and write stats
# CF Oct 2021
# Loads the pre-processed CRSS results (feather) and applies user plot settings.
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
warning('Run Process_CRSS_rdf_generic_feather.R before this')
scen_dir_overwrite=FALSE # don't need this for already processed, just give F so doesn't error on libs_n_dirs
#get scen information from .yml file
yaml_nm=FALSE #
results_nm <- "NoChangeNF_PowellElVol"# "NoChangeNF_PowellElVol" #"NewInactCap_NewNF_PowEV"
#libraries and setup directories, just use getwd()
source(file.path(getwd(),"code","libs_n_dirs.R"))
if(T){ #if you've already processed just Load Feather with Processed Results
  scen_res <- feather::read_feather(path = file.path(feather_data_dir,'crspopsdata.feather'))
  summary(scen_res)
  scens <- unique(scen_res$ScenarioGroup)
  scens
  #make ggplot keep this order rather than alpha
  scen_res$ScenarioGroup <- factor(scen_res$ScenarioGroup, levels=scens)
  unique(scen_res$Variable)
  length(unique(scen_res$Scenario))
}
# results_nm<-unique(scen_res$ScenarioGroup)[1]
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## 2. User Input ##
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#pick which reservoir to graph along with Powell in/PE/out
res <- "FlamingGorge" #BlueMesa #FlamingGorge #Fontenelle #TaylorPark
# NOTE(review): the next line overrides the selection above, so only Powell is
# plotted with the current settings; comment it out to also plot the UB res
res <- F #if F only plot Powell
startyr = 2022 #keep only years in startyr:endyr below
endyr = 2026#2060
print_png <- T #F = don't make separate png figures
widths=9; heights=6
mycolors <- c("#138d75","#f1c40f") #crssplot 138d75=green=dev, f1c40f=gold=baseline
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# END USER INPUT
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# NOTE(review): results_nm is a scenario label, not a file path; this messages
# when no file/dir of that name exists and stops otherwise, which looks
# inverted relative to the intent -- confirm the condition.
if (!file.exists(results_nm)) {
  message(paste('Ploting',results_nm,'for Powell &',res))
} else {
  stop('Run Process_CRSS_rdf_generic_feather.R first')
}
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Plot annual figures
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
yrs2show <- startyr:endyr
scen_res <- scen_res %>%
  dplyr::filter(Year %in% yrs2show)
#parse Year + month name into a Date so the month name can be converted below
scen_res$MonthNum = as.Date(paste0(scen_res$Year,scen_res$Month,"01"), format = "%Y%B%d")
#get a numeric month number (1-12)
scen_res$MonthNum = as.numeric(format.Date(scen_res$MonthNum, format = "%m"))
# UB reservoir section: annual inflow, EOCY storage, annual outflow and EOCY
# elevation written to one PDF (and optional PNGs).
# NOTE: `res != F` relies on character/logical coercion ("name" != FALSE is
# TRUE); with res set to F above, this whole branch is skipped.
if (res != F) {
  pdf(file.path(results_dir,paste0(results_nm,"_AnnualPowell+",res,"_",first(yrs2show),"-",last(yrs2show),'.pdf')), width=9, height=6)
  # --- annual inflow: sum months within each trace-year, then average traces ---
  variable = paste0(res,".Inflow")
  title = paste(variable,first(yrs2show),"-",last(yrs2show))
  y_lab = "Annual Flow (1,000 ac-ft/yr)"
  p <- scen_res %>%
    dplyr::filter(Variable == variable) %>%
    mutate(Value = Value/1000) %>% #convert to KAF after we convert to AF
    dplyr::group_by(ScenarioGroup,TraceNumber,Year) %>%
    summarise(Value = sum(Value)) %>% #first sum by year, keeping scens, traces, and years together
    dplyr::group_by(ScenarioGroup,Year) %>%
    summarise(Value = mean(Value)) %>% #then avg all traces, keeping scens and years together
    ggplot(aes(x = Year, y = Value, color = ScenarioGroup)) +
    # scale_x_continuous(breaks = 2021:2040) +
    geom_line() +
    theme_light() +
    scale_y_continuous(labels = scales::comma) +
    scale_color_manual(values = mycolors) + crssplot::theme_crss() +
    labs(title = paste("Average Annual",title), y = y_lab, x = "Year")
  print(p)
  if(print_png==T){ ggsave(filename = file.path(figures_dir,paste("Average Annual",variable,".png")), width = widths[1],height = heights[1])}
  # --- end-of-calendar-year storage: December values averaged over traces ---
  variable = paste0(res,".Storage")
  title = paste(variable,first(yrs2show),"-",last(yrs2show))
  y_lab = "EOCY Storage (1,000 ac-ft)"
  exc_month = 12
  p <- scen_res %>%
    dplyr::filter(Variable == variable) %>%
    dplyr::filter(MonthNum%in%exc_month) %>%
    dplyr::group_by(ScenarioGroup, Year) %>%
    dplyr::summarise(Value = mean(Value)) %>%
    mutate(Value = Value/1000) %>% #convert to KAF after we convert to AF
    ggplot(aes(x = Year, y = Value, color = ScenarioGroup))+#, group = ScenarioGroup)) +
    geom_line() +
    theme_light() +
    scale_color_manual(values = mycolors) + crssplot::theme_crss() +
    labs(title = paste("Average EOCY",title), y = y_lab, x = "Year")
  print(p)
  if(print_png==T){ ggsave(filename = file.path(figures_dir,paste("Mean EOCY",variable,".png")), width = widths[1],height = heights[1])}
variable = paste0(res,".Outflow")
title = paste(variable,first(yrs2show),"-",last(yrs2show))
y_lab = "Annual Flow (1,000 ac-ft/yr)"
p <- scen_res %>%
dplyr::filter(Variable == variable) %>%
mutate(Value = Value/1000) %>% #convert to KAF after we convert to AF
dplyr::group_by(ScenarioGroup,TraceNumber,Year) %>%
summarise(Value = sum(Value)) %>% #first sum by year, keeping scens, traces, and years together
dplyr::group_by(ScenarioGroup,Year) %>%
summarise(Value = mean(Value)) %>% #then avg all traces, keeping scens and years together
ggplot(aes(x = Year, y = Value, color = ScenarioGroup)) +
# scale_x_continuous(breaks = 2021:2040) +
geom_line() +
theme_light() +
scale_y_continuous(labels = scales::comma) +
scale_color_manual(values = mycolors) + crssplot::theme_crss() +
labs(title = paste("Average Annual",title), y = y_lab, x = "Year")
print(p)
if(print_png==T){ ggsave(filename = file.path(figures_dir,paste("Average Annual",variable,".png")), width = widths[1],height = heights[1])}
variable = paste0(res,".PE")
title = paste(variable,first(yrs2show),"-",last(yrs2show))
y_lab = "EOCY Water Surface Elevation (ft)"
exc_month = 12
p <- scen_res %>%
dplyr::filter(Variable == variable) %>%
dplyr::filter(MonthNum%in%exc_month) %>%
dplyr::group_by(ScenarioGroup, Year) %>%
dplyr::summarise(Value = mean(Value)) %>%
ggplot(aes(x = Year, y = Value, color = ScenarioGroup))+#, group = ScenarioGroup)) +
geom_line() +
theme_light() +
scale_color_manual(values = mycolors) + crssplot::theme_crss() +
labs(title = paste("Average EOCY",title), y = y_lab, x = "Year")
print(p)
if(print_png==T){ ggsave(filename = file.path(figures_dir,paste("Mean EOCY",variable,".png")), width = widths[1],height = heights[1])}
  #print out res stats:
  # annual totals per trace for the flow variables; December rows for
  # storage/elevation; then summary stats across traces, written wide
  scen_res_stats_inout <- scen_res %>%
    dplyr::filter(Variable %in% paste0(res,c(".Inflow",".Outflow"))) %>%
    dplyr::group_by(ScenarioGroup, Year,Variable,TraceNumber) %>% #by leaving Variable in I keep the name in the resulting df
    summarise(Value = sum(Value)) #first sum by year, keeping scens, traces, and years together
  scen_res_stats_eocy <- scen_res %>%
    dplyr::filter(Variable %in% paste0(res,c(".Storage",".PE"))) %>%
    dplyr::filter(MonthNum%in%12) #%>%
  scen_res_stats <- rbind.data.frame(scen_res_stats_eocy[,names(scen_res_stats_inout)],scen_res_stats_inout)
  scen_res_stats %>%
    dplyr::group_by(ScenarioGroup, Year,Variable) %>% #by leaving Variable in I keep the name in the resulting df
    dplyr::summarise('Mean' = mean(Value), 'Med' = median(Value), #summarize over the traces
                     'q10' = quantile(Value,.1),'q90' = quantile(Value,.9),
                     'Min' = min(Value),'Max' = max(Value)) %>%
    pivot_wider(names_from = ScenarioGroup,values_from=c("Mean","Med","Min","q10","q90","Max")) %>%
    arrange(Variable,Year) %>%
    write.csv(file = file.path(results_dir,"figure_data",paste(res,"_Stats.csv")))
} # end UB res plotting loop
if (res == F) { #if UB res was not plotted then open a pdf just for Powell
  pdf(file.path(results_dir,paste0(results_nm,"_AnnualPowell_",first(yrs2show),"-",last(yrs2show),'.pdf')), width=9, height=6)
}
# Powell section: always plotted; same panel structure as the UB reservoir
# section above, plus a 2022-2026 elevation zoom at the end.
if (T){ #always plot Powell
  # --- annual inflow: sum months within each trace-year, then average traces ---
  variable = "Powell.Inflow"
  title = paste(variable,first(yrs2show),"-",last(yrs2show))
  y_lab = "Annual Flow (1,000 ac-ft/yr)"
  p <- scen_res %>%
    dplyr::filter(Variable == variable) %>%
    mutate(Value = Value/1000) %>% #convert to KAF after we convert to AF
    dplyr::group_by(ScenarioGroup,TraceNumber,Year) %>%
    summarise(Value = sum(Value)) %>% #first sum by year, keeping scens, traces, and years together
    dplyr::group_by(ScenarioGroup,Year) %>%
    summarise(Value = mean(Value)) %>% #then avg all traces, keeping scens and years together
    ggplot(aes(x = Year, y = Value, color = ScenarioGroup)) +
    # scale_x_continuous(breaks = 2021:2040) +
    geom_line() +
    theme_light() +
    scale_y_continuous(labels = scales::comma) +
    scale_color_manual(values = mycolors) + crssplot::theme_crss() +
    labs(title = paste("Average Annual",title), y = y_lab, x = "Year")
  print(p)
  if(print_png==T){ ggsave(filename = file.path(figures_dir,paste("Average Annual",variable,".png")), width = widths[1],height = heights[1])}
  # --- end-of-calendar-year storage: December values averaged over traces ---
  variable = "Powell.Storage"
  title = paste(variable,first(yrs2show),"-",last(yrs2show))
  y_lab = "EOCY Storage (1,000 ac-ft)"
  exc_month = 12
  p <- scen_res %>%
    dplyr::filter(Variable == variable) %>%
    dplyr::filter(MonthNum%in%exc_month) %>%
    dplyr::group_by(ScenarioGroup, Year) %>%
    dplyr::summarise(Value = mean(Value)) %>%
    mutate(Value = Value/1000) %>% #convert to KAF after we convert to AF
    ggplot(aes(x = factor(Year), y = Value, color = ScenarioGroup, group = ScenarioGroup)) +
    geom_line() +
    theme_light() +
    scale_color_manual(values = mycolors) + crssplot::theme_crss() +
    labs(title = paste("Average EOCY",title), y = y_lab, x = "Year")
  print(p)
  if(print_png==T){ ggsave(filename = file.path(figures_dir,paste("Mean EOCY",variable,".png")), width = widths[1],height = heights[1])}
variable = "Powell.Outflow"
title = paste(variable,first(yrs2show),"-",last(yrs2show))
y_lab = "Annual Flow (1,000 ac-ft/yr)"
p <- scen_res %>%
dplyr::filter(Variable == variable) %>%
mutate(Value = Value/1000) %>% #convert to KAF after we convert to AF
dplyr::group_by(ScenarioGroup,TraceNumber,Year) %>%
summarise(Value = sum(Value)) %>% #first sum by year, keeping scens, traces, and years together
dplyr::group_by(ScenarioGroup,Year) %>%
summarise(Value = mean(Value)) %>% #then avg all traces, keeping scens and years together
ggplot(aes(x = Year, y = Value, color = ScenarioGroup)) +
# scale_x_continuous(breaks = 2021:2040) +
geom_line() +
theme_light() +
scale_y_continuous(labels = scales::comma) +
scale_color_manual(values = mycolors) + crssplot::theme_crss() +
labs(title = paste("Average Annual",title), y = y_lab, x = "Year")
print(p)
if(print_png==T){ ggsave(filename = file.path(figures_dir,paste("Average Annual",variable,".png")), width = widths[1],height = heights[1])}
variable = "Powell.PE"
title = paste(variable,first(yrs2show),"-",last(yrs2show))
y_lab = "EOCY Water Surface Elevation (ft)"
exc_month = 12
p <- scen_res %>%
dplyr::filter(Variable == variable) %>%
dplyr::filter(MonthNum%in%exc_month) %>%
dplyr::group_by(ScenarioGroup, Year) %>%
dplyr::summarise(Value = mean(Value)) %>%
ggplot(aes(x = factor(Year), y = Value, color = ScenarioGroup, group = ScenarioGroup)) +
geom_line() +
theme_light() +
ylim(c(3400,3700))+
scale_color_manual(values = mycolors) + crssplot::theme_crss() +
labs(title = paste("Average EOCY",title), y = y_lab, x = "Year")
print(p)
if(print_png==T){ ggsave(filename = file.path(figures_dir,paste("Mean EOCY",variable,".png")), width = widths[1],height = heights[1])}
  # same EOCY elevation plot, zoomed to 2022-2026 (written to the PDF only)
  p <- scen_res %>%
    dplyr::filter(Year%in%2022:2026) %>%
    dplyr::filter(Variable == variable) %>%
    dplyr::filter(MonthNum%in%exc_month) %>%
    dplyr::group_by(ScenarioGroup, Year) %>%
    dplyr::summarise(Value = mean(Value)) %>%
    ggplot(aes(x = factor(Year), y = Value, color = ScenarioGroup, group = ScenarioGroup)) +
    geom_line() +
    theme_light() +
    ylim(c(3400,3700))+
    scale_color_manual(values = mycolors) + crssplot::theme_crss() +
    labs(title = paste("Average EOCY",title), y = y_lab, x = "Year")
  print(p)
  #print out res stats (same layout as the UB reservoir stats above)
  scen_res_stats_inout <- scen_res %>%
    dplyr::filter(Variable %in% paste0("Powell",c(".Inflow",".Outflow"))) %>%
    dplyr::group_by(ScenarioGroup, Year,Variable,TraceNumber) %>% #by leaving Variable in I keep the name in the resulting df
    summarise(Value = sum(Value)) #first sum by year, keeping scens, traces, and years together
  scen_res_stats_eocy <- scen_res %>%
    dplyr::filter(Variable %in% paste0("Powell",c(".Storage",".PE"))) %>%
    dplyr::filter(MonthNum%in%12) #%>%
  scen_res_stats <- rbind.data.frame(scen_res_stats_eocy[,names(scen_res_stats_inout)],scen_res_stats_inout)
  scen_res_stats %>%
    dplyr::group_by(ScenarioGroup, Year,Variable) %>% #by leaving Variable in I keep the name in the resulting df
    dplyr::summarise('Mean' = mean(Value), 'Med' = median(Value), #summarize over the traces
                     'q10' = quantile(Value,.1),'q90' = quantile(Value,.9),
                     'Min' = min(Value),'Max' = max(Value)) %>%
    pivot_wider(names_from = ScenarioGroup,values_from=c("Mean","Med","Min","q10","q90","Max")) %>%
    arrange(Variable,Year) %>%
    write.csv(file = file.path(results_dir,"figure_data",paste0("Powell_",first(yrs2show),"-",last(yrs2show),'_Stats.csv')))
  # NOTE(review): the message reports a generic "res_Stats.csv" name, not the
  # Powell stats path actually written just above -- confirm/update the message
  message(paste('Writing stats file to',file.path(results_dir,"figure_data",paste("res_Stats.csv"))))
  dev.off()
}
# The PDF device is already closed by the dev.off() at the end of the Powell
# block above, so an unconditional dev.off() here always errors with
# "cannot shut down device 1 (the null device)". Only close if a device is
# actually still open, so the script exits cleanly either way.
if (length(dev.list()) > 0) dev.off()
|
31a377f38992756d7a3749b6258f22b828f8b6a4 | 68b2e33cb01e2ea268b392e75bbd417ea7acbbeb | /00sintaxis/04datos-basicos/script_sol.R | 6c065ea53e88a4ee10a5fad3406c67bdd72b1ed9 | [] | no_license | rsanchezs/ciencia-datos-con-r-tareas | 1d2d980a8a9539ceb8b9c27fbb1e29d9e3a9917c | 9b26e88f0dbdb18bde1a8fddc8813331b0a6534e | refs/heads/master | 2021-09-26T03:02:38.089905 | 2018-10-27T00:40:51 | 2018-10-27T00:40:51 | 113,011,162 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 630 | r | script_sol.R | # Cambia `numerico` por 42
# Basic data types exercise: assign a numeric, character, logical and integer
# value and inspect each with class() / is.*() checks.
# Set `numerico` to 42 (R numeric literals are doubles by default)
numerico <- 42
# Check the class of `numerico`
class(numerico)
# Check whether `numerico` is numeric
is.numeric(numerico)
# Set the text to "Ciencia de Datos con R"
caracter <- "Ciencia de Datos con R"
# Check the class of `caracter`
class(caracter)
# Check whether `caracter` is a character string
is.character(caracter)
# Set `logico` to FALSE
logico <- FALSE
# Check the class of `logico`
class(logico)
# Check whether `logico` is logical
is.logical(logico)
# Redefine `numerico` as an integer (the L suffix makes an integer literal)
numerico <- 42L
# Confirm it is now an integer value
is.integer(numerico)
|
7bd8e239340d9829eefe216a4fcecd9dcdc67956 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-IPF03-c03.blif-biu.inv.prop.bb-bmc.conf05.01X-QBF.BB1-Zi.BB2-01X.BB3-Zi.with-IOC.unfold-007/biu.mv.xl_ao.bb-b003-p020-IPF03-c03.blif-biu.inv.prop.bb-bmc.conf05.01X-QBF.BB1-Zi.BB2-01X.BB3-Zi.with-IOC.unfold-007.R | 77080a2ac54d02de143e1541e17c45e65f900474 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,202 | r | biu.mv.xl_ao.bb-b003-p020-IPF03-c03.blif-biu.inv.prop.bb-bmc.conf05.01X-QBF.BB1-Zi.BB2-01X.BB3-Zi.with-IOC.unfold-007.R | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 71140
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 70862
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 70862
c
c Input Parameter (command line, file):
c input filename QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-IPF03-c03.blif-biu.inv.prop.bb-bmc.conf05.01X-QBF.BB1-Zi.BB2-01X.BB3-Zi.with-IOC.unfold-007.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 28028
c no.of clauses 71140
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 70862
c
c QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-IPF03-c03.blif-biu.inv.prop.bb-bmc.conf05.01X-QBF.BB1-Zi.BB2-01X.BB3-Zi.with-IOC.unfold-007.qdimacs 28028 71140 E1 [1108 1109 1110 1111 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1674 1675 1676 1677 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 2240 2241 2242 2243 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2806 2807 2808 2809 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 3372 3373 3374 3375 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3938 3939 3940 3941 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 4461 4463 4465 4467 4469 4471 4473 4475 4477 4479 4481 4483 4485 4487 4489 4491 4493 4497 4504 4505 4506 4507 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4539 4541 4543 4545 4547 4549 4551 4553 4567] 0 232 24589 70862 RED
|
5d683640bf98415c5d6906e4d5d712aa7290924b | 5858bd92d435f69a5bb9a22b94bdeadc3dd9e300 | /01_forward_simulator/forward_simulator.R | d7a7715deee15aca39e0e03205d43db125c038a0 | [
"MIT"
] | permissive | cory-weller/genome-reconstruction-revision | 6945593cad50d2327f7415e17537ccf8595c9786 | 424f37db9a5f0d16dd5db009a134b80f39865459 | refs/heads/master | 2020-07-22T23:53:39.845376 | 2019-10-21T20:04:00 | 2019-10-21T20:04:00 | 207,374,368 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,281 | r | forward_simulator.R | #!/usr/bin/env Rscript
library(data.table)
library(foreach)
library(digest)
# Parse command-line arguments supplied as "-flag value" pairs into an
# environment used as a hash map (flag name -> value).
# NOTE(review): with zero arguments, seq(1, 0, 2) errors ("wrong sign in 'by'
# argument"), so the script cannot be run without flags.
args <- commandArgs(trailingOnly=TRUE)
# Debugging args:
# args <- c("-bed","recombination.bed","-prefix","dgrp","-n0","500","-rate","1.0","-sex","dioecious","-nfounders","32","-ngenerations","2","-lineIDs","lines.txt","-chrx","X","-iter","1","-recombination","femaleOnly","-dmel","TRUE","-nthreads","2")
args.env <- new.env()
for(i in seq(1, length(args), 2)) {
args.env[[args[i]]] <- args[i+1]
}
# Required arguments:
# NOTE: values arrive as character strings; flags not wrapped in as.numeric()
# (e.g. dmel, iteration) stay character and are compared later with coercion
# (e.g. dmel==TRUE works because it coerces to dmel=="TRUE").
bed_file <- args.env[["-bed"]]
prefix <- args.env[["-prefix"]]
popSize <- as.numeric(args.env[["-n0"]])
growthRate <- as.numeric(args.env[["-rate"]])
sexModel <- args.env[["-sex"]]
n_founders <- as.numeric(args.env[["-nfounders"]])
n_generations <- as.numeric(args.env[["-ngenerations"]])
lineIDs_filename <- args.env[["-lineIDs"]]
x.chromosome <- args.env[["-chrx"]]
recombinationModel <- args.env[["-recombination"]]
n_threads <- as.numeric(args.env[["-nthreads"]])
iteration <- args.env[["-iter"]]
dmel <- args.env[["-dmel"]]
# if dmel is TRUE, arms 2L/2R and 3L/3R will be considered one chromosome during recombination
# Do not enter these two arguments unless you are generating recombinant inbred lines
n_RILs <- as.numeric(args.env[["-nRILs"]])
inbreed_generations <- as.numeric(args.env[["-inbreed_generations"]])
# Optional seed override
manual_seed <- args.env[["-seed"]]
# Set seed; use L'Ecuyer-CMRG seed for reproducible %dopar% results
if(! is.null(manual_seed)) {
cat("manual seed: ")
cat(manual_seed)
cat("\n")
set.seed(manual_seed, kind = "L'Ecuyer-CMRG")
} else {
# generate random seed by converting the combined input string to md5,
# subsetting the first seven characters (substr(..., 1, 7)),
# and converting from hexadecimal to integer.
input_string <- paste(bed_file, prefix, popSize, growthRate, sexModel, n_founders, n_generations, lineIDs_filename, x.chromosome, recombinationModel, n_threads, iteration, dmel, n_RILs, inbreed_generations, sep=" ")
cat("input string: ")
cat(input_string)
starting_seed <- strtoi(substr(digest(input_string, algo="md5"), 1, 7), base=16L)
cat("\nstarting seed: ")
cat(starting_seed)
cat("\n")
set.seed(starting_seed, kind = "L'Ecuyer-CMRG")
}
# Stem used for naming the output files of this run
filestem <- paste(prefix, "_", n_founders, "_F", n_generations, "_", iteration, sep="")
# If n_RILs argument exists, set flag
# (as.numeric(NULL) is numeric(0), so length 0 means the flag was absent)
if(! length(n_RILs)==0) {make_RILs <- TRUE} else {make_RILs <- FALSE}
# Register a parallel backend so the foreach %dopar% loops run in parallel
if(n_threads > 1) {
library(doMC)
registerDoMC(cores=n_threads)
}
# Build the founding population as a long-format haplotype map: one row per
# (individual, chromosome, haplotype) holding a single full-length founder
# segment. Columns: chromosome, haplotype, start, stop, founderID, sex, ind.
#
# bed:        data.table recombination map; chromosome names and lengths are
#             taken from its chr/stop columns
# n_founders: number of founder lines to instantiate
# sexModel:   "dioecious" (one M and one F copy per founder) or
#             "hermaphroditic" (one "H" copy per founder)
#
# Males carry a single X, so the second X haplotype is dropped before return.
# Fix: derive the chromosome list from the `bed` argument instead of the
# global `chromosomes`, which is only defined later in the script.
initializePopulation <- function(bed, n_founders, sexModel) {
  chrs <- unique(bed$chr)
  n_chr <- length(chrs)
  # one full-length segment per chromosome and haplotype
  DT <- foreach(chromosome=chrs, .combine="rbind", .inorder=TRUE) %do% {
    data.table(chromosome, haplotype=1:2, start=1, stop=max(bed[chr==chromosome]$stop))
  }
  # replicate each (chromosome, haplotype) row once per founder
  DT <- DT[rep(1:.N, each=n_founders)]
  DT[,founderID:=rep(1:n_founders, n_chr*2)]
  if(sexModel=="dioecious") {
    # duplicate every row so each founder exists as both a male and a female
    DT <- DT[rep(1:.N, each=2)]
    DT[, sex := rep(c("M","F"), n_founders*n_chr*2)]
  } else if(sexModel=="hermaphroditic") {
    DT[, sex := "H"]
  }
  setkey(DT, founderID, sex, chromosome, haplotype, start, stop)
  # consecutive integer individual id per (sex, founder) combination
  DT[, ind := rleid(sex,founderID)]
  # remove 2nd X chromosome from males (males are hemizygous for X)
  return(DT[! (sex=="M" & haplotype==2 & chromosome=="X")][])
}
# Build one gamete for parent `ind.i`, labelled as haplotype `haplotype.i` in
# the offspring. When `recombinant` is FALSE each chromosome is passed intact
# from a randomly chosen parental haplotype; when TRUE, a Poisson number of
# crossovers is placed via the inverse-CDF lookup in `recombination_function`
# and parental haplotypes alternate between breakpoints.
# Relies on script globals: chromosomes, chromosome_sizes, recombination_rates,
# recombination_function.
# Fixes: duplicated `breakpoints <- breakpoints <- ...` assignment removed;
# breakpoints that coincide with a chromosome end are dropped so no empty
# (start > stop) segment is produced.
getGamete <- function(pop, ind.i, haplotype.i, parSex, recombinant) {
  if(recombinant==FALSE) {
    # simply return a random selection of intact parental chromosomes
    foreach(chromosome.i=chromosomes, .combine="rbind", .inorder=TRUE) %do% {
      dt.out <- pop[.(ind.i, chromosome.i, sample(1:2, size=1))]
      dt.out[, haplotype := haplotype.i]
      return(dt.out[,c("chromosome","haplotype","start","stop", "founderID")])
    }
  } else if(recombinant==TRUE) {
    # pull out the individual and recombine chromosome by chromosome
    foreach(chromosome.i = chromosomes, .combine="rbind", .inorder=TRUE) %do% {
      if(chromosome.i=="X" & parSex=="M") {
        # males are hemizygous for X: no crossover possible
        breakpoints <- c(0, chromosome_sizes[[chromosome.i]])
      } else {
        n.crossovers <- rpois(1, lambda=recombination_rates[[chromosome.i]])
        breakpoints <- unique(trunc(sort(recombination_function[[chromosome.i]](runif(n.crossovers)))))
        # drop breakpoints falling exactly on a chromosome end; these would
        # otherwise create zero-length segments and a failing row assignment
        breakpoints <- breakpoints[breakpoints > 0 & breakpoints < chromosome_sizes[[chromosome.i]]]
        breakpoints <- c(0, breakpoints, chromosome_sizes[[chromosome.i]])
      }
      N.recomb <- length(breakpoints) - 2
      # alternate parental haplotypes across segments from a random phase
      startHaplotype <- sample(c(1,2), size=1)
      ranges <- data.table(
        "ind"=ind.i,
        "chromosome"=chromosome.i,
        "haplotype"=(((1 + startHaplotype) : (N.recomb + 1 + startHaplotype)) %% 2 + 1),
        "start"=1+breakpoints[1:(length(breakpoints)-1)],
        "stop"=breakpoints[2:length(breakpoints)])
      foreach(ind.i=ranges$ind, chromosome.i=ranges$chromosome, haplotype.j=ranges$haplotype,
        start.i=ranges$start, stop.i=ranges$stop,.combine="rbind", .inorder=TRUE) %do% {
        if(chromosome.i=="X" & parSex=="M") {
          # NOTE: the keyed lookup may target the X haplotype a male does not
          # carry, yielding an NA-founderID row; the caller drops such rows
          # (i.e. the male transmitted no X)
          dt.out <- pop[.(ind.i, chromosome.i, haplotype.j)]
          dt.out[,haplotype := haplotype.i]
          return(dt.out[,c("chromosome","haplotype","start","stop", "founderID")])
        } else {
          # keep the parent's segments of haplotype.j overlapping
          # [start.i, stop.i], then clip the two outermost segments
          dt.out <- pop[.(ind.i, chromosome.i, haplotype.j)][! (stop < start.i) & ! (start > stop.i)]
          dt.out[1,start := start.i]
          dt.out[dim(dt.out)[1], stop := stop.i]
          dt.out[,haplotype := haplotype.i]
          return(dt.out[,c("chromosome","haplotype","start","stop", "founderID")])
        }
      }
    }
  }
}
# Create one offspring (assigned individual id `n`) by choosing two parents
# from `pop` and combining one gamete from each (see getGamete()).
# Uses the script global `recombinationModel`: under "femaleOnly", male
# gametes are passed on without crossover.
# Fixes: (1) par1sex/par2sex were never set under the hermaphroditic model,
# so the getGamete() calls failed; (2) sample() on a length-one numeric vector
# samples from 1:x, so parent picks now index with sample.int().
doSex <- function(pop, sexModel, n) {
  N.inds <- unique(pop$ind)
  # safe random pick: correct even when only one candidate exists
  pickOne <- function(x) x[sample.int(length(x), size=1)]
  if(sexModel=="hermaphroditic") {
    # any two parents; the same individual may be drawn twice (selfing)
    par1 <- pickOne(N.inds)
    par2 <- pickOne(N.inds)
    par1sex <- unique(pop[ind==par1]$sex)
    par2sex <- unique(pop[ind==par2]$sex)
  } else if(sexModel=="dioecious") {
    # pick first parent at random
    par1 <- pickOne(N.inds)
    par1sex <- unique(pop[ind==par1]$sex)
    # pick second parent from the opposite sex
    if(length(N.inds)==2) {
      par2 <- unique(pop[ind!=par1]$ind)
    } else {
      par2 <- pickOne(unique(pop[sex!=par1sex]$ind))
    }
    par2sex <- unique(pop[ind==par2]$sex)
  }
  recombinant1 <- !(recombinationModel=="femaleOnly" && par1sex=="M")
  recombinant2 <- !(recombinationModel=="femaleOnly" && par2sex=="M")
  g1 <- getGamete(pop, par1, 1, par1sex, recombinant1)
  g2 <- getGamete(pop, par2, 2, par2sex, recombinant2)
  # NA-founderID rows come from a male X lookup on an absent haplotype
  # (effectively a transmitted Y) and are discarded
  ind.out <- rbindlist(list(g1,g2))[!is.na(founderID)]
  ind.out[,ind := n]
  # one distinct X haplotype means the offspring is male
  if (length(unique(ind.out[chromosome=="X"]$haplotype))==1) {
    ind.out[, sex := "M"]
  } else {
    offspringSex <- ifelse(sexModel=="dioecious", "F", "H")
    ind.out[, sex := offspringSex]
  }
  return(ind.out[])
}
# Drosophila correction: convert the fused chromosomes ("2", "3") used during
# recombination back into their arms (2L/2R, 3L/3R). Segments spanning an arm
# boundary are split into one piece per arm; right-arm coordinates are shifted
# back to arm-local positions. Chromosome X passes through unchanged.
# max2L/max3L are the left-arm lengths, i.e. the offsets added when fusing.
# Fix: boundary comparisons now use <= so segments ending (or starting)
# exactly on the boundary are no longer silently dropped.
translateLandR <- function(DT, max2L=23100000L, max3L=24600000L) {
  # Split one fused chromosome `chrom` at `maxLeft` into arms nameL / nameR.
  splitArm <- function(chrom, maxLeft, nameL, nameR) {
    dat <- DT[chromosome == chrom]
    # segments spanning the arm boundary become one piece in each arm
    spanning <- dat[start <= maxLeft & stop > maxLeft]
    left_part <- copy(spanning)
    left_part[, stop := maxLeft]
    left_part[, chromosome := nameL]
    right_part <- copy(spanning)
    right_part[, start := 1]
    right_part[, stop := stop - maxLeft]
    right_part[, chromosome := nameR]
    # segments contained within a single arm; `stop <= maxLeft` (not `<`)
    # keeps segments that end exactly on the boundary as left-arm rows
    whole <- copy(dat[stop <= maxLeft | start > maxLeft])
    whole[stop <= maxLeft, chromosome := nameL]
    whole[start > maxLeft, chromosome := nameR]
    # shift right-arm coordinates back to arm-local positions
    whole[chromosome == nameR, start := start - maxLeft]
    whole[chromosome == nameR, stop := stop - maxLeft]
    rbindlist(list(whole, left_part, right_part))
  }
  # Combine ranges from both split chromosomes plus X as-is
  dat.all <- rbindlist(list(
    splitArm("2", max2L, "2L", "2R"),
    splitArm("3", max3L, "3L", "3R"),
    DT[chromosome == "X"]
  ))
  # Reorder by individual and position
  setkey(dat.all, ind, chromosome, haplotype, start)
  return(dat.all)
}
# Load bed file
# Expected columns: chromosome, start, stop, recombination rate c in cM/Mb.
bed <- fread(bed_file)
setnames(bed, c("chr", "start", "stop", "c"))
# normalize the user-supplied X chromosome name to "X" internally
bed[chr==x.chromosome, chr := "X"]
# Correction for Drosophila
if(dmel==TRUE) {
# Store & add the maximum value of 2L onto every start/stop for 2R, and the
# maximum value of 3L onto every start/stop for 3R, fusing each arm pair
# into a single chromosome ("2", "3"); translateLandR() reverses this later.
max2L <- as.integer(max(bed[chr=="2L"]$stop))
max3L <- as.integer(max(bed[chr=="3L"]$stop))
bed[chr=="2R", start := start + max2L]
bed[chr=="2R", stop := stop + max2L]
bed[chr=="3R", start := start + max3L]
bed[chr=="3R", stop := stop + max3L]
bed[chr %in% c("2L","2R"), chr := "2"]
bed[chr %in% c("3L","3R"), chr := "3"]
}
# Get list of unique chromosome names within .bed file
chromosomes <- unique(bed$chr)
# Convert c (cM per Mb) to Morgans per interval:
# c [cM/Mb] * length [bp] / 1e8 = c * (length/1e6 Mb) / 100 cM-per-M
bed[, M := c * ((stop-start)/1e8)]
# Create hash table with chr -> expected value for number of recombination events
# e.g.,
# > recombination_rates[["2L"]]
# [1] 0.5533038
recombination_rates <- new.env()
for(chromosome in chromosomes) {
recombination_rates[[chromosome]] <- sum(bed[chr==chromosome]$M) # convert c (cM per Megabase) to Morgans
}
# hash table: chr -> chromosome length (bp)
chromosome_sizes <- new.env()
for(chromosome in chromosomes) {
chromosome_sizes[[chromosome]] <- max(bed[chr==chromosome]$stop)
}
# Create hash table with random value (0,1) -> recombination position, via linear interpolation of scaled cumulative sum of recombination rates
bed[, cumulative_M := cumsum(M), by=chr]
bed[, scaled := cumulative_M/max(cumulative_M), by=chr]
genomeSize <- sum(bed[, list(size=max(stop)), by=chr]$size)
recombination_function <- new.env()
for(chromosome in chromosomes) {
recombination_function[[as.character(chromosome)]] <- approxfun(c(0, bed[chr==chromosome]$scaled), c(0,bed[chr==chromosome]$stop))
}
# Read founder line IDs, one per line, from a plain-text file
lineIDs <- readLines(lineIDs_filename)
# Subset (size=N founders) founder IDs, in order that they appear in the file
used_IDs <- lineIDs[sort(sample(length(lineIDs), size=n_founders, replace=FALSE))]
# Initialize Population
pop <- initializePopulation(bed, n_founders, sexModel)
setkey(pop, ind, chromosome, haplotype, start)
# Iterate through generations: each generation produces
# trunc(popSize * growthRate^i) offspring in parallel, then replaces the
# previous generation entirely (non-overlapping generations).
# NOTE(review): assumes n_generations >= 1 (1:n would count down if 0).
for(i in 1:n_generations) {
print(i)
pop2 <- foreach(n=1:trunc((popSize*growthRate**i)), .combine="rbind", .inorder=TRUE) %dopar% {
doSex(pop, sexModel, n)
}
setkey(pop2, ind, chromosome, haplotype, start)
#write.table(pop2, file=paste(stem, "_", i, ".txt", sep=""), sep="\t", quote=FALSE, col.names=TRUE, row.names=FALSE)
popSize <- trunc(popSize*growthRate)
pop <- copy(pop2)
}
# keep the final outbred generation for the optional RIL construction below
pop.outbred <- copy(pop2)
# Steps unique to generating RILs:
if(make_RILs == TRUE) {
RILs <- foreach(nRIL=1:n_RILs, .combine="rbind", .errorhandling="remove", .inorder=TRUE) %do% {
print(paste("RIL ", nRIL, sep=""))
M.founder <- sample(unique(pop.outbred[sex=="M"][,ind]), size=1)
F.founder <- sample(unique(pop.outbred[sex=="F"][,ind]), size=1)
pop <- pop.outbred[ind %in% c(M.founder, F.founder)]
# Do N many generations of inbreeding
for(i in 1:inbreed_generations) {
# Select a single male and female
M.ind <- unique(pop[sex=="M"][,ind])
F.ind <- unique(pop[sex=="F"][,ind])
# Subset population to that single male and single female
pop <- pop[ind %in% c(M.ind, F.ind)]
# Generate first individual
ind1 <- doSex(pop, sexModel, 1)
ind1sex <- unique(ind1[,sex])
# Generate second individual of different sex
while(TRUE) {
ind2 <- doSex(pop, sexModel, 2)
if(unique(ind2[,sex]) != ind1sex) {
break
}
}
pop <- rbindlist(list(ind1, ind2))
setkey(pop, ind, chromosome, haplotype, start)
}
# Return one single inbred individual (female)
dt.return <- pop[sex=="F"]
dt.return[, ind := nRIL][]
return(dt.return)
}
# Fully homozygose RILs by forcing 2nd haplotype to be identical to the 1st
RILs_2 <- copy(RILs[haplotype==1])
RILs_2[, haplotype := 2]
pop <- rbindlist(list(RILs[haplotype==1], RILs_2))
# set n_founders to n_RILs for filename output
n_founders <- n_RILs
pop <- RILs
# Translate founderID to lineID
pop[, lineID := used_IDs[pop$founderID]]
# Remove numeric founderID column
pop[, founderID := NULL]
# change X chromosome back to chrN
pop[chromosome=="X", chromosome := x.chromosome]
# Corrections for Drosophila (splits "2" into "2L" + "2R"; "3" into "3L" + "3R")
if(dmel == TRUE) { pop <- translateLandR(pop) }
setnames(pop, "ind", "RIL_ID")
# Sort haplotype map
setkey(pop, RIL_ID, chromosome, haplotype, start)
} else {
# change X chromosome back to chrN
pop[chromosome=="X", chromosome := x.chromosome]
# Translate founderID to lineID
pop[, lineID := used_IDs[pop$founderID]]
# Corrections for Drosophila (splits "2" into "2L" + "2R"; "3" into "3L" + "3R")
if(dmel == TRUE) { pop <- translateLandR(pop) }
setkey(pop, ind, chromosome, haplotype, start)
}
# Write to file
options(scipen=999)
write.table(pop, file=paste(filestem, ".haps", sep=""), sep="\t", quote=FALSE, col.names=TRUE, row.names=FALSE)
# write.table(used_IDs, file=paste(iteration, ".founders.txt", sep=""), sep="\t", quote=FALSE, col.names=FALSE, row.names=FALSE)
|
7c02cd5faf13e2d3e2a5aba001790293c7495fbb | f43448c5b345ec595d47c6d37491bdde05c98855 | /data-raw/authors.R | f6d542b0b9e1ac356031369f7385d950ac86495e | [] | no_license | ly129/AutoAff | b33ee0b033930b9d47a56889f7eda7f9e606d38c | a03b0e5ead98ce5888ae2fe8cc1219ba418f19af | refs/heads/master | 2022-02-23T07:04:42.029861 | 2019-08-12T21:49:05 | 2019-08-12T21:49:05 | 198,249,623 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,195 | r | authors.R | ## code to prepare `authors` dataset goes here
set.seed(1)

# 26 synthetic affiliations: 13 universities followed by 13 institutes.
aff <- c(
  paste("University of", LETTERS[1:13]),
  paste(LETTERS[14:26], "Institute")
)

n <- 140
p <- 6

# Seven tiers of 20 authors each; tier i gets 7 - i affiliations, padded to
# p columns with either "" or NA (one random choice per author).
# NB: the order of the sample() calls below is kept exactly as in the
# original so the RNG stream (and therefore the dataset) is unchanged.
authors <- matrix(character(n * p), nrow = n)
for (tier in 1:7) {
  for (member in 1:20) {
    row_idx <- 20 * (tier - 1) + member
    authors[row_idx, ] <- c(
      sample(aff, size = 7 - tier, replace = FALSE),
      rep(sample(c("", NA), size = 1), tier - 1)
    )
  }
}

# Shuffle rows so the tiers are interleaved, then convert to a data frame.
authors <- authors[sample(1:n), ]
authors <- as.data.frame(authors)

# Attach synthetic name columns ("Author 1", "First1", "Middle1", "Last1", ...).
authors$names <- paste("Author", 1:140)
authors$first <- paste0("First", 1:140)
authors$middle <- paste0("Middle", 1:140)
authors$last <- paste0("Last", 1:140)

# Put the name columns first, then the six affiliation columns, and rename.
authors <- authors[, c(7:10, 1:6)]
names(authors) <- c("Name", "First", "Middle", "Last", "aff1", "aff2", "aff3", "aff4", "aff5", "aff6")
authors$Degree <- sample(c("BA", "MSc", "PhD"), size = n, replace = TRUE)
# library(AutoAff)
# result <- AutoAff(authors, c("aff1", "aff2", "aff3", "aff4", "aff5", "aff6"))
usethis::use_data(authors, overwrite = TRUE)
|
d4ea398eb1692fbb60915db66105b25d7c98b6f2 | f9a9f9e260498c0ff96809dee0fb9697c75b064e | /regularization_selection/dimension_reduction/pcr.R | fadb1c69fa56a79074d5a9a6e1b948c813c0a62f | [] | no_license | kayfay/R_programming | 4b94f9af3684589c0d29ac47a7f740bb313908a8 | 3f1b3325c2f16d3f578c5f672341b216879aa263 | refs/heads/master | 2021-04-24T20:54:29.637163 | 2019-07-07T17:09:26 | 2019-07-07T17:09:26 | 117,136,236 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,620 | r | pcr.R | # Principal components regression
library(ISLR)
library(pls)
set.seed(2)

# Principal components regression (PCR) on the Hitters salary data.
# Drop players with missing values, then build the predictor matrix
# (without the intercept column) and the response vector.
Hitters <- na.omit(Hitters)
x <- model.matrix(Salary ~ ., Hitters)[, -1]
y <- Hitters$Salary

# Random half of the rows for training; the remainder for testing.
train <- sample(1:nrow(x), nrow(x) / 2)
test <- (-train)
y.test <- y[test]

# Fit PCR on the full data, standardizing predictors before extracting the
# principal components, with 10-fold cross-validation.
# CV RMSEP bottoms out around 16 components (adjCV 345.5); the first few
# components already explain most of the predictor variance
# (1 comp: 38.31% of X, 40.63% of Salary).
pcr.fit <- pcr(Salary ~ ., data = Hitters, scale = TRUE, validation = "CV")
summary(pcr.fit)

# Refit on the training rows only, choose the number of components from the
# validation plot (elbow at M = 7), and estimate the test MSE.
pcr.fit <- pcr(Salary ~ ., data = Hitters, subset = train, scale = TRUE,
               validation = "CV")
validationplot(pcr.fit, val.type = "MSEP") # suggests M = 7
pcr.pred <- predict(pcr.fit, x[test, ], ncomp = 7)
mean((pcr.pred - y.test)^2) # test MSE: 142274.4

# Final model: fit on the full data set with M = 7 components
# (92.26% of X variance, 46.69% of Salary variance explained).
pcr.fit <- pcr(y ~ x, scale = TRUE, ncomp = 7)
summary(pcr.fit)
|
4930775bae1b1bbb1c4c5495af8da18b21048137 | 377b3527d739295c381b7f916f4aab9e7cfabe59 | /cachematrix-tests.R | 1cd90fd227376e174d0ca6a6b0dd21e5c2435d2e | [] | no_license | lisah2u/ProgrammingAssignment2 | 5acc65bfa690f0009e2667c34b3c960134feeec2 | 80630d1d46f3b825e7e706e0228113b7c12c70f6 | refs/heads/master | 2021-01-15T20:57:09.129041 | 2014-06-17T12:06:57 | 2014-06-17T12:06:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,021 | r | cachematrix-tests.R | # Unit tests for cachematrix.R
# Unit tests for cachematrix.R
# Useful discussion here:
# https://class.coursera.org/rprog-004/forum/thread?thread_id=153
# Source cachematrix.R before running.

# Fixtures: a 3x3 identity matrix, a scaled diagonal matrix, and a large
# random matrix used only for timing comparisons.
identity3 <- matrix(c(1, 0, 0, 0, 1, 0, 0, 0, 1), nrow = 3, ncol = 3)
side <- 3
scaled_diag <- diag(10, side, side)
big <- matrix(rnorm(1000000), 1000, 1000)
# For reference, solve(big) without caching took roughly:
#    user  system elapsed
#   1.033   0.002   1.034
# Compare with the cached timing at the bottom of this script.

# Build the cache-matrix object and load the identity matrix into it.
cm <- makeCacheMatrix()
cm$set(identity3)

# get() should echo the stored matrix:
#      [,1] [,2] [,3]
# [1,]    1    0    0
# [2,]    0    1    0
# [3,]    0    0    1
cm$get()

# No inverse has been computed yet, so this returns NULL.
cm$getsolve()

# Compute and cache the inverse.
cacheSolve(cm)

# The cached inverse is now available.
cm$getsolve()

# Timing check: retrieving the cached inverse of the large matrix should be
# essentially instantaneous compared with solve().
cm$set(big)
cacheSolve(cm)
system.time(cm$getsolve()) # compare with time above... nicely cached!
# 0 0 0 |
90e4526ea122375fcbbf11461e84de52a8cd42a3 | bbbbcff6554028a51c365c0936b0b979e59da205 | /R/bayesian_inference_functions.R | 2b642c22f7151d72f6a947f0d3ffe6801ceacb0a | [
"MIT"
] | permissive | eehh-stanford/baydem | 379d18cbcefbf07899d290c2db09095c3c382811 | 3a4e4811ae7d768c43a5aaf99657f255e7454292 | refs/heads/master | 2023-05-22T14:29:09.895165 | 2021-11-07T23:38:13 | 2021-11-07T23:38:13 | 197,023,444 | 11 | 1 | null | null | null | null | UTF-8 | R | false | false | 38,848 | r | bayesian_inference_functions.R | #' @title
#' Sample from the posterior of a density model for a set of radiocarbon
#' measurements
#'
#' @description
#' This is the core function that implements the Bayesian inference. Currently,
#' the only supported density model is a truncated Gaussian mixture. If a
#' starting parameter vector (\code{th0}) is not provided, it is set by calling
#' init_trunc_gauss_mix; the same vector is used for all sampling chains. Named
#' elements of the variable control must consist of one of the following four
#' options (defaults in parentheses):
#'
#' \itemize{
#' \item{\code{num_chains}}
#' {Number of chains (4)}
#' \item{\code{samps_per_chain}}
#' {Number of samples per chain (2000)}
#' \item{\code{warmup}}
#' {Number of warmup samples (\code{samps_per_chain/2})}
#' \item{\code{stan_control}}
#' {Additional control parameters to pass to stan (\code{list()})}
#' \item{\code{mask}}
#' {Whether to mask the likelihood sum based on individual calibration
#' (FALSE)}
#' }
#'
#' The calibration_curve to use for masking is separately input to maintain
#' consistency with previous versions of baydem.
#'
#' @param rc_meas The radiocarbon measurements (see import_rc_data).
#' @param density_model The density model (see set_density_model).
#' @param hp Hyperparameters for the priors and to specify the spacing of the
#' Riemann sum that approximates the integral for the likelihood.
#' @param calib_df The calibration data frame (see load_calib_curve).
#' @param th0 An optional parameter vector to initialize the Stan chains. If not
#' provided, it is set by calling init_trunc_gauss_mix.
#' @param init_seed An optional random number seed for determining the starting
#' parameter vector using a maximum likelihood fit. If not provided, it is
#' drawn. It should not be provided if th0 is provided.
#' @param stan_seed An optional random number seed for the call to Stan. If not
#' provided, it is drawn.
#' @param calibration_curve The calibration curve to use for masking (only used
#' control$mask is TRUE). The default is "intcal20". Other options are
#' "shcal20" and "marine20". For further options see Bchron::BchronCalibrate.
#'
#' @return
#' \code{bayesian_soln}, a list-like object of class bd_bayesian_soln with the
#' following fields:
#' \itemize{
#' \item{\code{fit}}
#' {The result of the call to stan}
#' \item{\code{final_th0}}
#' {The final \code{th0} value; i.e., never NA.}
#' \item{\code{final_init_seed}}
#' {The final init_seed value; i.e., never NA unless \code{th0} is
#' provided.}
#' \item{\code{final_stan_seed}}
#' {The final \code{stan_seed} value; i.e., never NA.}
#' \item{\code{final_control}}
#' {The final control parameters used; i.e., if a parameter is not
#' provided.}
#' \item{\code{optional_inputs}}
#' {A record of the actual input values for the optional inputs, which are
#' \code{th0}, \code{init_seed}, \code{stan_seed}, and \code{control}.}
#' }
#'
#' @seealso
#' * [import_rc_data()] for the format of \code{rc_meas}
#' * [set_density_model()] for the format of \code{density_model}
#' * [load_calib_curve()] for the format of \code{calib_df}
#' @export
sample_theta <- function(rc_meas,
                         density_model,
                         hp,
                         calib_df,
                         th0=NA,
                         init_seed=NA,
                         stan_seed=NA,
                         calibration_curve="intcal20",
                         control=list()) {
  # Reject any control entry that is not one of the five recognized names.
  for (param_name in names(control)) {
    if (!(param_name %in% c("num_chains",
                            "samps_per_chain",
                            "warmup",
                            "stan_control",
                            "mask"))) {
      stop(paste0("Unsupported named parameter in control = ",param_name))
    }
  }
  # Save the optional inputs, which are stored in the return value
  optional_inputs <- list(th0=th0,
                          init_seed=init_seed,
                          stan_seed=stan_seed,
                          control=control)
  have_th0 <- !all(is.na(th0))
  have_init_seed <- !is.na(init_seed)
  # Raise an error if both th0 and init_seed are provided
  if(have_th0 && have_init_seed) {
    stop("init_seed should not be provided if th0 is provided")
  }
  # If necessary, draw the initialization seed
  if (!have_th0 && !have_init_seed) {
    init_seed <- sample.int(1000000,1)
  }
  # If necessary, draw the stan seed
  if (is.na(stan_seed)) {
    stan_seed <- sample.int(1000000,1)
  }
  # Unpack and/or define the control parameters, falling back to the defaults
  # documented in the roxygen header when an entry is absent.
  have_num_chains <- "num_chains" %in% names(control)
  have_samps_per_chain <- "samps_per_chain" %in% names(control)
  have_warmup <- "warmup" %in% names(control)
  have_stan_control <- "stan_control" %in% names(control)
  have_mask <- "mask" %in% names(control)
  if (have_num_chains) {
    num_chains <- control$num_chains
  } else {
    num_chains <- 4
  }
  if (have_samps_per_chain) {
    samps_per_chain <- control$samps_per_chain
  } else {
    samps_per_chain <- 2000
  }
  if (have_warmup) {
    warmup <- control$warmup
  } else {
    warmup <- floor(samps_per_chain / 2)
  }
  if (have_stan_control) {
    stan_control <- control$stan_control
  } else {
    stan_control <- NA
  }
  if (have_mask) {
    mask <- control$mask
  } else {
    mask <- FALSE
  }
  # Record the control values actually used (returned to the caller).
  final_control <- list(
    num_chains = num_chains,
    samps_per_chain = samps_per_chain,
    warmup = warmup,
    stan_control = stan_control,
    mask = mask
  )
  if (density_model$type == "trunc_gauss_mix") {
    # Stan needs all the inputs and hyperparameters as variables in R's
    # workspace (no explicit data argument is passed to rstan::stan below,
    # so the model's data block is resolved from this environment).
    tau_min <- density_model$tau_min
    tau_max <- density_model$tau_max
    tau <- seq(tau_min, tau_max, by = hp$dtau)
    alpha_s <- hp$alpha_s
    alpha_r <- hp$alpha_r
    alpha_d <- hp$alpha_d
    K <- density_model$K
    if (all(is.na(th0))) {
      # Then an initialization vector has not been provided. Call
      # init_trunc_gauss_mix to randomly initialize the parameter vector.
      th0 <- init_trunc_gauss_mix(K,
                                  1,
                                  tau_min,
                                  tau_max,
                                  input_seed=init_seed)
    }
    # stan expects the initial parameter to be a named list; th0 is packed as
    # [pi (K), mu (K), s (K)]. The same starting point is used for all chains.
    init0 <- list()
    init0$pi <- th0[ 0 + (1:K)]
    init0$mu <- th0[ K + (1:K)]
    init0$s <- th0[2*K + (1:K)]
    init_list <- list()
    for (cc in 1:num_chains) {
      init_list[[cc]] <- init0
    }
    N <- length(rc_meas$phi_m)
    G <- length(tau)
    K <- density_model$K
    if (!mask) {
      # Do not mask the likelihood: use the full measurement matrix over the
      # whole tau grid.
      dtau <- hp$dtau
      M <- calc_meas_matrix(tau, rc_meas$phi_m, rc_meas$sig_m, calib_df)
      Mt <- t(M)
      file_path <- system.file("stan/gaussmix.stan",
                               package = "baydem"
      )
    } else {
      # Mask the likelihood to speed up calculations. This very slightly
      # changes the likelihood value used for Bayesian updating, but the
      # effect is small.
      # Calibrate the dates using Bchron. The ranges of dates returned by the
      # Bchron calibration are used for the masking.
      N <- length(rc_meas$trc_m)
      calibrations <-
        Bchron::BchronCalibrate(ages=round(rc_meas$trc_m),
                                ageSds=round(rc_meas$sig_trc_m),
                                calCurves=rep(calibration_curve,N))
      # Calculate stacked_log_M and subsetting vectors. stacked_log_M contains
      # stacked measurement matrix values (logged), where the stacking is by
      # sample index, n. subset_length contains the length of each subset,
      # G_n = subset_length[n], and M_offset and tau_offset are, respectively,
      # the offsets to give the subsetting location for stacked_log_M and
      # tau.
      stacked_log_M <- c()
      subset_length <- rep(NA, N)
      M_offset <- rep(NA, N)
      tau_offset <- rep(NA, N)
      dtau <- hp$dtau
      for (n in 1:N) {
        # Get the minimum and maximum calendar dates (AD = 1950 - years BP)
        # for this observation, accounting for the grid spacing.
        calib <- calibrations[[n]]
        min_date_AD <- min(1950 - calib$ageGrid)
        max_date_AD <- max(1950 - calib$ageGrid)
        # Snap the per-observation window outward onto the dtau grid.
        if(dtau != 1) {
          tau_min_n <- min_date_AD - ( min_date_AD %% dtau)
          tau_max_n <- max_date_AD + (-max_date_AD %% dtau)
        } else {
          tau_min_n <- min_date_AD
          tau_max_n <- max_date_AD
        }
        # Clamp the window to the overall modeled range.
        if (tau_min_n < tau_min) {
          tau_min_n <- tau_min
        }
        if (tau_max_n > tau_max) {
          tau_max_n <- tau_max
        }
        # Calculate the subset measurement matrix values, and add them to
        # stacked_log_M. Also update the subsetting vectors.
        tau_n <- seq(tau_min_n, tau_max_n, by = dtau)
        M_n <- calc_meas_matrix(tau_n,
                                rc_meas$phi_m[n],
                                rc_meas$sig_m[n],
                                calib_df)
        M_offset[n] = length(stacked_log_M)
        G_n <- ncol(M_n)
        subset_length[n] <- G_n
        tau_offset[n] <- which(tau == tau_min_n) - 1
        stacked_log_M <- c(stacked_log_M, log(M_n))
      }
      # Because stan does not support vectors of integers (or even casting
      # from a real to an integer), place the minimum and maximum values of
      # the index vectors into the environment.
      stacked_log_M_length <- length(stacked_log_M)
      subset_length_min <- min(subset_length)
      subset_length_max <- max(subset_length)
      M_offset_min <- min(M_offset)
      M_offset_max <- max(M_offset)
      tau_offset_min <- min(tau_offset)
      tau_offset_max <- max(tau_offset)
      file_path <- system.file("stan/gaussmix_masked.stan", package = "baydem")
    }
    options(mc.cores = parallel::detectCores())
    # There are two possible calls depending on whether have_stan_control is
    # TRUE (rstan::stan errors if control = NA is passed explicitly).
    if (have_stan_control) {
      fit <- rstan::stan(file_path,
                         chains = num_chains,
                         iter = samps_per_chain,
                         warmup = warmup,
                         seed=stan_seed,
                         init = init_list,
                         control = stan_control)
    } else {
      fit <- rstan::stan(file_path,
                         chains = num_chains,
                         iter = samps_per_chain,
                         warmup = warmup,
                         seed=stan_seed,
                         init = init_list)
    }
  } else {
    # NOTE(review): the check above uses density_model$type but this message
    # references density_model$fit_type -- likely these should match.
    stop(paste("Unrecognized fit type:", density_model$fit_type))
  }
  # Package the fit together with the finalized seeds/controls so the run is
  # reproducible from the return value alone.
  bayesian_soln <- list(fit=fit,
                        final_th0=th0,
                        final_init_seed=init_seed,
                        final_stan_seed=stan_seed,
                        final_control=final_control,
                        optional_inputs=optional_inputs)
  class(bayesian_soln) <- "bd_bayesian_soln"
  return(bayesian_soln)
}
#' @title
#' Calculate some key summary measures using the result of a call to
#' \code{sample_theta}
#'
#' @description
#' \code{sample_theta} calls Stan to do Bayesian inference by
#' generating a sample of parameters from the posterior of theta (or \code{th}).
#' \code{sample_theta} analyzes the result of that inference. Notably,
#' it calculates the quantiles of the density function and the growth rate.
#'
#' @details \code{bayesian_soln} is the result of a call to
#' \code{sample_theta}. It contains posterior samples for the density
#' model. The primary thing \code{summarize_bayesian_inference} does is
#' calculate quantiles of both the parameterized density and growth rate. For
#' example, for a calendar date tau_g each sample yields a density and growth
#' rate. The quantile is the value of the density or growth rate such that a
#' given proportion of samples are smaller than that value. The probabilities
#' used to calculate these quantiles are `probs = c(lev, 0.5, 1-lev)`, where
#' `lev` is the level (0.025 by default, so that 95% of the observations lie
#' between the first and last quantile bands).
#'
#' In addition, \code{summarize_bayesian_inference} identifies calendar dates
#' for which the growth rate quantiles defined by `lev` and `1 - lev` do not
#' contain zero. This indicates significant positive or negative growth for the
#' density curve. The output vector `growth_state` codes calendar dates by
#' growth state as 'negative', 'zero', and 'positive'. For the Gaussian mixture
#' parameterization of the density, the rate is not typically meaningful near
#' the calendar date boundaries where it increases linearly as the calendar date
#' goes to positive or negative infinity. The parameter `rate_prop` provides
#' control on how calendar dates are classified by growth rate near these
#' boundaries. In particular, the calendar dates with a cumulative density (50%
#' quantile) below `rate_prop` (for the lower boundary) or above `1 - rate_prop`
#' (for the upper boundary) are classified as 'missing' in `growth_state`. By
#' default, `rate_prop` is NA and no calendar dates are classified as missing.
#'
#' By default, a summary is done for each sample by calling summarize_sample.
#' This is not done if do_summary is FALSE.
#'
#' @param bayesian_soln The solution, a list-like object of class
#' bd_bayesian_soln (see sample_theta).
#' @param rc_meas The radiocarbon measurements (see import_rc_data).
#' @param density_model The density model (see set_density_model).
#' @param calib_df The calibration data frame (see load_calib_curve).
#' @param dtau The spacing of the sampling grid (default: 5).
#' @param th_sim The known parameters used to create simulation data (default:
#' NA, not provided).
#' @param lev The level to use for the quantile bands (default: 0.025).
#' @param rate_prop The cumulative density needed to define rate growth bands
#' (default: NA, not used).
#' @param do_sample_summaries Whether to calculate some summary information for
#' each sampled curve (Default: TRUE).
#'
#' @return A list with information on the quantiles of the density function and
#' growth rate (and sample summaries)
#'
#' @seealso
#' * [import_rc_data()] for the format of \code{rc_meas}
#' * [set_density_model()] for the format of \code{density_model}
#' * [load_calib_curve()] for the format of \code{calib_df}
#'
#' @export
summarize_bayesian_inference <- function(bayesian_soln,
                                         rc_meas,
                                         density_model,
                                         calib_df,
                                         dtau=5,
                                         th_sim = NA,
                                         lev = 0.025,
                                         rate_prop = NA,
                                         do_sample_summaries = T) {
  # NOTE(review): prefer TRUE over T for the do_sample_summaries default.
  if (density_model$type != "trunc_gauss_mix") {
    stop(paste0("Currently, only truncated Gaussian mixtures are supported ",
                "for the density model"))
  }
  # Build the calendar-date grid over the truncation range.
  tau_min <- density_model$tau_min
  tau_max <- density_model$tau_max
  tau <- seq(tau_min,tau_max,dtau)
  # The probabilities to use to calculate the quantiles
  probs <- c(lev, 0.5, 1 - lev)
  # Extract the samples of theta in the variable TH to create TH, a matrix with
  # dimensions N x Number of Parameters, where N is the number of Bayesian
  # samples
  TH <- extract_param(bayesian_soln$fit)
  num_samp <- nrow(TH)
  # Calculate the pdf matrix, which is the probability density for the input
  # density_model evaluated for each sample and at each grid point in the vector
  # tau. f_mat has dimensions N x G, where G is the number of grid points
  # (length of the vector tau).
  f_mat <- calc_gauss_mix_pdf_mat(TH,
                                  tau,
                                  tau_min=tau_min,
                                  tau_max=tau_max)
  # Calculate the rate for each sample and grid point. The rate is f' / f, where
  # f is the probability density and f' is the derivative of f.
  rate_mat <- calc_gauss_mix_pdf_mat(TH,
                                     tau,
                                     tau_min=tau_min,
                                     tau_max=tau_max,
                                     type="rate")
  # Calculate the quantiles of the probability density
  Qdens <- calc_quantiles(f_mat, probs)
  # Extract the 50% quantile of the probability density (which is not itself a
  # probability density; that is, it does not integrate to 1). By construction,
  # the 50% quantile is the second row of the matrix Qdens.
  f50 <- Qdens[2, ]
  # Restrict to indices with enough probability mass (if necessary): require
  # at least rate_prop cumulative mass on both sides of each grid point.
  if (!is.na(rate_prop)) {
    rate_ind <- which(cumsum(f50 * dtau) > rate_prop &
                        rev(cumsum(rev(f50) * dtau)) > rate_prop)
  } else {
    rate_ind <- 1:length(f50)
  }
  # Identify regions with growth rates that differ from zero per the input
  # quantile level (lev).
  Qrate <- calc_quantiles(rate_mat[, rate_ind], probs)
  # Create growth_state0, a base vector summarizing the growth rate states.
  # growth_state0 has the value "negative" for significant negative growth,
  # "positive" for significant positive growth, and "zero" otherwise.
  # growth_state is identical to growth_state0, except that it has the value
  # "missing" for observations without enough probability mass per the rate_prop
  # condition.
  growth_state0 <- rep("zero", length(rate_ind))
  growth_state0[Qrate[2, ] > 0 & Qrate[1, ] > 0] <- "positive"
  growth_state0[Qrate[2, ] < 0 & Qrate[3, ] < 0] <- "negative"
  growth_state <- rep("missing", length(tau))
  growth_state[rate_ind] <- growth_state0
  # Calculate the measurement matrix
  M <- calc_meas_matrix(tau,
                        rc_meas$phi_m,
                        rc_meas$sig_m,
                        calib_df)
  # TODO: consider adding a separate function to calculate the SPD
  # Normalize the measurement matrix by row so each sample's row integrates
  # to 1 on the tau grid.
  M <- M / replicate(length(tau),rowSums(M)*dtau)
  # Calculate summed probability density (SPD) vector
  f_spdf <- colMeans(M)
  # Create the output list
  out <- list(
    tau = tau,
    f_spdf = f_spdf,
    Qdens = Qdens,
    Qrate = Qrate,
    probs = probs,
    rate_prop = rate_prop,
    rate_ind = rate_ind,
    growth_state = growth_state
  )
  class(out) <- "bd_bayesian_summary"
  # Optionally summarize each posterior sample (growth periods, peak, pattern).
  if (do_sample_summaries) {
    summ_list <- list()
    for (n in 1:num_samp) {
      th <- TH[n, ]
      summ_list[[n]] <- summarize_trunc_gauss_mix_sample(th,
                                                         tau_min,
                                                         tau_max)
    }
    out$summ_list <- summ_list
  }
  # If the simulation truth is provided, evaluate its density and rate on the
  # same grid for comparison.
  have_sim <- !all(is.na(th_sim))
  if (have_sim) {
    f_sim <- calc_gauss_mix_pdf(th_sim,
                                tau,
                                tau_min=tau_min,
                                tau_max=tau_max)
    rate_sim <- calc_gauss_mix_pdf(th_sim,
                                   tau,
                                   tau_min=tau_min,
                                   tau_max=tau_max,
                                   type = "rate")
    out$f_sim <- f_sim
    out$rate_sim <- rate_sim
  }
  return(out)
}
#' @title
#' Extract the Bayesian samples for a Gaussian mixture model generated by
#' \code{sample_theta}
#'
#' @description
#' The input fit is the result of a call to stan by
#' \code{sample_theta}, of class stanfit. Return a matrix TH with
#' dimensions S x (3*K), where S is the number of samples (across all chains,
#' and excluding warmup), and K is the number of mixtures.
#'
#' @param fit The fit from stan, of class stanfit
#'
#' @return A matrix of samples with dimensions S by (3*K), where S is the number
#' of non-warmup samples
#' @export
extract_param <- function(fit) {
  # Use inherits() rather than class(fit) == "stanfit": class() can return a
  # vector (warning/error under comparison), and for S4 objects such as
  # stanfit inherits() correctly consults the formal class hierarchy via is().
  if (!inherits(fit, "stanfit")) {
    stop(paste("Expected fit to be class stanfit, but it is", class(fit)))
  }
  # Number of mixture components: one "mu[k]" parameter name per mixture.
  # startsWith() is vectorized, so no lapply/unlist round trip is needed.
  K <- sum(startsWith(names(fit), "mu["))
  # The first 3*K columns of the sample matrix are pi, mu, and s for each
  # mixture; trailing columns (e.g. lp__) are dropped.
  TH <- as.matrix(fit)[, 1:(3 * K)]
  return(TH)
}
#' @title
#' Identify growth periods and the peak value for a truncated Gaussian mixture
#'
#' @description
#' The input vector th parameterizes a Gaussian mixture, and tau_min / tau_max
#' give the limits of truncation. Summarize the sample by identifying growth /
#' decay periods and the peak value using the following procedure.
#'
#' (1) Calculate the derivative, f'(t), at the points t =
#' seq(tau_min,tau_max,len=N), where N is 1000 by default.
#'
#' (2) Identify points where f'(t) changes sign, then numerically estimate the
#' crossing point between the two t values where there was a sign change.
#'
#' (3) Create a vector of critical points, t_crit, which includes
#' tau_min / tau_max as well as the crossing points found in the preceding
#' step.
#'
#' (4) Calculate the density at the critical points to identify the peak value,
#' f_peak, and corresponding calendar date, t_peak, as well as the index of
#' the peak in t_crit, ind_peak.
#'
#' (5) For each time period (the length(t_peak)-1 durations defined by t_peak)
#' determine the sign of the density function, f(t), and create a character
#' vector, slope, that has the value 'pos' if f(t) is positive and 'neg' if
#' f(t) is negative.
#'
#' (6) Finally, create a character vector, pattern, that appends the index of
#' the peak in t_crit (converted to a character) to the character vector
#' slope. This defines a unique pattern of the sample that takes into
#' account periods of growth / decline and the relative location of the
#' peak.
#'
#' @param th The Gaussian mixture parameterization
#' @param tau_min The lower truncation value
#' @param tau_max The upper truncation value
#' @param N The number of points use for identifying slope changes
#' (default: 1000)
#'
#' @return A list consisting of:
#' \itemize{
#' \item{\code{periods}}
#' {A data-frame where the columns t_lo/t_hi indicate the starting and
#' ending calendars dates of periods and slope is negative if the growth
#' rate is negative over that time period and positive if it is positive.}
#' \item{\code{ind_peak}}
#' {The index of the period in the data-frame \code{periods} with the peak
#' value of the density.}
#' \item{\code{t_peak}}
#' {The calendar date of the peak value of the density.}
#' \item{\code{f_peak}}
#' {The value of the density function at the peak calendar date.}
#' \item{\code{pattern}}
#' {A unique pattern that summaries the periods of growth/decary and
#' relative locaiton of the peak (see Description).}
#' }
#'
#' @export
summarize_trunc_gauss_mix_sample <- function(th,
                                             tau_min,
                                             tau_max,
                                             N = 1000) {
  # (1) Evaluate the derivative of the mixture density on a regular grid.
  grid <- seq(tau_min, tau_max, len = N)
  deriv_fun <- function(t) {
    calc_gauss_mix_pdf(th, t, tau_min, tau_max, type = "derivative")
  }
  deriv_on_grid <- deriv_fun(grid)
  # (2) A sign change between adjacent grid points brackets an interior
  #     critical point; refine each bracket with a numerical root finder.
  sign_change <- which(deriv_on_grid[-length(deriv_on_grid)] *
                         deriv_on_grid[-1] < 0)
  num_cross <- length(sign_change)
  t_cross <- rep(NA, num_cross)
  f_cross <- rep(NA, num_cross)
  if (num_cross > 0) {
    for (m in seq_len(num_cross)) {
      bracket_lo <- grid[sign_change[m]]
      bracket_hi <- grid[sign_change[m] + 1]
      t_cross[m] <- stats::uniroot(deriv_fun,
                                   lower = bracket_lo,
                                   upper = bracket_hi)$root
      f_cross[m] <- calc_gauss_mix_pdf(th, t_cross[m], tau_min, tau_max)
    }
  }
  # (3-4) The critical points are the truncation boundaries plus the interior
  #       crossings; the peak is the critical point with the largest density.
  t_crit <- c(tau_min, t_cross, tau_max)
  f_crit <- c(calc_gauss_mix_pdf(th, tau_min, tau_min, tau_max),
              f_cross,
              calc_gauss_mix_pdf(th, tau_max, tau_min, tau_max))
  ind_peak <- which.max(f_crit)
  t_peak <- t_crit[ind_peak]
  f_peak <- f_crit[ind_peak]
  # (5) Classify each period between consecutive critical points as growing
  #     ("pos") or declining ("neg") from the sign of the density change.
  num_per <- length(t_crit) - 1
  per_start <- t_crit[seq_len(num_per)]
  per_end <- t_crit[seq_len(num_per) + 1]
  slope <- rep("pos", num_per)
  slope[diff(f_crit) < 0] <- "neg"
  # (6) The pattern appends the (character) peak index to the slope signs,
  #     uniquely encoding the growth/decline shape and peak location.
  list(periods = data.frame(t_lo = per_start,
                            t_hi = per_end,
                            slope = slope),
       ind_peak = ind_peak,
       t_peak = t_peak,
       f_peak = f_peak,
       pattern = c(slope, as.character(ind_peak)))
}
#' @title
#' Calculate the quantiles for an input matrix X
#'
#' @description
#' The input matrix X has dimensions S x G, where S is the number of samples and
#' G the number of grid points at which X was evaluated. Calculate quantiles for
#' each grid point, g = 1,2,..G.
#'
#' @param X The matrix for which quantiles are calculated, with dimension S x G
#' @param probs The probability values at which to calculate the quantiles
#' (default: `c(0.025, 0.5, 0.975)`)
#'
#' @return The quantiles, a matrix with dimension length(probs) x G
#'
#' @export
calc_quantiles <- function(X, probs = c(.025, .5, .975)) {
  # Number of quantile levels and grid points (columns of X).
  num_quant <- length(probs)
  G <- ncol(X)
  # Preallocate as numeric; column g holds the quantiles of X[, g].
  Q <- matrix(NA_real_, num_quant, G)
  # seq_len() (rather than 1:G) yields an empty loop when X has zero
  # columns, returning a 0-column matrix instead of erroring on X[, 1].
  for (g in seq_len(G)) {
    Q[, g] <- stats::quantile(X[, g], probs = probs)
  }
  return(Q)
}
#' @title
#' For each sample, calculate the time it takes for the density to decrease by
#' half from the peak (or by another use-provided ratio)
#'
#' @details
#' For each sample, calculate the time it takes for the density to decrease by
#' half from the peak. Optionally, a different proportion can be used than the
#' default prop_change = 0.5. For example, with prop_change = 0.1 the time it
#' takes for the density to decrease by 10% is used. If the relative density is
#' not reached, the half life for the sample is set to NA. If there is no
#' interior peak in the range peak_range, which is tau_min to tau_max by
#' default, the half life is set to NA.
#'
#' @param bayesian_soln The solution, a list-like object of class
#' bd_bayesian_soln (see sample_theta).
#' @param density_model The density model (see set_density_model).
#' @param rc_meas The radiocarbon measurements (see import_rc_data; optional:
#' if not provided, bayesian_summary must be provided).
#' @param calib_df The calibration data frame (see load_calib_curve; optional:
#' if not provided, bayesian_summary must be provided).
#' @param prop_change The relative decrease in density to use for the duration
#' calculation (default: 0.5).
#' @param bayesian_summary The result of a call to summarize_bayesian_inference.
#' (optional; if not provided, it is calculated, which requires that rc_meas
#' and calib_df be provided).
#' @param peak_range A range over which to search for the peak of the density
#' function (default: NA, which means that tau_min to tau_max is used for the
#' range).
#'
#' @return A vector of "half-lives" (proportional change set by prop_change)
#'
#' @seealso
#' * [import_rc_data()] for the format of \code{rc_meas}
#' * [set_density_model()] for the format of \code{density_model}
#' * [load_calib_curve()] for the format of \code{calib_df}
#'
#' @export
calc_half_life_from_peak <- function(bayesian_soln,
                                     density_model,
                                     rc_meas=list(),
                                     calib_df=list(),
                                     prop_change=0.5,
                                     bayesian_summary=NA,
                                     peak_range = NA) {
  if (density_model$type != "trunc_gauss_mix") {
    stop(paste0("Currently, only truncated Gaussian mixtures are supported ",
                "for the density model"))
  }
  tau_min <- density_model$tau_min
  tau_max <- density_model$tau_max
  # TH is the N x (3*K) matrix of posterior parameter samples.
  TH <- extract_param(bayesian_soln$fit)
  N <- nrow(TH)
  # If the caller did not supply a summary, compute it here (which requires
  # rc_meas and calib_df).
  if (all(is.na(bayesian_summary))) {
    if (length(rc_meas) == 0) {
      stop("rc_meas must be provided if bayesian_summary is not provided")
    }
    if (length(calib_df) == 0) {
      stop("calib_df must be provided if bayesian_summary is not provided")
    }
    bayesian_summary <- summarize_bayesian_inference(bayesian_soln,
                                                     rc_meas,
                                                     density_model,
                                                     calib_df)
  }
  # Per-sample period/peak summaries (see summarize_trunc_gauss_mix_sample).
  summ_list <- bayesian_summary$summ_list
  if (all(is.na(peak_range))) {
    peak_range <- c(tau_min, tau_max)
  }
  # One half-life per posterior sample; entries stay NA when the peak lies on
  # a truncation boundary or the target density is never reached.
  half_life <- rep(NA, N)
  for (n in 1:N) {
    th <- TH[n, ]
    # Identify the peak, ensuring it is in peak_range
    # critical points: period start dates plus the final period end date
    t_crit <-
      c(summ_list[[n]]$periods$t_lo,
        summ_list[[n]]$periods$t_hi[length(summ_list[[n]]$periods$t_hi)])
    t_crit <- t_crit[peak_range[1] <= t_crit & t_crit <= peak_range[2]]
    f_crit <- calc_gauss_mix_pdf(th, t_crit, tau_min, tau_max)
    ind_peak <- which.max(f_crit)
    t_peak <- t_crit[ind_peak]
    f_peak <- f_crit[ind_peak]
    # Only interior peaks are meaningful; boundary peaks yield NA.
    is_in <- tau_min < t_peak && t_peak < tau_max
    if (is_in) {
      # Function for root finder: zero where the density equals
      # prop_change * f_peak.
      root_fun <- function(t) {
        return(f_peak * prop_change - calc_gauss_mix_pdf(th,
                                                         t,
                                                         tau_min,
                                                         tau_max,
                                                         type = "density"))
      }
      # Find root. Catch any errors in case the half life does not exist on
      # the interval tpeak to taumax (uniroot requires a sign change); the
      # entry then silently stays NA.
      # NOTE(review): `result` is never used afterwards -- the assignment
      # inside tryCatch updates half_life[n] as a side effect.
      result <- tryCatch(
        {
          root <- stats::uniroot(root_fun,
                                 lower = t_peak,
                                 upper = peak_range[2]
          )
          half_life[n] <- min(root$root - t_peak)
        },
        error = function(e) {
          NA
        }
      )
    }
  }
  return(half_life)
}
#' @title
#' Calculate the relative density at two dates (or a range of dates / the peak)
#'
#' @description
#' Calculate the relative density for two dates or, more generally, for two
#' different specifications of the density aside from a simple date. The
#' additional specifications that are supported are the peak value and the mean
#' density on an interval. For a simple date, spec1/spec2 should be scalar
#' real numbers. For a date range, spec1/spec2 should be real vectors with a
#' length of 2. For the peak, spec1/spec2 should be the string 'peak'.
#'
#' By default, this calculation is done for all the Bayesian samples in
#' bayesian_soln which is the result of a call to \code{sample_theta}.
#' Optionally, a subset can be specified via the input ind, which should be a
#' vector of integer indices at which to do the calculation. To save computation
#' if either spec1 or spec2 is 'peak', the result of a call to
#' \code{summarize_bayesian_inference} for which \code{do_summary} was TRUE can
#' be input.
#'
#'
#' @param bayesian_soln The result of a call to sample_theta
#' @param density_model The density model (see set_density_model).
#' @param spec1 The specification for the first density (see details)
#' @param spec2 The specification for the second density (see details)
#' @param rc_meas The radiocarbon measurements (see import_rc_data; optional:
#' if not provided, bayesian_summary must be provided).
#' @param calib_df The calibration data frame (see load_calib_curve; optional:
#' if not provided, bayesian_summary must be provided).
#' @param bayesian_summary The result of a call to summarize_bayesian_inference.
#' (optional; if not provided, it is calculated, which requires that rc_meas
#' and calib_df be provided).
#' @param ind Indices at which to do the calculation (optional; by default, all
#' the samples in bayesian_summary are used).
#'
#' @return A vector of relative densities (f_spec1 / f_spec2)
#'
#' @seealso
#' * [import_rc_data()] for the format of \code{rc_meas}
#' * [set_density_model()] for the format of \code{density_model}
#' * [load_calib_curve()] for the format of \code{calib_df}
#'
#' @export
calc_relative_density <- function(bayesian_soln,
                                  density_model,
                                  spec1,
                                  spec2,
                                  rc_meas=list(),
                                  calib_df=list(),
                                  ind=NA,
                                  bayesian_summary=NA) {
  # One row per Bayesian sample of the density-model parameter vector
  TH <- extract_param(bayesian_soln$fit)
  N <- nrow(TH)
  # By default, do the calculation for every sample (seq_len is safe for N = 0)
  if (all(is.na(ind))) {
    ind <- seq_len(N)
  }
  # Interpret and do error checking on inputs by calling the helper function
  # unpack_spec (use TRUE/FALSE, not the reassignable shorthands T/F)
  spec1 <- unpack_spec(spec1, density_model, TRUE)
  spec2 <- unpack_spec(spec2, density_model, FALSE)
  # The per-sample summaries are needed only when either spec is the peak
  summ_list <- NULL
  if (spec1$type == "peak" || spec2$type == "peak") {
    if (all(is.na(bayesian_summary))) {
      if (length(rc_meas) == 0) {
        stop("rc_meas must be provided if bayesian_summary is not provided")
      }
      if (length(calib_df) == 0) {
        stop("calib_df must be provided if bayesian_summary is not provided")
      }
      # If ind is not NA, the following line may involve un-utilized computation
      bayesian_summary <- summarize_bayesian_inference(bayesian_soln,
                                                       rc_meas,
                                                       density_model,
                                                       calib_df)
    }
    summ_list <- bayesian_summary$summ_list[ind]
  }
  # Evaluate the density for one unpacked specification; shared by both specs
  # so the point/range/peak dispatch is written only once
  density_for_spec <- function(spec) {
    if (spec$type == "point") {
      calc_point_density(TH[ind, ], density_model, spec$value)
    } else if (spec$type == "range") {
      calc_range_density(TH[ind, ], density_model, spec$lower, spec$upper)
    } else if (spec$type == "peak") {
      calc_peak_density(summ_list)
    } else {
      # unpack_spec only produces the three types above; guard regardless
      stop("Unsupported spec type")
    }
  }
  return(density_for_spec(spec1) / density_for_spec(spec2))
}
# A helper function to unpack and do error checking on the spec1 / spec2
# inputs of calc_relative_density.
#
# spec          - a scalar date, a length-2 date range, or the string "peak"
# density_model - a list with elements tau_min and tau_max giving the support
# is_one        - TRUE if this is spec1, FALSE if spec2 (error messages only)
#
# Returns a list with a type field ("point", "range", or "peak") plus the
# associated values; throws an informative error for any unsupported input.
unpack_spec <- function(spec, density_model, is_one) {
  # Label used in error messages so the caller knows which input is bad
  s <- if (is_one) "spec1" else "spec2"
  # TRUE when the scalar t lies inside the support [tau_min, tau_max]
  in_support <- function(t) {
    density_model$tau_min <= t && t <= density_model$tau_max
  }
  if (is.numeric(spec)) {
    if (length(spec) == 1) { # Numeric / length 1: a single date
      if (!in_support(spec)) {
        stop(paste(s,
                   "is a single date, but not in the range tau_min to tau_max"))
      }
      return(list(type = "point", value = spec))
    } else if (length(spec) == 2) { # Numeric / length 2: a date range
      lower <- spec[1]
      upper <- spec[2]
      if (!in_support(lower)) {
        stop(paste(s, "is a date range, but lower value is not in the range",
                   "tau_min to tau_max"))
      }
      if (!in_support(upper)) {
        stop(paste(s, "is a date range, but upper value is not in the range",
                   "tau_min to tau_max"))
      }
      if (lower > upper) {
        stop(paste(s, "is a date range, but lower value is greater than",
                   "upper value"))
      }
      return(list(type = "range", lower = lower, upper = upper))
    } else { # Numeric / not length 1 or 2
      stop(
        paste(s, "is numeric, but is neither a single date nor a date range"))
    }
  } else if (is.character(spec)) { # Character: only "peak" is accepted
    # identical() keeps the condition scalar even for character vectors
    if (identical(spec, "peak")) {
      return(list(type = "peak"))
    }
    stop(paste(s, "is a character, but not equal to peak"))
  } else { # Neither character nor numeric
    stop(paste(s, "is neither numeric nor a character"))
  }
}
# Helper: density of the (truncated Gaussian mixture) model evaluated at a
# single date t, one value per parameter draw in TH.
calc_point_density <- function(TH, density_model, t) {
  pdf_values <- calc_gauss_mix_pdf_mat(TH,
                                       t,
                                       tau_min = density_model$tau_min,
                                       tau_max = density_model$tau_max)
  as.numeric(pdf_values)
}
# Helper: mean density over the interval [t_lo, t_hi], one value per
# parameter draw in TH (CDF difference divided by the interval width).
calc_range_density <- function(TH, density_model, t_lo, t_hi) {
  # Cumulative distribution evaluated at a single date, per draw
  cdf_at <- function(t) {
    as.numeric(calc_gauss_mix_pdf_mat(TH,
                                      t,
                                      tau_min = density_model$tau_min,
                                      tau_max = density_model$tau_max,
                                      type = "cumulative"))
  }
  (cdf_at(t_hi) - cdf_at(t_lo)) / (t_hi - t_lo)
}
# Helper: collect the previously computed peak density (f_peak) from each
# per-sample summary into a single numeric vector.
calc_peak_density <- function(summ_list) {
  peak_values <- lapply(summ_list, function(entry) entry$f_peak)
  unlist(peak_values)
}
a1af2c13edde6d6895cd9cda1930f670e7c05583 | 739b236b8f507f02054b65e49e22674f5c426ff5 | /Q1.R | ef0661754219d9dc77bdefd64a5ee3d3794e6e2e | [] | no_license | sumitIO/Exploratory-Data-Analysis-Project | 9e159486abb7958ab8623b1b04f3b4ec7252b7d6 | e995c0b792fb189c0f88e2191d0128485caed451 | refs/heads/master | 2022-04-06T22:35:31.291621 | 2020-02-11T13:32:04 | 2020-02-11T13:32:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 351 | r | Q1.R | NEI <- readRDS("summarySCC_PM25.rds")
# Source classification table (loaded for parity with the assignment data;
# this particular plot only needs NEI, which is read above).
SCC <- readRDS("Source_Classification_Code.rds")
# Total PM2.5 emissions per year, summed over every source
emissions_by_year <- aggregate(NEI$Emissions, by = list(NEI$year), FUN = sum)
png(filename = "plot1.png")
plot(emissions_by_year$Group.1, emissions_by_year$x,
     type = "l",
     main = "Total Emission Variation(1999-2008)",
     xlab = "Years",
     ylab = "Total Estimation of PM2.5 (tones)")
dev.off()
be4fcf30a8b2d67640dbc3b777bfb8e80f22510a | 3eb01fdb8094b35bb1f3bad856ab60881dceb9e3 | /Basic Data Visualization.R | 7f79e0fbd7b9b695d54b9f8b6d34f00289b09247 | [] | no_license | saurav-datageek/Data-Science-R-Programming-Scripts | 61ba4b17ed6122d250095c0809cc8a66c7c10967 | 350853ad9b55f227a3224fc54676f3c16a521b7d | refs/heads/main | 2023-07-24T01:58:29.922808 | 2021-09-06T09:59:14 | 2021-09-06T09:59:14 | 403,569,675 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,821 | r | Basic Data Visualization.R |
### Code to perform basic Data Visualization in R ###
### Dataset used is the mtcars dataset, which is the built in dataset in R ###
library(dplyr)
library(ggplot2)
### Data Visualization ###
### Used in two different stages in Data Science ###
### First is during Exploratory Data Analysis ### EDA ###
df_mtcars <- mtcars
head(df_mtcars)
sapply(df_mtcars,table)
summary(df_mtcars)
### Quartiles ### Q1, Q2 and Q3 ### Same logic as Median. Q2=Median ###
fivenum(df_mtcars$wt)
filter(df_mtcars, wt > 5.3)
### Weight ### We want to explore about Weight ###
hist(df_mtcars$wt,main = "Histogram of Weight")
help("hist")
boxplot(df_mtcars$wt,ylim=c(0,10))
### Histogram vs Bar Plot ###
### Histogram is used for Cont. Variables, and Bar Plot is used for Categorical Variables.
barplot(df_mtcars$cyl)
table(df_mtcars$cyl)
barplot(table(df_mtcars$cyl))
### Till now, we have just used one variable for analysis. This type of EDA is known as Univariate Analysis. ###
### Multivariate Analysis ###
### Bi-Variate Analysis ###
cor(df_mtcars$mpg, df_mtcars$wt) # -0.86 which means strong negative (linear) relationship
### Scatter Plot ###
plot(df_mtcars$wt, df_mtcars$mpg, main = "Weight vs Mpg", xlab = "Weight", ylab = "Mpg",col="red")
### Doing Scatter Plot where input is Categorical Variable ###
plot(df_mtcars$cyl, df_mtcars$mpg)
boxplot(df_mtcars$mpg ~ df_mtcars$cyl)
### Once we go towards more complex visualisation, we usually use other libraries in R ###
### And the most commonly used one is ggplot2 ###
## gg means Grammar of Graphics, plot means plot, 2 means its the second version 2.0 ##
# install.packages("ggplot2")
### ggplot2 is created by the same guy who created dplyr ###
### Just for scatter plot, I need to write more letters into my code ###
plot(df_mtcars$wt, df_mtcars$mpg)
ggplot(df_mtcars, aes(wt, mpg)) + geom_point() + stat_smooth(method = lm)
### Grammar gives us some order in daily language ### Some structure ###
### ggplot takes 3 things into consideration ###
### First is Data ### Second is Variables (what variables you want to use) ### Third is what plot you want to visualise ###
### data comes in data part, variables come under Aesthetics part (aes), and the plot comes under geometry part (geom)
ggplot(df_mtcars, aes(wt)) + geom_histogram(bins = 30)
ggplot(df_mtcars, aes(cyl)) + geom_bar()
### Till now, we have visualized two variables at a time ###
### What if we want to do more than two? ###
ggplot(df_mtcars, aes(wt, mpg,col=as.factor(cyl))) + geom_point()
class(df_mtcars$cyl)
### What if we want to use 4 Variables at a time? ###
ggplot(df_mtcars, aes(wt, mpg,col=as.factor(cyl), shape=as.factor(am))) + geom_point()
### If one were to try above line code using plot, it would be way too complicated ###
|
787d39702fea824f6dc8ce7f0d0134167418e677 | 36d73bd4ec51b24f9aa427003d41ace725c23a14 | /inst/scripts/make-upload.R | 4fd7b9b3825d79e5d985c0a29d36e9aa209a9f87 | [] | no_license | drisso/SingleCellMultiModal | f613c4f7b7470f27ee25445160ecd798bdd5f89c | 2685521119f5b162809da3f5f73dab01cb08a1de | refs/heads/master | 2022-11-07T04:59:38.832585 | 2020-06-23T13:33:40 | 2020-06-23T13:33:40 | 279,685,426 | 1 | 0 | null | 2020-07-14T20:22:47 | 2020-07-14T20:22:46 | null | UTF-8 | R | false | false | 1,125 | r | make-upload.R | .getDataFiles <- function(directory = "~/data/scmm",
dataDir = "mouse_gastrulation", pattern = allextpat) {
location <- file.path(directory, dataDir)
list.files(location, pattern = pattern, full.names = TRUE, recursive = TRUE)
}
# upload files to AWS S3

# Matches serialized R data files: a filename ending in ".rda" in any
# combination of upper/lower case (e.g. .rda, .Rda, .RDA).
allextpat <- "\\.[Rr][Dd][Aa]$"

# IMPORTANT!
# Make sure that AWS_DEFAULT_REGION, AWS_ACCESS_KEY_ID, and
# AWS_SECRET_ACCESS_KEY are set in the ~/.Renviron file

# Pull in the metadata helpers that accompany the upload step
# (assumes the script is run from its own directory -- TODO confirm)
source("make-metadata.R")
# Upload the serialized data files for one dataset folder to the project's
# ExperimentHub S3 bucket. dataType names the folder under `directory`;
# fileExt filters which files are collected. Nothing is transferred unless
# upload = TRUE, so the default call is a dry run.
upload_aws <- function(
    dataType, directory = "~/data/scmm",
    upload = FALSE, fileExt = allextpat
) {
    if (missing(dataType)) {
        stop("Enter a 'dataType' folder")
    }
    local_files <- .getDataFiles(directory = directory,
                                 dataDir = dataType,
                                 pattern = fileExt)
    # Destination path inside the ExperimentHub bucket
    remote_dir <- file.path("experimenthub", "SingleCellMultiModal", dataType)
    if (upload) {
        AnnotationHubData:::upload_to_S3(file = local_files,
                                         remotename = basename(local_files),
                                         bucket = remote_dir)
    }
}
# upload_aws(dataType = "mouse_gastrulation", upload=TRUE)
# upload_aws(dataType = "mouse_visual_cortex", upload=TRUE)
|
2adf7942aa6e6cd5a752e1c216345207c8786adb | ba4b9d6b12afa90de9bca88dd9a6f6b7315702e6 | /nn_model_depression_vs_control.R | ad75276863046f6d90356aebff3b6d67db48079e | [
"MIT"
] | permissive | nurolab/Depmet | bff5ba9d7e3c8c204f61698e20a52cd009a1bd0c | d8c25a47d79699aabe8faa290dd96c441d2dc7a1 | refs/heads/master | 2021-06-14T21:03:53.432467 | 2017-04-03T09:07:39 | 2017-04-03T09:07:39 | 53,113,560 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,269 | r | nn_model_depression_vs_control.R | # R script to model depression data using neural networks
# Author: Deepak Sharma
# Date: 26 March 2017
# Import libraries --------------------------------------------------------
library(RSNNS)
# Loading data
# Expects depression_vs_control.csv in the working directory; it must have a
# 'Type' column holding the class label -- presumably depression vs control;
# confirm against the data file.
depression_vs_control <- read.csv("depression_vs_control.csv")
dvc <- depression_vs_control
# Creating modeling variables
# Predictor: every column except the class label; Target: the class label
Predictor <- subset(depression_vs_control, select=-Type)
Target <- noquote(depression_vs_control$Type)
# MLP model ---------------------------------------------------------------
# NOTE(review): attach() is generally discouraged (name-masking risk), and
# Predictor/Target are already ordinary variables -- confirm nothing below
# depends on the attached columns before removing this call.
attach(dvc)
# Encode the class labels into the matrix form RSNNS networks expect
DecTarget <- decodeClassLabels(Target)
# dvc is re-bound here: from the raw data frame to the RSNNS
# train/test split (ratio = 0.15 held out), then normalized
dvc <- splitForTrainingAndTest(Predictor, DecTarget, ratio = 0.15)
dvc <- normTrainingAndTestSet(dvc)
# Multi-layer perceptron (size = 5, 60 iterations); the held-out split is
# passed in so test error can be tracked during training
model <- mlp(dvc$inputsTrain, dvc$targetsTrain, size = 5,
             learnFuncParams = 0.1, maxit = 60, inputsTest = dvc$inputsTest,
             targetsTest = dvc$targetsTest)
predictions <- predict(model, dvc$inputsTest)
# Diagnostic plots: per-iteration error, regression fit, and ROC curves on
# the training and test splits (column 2 of the encoded targets/outputs)
plotIterativeError(model)
plotRegressionError(predictions[, 2], dvc$targetsTest[, 2], pch = 3)
plotROC(fitted.values(model)[, 2], dvc$targetsTrain[, 2])
plotROC(predictions[, 2], dvc$targetsTest[, 2])
# Confusion matrices for train and test; the last one first discretizes the
# network outputs with method "402040" (l = 0.4, h = 0.6)
confusionMatrix(dvc$targetsTrain, fitted.values(model))
confusionMatrix(dvc$targetsTest, predictions)
confusionMatrix(dvc$targetsTrain, encodeClassLabels(fitted.values(model),
                method = "402040", l = 0.4, h = 0.6))
weightMatrix(model)
# SOM Network -------------------------------------------------------------
# Self-organizing map on the predictors (16x16 grid, 500 iterations);
# note 'model' is re-bound from the MLP fit above
model <- som(Predictor, mapX = 16, mapY = 16, maxit = 500,
             targets = Target)
# Activation maps (raw and log-scaled) plus a 3-D view of the log map
plotActMap(model$map, col = rev(heat.colors(12)))
plotActMap(log(model$map + 1), col = rev(heat.colors(12)))
persp(1:model$archParams$mapX, 1:model$archParams$mapY, log(model$map + 1),
      theta = 30, phi = 30, expand = 0.5, col = "lightblue")
plotActMap(model$labeledMap)
# Bare expressions: auto-printed when the script is run interactively
model$spanningTree
model$labeledMap
model$componentMaps
model$map
model$actMaps
# One component map per predictor column
for(i in 1:ncol(Predictor)) plotActMap(model$componentMaps[[i]],
                col = rev(topo.colors(12)))
# ART2 Network --------------------------------------------------------------------
# NOTE(review): this section runs on RSNNS's bundled demo patterns
# (snnsData$art2_tetra_med.pat), not on the depression data -- confirm intended.
patterns <- snnsData$art2_tetra_med.pat
model <- art2(patterns, f2Units = 5,
              learnFuncParams = c(0.99, 20, 20, 0.1, 0),
              updateFuncParams = c(0.99, 20, 20, 0.1, 0))
library("scatterplot3d")
scatterplot3d(patterns, pch = encodeClassLabels(model$fitted.values))
e085d98bbc570b67c50835c0bacbf722381307d3 | 0b6d9b2478732d1d0adaffc8eccfec928b2262cb | /man/oszacuj_czas_wykonania.Rd | b76d1c1db750456bc69551b9285c06eae28912d1 | [
"MIT"
] | permissive | zozlak/ZPD | e19e0403460b5f3b36f8da4bb308d713b0466a08 | 1a7d2f60b038e658a706dd8c78cbc60f8692ea02 | refs/heads/master | 2023-02-18T17:02:16.111630 | 2023-01-23T14:34:07 | 2023-01-23T14:38:08 | 21,378,996 | 12 | 1 | null | null | null | null | UTF-8 | R | false | true | 881 | rd | oszacuj_czas_wykonania.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oszacuj_czas_wykonania.R
\name{oszacuj_czas_wykonania}
\alias{oszacuj_czas_wykonania}
\title{Szacuje czas wykonania zapytania}
\usage{
oszacuj_czas_wykonania(dane, pelnyPlan = FALSE, format = "TEXT")
}
\arguments{
\item{dane}{ramka danych dplyr-a}
\item{pelnyPlan}{czy zwracać pełny plan zapytania czy tylko łączny koszt (zawsze TRUE gdy format inny niż TEXT)}
\item{format}{format zwracanych danych: TEXT, XML, JSON lub YAML}
}
\description{
Zwraca albo pełen plan zapytania (pelnyPlan = TRUE) albo skrótowe
podsumowanie szacowanego kosztu wykonania zapytania.
W wypadku zwracania tylko podsumowania wyświetlane są dolny i górny szacunek
w jednostkach szacowania czasu planera wraz z komentarzem opisującym rząd
wielkości czasu, jakiemu dana wartość odpowiada.
}
|
7d32945d3491c236467dd8cb861fd4148a098731 | 9a7ffcd969f5efa2d2de768eb2f9d72085440291 | /man/remoteDriver.Rd | 2f07a6eb73b30460547d6607d66106ddb5d6ee5f | [] | no_license | agstudy/RSelenium | 6861c76d992a2be319ccb9a28422885044b856ff | e93fed17aae812316af9fe9455c4bb88b5eab1ac | refs/heads/master | 2021-01-15T09:29:21.204423 | 2013-08-03T06:11:43 | 2013-08-03T06:11:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,101 | rd | remoteDriver.Rd | \name{remoteDriver}
\alias{remoteDriver}
\title{CLASS remoteDriver}
\usage{
remoteDriver(...)
}
\description{
remoteDriver Class uses the JsonWireProtocol to
communicate with the Selenium Server. If an error occurs
while executing the command then the server sends back an
HTTP error code with a JSON encoded reponse that
indicates the precise Response Error Code. The module
will then croak with the error message associated with
this code. If no error occurred, then the subroutine
called will return the value sent back from the server
(if a return value was sent). So a rule of thumb while
invoking methods on the driver is if the method did not
croak when called, then you can safely assume the command
was successful even if nothing was returned by the
method.
}
\details{
remoteDriver is a generator object. To define a new
remoteDriver class method `new` is called. The slots
(default value) that are user defined are:
remoteServerAddr(localhost), port(4444),
browserName(firefox), version(""), platform(ANY),
javascript(TRUE). See examples for more information on
use.
}
\section{Slots}{
\describe{ \item{\code{remoteServerAddr}:}{Object of
class \code{"character"}, giving the ip of the remote
server. Defaults to localhost} \item{\code{port}:}{Object
of class \code{"numeric"}, the port of the remote server
on which to connect.} \item{\code{browserName}:}{Object
of class \code{"character"}. The name of the browser
being used; should be one of
{chrome|firefox|htmlunit|internet explorer|iphone}.}
\item{\code{version}:}{Object of class
\code{"character"}. The browser version, or the empty
string if unknown.} \item{\code{platform}:}{Object of
class \code{"character"}. A key specifying which platform
the browser is running on. This value should be one of
{WINDOWS|XP|VISTA|MAC|LINUX|UNIX}. When requesting a new
session, the client may specify ANY to indicate any
available platform may be used.}
\item{\code{javascript}:}{Object of class
\code{"character"}. Whether the session supports
executing user supplied JavaScript in the context of the
current page. } \item{\code{serverURL}:}{Object of class
\code{"character"}. Url of the remote server which JSON
requests are sent to. } \item{\code{sessionInfo}:}{Object
of class \code{"list"}. A list containing information on
sessions. } }
}
\section{Methods}{
\describe{ \item{\code{new(...)}:}{ Create a new
\code{remoteDriver} object. ... is used to define the
appropriate slots.} \item{\code{open()}:}{ Send a request
to the remote server to instantiate the browser. }
\item{\code{getSessions()}:}{ Returns a list of the
currently active sessions. Each session will be returned
as a list containing amongst other items: \describe{
\item{\code{id}:}{The session ID}
\item{\code{capabilities}:}{An object describing
session's capabilities} } } \item{\code{status()}:}{
Query the server's current status. All server
implementations should return two basic objects
describing the server's current platform and when the
server was built.} \item{\code{getAlertText()}:}{ Gets
the text of the currently displayed JavaScript alert(),
confirm() or prompt() dialog. }
\item{\code{sendKeysToActiveElement(sendKeys)}:}{ Send a
sequence of key strokes to the active element. This
command is similar to the send keys command in every
aspect except the implicit termination: The modifiers are
not released at the end of the call. Rather, the state of
the modifier keys is kept between calls, so mouse
interactions can be performed while modifier keys are
depressed. The key strokes are sent as a list. Plain
text is enter as an unnamed element of the list. Keyboard
entries are defined in `selKeys` and should be listed
with name `key`. See the examples. }
\item{\code{sendKeysToAlert(sendKeys)}:}{ Sends
keystrokes to a JavaScript prompt() or alert () dialog.
The key strokes are sent as a list. Plain text is enter
as an unnamed element of the list. Keyboard entries are
defined in `selKeys` and should be listed with name
`key`. See the examples.} \item{\code{acceptAlert()}:}{
Accepts the currently displayed alert dialog. Usually,
this is equivalent to clicking the 'OK' button in the
dialog. } \item{\code{dismissAlert()}:}{ Dismisses the
currently displayed alert dialog. For confirm() and
prompt() dialogs, this is equivalent to clicking the
'Cancel' button. For alert() dialogs, this is equivalent
to clicking the 'OK' button. }
\item{\code{mouseMoveToLocation(x, y, elementId)}:}{ Move
the mouse by an offset of the specified element. If no
element is specified, the move is relative to the current
mouse cursor. If an element is provided but no offset,
the mouse will be moved to the center of the element. If
the element is not visible, it will be scrolled into
view. }
\item{\code{setAsyncScriptTimeout(milliseconds)}:}{ Set
the amount of time, in milliseconds, that asynchronous
scripts executed by execute_async_script() are permitted
to run before they are aborted and a |Timeout| error is
returned to the client. }
\item{\code{setImplicitWaitTimeout(milliseconds)}:}{ Set
the amount of time the driver should wait when searching
for elements. When searching for a single element, the
driver will poll the page until an element is found or
the timeout expires, whichever occurs first. When
searching for multiple elements, the driver should poll
the page until at least one element is found or the
timeout expires, at which point it will return an empty
list. If this method is never called, the driver will
default to an implicit wait of 0ms. }
\item{\code{close()}:}{ Close the current window. }
\item{\code{quit()}:}{ Delete the session & close open
browsers. } \item{\code{getCurrentWindowHandle()}:}{
Retrieve the current window handle. }
\item{\code{getWindowHandles()}:}{ Retrieve the list of
window handles used in the session. }
\item{\code{getWindowSize(windowId)}:}{ Retrieve the
window size. `windowId` is optional (default is 'current'
window). Can pass an appropriate `handle` }
\item{\code{getWindowPosition(windowId = "current")}:}{
Retrieve the window position. `windowId` is optional
(default is 'current' window). Can pass an appropriate
`handle` } \item{\code{getCurrentUrl()}:}{ Retrieve the
url of the current page. } \item{\code{navigate()}:}{
Navigate to a given url. } \item{\code{getTitle()}:}{ Get
the current page title. } \item{\code{goForward()}:}{
Equivalent to hitting the forward button on the browser.
} \item{\code{goBack()}:}{ Equivalent to hitting the back
button on the browser. } \item{\code{refresh()}:}{ Reload
the current page. }
\item{\code{executeAsyncScript(script,args)}:}{ Inject a
snippet of JavaScript into the page for execution in the
context of the currently selected frame. The executed
script is assumed to be asynchronous and must signal that
is done by invoking the provided callback, which is
always provided as the final argument to the function.
The value to this callback will be returned to the
client. Asynchronous script commands may not span page
loads. If an unload event is fired while waiting for a
script result, an error should be returned to the client.
} \item{\code{executeScript(script,args)}:}{ Inject a
snippet of JavaScript into the page for execution in the
context of the currently selected frame. The executed
script is assumed to be synchronous and the result of
evaluating the script is returned to the client.
The script argument defines the script to execute in the
form of a function body. The value returned by that
function will be returned to the client. The function
will be invoked with the provided args array and the
values may be accessed via the arguments object in the
order specified.
Arguments may be any JSON-primitive, array, or JSON
object. JSON objects that define a WebElement reference
will be converted to the corresponding DOM element.
Likewise, any WebElements in the script result will be
returned to the client as WebElement JSON objects. }
\item{\code{screenshot()}:}{ Take a screenshot of the
current page. The screenshot is returned as a base64
encoded PNG.} \item{\code{switchToFrame(frameId)}:}{
Change focus to another frame on the page. If the frame
ID is null, the server will switch to the page's default
content. } \item{\code{switchToWindow(windowId}:}{ Change
focus to another window. The window to change focus to
may be specified by its server assigned window handle, or
by the value of its name attribute. }
\item{\code{setWindowPosition(x,y,winHand)}:}{ Set the
position (on screen) where you want your browser to be
displayed. The windows handle is optional. If not
specified the current window in focus is used. }
\item{\code{setWindowSize(width,height,winHand)}:}{ Set
the size of the browser window. The windows handle is
optional. If not specified the current window in focus is
used.} \item{\code{getAllCookies()}:}{ Retrieve all
cookies visible to the current page. Each cookie will be
returned as a list with the following name and value
types: \describe{ \item{\code{name}:}{character}
\item{\code{value}:}{character}
\item{\code{path}:}{character}
\item{\code{domain}:}{character}
\item{\code{secure}:}{logical} } }
\item{\code{addCookie(name,value,path,domain,secure)}:}{
Set a cookie on the domain. The inputs are required apart
from `secure` which defaults to FALSE. }
\item{\code{deleteAllCookies()}:}{ Delete all cookies
visible to the current page. }
\item{\code{deleteCookieNamed(name)}:}{ Delete the cookie
with the given name. This command will be a no-op if there
is no such cookie visible to the current page.}
\item{\code{getPageSource()}:}{ Get the current page
source. } \item{\code{findElement(using ,value)}:}{
Search for an element on the page, starting from the
document root. The located element will be returned as an
object of webElement class. The inputs are: \describe{
\item{\code{using}:}{Locator scheme to use to search the
element, available schemes: {class, class_name, css, id,
link, link_text, partial_link_text, tag_name, name,
xpath}. Defaults to 'xpath'. } \item{\code{value}:}{The
search target. See examples.} } }
\item{\code{findElements(using ,value)}:}{ Search for
multiple elements on the page, starting from the document
root. The located elements will be returned as an list of
objects of class WebElement. The inputs are: \describe{
\item{\code{using}:}{Locator scheme to use to search the
element, available schemes: {class, class_name, css, id,
link, link_text, partial_link_text, tag_name, name,
xpath}. Defaults to 'xpath'. } \item{\code{value}:}{The
search target. See examples.} } }
\item{\code{getActiveElement()}:}{ Get the element on the
page that currently has focus. The located element will
be returned as a WebElement id. }
\item{\code{click(buttonId)}:}{ Click any mouse button
(at the coordinates set by the last mouseMoveToLocation()
command). buttonId - any one of 'LEFT'/0 'MIDDLE'/1
'RIGHT'/2. Defaults to 'LEFT'}
\item{\code{doubleclick(buttonId)}:}{ Double-Click any
mouse button (at the coordinates set by the last
mouseMoveToLocation() command). buttonId - any one of
'LEFT'/0 'MIDDLE'/1 'RIGHT'/2. Defaults to 'LEFT' }
\item{\code{buttondown(buttonId)}:}{ Click and hold the
given mouse button (at the coordinates set by the last
moveto command). Note that the next mouse-related command
that should follow is buttonup. Any other mouse
command (such as click or another call to buttondown)
will yield undefined behaviour. buttonId - any one of
'LEFT'/0 'MIDDLE'/1 'RIGHT'/2. Defaults to 'LEFT'}
\item{\code{buttonup(buttonId)}:}{ Releases the mouse
button previously held (where the mouse is currently at).
Must be called once for every buttondown command issued.
See the note in click and buttondown about implications
of out-of-order commands. buttonId - any one of 'LEFT'/0
'MIDDLE'/1 'RIGHT'/2. Defaults to 'LEFT' }
\item{\code{closeServer()}:}{ Closes the server in
practice terminating the process. This is useful for
linux systems. On windows the java binary operates as a
separate shell which the user can terminate. } }
}
\examples{
\dontrun{
# start the server if one isn't running
startServer()
# use default server initialisation values
remDr <- remoteDriver$new()
# send request to server to initialise session
remDr$open()
# navigate to R home page
remDr$navigate("http://www.r-project.org")
# navigate to www.bbc.co.uk notice the need for http://
remDr$navigate("http://www.bbc.co.uk")
# go backwards and forwards
remDr$goBack()
remDr$goForward()
remDr$goBack()
# Examine the page source
frontPage <- remDr$getPageSource()
# The R homepage contains frames
webElem <- remDr$findElements(value = "//frame")
sapply(webElem, function(x){x$getElementAttribute('name')})
# The homepage contains 3 frames: logo, contents and banner
# switch to the `contents` frame
webElem <- remDr$findElement(using = 'name', value = 'contents')
remDr$switchToFrame(webElem$elementId)
# re-examine the page source
contentPage <- remDr$getPageSource()
identical(contentPage, frontPage) # false we hope!!
# Find the link for the search page on R homepage. Use xpath as default.
webElem <- remDr$findElement(value = '//a[@href = "search.html"]')
webElem$getElementAttribute('href') # "http://www.r-project.org/search.html"
# click the search link
webElem$clickElement()
# FILL OUT A GOOGLE SEARCH FORM
remDr$navigate("http://www.google.com")
# show different methods of accessing DOM components
webElem1 <- remDr$findElement(using = 'name', value = 'q')
webElem2 <- remDr$findElement(using = 'id', value = webElem1$getElementAttribute('id'))
webElem3 <- remDr$findElement(using = 'xpath', value = '//input[@name = "q"]')
# Enter some text in the search box
webElem1$sendKeysToElement(list('RSelenium was here'))
# clear the text previously entered
webElem1$clearElement()
# show an example of sending a key press
webElem1$sendKeysToElement(list('R', key = 'enter'))
# Collate the results for the `R` search
googLinkText <- remDr$findElements(value = "//h3[@class = 'r']")
linkHeading <- sapply(googLinkText, function(x) x$getElementText())
googLinkDesc <- remDr$findElements(value = "//div[@class = 's']")
linkDescription <- sapply(googLinkDesc, function(x) x$getElementText())
googLinkHref <- remDr$findElements(value = "//h3[@class = 'r']/a")
linkHref <- sapply(googLinkHref, function(x) x$getElementAttribute('href'))
data.frame(heading = linkHeading, description = linkDescription, href = linkHref)
# Example of javascript call
remDr$executeScript("return arguments[0] + arguments[1];", args = 1:2)
# Example of javascript async call
remDr$executeAsyncScript("arguments[arguments.length - 1](arguments[0] + arguments[1]);", args = 1:2)
}
}
|
9608406fb4e51e0a3e67f768f5928f2bd44e631b | 050edfa53f5ec7d76b2321c552266e0f60e4db92 | /R/c5.R | 99c82afd82dc3a6cb4958e65136920f26d787eca | [] | no_license | placeboo/subgraph | e1ab54fabda52ed4243fdc5cdc2a348b2da6d41c | 37036807aa7bd75aeab90fe224fdd44c126fb3f9 | refs/heads/master | 2021-10-27T15:54:59.877512 | 2019-04-18T08:08:57 | 2019-04-18T08:08:57 | 107,905,890 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 953 | r | c5.R | #' c5 graphs
#' List all possible c5 graphs based on given FIVE nodes
#' @param x The vector representing nodes
#' @return A matrix listing edges of c5 graphs
#' @examples
#' c5(c(1:5))
c5 = function(x){
  # A 5-cycle is only defined on exactly five nodes.
  if (length(x) != 5) {
    stop("The number of nodes should be FIVE!")
  }
  nodes = sort(x)
  anchor = nodes[1]       # smallest node; it will close every cycle
  path_nodes = nodes[-1]  # the other four nodes form open 4-node paths
  # Enumerate all 4-node paths over the remaining nodes (edges encoded as
  # "a-b" strings, one row per path) via the companion p4() helper.
  path_mat = p4(path_nodes)
  # Decode every path's edge strings back into numeric node ids; each
  # column of id_mat holds the node ids appearing in one path's edges.
  id_mat = apply(path_mat, 1, function(edges) as.numeric(unlist(strsplit(edges, '-'))))
  # A path's two endpoints are the nodes that occur in exactly one edge
  # (interior nodes occur twice), so table(ids) == 1 picks them out.
  end_mat = apply(id_mat, 2, function(ids) path_nodes[table(ids) == 1])
  # Close each open path into a cycle by connecting both endpoints to the
  # anchor node (deToIn presumably encodes an edge between two nodes).
  closing_mat = t(apply(end_mat, 2, function(ends) c(deToIn(ends[1], anchor), deToIn(ends[2], anchor))))
  cbind(path_mat, closing_mat)
}
|
147573685f5c873420743f1ea28e67c87d6e5b59 | c5de5d072f5099e7f13b94bf2c81975582788459 | /R Extension/RMG/Energy/PM/ERCOT/procmon/ercot_email_rayburn.r | 34ad6eb1016ae87281b97b05f705c8163c40c08f | [] | no_license | uhasan1/QLExtension-backup | e125ad6e3f20451dfa593284507c493a6fd66bb8 | 2bea9262841b07c2fb3c3495395e66e66a092035 | refs/heads/master | 2020-05-31T06:08:40.523979 | 2015-03-16T03:09:28 | 2015-03-16T03:09:28 | 190,136,053 | 2 | 0 | null | null | null | null | WINDOWS-1250 | R | false | false | 7,137 | r | ercot_email_rayburn.r | # Email Rayburn load forecast to Rayburn
# Takes the xDA lock of Rayburn forecast and emails to Rayburn
#
#
# Written by Lori Simpson on 31-Oct-2011
#------------------------------------------------------------
# Functions
# startdate=Sys.Date()+0; enddate=startdate+3; testing=TRUE
#
# Email the Rayburn load-forecast "lock" to Rayburn and internal recipients.
#
# Pulls the 'ercot_ray_ld_lock' series from TSDB over [startdate, enddate],
# scales it (divide by 4 -- presumably 15-minute values to hourly MW; confirm),
# verifies every requested operating day came back and that the values fall in
# a plausible range, writes the data to a shared CSV, and emails it.
#
# Args:
#   startdate, enddate: Date objects bounding the operating days to send.
#                       Requests more than 5 days ahead are refused.
#   testing: logical; TRUE emails only the author, FALSE the full distribution list.
#
# Return value is not used by callers; failure paths return an explanatory
# string, and every failure also triggers an alert email as a side effect.
# Depends on external helpers pull.tsdb, sendEmail, rLog, setLogLevel.
email.ray.load = function(startdate, enddate, testing)
{
# Refuse look-aheads beyond 5 days (TSDB lock doesn't extend further) and alert by email.
if((startdate-Sys.Date())>5) {sendEmail(from="lori.simpson@constellation.com", to="lori.simpson@constellation.com", subject="NEED TO UPDATE Rayburn Forecast Lock!", body=paste("Can't return data for more than 6 days forward. Missing data for dates ",startdate, " to ",enddate,".\n\nLori Simpson\n410-470-3217\nlori.simpson@constellation.com",sep="")); return("Looking too far out for data!")}
# Pull from 5 days before startdate so operating-day bucketing has full coverage.
rayld=pull.tsdb(startdate-5, enddate, symbolst=c('ercot_ray_ld_lock'), hourconvention=0)
rayld$value=rayld$value/4
## Get opr_day
# Derive hour-ending (HE) and operating date from the timestamp; hour 0 is
# relabeled HE 24 of the PREVIOUS operating day, per standard power-market convention.
rayld$HE=as.POSIXlt(rayld$dt_he)$hour
rayld$OPR_DATE=rayld$dt_he - rayld$HE*60*60
rayld[rayld$HE==0,"OPR_DATE"]=rayld[rayld$HE==0,"OPR_DATE"]-1*60*60*24
rayld[rayld$HE==0,"HE"]=24
# Build the set of operating days actually returned vs. the days requested.
dates_returned=data.frame(dt=unique(rayld$OPR_DATE), returned=1)
dates_desired=data.frame(dt=seq(startdate, enddate, by="day")); ## dates_desired$dt=round(as.POSIXct(dates_desired$dt), "day")
# Reformat desired dates as "YYYY-MM-DD" POSIXct so they merge cleanly with OPR_DATE.
dates_desired$dt=paste(as.POSIXlt(dates_desired[,1])$year+1900,sprintf("%02.0f", as.POSIXlt(dates_desired[,1])$mon+1),sprintf("%02.0f", as.POSIXlt(dates_desired[,1])$mday),sep="-"); dates_desired$dt=as.POSIXct(dates_desired$dt)
## Check that we've retrieved all dates
# all.y=TRUE keeps every desired day; a missing day shows up as NA in 'returned'.
datescheck=merge(dates_returned, dates_desired, all.y=TRUE)
failedct=nrow(datescheck[is.na(datescheck$returned)==TRUE, ])
# Any missing operating day: alert by email and fall through (no data is sent).
if(failedct>0)
{
sendEmail(from="lori.simpson@constellation.com", to="lori.simpson@constellation.com",
subject="NEED TO UPDATE Rayburn Forecast Lock!",
body=paste("Missing some data for dates ",startdate, " to ",enddate,".\n\nLori Simpson\n410-470-3217\nlori.simpson@constellation.com",sep=""))
}
if(failedct==0)
{
## Who do i email
# testing=TRUE restricts the recipients to the author only.
if(testing==TRUE) emailto=c("lori.simpson@constellation.com")
if(testing==FALSE) emailto=c("akirkland@rayburnelectric.com, jkirkland@rayburnelectric.com, dnaylor@rayburnelectric.com, jdavis@rayburnelectric.com, #wtercot@constellation.com, ercotpm2@constellation.com, lori.simpson@constellation.com, Suzanne.Eagles@constellation.com, Laura.Castle@constellation.com, Matt.Swartz@constellation.com, Jason.Miller@constellation.com, Adam.Boyd@constellation.com, Gary.Gibson@constellation.com, craig.dolan@constellation.com, walter.kuhn@constellation.com")
## Write file to send out
# Restrict to the requested operating days before writing the attachment.
datafile="S:/All/Portfolio Management/ERCOT/Daily Ercot Bidding/Load Bids/rayloadlock.csv"
colnames(dates_desired)="OPR_DATE"
rayld=merge(rayld, dates_desired)
write.csv(rayld[,c("dt_he","value")], file=datafile)
## Check for reasonableness of results
# Sanity band: 140/4 = 35 MW to 775/4 = ~194 MW (post-scaling). Outside the
# band: log, alert the author, and return without emailing the distribution list.
if( min(rayld$value)<(140/4) | max(rayld$value)>(775/4))
{
rLog(paste("The data does not seem reasonable. It should be within 35 and 194 MW, but is returning a min value of ", round(min(rayld$value),0)," and a max value of ", round(max(rayld$value),0),".",sep=""))
sendEmail(from="lori.simpson@constellation.com", to="lori.simpson@constellation.com",
subject="Rayburn Forecast Lock data unreasonable!",
body=paste("The data does not seem reasonable. It should be within 35 and 194 MW, but is returning a min value of ", round(min(rayld$value),0)," and a max value of ", round(max(rayld$value),0)," for dates ",startdate, " to ",enddate,".",sep=""))
return(paste("The data does not seem reasonable. It should be within 35 and 194 MW, but is returning a min value of ", round(min(rayld$value),0)," and a max value of ", round(max(rayld$value),0),".",sep=""))
}
## Email results
# NOTE(review): values exactly equal to a bound pass neither branch and are
# silently dropped (strict < and > on both checks) -- confirm this is intended.
if( min(rayld$value)>(140/4) & max(rayld$value)<(775/4))
{
# Temporarily raise log verbosity around the outbound email, then restore it.
setLogLevel(1)
sendEmail(from="lori.simpson@constellation.com", to=emailto,
subject="Rayburn Forecast Lock",
body=paste("Please see attached for the Rayburn lock for ",startdate, " to ",enddate,".\n\nLori Simpson\n410-470-3217\nlori.simpson@constellation.com",sep=""),
attachments=c(datafile))
setLogLevel(3)
}
}
}
#------------------------------------------------------------
# MAIN
#
# Script entry point: load dependencies, source the shared DAM library,
# email tomorrow's Rayburn load lock, then exit with a status code when
# run non-interactively (intended for a scheduled/procmon job).
# NOTE(review): require() is used for loading; library() would fail loudly
# on a missing package instead of returning FALSE.
require(CEGbase)
options(width=150) ## ‘width’: controls the maximum number of columns on a line used in printing vectors, matrices and arrays, and when filling by ‘cat’.
require(RODBC)
require(Tsdb)
require(FTR)
# source("H:/user/R/RMG/Energy/PM/ERCOT/dam/lib.dam.R") ## For testing
source("S:/All/Risk/Software/R/prod/Energy/PM/ERCOT/dam/lib.dam.R") ## For production
# NOTE(review): "proces" is a typo in this log message.
rLog(paste("Start proces at", Sys.time()))
returnCode <- 0 # succeed = 0
#------------------------------------------------------------
# Collect and upload gas data
# email.ray.load(STARTDT+3, STARTDT+3, testing==FALSE)
## Set variables
STARTDT=Sys.Date()
# Day-of-week (0=Sunday) -- computed but not used below; possibly a remnant
# of weekend/holiday scheduling logic.
STARTDTdayofweek=as.POSIXlt(STARTDT)$wday
## Send Data
# Send tomorrow's lock (single operating day) to the full distribution list.
email.ray.load(STARTDT+1, STARTDT+1, testing=FALSE)
## TESTING
# email.ray.load(STARTDT-0, STARTDT+1, testing=TRUE)
# email.ray.load(STARTDT+0, STARTDT+1, testing=FALSE)
# HOLIDAYS: email.ray.load(STARTDT+1, STARTDT+5, testing=FALSE)
# Log finish
rLog(paste("Done at ", Sys.time()))
#------------------------------------------------------------
# Interactive=true if ran by hand
#
# When run as a batch job, quit with the return code so the scheduler can
# detect success/failure; when run by hand, just print it.
if( interactive() ){
  print( returnCode )
} else {
  q( status = returnCode )
}
#------------------------------------------------------------
# Upload data
#
# One-off backfill path: disabled by default; flip BACKFILL to TRUE and run
# by hand to push a CSV of historical forecasts into TSDB.
# NOTE(review): this code sits after q(), so it only ever runs interactively.
BACKFILL=FALSE
if(BACKFILL==TRUE)
{
raydata = read.csv(file="S:/All/Portfolio Management/Simpson/Second Simpson/R/written docs/uploadraydafcst.csv");
#write.csv(dlt, file="S:/All/Portfolio Management/Simpson/Second Simpson/R/written docs/dlt.csv")
## Upload data
symbol <- "ercot_ray_ld_lock"
# Rows 1000+ only; columns 4 (timestamp) and 3 (value) -- presumably the
# earlier rows were already loaded. Verify column positions against the CSV.
dfRay <- data.frame(time=as.POSIXct(raydata[1000:nrow(raydata),4]),
                    value=raydata[1000:nrow(raydata),3])
rLog("Made dataframe")
tsdbUpdate(symbol, dfRay)
## rLog(paste("Tried to upload I2 data to TSDB for date ",round(dfNoms[1,1],"day")," --success uncertain",sep=""))
}
# sendEmail(from="lori.simpson@constellation.com", to=c("ercotpm2@constellation.com"),
#           subject="Testing automated emails--please respond",
#           body=paste("Please please reply if you receive this email so i know this is working. THanks!",sep=""),
#           attachments=c(datafile))
|
bc9a3121ff2f844788b20da9f49f3647777c51a2 | 8cd6f2ac51d11cd87a0bebe38d8938070d640fd4 | /R/MakeInput_Fn.R | e694b27191b9f29f61b5b7e03d9a691a67d0a2bb | [] | no_license | James-Thorson/spatial_DFA | 289e2b7cd30246fef660fdbb5691366a684a9d41 | 763b7e888338c9661222e840c3c9b127e2bdc5be | refs/heads/master | 2021-07-10T08:37:18.124375 | 2020-06-30T16:06:50 | 2020-06-30T16:06:50 | 32,428,942 | 8 | 6 | null | null | null | null | UTF-8 | R | false | false | 28,330 | r | MakeInput_Fn.R |
#' Build data input for spatial dynamic factor analysis (SDFA)
#'
#' \code{MakeInput_Fn} builds a tagged list of inputs for TMB
#'
#' @param Version a version number (see example for current default).
#' @param Nfactors The number of dynamic factors used to approximate spatio-temporal variation
#' @param DF a data frame of data where each row is a unique sample and with the following columns
#' \describe{
#' \item{catch}{the observation for each sample}
#' \item{year}{a column of years for each sample}
#' \item{spp}{a factor specifying the species for that sample}
#' \item{sitenum}{a column of years for each sample}
#' \item{PredTF}{a vector of 0s or 1st, stating whether a sample is used when fitting the model (PredTF=0) or when evaluating its predictive performance (PredTF=1)}
#' \item{TowID}{a vector coding the unit of overdispersion, e.g., tow or vessel (OPTIONAL)}
#' }
#' @param loc_xy Locations for each station
#' @param method The method for approximating spatial variation (options: "grid" or "mesh")
#' @param Nobsfactors The number of factors used to approximate overdispersion among levels of \code{TowID} (Default=0)
#' @param Kappa_Type Whether the decorrelation distance is constant for all factors or not (Default="Constant")
#' @param ObsModel The observation model used
#' @param Include_Omega Whether to estimate purely spatial variation (Default=TRUE)
#' @param Include_Epsilon, Whether to incldue spatio-temporal variation (Default=TRUE)
#' @param EncounterFunction, The link between density and encounter probability; 0=two-parameter logistic relationship; 1=two-parameter logistic relationship with less-than-one saturation; 2=one-parameter saturating relationship (Default=1)
#' @param Correlated_Overdispersion, Whether to estimate overdispersion (only possible if TowID is present in \code{DF}
#' @param Include_Phi Whether to estimate each factor in equilibrium (FALSE) or with a fixed offset from equilibrium (TRUE), Default=TRUE
#' @param Include_Rho Whether to estimate the magnitude of temporal autocorrelation (Default=TRUE)
#' @param Use_REML Whether to use REML estimation
#' @param X_ik A matrix specifying measured variables that affect catchability for each sample (Default is turned off)
#' @param X_nl A matrix specifying measured variables that affect density for each sample (Default is an intercept)
#' @param X_ntl An array specifying measured variables that affect density and vary over time (Default is off)
#' @param a_n A vector giving the area associated with mesh vertex used when calculating indices of abundance (Default is even weighting of sample locations)
#' @param YearSet A vector of levels of the \code{year} input that should be modeled (Default is to infer from the year column of DF)
#' @param IndependentTF Whether the spatio-temporal variation (IndependentTF[1]) or overdispersion (IndependentTF[2]) is independent among species (Default is both are correlated)
#' @param CheckForBugs Whether to check inputs for obvious problems (Default=TRUE)
#' @param CorrGroup_pp A matrix filled with integers, for post hoc testing of differences in among-species correlations
#' @return Tagged list containing inputs to function \code{Build_TMB_Fn()}
#' \describe{
#' \item{TmbData}{A tagged list of data inputs}
#' \item{TmbParams}{A tagged list with input parameters}
#' \item{Random}{A character vector specifying which parameters are treated as random effects}
#' \item{Map}{A parameter map, used to turn off or mirror parameter values}
#' }
#' @export
MakeInput_Fn = function( Version, Nfactors, DF, loc_xy, method="mesh", Nobsfactors=0, Kappa_Type="Constant", ObsModel=NULL,
Aniso=FALSE, Include_Omega=TRUE, Include_Epsilon=TRUE, EncounterFunction=2, Correlated_Overdispersion=FALSE,
Include_Phi=TRUE, Include_Rho=TRUE, Use_REML=FALSE, X_ik=NULL, X_nl=NULL, X_ntl=NULL, a_n=NULL, YearSet=NULL,
IndependentTF=c(FALSE,FALSE), CheckForBugs=TRUE, CorrGroup_pp=NULL, ...){
# Calculate spde inputs
if( require(INLA)==FALSE ) stop("Must install INLA from: source('http://www.math.ntnu.no/inla/givemeINLA.R')")
# Build SPDE object using INLA
inla_mesh = INLA::inla.mesh.create( loc_xy ) # loc_samp ; ,max.edge.data=0.08,max.edge.extra=0.2
inla_spde = INLA::inla.spde2.matern(inla_mesh, alpha=2)
# 2D AR1 grid
dist_grid = dist(loc_xy, diag=TRUE, upper=TRUE)
grid_size_km = sort(unique(dist_grid))[1]
M0 = as( ifelse(as.matrix(dist_grid)==0, 1, 0), "dgTMatrix" )
M1 = as( ifelse(as.matrix(dist_grid)==grid_size_km, 1, 0), "dgTMatrix" )
M2 = as( ifelse(as.matrix(dist_grid)==sqrt(2)*grid_size_km, 1, 0), "dgTMatrix" )
grid_list = list("M0"=M0, "M1"=M1, "M2"=M2, "grid_size_km"=grid_size_km)
# Infer default values for inputs
if( method=="mesh" ) Nknots = inla_mesh$n
if( method=="grid" ) Nknots = nrow(loc_xy)
if( is.null(YearSet) ) YearSet = min(DF[,'year']):max(DF[,'year'])
if( is.null(ObsModel) ){
ObsModel = ifelse( all(is.integer(DF[,'catch'])), 0, 1 )
}
if( is.null(a_n) ) a_n = rep(0,Nknots)
# Species Grouping matrix
if( is.null(CorrGroup_pp) ){
CorrGroup_pp=matrix(0,nrow=length(levels(DF[,'spp'])),ncol=length(levels(DF[,'spp'])))
}else{
if( any(CorrGroup_pp!=0 & CorrGroup_pp!=1) ) stop("CorrGroup_pp can only contain 0 and 1")
if( any(dim(CorrGroup_pp)!=length(levels(DF[,'spp']))) ) stop("CorrGroup_pp must be a square matrix with dimension Nspecies")
}
# Check for inconsistent inputs
if( method=="grid" & Aniso==TRUE ){
Aniso = FALSE
message("Switching Aniso=FALSE because the 2D AR1 grid is isotropic")
}
# Options_vec
Options_vec=c( "ObsModel"=ObsModel, "Include_Omega"=Include_Omega, "Include_Epsilon"=Include_Epsilon, "EncounterFunction"=EncounterFunction, "Correlated_Overdispersion"=ifelse(Nobsfactors==0,0,1), "AnisoTF"=Aniso, "Method"=switch(method,"mesh"=0,"grid"=1) )
# Data size
Nyears = length(YearSet)
Nsites = length(unique(DF[,'sitenum']))
Nspecies = length(unique(DF[,'spp']))
Nobs = nrow(DF)
Nfactors_input = ifelse( Nfactors==0, 1, Nfactors )
Nobsfactors_input = ifelse( Nobsfactors==0, 1, Nobsfactors )
if( Options_vec["Correlated_Overdispersion"]==1 ){
if( !("TowID" %in% names(DF)) ) stop("with correlated observations, TowID must be a column in DF")
Nsamples = length(unique(DF[,'TowID']))
}else{
if( !("TowID" %in% names(DF)) ) DF = cbind(DF, "TowID"=1)
Nsamples = 1
}
if( !("PredTF_i" %in% names(DF)) ){
DF = cbind(DF, "PredTF_i"=0)
}else{
if( !(all(DF[,'PredTF_i']==0 | DF[,'PredTF_i']==1)) ) stop("PredTF_i must be either 0 or 1")
}
# By default, catchability design matrix is turned off
if( is.null(X_ik) ){
X_ik = matrix(0, nrow=Nobs, ncol=1)
}else{
if( nrow(X_ik)!=Nobs ) stop("Check catchability design matrix input: X_ik")
}
# By default, spatial design matrix is an intercept for each species
# This design matrix is not used in V14-V17
if( is.null(X_nl) ){
X_nl = matrix(1, nrow=Nknots, ncol=1)
}else{
if( !(Version%in%c("spatial_dfa_v13")) ) stop("X_nl is only used in version spatial_dfa_v13")
if( nrow(X_nl)!=Nknots ) stop("Check spatial matrix input: X_sp")
if( all(sapply(X_nl,MARGIN=2,FUN=var)>0) & CheckForBugs==TRUE ) stop("You almost certainly want to add an intercept to the density matrix X_nl")
}
# By default, spatio-temporal design matrix is an intercept for each species
# This design matrix is not used in V1-V13
if( is.null(X_ntl) ){
X_ntl = array(1, dim=c(Nknots,Nyears,1))
}else{
if( !(Version%in%c("spatial_dfa_v18","spatial_dfa_v17","spatial_dfa_v16","spatial_dfa_v15","spatial_dfa_v14")) ) stop("X_ntl is only used in version spatial_dfa_v14 and higher")
if( dim(X_ntl)[1] != Nknots ) stop("Check spatial matrix input: X_ntl")
}
# Data
if(Version=="spatial_dfa_v3") TmbData = list("n_obs"=Nobs, "n_sites"=Nsites, "n_years"=Nyears, "n_knots"=Nknots, "n_species"=Nspecies, "n_factors"=Nfactors_input, "n_cov"=ncol(X_ik), "c_i"=DF[,'catch'], "p_i"=as.numeric(DF[,'spp'])-1, "s_i"=DF[,'sitenum']-1, "t_i"=match(DF[,'year'],YearSet)-1, "X_ik"=X_ik, "G0"=inla_spde$param.inla$M0, "G1"=inla_spde$param.inla$M1, "G2"=inla_spde$param.inla$M2 )
if(Version=="spatial_dfa_v4") TmbData = list("Options_vec"=Options_vec, "n_obs"=Nobs, "n_sites"=Nsites, "n_years"=Nyears, "n_knots"=Nknots, "n_species"=Nspecies, "n_factors"=Nfactors_input, "n_cov"=ncol(X_ik), "c_i"=DF[,'catch'], "p_i"=as.numeric(DF[,'spp'])-1, "s_i"=DF[,'sitenum']-1, "t_i"=match(DF[,'year'],YearSet)-1, "X_ik"=X_ik, "G0"=inla_spde$param.inla$M0, "G1"=inla_spde$param.inla$M1, "G2"=inla_spde$param.inla$M2 )
if(Version%in%c("spatial_dfa_v10","spatial_dfa_v9","spatial_dfa_v8b","spatial_dfa_v8","spatial_dfa_v7","spatial_dfa_v6","spatial_dfa_v5")) TmbData = list("Options_vec"=Options_vec, "n_obs"=Nobs, "n_sites"=Nsites, "n_years"=Nyears, "n_knots"=Nknots, "n_species"=Nspecies, "n_factors"=Nfactors_input, "n_cov"=ncol(X_ik), "c_i"=DF[,'catch'], "p_i"=as.numeric(DF[,'spp'])-1, "s_i"=DF[,'sitenum']-1, "t_i"=match(DF[,'year'],YearSet)-1, "X_ik"=X_ik, "Rotation_jj"=diag(Nfactors_input), "G0"=inla_spde$param.inla$M0, "G1"=inla_spde$param.inla$M1, "G2"=inla_spde$param.inla$M2 )
if(Version%in%c("spatial_dfa_v11")) TmbData = list("Options_vec"=Options_vec, "n_obs"=Nobs, "n_samples"=Nsamples, "n_obsfactors"=Nobsfactors_input, "n_sites"=Nsites, "n_years"=Nyears, "n_knots"=Nknots, "n_species"=Nspecies, "n_factors"=Nfactors_input, "n_cov"=ncol(X_ik), "c_i"=DF[,'catch'], "m_i"=as.numeric(DF[,'TowID'])-1, "p_i"=as.numeric(DF[,'spp'])-1, "s_i"=DF[,'sitenum']-1, "t_i"=match(DF[,'year'],YearSet)-1, "X_ik"=X_ik, "Rotation_jj"=diag(Nfactors_input), "G0"=inla_spde$param.inla$M0, "G1"=inla_spde$param.inla$M1, "G2"=inla_spde$param.inla$M2 )
if(Version%in%c("spatial_dfa_v12")) TmbData = list("Options_vec"=Options_vec, "n_obs"=Nobs, "n_samples"=Nsamples, "n_obsfactors"=Nobsfactors_input, "n_sites"=Nsites, "n_years"=Nyears, "n_knots"=Nknots, "n_species"=Nspecies, "n_factors"=Nfactors_input, "n_cov"=ncol(X_ik), "c_i"=DF[,'catch'], "m_i"=as.numeric(DF[,'TowID'])-1, "p_i"=as.numeric(DF[,'spp'])-1, "s_i"=DF[,'sitenum']-1, "t_i"=match(DF[,'year'],YearSet)-1, "X_ik"=X_ik, "a_n"=a_n, "Rotation_jj"=diag(Nfactors_input), "G0"=inla_spde$param.inla$M0, "G1"=inla_spde$param.inla$M1, "G2"=inla_spde$param.inla$M2 )
if(Version%in%c("spatial_dfa_v13")) TmbData = list("Options_vec"=Options_vec, "n_obs"=Nobs, "n_samples"=Nsamples, "n_obsfactors"=Nobsfactors_input, "n_sites"=Nsites, "n_years"=Nyears, "n_knots"=Nknots, "n_species"=Nspecies, "n_factors"=Nfactors_input, "n_catchcov"=ncol(X_ik), "n_spacecov"=ncol(X_nl), "c_i"=DF[,'catch'], "m_i"=as.numeric(DF[,'TowID'])-1, "p_i"=as.numeric(DF[,'spp'])-1, "s_i"=DF[,'sitenum']-1, "t_i"=match(DF[,'year'],YearSet)-1, "X_ik"=X_ik, "X_nl"=X_nl, "a_n"=a_n, "Rotation_jj"=diag(Nfactors_input), "G0"=inla_spde$param.inla$M0, "G1"=inla_spde$param.inla$M1, "G2"=inla_spde$param.inla$M2 )
if(Version%in%c("spatial_dfa_v14")) TmbData = list("Options_vec"=Options_vec, "n_obs"=Nobs, "n_samples"=Nsamples, "n_obsfactors"=Nobsfactors_input, "n_sites"=Nsites, "n_years"=Nyears, "n_knots"=Nknots, "n_species"=Nspecies, "n_factors"=Nfactors_input, "n_catchcov"=ncol(X_ik), "n_spacecov"=dim(X_ntl)[3], "c_i"=DF[,'catch'], "m_i"=as.numeric(DF[,'TowID'])-1, "p_i"=as.numeric(DF[,'spp'])-1, "s_i"=DF[,'sitenum']-1, "t_i"=match(DF[,'year'],YearSet)-1, "X_ik"=X_ik, "X_ntl"=X_ntl, "a_n"=a_n, "Rotation_jj"=diag(Nfactors_input), "G0"=inla_spde$param.inla$M0, "G1"=inla_spde$param.inla$M1, "G2"=inla_spde$param.inla$M2 )
if(Version%in%c("spatial_dfa_v15")) TmbData = list("Options_vec"=Options_vec, "n_obs"=Nobs, "n_samples"=Nsamples, "n_obsfactors"=Nobsfactors_input, "n_sites"=Nsites, "n_years"=Nyears, "n_knots"=Nknots, "n_species"=Nspecies, "n_factors"=Nfactors_input, "n_catchcov"=ncol(X_ik), "n_spacecov"=dim(X_ntl)[3], "c_i"=DF[,'catch'], "predTF_i"=DF[,'PredTF_i'], "m_i"=as.numeric(DF[,'TowID'])-1, "p_i"=as.numeric(DF[,'spp'])-1, "s_i"=DF[,'sitenum']-1, "t_i"=match(DF[,'year'],YearSet)-1, "X_ik"=X_ik, "X_ntl"=X_ntl, "a_n"=a_n, "Rotation_jj"=diag(Nfactors_input), "G0"=inla_spde$param.inla$M0, "G1"=inla_spde$param.inla$M1, "G2"=inla_spde$param.inla$M2 )
if(Version%in%c("spatial_dfa_v16")) TmbData = list("Options_vec"=Options_vec, "n_obs"=Nobs, "n_samples"=Nsamples, "n_obsfactors"=Nobsfactors_input, "n_sites"=Nsites, "n_years"=Nyears, "n_knots"=Nknots, "n_species"=Nspecies, "n_factors"=Nfactors_input, "n_catchcov"=ncol(X_ik), "n_spacecov"=dim(X_ntl)[3], "c_i"=DF[,'catch'], "predTF_i"=DF[,'PredTF_i'], "m_i"=as.numeric(DF[,'TowID'])-1, "p_i"=as.numeric(DF[,'spp'])-1, "s_i"=DF[,'sitenum']-1, "t_i"=match(DF[,'year'],YearSet)-1, "X_ik"=X_ik, "X_ntl"=X_ntl, "a_n"=a_n, "Rotation_jj"=diag(Nfactors_input), "spde"=NULL, "spde_aniso"=NULL )
if(Version%in%c("spatial_dfa_v17")) TmbData = list("Options_vec"=Options_vec, "n_obs"=Nobs, "n_samples"=Nsamples, "n_obsfactors"=Nobsfactors_input, "n_sites"=Nsites, "n_years"=Nyears, "n_knots"=Nknots, "n_species"=Nspecies, "n_factors"=Nfactors_input, "n_catchcov"=ncol(X_ik), "n_spacecov"=dim(X_ntl)[3], "c_i"=DF[,'catch'], "predTF_i"=DF[,'PredTF_i'], "m_i"=as.numeric(DF[,'TowID'])-1, "p_i"=as.numeric(DF[,'spp'])-1, "s_i"=DF[,'sitenum']-1, "t_i"=match(DF[,'year'],YearSet)-1, "X_ik"=X_ik, "X_ntl"=X_ntl, "a_n"=a_n, "Rotation_jj"=diag(Nfactors_input), "CorrGroup_pp"=CorrGroup_pp, "spde"=NULL, "spde_aniso"=NULL )
if(Version%in%c("spatial_dfa_v18")) TmbData = list("Options_vec"=Options_vec, "n_obs"=Nobs, "n_samples"=Nsamples, "n_obsfactors"=Nobsfactors_input, "n_sites"=Nsites, "n_years"=Nyears, "n_knots"=Nknots, "n_species"=Nspecies, "n_factors"=Nfactors_input, "n_catchcov"=ncol(X_ik), "n_spacecov"=dim(X_ntl)[3], "c_i"=DF[,'catch'], "predTF_i"=DF[,'PredTF_i'], "m_i"=as.numeric(DF[,'TowID'])-1, "p_i"=as.numeric(DF[,'spp'])-1, "s_i"=DF[,'sitenum']-1, "t_i"=match(DF[,'year'],YearSet)-1, "X_ik"=X_ik, "X_ntl"=X_ntl, "a_n"=a_n, "Rotation_jj"=diag(Nfactors_input), "CorrGroup_pp"=CorrGroup_pp, "spde"=NULL, "spde_aniso"=NULL, "M0"=grid_list$M0, "M1"=grid_list$M1, "M2"=grid_list$M2 )
# Add aniso inputs
if( "spde" %in% names(TmbData)){
MeshList = FishStatsUtils::Calc_Anisotropic_Mesh(Method=switch(method,"mesh"="Mesh","grid"="Grid"), loc_x=loc_xy, anisotropic_mesh=inla_mesh)
TmbData[["spde"]] = MeshList$isotropic_spde$param.inla[c("M0","M1","M2")]
TmbData[["spde_aniso"]] = list("n_s"=MeshList$anisotropic_spde$n.spde, "n_tri"=nrow(MeshList$anisotropic_mesh$graph$tv), "Tri_Area"=MeshList$Tri_Area, "E0"=MeshList$E0, "E1"=MeshList$E1, "E2"=MeshList$E2, "TV"=MeshList$TV-1, "G0"=MeshList$anisotropic_spde$param.inla$M0, "G0_inv"=inla.as.dgTMatrix(solve(MeshList$anisotropic_spde$param.inla$M0)) )
}
# Parameters
if(Version=="spatial_dfa_v2") TmbParams = list("logkappa_j"=rep(log(100),TmbData$n_factors), "alpha_j"=rep(0,TmbData$n_factors), "phi_j"=rep(0,TmbData$n_factors), "loglambda_j"=rep(log(1),TmbData$n_factors), "rho_j"=rep(0.2,TmbData$n_factors), "Psi_val"=rnorm(TmbData$n_factors*TmbData$n_species-TmbData$n_factors*(TmbData$n_factors-1)/2), "gamma_k"=rep(1,TmbData$n_cov), "log_sigma_p"=rep(log(1),TmbData$n_species), "Epsilon_input"=array(0,dim=c(TmbData$n_knots,TmbData$n_factors,TmbData$n_years)), "Omega_input"=array(0,dim=c(TmbData$n_knots,TmbData$n_factors)), "delta_i"=rep(0,TmbData$n_obs) )
if(Version=="spatial_dfa_v3") TmbParams = list("logkappa_j"=rep(log(100),TmbData$n_factors), "alpha_j"=rep(0,TmbData$n_factors), "phi_j"=rep(0,TmbData$n_factors), "loglambda_j"=rep(log(1),TmbData$n_factors), "rho_j"=rep(0.2,TmbData$n_factors), "L_val"=rnorm(TmbData$n_factors*TmbData$n_species-TmbData$n_factors*(TmbData$n_factors-1)/2), "gamma_k"=rep(1,TmbData$n_cov), "log_sigma_p"=rep(log(1),TmbData$n_species), "Epsilon_input"=array(0,dim=c(TmbData$n_knots,TmbData$n_factors,TmbData$n_years)), "Omega_input"=array(0,dim=c(TmbData$n_knots,TmbData$n_factors)), "delta_i"=rep(0,TmbData$n_obs) )
if(Version%in%c("spatial_dfa_v7","spatial_dfa_v6","spatial_dfa_v5","spatial_dfa_v4")) TmbParams = list("logkappa_j"=rep(log(100),TmbData$n_factors), "alpha_j"=rep(0,TmbData$n_factors), "phi_j"=rep(0,TmbData$n_factors), "loglambda_j"=rep(log(1),TmbData$n_factors), "rho_j"=rep(0.2,TmbData$n_factors), "L_val"=rnorm(TmbData$n_factors*TmbData$n_species-TmbData$n_factors*(TmbData$n_factors-1)/2), "gamma_k"=rep(1,TmbData$n_cov), "log_sigma_p"=rep(log(1),TmbData$n_species), "zinfl_pz"=matrix(0,nrow=TmbData$n_species,ncol=2), "Epsilon_input"=array(0,dim=c(TmbData$n_knots,TmbData$n_factors,TmbData$n_years)), "Omega_input"=array(0,dim=c(TmbData$n_knots,TmbData$n_factors)), "delta_i"=rep(0,TmbData$n_obs) )
if(Version%in%c("spatial_dfa_v10","spatial_dfa_v9","spatial_dfa_v8b","spatial_dfa_v8")) TmbParams = list("logkappa_jz"=array(log(100),dim=c(TmbData$n_factors,2)), "alpha_j"=rep(0,TmbData$n_factors), "phi_j"=rep(0,TmbData$n_factors), "loglambda_j"=rep(log(1),TmbData$n_factors), "rho_j"=rep(0.2,TmbData$n_factors), "L_val"=rnorm(TmbData$n_factors*TmbData$n_species-TmbData$n_factors*(TmbData$n_factors-1)/2), "gamma_k"=rep(1,TmbData$n_cov), "log_sigma_p"=rep(log(1),TmbData$n_species), "zinfl_pz"=matrix(0,nrow=TmbData$n_species,ncol=2), "Epsilon_input"=array(0,dim=c(TmbData$n_knots,TmbData$n_factors,TmbData$n_years)), "Omega_input"=array(0,dim=c(TmbData$n_knots,TmbData$n_factors)), "delta_i"=rep(0,TmbData$n_obs) )
if(Version%in%c("spatial_dfa_v12","spatial_dfa_v11")) TmbParams = list("logkappa_jz"=array(log(100),dim=c(TmbData$n_factors,2)), "alpha_j"=rep(0,TmbData$n_factors), "phi_j"=rep(0,TmbData$n_factors), "loglambda_j"=rep(log(1),TmbData$n_factors), "rho_j"=rep(0.2,TmbData$n_factors), "L_val"=rnorm(TmbData$n_factors*TmbData$n_species-TmbData$n_factors*(TmbData$n_factors-1)/2), "L2_val"=rnorm(TmbData$n_obsfactors*TmbData$n_species-TmbData$n_obsfactors*(TmbData$n_obsfactors-1)/2), "gamma_k"=rep(1,TmbData$n_cov), "log_sigma_p"=rep(log(1),TmbData$n_species), "zinfl_pz"=matrix(0,nrow=TmbData$n_species,ncol=2), "Epsilon_input"=array(0,dim=c(TmbData$n_knots,TmbData$n_factors,TmbData$n_years)), "Omega_input"=array(0,dim=c(TmbData$n_knots,TmbData$n_factors)), "delta_i"=rep(0,TmbData$n_obs), "eta_mb"=array(0,dim=c(TmbData$n_samples,TmbData$n_obsfactors)) )
if(Version%in%c("spatial_dfa_v13")) TmbParams = list("logkappa_jz"=array(log(100),dim=c(TmbData$n_factors,2)), "alpha_j"=rep(0,TmbData$n_factors), "phi_j"=rep(0,TmbData$n_factors), "loglambda_j"=rep(log(1),TmbData$n_factors), "rho_j"=rep(0.2,TmbData$n_factors), "L_val"=rnorm(TmbData$n_factors*TmbData$n_species-TmbData$n_factors*(TmbData$n_factors-1)/2), "L2_val"=rnorm(TmbData$n_obsfactors*TmbData$n_species-TmbData$n_obsfactors*(TmbData$n_obsfactors-1)/2), "gamma_k"=rep(1,TmbData$n_catchcov), "gamma_lp"=matrix(1,nrow=TmbData$n_spacecov,ncol=TmbData$n_species), "log_sigma_p"=rep(log(1),TmbData$n_species), "zinfl_pz"=matrix(0,nrow=TmbData$n_species,ncol=2), "Epsilon_input"=array(0,dim=c(TmbData$n_knots,TmbData$n_factors,TmbData$n_years)), "Omega_input"=array(0,dim=c(TmbData$n_knots,TmbData$n_factors)), "delta_i"=rep(0,TmbData$n_obs), "eta_mb"=array(0,dim=c(TmbData$n_samples,TmbData$n_obsfactors)) )
if(Version%in%c("spatial_dfa_v15","spatial_dfa_v14")) TmbParams = list("logkappa_jz"=array(log(100),dim=c(TmbData$n_factors,2)), "alpha_j"=rep(0,TmbData$n_factors), "phi_j"=rep(0,TmbData$n_factors), "loglambda_j"=rep(log(1),TmbData$n_factors), "rho_j"=rep(0.2,TmbData$n_factors), "L_val"=rnorm(TmbData$n_factors*TmbData$n_species-TmbData$n_factors*(TmbData$n_factors-1)/2), "L2_val"=rnorm(TmbData$n_obsfactors*TmbData$n_species-TmbData$n_obsfactors*(TmbData$n_obsfactors-1)/2), "gamma_k"=rep(1,TmbData$n_catchcov), "gamma_ptl"=array(1,dim=unlist(TmbData[c("n_species","n_years","n_spacecov")])), "log_sigma_p"=rep(log(1),TmbData$n_species), "zinfl_pz"=matrix(0,nrow=TmbData$n_species,ncol=2), "Epsilon_input"=array(0,dim=c(TmbData$n_knots,TmbData$n_factors,TmbData$n_years)), "Omega_input"=array(0,dim=c(TmbData$n_knots,TmbData$n_factors)), "delta_i"=rep(0,TmbData$n_obs), "eta_mb"=array(0,dim=c(TmbData$n_samples,TmbData$n_obsfactors)) )
if(Version%in%c("spatial_dfa_v18","spatial_dfa_v17","spatial_dfa_v16")) TmbParams = list("ln_H_input"=c(0,0), "logkappa_jz"=array(-1,dim=c(TmbData$n_factors,2)), "alpha_j"=rep(0,TmbData$n_factors), "phi_j"=rep(0,TmbData$n_factors), "loglambda_j"=rep(log(1),TmbData$n_factors), "rho_j"=rep(0.2,TmbData$n_factors), "L_val"=rnorm(TmbData$n_factors*TmbData$n_species-TmbData$n_factors*(TmbData$n_factors-1)/2), "L2_val"=rnorm(TmbData$n_obsfactors*TmbData$n_species-TmbData$n_obsfactors*(TmbData$n_obsfactors-1)/2), "gamma_k"=rep(1,TmbData$n_catchcov), "gamma_ptl"=array(1,dim=unlist(TmbData[c("n_species","n_years","n_spacecov")])), "log_sigma_p"=rep(log(1),TmbData$n_species), "zinfl_pz"=matrix(0,nrow=TmbData$n_species,ncol=2), "Epsilon_input"=array(0,dim=c(TmbData$n_knots,TmbData$n_factors,TmbData$n_years)), "Omega_input"=array(0,dim=c(TmbData$n_knots,TmbData$n_factors)), "delta_i"=rep(0,TmbData$n_obs), "eta_mb"=array(0,dim=c(TmbData$n_samples,TmbData$n_obsfactors)) )
# ,
# Random
Random = c( "Omega_input", "Epsilon_input", "delta_i" )
if(Use_REML==TRUE) Random = c(Random, "gamma_k") # , "log_zinfl"
if("eta_mb"%in%names(TmbParams)) Random = c(Random, "eta_mb")
if("gamma_lp"%in%names(TmbParams) && Use_REML==TRUE) Random = c(Random, "gamma_lp") # , "log_zinfl"
if("gamma_ptl"%in%names(TmbParams) && Use_REML==TRUE) Random = c(Random, "gamma_ptl") # , "log_zinfl"
#Random = NULL
# Fixed values
Map = list()
Map[["alpha_j"]] = factor( rep(NA,length(TmbParams[["alpha_j"]])) )
#Map[["phi_j"]] = factor( rep(NA,length(TmbParams[["phi_j"]])) )
# Parameters shared between dynamic factors
Map[["rho_j"]] = factor( rep(1,length(TmbParams[["rho_j"]])) )
Map[["loglambda_j"]] = factor( rep(1,length(TmbParams[["loglambda_j"]])) )
if(Version%in%c("spatial_dfa_v7","spatial_dfa_v6","spatial_dfa_v5","spatial_dfa_v4")){
if( Kappa_Type=="Constant" ) Map[["logkappa_j"]] = factor( rep(1,length(TmbParams[["logkappa_j"]])) )
if( Kappa_Type=="Omega_vs_Epsilon" ) stop("Not implemented")
}
if(Version%in%c("spatial_dfa_v18","spatial_dfa_v17","spatial_dfa_v16","spatial_dfa_v15","spatial_dfa_v14","spatial_dfa_v13","spatial_dfa_v12","spatial_dfa_v11","spatial_dfa_v10","spatial_dfa_v9","spatial_dfa_v8b","spatial_dfa_v8")){
if( Kappa_Type=="Constant" ) Map[["logkappa_jz"]] = factor( array(1,dim=dim(TmbParams[["logkappa_jz"]])) )
if( Kappa_Type=="Omega_vs_Epsilon" ) Map[["logkappa_jz"]] = factor( outer(rep(1,TmbData$n_factors),c(1,2)) )
}
##### Run-specific fixed values
# Turn off anisotropy
if( "ln_H_input"%in%names(TmbParams) & Aniso==FALSE ){
Map[["ln_H_input"]] = factor( c(NA,NA) )
}
# Turn off zero-inflation parameters
if( !is.na(Options_vec["EncounterFunction"]) && Options_vec["EncounterFunction"]==2 ){
Map[["zinfl_pz"]] = factor( cbind(1:TmbData$n_species, rep(NA,TmbData$n_species)) )
}
# ObsModel specification
if( Options_vec['ObsModel'] %in% c(0,4) ){
Map[["zinfl_pz"]] = factor( rep(NA,prod(dim(TmbParams[["zinfl_pz"]]))) )
}
if( Options_vec['ObsModel'] %in% c(1,2,3,4) ){
# Shrink size for speed-up during compile
TmbParams[["delta_i"]] = 0
Map[["delta_i"]] = factor( rep(NA,length(TmbParams[["delta_i"]])) )
}
# Turn off Phi
if( Include_Phi==FALSE | Nfactors==0 ){
Map[["phi_j"]] = factor( rep(NA,length(TmbParams[["phi_j"]])) )
TmbParams[["phi_j"]] = rep(0,length(TmbParams[["phi_j"]]))
}
# Turn off Rho
if( Include_Rho==FALSE | Nfactors==0 ){
Map[["rho_j"]] = factor( rep(NA,length(TmbParams[["rho_j"]])) )
TmbParams[["rho_j"]] = rep(0,length(TmbParams[["rho_j"]]))
}
# Turn off spatial variation (Omega)
if( Options_vec['Include_Omega']==FALSE | Nfactors==0 ){
Map[["Omega_input"]] = factor( array(NA,dim=dim(TmbParams[["Omega_input"]])) )
TmbParams[["Omega_input"]] = array(0,dim=dim(TmbParams[["Omega_input"]]))
Map[["loglambda_j"]] = factor( rep(NA,length(TmbParams[["loglambda_j"]])) )
TmbParams[["loglambda_j"]] = rep(25,length(TmbParams[["loglambda_j"]]))
}
# Turn off spatiotemporal variation (Epsilon)
if( Options_vec['Include_Epsilon']==FALSE | Nfactors==0 ){
Map[["Epsilon_input"]] = factor( array(NA,dim=dim(TmbParams[["Epsilon_input"]])) )
TmbParams[["Epsilon_input"]] = array(0,dim=dim(TmbParams[["Epsilon_input"]]))
Map[["loglambda_j"]] = factor( rep(NA,length(TmbParams[["loglambda_j"]])) )
TmbParams[["loglambda_j"]] = rep(25,length(TmbParams[["loglambda_j"]]))
}
# Turn off both spatial and spatiotemporal variation
if( (Options_vec['Include_Omega']==FALSE & Options_vec['Include_Epsilon']==FALSE) | Nfactors==0 ){
TmbData$Options_vec[c('Include_Omega','Include_Epsilon')] = 0
Map[["logkappa_jz"]] = factor( array(NA,dim=dim(TmbParams[["logkappa_jz"]])) )
Map[["rho_j"]] = factor( rep(NA,length(TmbParams[["rho_j"]])) )
Map[["L_val"]] = factor( rep(NA,length(TmbParams[["L_val"]])) )
if( "ln_H_input"%in%names(TmbParams) ) Map[["ln_H_input"]] = factor( c(NA,NA) )
}
#
if( Options_vec["Correlated_Overdispersion"]==0 ){
TmbParams[["L2_val"]][] = 0
Map[["L2_val"]] = factor( rep(NA,length(TmbParams[["L2_val"]])) )
# Shrink size for speed-up during compile
TmbParams[["eta_mb"]] = array(0,dim=c(1,ncol(TmbParams$eta_mb)))
Map[["eta_mb"]] = factor( array(NA,dim(TmbParams[["eta_mb"]])) )
}
# Default setting for spatio-temporal covariates -- constant across time
if( "gamma_ptl"%in%names(TmbParams) ){
Map[["gamma_ptl"]] = factor( aperm(outer(matrix(1:(TmbData$n_species*TmbData$n_spacecov),ncol=TmbData$n_spacecov,nrow=TmbData$n_species),rep(1,TmbData$n_years)),c(1,3,2)) )
}
# Turn off catchability covariates
if( "gamma_k"%in%names(TmbParams) && all(X_ik==0) ){
Map[["gamma_k"]] = factor( rep(NA,length(TmbParams[["gamma_k"]])) )
TmbParams[["gamma_k"]][] = 0
}
# Turn off spatial covariates
if( "gamma_lp"%in%names(TmbParams) && all(X_nl==0) ){
Map[["gamma_lp"]] = factor( array(NA,dim=dim(TmbParams[["gamma_lp"]])) )
TmbParams[["gamma_lp"]][] = 0
}
# Turn off spatio-temporal covariates
if( "gamma_ptl"%in%names(TmbParams) && all(X_ntl==0) ){
Map[["gamma_ptl"]] = factor( array(NA,dim=dim(TmbParams[["gamma_ptl"]])) )
TmbParams[["gamma_ptl"]][] = 0
}
# Make independent inputs
if( IndependentTF[1]==TRUE ){
if(Nfactors!=Nspecies) stop("If independent, Nfactors must equal Nspecies")
TmbParams[["L_val"]] = diag( rep(1,Nspecies) )[lower.tri(diag(1,Nspecies),diag=TRUE)]
Map[["L_val"]] = diag( 1:Nspecies )[lower.tri(diag(1,Nspecies),diag=TRUE)]
Map[["L_val"]] = factor( ifelse(Map[["L_val"]]==0,NA,Map[["L_val"]]) )
}
if( IndependentTF[2]==TRUE ){
if(Nobsfactors!=Nspecies) stop("If independent, Nfactors must equal Nspecies")
TmbParams[["L2_val"]] = diag( rep(1,Nspecies) )[lower.tri(diag(1,Nspecies),diag=TRUE)]
Map[["L2_val"]] = diag( 1:Nspecies )[lower.tri(diag(1,Nspecies),diag=TRUE)]
Map[["L2_val"]] = factor( ifelse(Map[["L_val"]]==0,NA,Map[["L_val"]]) )
}
# Check for bugs
if( CheckForBugs==TRUE ){
if( any(sapply(TmbParams[c("alpha_j","phi_j","loglambda_j","rho_j")],length)<TmbData$n_factors) ) stop("Problem with parameter-vectors subscripted j")
if( is.null(EncounterFunction) | is.null(ObsModel)) stop("Problem with NULL inputs")
if( Options_vec["Correlated_Overdispersion"]==1 ) if( max(TmbData$m_i)>TmbData$n_samples | min(TmbData$m_i)<0 ) stop("Problem with m_i")
if( max(TmbData$t_i)>TmbData$n_years | min(TmbData$t_i)<0 ) stop("Problem with t_i")
}
# Check mapped stuff
Remove_Random = NULL
for(i in 1:length(Random) ){
if( Random[i]%in%names(Map) && all(is.na(Map[[Random[i]]])) ){
Remove_Random = c(Remove_Random, Random[i])
}
}
Random = setdiff(Random, Remove_Random)
if(length(Random)==0) Random = NULL
# Return
Return = list("TmbData"=TmbData, "TmbParams"=TmbParams, "Random"=Random, "Map"=Map, "mesh"=inla_mesh, "Remove_Random"=Remove_Random)
return( Return )
}
|
c37f1fef5e992c17834ed14fb8c889d51ed252f1 | cb323e59d1fa0a7d530ac2efbb19ee7a62827079 | /R/utils.R | 674ae5206ce2039f2c29fe4a513dcff1ee799b5f | [] | no_license | jpmarindiaz/datafringe | 7fb91241af0e745574e648f0187f40964342d0d2 | 033cce244d2f4273e1cfc6e80073524d405ff6fa | refs/heads/master | 2021-01-10T22:46:05.837528 | 2019-04-02T05:21:02 | 2019-04-02T05:21:02 | 70,357,127 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,415 | r | utils.R |
# TRUE for each element of `x` that looks like a Google Sheets URL,
# i.e. contains the literal substring "docs.google.com/spreadsheets".
isGsheetUrl <- function(x){
  gsheet_marker <- "docs.google.com/spreadsheets"
  grepl(gsheet_marker, x, fixed = TRUE)
}
# Strip every POSIX punctuation character from each element of `x`.
trim_punct <- function (x){
  punct_class <- "[[:punct:]]"
  gsub(punct_class, "", x)
}
# Convert every factor column of a data.frame to character,
# leaving all other columns untouched.
dfFactorsToCharacters <- function(d){
  is_fac <- vapply(d, is.factor, logical(1))
  d[is_fac] <- lapply(d[is_fac], as.character)
  d
}
# NOTE(review): despite its name, this returns TRUE when `col` is
# ABSENT from the columns of `d` (callers appear to use it as a
# "column missing" test), so the negation is deliberately preserved.
colInDf <- function(col,d){
  present <- col %in% colnames(d)
  !present
}
# Evaluate `expr` and report whether it raised at least one warning.
# Warnings are muffled (they do not propagate to the caller) and
# errors are swallowed, so an expression that errors without first
# warning yields FALSE.
has_warning <- function(expr) {
  saw_warning <- FALSE
  note_warning <- function(w) {
    saw_warning <<- TRUE
    invokeRestart("muffleWarning")
  }
  withCallingHandlers(
    tryCatch(expr, error = function(e) NULL),
    warning = note_warning
  )
  saw_warning
}
# Empty-coalescing operator: return `y` when `x` is "empty"
# (zero-length, NULL, a single NA, or a character vector of only
# blank strings), otherwise return `x`.
# FIX: the original evaluated `is.na(x)` inside `||`, which errors
# for vectors of length > 1 on R >= 4.3 (suppressWarnings() does not
# hide errors); the NA test is now restricted to scalars. Also uses
# is.character() instead of the fragile `class(x) == "character"`.
`%||%` <- function (x, y){
  suppressWarnings({
    if (is.empty(x)) {
      y
    } else if (is.null(x) || (length(x) == 1 && is.na(x))) {
      y
    } else if (is.character(x) && all(nchar(x) == 0)) {
      y
    } else {
      x
    }
  })
}
# TRUE when `x` has no elements (NULL or any zero-length vector/list).
is.empty <- function(x){
  length(x) == 0L
}
# Replace NA cells, and cells equal to any value in `empty`, with "".
# FIX: the original second step compared `df %in% empty`, which
# matches the DEPARSED whole columns (a logical of length ncol), so
# the blank-value replacement effectively never fired; the comparison
# is now done element-wise, column by column.
# NOTE(review): assumes character-like columns for the `empty` match;
# numeric columns are left alone (they cannot equal " ").
naToEmpty <- function(df, empty = c(" ")){
  df[is.na(df)] <- ""
  for (nm in names(df)) {
    df[[nm]][df[[nm]] %in% empty] <- ""
  }
  df
}
#' Strip the final (alphanumeric) file extension from a path.
#' Paths without an extension are returned unchanged.
#' @export
file_path_sans_ext <- function (x)
{
  ext_pattern <- "([^.]+)\\.[[:alnum:]]+$"
  sub(ext_pattern, "\\1", x)
}
#' Build a path to a file shipped inside an installed package
#' @name sysfile
#' @description Returns `system.file(..., package = package)`; when
#'   `package` is NULL the components are simply joined with
#'   `file.path()` instead.
#' @param ... path components
#' @param package package name, or NULL for a plain relative path
#' @return character path ("" when the file is absent from the package)
#' @export
#' @examples \dontrun{
#' }
sysfile <- function(..., package = "datafringe"){
  if (is.null(package)) {
    return(file.path(...))
  }
  system.file(..., package = package)
}
|
1dca5a715422bce00659e571a8edb2db32026ddf | d677e29760774381438f25a11c5b3e09141477dc | /3/fpfi3_0.7.6/fpfi3/man/ProjectNTrees.Rd | fb0b9ad1b872defd100420845bf45980ea37565d | [] | no_license | grpiccoli/FF | 695e681f8c062f0c71af0488ac1e288283c63f87 | e19d0098c592dd36303eaad7cc0d771c0813edbd | refs/heads/master | 2022-04-24T00:36:39.447469 | 2020-04-19T07:42:05 | 2020-04-19T07:42:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,596 | rd | ProjectNTrees.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/project.R
\name{ProjectNTrees}
\alias{ProjectNTrees}
\title{Project the number of trees.}
\usage{
ProjectNTrees(species, region, N0, BA1, Age1, Age2, SI)
}
\arguments{
\item{species}{A character indicating one of the species name. The function is case insensitive.}
\item{region}{A character indicating one of the region names (depending on the species). The function is case insensitive. Default is NULL.}
\item{N0}{A scalar indicating the initial number of trees.}
\item{BA1}{A scalar numeric indicating the initial BA.}
\item{Age1}{A scalar numeric indicating the Age at which \code{N0} is measured.}
\item{Age2}{A scalar integer at which the projection is desired.}
\item{SI}{A scalar numeric indicating the Site Index.}
}
\value{
A scalar numeric.
}
\description{
This function projects the number of trees at any desired age, using different parameters and structure
depending on the species and region.
}
\details{
Models were extracted from:
\itemize{
\item Eucalyptus:
\itemize{
\item Uruguay: document
}
\item Pinus Radiata:
\itemize{
\item New Zealand: document
}
\item Pinus Taeda:
\itemize{
\item Uruguay: document
}
}
}
\examples{
1+1
}
\seealso{
Other Internals: \code{\link{Dispatcher}},
\code{\link{EstimateDBHSd}},
\code{\link{GetBAAfterThinning}},
\code{\link{ProjectBasalArea}},
\code{\link{ProjectDominantHeight}},
\code{\link{ProjectVolume}}, \code{\link{SiteIndex}},
\code{\link{availableSpecies}}
}
\author{
Álvaro Paredes, \email{alvaro.paredes@fpfi.cl}
}
\concept{Internals}
|
60d400c5b41fde642141063e007727f4078680eb | e5bf6df521b05964ab25f5af57423506a52e3807 | /basicPreprocessing.R | a2e95d694f1b3796d56d35ca4b509e6e0d36718c | [] | no_license | PraveenSebastian/Coursera-Data-Science-Machine-Learning-Project | 1687f71fb8b61e31541a5ee592e1474a8d32b1f7 | 9dbb7fef9fe94ec633eedd1151319c4b2726b8a3 | refs/heads/master | 2021-09-25T10:43:36.644032 | 2018-10-21T08:16:55 | 2018-10-21T08:16:55 | 103,850,082 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,866 | r | basicPreprocessing.R | library(caret)
# Exploratory preprocessing demo with caret on the kernlab spam data
# (caret itself is loaded on the preceding line of this file).
library(kernlab)
data(spam)
# 75/25 train/test split stratified on the outcome
inTrain <- createDataPartition(y=spam$type,p=0.75, list=FALSE)
training <- spam[inTrain,]
testing <- spam[-inTrain,]
hist(training$capitalAve,main="",xlab = "ave.capital run length")
sd(training$capitalAve)
#Standardising train set (center to mean 0, scale to sd 1)
trainCapAve <- training$capitalAve
trainCapAves <- (trainCapAve - mean(trainCapAve))/sd(trainCapAve)
mean(trainCapAves)
sd(trainCapAves)
#Standardising test set -- deliberately uses the TRAINING mean/sd
testCapAve <- testing$capitalAve
testCapAves <- (testCapAve-mean(trainCapAve))/sd(trainCapAve)
mean(testCapAves)
#Standardising with the preProcess function
preObj <- preProcess(training[,-58], method=c("center","scale"))
trainCapAves <- predict(preObj,training[,-58])$capitalAve
mean(trainCapAves)
sd(trainCapAves)
# FIX: the original applied preObj to `training` again here although
# this line is meant to standardize the TEST set; use `testing`.
testCapAves <- predict(preObj,testing[,-58])$capitalAve
mean(testCapAves)
sd(testCapAves)
#Standardizing - preProcess argument of train()
set.seed(32343)
modelFit <- train(type~.,data = training,preProcess=c("center","scale"),method = "glm")
modelFit
#Standardizing - Box-Cox transforms
preObj <- preProcess(training[,-58],method = c("BoxCox"))
trainCapAves <- predict(preObj, training[,-58])$capitalAve
par(mfrow=c(1,2));hist(trainCapAves);qqnorm(trainCapAves)
#Standardizing - Imputing data
set.seed(13343)
#Make some values NA (about 5% of rows)
training$capAve <- training$capitalAve
selectNA <- rbinom(dim(training)[1],size=1,prob = 0.05)==1
training$capAve[selectNA] <- NA
#Impute and Standardize
preObj <- preProcess(training[,-58],method = "knnImpute")
capAve <- predict(preObj,training[,-58])$capAve
#Standardize true values
capAveTruth <- training$capitalAve
capAveTruth <- (capAveTruth-mean(capAveTruth))/sd(capAveTruth)
quantile(capAve - capAveTruth)
# FIX: the original indexed with `SelectNA` (capital S), which is
# undefined -- the variable created above is `selectNA`.
quantile((capAve - capAveTruth)[selectNA])
quantile((capAve - capAveTruth)[!selectNA])
|
f60c59f72b6de3d886a462cf74b9cc02a1f1e08c | 320f0f7a77f0d1e863429c71b4666baaeefcf343 | /simple-data-interpolation/Script_interpolacion.r | 49f2e80fccd0e866460d10ae3fc8f4741608bbd9 | [] | no_license | feahorita/data-processing-and-analytics | cc66b8c98c38af45f60f46aa91aa15be9c906ca3 | b89c4fd6745573b64876cc0a8285d34f3de1600c | refs/heads/master | 2020-06-29T12:20:45.656033 | 2016-12-10T18:47:23 | 2016-12-10T18:47:23 | 74,427,249 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,205 | r | Script_interpolacion.r | #################################################################################################################################
# Subroutine that linearly interpolates the level values so that they
# match the model inputs.
#################################################################################################################################
rm(list=ls()) # clear the whole workspace (kept from the original script)
h1<-read.delim("h.csv", header = FALSE, sep = ";") # read the level data from a csv file
h1<-h1[,1]
h<-h1*1000 # scale the levels by 1000
HandD<-read.delim("H and D.csv", header = FALSE, sep = ";") # read the reference H/D table from a csv file
# Always check that decimals use "." and that the value separator is ";"
H<-HandD[,1] # first column holds the H values
D<-HandD[,2] # second column holds the D values
m<-length(H) # size of vector H
n<-length(h) # size of vector h
d<-c() # empty result vector, filled position by position below
k<-1 # index of the last matched interval; lets the inner scan resume there
x<-1 # first level to process
ch<-1 # iteration counter used to checkpoint partial results
interp=matrix(data=NA, nrow=n, ncol=2)
for (i in x:n) { # loop over every requested level
  for (j in k:m) { # scan the reference table, resuming from the last match
    if (h[i]>H[j]) { # this and the next if locate the bracketing pair for interpolation
      if (h[i]<H[j+1]) {
        d[i]<-(D[j+1]-D[j])/(H[j+1]-H[j])*(h[i]-H[j])+D[j] # linear interpolation inside the interval
        k<-j
      } else {
        if (h[i]==H[j+1]) { # exact match on the upper endpoint
          d[i]<-D[j+1]
          k<-j
        }
      }
    }
    if (h[i]==H[j]) { # exact match on the lower endpoint
      d[i]<-D[j]
      k<-j # FIX: was `k<-jwa`; `jwa` is undefined and aborted the script here
    }
  }
  ch<-ch+1
  if ((ch %% 1000) == 0) { # checkpoint partial results every 1000 levels
    interp<-cbind(h,d) # matrix of interpolated values
    write.csv(interp,"hadnd.csv") # interpolated values per time and data value
  }
}
interp<-cbind(h,d) # matrix of interpolated values
write.csv(interp,"hadnd.csv") # interpolated values per time and data value
b535d32edbc52aa3e8abeeaa51aee25c6db36bde | 7618115043c871bee04f6c078d3881c585e8d865 | /Plot2.R | 4b359609ac06a93a4b1fe73141c8b2fb19eb29ed | [] | no_license | stlinc/ExData_Plotting1 | d90a9c5d42ec59f73c40bde8daaf7140afaf5835 | 7d9e80b0719b9341220e23c5a22aeb8a54ae8438 | refs/heads/master | 2021-01-12T14:01:43.987258 | 2016-03-28T14:56:31 | 2016-03-28T14:56:31 | 54,820,868 | 0 | 0 | null | 2016-03-27T09:12:06 | 2016-03-27T09:12:06 | null | UTF-8 | R | false | false | 807 | r | Plot2.R | #Load data
# Load the full power-consumption data set (";"-separated, "." decimals)
householdPowerData <- read.table("./household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors=FALSE, dec=".")
#Subset of records on 1/2/2007 and 2/2/2007
householdPowerDataSubset <- householdPowerData[grepl("^(1/2/2007|2/2/2007)", householdPowerData$Date), ]
#Base plot
library(datasets)
## Create plot on screen device
globalActivePower <- as.numeric(householdPowerDataSubset$Global_active_power)
# combine the Date and Time columns into POSIX timestamps for the x axis
dateTime <- strptime(paste(householdPowerDataSubset$Date, householdPowerDataSubset$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
plot(dateTime, globalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
## Copy plot (plot on current device) to a PNG file
# FIX: this script is Plot2.R but wrote to "Plot1.png", clobbering
# plot 1's output; save to "Plot2.png" instead.
dev.copy(png, file = "Plot2.png", width = 480, height = 480)
## Don't forget to close the PNG device!
dev.off()
|
bff180389a647f45d050a65a93af0afce7d81d6d | 08ea1a5d55021669287b7e81aac6e1f38f6eb034 | /plot1-6.R | 8fc6b75baa112d4af8e41e8a0457391418afe63b | [] | no_license | rubenogit/ExData_Plotting2 | 380db4b9c8002248f6ee05c01db0184e15d052be | a9bbd7c6a025cd9e3e67e5e4dcdee19cd158d5f8 | refs/heads/master | 2021-01-17T17:07:01.458123 | 2015-07-25T17:40:53 | 2015-07-25T17:40:53 | 39,695,985 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,037 | r | plot1-6.R |
#Download and unzip the data file
address <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
# rewrite https -> http, presumably because download.file could not
# fetch https on the author's platform -- TODO confirm still needed
address <- sub("^https", "http", address)
zipname <- "NEI_data.zip"
download.file(address,zipname)
unzip(zipname)
#housekeeping - remove the zip as it is no longer needed
file.remove("NEI_data.zip")
#housekeeping
rm(address, zipname)
#Read the data
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
##aggregate emmisions per year and plot
# Plot 1: total PM2.5 emissions (all sources, all locations) per year
emissions_per_year <- aggregate(Emissions ~ year, data = NEI, sum)
plot(emissions_per_year$year, emissions_per_year$Emissions,
type="b", xlab="Year", ylab="PM2.5 Emissions",
main="Total PM2.5 emissions from all sources", xaxt="n")
# custom x axis showing only the four measurement years
axis(1, at=c(1999,2002,2005,2008), lab=c("1999","2002","2005","2008"))
# dashed red reference line at the 1999 level
abline(h=emissions_per_year$Emissions[emissions_per_year$year==1999],lty=4,col="red")
#save to png
dev.copy(png,'plot1.png')
dev.off()
##aggregate emmisions per year for Baltimore City (fips 24150) and plot
# Plot 2: total emissions per year restricted to Baltimore City (fips 24510)
emissions_per_year_balt <- aggregate(Emissions ~ year, data = NEI[NEI$fips == "24510",], sum)
#plot(emissions_per_year_balt$year, emissions_per_year_balt$Emissions, type="b")
plot(emissions_per_year_balt$year, emissions_per_year_balt$Emissions,
type="b", xlab="Year", ylab="PM2.5 Emissions",
main="Total PM2.5 emissions in Baltimore", xaxt="n")
# custom x axis showing only the four measurement years
axis(1, at=c(1999,2002,2005,2008), lab=c("1999","2002","2005","2008"))
# dashed red reference line at the 1999 level
abline(h=emissions_per_year_balt$Emissions[emissions_per_year_balt$year==1999],lty=4,col="red")
#save to png
dev.copy(png,'plot2.png')
dev.off()
##aggregate emmisions per year and type for Baltimore City (fips 24150)
# Plot 3: Baltimore emissions per year, faceted by source type
emissions_per_year_type_balt <- aggregate(Emissions ~ year + type, data = NEI[NEI$fips == "24510",], sum)
###use ggplot2 here
library(ggplot2)
# 1999 values per type, drawn as a dashed reference line in each facet
Em1999 <- emissions_per_year_type_balt[emissions_per_year_type_balt$year==1999,]
plot3 <- ggplot(emissions_per_year_type_balt, aes(year, Emissions)) +
geom_point() +
geom_line() +
geom_hline(data=Em1999, aes(yintercept=Emissions), linetype=4, colour="red") +
facet_wrap(~type, ncol=2)
plot3
#save to png
dev.copy(png,'plot3.png')
dev.off()
#####coal
#SCC_coal <- as.character(SCC$SCC[grep("Coal", SCC$EI.Sector)])
#use Short Name to find combination of Comb and Coal
# Plot 4: nationwide emissions from coal-combustion sources
SCC_coal <- as.character(SCC$SCC[grep("(Comb).*(Coal)", SCC$Short.Name)])
emissions_per_year_coal <- aggregate(Emissions ~ year, data = NEI[NEI$SCC %in% SCC_coal,], sum)
#plot(emissions_per_year_coal$year, emissions_per_year_coal$Emissions, type="b")
plot(emissions_per_year_coal$year, emissions_per_year_coal$Emissions,
type="b", xlab="Year", ylab="PM2.5 Emissions",
main="Total coal related PM2.5 emissions", xaxt="n")
# custom x axis showing only the four measurement years
axis(1, at=c(1999,2002,2005,2008), lab=c("1999","2002","2005","2008"))
# dashed red reference line at the 1999 level
abline(h=emissions_per_year_coal$Emissions[emissions_per_year_coal$year==1999],lty=4,col="red")
#save to png
dev.copy(png,'plot4.png')
dev.off()
###vehicles baltimore City (fips 24150)
###I include the Highway vehicles and the Off-highway vehicles
# Plot 5: motor-vehicle emissions within Baltimore City (fips 24510)
SCC_vehicles <- as.character(SCC$SCC[grep("(Highway Veh)|(Off-highway)", SCC$Short.Name)])
emissions_per_year_vehicles_balt <- aggregate(Emissions ~ year, data = NEI[NEI$SCC %in% SCC_vehicles & NEI$fips == "24510",], sum)
#plot(emissions_per_year_vehicles_balt$year, emissions_per_year_vehicles_balt$Emissions, type="b")
plot(emissions_per_year_vehicles_balt$year, emissions_per_year_vehicles_balt$Emissions,
type="b", xlab="Year", ylab="PM2.5 Emissions",
main="Motor vehicle related PM2.5 emissions Baltimore", xaxt="n")
# custom x axis showing only the four measurement years
axis(1, at=c(1999,2002,2005,2008), lab=c("1999","2002","2005","2008"))
# dashed red reference line at the 1999 level
abline(h=emissions_per_year_vehicles_balt$Emissions[emissions_per_year_vehicles_balt$year==1999],lty=4,col="red")
#save to png
dev.copy(png,'plot5.png')
dev.off()
###vehicles baltimore City (fips 24150)
###I include the Highway vehicles and the Off-highway vehicles
# Plot 6: motor-vehicle emissions, Baltimore (24510) vs LA County (06037),
# each series expressed relative to its own 1999 level.
SCC_vehicles <- as.character(SCC$SCC[grep("(Highway Veh)|(Off-highway)", SCC$Short.Name)])
emissions_per_year_vehicles_balt_la <- aggregate(Emissions ~ year + fips, data = NEI[NEI$SCC %in% SCC_vehicles & NEI$fips %in% c("24510","06037"),], sum)
bl <- emissions_per_year_vehicles_balt_la
#calculate relative emissions (each year divided by that county's 1999 value)
bl$RelEmissions[bl$fips == "06037"] <-
  (bl$Emissions[bl$fips == "06037"] /
   bl$Emissions[bl$year == 1999 & bl$fips == "06037"])
bl$RelEmissions[bl$fips == "24510"] <-
  (bl$Emissions[bl$fips == "24510"] /
   bl$Emissions[bl$year == 1999 & bl$fips == "24510"])
# Empty canvas (type="n"); the two series are added with lines() below.
# FIX: removed the trailing comma after yaxt="n" -- it passed an empty
# argument into plot(), which raises "argument is missing" when the
# missing argument is evaluated.
plot(bl$year,
     bl$RelEmissions,
     type="n",
     xlab="Year", ylab="PM2.5 Emissions as percentage of 1999 level",
     main="Relative motor vehicle related PM2.5 emissions Baltimore and LA",
     xaxt="n", yaxt="n")
axis(1, at=c(1999,2002,2005,2008), lab=c("1999","2002","2005","2008"))
axis(2, at=c(0.4,0.6,0.8,1.0,1.2), lab=c("40%","60%","80%","100%","120%"))
# LA County in red, Baltimore in blue
lines(bl$year[bl$fips == "06037"],
      bl$RelEmissions[bl$fips == "06037"],
      type="b", col="red")
lines(bl$year[bl$fips == "24510"],
      bl$RelEmissions[bl$fips == "24510"],
      type="b", col="blue")
legend("bottomleft", legend=c("Los Angeles","Baltimore"), col=c("red","blue"), lty=1, bty="y")
#save to png
dev.copy(png,'plot6.png')
dev.off()
# Plot 6b: same comparison as plot 6 but in absolute emissions,
# reusing the `bl` data frame built above.
plot(bl$year,
bl$Emissions,
type="n",
xlab="Year", ylab="PM2.5 Emissions",
main="Total motor vehicle related PM2.5 emissions Baltimore and LA",
xaxt="n")
# custom x axis showing only the four measurement years
axis(1, at=c(1999,2002,2005,2008), lab=c("1999","2002","2005","2008"))
# Baltimore in blue, LA County in red
lines(bl$year[bl$fips == "24510"],
bl$Emissions[bl$fips == "24510"],
type="b", col="blue")
lines(bl$year[bl$fips == "06037"],
bl$Emissions[bl$fips == "06037"],
type="b", col="red")
# dashed reference lines at each county's 1999 level
abline(h=bl$Emissions[bl$year==1999 & bl$fips == "24510"],lty=4,col="black")
abline(h=bl$Emissions[bl$year==1999 & bl$fips == "06037"],lty=4,col="black")
legend("right", legend=c("Los Angeles","Baltimore"), col=c("red","blue"), lty=1, bty="y")
#save to png
dev.copy(png,'plot6b.png')
dev.off()
|
229e3d4df7bde2270190b259b9604bf8e94091d5 | 5c1428fd33542b114c915db0bbd303a72f1ff491 | /man/setupColorBounds.Rd | ccdaf00ab581dea06a2acf9e97c59d74b8c32379 | [] | no_license | CraigMohn/rideProfile | be6b3af50065807065c35974cd99157f633503c4 | aebf051a59b747f4ef923403495d720f62c7e774 | refs/heads/master | 2020-07-26T13:28:55.403780 | 2020-05-01T15:45:09 | 2020-05-01T15:45:09 | 208,659,667 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,626 | rd | setupColorBounds.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drawMisc.R
\name{setupColorBounds}
\alias{setupColorBounds}
\title{create functions to map variables into colors based on parameters}
\usage{
setupColorBounds(
palette,
legendwidth,
speedLow = 3,
speedHigh = 40,
speedColorLow = 0,
speedColorHigh = 40,
gradeLow = -0.15,
gradeHigh = 0.15,
gradeColorLow = 40,
gradeColorHigh = 0,
hrLow = 100,
hrHigh = 170,
hrColorLow = 11,
hrColorHigh = 26,
cadCont = TRUE,
cadTarget = 88,
cadLow = 50,
cadHigh = 120,
cadColorLow = 4,
cadColorMid = 10,
cadColorHigh = 15,
powerLow = 75,
powerHigh = 400,
powerColorLow = 9,
powerColorHigh = 31,
imperial = TRUE,
...
)
}
\arguments{
\item{palette}{viridis palette name, see main routine}
\item{legendwidth}{width of a legend chunk in distance units}
\item{speedLow}{speeds below this are same color}
\item{speedHigh}{speeds above this are same color}
\item{speedColorLow}{set color for speedLow and slower}
\item{speedColorHigh}{set color for speedHigh and faster}
\item{gradeLow}{grades below this are same color}
\item{gradeHigh}{grades above this are same color}
\item{gradeColorLow}{set color for gradeLow and steeper downhill}
\item{gradeColorHigh}{set color for gradeHigh and steeper uphill}
\item{hrLow}{heartrates below this are same color}
\item{hrHigh}{heartrates above this are same color}
\item{hrColorLow}{set color for hrLow and lower}
\item{hrColorHigh}{set color for hrHigh and higher
colors are from same palette as speeds, number is the speed corresponding
to the desired limit on the range of heartrates}
\item{cadCont}{display cadence as a continuous color map}
\item{cadTarget}{target cadence range minimum}
\item{cadLow}{lower cadence limit for continuous color, all
lower cadences are displayed as same color}
\item{cadHigh}{upper cadence limit for continuous color, all
higher cadences are displayed as same color}
\item{cadColorLow}{set color for cadence at cadLow or below}
\item{cadColorMid}{set color for cadence above low but below target}
\item{cadColorHigh}{set color for cadence above target}
\item{powerLow}{power outputs below this are same color}
\item{powerHigh}{power outputs above this are same color}
\item{powerColorLow}{set color for powerLow and lower}
\item{powerColorHigh}{set color for powerHigh and higher}
\item{imperial}{use mi and ft instead of km and m}
\item{...}{stuff for other functions}
}
\value{
a named list of named lists of functions
}
\description{
\code{setupColorBounds} creates lists of functions for color bars and legends
}
|
11c506307c2923ab339c3037fb43c8a95bf02c72 | 85bd593fc4603e99bbb6e8e097960ab832a469d3 | /man/geodesic.Rd | e772ebd0057f45b7693955bed8b5e95d1725b246 | [] | no_license | cran/GeodesiCL | 2dab609c79d45ceba619cf478ad5f5382c1d7667 | 09c72a0c6deefe2b168024406087c2fcf8ae34b3 | refs/heads/master | 2023-04-25T15:26:14.862721 | 2021-05-25T11:20:02 | 2021-05-25T11:20:02 | 370,748,265 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,256 | rd | geodesic.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Geodesic.R
\name{geodesic}
\alias{geodesic}
\title{To convert from Cartesian coordinate to Geographic coordinate.}
\usage{
geodesic(a, XYZ_df, digits = 4)
}
\arguments{
\item{a}{Selection of Ellipsoid.}
\item{XYZ_df}{Sexagesimal longitude and latitude as dataframe.}
\item{digits}{Number of digits the seconds are \code{\link{round}ed} to. DEFAULT: 4}
}
\value{
data.frame with the data in the following order: "Pt", "Lat", "Lon", "H".
}
\description{
With this function it is possible to convert from Cartesian coordinate to Geographic coordinate and obtain the decimal precision that you assign.
}
\note{
create data frame of epsg codes by epsg <- rgdal::make_EPSG()
}
\examples{
# Point name
Pto <- "St1"
# Cartesian data
X <- 1711591.78090565
Y <- -5060304.1659587
Z <- -3473256.69328603
# Pto, X, Y and Z as data.frame
XYZ_df <- as.data.frame(cbind(Pto, X, Y, Z))
# To know the ellipsoids and the order open the Ellipsoids in the package and look for it number
Ellip <- Ellipsoids
#View(Ellip)
# We choose the number 5 which is GRS80
value <- geodesic(5, XYZ_df, digits = 4)
print(value)
}
\references{
https://github.com/OSGeo/PROJ & https://github.com/cran/rgdal
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.