blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ad89025b136ec07671962711144ad497a678f44f | bcedcc38635d223401b16aac4cd095f4d38fd25f | /simulation.R | d07a50a1442fdeab02bc0cf68447cd25d7aeff6a | [] | no_license | paulfournel/smurfs | aecb8ddbfed10b8c59895a3af256f869813c0b44 | 32505791ac291f4d6871cf08bfe5c25df2125607 | refs/heads/master | 2021-01-10T05:27:10.625532 | 2017-08-06T05:57:19 | 2017-08-06T05:57:19 | 46,412,191 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,125 | r | simulation.R | require(schoolmath)
require(pracma)
number_of_smurfs <- 3  # problem size used interactively; test_smurfs() takes it as an argument
# TRUE when at least one smurf (row of the square knowledge matrix) has
# gathered a full row of ones, i.e. knows about every smurf.
has_solution <- function(mat) {
  full.row <- nrow(mat)
  any(rowSums(mat) == full.row)
}
# Prime factors of `day` no larger than the largest smurf id; these are the
# factors a smurf can learn something from. NULL when no such factor exists.
infos <- function(day, smurfs) {
  factors <- as.integer(factorize(day))
  relevant <- unique(factors[factors <= max(smurfs)])
  if (length(relevant) == 0) {
    return(NULL)
  }
  relevant
}
# Simulate one run of the smurf puzzle for a given number of smurfs and
# return the number of days until some smurf can deduce the full solution.
test_smurfs = function(number_of_smurfs){
  # Smurf ids are consecutive primes starting at 3 (primes(100000)[1] == 2 is skipped).
  smurfs = primes(100000)[2:(number_of_smurfs+1)]
  # Square knowledge matrix: knowledge[i, j] == 1 when smurf i knows about smurf j.
  knowledge = matrix(rep(0, length(smurfs)*length(smurfs)), length(smurfs))
  day = 1
  light = F
  while(!has_solution(knowledge)){
    day = day + 1
    # One smurf is picked uniformly at random each day; it always learns about itself.
    selected_smurf = sample(1:length(smurfs),1)
    knowledge[selected_smurf, selected_smurf] = 1
    if(light){
      # NOTE(review): match() would yield NA if infos() returned a prime not in
      # `smurfs` (e.g. 2), but `light` can only be TRUE when all of yesterday's
      # relevant factors were smurf ids (see the is.element check below), so
      # this indexing is safe in practice -- confirm before refactoring.
      knowledge[selected_smurf, match(infos(day, smurfs), smurfs)] = 1
    }
    # The light is switched on only when every informative factor of tomorrow's
    # date is already among the smurfs the selected smurf knows about.
    if(all(is.element(infos(day+1, smurfs), smurfs[knowledge[selected_smurf,]==1]))){
      light = T
    }else{
      light = F
    }
  }
  return(day)
}
# Simulating for different numbers of smurfs:
# draw N random problem sizes and record how many days each run takes.
N = 50
size = sample(3:100, N, replace=T)
res = rep(0, N)
for(i in 1:N){
  res[i] = test_smurfs(size[i])
  print(i)  # progress indicator -- each simulation can be slow
}
# Days-to-solution as a function of the number of smurfs.
plot(size, res)
|
e6f469f5b46599237d4536aa2751be422c786486 | e31448ca0dbf0230b47d6b24a0d7fc66811ff406 | /tests/testthat/fakepackages/allexportedchecked/R/pending.R | 5b26d281929d507fc197baeb2f270443f2a9ea6c | [
"MIT"
] | permissive | rmsharp/checkr | abecee52da5c99772acdb46b33b4911db01c896d | 9e796514c77b030d0a41e1e25352a892e981eaf1 | refs/heads/master | 2021-07-12T02:07:48.486434 | 2017-10-10T15:39:15 | 2017-10-10T15:39:15 | 106,333,359 | 0 | 1 | null | 2017-10-09T20:44:03 | 2017-10-09T20:44:02 | null | UTF-8 | R | false | false | 324 | r | pending.R | #' Pending!
#' Pending!
#'
#' This function has no formals, so it won't be counted against checkr!
#' @export
pending <- function() {
  "Pending!"
}
#' Pending identity.
#'
#' This function needs to be checked or else the test will fail.
#' @import checkr
#' @export
# NOTE(review): checkr::ensure wraps `identity` with a validated precondition;
# `x %is% any` is checkr's validation DSL (evaluated inside ensure(), not here)
# -- confirm against the checkr package documentation before changing.
pending_identity <- checkr::ensure(pre = list(x %is% any), identity)
|
99a4e3f39019535e748e1d352114d6aa89a7ec3d | badf5f99111607d30a41956bc92aaff4f2b3493a | /man/best_clust_toy_obj.Rd | dcb02184ce344dcdacd397ec894290ed8a3407ba | [] | no_license | cbg-ethz/TMixClust | 4d5755e40bc5cc36f13e6ca52c0e7a10b731ab7c | 1c641e560ef82e699990501187c2f659712836c6 | refs/heads/master | 2021-07-07T07:47:59.028119 | 2018-02-01T18:42:11 | 2018-02-01T18:42:11 | 96,901,003 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,110 | rd | best_clust_toy_obj.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_TMixClust.R
\docType{data}
\name{best_clust_toy_obj}
\alias{best_clust_toy_obj}
\title{TMixClust object containing the optimal clustering solution for the
toy data with 3 clusters.}
\format{A \code{TMixClust} object.}
\usage{
best_clust_toy_obj
}
\value{
optimal clustering solution for the toy data
}
\description{
This object contains the result of clustering and
stability analysis
corresponding to the clustering solution with the highest likelihood among 10
different runs of clustering on the toy data with K=3 clusters.
}
\examples{
# Load the optimal clustering solution for the toy data
# provided with the TMixClust package
data("best_clust_toy_obj")
# Print the first lines of the toy clustering object
head(best_clust_toy_obj)
}
\references{
Golumbeanu M, Desfarges S, Hernandez C, Quadroni M, Rato S,
Mohammadi P, Telenti A, Beerenwinkel N, Ciuffi A. (2017) Dynamics of
Proteo-Transcriptomic Response to HIV-1 Infection.
}
\author{
Monica Golumbeanu, \email{monica.golumbeanu@bsse.ethz.ch}
}
\keyword{datasets}
|
aa96bc22bfb4700febffee8f29fb0336d77c426d | 4bd57b8501d4326ecc06c1d1ea499935e1668d95 | /MASH-dev/DanielCitron/Haiti_Geography/haiti_geography.R | 4a9b44fd7bef4d1a03309d0ed3b36eee40dde911 | [] | no_license | aucarter/MASH-Main | 0a97eac24df1f7e6c4e01ceb4778088b2f00c194 | d4ea6e89a9f00aa6327bed4762cba66298bb6027 | refs/heads/master | 2020-12-07T09:05:52.814249 | 2019-12-12T19:53:24 | 2019-12-12T19:53:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,335 | r | haiti_geography.R | library(sp) # necessary for rgdal
library(rgdal)
library(raster)
library(rgeos)
library(maptools)
# NOTE(review): maptools::readShapePoly is deprecated upstream; rgdal::readOGR
# (or sf::st_read) is the usual replacement -- confirm before changing.
# GBD shapefile is loaded but never used later in this visible script.
GBD <- readShapePoly("/Volumes/snfs/DATA/SHAPE_FILES/GBD_geographies/master/GBD_2016/master/shapefiles/GBD2016_analysis_final.shp")
# Load in A2 data
global <- readShapePoly("/Volumes/snfs/WORK/11_geospatial/06_original shapefiles/GAUL_admin/admin2/g2015_2014_2/g2015_2014_2_modified.shp")
# Find the subset of A2 areas that belong to Haiti
HTI <- global[global$ADM0_NAME=="Haiti",]
plot(HTI)
unique(HTI$ADM1_NAME)
# And the names of the A2 areas in Grand Anse:
HTI[HTI$ADM1_Name == "Grande Anse",]
# The names of the A2 areas in Sud
HTI[HTI$ADM1_NAME == "Sud",]$ADM2_NAME
HTI.SUD <- HTI[HTI$ADM1_NAME=="Sud",]
ga <- unique(HTI$ADM1_NAME)[9]
HTI.GA <- HTI[HTI$ADM1_NAME==ga,]
# The names of the A2 areas in GA
unique(HTI.GA$ADM2_NAME)
# The names of the A2 areas in Sud
unique(HTI.SUD$ADM2_NAME)
# Outline of the two main A1 units
plot(HTI.SUD)
plot(HTI.GA, add = TRUE)

# Fill the named A2 unit of `layer` with `colour` on the current plot.
# (Replaces eight near-identical copy-pasted plot() calls.)
highlight.a2 <- function(layer, a2.name, colour) {
  plot(layer[layer$ADM2_NAME == a2.name, ], col = colour, add = TRUE)
}

# Highlight the westernmost A2 units
highlight.a2(HTI.GA, "Anse-D'Ainault", "Red")
highlight.a2(HTI.GA, "Jeremie", "Red")
highlight.a2(HTI.SUD, "Chardonnieres", "Red")

# Highlight some eastern A2 units nearby
highlight.a2(HTI.GA, "Corail", "Green")
highlight.a2(HTI.SUD, "Coteaux", "Green")
highlight.a2(HTI.SUD, "Port-Salut", "Green")
highlight.a2(HTI.SUD, "Cayes", "Green")

# Write an A2 unit's name at the long/lat of its polygon centroid.
# (Replaces seven copy-pasted gCentroid/coordinates/text blocks.)
label.a2 <- function(layer, a2.name, label = a2.name) {
  centroid <- coordinates(gCentroid(layer[layer$ADM2_NAME == a2.name, ]))
  text(centroid[1], centroid[2], label)
}

# Label each of the highlighted A2 units
label.a2(HTI.GA, "Anse-D'Ainault")
label.a2(HTI.GA, "Jeremie")
label.a2(HTI.GA, "Corail")
label.a2(HTI.SUD, "Chardonnieres")
label.a2(HTI.SUD, "Coteaux")
label.a2(HTI.SUD, "Port-Salut")
label.a2(HTI.SUD, "Cayes")
|
457ce77f27ff721997f3f8197a1270b76566e266 | 934b47d53e0ef1cdd1f1512d12b81a6bba16b970 | /debugging.R | b951930c3927fa890ae9078924d8066657f21730 | [] | no_license | jessicawalsh1/consOpt | b8ace200518c3f8b1dc6bf654fbe292ca42b18a0 | 1cdd2c94a269fe8e09e1d285470e9c6b8fb26e59 | refs/heads/master | 2020-03-29T00:31:18.291533 | 2018-09-17T09:08:08 | 2018-09-17T09:08:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,302 | r | debugging.R | library(ompr)
library(ompr.roi)
library(ROI.plugin.lpsolve)
library(magrittr)
library(R6)
source("plotutil.R")
# ------------------------------
# Container object
# ------------------------------
# R6 container holding one optimization problem: a benefits matrix B
# (strategies x species), a strategy cost vector, the index of the
# "do everything" strategy, and a persistence threshold t. Construction
# binarizes B, strips the baseline strategy, and buffers the species the
# baseline already saves; solve() then runs the ILP for a given budget.
optStruct <- R6Class("optStruct",
  public = list(
    B = NULL,
    cost.vector = NULL,
    all.index = NULL,
    t = NULL,
    budget.max = NULL,
    weights = NULL,
    get.baseline.results = function(){
      #' Returns the "results" from only running the baseline strategy
      #'
      #' @return A list holding the results
      return( private$baseline.results )
    },
    add.combo = function(input.strategies=NULL, combined.strategies){
      #' The benefits matrix might contain strategies which are combinations of several strategies. The joint selection of these strategies
      #' will be artificially expensive if two combo strategies contain one or more of the same individual strategy, as the cost will be doubled
      #' E.g.: Strategy S12 is a combination of strategies S3, S7, and S10.
      #' Strategy S13 is a combination of strategies S6, S9, and S10
      #' Selecting strategies S12 and S13 simultaneously will erronously count the cost of S10 twice, making this combination less favorable to the objective function.
      #'
      #'
      #' @param input.strategies A named list denoting strategy names, e.g. list(strategy1="S1", strategy2="S2", ...)
      #' @param combined.strategies A list of lists denoting strategy names that are combined, e.g. list(strategy1=c("S5", "S6", "S7"), strategy2=c("S6", "S7", "S8"))
      #'
      #' @return Silently updates the benefits matrix and the cost vector
      #'
      #' @example
      #' TODO
      if ( length(names(input.strategies)) < 1){
        stop("Error: input.strategies must be a named list")
      }
      if ( length(names(combined.strategies)) < 1){
        stop("Error: combined.strategies must be a named list")
      }
      input.strategy.names <- unlist(input.strategies, use.names=F)
      combined.strategy.names <- unlist(combined.strategies, use.names=F)
      # Check if the user supplied two (or more) existing strategies to combine
      if (length(input.strategy.names) > 1){
        # Strategies must be in the benefits matrix to be combined
        if (!all(input.strategy.names %in% rownames(self$B))){
          print(input.strategy.names)
          stop("User supplied multiple strategies to combine, but they were not found in the benefits matrix")
        }
        # Both strategies are present, compute the cost vector correctly
        combined.strategy.names <- unlist(combined.strategies, use.names=F)
        # NOTE(review): union() of a vector with itself just de-duplicates it;
        # the cost of each underlying strategy is therefore counted once.
        applied.strategies <- union(combined.strategy.names, combined.strategy.names)
        # Make sure strategies are in the cost vector
        if (!all(applied.strategies %in% names(self$cost.vector))){
          print(applied.strategies %in% names(self$cost.vector))
          print(names(self$cost.vector))
          print(applied.strategies)
          stop("Some strategies to be combined were not in the cost vector")
        }
        total.cost <- sum(self$cost.vector[applied.strategies])
        # Add cost to cost vector
        strategy.name <- paste(input.strategy.names, collapse=" + ")
        self$cost.vector <- c(self$cost.vector, total.cost)
        names(self$cost.vector)[length(self$cost.vector)] <- strategy.name
        # Add to benefits matrix - new species benefit vector is the logical OR of the benefit vectors of each input strategy
        old.benefits <- self$B[input.strategy.names,]
        new.row <- apply(old.benefits, 2, max) # take the max (1) of each column - same as (x['S2',] | x['S1',] )*1 for two rows
        self$B <- rbind(self$B, new.row)
        l <- length(rownames(self$B))
        rownames(self$B)[l] <- strategy.name
        # Done
        invisible(self)
      } else {
        # User supplied ONE strategy name as input, adding a novel strategy to the mix
        union.strategy.names <- union(combined.strategy.names, combined.strategy.names)
        if (!all(union.strategy.names %in% rownames(self$B))){
          stop("Error: User attempted to combine strategies that were not in the benefits matrix")
        }
        if (!all(union.strategy.names %in% names(self$cost.vector))){
          print(union.strategy.names)
          stop("Error: User attempted to combine strategies that were not in the cost vector")
        }
        if (is.null(input.strategy.names)) {
          warning("No strategy name supplied, setting default name")
          default.strategy.name <- paste(union.strategy.names, collapse=" + ")
        } else {
          default.strategy.name <- input.strategy.names
        }
        # Compute cost
        total.cost <- sum(self$cost.vector[union.strategy.names])
        self$cost.vector <- c(self$cost.vector, total.cost)
        names(self$cost.vector)[length(self$cost.vector)] <- default.strategy.name
        # Compute benefits
        old.benefits <- self$B[union.strategy.names,]
        new.row <- apply(old.benefits, 2, max)
        self$B <- rbind(self$B, new.row)
        l <- length(rownames(self$B))
        rownames(self$B)[l] <- default.strategy.name
        invisible(self)
      }
    },
    weight.species = function(weights){
      #' Replace each species that survived the threshold with a species weight
      #'
      #' @param weights A list of integers. Must have a number for each species in the benefits matrix.
      #' @returns Updates the benefits matrix in place
      if ( ncol(self$B) != length(weights) ){
        stop("Mismatch between species matrix and weights")
      }
      # Row-wise scaling: each strategy row is multiplied element-wise by the weights.
      for(i in 1:nrow(self$B)){
        self$B[i,] <- self$B[i,] * weights
      }
      invisible(self)
    },
    solve = function(budget, debug=FALSE){
      #' Solve the ILP for this optStruct and a supplied budget
      #'
      #' @param budget A number
      #' @return A result container
      # If the baseline already saves everything, or no money is available,
      # the baseline result is the answer.
      if (private$baseline.solved) {
        return(self$get.baseline.results())
      }
      if (budget == 0){
        return(self$get.baseline.results())
      }
      res <- private$solve.ilp(budget)
      parsed <- private$parse.results(res)
      # debug=TRUE returns the raw OMPR solution object instead of the parsed list.
      if(debug){
        return(res)
      }
      parsed
    },
    initialize = function(B, cost.vector, all.index, t, weights=NULL){
      # TODO: Add error handling if parameters are missing
      if(all.index > nrow(B)){
        stop("Error: User supplied a strategy (all.index) that was not in the benefits matrix")
      }
      self$B <- B
      self$cost.vector <- cost.vector
      self$all.index <- all.index
      self$t <- t
      names(self$cost.vector) <- rownames(self$B)
      # Check for names and do the rounding of B
      private$prepare()
      # Threshold B
      private$threshold(self$t)
      # Weight species groups (optional)
      if (!is.null(weights)) {
        self$weight.species(weights)
      }
      # Count the baseline results and remove etc.
      private$baseline.prep()
      # Set the zeroed out species to -1 so the ILP objective actively avoids them.
      self$B[self$B==0] <- -1
      # We are now ready to do optimization
    }
  ),
  private = list(
    current.budget = NULL,
    baseline.solved = FALSE,
    baseline.idx = 1,
    baseline.results = NULL,
    species.buffer = list(),
    state = list(
      weighted = FALSE
    ),
    prepare = function(){
      #' Rounds the B matrix, check if B is labelled
      #'
      #' @return Updates self$B
      self$B <- round(self$B, digits=2)
      strategy.names <- rownames(self$B)
      species.names <- colnames(self$B)
      if (length(strategy.names) < nrow(self$B) || length(species.names) < ncol(self$B))
        warning("Warning: Missing strategy or species label information, results will not be meaningful")
      names(self$cost.vector) <- strategy.names
      invisible(self)
    },
    threshold = function(t){
      #' Thresholds the B matrix, binarizing the entries
      #'
      #' @param t A number
      #' @return Modifies the B matrix in place
      self$t <- t
      self$B <- as.data.frame( (self$B >= t)*1 )
      invisible(self)
    },
    baseline.prep = function(){
      #' Count up the species saved by the baseline strategy, then remove it;
      #' These species are buffered and are added freely to nontrivial strategies at results time
      #' B is mutated by removing the baseline strategy, and the all_index is decremented
      #'
      #' @return Updates private$baseline.results
      baseline.species.idx <- which(self$B[private$baseline.idx,] > 0)
      # If ALL species are saved by the baseline, the B matrix will be useless
      if (length(baseline.species.idx) == ncol(self$B)){
        private$baseline.solved = TRUE
      }
      baseline.species.names <- colnames(self$B)[baseline.species.idx]
      # NOTE(review): paste(..., sep=" | ") on a single vector is a no-op, so
      # this stays a character VECTOR (not a collapsed string). parse.results()
      # relies on that: it concatenates this vector and counts its length.
      # Changing sep= to collapse= would silently break the species count.
      species.names.string <- paste(baseline.species.names, sep=" | ")
      # Store in the species buffer
      private$species.buffer <- c(private$species.buffer, baseline.species.names)
      # NOTE(review): `length(x > 0)` is length(x), not the intended
      # `length(x) > 0`; the two happen to agree under if()'s numeric
      # coercion (0 is FALSE, any positive count is TRUE), so behavior is
      # correct but the expression is misleading.
      if (length(baseline.species.idx > 0)) {
        # Remove baseline species from B, costs, and the all_index
        self$B <- self$B[-private$baseline.idx, -baseline.species.idx]
        self$cost.vector <- self$cost.vector[-private$baseline.idx]
        self$all.index <- self$all.index - 1
      }
      # Update baseline results
      baseline.num.species <- length(baseline.species.idx)
      baseline.cost <- 0
      baseline.threshold <- self$t
      private$baseline.results <- list(numspecies = baseline.num.species,
                                       totalcost = baseline.cost,
                                       threshold = baseline.threshold,
                                       species.groups = species.names.string,
                                       strategies="Baseline",
                                       budget = baseline.cost)
      invisible(self)
    },
    parse.results = function(results){
      #' Convert the optimization results into something human readable
      #'
      #' @param results An OMPR solution object
      #' @return A list compiling the results of the optimization
      assignments <- get_solution(results, X[i,j])
      # Get entries of the assignment matrix
      assignments <- assignments[assignments$value==1,]
      # Get strategy names
      strategy.idx <- sort(unique(assignments$i))
      strategy.names <- rownames(self$B)[strategy.idx]
      # Get strategy cost
      total.cost <- sum(self$cost.vector[strategy.idx])
      # Get species names
      species.idx <- sort(unique(assignments$j))
      species.names <- colnames(self$B)[species.idx]
      # Add in the baseline species (a character vector -- see baseline.prep)
      species.names <- c(species.names, self$get.baseline.results()$species.groups)
      species.total <- length(species.names)
      threshold <- self$t
      # Return
      list(numspecies = species.total,
           totalcost = total.cost,
           threshold = threshold,
           species.groups = species.names,
           strategies = strategy.names,
           assignments = assignments,
           budget = private$current.budget)
    },
    solve.ilp = function(budget){
      #' Solves the ILP given a budget
      #'
      #' @param budget A number
      #' @return A list of results
      private$current.budget <- budget
      B <- self$B
      strategy.cost <- self$cost.vector
      budget.max <- budget
      all_idx <- self$all.index
      # Number of strategies
      n <- nrow(B)
      # Number of species
      m <- ncol(B)
      others <- which(1:n != all_idx)
      # Set up the ILP
      # --------------
      model <- MIPModel() %>%
        # Decision variables
        # ------------------
        # X[i,j] binary selection matrix
        add_variable(X[i,j], i = 1:n, j = 1:m, type="binary") %>%
        # y[i] Strategy selection vector
        add_variable(y[i], i = 1:n, type="binary") %>%
        # Objective function
        # ------------------
        set_objective(sum_expr(B[i,j] * X[i,j], i = 1:n, j = 1:m)) %>%
        # Constraints
        # -----------
        # Constraint (1):
        # Ensure only one strategy applies to a target species
        add_constraint(sum_expr(X[i,j], i = 1:n) <= 1, j = 1:m) %>%
        # Constraint (2)
        # Force contributions of management strategy i to every target species j to be null if strategy i is not selected
        # forall(i in strategies, j in target) xij[i][j] <= yi[i];
        add_constraint(X[i,j] <= y[i], i = 1:n, j = 1:m) %>%
        # Constraint (3)
        # "All" strategy constraint - if the "all" strategy is active, all others must be deselected
        add_constraint(y[all_idx] + y[i] <= 1, i = others) %>%
        # Constraint (4)
        # Budget constraint
        add_constraint(sum_expr(y[i]*strategy.cost[i], i = 1:n) <= budget.max, i = 1:n)
      # Solve the model
      result <- solve_model(model, with_ROI(solver="lpsolve", verbose=FALSE))
      result
    }
  )
)
# ------------------------------
# Function to optimize over a range of thresholds
# ------------------------------
#' Perform the optimization over a range of budgets and thresholds
#'
#' @param B A [strategies]x[species] dataframe with named rows and columns
#' @param cost.vector A list of strategy costs
#' @param all.index An integer signifying the index of the strategy that combines all strategies
#' @param budgets A list of budgets over which to optimize. If NULL, a sensible range of budgets will be automatically generated
#' @param thresholds A list of survival thresholds over which to optimize
#' @param combo.strategies Optional `combination` object describing strategies
#'   that share underlying sub-strategies (see parse.combination.matrix)
#' @param weights Optional per-species weights forwarded to optStruct
#' @return A data.frame of de-duplicated results, one row per (threshold, budget) run
optimize.range <- function(B, cost.vector, all.index, budgets = NULL, thresholds = c(50.01, 60.01, 70.01), combo.strategies=NULL, weights=NULL){
  # Set up the progress bar
  # NOTE(review): max=100 is hard-coded; with many budgets x thresholds the
  # step counter can exceed 100 -- cosmetic only, but confirm.
  progress.bar <- txtProgressBar(min=1, max=100, initial=1)
  step <- 1
  # Collect results of the optimization here
  out <- data.frame()
  for (threshold in thresholds) {
    # Initialize a new optimization run with an opStruct
    this.opt <- optStruct$new(B=B, cost.vector=cost.vector, all.index=all.index, t=threshold, weights=weights)
    # Check if combo information needs to be supplied
    if (!is.null(combo.strategies)){
      combos <- combo.strategies$get.combos()
      for(i in 1:length(combos)){
        input <-combos[[i]]$input
        output <- combos[[i]]$output
        this.opt$add.combo(input, output)
      }
    }
    # Budgets are generated once, from the first threshold's cost vector, and
    # then reused for every subsequent threshold.
    if ( is.null(budgets) ){
      # No budget range supplied. Use the costs of individual strategies
      budgets <- make.budget(this.opt$cost.vector)
    }
    for (budget in budgets){
      # Run over the budgets and compile the results
      optimization.result <- this.opt$solve(budget)
      out <- rbind(out, opt.result.to.df(optimization.result))
      # Update progress bar
      step <- step + 1
      setTxtProgressBar(progress.bar, step)
    }
  }
  # Remove duplicate entries from the result
  remove.duplicates(out)
}
#' Drop redundant optimization runs
#'
#' Within each threshold level, removes rows whose species group was already
#' produced by an earlier (cheaper) run, then rows whose species count was
#' already achieved. Helper flags are computed on the full frame first, so the
#' second filter is applied to the rows surviving the first one.
#'
#' @param range.result.df A data.frame with columns `threshold`,
#'   `species_groups` and `number_of_species` (as built by opt.result.to.df).
#' @return The input data.frame with redundant rows removed.
remove.duplicates <- function(range.result.df) {
  n <- nrow(range.result.df)
  dup.groups <- logical(n)
  dup.counts <- logical(n)
  # Flag duplicates separately within each threshold level.
  for (th in unique(range.result.df$threshold)) {
    rows <- which(range.result.df$threshold == th)
    dup.groups[rows] <- duplicated(range.result.df$species_groups[rows])
    dup.counts[rows] <- duplicated(range.result.df$number_of_species[rows])
  }
  # First drop repeated species groups, then repeated species counts among
  # the remaining rows (flag order mirrors the surviving rows).
  kept <- range.result.df[!dup.groups, ]
  kept[!dup.counts[!dup.groups], ]
}
# Flatten one optStruct$solve() result list into a single-row data.frame:
# species groups are joined with " | ", strategy names with " + ".
opt.result.to.df <- function(opt.result) {
  species.flat <- paste(opt.result$species.groups, collapse = " | ")
  strategies.flat <- paste(opt.result$strategies, collapse = " + ")
  data.frame(
    total_cost = opt.result$totalcost,
    strategies = strategies.flat,
    species_groups = species.flat,
    threshold = opt.result$threshold,
    number_of_species = opt.result$numspecies,
    budget.max = opt.result$budget
  )
}
#' Generate budget levels that exercise different strategy combinations:
#' the individual strategy costs, plus a 30-million-spaced grid spanning
#' their range, with 0 prepended and duplicates dropped.
#'
#' @param cost.vector A numeric vector of strategy costs.
#' @return A sorted numeric vector of budget levels starting at 0.
make.budget <- function(cost.vector) {
  costs.millions <- sort(unlist(cost.vector)) / 1e6
  grid <- seq(min(costs.millions), max(costs.millions), 30) * 1e6
  combined <- c(cost.vector, grid)
  c(0, unique(sort(combined)))
}
# ------------------------------
# Struct for combinations (for optimize.range())
# ------------------------------
# Small R6 accumulator for strategy-combination records: each record pairs an
# `input` (combined strategy name(s)) with an `output` (the underlying
# strategies it is built from). Consumed by optimize.range()/optStruct$add.combo.
combination <- R6Class("combination",
  public = list(
    add.combo = function(input, output){
      #' Add a combination
      #'
      #' @param input A named list of the form: list(strat1="<some name>")
      #' @param output A named list of the form list(strat1=c("strategy1", "strategy2", "..."))
      #' @return void
      combo.idx <- private$combo.counter + 1
      private$combo.counter <- combo.idx
      private$combos[[combo.idx]] <- list(input=input, output=output)
      invisible(self)
    },
    get.combos = function(){
      # Returns the accumulated list of input/output record pairs.
      private$combos
    }
  ),
  private = list(
    combo.counter = 0,
    combos = list()
  ))
#' Build a combination object from a strategy-combination matrix
#'
#' Columns of `combo.mat` are strategies; non-empty cells name the underlying
#' sub-strategies a column is composed of. Columns composed of more than one
#' (but not all) sub-strategies are treated as combination strategies, and any
#' sub-strategy shared by several of them produces one combination record.
#'
#' @param combo.mat A matrix/data.frame with strategy names as column names and
#'   sub-strategy names (or "") as cell values.
#' @return A combination R6 object
#' @export
#'
#' @examples
parse.combination.matrix <- function(combo.mat){
  # Find combination strategies by identifying columns containing nontrivial combinations
  strategy.combination.size <- apply(combo.mat, 2, function(x) length(which(x != '')))
  combinations.idx <- which(strategy.combination.size > 1 & strategy.combination.size < length(strategy.combination.size))
  combinations <- combo.mat[,combinations.idx]
  # Find strategies that are implemented by several combination strategies
  combo.table <- table(unlist(combinations))
  # NOTE(review): dropping the first table entry assumes it is the empty
  # string "" (tables are sorted, "" sorts first) -- confirm for all inputs.
  combo.table <- combo.table[2:length(combo.table)]
  overlaps <- names(combo.table[which(combo.table > 1)])
  # Each strategy containing each overlap must be combined
  combo.container <- combination$new()
  for (overlap in overlaps){
    # Find all strategies containing this overlapping strategy
    to.combine <- c()
    for (i in 1:ncol(combinations)){
      if (overlap %in% combinations[,i]) {
        to.combine <- c(to.combine, colnames(combinations)[i])
      }
    }
    # Combine the found strategies: input holds the combination-strategy
    # names, output holds each one's cleaned list of sub-strategies.
    input <- list()
    for (i in 1:length(to.combine)){
      input[i] <- to.combine[i]
      names(input)[i] <- paste("strat", i, sep="")
    }
    output <- list()
    for (i in 1:length(to.combine)){
      strat <- list(remove.empty(combinations[,to.combine[i]]))
      output[i] <- strat
      names(output)[i] <- paste("strat", i, sep="")
    }
    combo.container$add.combo(input, output)
  }
  combo.container
}
# Drop empty entries from a factor/character vector and strip every space
# from the remaining values, returning a character vector.
remove.empty <- function(factorlist) {
  kept <- factorlist[factorlist != ""]
  gsub(" ", "", as.character(kept), fixed = TRUE)
}
|
efd980485753c5292ddc49df56bff39d4844b7dc | 67d069e4d9d9b852e11cdc333bb0ff0ed5e6a477 | /man/predict.EMglmnet.Rd | 69580d29493073992b2867c3fa4dd1467b844233 | [] | no_license | schiffner/EMnet | 3bf8aba9b0a9c318b8dbb7f7638f6c28be5aefd2 | ce34e19a36637e3eb0cebe2162417e4c67904ac7 | refs/heads/master | 2020-12-25T14:14:02.980829 | 2016-12-08T11:06:43 | 2016-12-08T11:06:43 | 64,326,192 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 898 | rd | predict.EMglmnet.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EMglmnet.R
\name{predict.EMglmnet}
\alias{predict.EMglmnet}
\title{Predict New Observations by a Trained Logistic Mixture of Experts Model}
\usage{
\method{predict}{EMglmnet}(object, newdata, ...)
}
\arguments{
\item{object}{An object of class \code{EMglmnet}.}
\item{newdata}{A \code{data.frame} or \code{matrix} with data for which to make predictions.}
\item{...}{Further arguments. Currently unused.}
}
\value{
A \code{list} with components:
\item{class}{A \code{factor} with predicted class levels.}
\item{posterior}{A \code{matrix} with predicted posterior probabilities.}
\item{gating}{The probabilities predicted by the gating model.}
\item{experts}{The class posterior probabilities predicted by individual experts.}
}
\description{
Predict new observations by a trained logistic mixture of experts model.
}
|
16ecf94d5b9fa3b2ff5c810fcf3d08aeb0a27f00 | 904fcdd647828aabb08dc7e563dfce0f985947e5 | /handling-data-and-time-in-r/solutions/module-2.R | b747188abe8b34b885fa78445efc28ceadd1566b | [] | no_license | stghn/Free-Online-R-Courses | 915d554cdb787ceaeb150ffb27a66f5bb52eb339 | e604b7319881f13b85f137579b8f6754fcb338ab | refs/heads/master | 2023-07-06T11:59:02.450121 | 2021-08-05T10:19:06 | 2021-08-05T10:19:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 482 | r | module-2.R | # using character
# Demonstrates the main date/time representations in base R.
# using character
as.Date('2000-02-29')
# using origin and number (11016 days after 1970-01-01 is 2000-02-29)
as.Date(11016, origin = '1970-01-01')
# POSIXct: calendar time stored as seconds since the epoch
as.POSIXct('2000-02-29 08:55:23', tz = 'UTC')
# POSIXlt: broken-down list representation with named components
release_v1 <- as.POSIXlt('2000-02-29 08:55:23', tz = "UTC")
release_v1$mday # day of the month (1-31)
release_v1$yday # day of year (0-based: Jan 1st is 0)
release_v1$mon # month (0-based: February is 1)
# ISO Date
ISOdate(year = 2000,
        month = 02,
        day = 29,
        hour = 08,
        min = 55,
        sec = 23,
        tz = "UTC")
|
7961c0ce7183bbc34850c31f8693975cf6c9f360 | 0a825ed49ce75054592522ddebf9daba82425667 | /WisconsinLotter-modelling.R | 0e95cc79d9c1b4117dcbea516cbed9eb14ab4806 | [] | no_license | savla/ML | 7014db5c500b30fe6d852ab23a5b11d5920bda6b | 687409effa40d0c02b00394ec29e81f41df1c81f | refs/heads/master | 2021-08-30T00:42:15.316629 | 2017-12-15T12:01:24 | 2017-12-15T12:01:24 | 114,365,882 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,541 | r | WisconsinLotter-modelling.R | library(corpcor)
library(car)
library(perturb)
library(MASS)
library(dummies)
setwd("C://Users//Savla-Home//Documents//Manu//PGDDS//Stats//fwdcsvfilesfortommorrow")
## Read the dataset & summarize
Data<-read.csv("WiscLottery.csv", header = TRUE)
str(Data)
attach(Data)
summary(Data)
## Step 1: Univariate Analysis of all variables: y 's and x's
boxplot(PERPERHH, main="Persons per Household", col ="blue")
boxplot(MEDSCHYR, main="Median years of schooling", col ="blue")
boxplot(MEDHVL, main="Median home value", col ="blue")
boxplot(PRCRENT, main="Percent of housing", col ="blue")
boxplot(PRC55P, main="Percent of population that is 55", col ="blue")
boxplot(HHMEDAGE, main="Household median age", col ="blue")
boxplot(MEDINC, main="median household income", col ="blue")
boxplot(POP, main="Online lottery sales", col ="blue")
boxplot(SALES, main="Population", col ="blue")
## Step 2: Bivariate Analysis using Correlation Coefficient, Partial Correlation Coefficient & Scatter Plots
plot(Data[ ,2:10])
cor(Data[ ,2:10])
cor2pcor(cor(Data[ ,2:10]))
## Step 3: Basic Model with y & all X variables: Evaluate model summary, Anova analysis, diagnostic plots, residual plots, AV Plots for getting an understanding of fit
model1 <-lm (SALES ~ ., data= Data[ ,2:10])
summary(model1)
anova(model1)
par(mfrow=c(2,2))
plot(model1)
residualPlots(model1)
avPlots(model1, id.n=2, id.cex=0.7)
## Step 4: Transform y based on Box Cox Transformation
gh<-boxcox(model1)
# Lambda that maximizes the profile log-likelihood; a value near 0 motivates
# the log(SALES) transform used below.
gh$x[which.max(gh$y)]
## Step 5: Build Next model with transformed Y: Evaluate model summary, Anova analysis, diagnostic plots, residual plots, AV Plots for getting an understanding of fit
model2 <-lm (log(SALES) ~ ., data= Data[ ,2:10])
summary(model2)
anova(model2)
par(mfrow=c(2,2))
plot(model2)
residualPlots(model2)
avPlots(model2, id.n=2, id.cex=0.7)
## Step 6: Build Next model with transformed X Values: Evaluate model summary, Anova analysis, diagnostic plots, residual plots, AV Plots for getting an understanding of fit
# Quadratic terms added as new columns (11-14), then POP2 as column 15.
Data$PERPERHH2 <- (Data$PERPERHH) ^2
Data$MEDSCHYR2 <- (Data$MEDSCHYR) ^2
Data$PRCRENT2 <- (Data$PRCRENT)^2
Data$HHMEDAGE2 <- (Data$HHMEDAGE)^2
model3 <-lm (log(SALES) ~ ., data= Data[ ,2:14])
summary(model3)
anova(model3)
par(mfrow=c(2,2))
plot(model3)
residualPlots(model3)
avPlots(model3, id.n=2, id.cex=0.7)
Data$POP2 <- (Data$POP) ^2
model4 <-lm (log(SALES) ~ ., data= Data[ ,2:15])
summary(model4)
anova(model4)
par(mfrow=c(2,2))
plot(model4)
residualPlots(model4)
avPlots(model4, id.n=2, id.cex=0.7)
## Step 7: Check for Multicollinearity
# vif (car) and colldiag (perturb): quadratic terms are expected to be highly
# collinear with their linear counterparts, motivating the centering in Step 8.
vif(model4)
colldiag(model4)
Data1 <- Data
summary(Data1)
Data1$PERPERHH <- Data1$PERPERHH - 2.706
Data1$MEDSCHYR <- Data1$MEDSCHYR-12.70
Data1$MEDHVL <- Data1$MEDHVL - 57.09
Data1$PRCRENT <- Data1$PRCRENT - 24.68
Data1$PRC55P <- Data1$PRC55P - 39.7
Data1$HHMEDAGE <- Data1$HHMEDAGE - 48.76
Data1$MEDINC <- Data1$MEDINC -45.12
Data1$POP <- Data1$POP - 9311
Data1$PERPERHH2 <- Data1$PERPERHH2 - 7.365
Data1$MEDSCHYR2 <- Data1$MEDSCHYR2 - 161.5
Data1$PRCRENT2 <- Data1$PRCRENT2 - 694.6
Data1$HHMEDAGE2 <- Data1$HHMEDAGE2 - 2394
Data1$POP2 <- Data1$POP2 - 2.074e+08
## Step 9: Fit the model
model5 <- lm( log(SALES) ~., data = Data1[,(2:15)])
summary(model5)
anova(model5)
par(mfrow=c(2,2))
plot(model5)
residualPlots(model5)
avPlots(model5, id.n=2, id.cex=0.7)
vif(model5)
colldiag(model5)
## Step 10: Variable Selection using StepAIC
stepAIC(model5)
## Step 11: Fit the model with selected variables
model6 <- lm(log(SALES) ~ PERPERHH + MEDSCHYR + MEDHVL + PRC55P +
HHMEDAGE + POP + PERPERHH2 + POP2, data = Data1[, (2:15)])
summary(model6)
anova(model6)
par(mfrow=c(2,2))
plot(model6)
residualPlots(model6)
avPlots(model6, id.n=2, id.cex=0.7)
vif(model6)
colldiag(model6)
## Step 12: Look at Influential variables using Cook's distance > 1.0
influence.measures(model6)
influenceIndexPlot(model6) # Index Plots of the influence measures
influencePlot(model6) # A user friendly representation of the above
## Step 13. Fit the model with/without influential variables
Data2 <- Data1[-9, ]
model6 <- lm(log(SALES) ~ PERPERHH + MEDSCHYR + MEDHVL + PRC55P +
HHMEDAGE + POP + PERPERHH2 + POP2, data = Data2[, (2:15)])
summary(model6)
anova(model6)
par(mfrow=c(2,2))
plot(model6)
residualPlots(model6)
avPlots(model6, id.n=2, id.cex=0.7)
vif(model6)
colldiag(model6)
Data3 <- Data1[-21, ]
model6 <- lm(log(SALES) ~ PERPERHH + MEDSCHYR + MEDHVL + PRC55P +
HHMEDAGE + POP + PERPERHH2 + POP2, data = Data3[, (2:15)])
summary(model6)
anova(model6)
par(mfrow=c(2,2))
plot(model6)
residualPlots(model6)
avPlots(model6, id.n=2, id.cex=0.7)
vif(model6)
colldiag(model6)
Data4 <- Data1[-28, ]
model6 <- lm(log(SALES) ~ PERPERHH + MEDSCHYR + MEDHVL + PRC55P +
HHMEDAGE + POP + PERPERHH2 + POP2, data = Data4[, (2:15)])
summary(model6)
anova(model6)
par(mfrow=c(2,2))
plot(model6)
residualPlots(model6)
avPlots(model6, id.n=2, id.cex=0.7)
vif(model6)
colldiag(model6)
## Step 14. Final Model
model6 <- lm(log(SALES) ~ PERPERHH + MEDSCHYR + MEDHVL + PRC55P +
HHMEDAGE + POP + PERPERHH2 + POP2, data = Data1[, (2:15)])
summary(model6)
anova(model6)
par(mfrow=c(2,2))
plot(model6)
residualPlots(model6)
avPlots(model6, id.n=2, id.cex=0.7)
vif(model6)
colldiag(model6)
|
c1904241652adfbab15b3f866a4a3200bdd1748c | ee73739bd3314929cd44aa98b6b364f23e72691f | /man/KendallTau.Rd | d189088deeaca567dda08250b9a374712f7c8d33 | [] | no_license | irinagain/mixedCCA | 8ee85144bcf7bff615990f244506c37770147d37 | 4c2b63f754582e57654893e50484c42a0f32cdb4 | refs/heads/master | 2022-09-23T22:50:00.459426 | 2022-09-09T21:19:33 | 2022-09-09T21:19:33 | 140,593,736 | 20 | 9 | null | 2022-09-09T21:15:52 | 2018-07-11T15:19:15 | R | UTF-8 | R | false | true | 1,630 | rd | KendallTau.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/KendallTau.R
\name{KendallTau}
\alias{KendallTau}
\alias{Kendall_matrix}
\title{Kendall's tau correlation}
\usage{
KendallTau(x, y)
Kendall_matrix(X, Y = NULL)
}
\arguments{
\item{x}{A numeric vector.}
\item{y}{A numeric vector.}
\item{X}{A numeric matrix (n by p1).}
\item{Y}{A numeric matrix (n by p2).}
}
\value{
\code{KendallTau(x, y)} returns one Kendall's tau correlation value between two vectors, \code{x} and \code{y}.
\code{Kendall_matrix(X)} returns a p1 by p1 matrix of Kendall's tau correlation coefficients. \code{Kendall_matrix(X, Y)} returns a p1 by p2 matrix of Kendall's tau correlation coefficients.
}
\description{
Calculate Kendall's tau correlation.
\deqn{ \hat{\tau}_{jk} = \frac{2}{n(n-1)}\sum_{1\le i<i'\le n} sign(X_{ji}-X_{ji'}) sign(X_{ki}-X_{ki'}) }
The function \code{KendallTau} calculates Kendall's tau correlation between two variables, returning a single correlation value. The function \code{Kendall_matrix} returns a correlation matrix.
}
\examples{
n <- 100 # sample size
r <- 0.8 # true correlation
### vector input
# Data generation (X1: truncated continuous, X2: continuous)
Z <- mvrnorm(n, mu = c(0, 0), Sigma = matrix(c(1, r, r, 1), nrow = 2))
X1 <- Z[,1]
X1[Z[,1] < 1] <- 0
X2 <- Z[,2]
KendallTau(X1, X2)
Kendall_matrix(X1, X2)
### matrix data input
p1 <- 3; p2 <- 4 # dimension of X1 and X2
JSigma <- matrix(r, nrow = p1+p2, ncol = p1+p2); diag(JSigma) <- 1
Z <- mvrnorm(n, mu = rep(0, p1+p2), Sigma = JSigma)
X1 <- Z[,1:p1]
X1[Z[,1:p1] < 0] <- 0
X2 <- Z[,(p1+1):(p1+p2)]
Kendall_matrix(X1, X2)
}
|
068b5f97f0558e02bf30ca110c68c96e6b3049e5 | 16dc4177a9da65fc04a80a34407923b5cc1b382b | /syncTraktLetterboxd.R | 9533ca9a7ff77f28d397e4be9a7a229098a06fab | [] | no_license | tomcopple/trakt | db272d444d33206ec0e80cc63e9ee3a112dc700d | 225fb26fb746bba8727124c5dab26dd5a21d7adc | refs/heads/master | 2023-08-29T21:54:15.584352 | 2023-08-22T13:34:50 | 2023-08-22T13:34:50 | 74,285,803 | 0 | 0 | null | 2017-02-01T18:25:43 | 2016-11-20T16:03:09 | R | UTF-8 | R | false | false | 2,891 | r | syncTraktLetterboxd.R | ## Sync trakt and letterboxd?
library(tidyverse);library(httr);library(jsonlite);library(lubridate);library(xml2)
source('traktShiny/setTrakt.R')
# 1. Get Trakt Movie History ----------------------------------------------
traktRawHis <- httr::GET(url = 'https://api.trakt.tv/users/tomcopple/watched/movies',
headers)
httr::stop_for_status(traktRawHis)
traktHis <- httr::content(traktRawHis, as = 'text') %>%
jsonlite::fromJSON(simplifyDataFrame = T, flatten = T) %>%
select(title = movie.title, date = last_watched_at) %>%
mutate(date = as_date(date)) %>%
filter(date != lubridate::ymd('2011-08-24')) %>%
arrange(date) %>%
as_tibble()
traktHis %>% count(year(date)) %>% ggplot(aes(x = as.factor(`year(date)`), y = n)) + geom_col(fill = scales::hue_pal()(4)[[3]])
# Merge with Trakt ratings ------------------------------------------------
traktRawRat <- httr::GET(url = 'https://api.trakt.tv/users/tomcopple/ratings/movies',
headers)
traktRat <- httr::content(traktRawRat, as = 'text') %>%
jsonlite::fromJSON(simplifyDataFrame = T, flatten = T) %>%
select(title = movie.title, rating) %>%
as_tibble()
traktRat %>% count(rating) %>% ggplot(aes(x = rating, y = n)) + geom_col(fill = scales::hue_pal()(4)[[1]])
trakt <- full_join(traktRat, traktHis) %>%
filter(str_detect(title, 'Charlie Brown', negate = TRUE))
trakt %>% filter(is.na(date))
trakt %>% filter(is.na(rating))
# Get Letterboxd History --------------------------------------------------
letFeed <- "https://letterboxd.com/tomcopple/rss/"
letNew <- httr::GET(letFeed) %>%
xml2::read_xml() %>%
xml2::xml_find_all(xpath = 'channel') %>%
xml2::xml_find_all(xpath = 'item') %>%
xml2::as_list() %>%
map(unlist) %>%
map_df(bind_rows) %>%
select(title = filmTitle, date = watchedDate, rating = memberRating) %>%
mutate(date = lubridate::ymd(date),
rating = as.numeric(rating))
letHist <- rdrop2::drop_read_csv(file = 'R/trakt/letterboxd/letHist.csv',
dtoken = dropbox) %>%
as_tibble() %>%
mutate(date = lubridate::ymd(date))
let <- bind_rows(letNew, letHist) %>% distinct(title, date, rating)
write_csv(let, here::here('tempData', 'letHist.csv'))
rdrop2::drop_upload(file = here::here('tempData', 'letHist.csv'),
path = 'R/trakt/letterboxd')
# Merge together ----------------------------------------------------------
## First look at what's in trakt but not letterboxd
anti_join(trakt, let %>% mutate(rating = as.integer(rating * 2)))
## Also see other way round (but don't do anything with this yet?)
anti_join(let %>% mutate(rating = as.integer(rating * 2)), trakt)
notLet <- anti_join(trakt, let %>% mutate(rating = as.integer(rating * 2)),
by = 'title') %>%
arrange(desc(date))
notLet
|
6cfabdda4fa0f088a5bfe96ac7fe3ef746dea726 | 858280a49963a19e62f75f1f1b705f8234bcdadb | /R/server_using_hdf.R | 9a8dd50a08ae59ea143c209f98faf6614e5aa69d | [
"MIT"
] | permissive | gusef/IrisViewer | 476f5624645c1b59559d191ec946fbac1c6f16d3 | a1df4b8f01184313e512600ffbb440c328f56959 | refs/heads/master | 2021-09-24T16:19:13.510645 | 2018-10-11T18:50:23 | 2018-10-11T18:50:23 | 105,020,749 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,080 | r | server_using_hdf.R | #' Shiny server
#'
#' Run the Shiny App IrisViewer
#' @export
server_using_hdf <- function(input, output, session) {
    # Server logic for the IrisViewer app, reading image layers from an HDF5
    # file on demand.
    # NOTE(review): relies on `iris_set` (S4 object with @markers and @samples
    # slots) and `images` (an HDF5 file handle/path used with h5ls()/h5read())
    # being defined outside this function -- confirm where they are created.
    # Raise the upload size limit (1000 * 1024^2 bytes, i.e. ~1 GB).
    options(shiny.maxRequestSize=1000*1024^2)
    #reactive values shared across the observers/renderers below:
    #  current_sample    - sample selected by clicking a barplot bar
    #  from_cell/to_cell - cell types for the nearest-neighbor ray plot
    #  IF_colors/colors  - user-selected colors for IF channels / cell markers
    #  current_tiffstack - per-channel image layers for the shown coordinate
    values <- reactiveValues(verbose = NULL,
                             current_sample = NULL,
                             from_cell = NULL,
                             to_cell = NULL,
                             IF_colors = NULL,
                             colors=NULL,
                             current_tiffstack = NULL,
                             img_file = NULL)
    #Initialization: populate marker dropdowns, build the color pickers and
    #the channel checkbox panel, and wire one observer per color input.
    observe({
        #set the marker selects
        #"singles" = markers whose base name occurs once; "doubles" = base
        #names measured twice, i.e. as both <name>+ and <name>- variants.
        singles <- iris_set@markers
        doubles <- sub('[+-]$','',singles)
        singles <- singles[!doubles %in% doubles[duplicated(doubles)]]
        doubles <- table(doubles)
        doubles <- names(doubles)[doubles == 2]
        updateSelectInput(session, 'first_marker',
                          choices = singles)
        updateSelectInput(session, 'second_marker',
                          choices = doubles)
        #color palette to choose from (defaults for the channel pickers)
        colpalette <- c('blue','red','green','yellow','orange','purple','grey')
        #figure out the channels from the HDF5 directory listing
        #NOTE(review): assumes the rows sharing img_dir$group[2] are the
        #datasets of the first image group -- confirm the h5ls() layout.
        img_dir <- h5ls(images)
        channels <- img_dir[img_dir$group == img_dir$group[2],]$name
        names(channels) <- sub('\\.\\.Opal.+','',channels)
        #move the DAPI channel to the front
        dapi_pos <- grep('DAPI',channels)
        channels <- c(channels[dapi_pos],channels[-dapi_pos])
        values$channels <- channels
        #one color picker per channel, injected into the #IFColorSelect div
        insertUI(
            selector = "#IFColorSelect",
            where = "afterBegin",
            ui = lapply(1:length(channels),
                        function(idx,channels,colpalette){
                            colourpicker::colourInput(
                                paste0("IF_col_",idx),
                                names(channels)[idx],
                                showColour = 'background',
                                value = colpalette[idx])},
                        channels,
                        colpalette))
        #colors: lookup table mapping channel -> currently chosen color
        values$IF_colors <- data.frame(
            name=names(channels),
            channel=channels,
            colors=colpalette[1:length(channels)],
            stringsAsFactors = F)
        #add observers to the color selectors
        #(lapply gives each observeEvent closure its own copy of `id`)
        lapply(1:length(channels),
               function(id){
                   nam <- paste0("IF_col_",id)
                   observeEvent(input[[nam]], {
                       values$IF_colors$colors[id] <- input[[nam]]
                   })
               })
        #make selectors for the marker colors (fixed qualitative palette)
        marker_colors <- c('#7fc97f', '#6a3d9a', '#bc80bd', '#e31a1c',
                           '#beaed4', '#33a02c', '#fdc086', '#386cb0', '#f0027f',
                           '#bf5b17', '#666666', '#ffff99')
        markers <- iris_set@markers
        insertUI(
            selector = "#MarkerColorSelect",
            where = "afterBegin",
            ui = lapply(1:length(markers),
                        function(idx,markers,marker_colors){
                            colourpicker::colourInput(
                                paste0("Marker_col_",idx),
                                markers[idx],
                                showColour = 'background',
                                value = marker_colors[idx])},
                        markers,
                        marker_colors))
        #set the color markers (marker -> color lookup table)
        values$colors <- data.frame(marker=markers,
                                    colors=marker_colors[1:length(markers)],
                                    stringsAsFactors = F)
        #add observers to the color selectors
        lapply(1:length(markers),
               function(id){
                   nam <- paste0("Marker_col_",id)
                   observeEvent(input[[nam]], {
                       values$colors$colors[id] <- input[[nam]]
                   })
               })
        #add a channel selection panel (checkboxes; DAPI selected by default)
        temp <- channels
        names(temp) <- NULL
        insertUI(
            selector = "#ChannelSelect",
            where = "afterBegin",
            ui = checkboxGroupInput("ChannelSelectBox",
                                    label = 'Select channels',
                                    choiceNames = as.list(names(channels)),
                                    choiceValues = as.list(temp),
                                    selected = temp[1])
        )
    })
    ##############################################################################
    #### When changing the marker panels let's reset the plots and images
    observeEvent(input$first_marker,{
        values$current_sample <- NULL
        values$current_tiffstack <- NULL
    })
    observeEvent(input$second_marker,{
        values$current_sample <- NULL
        values$current_tiffstack <- NULL
    })
    ##############################################################################
    #### Plot nearest neighbor panels
    #Renders one nearest-neighbor barplot (grey = marker-, black = marker+).
    #`callback` is the Shiny input id fired when a bar is clicked.
    plot_nn <- function(input, output, session,
                        transpose = FALSE, callback){
        if (!is.null(input$second_marker)){
            #use the Iris plot function to extract all relevant values
            vals <- plot_nearest_neighbor(iris_set,
                                          from = input$first_marker,
                                          to = input$second_marker,
                                          transposed = transpose)
            means <- t(vals$means)
            colnames(means) <- c('x','y')
            se <- t(vals$ses)
            colnames(se) <- NULL
            #set up a legend
            legend <- data.frame(col=c('grey','black'),
                                 name=c(paste0(input$second_marker,'-'),
                                        paste0(input$second_marker,'+')))
            margins <- list(top = 40,
                            right = 20,
                            bottom = 70,
                            left = 80)
            #plot the barplot
            d3Barplot(data=means,
                      se=se,
                      margins=margins,
                      beside=T,
                      las=2,
                      col=c('grey','black'),
                      xlab='',
                      ylab=vals$ylab,
                      title=vals$label,
                      title_size=20,
                      legend=legend,
                      subtitle=paste('Paired signed rank test:', format(vals$pval,digits=4)),
                      callback=callback)
        }
    }
    output$nn_panel <- renderd3Barplot({
        plot_nn(input, output, session,
                transpose = FALSE, callback = 'NN_select')
    })
    output$nnt_panel <- renderd3Barplot({
        plot_nn(input, output, session,
                transpose = TRUE, callback = 'NN_transpose')
    })
    #if an element on the first NN was clicked
    #(group 'x' is the marker- bar, anything else the marker+ bar)
    observeEvent(input$NN_select, {
        values$from_cell <- input$first_marker
        if (input$NN_select$group == 'x'){
            values$to_cell <- paste0(input$second_marker,'-')
        }else{
            values$to_cell <- paste0(input$second_marker,'+')
        }
        display_coordinates(input, values, session, input$NN_select$x_value)
    })
    #if an element on the transposed NN was clicked
    #(same as above but from/to are swapped)
    observeEvent(input$NN_transpose, {
        values$to_cell <- input$first_marker
        if (input$NN_transpose$group == 'x'){
            values$from_cell <- paste0(input$second_marker,'-')
        }else{
            values$from_cell <- paste0(input$second_marker,'+')
        }
        display_coordinates(input, values, session, input$NN_transpose$x_value)
    })
    #Switches the view to sample `selector`: stores it, refreshes the
    #coordinate dropdown, and loads images for the sample's first coordinate.
    display_coordinates <- function(input, values, session, selector){
        current <- iris_set@samples[[selector]]
        #update the sample
        values$current_sample <- current
        #add the coordinate selector
        updateSelectInput(session, 'coord_select',
                          choices = names(current@coordinates))
        #extract all the images
        extract_tiffstack(selector,
                          names(values$current_sample@coordinates)[1])
    }
    #extracting all tiffs related to the current sample / coordinate
    #HDF5 groups are named "/<sample>_[<coordinate>]"; the resulting list is
    #reordered to match values$channels before being stored.
    extract_tiffstack <- function(samp,coord){
        #access the right images
        img_dir <- h5ls(images)
        group_name <- paste0("/",samp,"_[",coord,"]")
        img_dir <- img_dir[img_dir$group == group_name,]
        image_names <- paste(group_name,img_dir$name,sep='/')
        #extract the layers
        maps <- lapply(image_names,
                       function(x,images){
                           h5read(images,
                                  x)},
                       images)
        names(maps) <- img_dir$name
        values$current_tiffstack <- maps[match(values$channels,names(maps))]
    }
    ##############################################################################
    #### Rayplot and image output
    #Draws rays from each `from_cell` to its nearest `to_cell` neighbor for
    #the currently selected coordinate; NULL until a bar has been clicked.
    output$rayplot_panel <- renderPlot({
        if (!is.null(values$current_sample) &&
            !is.null(values$to_cell) &&
            input$coord_select %in% names(values$current_sample@coordinates)){
            #figure out the coloring to be consistent
            from_col <- values$colors$colors[match(values$from_cell,values$colors$marker)]
            to_col <- values$colors$colors[match(values$to_cell,values$colors$marker)]
            #plot a ray plot
            rayplot_single_coordinate(x = values$current_sample@coordinates[[input$coord_select]],
                                      samp_name = values$current_sample@sample_name,
                                      from_type = values$from_cell,
                                      from_col = from_col,
                                      to_type = values$to_cell,
                                      to_col = to_col)
        }else{
            return(NULL)
        }
    }, height = function() {
        min(700, session$clientData$rayplot_panel_width)
    })
    #first removes the old image and then adds a new one
    #(reload the tiff stack whenever a different coordinate is chosen)
    observeEvent(input$coord_select, {
        if (!is.null(values$current_sample) &&
            input$coord_select %in% names(values$current_sample@coordinates)){
            #extract all the images
            extract_tiffstack(values$current_sample@sample_name,
                              input$coord_select)
        }
    })
    #simple rendering of an multiplex IF image: blend the selected channels
    #into one RGB image, write it as a jpeg, and serve it via a magnifier.
    output$IF_image <- renderShinyMagnifier({
        #extract_tiffstack(samp='1049',coord='52474,10131')
        if (!is.null(values$current_tiffstack)){
            #get colors for the currently checked channels
            selection <- input$ChannelSelectBox
            channels <- values$IF_colors
            channels <- channels[match(selection,channels$channel),]
            cols <- channels$colors
            #and tiffs
            tif <- values$current_tiffstack[selection]
            #sum up the colors into one single image
            img <- array(0,dim=c(dim(tif[[1]]),3))
            for (i in 1:length(tif)){
                rgb <- col2rgb(cols[i])
                for (j in 1:3){
                    #alpha blending: channel intensity weights its color
                    #against whatever has been accumulated so far
                    img[,,j] <- img[,,j] * (1.0 - tif[[i]]) + (tif[[i]] * rgb[j]/255)
                }
            }
            #and save it as a jpeg under a random name
            #(presumably to keep the browser from serving a stale cached
            #image -- confirm)
            temp_dir <- tempdir()
            temp_file <- paste0('temp',sample(1000000,1),'.jpg')
            values$img_file <- temp_file
            unlink(file.path(temp_dir,temp_file))
            writeJPEG(img, file.path(temp_dir,temp_file), color.space='RGBA')
            addResourcePath('img', temp_dir)
            ShinyMagnifier(file.path('img',temp_file),
                           file.path('img',temp_file),
                           zoom = 4,
                           width = 0.8 * input$dimension[1] / 2,
                           vspace = '50 0')
        }
    })
}
|
2330e8d1e4e01f6eaafc518d5b5ddedfcb3559ed | a5bbcb2b8c60e803c0bc6c5f3b6acd6f76f608cd | /man/bsButtons.Rd | 31c85a3bf52fcbbcaaaa1df06fe9a89ae5c559fc | [] | no_license | DataXujing/shinyBS | fdfaf0784b40c3693e43ade945bec22efa411bd1 | 6bfa2a44b6d05cebd251d7470b039878510fce3d | refs/heads/master | 2021-07-05T21:12:42.048441 | 2015-01-23T15:12:03 | 2015-01-23T15:12:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,006 | rd | bsButtons.Rd | \name{Button Customizations}
\alias{bsButtonGroup}
\alias{bsActionButton}
\alias{bsButton}
\alias{bsButtonGroup}
\alias{bsToggleButton}
\alias{updateButton}
\alias{updateButtonGroup}
\title{
Shiny Button Customizations
}
\description{
Functions for setting and changing the style, size, and state of various
buttons in a shiny app.
}
\usage{
bsButton(inputId, label, value, style = NULL, size = NULL, block = FALSE,
disabled = FALSE)
bsActionButton(inputId, label, style = NULL, size = NULL, block = FALSE,
disabled = FALSE)
bsToggleButton(inputId, label, value, style = NULL, size = NULL, block = FALSE,
disabled = FALSE)
bsButtonGroup(inputId, ..., label, toggle = "checkbox", style, size,
value = NULL, disabled = FALSE, block = FALSE, vertical = FALSE)
updateButton(session, id, label = NULL, value = NULL, style = NULL, size = NULL,
block = NULL, disabled = NULL)
updateButtonGroup(session, id, toggle = NULL, style = NULL, size = NULL,
disabled = NULL, value = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{session}{
The \code{session} object passed to function given to \code{shinyServer}
}
\item{inputId}{
Id to assign to the button or button group
}
\item{id}{
The id of the button/button group you want to update
}
\item{\dots}{
\code{bsButton()} objects to be added to the button group
}
\item{label}{
For buttons, the text to appear inside the button. For button groups, an
optional label that will appear above the button group
}
\item{toggle}{
The type of toggle behaviour the button group should have (See Details)
}
\item{style}{
The bootstrap style the button(s) should take (See Details)
}
\item{size}{
The bootstrap size the button(s) should take (See Details)
}
\item{block}{
Should the button or button group be a block level element? (i.e., should it
span the width of its parent element)
}
\item{vertical}{
Should the button group's buttons have a vertical orientation?
}
\item{value}{
The value of the button/button group (See Details)
}
\item{disabled}{
Should the button(s) be disabled? \code{logical}
}
}
\details{
\code{bsActionButton()} creates an action button that behaves just as a standard shiny action button does. It has the added functionality of being able to change its style and size. It can also be disabled/enabled.\cr
\code{toggle} can take a value of either \code{radio} or \code{checkbox}. \code{radio} will allow only one button in the button group to be selected at a time. \code{checkbox} will allow any number of buttons to be selected at a time. \cr
\code{style} can be any of the styles described in the Twitter Bootstrap 2.3.2 documentation. Acceptable values are currently: primary, info, success, warning, danger, inverse, or link. Additionally, when calling one of the update functions, style can be set to \code{default} to return to the default button style.\cr
\code{size} can be any of the sizes described in the Twitter Bootstrap 2.3.2 documentation. Accepatble values are currently: large, small, or mini. Additionally, when calling one of the update functions, style can be set to \code{default} to return to the default size.\cr
For toggle buttons, \code{value} can be \code{TRUE} or \code{FALSE} and corresponds to whether the button is currently 'clicked.' For \code{bsButton}, \code{value} is used to set the value that will be returned by containing \code{bsButtonGroup} object when the button is clicked. For button groups, \code{value} is used to set the current value to be returned by the group and should correspond to values assigned to buttons contained in the button group.\cr
\code{vertical} and \code{block} for button groups are experimental. They do not work well together and may not work under all browsers.
}
\references{
\href{http://getbootstrap.com/2.3.2/components.html}{Alerts for Twitter Bootstrap 2.3.2}
}
\author{
Eric Bailey
}
\note{
Run \code{bsDemo()} for a live example of these buttons.
}
\examples{
\dontrun{
# Create an action button, toggle button and a button group
# with three buttons with default styling in ui.R
bsActionButton("ab1", label = "bsActionButton"),
bsToggleButton("tb1", label = "bsToggleButton"), tags$p(),
bsButtonGroup("btngrp1", label = "bsButtonGroup", toggle = "radio", value = "right",
bsButton("btn1", label = "Left", value = "left"),
bsButton("btn2", label = "Middle", value = "middle"),
bsButton("btn3", label = "Right", value = "right")
)
# Update the previous buttons/button group to be small
# and of primary style in server.R
updateButton(session, "ab1", style = "primary", size = "small")
updateButton(session, "tb1", style = "primary", size = "small")
updateButtonGroup(session, "btngrp1", style = "primary", size = "small")
}
}
|
48c06e262b630ddf832042be6a716a30de4e4c6b | d93c4ca934a81dd95d20bbc9b4a6a9dd395369ff | /cachematrix.R | 3d7eaea3c3362ac8a9402f0b9546c8d8d808dc0c | [] | no_license | sederry/ProgrammingAssignment2 | 31f9bec6772a722f7cee1d0405fb4698f5a1aa1a | e9b96ab87e73602635ff7c17ff8cf7d702c45230 | refs/heads/master | 2021-01-17T14:16:53.507079 | 2014-07-27T21:17:52 | 2014-07-27T21:17:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,322 | r | cachematrix.R | ##This task is divided by 2 parts. First one (MakeCacheMatrix) is writing the inverse to cache memory and second
##(cachesolve) is looking for it and if it is found it is pulling answer from cache memory. Else it is computed
##and stored in memory
makeCacheMatrix <- function(x = matrix()) {
  # Build a "cache-aware matrix": a list of closures sharing the matrix `x`
  # and its cached inverse via the enclosing environment.
  #
  # Returned interface:
  #   set(y)      - replace the stored matrix and drop the cached inverse
  #   get()       - return the stored matrix
  #   setinv(inv) - store a computed inverse in the cache
  #   getinv()    - return the cached inverse, or NULL if not yet computed
  inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      # A new matrix invalidates any previously cached inverse.
      inverse <<- NULL
    },
    get = function() {
      x
    },
    setinv = function(inv_value) {
      inverse <<- inv_value
    },
    getinv = function() {
      inverse
    }
  )
}
## This part is looking for matrix inverse in memory. If it is found, then the value is pulled out from the cache,
## else it is computed and stored in the memory
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
  # object). A cached inverse is reused when available; otherwise the
  # inverse is computed with solve(), cached, and returned.
  #
  # Args:
  #   x   - list produced by makeCacheMatrix()
  #   ... - extra arguments forwarded to solve()
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("found in cache")
    return(cached)
  }
  # Cache miss: compute, store, return.
  result <- solve(x$get(), ...)
  x$setinv(result)
  result
}
8ccd7138c24cbaffbe8af4523b32f3ff8f549d53 | eb581ac9926e8a7278e693bd3fdf5ba0c1838337 | /InClass/InClass_S3.R | 812d973adb0a4cbc44ccbc13690ad83cb4cc04ed | [] | no_license | Squirlz/IntroR | a319a961e0a3e0889fb090c3095fe4878ad6035f | 32645066c4d1fa97f11bf7bd2865ac49164b808e | refs/heads/master | 2021-01-16T20:40:04.096192 | 2016-06-09T21:51:06 | 2016-06-09T21:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,152 | r | InClass_S3.R | ## NOTES
## Setting the levels of a factor
ranks <- c("low", "medium", "high")
sampleRanks <- sample(ranks, size = 20, replace = TRUE)
factor(sampleRanks, levels = c("low", "medium", "high"))
sampRank <- factor(sampleRanks, levels = c("high", "medium", "low"))
## converting numeric factors to JUST numeric (tricky)
fac <- factor(as.character(10:18))
as.numeric(fac)
as.numeric(as.character(fac))
## Factors are stored as numbers internally
as.numeric(sampRank)
## Vector subsets
vec <- c("a" = 1, "b" = 2, "c" = 3)
vec["a"]
vec[1]
vec[c(TRUE, FALSE, TRUE)]
ranks
ranks["a"]
## Subsets with
## Double bracket notation
mtcars[["mpg"]]
aList <- list(a = 1:3, b = letters[1:3])
aList[[1]]
aList[["b"]]
# Dollar sign accessor
aList$a
aList$b
mtcars$hp
## Using Conditions to subset
mtcars[ mtcars$mpg > 20, ]
airquality[ airquality$Month == "May", ]
# Sorting data
airquality
airquality$Month <- factor(airquality$Month,
levels = 5:9,
labels = c("May", "June", "July",
"August", "September"))
order(airquality$Temp)
airquality[ order(airquality$Temp), ]
airquality[ order(airquality$Temp, decreasing = TRUE), ]
## dplyr alternative
library(dplyr)
arrange(airquality, Temp)
arrange(airquality, -Temp)
## Checking for duplicated rows (none found)
duplicated(airquality)
unique(airquality$Day)
?complete.cases
complete.cases(airquality)
airquality[complete.cases(airquality) ,]
## Aggregate data
## formula notation
aggregate(. ~ Month, data = airquality, mean)
## dplyr alternative
airquality %>%
group_by(Month) %>%
summarise_each(funs(mean(., na.rm = TRUE)))
## Merging
shuffle <- sample(nrow(mtcars))
A <- mtcars[shuffle, 5:9]
B <- mtcars[, 1:4]
merge(x = A, y = B, by = "row.names")
## Note: for dplyr see cheatsheet for joins
## Binding
## Adding rows and columns
rbind()
cbind()
## Binning
library(Hmisc)
?cut2
## Renaming factors
levels(x) <- c("new", "names")
## Transformations
## Adding a variable
mtcars$logDis <- log(mtcars$disp)
head(mtcars)
## with dplyr
mtcars %>% mutate(lDis = log(disp))
|
061a4f9a6a063f949e1585550ba096e4bdbd4f51 | a7d4e58193c5896f397efb2fdd8c707908e99348 | /R/vector_to_number.R | b0c0d476a12de9ebdd70d65f576038ea26574d74 | [] | no_license | erwinrmendez/EulerFunctions | 165c71d968386b3e5c6bf8da7eafa992670b1fbc | a60c377b45f8c2541adc8e29578409279bb4c1d4 | refs/heads/master | 2023-07-22T14:15:09.214385 | 2021-09-02T00:42:08 | 2021-09-02T00:42:08 | 196,075,905 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 413 | r | vector_to_number.R | #' Vector to Number
#'
#' Converts numerical vector to a number, concatenating the numbers in given vector as consecutive digits of the number.
#' @param numeric_vector Numerical vector to parse into number.
#' @return Number
#' @export
#' @examples
#' vector_to_number(c(1,2,3))
vector_to_number <- function(numeric_vector) {
number <- as.numeric(paste0(numeric_vector, collapse = ''))
return(number)
}
|
2368c14bde27ce506feecea2001d3faea9a64b30 | d03924f56c9f09371d9e381421a2c3ce002eb92c | /man/r-methods.Rd | 22f6cb3e0c92f8d5106755a5db99be3d64df6bc6 | [] | no_license | cran/distr | 0b0396bbd5661eb117ca54026afc801afaf25251 | c6565f7fef060f0e7e7a46320a8fef415d35910f | refs/heads/master | 2023-05-25T00:55:19.097550 | 2023-05-08T07:10:06 | 2023-05-08T07:10:06 | 17,695,561 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 555 | rd | r-methods.Rd | \name{r-methods}
\docType{methods}
\alias{r-methods}
\alias{r}
\alias{r,Distribution-method}
\title{ Methods for Function r in Package `distr' }
\description{r-methods}
\section{Methods}{\describe{
\item{r}{\code{signature(object = "Distribution")}: generates random deviates
according to the distribution}
}}
\seealso{
\code{\link{Distribution-class}}
}
\keyword{distribution}
\keyword{methods}
\concept{random number generator}
\concept{RNG}
\concept{accessor function}
\concept{pseudo random number}
\concept{anamorphosis}
|
700248a5a6765c4c40cab9ff14c21963db6e4cc2 | e56247c094ad626694e2d187930f774362616d2d | /R/common.R | 503e40caea6bd5d4d5be978786dfa4636fe05ca1 | [] | no_license | pmur002/rdataviewer | 2996fc981e84d77f4f205e2a0162bdd25675317a | 31459cf81ae9b28a86f9d18e3c0f09b26d46f011 | refs/heads/master | 2021-01-02T09:15:27.076865 | 2011-11-29T22:34:55 | 2011-11-29T22:34:55 | 32,362,192 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 438 | r | common.R |
# Left-pad each column name with spaces so that, no matter which rows of the
# data frame are printed, every column keeps a constant display width.
# A name is padded up to (width - 1) characters; names already that long or
# longer are returned unchanged.
padColNames <- function(names, widths) {
    pad_one <- function(name, width) {
        deficit <- width - nchar(name) - 1
        if (deficit > 0) {
            paste0(strrep(" ", deficit), name)
        } else {
            name
        }
    }
    unlist(mapply(pad_one, names, widths))
}
|
541eb207254e662c324e4f38c7212a49ef38c76d | 2195aa79fbd3cf2f048ad5a9ee3a1ef948ff6601 | /docs/CreateAccountDialog.rd | 6f7b49d8f2ef3edde463dc56da3f1812a3c3c74d | [
"MIT"
] | permissive | snakamura/q3 | d3601503df4ebb08f051332a9669cd71dc5256b2 | 6ab405b61deec8bb3fc0f35057dd880efd96b87f | refs/heads/master | 2016-09-02T00:33:43.224628 | 2014-07-22T23:38:22 | 2014-07-22T23:38:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,398 | rd | CreateAccountDialog.rd | =begin
=[アカウントの作成]ダイアログ
((<アカウントの作成|"IMG:images/CreateAccountDialog.png">))
[OK]を押すと作成したアカウントのプロパティを指定するダイアログが開きます。
+[名前]
アカウント名を指定します。
ファイル名として使えない文字は使えません。
+[クラス]
アカウントクラスを指定します。選択できるのは、「mail」「news」「rss」のいずれかです。メールアカウントを作成するには「mail」を、ニュースアカウント作成するには「news」を、RSSアカウントを作成するには「rss」を選択します。
+[受信プロトコル]
受信するのに使用するプロトコルを指定します。アカウントクラスによって選択できるプロトコルが変わります。各アカウントクラスで選択できるのはそれぞれ以下のプロトコルになります。
*mail
*POP3
*IMAP4
*news
*NNTP
*rss
*RSS
+[送信アカウント]
送信するのに使用するプロトコルを指定します。アカウントクラスによって選択できるプロトコルが変わります。各アカウントクラスで選択できるのはそれぞれ以下のプロトコルになります。
*mail
*SMTP
*POP3 (XTND XMIT)
*news
*NNTP
*rss
*Blog
====[メッセージボックス]
メッセージボックスの作り方を指定します。以下の二つから選択できます。
基本的には[1メッセージ1ファイル]をお勧めします。ただし、Windows CEで外部メモリカードを使用する場合には、[1ファイル]にした方がディスクの使用量が少なくて済みます。
[1ファイル]にした場合に、ウィルス入りのメッセージを受信したときに、ウィルス対策ソフトがファイルを消してしまうことがあります。このような動作をするウィルス対策ソフトを使用している場合には、この形式は使わないでください。
+[1メッセージ1ファイル]
一通のメッセージを一つのファイルにします。
*利点
*トラブルが起きたときにメッセージが失われる可能性が低い
*全文検索が使える
*インデックスファイルが壊れたときに復元しやすい
*欠点
*ディスクを多く消費する
*少し遅い
+[1ファイル]
すべてのメッセージを一つのファイルにします。
*利点
*ディスクの使用効率が良い
*欠点
*全文検索が使えない
[1ファイル]を選んだ場合には、[ブロックサイズ]を指定できます。0以外を指定すると指定したファイルサイズでファイルを分割します。この機能は、主にWindows CEで本体メモリに大きなファイルを置くと極端に処理速度が落ちるのを回避するために使用します。
+[インデックスのブロックサイズ]
インデックスファイルのブロックサイズを指定します。上記のどちらの形式を選んだ場合でも、メッセージのインデックス情報は別の一つのファイルに保存されます。0以外を指定すると、そのファイルを指定したファイルサイズで分割します。
=end
|
38ebac7e46f1efbdd968e0afb9dc90c283f82212 | 03ba353aaf188e0b34f98de82e8ecb481682f63e | /man/readRice2018.Rd | a5d9d04c2cba93288e03a5c98415ea45f9f3bc50 | [
"MIT"
] | permissive | ktoddbrown/RIDS | ef8b5c0bc0d791e54102896a32991fe6d8649e0b | 32a2e38c50b1b740a5b6035f7bd36cf36b460c88 | refs/heads/master | 2022-04-22T19:12:08.703955 | 2020-04-24T15:31:40 | 2020-04-24T15:31:40 | 256,232,822 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 941 | rd | readRice2018.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readRice2018.R
\name{readRice2018}
\alias{readRice2018}
\title{Read in Rice 2018}
\usage{
readRice2018(dataDir = "data/Rice2018")
}
\arguments{
\item{dataDir}{string that specifies the data directory}
}
\value{
a list that contains the tabular dataset, a tabular version of the meta-data, the file names of the local data copies, a list of study information (abstract, copy rights, method notes)
}
\description{
Reads in data from Charles Rice. 2018. OMB01 Microbial biomass in the Belowground Plot Experiment at Konza Prairie (1989-1999). LTER Network Member Node. https://pasta.lternet.edu/package/metadata/eml/knb-lter-knz/53/7.
ABSTRACT: The purpose of this data set is to observe long-term variations in microbial biomass in belowground plots at Konza Prairie. These effects are due to annual burning, mowing, and nitrogen and phosphorus fertilization.
}
|
d241d8dd6883433db84e9fde8920a7c2fb0aeba4 | 375233ef3269f616b1c61af73e951ba1628c624f | /app.R | 3594c423dc58473c4888cfa205f7c17bff2d8f49 | [] | no_license | ttan27/baseball | 2c02eb2164e4451b4c7dfaa8c00c679ba8721e6a | 33b7d8da3596f8b3662aa12f8f878a89efd7fd42 | refs/heads/master | 2020-03-26T04:57:12.467594 | 2018-08-19T22:47:44 | 2018-08-19T22:47:44 | 144,530,490 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,933 | r | app.R | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
library(shiny)
library(tidyverse)
library(shinydashboard)
library(rvest)
library(DT)
source('functions.R')
#Defining UI
# Dashboard layout: a sidebar with two pages ("Front Page", "Teams") plus the
# matching tab bodies. Every data table is wrapped in a horizontally
# scrolling <div> so wide tables stay usable inside the boxes.
ui <- dashboardPage(
  dashboardHeader(title = "MLB Dashboard"),
  dashboardSidebar(
    sidebarMenu(
      menuItem("Front Page", tabName = "front", icon = icon("dashboard")),
      menuItem("Teams", tabName = "teams", icon = icon("th"))
    )
  ),
  dashboardBody(
    tabItems(
      # Landing page: division standings on top, league leaders below.
      tabItem(
        tabName = "front",
        fluidRow(
          tabBox(
            id = "tabset1", title = "American League Standings",
            width = 6, height = 400,
            tabPanel("AL East", div(style = "overflow-x: scroll", DT::dataTableOutput("ale"))),
            tabPanel("AL Central", div(style = "overflow-x: scroll", DT::dataTableOutput("alc"))),
            tabPanel("AL West", div(style = "overflow-x: scroll", DT::dataTableOutput("alw")))
          ),
          tabBox(
            id = "tabset2", title = "National League Standings",
            width = 6, height = 400,
            tabPanel("NL East", div(style = "overflow-x: scroll", DT::dataTableOutput("nle"))),
            tabPanel("NL Central", div(style = "overflow-x: scroll", DT::dataTableOutput("nlc"))),
            tabPanel("NL West", div(style = "overflow-x: scroll", DT::dataTableOutput("nlw")))
          )
        ),
        fluidRow(
          tabBox(
            id = "tabset3", title = "American League Leaders",
            width = 6, height = 300,
            tabPanel("AVG", div(style = "overflow-x: scroll", DT::dataTableOutput("alAVG"))),
            tabPanel("H", div(style = "overflow-x: scroll", DT::dataTableOutput("alH"))),
            tabPanel("HR", div(style = "overflow-x: scroll", DT::dataTableOutput("alHR"))),
            tabPanel("SB", div(style = "overflow-x: scroll", DT::dataTableOutput("alSB"))),
            tabPanel("RBI", div(style = "overflow-x: scroll", DT::dataTableOutput("alRBI")))
          ),
          tabBox(
            id = "tabset3", title = "National League Leaders",
            width = 6, height = 300,
            tabPanel("AVG", div(style = "overflow-x: scroll", DT::dataTableOutput("nlAVG"))),
            tabPanel("H", div(style = "overflow-x: scroll", DT::dataTableOutput("nlH"))),
            tabPanel("HR", div(style = "overflow-x: scroll", DT::dataTableOutput("nlHR"))),
            tabPanel("SB", div(style = "overflow-x: scroll", DT::dataTableOutput("nlSB"))),
            tabPanel("RBI", div(style = "overflow-x: scroll", DT::dataTableOutput("nlRBI")))
          )
        )
      ),
      # Team drill-down page: pickers, batting leaders, full schedule.
      tabItem(
        tabName = "teams",
        fluidRow(
          box(
            title = "Team Select", width = 12,
            # Team choices come from functions.R; the year choices are filled
            # in by the server once a team has been picked.
            selectInput("teamSel", "Select Team", choices = getTeams()[2]),
            selectInput("yearSel", "Select Year", choices = NULL),
            actionButton("selTeam", "Go")
          )
        ),
        fluidRow(
          tabBox(
            id = "teamLead", title = "Batting Leaders", width = 6,
            # Each tab's `value` is reported to the server as input$teamLead.
            tabPanel("AVG", value = "BA", div(style = "overflow-x: scroll", DT::dataTableOutput("teamLead"))),
            tabPanel("H", value = "H", div(style = "overflow-x: scroll", DT::dataTableOutput("teamLead"))),
            tabPanel("HR", value = "HR", div(style = "overflow-x: scroll", DT::dataTableOutput("teamLead"))),
            tabPanel("RBI", value = "RBI", div(style = "overflow-x: scroll", DT::dataTableOutput("teamLead"))),
            tabPanel("SB", value = "SB", div(style = "overflow-x: scroll", DT::dataTableOutput("teamLead")))
          )
        ),
        fluidRow(
          box(
            title = "Team Results", width = 12,
            div(style = "overflow-x: scroll", DT::dataTableOutput("teamSched"))
          )
        )
      )
    )
  )
)
# Define server
# Server logic: reactive team/year pickers, team drill-down tables, and the
# static front-page tables (standings + 2018 league leaders).
server <- function(input, output, session) {
  # When the team changes, limit the year picker to the seasons from 2018
  # back to the team's first season (column 4 of getTeams()).
  # See https://stackoverflow.com/questions/48376156/updating-a-selectinput-based-on-previous-selectinput-under-common-server-functio
  observeEvent(input$teamSel, {
    updateSelectInput(session, 'yearSel',
                      choices = c(2018:getTeams()[getTeams()$`Team ID` == input$teamSel, 4]))
  })
  # "Go" button: render the selected team's schedule and batting leaders.
  observeEvent(input$selTeam, {
    # Team schedule and results.
    output$teamSched <- renderDataTable(getTeamDetail(input$teamSel, input$yearSel))
    # Batting leaders; input$teamLead is the `value` of the active stat tab.
    output$teamLead <- renderDataTable(
      getTeamLeaders(input$yearSel, input$teamLead, input$teamSel, TRUE)
    )
  })
  # Scrape the standings ONCE and reuse them for all six divisions
  # (previously getStandings() was called once per division, re-scraping
  # the same page six times).
  standings_raw <- getStandings()
  division_table <- function(i) {
    # One division's standings with uniform display column names.
    tbl <- as.data.frame(standings_raw[i])
    names(tbl) <- c('Tm', 'W', 'L', 'W.L', 'GB')
    tbl
  }
  # Division order follows getStandings(): AL E/C/W then NL E/C/W.
  output$ale <- renderDataTable(division_table(1))
  output$alc <- renderDataTable(division_table(2))
  output$alw <- renderDataTable(division_table(3))
  output$nle <- renderDataTable(division_table(4))
  output$nlc <- renderDataTable(division_table(5))
  output$nlw <- renderDataTable(division_table(6))
  # League-wide batting leaders for the 2018 season, one table per stat.
  output$alAVG <- renderDataTable(getLeagueLeaders('al', 2018, 'AVG'))
  output$nlAVG <- renderDataTable(getLeagueLeaders('nl', 2018, 'AVG'))
  output$alH <- renderDataTable(getLeagueLeaders('al', 2018, 'H'))
  output$nlH <- renderDataTable(getLeagueLeaders('nl', 2018, 'H'))
  output$alHR <- renderDataTable(getLeagueLeaders('al', 2018, 'HR'))
  output$nlHR <- renderDataTable(getLeagueLeaders('nl', 2018, 'HR'))
  output$alSB <- renderDataTable(getLeagueLeaders('al', 2018, 'SB'))
  output$nlSB <- renderDataTable(getLeagueLeaders('nl', 2018, 'SB'))
  output$alRBI <- renderDataTable(getLeagueLeaders('al', 2018, 'RBI'))
  output$nlRBI <- renderDataTable(getLeagueLeaders('nl', 2018, 'RBI'))
}
# Run the application
shinyApp(ui = ui, server = server)
|
fc5bcaaceb158bca44d6082e48080fad1fa40e19 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/GAD/examples/snk.test.Rd.R | 46fc338f142753da076fd769667cec69668df109 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 949 | r | snk.test.Rd.R | library(GAD)
### Name: snk.test
### Title: Student-Newman-Keuls (SNK) procedure
### Aliases: snk.test
### Keywords: htest
### ** Examples
library(GAD)
# Worked examples for GAD's Student-Newman-Keuls (SNK) multiple-comparison
# procedure, extracted from the snk.test help page.
data(rohlf95)
# Nested design: a fixed cage factor and a random mosquito factor.
CG <- as.fixed(rohlf95$cages)
MQ <- as.random(rohlf95$mosquito)
model <- lm(wing ~ CG + CG%in%MQ, data = rohlf95)
gad(model)
##Check estimates to see model structure
estimates(model)
# SNK comparisons for the nested term.
snk.test(model,term = 'CG:MQ', among = 'MQ', within = 'CG')
##
##
##Example using snails dataset
data(snails)
# All four factors random; boulders and cages are nested (see formula).
O <- as.random(snails$origin)
S <- as.random(snails$shore)
B <- as.random(snails$boulder)
C <- as.random(snails$cage)
model <- lm(growth ~ O + S + O*S + B%in%S + O*(B%in%S) + C%in%(O*(B%in%S)),
            data = snails)
gad(model)
##Check estimates to see model structure
estimates(model)
snk.test(model, term = 'O')
snk.test(model,term = 'O:S', among = 'S', within = 'O')
#if term O:S:B were significant, we could try
snk.test(model, term = 'O:S:B', among = 'B', within = 'O:S')
|
36c697d50f9013ce8224576785ddc2254cfb21f0 | 731140756e9b8dd64767cec86612713d93248824 | /MTTTA14/Week5/T7.R | 3fc60af85fefb5ceab90fe497a6af22e489ea07f | [] | no_license | TP1997/Some-R-exercises | b1669b93fe7f3d3ac39992d329bb89606bf57ba3 | 2d2716e32ce6779692f8515e85a751d4e7ce34a7 | refs/heads/master | 2020-09-30T23:56:00.866562 | 2020-01-07T19:27:38 | 2020-01-07T19:27:38 | 227,403,668 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 290 | r | T7.R | #Tehtävä 7
set.seed(123)
gen1 <- function(n, pis, us, sss) {
  # Pool the per-component means and variances using the weights in `pis`
  # (column vectors; t(pis) %*% x is the weighted sum), then draw `n`
  # normal deviates from the pooled distribution.
  pooled_mean <- t(pis) %*% us
  pooled_var <- t(pis) %*% sss
  rnorm(n, pooled_mean, sqrt(pooled_var))
}
# Supply the summary statistics (component weights, means, variances):
pis=matrix(c(0.28,0.18,0.54), nrow = 3)
us=matrix(c(110,187,229), nrow = 3)
sss=matrix(c(354,320,845), nrow = 3)
gen1(10, pis, us, sss)
|
e7e556f8f0126075ffa2532cda316213a49496b6 | e565d9aa96dc33f1be329a493f23137bf6ff2d87 | /man/add.price.Rd | e06be2843be432660b652c8ef3e68b66c888c4c1 | [] | no_license | jdanielnd/tsgen | 02803fdb178e851974807fce9a4794fcdc5b10c0 | 008e34559f1bf124c67aced9becb42ac8b78afee | refs/heads/master | 2016-08-05T14:06:06.391571 | 2012-05-07T01:05:01 | 2012-05-07T01:05:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,143 | rd | add.price.Rd | \name{add.price}
\alias{add.price}
\title{
Add price response
}
\description{
Add price response to the series regarding the parameters
}
\usage{
add.price(tser, elast, random.price=FALSE, inf.limit=NULL, sup.limit=NULL, p.vec=NULL)
}
\arguments{
\item{tser}{
time series to which the price response will be added
}
\item{elast}{
can be either a number, or a vector. If it's a vector, a loess will be fitted to it, generating elasticity values for each observation.
}
\item{random.price}{
TRUE or FALSE depending on whether the price should be random, or a loess fitted vector.
}
\item{inf.limit}{
inferior limit of price when it's random.
}
\item{sup.limit}{
superior limit of price when it's random.
}
\item{p.vec}{
vector of prices to be fitted by loess
}
}
\value{
returns the time series with price response
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
set.seed(123)
kp <- c(100,110,140,120,90)
bs <- basic.series(kp, start=c(2008,1))
bss <- add.season(bs)
add.price(bss, -2, random.price=TRUE, 2, 10)
add.price(bss, c(-2,-3,-2,-1), random.price=FALSE, p.vec=c(4,2,5,7,10))
}
|
b450e9e82faa059ac6e38a5ec506d2ef34bd1111 | 0d819c261034e6e5077cfdf861e2ceca8b28292c | /rume_setup.R | b57165714b43b7bd88cd5dfc4ce33ba9ec56326f | [] | no_license | gbizel/debt | 8e59b0e99f7babe5c2691e08984ab078f434a744 | 1bbd67cc1e4e9bdf5247b40dfe73534a8a304184 | refs/heads/master | 2020-06-23T17:20:21.846613 | 2016-09-21T09:44:56 | 2016-09-21T09:44:56 | 65,899,330 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,069 | r | rume_setup.R | # install.packages("sqldf")
# install.packages("RODBC")
# install.packages("ggplot2")
# install.packages("dplyr")
install.packages("plyr")
# install.packages("data.table")
require("data.table")
library("Hmisc")
# library("RODBC")
# library("sqldf") # ?
library("dplyr")
library("ggplot2")
# Locate the 2010 RUME survey Access database and load every table into a
# named list via Hmisc::mdb.get (requires mdbtools on the system).
list.files("/Users/gaston/Desktop/ifp/base_2010")
db_path = "/Users/gaston/Desktop/ifp/base_2010/Database_Base_400_Final_Work.mdb"
rume <- mdb.get(db_path)
# -----------------------------------
head(rume$`T 1 General informations`)
idf <- function ( column ) {
  # Convert each element to a factor, then flatten the resulting list back
  # into a single factor (unlist merges the per-element levels).
  per_element <- lapply(column, as.factor)
  unlist(per_element)
}
types_avances = idf(rume$`T25 Migration Full`$X25.2.Q.Advance.through)
summary(types_avances)
avance_travail = which(types_avances == "1")
job_who = rume$`T25 Migration Full`$X25.2.K.How.know.person[avance_travail]
idf(job_who)
summary(idf(job_who))
job_kind = idf(rume$`T25 Migration Full`$X25.2.H.Migration.job)
summary(job_kind)/length(job_kind)*100
summary(job_kind[avance_travail])/length(job_kind[avance_travail])*100
head(rume$`T25 Migration Full`)
rume$`T25 Migration Full`$X25.2.Q.Advance.through
rume$`T25 Migration Full`$X25.2.Q.Advance.through
rume$`X_Advance through`
# qry <- "SELECT * FROM "
#
# sqldf::sqldf("select * from rume$`T 1-1 Family members`")
#
# rume$`T 1-1 Family members`
#
# install.packages("RODBC")
#
# mdbTables(qry)
#
# RODBC::sqlTables(rume, tableType = "TABLES")
#
#
# mdb.get()
# rume_t = rume$`T 1 General informations`
#
# head(rume_t)
#
# sqldf("select Code.family from rume_t")
rume_migration = rume$`T25 Migration Full`
index_advance = which(rume_migration$X25.2.Q.Advance.through == "1" &
rume_migration$X25.2.H.Migration.job %in% c("1","2") )
rume_migration_avance <- rume_migration[index_advance,]
rume_migration_avance$Code.family
rume_occupation = rume$`T 2 Occupations`
index_stop = which(rume_occupation$X2.1.Stop.working.due.to.accident == 1)
rume_stop = rume_occupation[index_stop,]
# il faut aussi matcher le family member
match(rume_migration_avance$Code.family, rume_stop$Code.Family)
match(1:200, c(1,4,4,5,3,30))
rume_migration_avance[1:20,1]
#Code individu
rume_migration$Code.individu <- paste(
rume_migration$Code.family, rume_migration$X25.2.A.Code.id.member)
# --------------------------------
r_migration = rume$`T25 Migration Full`
index_brick_sugar = which(r_migration$X25.2.Q.Advance.through == "1" &
r_migration$X25.2.H.Migration.job %in% c("1","2"))
indiv_brick_sugar = data.frame(0,1,2)
indiv_brick_sugar = as.data.frame(r_migration$Code.family[index_brick_sugar])
indiv_brick_sugar[,2] <- r_migration$X25.2.A.Code.id.member[index_brick_sugar]
colnames(indiv_brick_sugar) <- c("Code.family","Code.member")
indiv_brick_sugar
cat = 26
category_match <- function ( cat ) {
  # Locate the first element of the global `rume` table list whose name
  # starts with the table tag: "T <n>" for single-digit table numbers,
  # "T<nn>" otherwise. Only the first three characters of each table name
  # are compared; returns NA when nothing matches.
  tag <- if (cat < 10) paste("T", cat, sep = " ") else paste("T", cat, sep = "")
  match(tag, substr(names(rume), 1, 3))
}
# Unimplemented stub: body is empty, so this always returns NULL.
# NOTE(review): presumably meant to look a question/column up by name --
# confirm intent before relying on it.
question_match <- function ( question ) {
}
# Fetch the data.frame for survey table `cat` (and sub-table offset
# `souscat`) from the global `rume` list loaded by mdb.get().
#
# cat        (int) table number, e.g. 18 selects the first table named "T18…"
# souscat    (int or NULL) sub-table offset; NULL is treated as 0.
#            Fix: it now defaults to NULL -- previously the is.null() check
#            could never fire because the argument had no default.
# conditions currently unused; kept for backward compatibility.
#
# NOTE(review): with souscat = 0 the index becomes i - 1, i.e. the list
# entry *before* table `cat` -- confirm this offset is intended.
key_match <- function ( cat, souscat = NULL, conditions = NULL ) {
  if (is.null(souscat)) { souscat = 0 }
  i = category_match(cat)
  r_category <- rume[[i+souscat-1]]
  return(r_category)
}
test18 <- key_match(18,1, c(1,"1",4,"1"))
mattt = cbind(test18[,2],test18[,3]) == cond(c(1,2), nrow(test18)) #,nrow(test18)
mattt[,3] <- sum(mattt[,1] , mattt[,2])
c(1,2,3,4)
# Build an l-row matrix whose j-th column is vect[j] repeated l times
# (used to compare data-frame columns against a vector of expected codes).
#
# vect  values, one per output column
# l     number of rows
#
# Returns NULL for an empty `vect`, matching the previous behaviour.
# Fix: the old version grew the matrix with cbind() inside a loop; build
# all columns first and bind once instead.
cond <- function (vect, l) {
  columns <- lapply(vect, rep, l)
  do.call(cbind, columns)
}
names(rume)
## Asservis et accidents --------------
`
# plus judicieux : comparer les assets des foyers..
## cf age-->
r_family = rume$`T 1-1 Family members`
indiv_match.fam = match( indiv_brick_sugar[,3],
paste(r_family$Code.family,r_family$X1.A.Code.id.member))
r_family.asservis <- r_family[indiv_match.fam, ]
summary(r_family$X1.E.Age)
summary(r_family.asservis$X1.E.Age)
summary(sapply(r_family$X1.C.Male.Female,as.factor))
summary(sapply(r_family.asservis$X1.C.Male.Female,as.factor))
# Plot age (tests) -------
#
age <- data.frame(age = as.vector(r_family$X1.E.Age) )
age.asservis <- data.frame(age = as.vector(r_family.asservis$X1.E.Age) )
age.groups <- data.frame(groupe = factor( rep( c("asservi","total"),
c(length(age.asservis$age),length(age$age)) ) ),
age = rbind(age.asservis,age) )
age.means <- age.groups %>%
group_by(groupe) %>%
summarise(age.mean=mean(age))
age.groups %>%
ggplot(aes(x=age, fill=groupe)) +
geom_histogram(aes(y=..density..),
binwidth=10,
position="dodge",
alpha = 0.5) +
geom_density(alpha=.5, position="identity") +
geom_vline(data=age.means, aes(xintercept=age.mean, colour=groupe),
linetype="dashed", size=1)
age.asservis %>%
ggplot(aes(x=age)) +
geom_histogram(aes(y=..density..),
binwidth=2,
colour="black", fill="white") +
geom_density(alpha=.2, fill="#FF6666") +
scale_x_continuous( limits = l)
## Plot fonction ----------
density_plot <- function( data, data.asservis, name, width, limits = NULL ) {
  # Overlaid histogram + density comparison of one variable between the
  # "asservi" (bonded) subsample and the full sample, with a dashed
  # vertical line at each group mean.
  #   data          values for the whole sample
  #   data.asservis values for the bonded subsample
  #   name          x-axis label
  #   width         histogram bin width
  #   limits        optional x-axis limits (passed to scale_x_continuous)
  full_df <- data.frame(dat = as.vector(data))
  bonded_df <- data.frame(dat = as.vector(data.asservis))
  group_labels <- factor(rep(c("asservi", "total"),
                             c(length(bonded_df$dat), length(full_df$dat))))
  combined <- data.frame(groupe = group_labels,
                         dat = rbind(bonded_df, full_df))
  group_means <- combined %>%
    group_by(groupe) %>%
    summarise(dat.mean = mean(dat))
  combined %>%
    ggplot(aes(x = dat, fill = groupe)) +
    geom_histogram(aes(y = ..density..),
                   binwidth = width,
                   position = "dodge",
                   alpha = 0.5) +
    geom_density(alpha = .5, position = "identity") +
    geom_vline(data = group_means,
               aes(xintercept = dat.mean, colour = groupe),
               linetype = "dashed", size = 1) +
    labs(x = name) +
    scale_x_continuous(limits = limits)
}
density_plot(r_family$X1.E.Age,
r_family.asservis$X1.E.Age,
"age",
width = 6)
density_plot(r_family$X1.C.Male.Female,
r_family.asservis$X1.C.Male.Female,
"sex",
width = .5)
density_plot(r_family$X1.D.Relation,
r_family.asservis$X1.D.Relation,
"relation",
width = 0.5,
limits = c(0,14))
# Multivariate plot -------
data = as.factor(r_family$X1.D.Relation)
data.asservis = as.factor(r_family.asservis$X1.D.Relation)
# test <-dat.groups %>%
# group_by(groupe, dat) %>%
# summarize(perc = n())
#
# test <-dat %>%
# group_by(dat) %>%
# summarize(perc = n())
# test$perc <- test$perc/nrow(dat)
#
# test2 <-dat.asservis %>%
# group_by(dat) %>%
# summarize(perc = n())
# Side-by-side bar chart comparing the category distribution of `data`
# (full sample) against `data.asservis` (bonded subsample).
#   level  drop categories whose percentage falls below this threshold
#   names  optional 2-column lookup (old code -> label) applied via
#          match_levels() before tabulating; also fixes the bar order
# NOTE(review): the data.table call below uses a function `grp()` that is
# not defined anywhere in this file -- it must return a table with columns
# `x` and `percentage`; confirm where it comes from before running.
multi_plot <- function( data, data.asservis, level = 0, names = NULL) {
  data <- as.factor(data)
  data.asservis <- as.factor(data.asservis)
  if (!is.null(names)) {
    data <- match_levels(data,names)
    data.asservis <- match_levels(data.asservis,names)
  }
  # Stack both samples, labelling each row with its group.
  dat <- data.frame(dat = data )
  dat.asservis <- data.frame(dat = data.asservis )
  dat.groups <- data.frame(groupe = factor( rep( c("asservi","total"),
                                                 c(length(dat.asservis$dat),length(dat$dat)) ) ),
                           dat = rbind(dat.asservis,dat) )
  DT <- data.table(dat.groups)
  # Per-group category percentages (side effect: prints the raw table).
  DT.pt <- DT[, grp(dat), by=groupe]
  print(DT.pt)
  # DT.pt <- DT.pt[-c(8,9,10,11,12,14,22:26,28)]
  # Drop categories below the `level` threshold ("inutile" = useless).
  inutile <- which(DT.pt$percentage < level)
  if (length(inutile) > 0) {
    DT.pt <- DT.pt[-inutile]
  }
  # Re-level `x` so bars follow the order given in `names`.
  DT.pt2 <- within(DT.pt,
                   x <- factor(x,
                               levels= names[,2]))
  # Caution: risky -- the levels could get mixed up here.
  # Double-check against the numeric table printed above.
  DT.pt2 %>%
    ggplot() +
    geom_bar(aes(x= x, y=percentage, fill = groupe),
             position="dodge",
             stat="identity")
}
multi_plot(as.factor(r_family$X1.D.Relation),
as.factor(r_family.asservis$X1.D.Relation),
level = 0.01,
names = relation_levels)
# -->
# father ++
# wife, son -
# daugter: replaced by daughter in law, or sun in law
multi_plot(as.factor(r_family$X1.G.Education),
as.factor(r_family.asservis$X1.G.Education),
level = 0.01,
names = rume$X_Education)
x = factor(c(1,2))
# mutate(x,c(1,2),c(2,3))
plyr::mapvalues(x,c(1,2),c(2,3))
## Levels ------------- -----------
match_levels <- function(data, names) {
  # Relabel the values of `data` (coerced to factor) using the lookup table
  # `names`: column 1 holds the old codes, column 2 the replacement labels.
  old_codes <- names[, 1]
  new_labels <- as.vector(names[, 2])
  plyr::mapvalues(as.factor(data), old_codes, new_labels)
}
match_levels(r_family.asservis$X1.D.Relation,rume$X_Relation)
match_levels(r_family.asservis$X1.D.Relation,relation_levels)
relation_levels = t(matrix(c(01, "father",
02, "wife",
03, "mother",
04, "father",
05, "son",
06, "daughter",
07, "daughter-in-law",
08, "son-in-law",
09, "sister",
10, "mother in law",
11, "father in law",
12, "brother elder",
13, "brother younger",
14, "others"),2,14))
# relation levels = X_Family...
## -----------------
relation_levels
r_family.asservis[which(r_family.asservis$X1.D.Relation == 7),]
# belles filles: 30 35 26 20
## test dplyr --------------
tbl_df(r_family)
fest_femm <-filter(r_family, X1.C.Male.Female == 1 )
fest_femm_ord <- arrange(fest_femm, X1.D.Relation, X1.E.Age)
r_family %>%
group_by(X1.D.Relation) %>%
summarize(mean(X1.E.Age, na.rm= T))
r_family %>%
mutate(relation = match_levels(r_family$X1.D.Relation, relation_levels)) %>%
filter(relation %in% c("father","wife", "son", "daughter", "daugther-in-law", "son-in-law")) %>%
ggplot(aes(x = X1.E.Age, fill = relation)) +
geom_density(position = "identity", alpha = 0.5)
r_family.asservis %>%
mutate(relation = match_levels(r_family.asservis$X1.D.Relation, relation_levels)) %>%
filter(relation %in% c("father","wife", "son", "daughter", "daugther-in-law")) %>%
ggplot(aes(x = X1.E.Age, fill = relation)) +
geom_density(position = "identity", alpha = 0.5)
r_family %>%
mutate( education = match_levels(r_family$X1.G.Education, rume$X_Education ),
sexe = as.factor(X1.C.Male.Female)) %>%
filter(education %in% c("Primary","High School","No education")) %>%
ggplot(aes(x = sexe, fill = education)) +
geom_bar(position = "stack", alpha = .3)
r_family.asservis %>%
mutate( education = match_levels(r_family.asservis$X1.G.Education, rume$X_Education ),
sexe = as.factor(X1.C.Male.Female)) %>%
ggplot(aes(x = sexe, fill = education)) +
geom_bar(position = "stack", alpha = .3)
relation_levels
typeof(r_family$X1.H.Student.at.present)
nrow(r_family)
length(which(r_family$X1.C.Male.Female == 1))
length(which(r_family$X1.C.Male.Female == 2))
|
91e976650a80aab596c933635058be088a9e9a10 | 6ea8bfe20af75824e219d9520e7fbf8a5b2c4c98 | /man/estimate10.Rd | 4dad9e0d517fde297548de6e942c1f2907b86702 | [] | no_license | mgomez-olmedo/acdr | 35f03888e5a14b932b3483255586f712c57a26cd | 71b0b4407e36e139776c3dd9010c7b98c7e816d6 | refs/heads/master | 2020-06-14T00:45:05.420849 | 2019-07-10T07:41:30 | 2019-07-10T07:41:30 | 194,839,717 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 863 | rd | estimate10.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimate10.R
\name{estimate10}
\alias{estimate10}
\title{Function that estimates probabilities from a string x
It returns a list with the estimations, the sample sizes, and the forgotten samples
forg is a vector of rhos. It considers all the rhos and selects
the rho with maximum likelihood in each case.}
\usage{
estimate10(x, forg, l)
}
\arguments{
\item{x}{stream to analyze}
\item{forg}{vector of values for rho parameter}
\item{l}{length to consider}
}
\value{
list with data, estimations and values of s and ro
}
\description{
Function that estimates probabilities from a string x
It returns a list with the estimations, the sample sizes, and the forgotten samples
forg is a vector of rhos. It considers all the rhos and selects
the rho with maximum likelihood in each case.
}
|
4015993252d95920400e4d7524762c8c43c6f565 | 66f8711bc942a1bc635a6deea253e9a49c718094 | /man/docSetComplete.Rd | 6b98163b2a3b0e7af5adc0213a43b5f43d0a11d7 | [
"MIT"
] | permissive | seanrsilver/novnet | bd179476c48a8dd809757c60488dde7193a4145b | 85107cfbbabc68c603134db5b5fc8bbf9219624b | refs/heads/master | 2020-06-05T18:20:58.057024 | 2019-06-18T14:29:45 | 2019-06-18T14:29:45 | 192,495,039 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 561 | rd | docSetComplete.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/docSetComplete.R
\name{docSetComplete}
\alias{docSetComplete}
\title{docSetComplete()
pulls all files with .txt and Char.csv
runs docSetCheck() for all files
writes docSetCheck.csv}
\usage{
docSetComplete(local = TRUE)
}
\arguments{
\item{local}{If FALSE, downloads from Google Drive and saves to folder; defaults to TRUE (use the local copies).}
}
\description{
docSetComplete()
pulls all files with .txt and Char.csv
runs docSetCheck() for all files
writes docSetCheck.csv
}
\keyword{NovNet}
\keyword{Utilities}
|
81a35c0e9ad5cddb59b6a1badb7917297a061fcf | 2ead52057ac3a4daf8a583196cab31ea92df5a53 | /ch18.R | 4aae5d480d4f6ff74e0fa763c4c8ecb0c5cc7b23 | [] | no_license | noeldjohnson/r4ds_ch18 | e3fbd2cbbaea7880c4b3821570c46425e7b9ba76 | 3bfdea60174a25a2eedbdcd2fa8ed4465e1a151b | refs/heads/master | 2020-06-03T14:30:59.656309 | 2019-06-13T19:35:59 | 2019-06-13T19:36:29 | 191,605,859 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,374 | r | ch18.R | # Chapter 18: Model Basics with modelr
# NOTE(review): hard-coded setwd() makes this script machine-specific --
# consider a relative path or an RStudio project instead.
setwd("/Users/noeljohnson/Dropbox/R Course/Learn_Git/R4ds_ch18")
library(tidyverse)
library(modelr)
library(skimr)
library(stargazer)
library(jtools)
library(viridis)
# Warn (instead of silently dropping rows) when model functions meet NAs.
options(na.action = na.warn)
# Quick look at modelr's simulated example data.
head(sim1)
skim(sim1)
cor(sim1)
ggplot(sim1, aes(x, y)) +
  geom_point()
# 1000 random candidate lines: a1 = intercept, a2 = slope.
models <- tibble(
  a1 = runif(1000, -20, 40),
  a2 = runif(1000, -5, 5)
)
head(models)
skim(models)
hist(models$a1)
hist(models$a2)
# geom_abline(
# aes(intercept = a1, slope = a2),
# data = models, alpha = 1/4
# ) +
# geom_point()
#
# model1 <- function(a, data) {
# a[1] + data$x * a[2]
# }
# model1(c(7, 1.5), sim1)
#
# measure_distance <- function(mod, data) {
# diff <- data$y - model1(mod, data)
# sqrt(mean(diff ^ 2))
# }
# measure_distance(c(7, 1.5), sim1)
#
#
# sim1_dist <- function(a1, a2) {
# measure_distance(c(a1, a2), sim1)
# }
# models <- models %>%
# mutate(dist = purrr::map2_dbl(a1, a2, sim1_dist))
# models
#
# ggplot(sim1, aes(x, y)) +
# geom_point(size = 2, color = "grey30") +
# geom_abline(
# aes(intercept = a1, slope = a2, color = -dist),
# data = filter(models, rank(dist) <= 10)
# )
#
# ggplot(models, aes(a1, a2)) +
# geom_point(
# data = filter(models, rank(dist) <= 10),
# size = 4, color = "red"
# ) +
# geom_point(aes(colour = -dist))
#
# grid <- expand.grid(
# a1 = seq(-5, 20, length = 30),
# a2 = seq(1, 3, length = 30)
# ) %>%
# mutate(dist = purrr::map2_dbl(a1, a2, sim1_dist))
#
# grid %>%
# ggplot(aes(a1, a2)) +
# geom_point(
# data = filter(grid, rank(dist) <= 10),
# size = 4, colour = "red"
# ) +
# geom_point(aes(color = -dist))
#
# ggplot(sim1, aes(x, y)) +
# geom_point(size = 2, color = "grey30") +
# geom_abline(
# aes(intercept = a1, slope = a2, color = -dist),
# data = filter(grid, rank(dist) <= 10)
# )
#
# best <- optim(c(0, 0), measure_distance, data = sim1)
# best$par
#
# ggplot(sim1, aes(x, y)) +
# geom_point(size = 2, color = "grey30") +
# geom_abline(intercept = best$par[1], slope = best$par[2])
#
# sim1_mod <- lm(y ~ x, data = sim1)
# coef(sim1_mod)
# predict(sim1_mod, sim1)
# sim1_mod
# names(sim1_mod)
# summary(sim1_mod)
# stargazer(sim1_mod, type = "text")
# summ(sim1_mod)
# summ(sim1_mod, robust = "HC1")
# summ(sim1_mod, scale = TRUE)
# summ(sim1_mod, confint = TRUE, ci.width = .95, digits = 3)
# plot_summs(sim1_mod)
# plot_summs(sim1_mod, scale = TRUE)
# plot_summs(sim1_mod, scale = TRUE, inner_ci_level = .9)
# plot_summs(sim1_mod, scale = TRUE, plot.distributions = TRUE, inner_ci_level = .9)
# sim1 <- sim1 %>% mutate(xsquared = x^2)
# sim2_mod <- lm(y ~ x + xsquared, data = sim1)
# plot_summs(sim1_mod, sim2_mod, scale = TRUE)
# plot_summs(sim1_mod, sim2_mod, scale = TRUE, plot.distributions = TRUE)
# plot_summs(sim1_mod, sim1_mod, sim1_mod, scale = TRUE, robust = list(FALSE, "HC0", "HC3"),
# model.names = c("OLS", "HC0", "HC3"))
# effect_plot(sim1_mod, pred = x, interval = TRUE)
# effect_plot(sim1_mod, pred = x, interval = TRUE, plot.points = TRUE)
# export_summs(sim1_mod, sim2_mod, scale = TRUE)
#
#
# grid <- sim1 %>%
# data_grid(x)
# grid
#
# grid <- grid %>%
# add_predictions(sim1_mod)
# grid
#
# ggplot(sim1, aes(x, y)) +
# geom_point(size = 1, color = "grey30") +
# geom_point(
# aes(x, pred),
# data = grid,
# colour = "red",
# size = 3
# )
#
# sim1 <- sim1 %>%
# add_residuals(sim1_mod)
# sim1
#
# ggplot(sim1, aes(resid)) +
# geom_freqpoly(binwidth = 0.5)
#
# ggplot(sim1, aes(x, resid)) +
# geom_ref_line(h = 0) +
# geom_point()
#
# # Formulas and Model Families
#
# df <- tribble(
# ~y, ~x1, ~x2,
# 4, 2, 5,
# 5, 1, 6
# )
# df
#
# model_matrix(df, y ~ x1)
#
# model_matrix(df, y ~ x1 + x2)
#
# model_matrix(df, y ~ x1 - 1)
#
# df <- tribble(
# ~ sex, ~ response,
# "male", 1,
# "female", 2,
# "male", 1
# )
# model_matrix(df, response ~ sex)
# df
#
# ggplot(sim2) +
# geom_point(aes(x, y))
#
# mod2 <- lm(y ~ x, data = sim2)
# summary(mod2)
#
# grid <- sim2 %>%
# data_grid(x) %>%
# add_predictions(mod2)
# grid
#
# ggplot(sim2, aes(x)) +
# geom_point(aes(y = y)) +
# geom_point(
# data = grid,
# aes(y = pred),
# color = "red",
# size = 4
# )
#
# ggplot(sim3, aes(x1, y)) +
# geom_point(aes(color = x2))
#
# mod1 <- lm(y ~ x1 + x2, data = sim3)
# mod2 <- lm(y ~ x1 * x2, data = sim3)
#
# grid <- sim3 %>%
# data_grid(x1, x2) %>%
# gather_predictions(mod1, mod2)
# grid
#
# ggplot(sim3, aes(x1, y, color = x2)) +
# geom_point() +
# geom_line(data = grid, aes(y = pred)) +
# facet_wrap(~ model)
#
# sim3 <- sim3 %>%
# gather_residuals(mod1, mod2)
#
# ggplot(sim3, aes(x1, resid, color = x2)) +
# geom_point() +
# facet_grid(model ~ x2)
#
# mod1 <- lm(y ~ x1 + x2, data = sim4)
# mod2 <- lm(y ~ x1 * x2, data = sim4)
#
# grid <- sim4 %>%
# data_grid(
# x1 = seq_range(x1, 5),
# x2 = seq_range(x2, 5)
# ) %>%
# gather_predictions(mod1, mod2)
# grid
#
# ggplot(grid, aes(x1, x2)) +
# geom_tile(aes(fill = pred)) +
# facet_wrap(~ model)
#
# ggplot(grid, aes(x1, pred, color = x2, group = x2)) +
# geom_line() +
# facet_wrap(~ model)
# ggplot(grid, aes(x2, pred, color = x1, group = x1)) +
# geom_line() +
# facet_wrap(~ model)
#
# # Transformations
#
# df <- tribble(
# ~y, ~x,
# 1, 1,
# 2, 2,
# 3, 3
# )
# model_matrix(df, y ~ x^2 + x)
# model_matrix(df, y ~ I(x^2) + x)
#
# model_matrix(df, y ~ poly(x, 2))
#
# library(splines)
# model_matrix(df, y ~ ns(x, 2))
#
# sim5 <- tibble(
# x = seq(0, 3.5 * pi, length = 50),
# y = 4 * sin(x) + rnorm(length(x))
# )
# ggplot(sim5, aes(x, y)) +
# geom_point()
#
# mod1 <- lm(y ~ ns(x, 1), data = sim5)
# mod2 <- lm(y ~ ns(x, 2), data = sim5)
# mod3 <- lm(y ~ ns(x, 3), data = sim5)
# mod4 <- lm(y ~ ns(x, 4), data = sim5)
# mod5 <- lm(y ~ ns(x, 5), data = sim5)
#
# grid <- sim5 %>%
# data_grid(x = seq_range(x, n = 50, expand = 0.1)) %>%
# gather_predictions(mod1, mod2, mod3, mod4, mod5, .pred = "y")
#
# ggplot(sim5, aes(x, y)) +
# geom_point() +
# geom_line(data = grid, color = "red") +
# facet_wrap(~ model)
#
# # Missing Values
#
# df <- tribble(
# ~x, ~y,
# 1, 2.2,
# 2, NA,
# 3, 3.5,
# 4, 8.3,
# NA, 10
# )
#
# mod <- lm(y ~ x, data = df)
#
# nobs(mod)
# End Code
|
78a6b7ff0ff3b05c0f2635b236c74dbae0cdda55 | 5a08e607367a964680b4740a6f64587eb7c7020a | /util/readPathwayFile.R | cd3f01a67613bb50b6b08e983b5508f2ed4e6423 | [] | no_license | qiongmeng-m/EpiPsychosis_IGF2 | 1b409ca334de0bab68f3f7b6822d289a1c647744 | 82fc709aa8e9406ae138aafe2fb13f79c658d54a | refs/heads/master | 2021-10-18T22:00:57.756578 | 2019-02-14T16:58:56 | 2019-02-14T16:58:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,151 | r | readPathwayFile.R | #' Parse GMT file and return pathways as list
#'
#' @details The GMT file format currently supported should match the ones
#' found at http://downloads.baderlab.org. The original GMT file format is:
#' <set name><set description><member 1><member 2>...<member N>,
#' one row per set, with values tab-delimited.
#' The version at baderlab.org has additional unique formatting of the
#' <set name> column as follows:
#' <pathway_full_name>%<pathway_source>%<pathway_source_id>
#'
#' Example:
#' UREA CYCLE%HUMANCYC%PWY-4984 urea cycle ASS1 ARG1 CPS1 ASL OTC
#' ABACAVIR METABOLISM%REACTOME DATABASE ID RELEASE 55%2161541 Abacavir metabolism ADH1A GUK1 ADAL PCK1 NT5C2
#'
#' This function requires the specific formatting of the first column
#' to assign the key name of the output list (see \code{useIDasName}
#' argument).
#' @param fname (char) path to pathway file in gmt format
#' pathway score to include pathway in the filter list
#' @param MIN_SIZE (integer) min num genes allowed in a pathway. Pathways
#' with fewer number of genes are excluded from the output list
#' @param MAX_SIZE (integer) max num genes allowed in a pathway. Pathways
#' with gene counts greater than this are excluded from the output list
#' @param EXCLUDE_KEGG: (boolean) If TRUE exclude KEGG pathways. Our
#' experience has been that some KEGG gene sets are to broad to be
#' physiologically relevant
#' @param IDasName: (boolean) Value for key in output list.
#' If TRUE, uses db name and ID as name (e.g. KEGG:hsa04940)
#' If FALSE, pathway name. If TRUE,
#' @param getOrigNames (logical) when TRUE also returns a mapping of the
#' cleaned pathway names to the original names
#' @return Depends on value of getOrigNames. If FALSE (Default), list with
#' pathway name as key, vector of genes as value. If TRUE, returns list of
#' length two, (1) geneSets: pathway-gene mappings as default,
#' (2) pNames: data.frame with original and cleaned names.
#' @examples pathFile <- sprintf("%s/extdata/pathways.gmt",
#' path.package("netDx"))
#' pathwayList <- readPathways(pathFile)
#'
#' @export
readPathways <- function(fname,MIN_SIZE=10L, MAX_SIZE=200L,
EXCLUDE_KEGG=TRUE,IDasName=FALSE,verbose=TRUE,getOrigNames=FALSE) {
# change locale to accommodate nonstandard chars in pathway names
oldLocale <- Sys.getlocale("LC_ALL")
Sys.setlocale("LC_ALL","C")
out <- list()
# read list of master pathways
if (verbose) cat("---------------------------------------\n")
if (verbose) cat(sprintf("File: %s\n\n", basename(fname)))
f <- file(fname,"r")
# TODO: deal with duplicate pathway names
#pName <- list()
ctr <- 0
options(warn=1)
origNames <- c()
repeat {
s <- scan(f, what="character",nlines=1,quiet=TRUE,sep="\t")
if (length(s)==0) break;
currFullName <- s[1]
pPos<- gregexpr("%",s[1])[[1]];
src <- ""
src_id <- ""
if (pPos[1]==-1) {
#cat("\n\n% symbol not found in pathway name")
s[1] <- s[1]
} else {
src <- substr(s[1],pPos[1]+1,pPos[2]-1)
src_id <- substr(s[1],pPos[2]+1,nchar(s[1]))
if (IDasName)
s[1] <- paste(src,src_id,sep=":")
else
s[1] <- substr(s[1],1,pPos[1]-1)
}
if (!EXCLUDE_KEGG || (src!="KEGG")) {
idx <- which(s=="") # remove trailing blank rows.
if (any(idx)) s <- s[-idx]
if (getOrigNames) { currnm <- currFullName } else {currnm <- s[1]}
out[[currnm]] <- s[3:length(s)]
#pName[[s[1]]] <- s[2] # stores pathway source - prob not needed
}
ctr <- ctr+1
}
close(f)
if (verbose) {
cat(sprintf("Read %i pathways in total, internal list has %i entries\n",
ctr, length(out)))
cat(sprintf("\tFILTER: sets with num genes in [%i, %i]\n",
MIN_SIZE,MAX_SIZE))
}
# filter by pathway size
ln <- unlist(lapply(out, length))
idx <- which(ln < MIN_SIZE | ln >= MAX_SIZE)
out[idx] <- NULL
if (verbose) cat(sprintf("\t => %i pathways excluded\n\t => %i left\n",
length(idx),length(out)))
# clean pathway names
nm <- cleanPathwayName(names(out))
if (getOrigNames) {
# do nothing
} else {
names(out) <- nm
}
return(out)
}
|
15389a3da47fd7f63149d4bdbb1d60614d90bc24 | da766092589958390e0bf188c2f37552bfc3e704 | /src/plot2.R | 9aa59b8d5c6409d059ff863bea5836fff658af6d | [] | no_license | dar7yl/ExploratoryDataAnalysis | 0eb69dd30af0329df2acb6fa0927addaf266d5ab | 0cf07fe87b8204db037ff81c21a16403a501e29a | refs/heads/master | 2020-04-05T23:32:11.497791 | 2015-02-09T03:53:39 | 2015-02-09T03:53:39 | 30,272,504 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 375 | r | plot2.R | png(filename = file.path("figure", "plot2.png"),
width = 480, height = 480, units = "px", pointsize = 12,
bg = "white", res = NA, family = "", restoreConsole = TRUE,
type = c("windows", "cairo", "cairo-png"), antialias="none")
with (hpc, plot(DateTime, Global_active_power, type="l",
xlab="Date", ylab="Global Active Power (kilowatts)",
col="black") )
dev.off()
|
89b64da19fa71934367bc643b4dd6d4018c5ac94 | 45a1a5743b7bf53ed0ed68368fbf5650a74a8a9e | /RProblemSet2Solutions.R | 1faf2ca52ec651f4c383de150f387c8da901b66f | [] | no_license | matthewyaspan/matttest1 | 85511b0881ae947d5d474b9a36ff4e9970a9de71 | b3e6468c8692b2503ef639775359b4fffb9c3172 | refs/heads/master | 2020-04-29T00:52:15.450350 | 2019-03-14T22:52:18 | 2019-03-14T22:52:18 | 175,699,988 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,182 | r | RProblemSet2Solutions.R | setwd("Downloads/") ## SET THIS TO YOUR WORKING DIRECTORY. THAT DIRECTORY OUGHT TO HAVE `all2001.csv` IN IT!!!
library(tidyverse)
all.2001 <- read_csv(file.choose())
### Find descriptions of the fields of events files here: https://www.retrosheet.org/datause.txt
all.2001 %>% head()
all.2001 %>% View()
############# EXAMPLES #############
## What Percentage of Events ended in a 3 ball count?
all.2001 %>% summarize(mean(BALLS_CT == 3))
## Percentage of Events with NA Pitch Sequences:
all.2001 %>% summarize(mean(is.na(PITCH_SEQ_TX)))
## What Percentage of Events occurred during plate appearances in which the first pitch was put into play?
all.2001 %>% filter(!is.na(PITCH_SEQ_TX)) %>% summarize(mean(str_sub(PITCH_SEQ_TX, 1, 1) == "X"))
## What percentage of the time does a runner score from third on a given event?
all.2001 %>% filter(!is.na(BASE3_RUN_ID)) %>% summarize(mean(RUN3_DEST_ID >= 4))
############# QUESTION 1 #############
## How many Pinch Hit Home Runs Occurred in 2001?
all.2001 %>% filter(PH_FL) %>% filter(EVENT_CD == 23) %>% nrow()
############# QUESTION 2 #############
## How many events had a pitch sequence that *started* with two consecutive balls and a called strike?
all.2001 %>% filter(str_sub(PITCH_SEQ_TX, 1, 3) == "BBC") %>% nrow()
############# QUESTION 3 #############
## Find the average number of runs scored on plays in which there was an error.
all.2001 %>% filter(ERR_CT > 0) %>% summarize(mean(EVENT_RUNS_CT))
############# QUESTION 4 #############
## Using the 'grepl' function (amongst others), find how many events had a pitch sequence with two consecutive balls and then a swinging strike.
all.2001 %>% filter(grepl("BBS", PITCH_SEQ_TX)) %>% nrow()
############# QUESTION 5 #############
## What player led the league in defensive assists in 2001? (their ID in retrosheet is sufficient.)
# My solution....
all.2001.assists <- all.2001 %>%
mutate(all.assists=paste0(ASS1_FLD_CD, ASS2_FLD_CD,ASS3_FLD_CD, ASS4_FLD_CD, ASS5_FLD_CD, ASS6_FLD_CD, ASS7_FLD_CD, ASS8_FLD_CD, ASS9_FLD_CD, ASS10_FLD_CD)) %>%
filter(ASS1_FLD_CD > 0)
assists.df <- all.2001.assists %>% filter(grepl("1", all.assists)) %>% group_by_at(vars("RESP_PIT_ID")) %>% summarize(!!"assists.1" := n()) %>% rename(fielder.id = 1)
for (i in 2:9) {
pos.assists.df <- all.2001.assists %>% filter(grepl(i, all.assists)) %>% group_by_at(vars(paste0("POS", i, "_FLD_ID"))) %>% summarize(!!paste0("assists.", i) := n()) %>% rename(fielder.id = 1)
assists.df <- assists.df %>% full_join(pos.assists.df)
}
assists.df %>% mutate_all(funs(replace(., is.na(.), 0))) %>% mutate(total.assists = assists.1 + assists.2 + assists.3 + assists.4 + assists.5 + assists.6 + assists.7 + assists.8 + assists.9) %>% arrange(desc(total.assists)) %>%
slice(1)
# Excellent submission by Brian Bauer
all.2001 %>%
select(RESP_PIT_ID, POS2_FLD_ID,
POS3_FLD_ID, POS4_FLD_ID, POS5_FLD_ID, POS6_FLD_ID, POS7_FLD_ID, POS8_FLD_ID,
POS9_FLD_ID, ASS1_FLD_CD, ASS2_FLD_CD, ASS3_FLD_CD, ASS4_FLD_CD, ASS5_FLD_CD,
ASS6_FLD_CD, ASS7_FLD_CD, ASS8_FLD_CD, ASS9_FLD_CD, ASS10_FLD_CD) %>%
filter(ASS1_FLD_CD > 0) %>%
gather(position,value,ASS1_FLD_CD:ASS10_FLD_CD) %>% # this turns a row with many columns into many rows with a "single" column
filter(value > 0) %>%
mutate(playerID = case_when(
value == 1 ~ RESP_PIT_ID, value == 2 ~ POS2_FLD_ID, value == 3 ~ POS3_FLD_ID, value == 4 ~ POS4_FLD_ID,
value == 5 ~ POS5_FLD_ID, value == 6 ~ POS6_FLD_ID, value == 7 ~ POS7_FLD_ID, value == 8 ~ POS8_FLD_ID,
value == 9 ~ POS9_FLD_ID
)) %>%
group_by(playerID) %>%
summarize(assists = n()) %>%
filter(assists > 400) %>%
inner_join(Master, by=c("playerID" = "retroID")) %>%
select(nameFirst, nameLast, assists) %>%
arrange(desc(assists))
############# QUESTION 6 #############
## Using the Master table from the Lahman database, the inner_join function, and our all.2001 dataframe,
## find the number of events that had a player whose first name was "Troy" at third base
library(Lahman)
data("Master")
## BUG FIX: the question asks for first name "Troy"; the original filtered on "Tony".
all.2001 %>% inner_join(Master, by=c("POS5_FLD_ID" = "retroID")) %>% filter(nameFirst == "Troy") %>% nrow()
|
6f3bd31978b2dd41c59fa3acefd81b62ad484b4b | b059a52cd2c7573ccd216b23a77b2acb49ee267c | /quaterly_reviews/FY20Q4_USAID_CURR_Trend.R | 04e6be641c8de8d6245226a4ec4702e16516674f | [] | no_license | gsarfaty/SA-Scripts | f6b977282b709065ac9ec4f9492e4c9555d42eed | 022505bbcd2f3ce41325ea4fb1b2c06285accbcb | refs/heads/main | 2023-03-06T23:21:00.973088 | 2021-02-21T16:20:24 | 2021-02-21T16:20:24 | 323,477,541 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,361 | r | FY20Q4_USAID_CURR_Trend.R | library(extrafont)
library(tidyverse)
library(ICPIutilities)
library(here)
library(glitr)
library(scales)
library(patchwork)
library(formattable)
library(gt)
# MER
df<-read_msd(here("Processed_Files/MSD_genie", "msd_fy17to20_2020-11-17_attributes.txt"))
# Data - proxy linkage by metro ---------------------------------------------------------------------
CURR_trend <-df%>%
filter(indicator %in% c("TX_CURR"),
standardizeddisaggregate %in% c("Total Numerator"),
fiscal_year %in% c("2020"),
DSP=="Yes")%>%
group_by(fiscal_year,agency_lookback,Partner_lookback,short_name,indicator) %>%
summarise_at(vars(targets:cumulative), sum, na.rm = TRUE)%>%
ungroup() %>%
reshape_msd(clean=TRUE) %>%
filter(period_type=="results")
## Trend Viz
curr_trend_viz<-CURR_trend %>%
ggplot(aes(y =val,
x = period,
fill=indicator))+
geom_col(width = .6)+
scale_fill_manual(values=c(grey40k))+
scale_y_continuous(labels=label_comma())+
si_style_yline()+
labs(caption="TX_CURR | PEPFAR DSPs")+
theme(axis.title.y = element_blank(),
axis.title.x = element_blank(),
axis.text = element_text(size=14),
legend.position = "none")
print(curr_trend_viz)
ggsave(here("Quarterly Reviews/Self_assessment","FY20_ALL_CURR.png"),
width=4, height=4, dpi=300, units="in") |
b82130be275b8134c8c0d4d9ef347ecec3f6bb73 | 9fd54ea5eedf5ac7cae614e579026a4ecde1eaff | /R/cofactor.pca.cor.R | 44916d04bad472cbdc491651efb278f4ee170a82 | [] | no_license | rachael-kane/GLM2020 | 04399c88a32ffef947ca35e5ed3b5304e2e212c1 | ebb39d2a6a10508206edd8716d673fee664613f1 | refs/heads/master | 2021-04-07T03:01:16.196184 | 2020-03-30T20:15:25 | 2020-03-30T20:15:25 | 248,638,794 | 0 | 0 | null | 2020-03-20T02:18:36 | 2020-03-20T01:09:57 | R | UTF-8 | R | false | false | 5,325 | r | cofactor.pca.cor.R | #' Correlation between cofactors and principal components.
#'
#' @description Test for correlations between user-specified cofactors and principal components calculated from genotype data. Automatically remove principal components linearly dependent (correlated) with user-specified cofactors.
#'
#' @param U A numeric matrix containing user-specified cofactors. Dimensions are n rows (individuals) by t columns (cofactors).
#' @param G A numeric matrix containing genotype data. Dimensions are n rows (individuals) by m columns (genetic markers).
#'
#' @return A list of 1 or 3 objects.
#'
#' @return U unspecified: 1 object.
#' $cov, a numeric matrix containing all principal components and individual scores.
#' @return U specified: 3 objects.
#' $orig_pc, a numeric matrix containing all original principal components
#' $cov, a numeric matrix containing user-specified cofactors and retained principal components.
#' $removed, a matrix indicating which principal components were removed.
#'
#' @details
#'
#' When U is unspecified, cofactor.pca.cor will return a list of 1 object.
#' With U unspecified, function will carry out principal components analysis identically to the native R function prcomp(),
#' and cofactor.pca.cor will return principal components scores in $cov.
#' $cov is a numeric matrix containing all principal components and individual scores.
#' Dimensions are n rows (individuals) by t columns (principal components).
#'
#' When U is specified, cofactor.pca.cor will return a list of 3 objects.
#' $orig_pc is a numeric matrix containing all original principal components and individual scores.
#' $cov is a numeric matrix containing user-specified cofactors and all principal components not correlated with the
#' user-specified cofactors. Dimensions are n rows (individuals) by t columns (cofactors).
#' $removed is a character matrix indicating which principal components were removed.
#'
#' The $cov matrix is intended for use as the "C" argument in the GWASbyGLM function included in this package.
#'
#' Type vignette("GLM2020_tutorial") for example use.
cofactor.pca.cor <- function(U, G){
  # Principal components analysis of the genotype matrix
  # (rows = individuals, cols = markers); scores have one column per PC.
  pca <- prcomp(G)$x
  # Without user-specified cofactors, return all PC scores as covariates.
  if (missing(U)) {
    return(list(cov = pca))
  }
  # Test every (cofactor, PC) pair for correlation (psych::corr.test),
  # with a Bonferroni correction over all ncol(U) * ncol(pca) tests.
  pca.c.corr.test <- corr.test(x = U[, 1:ncol(U)], y = pca[, 1:ncol(pca)], adjust = "none")
  # Rows are U cofactors, columns are PCs; TRUE marks a significant pair.
  sig.pca.c.corr <- pca.c.corr.test$p < (0.05 / (ncol(U) * ncol(pca)))
  # A PC is dropped when it is significantly correlated with ANY cofactor.
  # BUG FIX: the original looped `for (i in ncol(sig.pca.c.corr))`, i.e. over a
  # single index, so only the LAST principal component was ever tested; it also
  # crashed with a subscript error whenever no PC was removed. Vectorize instead.
  drop.pc <- colSums(sig.pca.c.corr) > 0
  removal.report <- paste("Removed principal component", which(drop.pc))
  # Retained PCs (possibly zero columns), bound to the user cofactors.
  filtered.pca <- pca[, !drop.pc, drop = FALSE]
  gwas.covariates <- cbind(U, filtered.pca)
  # $cov is intended as the "C" argument of GWASbyGLM.
  list(orig_pc = pca, cov = gwas.covariates, removed = removal.report)
}
|
016e235c3692ab48a9d7bb06bfd752dc657e6786 | 472309e5a7db0def9f4bfba6b45972c3c1c58909 | /HW1/BayesLogit/BLR_real_data_fit.R | 33cf2e11e427fd4d7fec5705f96278c06f973f07 | [
"MIT"
] | permissive | topherconley/Stuff | 346a0ef269c525276d97109994ea428514c7c1b4 | 7340606dbd4175749695739fb9a24cc92b96adbe | refs/heads/master | 2021-01-21T05:55:02.943654 | 2013-12-09T09:22:21 | 2013-12-09T09:22:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,998 | r | BLR_real_data_fit.R |
########################
#working directory & start clean
rm(list = ls())
setwd("~/myrepos//sta250/Stuff/HW1/BayesLogit/")
########################
#####################################
#read in #cancer data set
#parse it to meaningful
#objects in terms of {m, y, X}
data <- read.table("breast_cancer.txt", header = TRUE)#, na.strings = "?")
#check that there are no missing values, otherwise send error message
check.missing <- na.fail(data)
#this data is not in grouped format as the previous simulation
m <- rep(1, times = dim(data)[1])
#Call malignant cases as "success" and redefine response in terms of {1,0}
y <- ifelse(data$diagnosis == "M", 1, 0)
covariate.index <- 1:10
X <- cbind(rep(1, times = dim(data)[1]), scale(as.matrix(data[,covariate.index])))
colnames(X) <- c("intercept", names(data)[covariate.index])
####################################
#################################################
# Set up the model specifications:
p <- dim(X)[2]
beta.0 <- rep(0, times = p)
Sigma.0.inv <- diag(rep(1000,p))
#################################################
##################################################
#Load the key algorithmic functions
source("BLR_metropolis_within_gibbs.R")
##################################################
######################################################################
# Fit the Bayesian model:
beta.chain <- bayes.logreg(m = m,y = y,X = X,
beta.0 = beta.0, Sigma.0.inv = Sigma.0.inv,
niter=5e4, burnin=2e4,
print.every=1000, retune=500, verbose=FALSE)
#####################################################################
#save the results
#save(list = ls(), file = "real_data_output_long.rda")
#########################################################################
#diagnostics
load("real_data_output_long.rda")
######################################################################
#trace plot diagnostics
library(MCMCpack)
library(coda)
mcmc.beta.chain <- mcmc(beta.chain)
plot(mcmc.beta.chain)
effectiveSize(mcmc.beta.chain)
#acceptance rates
acc.rate <- 100*(1 - rejectionRate(mcmc.beta.chain))
acc.rate.mat <- matrix(acc.rate, nrow = 1, ncol = 11)
colnames(acc.rate.mat) <- names(acc.rate)
library(xtable)
xtable(acc.rate.mat)
#autocorrelation
autocorr.plot(mcmc.beta.chain)
#lag 1
# Lag-1 autocorrelation of each beta chain, as a one-row-per-lag data frame.
beta.ac1 <- sapply(1:p, function(i) autocorr(mcmc.beta.chain, lags = 1)[,,i])
## BUG FIX: the original called as.data.frame(beta.ac) on the undefined
## name `beta.ac`; the object created on the previous line is `beta.ac1`.
beta.ac1 <- as.data.frame(beta.ac1)
names(beta.ac1) <- paste("beta", 1:11, sep = "")
xtable(beta.ac1)
######################################################################
#experimental: thinning the mcmc chain
thin.index <- seq(from = 1, to = 8e4, by = 5)
thin.beta.chain <- beta.chain[thin.index,]
mcmc.thin.beta.chain <- mcmc(thin.beta.chain)
autocorr.plot(mcmc.beta.chain)
######################################################################
# Extract posterior quantiles...
posterior.quantiles <- apply(beta.chain , MARGIN = 2, FUN = quantile,
probs = c(0.025, 0.975))
colnames(posterior.quantiles) <- paste("beta", 1:11, sep = "")
xtable(posterior.quantiles)
######################################################################
######################################################################
#posterior predictive analysis
pdf("real_data_posterior_predictive.pdf")
beta.post.pred.mean <- post.predictive(n.pred = 5000, posterior = beta.chain, y = y, X = X, stat = mean)
beta.post.pred.sd <- post.predictive(n.pred = 5000, posterior = beta.chain, y = y, X = X, stat = sd)
par(mfrow = c(1,2))
library(RColorBrewer)
brew.col <- brewer.pal(n = 4, "RdBu")
hist(beta.post.pred.mean, 40, col = brew.col[4], main = "Mean Post. Predictive", xlab = "mean")
abline(v = mean(y), col = brew.col[1], lwd = 4)
hist(beta.post.pred.sd, 40, col = brew.col[4], main = "Std. Dev. Post Predictive", xlab = "std. dev")
abline(v = sd(y), col = brew.col[1], lwd = 4)
dev.off()
######################################################################
|
084cdf0b7fe67a8b725f56388844fb78b6e18921 | a1a8a8ec12e9de8cf2af8a0dbfc6cc849667ebc3 | /Lego_Program/test_anime.R | dda398db7d14fad7d3ccc6beda85d0e8577ff5ae | [] | no_license | MathiasPires/Lego | 09a673134a17b5fcc7d0c4cb3e8efd2659355d3a | c3780f6c1a99ce303b2d04576303b860de96d232 | refs/heads/master | 2021-01-25T05:22:13.371971 | 2015-09-24T19:26:59 | 2015-09-24T19:26:59 | 37,341,175 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,261 | r | test_anime.R | rm(list=c(ls()))
#setwd("/Users/piresmm/git/Lego/Lego_Program/")
library(igraph)
library(plotrix)
library(RColorBrewer)
source("R/build_template.R")
source("R/test_compatability.R")
#sequence <- seq(10,2000,100)
#prop_active <- numeric(length(sequence))
sequence <- seq(10,200,10)
num_e <- numeric(length(sequence))
avg_num_e <- numeric(length(sequence))
tic <- 0
for (i in sequence) {
#for (i in sequence) {
tic <- tic + 1
num_play <- i
# pw_prob <- c(
# pr_ne = 0.025,
# pr_nn = 0.025,
# pr_ni = 0.05,
# pr_nm = 0.005,
# pr_ia = 0.05,
# pr_ie = 0.2,
# pr_ii = 0.5,
# pr_aa = 0.05,
# pr_ee = 0.05
# )
#Defining probabilities of each type
#Basic probs could also be based on num_play. e.g., We should expect p.n*num_play n's per column/row
# p.n=0.02/(i-9)
# p.e=0.1/(i-9)
# p.m=0.1/(i-9)
# p.a=0/(i-9)
# #Ignore with 1 - pr(sum(other))
# p.i= 1 - (sum(p.n,p.e,p.m,p.a))
p.n=0.02
p.e=0.1
p.m=0.1
p.a=0
#Ignore with 1 - pr(sum(other))
p.i= 1 - (sum(p.n,p.e,p.m,p.a))
# #Normalization [0,1]
# S_prob=sum(c(p.n,p.e,p.i,p.m,p.a))
# p.n=p.n/S_prob
# p.e=p.e/S_prob
# p.i=p.i/S_prob
# p.m=p.m/S_prob
# p.a=p.a/S_prob
#Defining paiwise probabilities
pw_prob <- c(
pr_ne = p.n*(p.e/(p.e+p.n+p.i+p.m)),
pr_nn = p.n*(p.n/(p.e+p.n+p.i+p.m)),
pr_ni = p.n*(p.i/(p.e+p.n+p.i+p.m)),
pr_nm = p.n*(p.m/(p.e+p.n+p.i+p.m)),
pr_ia = p.i*(p.a/(p.e+p.a+p.n+p.i)),
pr_ie = p.i*(p.e/(p.e+p.a+p.n+p.i)),
pr_ii = p.i*(p.i/(p.e+p.a+p.n+p.i)),
pr_aa = p.a*(p.a/(p.a+p.i)),
pr_ee = p.e*(p.e/(p.i+p.n+p.e))
)
#make sure this vector sums to one
pw_prob <- pw_prob / sum(pw_prob)
#Build the interaction template
int_m <- build_template(num_play,pw_prob, 0.8)
num_e[tic] <- length(which(int_m == "e"))
#Average number of trophic interactions per species
avg_num_e[tic] <- mean(apply(int_m,1,function(x){length(which(x == "e"))}))
}
plot(sequence,num_e,xlab="Template size",ylab="Number of trophic interactions",pch=16)
plot(sequence,avg_num_e,xlab="Template size",ylab="Avg Num. trophic interactions",pch=16)
lmodel <- lm(num_e~sequence)
|
0895166939999f930f559bf6d24f7823f57e061b | 8866f2576324045f7f57bf02b87433bd3ed34145 | /man/generic_recoding.Rd | 49fbde995c9b1528c00d56afc46c134b836dc943 | [] | no_license | cran/rock | 31ba91c6be5bff97c1659b3a8c3e5fbe6644f285 | 61999cb18c02680719a96b8ec3d0f33010849270 | refs/heads/master | 2022-12-26T21:02:05.960658 | 2022-12-13T11:30:02 | 2022-12-13T11:30:02 | 236,884,462 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,537 | rd | generic_recoding.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generic_recoding.R
\name{generic_recoding}
\alias{generic_recoding}
\title{Generic underlying recoding function}
\usage{
generic_recoding(
input,
codes,
func,
filenameRegex = ".*",
filter = TRUE,
output = NULL,
outputPrefix = "",
outputSuffix = "_recoded",
decisionLabel = NULL,
justification = NULL,
justificationFile = NULL,
preventOverwriting = rock::opts$get("preventOverwriting"),
encoding = rock::opts$get("encoding"),
silent = rock::opts$get("silent"),
...
)
}
\arguments{
\item{input}{One of 1) a character string specifying the path to a file
with a source; 2) an object with a loaded source as produced by a call
to \code{\link[=load_source]{load_source()}}; 3) a character string specifying the path to a
directory containing one or more sources; 4) or an object with a list of
loaded sources as produced by a call to \code{\link[=load_sources]{load_sources()}}.}
\item{codes}{The codes to process}
\item{func}{The function to apply.}
\item{filenameRegex}{Only process files matching this regular expression.}
\item{filter}{Optionally, a filter to apply to specify a subset of the
source(s) to process (see \code{\link[=get_source_filter]{get_source_filter()}}).}
\item{output}{If specified, the coded source will be written here.}
\item{outputPrefix, outputSuffix}{The prefix and suffix to add to the
filenames when writing the processed files to disk, in case multiple
sources are passed as input.}
\item{decisionLabel}{A description of the (recoding) decision that was taken.}
\item{justification}{The justification for this action.}
\item{justificationFile}{If specified, the justification is appended to
this file. If not, it is saved to the \code{justifier::workspace()}. This can
then be saved or displayed at the end of the R Markdown file or R script
using \code{justifier::save_workspace()}.}
\item{preventOverwriting}{Whether to prevent overwriting existing files
when writing the files to \code{output}.}
\item{encoding}{The encoding to use.}
\item{silent}{Whether to be chatty or quiet.}
\item{...}{Other arguments to pass to \code{fnc}.}
}
\value{
Invisibly, the recoded source(s) or source(s) object.
}
\description{
This function contains the general set of actions that are always used
when recoding a source (e.g. check the input, document the
justification, etc). Users should normally never call this function.
}
|
c7e9e367674715166107ac6759cf6cd043a9cb1f | 2ec82dd20d0b86e9b37158579ea71495d1e9fb63 | /R/adjacencyMatFromDF.R | 38ed7f7a6137e89510deb1ec33a3df3f5a792902 | [
"MIT"
] | permissive | sverchkov/CommunityInference | 92619dfd036c20ec3e9e2d4ce998299fc4212f70 | 1382351bde3597b01516dfde2cabc1323b75f4e9 | refs/heads/master | 2020-03-25T16:19:14.212537 | 2018-08-13T14:40:53 | 2018-08-13T14:40:53 | 143,925,184 | 0 | 0 | MIT | 2018-08-07T20:53:43 | 2018-08-07T20:46:14 | null | UTF-8 | R | false | false | 1,675 | r | adjacencyMatFromDF.R | #' Get an adjacency matrix from an edge data frame
#'
#' Get an adjacency matrix from an edge data frame.
#' If no weights are provided all weights are set to 1
#'
#' @param edges a data frame with columns a, b, and (optionally) weight
#' @param nodes an array of the unique node IDs used in a, b (inferred if not provided)
#' @return an adjacency matrix
#' @export
adjacencyMatFromDF <- function(edges, nodes = NULL, cluster = NULL) {
  # Build a symmetric adjacency matrix (sparse when worthwhile) from an
  # edge list.
  #   edges:   data frame with columns a, b and optionally weight
  #   nodes:   unique node IDs (inferred from `edges` when NULL)
  #   cluster: optional parallel cluster used for the dense construction
  if (is.null(nodes)) nodes <- getUniqueNodesFromEdgesDF(edges)
  # Documented behaviour: unweighted edge lists get unit weights.
  # BUG FIX: the original computed a `weighted` flag but never applied the
  # default, so a missing weight column yielded NULL weights downstream.
  if (is.null(edges$weight)) edges$weight <- rep(1, nrow(edges))
  n <- length(nodes)

  # Sparse representation when the graph is sparse and Matrix is available
  # (requireNamespace is far cheaper than scanning installed.packages()).
  if ((nrow(edges) * 2 < n^2 / 3) && requireNamespace("Matrix", quietly = TRUE)) {
    return(Matrix::sparseMatrix(
      i = match(edges$a, nodes), j = match(edges$b, nodes), x = edges$weight,
      dims = c(n, n), use.last.ij = TRUE, symmetric = TRUE))
  }

  # Dense construction: column i holds the weights of edges incident to
  # node i (looking at node i on either end of the edge).
  colWeights <- function(i) {
    mask_a <- edges$a == nodes[i]
    mask_b <- edges$b == nodes[i]
    jays <- match(c(edges$b[mask_a], edges$a[mask_b]), nodes)
    w <- numeric(n)
    w[jays] <- c(edges$weight[mask_a], edges$weight[mask_b])
    w
  }
  if (is.null(cluster)) {
    adj_mat <- mapply(colWeights, seq_len(n), SIMPLIFY = TRUE)
  } else {
    adj_mat <- parallel::parSapply(cl = cluster, X = seq_len(n),
                                   FUN = colWeights, simplify = TRUE)
  }
  adj_mat
}
|
db96e2133302130d98cd8f8c94731a42a59de2b8 | d9588b116a3111fff9a5ba545b51d5307bed2776 | /Putting_pebbles.R | f93203da230ab7328fda24f87d38bcec9dec331a | [] | no_license | Loulou-lepou/Beginning_R_programming | c4683a0289e6e08148ce48adfa6d94f1dc099a64 | f0fb796263376cb4e6c1e646e1d42be7dcbc1b8e | refs/heads/main | 2023-08-29T12:15:04.243050 | 2023-08-25T05:21:05 | 2023-08-25T05:21:05 | 396,885,155 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,140 | r | Putting_pebbles.R | # Putting pebbles
# E4: "Đặt sỏi" (Placing pebbles), Vietnam National Youth Informatics Contest 2021, qualifying round - Group A
# https://ucode.vn/contests/tin-hoc-tre-quoc-gia-2021-so-khao-bang-a-replay-16562?u=10682&l=16562
# Initially [Round 0], one puts 2 pebbles at a certain distance d > 0 on a straight line.
# Then, [Round 1] one puts another pebble at the midpoint of these pebbles.
# Keep putting pebbles at midpoints of previous consecutive pebbles.
# Input: a natural number N = # round
# Constraint: N <= 10^9
# Output: a unique number is the final digit of # pebbles after round N
# failed 5/7
# N = 1000 => In print(a%%10) : probable complete loss of accuracy in modulus
# N <- as.integer(readline(prompt = "N = "))
# a <- 2
# d <- 1
# for (i in 1: N){
# d = d * 2
# a = d + 1
# }
# print(a %% 10)
# succeeded 7/7
# The final digit repeats in a cycle of [3, 5, 9, 7]
# where N in [1, 2, 3, 0] (mod 4)
# Read the round number N from the user.
N <- as.integer(readline(prompt = "N = "))
# After round N there are 2^N + 1 pebbles; the last digit of 2^N cycles
# through 2,4,8,6 with period 4, so only N mod 4 matters. This avoids the
# loss of accuracy the direct 2^N computation above ran into for large N.
r <- N %% 4
print(paste('after round ', N, 'the final digit of # pebbles is '))
if (N > 0){
  if (r == 0) print(7)
  if (r == 1) print(3)
  if (r == 2) print(5)
  if (r == 3) print(9)
} else {print(2)}
|
ee9c48cc65e64bdc7aca20f17f9673ee436c7303 | 72e2e29041acafe5ced8f29e72b877cae13ceebd | /R/main.R | 48eb94714d5ebdc6436de95335f581f5de8c953e | [
"MIT"
] | permissive | markbaas/sparklyr.aqi | c9e09dd6300575adb52365154450f37d5a155cb1 | e8230bb99a5cc6c824b88f554f0c84f83cdf43c6 | refs/heads/master | 2023-02-19T01:39:58.767822 | 2021-01-13T19:31:30 | 2021-01-13T19:31:30 | 329,406,488 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 165 | r | main.R | #' @import sparklyr
#' Register the AQI extensions with a Spark session
#'
#' Invokes the JVM-side static method \code{sparklyr.aqi.Main.register} on the
#' connection's SparkSession, making the package's Scala-side functionality
#' available to that session.
#'
#' @param sc An active \code{sparklyr} Spark connection.
#' @return The result of the JVM \code{register} call.
#' @export
sparklyr_register_aqi <- function(sc) {
  sparklyr::invoke_static(
    sc, "sparklyr.aqi.Main", "register", spark_session(sc))
}
a9cf4fe16aa1a76a5f488049c43fee9bde9a8da8 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.machine.learning/man/panorama_describe_device_job.Rd | 2bc9566535982e97c65d4990d7c6feb20defed44 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 491 | rd | panorama_describe_device_job.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/panorama_operations.R
\name{panorama_describe_device_job}
\alias{panorama_describe_device_job}
\title{Returns information about a device job}
\usage{
panorama_describe_device_job(JobId)
}
\arguments{
\item{JobId}{[required] The job's ID.}
}
\description{
Returns information about a device job.
See \url{https://www.paws-r-sdk.com/docs/panorama_describe_device_job/} for full documentation.
}
\keyword{internal}
|
15a57f860128adced00f410238cbda0c9c73e54a | 0c15d0d2204175993afb6489a07322f35bb29efa | /plot2.R | 10a476bed5c0b4cb5db65244f59bf3517fa4fdae | [] | no_license | morphocia/ExData_Plotting1 | 56d629c5b8e1e7cf84e7a34e0d0585d3474bef13 | f453a682a06b8d15a560aa34a4bcce55bb477b43 | refs/heads/master | 2021-01-17T22:22:53.147564 | 2014-11-08T06:02:31 | 2014-11-08T06:02:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 657 | r | plot2.R | data <- read.table("household_power_consumption.txt",
sep=";",
stringsAsFactors = FALSE,
na.strings = "?",
comment.char="",
header = TRUE,
nrows = 69516)
# Restrict to the two analysis days: 1 and 2 February 2007.
data <- subset(data, data$Date == "1/2/2007" | data$Date == "2/2/2007")
# Fold Date + Time into a single POSIXlt timestamp kept in the Time column.
data$Time <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
# The Date column (column 1) is now redundant; drop it.
data <- data[-1]
# Draw the global-active-power line chart into plot2.png.
png(filename = "plot2.png", bg = "transparent")
plot(data$Time, data$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
93efefb1e179fd4dbc411ba6b3e2d5173715a59d | e315f9a5a47e987beef4c65a409cc740c1e93afc | /R/daylcalc.R | 59e9222d34d1470881dbb6328caf5670195cd8ca | [] | no_license | paleolimbot/EMPOWER | c7585c896f281cd9c50e186bf2e4530db791bb43 | f277840e0e70900f615e2c706b42ff2fe6afacd6 | refs/heads/master | 2021-08-23T14:24:52.090078 | 2017-12-05T07:09:28 | 2017-12-05T07:09:28 | 113,146,310 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 653 | r | daylcalc.R |
# ----------------------------------------------------------------------- #
# Calculation of day length as function of day of year and latitude #
# ----------------------------------------------------------------------- #
#' Calculation of day length as function of day of year and latitude
#'
#' @param jday The julian day of the year
#' @param latradians Latitude, in radians
#'
#' @return Day length (in hours?)
#' @keywords internal
#'
FNdaylcalc <- function(jday, latradians) {
  # Solar declination angle (radians) for this day of year (Cooper's formula).
  solar_declination <- (pi / 180) * 23.45 * sin(2 * pi * 0.00274 * (284 + jday))
  # Half-day hour angle at sunrise/sunset, converted to hours of daylight.
  sunset_angle <- acos(-tan(latradians) * tan(solar_declination))
  (12 / pi) * 2 * sunset_angle
}
|
cc286d9cd5e5d96c4c38e3c241fc0a536dda2490 | 48e27c4972218672b0590c0d19a53a11c345d073 | /gatherCalls.R | 5dac00a28a9477bdeaf9507c839a0274eb80d2c0 | [] | no_license | drramki-chop/ExomeDepthSGE | f868c819b51008d541c2a1d1d12ba59dd3749770 | 9201696f33003f11d387aa5d1939ea4813175c0e | refs/heads/master | 2020-06-02T17:06:50.872549 | 2019-06-10T20:35:49 | 2019-06-10T20:35:49 | 191,241,371 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 344 | r | gatherCalls.R | library(purrr)
# Gather per-sample ExomeDepth CNV call files from ./results/ into a single
# cohort-level table written to "<cohort.name>.exomeDepthCalls.txt".
library(readr)
# Cohort settings (including cohort.name) come from input.yaml.
input <- yaml::read_yaml("input.yaml")
# NOTE(review): `pattern` is interpreted as a regex by list.files(), but this
# looks like a glob; it happens to match because "." matches any character —
# consider glob2rx("*.exomeDepthCalls.txt").
cnvFiles <- list.files("./results/",pattern = "*.exomeDepthCalls.txt")
# Read every calls file and stack them row-wise into one table.
calls <- cnvFiles %>%
  map(~ read_table(file.path("./results/", .))) %>%
  reduce(rbind)
write.table(calls,paste0(as.character(input$cohort.name),".exomeDepthCalls.txt"),row.names=F,sep="\t",quote=F)
|
e2849bcc36bc23d571218f10644aff28de23965e | 82d7ca1ad43104782dcb863530ab409546c54349 | /Demand/demand1.R | a0b1643d0217c5ffe92e7f5e1309f26718e32276 | [] | no_license | eruscus/freeoj_git | ef291f4695b10ba5b29e076a16269b0db351e4a2 | 75c20fa5d07077dd3a364f022b116e371a08a053 | refs/heads/master | 2020-06-15T07:11:05.043629 | 2016-12-13T03:29:17 | 2016-12-13T03:29:17 | 75,315,154 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,043 | r | demand1.R | load('ORA.RData')
load('POJ.RData')
load('FCOJ.RData')
load('ROJ.RData')
# Subset the (global) ORA weekly data to a single region.
# NAME: region code, e.g. 'DS'.
# Returns a matrix with columns: Time, Month, Week, Price, Sales,
# Capacity, Indicator.
split.region <- function(NAME) {
  RE = (ORA$Region == NAME) #NEED TO REPLACE ORA BY APPROPRIATE PRODUCT HERE AND ON LINE 5
  Per.region = cbind(ORA$Time[RE], ORA$Month[RE], ORA$Week[RE], ORA$Price[RE], ORA$Sales[RE], ORA$Capacity[RE], ORA$Indicator[RE])
  return(Per.region)
}
#FIRST, COMMAND+A THEN CLICK RUN (to compile all functions. Don't worry about the outputs.)
#GO TO LINE 120. NO NEED TO READ THIS.
remove.zeros <- function(X){ #X is a matrix
  # Derive a running year counter and drop zero-capacity rows.
  #
  # X has columns (Time, Month, Week, Price, Sales, Capacity, Indicator)
  # as produced by split.region().  A new year starts whenever the week
  # counter (column 3) resets to 1.
  #
  # Returns a matrix with columns (Time, Year, Month, Week, Price, Sales,
  # Capacity, Indicator), keeping only rows with non-zero capacity.
  n <- nrow(X)  # generalized: the original hard-coded 576 rows
  year <- numeric(n)
  year[1] <- 1
  for (i in seq_len(n)[-1]) {
    # carry the year forward, bumping it when a new season (week 1) starts
    year[i] <- year[i-1] + (X[i,3] == 1)
  }
  NZ <- X[,6] != 0  # capacity column; zero capacity means no observation
  Y <- cbind(X[,1][NZ], year[NZ], X[,2][NZ], X[,3][NZ], X[,4][NZ],
             X[,5][NZ], X[,6][NZ], X[,7][NZ])
  return(Y)
}
est.demand <- function(A){
  # Estimate (censored) demand from observed sales.
  #
  # A has columns (Time, Year, Month, Week, Price, Sales, Capacity,
  # Indicator) as produced by remove.zeros().  When the stock-out
  # indicator (column 8) is 1, observed sales understate true demand, so
  # sales are inflated by a random factor drawn uniformly from [1.1, 1.5];
  # otherwise sales are used as-is.
  #
  # Returns A with column 6 replaced by the estimated demand.
  X <- A[,6]
  censored <- A[,8] == 1
  if (any(censored)) {
    # one independent U(0.1, 0.5) draw per censored row (vectorized;
    # consumes the same RNG stream as the original per-row loop)
    X[censored] <- A[censored,6] * (1 + runif(sum(censored), min = .1, max = .5))
  }
  Y <- cbind(A[,1], A[,2], A[,3], A[,4], A[,5], X, A[,7], A[,8])
  return(Y)
}
#TIME, YEAR, MONTH, WEEK, PRICE, DEMAND, CAPACITY, INDICATOR
plot.h.season <- function(X){
  # Scatter plot of weekly demand for the historical years only (years 1-10).
  # X: matrix with columns (Time, Year, Month, Week, Price, Demand,
  # Capacity, Indicator); draws on the active graphics device, returns NULL.
  is.H = (X[,2] <= 10)
  WEEK = X[,4][is.H]
  DEMAND = X[,6][is.H]
  plot(WEEK, DEMAND)
  # label the x-axis at the observed weeks and overlay a weekly grid
  axis(1, at = WEEK)
  grid(nx = 49)
  return()
}
plot.all.season <- function(X){
  # Scatter plot of weekly demand for all years (no historical filter).
  # X: matrix with columns (Time, Year, Month, Week, Price, Demand,
  # Capacity, Indicator); draws on the active graphics device, returns NULL.
  WEEK = X[,4]
  DEMAND = X[,6]
  plot(WEEK, DEMAND)
  axis(1, at = WEEK)
  grid(nx = 49)
  return()
}
#TIME, YEAR, MONTH, WEEK, PRICE, DEMAND, CAPACITY, INDICATOR
fit.demand <- function(S.start, S.end, X){
  # Fit a linear (inverse) demand curve price = a + b * demand for one
  # selling season spanning weeks S.start..S.end.
  #
  # X has columns (Time, Year, Month, Week, Price, Demand, Capacity,
  # Indicator).  Years 1-10 are treated as historical data and averaged
  # week by week; years 11 and 12 enter as individual points.
  #
  # Returns the lsfit() coefficients (intercept and slope).
  season.length <- S.end - S.start + 1
  hist.demand <- numeric(season.length)
  hist.price <- numeric(season.length)
  y11.demand <- numeric(season.length)
  y11.price <- numeric(season.length)
  y12.demand <- numeric(season.length)
  y12.price <- numeric(season.length)
  for (i in seq_len(nrow(X))) {
    wk <- X[i,4]
    if (wk >= S.start && wk <= S.end) {
      # Index within the season.  The original indexed the accumulators by
      # the raw week number, which overruns them (extending with NA)
      # whenever S.start > 1 and fed NA vectors to lsfit().
      j <- wk - S.start + 1
      yr <- X[i,2]
      if (yr <= 10) {
        hist.demand[j] <- hist.demand[j] + X[i,6]
        hist.price[j] <- hist.price[j] + X[i,5]
      } else if (yr == 11) {
        y11.demand[j] <- y11.demand[j] + X[i,6]
        y11.price[j] <- y11.price[j] + X[i,5]
      } else if (yr == 12) {
        y12.demand[j] <- y12.demand[j] + X[i,6]
        y12.price[j] <- y12.price[j] + X[i,5]
      }
    }
  }
  # Average the 10 historical years; the two recent years stay as totals.
  DEMAND <- c(hist.demand/10, y11.demand, y12.demand)
  PRICE <- c(hist.price/10, y11.price, y12.price)
  # Regress price on demand (inverse demand curve) and return coefficients.
  LINE <- lsfit(DEMAND, PRICE)
  return(LINE$coeff)
}
#EXAMPLE REGION CODE. NEED TO REPLACE DS BY APPROPRIATE REGION IN WHAT FOLLOWS
# Pipeline: extract region -> add year counter / drop zero-capacity rows ->
# estimate censored demand -> inspect plots -> fit the demand curve.
DS = split.region('DS')
DS_Zero = remove.zeros(DS)
DS.est = est.demand(DS_Zero)
plot.h.season(DS.est) #PLOTS HISTORICAL DATA
#LOOK AT THE PLOT AND DECIDE ON THE SEASONS. IF UNCLEAR, THEN RUN FOLLOWING FUNCTION:
plot.all.season(DS.est)
#THEN FOR EACH SEASON RUN:
#HERE, 1 AND 8 ARE THE BEGINNING AND END WEEKS OF THE SEASON
fit.demand(1,8,DS.est)
#COPY OUTPUT TO EXCEL FILE
|
bf8d8cd0bba7c7222aac7edc75f83d6110f952d1 | baf387b232d4e1a2645ef430ae129957efa9e544 | /Data Operations in R.R | 49c70a141fd04bc4badf82b9afec2a4b3f142fa0 | [] | no_license | DiannitaOlipmimi/Skill_Academy | 2b207f0aa993868d8d3579c557d8732b55e95fc6 | a4d00bba3dcd10d34514c477f237a8dcb56676bf | refs/heads/main | 2023-05-28T12:00:53.252374 | 2021-06-07T02:57:40 | 2021-06-07T02:57:40 | 374,363,541 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,189 | r | Data Operations in R.R | #membuat data set
users=data.frame(
names=c("Adi","Budi","Cindi","Dedi"),
gender=c("male","male","female","male"),
age=c(10,20,30,40)
)
#memasukan dataset
dataset <- read_csv("Skill Academy/Programming Foundation for DS/Intorduction to R language/dataset_superstore_simple.csv")
View(dataset)
#operasi tdhp datase
summary(dataset)
#isi kolom atas
head(dataset)
head(dataset,10)
nrow(dataset)
ncol(dataset)
#export
write.csv(dataset,'dataset_new.csv')
#operasi dataframe dengan library dplyr
#dplyr digunakan untuk memanipulasi data
#tidyverse digunakan dataframe datascientist
library(dplyr)
library(tidyverse)
#melihat dataset
glimpse(dataset)
#mengambil beberapa kolom
#function select
select(dataset, order_id)
dataset_result1=select(dataset, c(order_id,order_date,sales))
dataset_result1
#mengambil semua kolom kecuali kolom tertentu
dataset_result2=select(dataset, -c(profit,sub_category))
dataset_result2
select(dataset_result2, c(order_id,sales,customer_id))
#function filter
filter(dataset,segment == 'Consumer')
dataset_result3 = filter(dataset,segment == 'Consumer')
dataset_result3
#filter segment consumer dan profit lebih drai 0
dataset4=filter(dataset,segment == 'Consumer' & profit > 0)
dataset4
#filter segment consumer atau profit lebih drai 0
dataset5=filter(dataset,segment == 'Consumer' | profit > 0)
dataset5
#filter segment tidak sama dengan consumer dan profit lebih drai 0
dataset6=filter(dataset,segment != 'Consumer' & profit > 0)
dataset6
#function mutate
#membuat kolom baru
dataset7=mutate(dataset, avg_price = sales/quantity)
dataset7
#memunculkan hanya kolom avg_price
transmute(dataset, avg_price = sales/quantity)
dataset7$avg_price=transmute(dataset,avg_price = sales/quantity)
#piping, beberapa operasi sekaligus
library(dplyr)
dataset8=filter(dataset, segment == 'Consumer')
dataset9=mutate(dataset, avg_price = sales/quantity)
dataset10=select(dataset9, c(order_id, order_date,sales,avg_price))
dataset10
dataset11= dataset %>% filter(segment == 'Consumer')
%>% mutate(avg_price = sales/quantity)
%>% select(c(order_id, order_date,sales,avg_price))
dataset11
|
570cba6a6db503d79fa7e89a42378f19fb224088 | 925888968dc02f56a97ce3ce2c05e591e7b60828 | /reading_material/HW1/hw01_q4_full_solution (1).R | c2a204c8d2261219ea17423eaf74b6605e133b7c | [] | no_license | Rui425/Yale_Faces | 5217331f05d944dcb9b01b4b6c237af378a968a4 | cdf83bc273847981e6981eb0e9c2c2f0d851fb07 | refs/heads/master | 2021-01-01T19:00:58.708732 | 2015-07-15T02:32:52 | 2015-07-15T02:32:52 | 38,589,794 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,818 | r | hw01_q4_full_solution (1).R | #############################
# < Your Name Here >
# STAT W4240
# Homework <HW Number> , Problem <Problem Number>
# < Homework Due Date >
#
# The following code loads the eigenfaces data and
# performs a set of simple loading and plotting functions
#############################
#################
# Setup
#################
# make sure R is in the proper working directory
# note that this will be a different path for every machine
setwd("~/Documents/academic/teaching/STAT_W4240_2014_SPRG/hw/hw01")
# first include the relevant libraries
# note that a loading error might mean that you have to
# install the package into your R distribution. From the
# command line, type install.packages("pixmap")
library(pixmap)
#################
# Problem 1a
#################
# paste or type in the given code here
face_01 = read.pnm(file = "CroppedYale/yaleB01/yaleB01_P00A-005E+10.pgm")
# now plot the data
plot(face_01)
# give it a nice title
title('hw01_01a: the first face')
# save the result
filename = 'hw01_01a.png'
dev.copy(device=png, file=filename, height=600, width=800)
dev.off()
# extract the class and size
face_01_class = attr(face_01,'class')
face_01_size = attr(face_01,'size')
# print the result in a nice format
sprintf('Face 01 is of class %s, which has size %d by %d' , face_01_class , face_01_size[1] , face_01_size[2] )
#################
# Problem 1b
#################
# make face_01 into a matrix with the given command
face_01_matrix = getChannels(face_01)
# load a second face
face_02 = read.pnm(file = "CroppedYale/yaleB02/yaleB02_P00A-005E+10.pgm")
face_02_matrix = getChannels(face_02)
# combine two faces into a single data matrix and make that a pixmap
faces_matrix = cbind( face_01_matrix , face_02_matrix )
faces = pixmapGrey( faces_matrix )
# plot to verify
plot(faces)
# find min and max values
faces_min = min(faces_matrix)
faces_max = max(faces_matrix)
# from the above we see the values are between 0 and 1, 0
# corresponding to black, 1 to white
#################
# Problem 1c
#################
# get directory structure
dir_list_1 = dir(path="CroppedYale/",all.files=FALSE)
dir_list_2 = dir(path="CroppedYale/",all.files=FALSE,recursive=TRUE)
# find lengths
len_dl1 = length(dir_list_1)
len_dl2 = length(dir_list_2)
#################
# Problem 1d
#################
# the list of pictures (note the absence of 14 means that 31 corresponds to yaleB32)
pic_list = c( 05 , 11 , 31 )
view_list = c( 'P00A-005E+10' , 'P00A-005E-10' , 'P00A-010E+00')
# preallocate an empty list
pic_data = vector("list",length(pic_list)*length(view_list))
# initialize an empty matrix of faces data
faces_matrix = vector()
# outer loop through the pictures
for ( i in 1:length(pic_list) ){
# initialize an empty row of faces data
this_face_row = vector()
# inner loop over views
for ( j in 1:length(view_list) ){
# compile the correct file name
# note that dir_list_1[pic_list[2]] should be "yaleB17" if pic_list[2] is B17
this_filename = sprintf("CroppedYale/%s/%s_%s.pgm", dir_list_1[pic_list[i]] , dir_list_1[pic_list[i]] , view_list[j])
# you can print out each name to help debug the code
# print(this_filename)
# load the data
this_face = read.pnm(file = this_filename)
this_face_matrix = getChannels(this_face)
# append the view to the row for this face
this_face_row = cbind( this_face_row , this_face_matrix )
}
# append the latest row to the face_matrix
faces_matrix = rbind( faces_matrix , this_face_row )
}
# now faces_matrix has been built properly. plot and save it.
faces = pixmapGrey(faces_matrix)
plot(faces)
# give it a nice title
title('hw01_01d: 3x3 grid of faces')
# save the result
filename = 'hw01_01d.png'
dev.copy(device=png, file=filename, height=600, width=800)
dev.off()
#################
# End of Script
#################
|
b7715fde7ed7eea1af607cebf9a34d236cf64655 | 59d0ef049e63b38cb5a8b84cad8cd45fcaad8974 | /man/proxGrad.Rd | bb19429622666f6babb74a13550c0826b0e3d7b1 | [] | no_license | guhjy/lavaPenalty | e6a9452992c34c555e7de8957b34e79b7338fc7f | 9bbaaa25517c12b81ad282b0b05b3029f4a8e6f5 | refs/heads/master | 2020-04-08T14:24:05.235200 | 2018-02-26T09:43:01 | 2018-02-26T09:43:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,100 | rd | proxGrad.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Penalty_ISTA.R
\name{proxGrad}
\alias{proxGrad}
\title{Proximal gradient algorithm}
\usage{
proxGrad(start, proxOperator, hessian, gradient, objective,
control = lava.options()$proxGrad)
}
\arguments{
\item{start}{initial values for the parameters}
\item{proxOperator}{proximal operator corresponding to the penalization applied to the log likelihood}
\item{hessian}{second derivative of the likelihood given by lava. Only used to estimate the step parameter of the algorithm when step = NULL}
\item{gradient}{first derivative of the likelihood given by lava.}
\item{objective}{likelihood given by lava. Used to adjust the step parameter when using backtracking}
\item{control}{settings for the proximal gradient algorithm. See lava.options.}
}
\description{
Estimate parameters using a proximal gradient algorithm
}
\references{
Beck and Teboulle - 2009 A Fast Iterative Shrinkage-Thresholding Algorithm
Li 2015 - Accelerated Proximal Gradient Methods for Nonconvex Programming
Simon 2013 - A sparse group Lasso
}
|
240c4eec52713d1e6e81824f9e10751e86c2fdd2 | 36cf5ac1e49e87481abf2cf33af8b7dbec496a0c | /Analise_de_Regressao_Linear_Exercicios_Praticos_2.R | 8ab16de0cc7f6f7836171d7dcc9b0e9df6fbc40f | [] | no_license | diego-s-fernandes/materia_mestrado | 65d1996ecd8c117b46d8b73b2d43277543fd1dd9 | cec33a64564717f058abbde134e93c9fe8de0ec1 | refs/heads/master | 2020-08-09T22:56:10.072438 | 2019-10-25T14:26:37 | 2019-10-25T14:26:37 | 214,195,112 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 18,101 | r | Analise_de_Regressao_Linear_Exercicios_Praticos_2.R |
rm(list=ls()); #---- limpa todo o ambiente de variáveis para a execução do R
#install.packages("plyr")
#install.packages("caret")
#install.packages("leaps")
#install.packages("ggplot2")
library(plyr);
library(caret)
library(leaps)
library(car);
#---- indique aqui o diretorio de trabalho
setwd("C:\\Users\\Alexandre\\Dropbox\\Novos_cursos\\IDP\\IDP_Introducao_a_Estatistica\\ProgramasR\\Dados_Municipios");
#---- lendo as bases de dados em CSV
dados <- read.csv2("IDH_Brasil_2010.csv", header=T, sep=";", dec=",", encoding="latin1");
codigos_ufs <- read.csv2("codigos_ufs.csv", header=T, sep=";", dec=",", encoding="latin1");
empresas <- read.csv2("CADASTRO_EMPRESAS_2008.csv", header=T, sep=";", dec=".", encoding="latin1")
fiscal <- read.csv2("financas_publicas_2008.csv", header=T, sep=";", dec=".", encoding="latin1")
obitos <- read.csv2("OBITOS_DATASUS.csv", header=T, sep=";", dec=".", encoding ="latin1")
#---- fazendo um join de colunas de duas tabelas
fiscal1 <- fiscal[, !(colnames(fiscal) %in% c("nome_mun", "cod_uf", "uf"))] #-- excluindo colunas
dados1 <- merge(x = dados, y = codigos_ufs, by.x = "uf", by.y = "uf", all.x = T, all.y = T);
dados2 <- merge(x = dados1, y = empresas, by.x = "codmun", by.y = "codmun", all.x = TRUE, all.y = TRUE)
dados3 <- merge(x = dados2, y = fiscal1, by.x = "codmun", by.y = "cod_mun", all.x = TRUE)
dados4 <- merge(x = dados3, y = obitos, by.x = "codmun", by.y = "codmun", all.x = TRUE)
#--------------------------------------------------------------------------------#
#--- Efetuando regressões lineares
#--------------------------------------------------------------------------------#
dados3$perc_pop_rural <- dados3$populacao_rural / dados3$populacao_total
mod1.ex <- lm(dados3$mort_infantil ~ dados3$renda_per_capita
+ dados3$indice_gini
+ dados3$salario_medio_mensal
+ dados3$perc_criancas_extrem_pobres
+ dados3$perc_criancas_pobres
+ dados3$perc_pessoas_dom_agua_estogo_inadequados
+ dados3$perc_pessoas_dom_paredes_inadequadas
+ dados3$perc_pop_dom_com_coleta_lixo)
summary(mod1.ex)
mod2.ex <- lm(dados3$mort_infantil ~ dados3$renda_per_capita
+ dados3$indice_gini
+ dados3$salario_medio_mensal
+ dados3$perc_criancas_extrem_pobres
+ dados3$perc_criancas_pobres
+ dados3$perc_pessoas_dom_agua_estogo_inadequados
+ dados3$perc_pessoas_dom_paredes_inadequadas
+ dados3$perc_pop_dom_com_coleta_lixo
+ dados3$perc_pop_rural
+ as.factor(dados3$Regiao))
summary(mod2.ex)
mod3.ex <- lm(dados3$mort_infantil ~ dados3$renda_per_capita
+ dados3$indice_gini
+ dados3$salario_medio_mensal
+ dados3$perc_criancas_extrem_pobres
+ dados3$perc_criancas_pobres
+ dados3$perc_pessoas_dom_agua_estogo_inadequados
+ dados3$perc_pessoas_dom_paredes_inadequadas
+ dados3$perc_pop_dom_com_coleta_lixo
+ dados3$perc_pop_rural
+ as.factor(dados3$Regiao)
+ as.factor(dados3$Regiao)*dados3$renda_per_capita)
summary(mod3.ex)
mod1a.ex <- lm(dados3$mort_infantil ~ dados3$renda_per_capita
+ I(renda_per_capita^2)
+ dados3$indice_gini
+ dados3$salario_medio_mensal
+ dados3$perc_criancas_extrem_pobres
+ dados3$perc_criancas_pobres
+ dados3$perc_pessoas_dom_agua_estogo_inadequados
+ dados3$perc_pessoas_dom_paredes_inadequadas
+ dados3$perc_pop_dom_com_coleta_lixo, data = dados)
summary(mod1a.ex)
#---- intervalos de confiança para os parâmetros da regressão estimada
confint(mod1.ex) #--- probabilidade de cobertura de 95%
confint(mod1.ex, level = 0.9) #--- probabilidade de cobertura de 90%
confint(mod1.ex, level = 0.8) #--- probabilidade de cobertura de 80%
#------------------------------------------------------------------
#---- testando hipóteses para um ou vários parâmetros
#------------------------------------------------------------------
mod2.ex <- lm(dados3$mort_infantil ~ dados3$renda_per_capita
+ dados3$indice_gini
+ dados3$salario_medio_mensal
+ dados3$perc_criancas_extrem_pobres
+ dados3$perc_criancas_pobres
+ dados3$perc_pessoas_dom_agua_estogo_inadequados
+ dados3$perc_pessoas_dom_paredes_inadequadas
+ dados3$perc_pop_dom_com_coleta_lixo
+ dados3$perc_pop_rural
+ as.factor(dados3$Regiao))
summary(mod2.ex)
linearHypothesis(mod2.ex, c("(Intercept) = 0"))
?linearHypothesis
linearHypothesis(mod2.ex, c("dados3$indice_gini = 0"))
linearHypothesis(mod2.ex, c("dados3$indice_gini = 1"))
linearHypothesis(mod2.ex, c("dados3$indice_gini = 0",
"dados3$salario_medio_mensal = 0",
"dados3$perc_pop_rural"))
linearHypothesis(mod2.ex, c("dados3$indice_gini = 0",
"dados3$salario_medio_mensal = 0",
"dados3$perc_pop_rural"), test = "F") #--- default
linearHypothesis(mod2.ex, c("dados3$indice_gini = 0",
"dados3$salario_medio_mensal = 0"), test = "Chisq")
linearHypothesis(mod2.ex, c("dados3$indice_gini + dados3$renda_per_capita = 0",
"dados3$salario_medio_mensal = 0"))
mod2.ex.rest <- lm(dados3$mort_infantil ~ dados3$renda_per_capita
+ dados3$indice_gini
+ dados3$salario_medio_mensal
+ dados3$perc_criancas_extrem_pobres
+ dados3$perc_criancas_pobres
+ dados3$perc_pessoas_dom_agua_estogo_inadequados
+ dados3$perc_pessoas_dom_paredes_inadequadas
+ dados3$perc_pop_dom_com_coleta_lixo
+ dados3$perc_pop_rural)
summary(mod2.ex.rest)
anova(mod2.ex.rest, mod2.ex, test='LRT')
mod1b.ex <- lm(dados3$mort_infantil ~ dados3$renda_per_capita
+ I(renda_per_capita^2)
+ I(renda_per_capita^3)
+ dados3$indice_gini
+ dados3$salario_medio_mensal
+ dados3$perc_criancas_extrem_pobres
+ dados3$perc_criancas_pobres
+ dados3$perc_pessoas_dom_agua_estogo_inadequados
+ dados3$perc_pessoas_dom_paredes_inadequadas
+ dados3$perc_pop_dom_com_coleta_lixo, data = dados)
summary(mod1b.ex)
mod1b.ex.rest <- lm(dados3$mort_infantil ~ dados3$renda_per_capita
+ dados3$indice_gini
+ dados3$salario_medio_mensal
+ dados3$perc_criancas_extrem_pobres
+ dados3$perc_criancas_pobres
+ dados3$perc_pessoas_dom_agua_estogo_inadequados
+ dados3$perc_pessoas_dom_paredes_inadequadas
+ dados3$perc_pop_dom_com_coleta_lixo, data = dados)
summary(mod1b.ex.rest)
anova(mod1b.ex.rest, mod1b.ex, test='LRT')
#--- distribuição qui-quadrada
qchisq(0.90, df = 2)
qchisq(0.95, df = 5)
qchisq(0.95, df = 7) #--- valores críticos
qchisq(0.95, df = 4)
1 - pchisq(30, df = 7) #--- probabilidades da cauda da direita
1 - pchisq(15, df = 4)
#--- distribuição F
qf(0.90, df1 = 2, df2 = 2)
qf(0.90, df1 = 6, df2 = 10)
qf(0.95, df1 = 7, df2 = 200) #--- valores críticos
qf(0.95, df1 = 4, df2 = 200)
1 - pf(30, df1 = 7, df2 = 200) #--- probabilidades da cauda da direita
1 - pf(15, df1 = 4, df2 = 200)
#--- convergência da F para uma qui-quadrada
qchisq(0.90, df = 4)
qf(0.90, df1 = 4, df2 = 10)*4
qf(0.90, df1 = 4, df2 = 100)*4
qf(0.90, df1 = 4, df2 = 1000)*4
qf(0.90, df1 = 4, df2 = 10000000)*4
#----------------------------------------------------------------------------
#---- exemplos de expressões matriciais em R para modelos de regressão
#----------------------------------------------------------------------------
mod1.X <- lm(mort_infantil ~ renda_per_capita
+ salario_medio_mensal
+ perc_criancas_extrem_pobres
+ perc_pessoas_dom_agua_estogo_inadequados
+ perc_pop_dom_com_coleta_lixo, data = dados3)
summary(mod1.X)
X1 <- model.matrix(mod1.X) #---- design matrix para o modelo de regressão
head(X1)
tail(X1)
df.X1 <- as.data.frame(X1) #---- transformando em data.frame para visualização mais fácil
View(df.X1)
mod2.X <- lm(mort_infantil ~ renda_per_capita + as.factor(Regiao), data = dados3)
summary(mod2.X)
X2 <- model.matrix(mod2.X)
tail(X2)
head(X2)
df.X2 <- as.data.frame(X2) #---- transformando em data.frame para visualização mais fácil
View(df.X2)
#--- standard deviation and variance of the regression residuals - manual computation
n <- nrow(X1) #--- number of observations
k <- ncol(X1) - 1 #--- number of explanatory variables
n;k
mod1.residuos <- mod1.X$residuals
head(mod1.residuos)
tail(mod1.residuos)
hist(mod1.residuos, col = 'red', breaks = 20)
# Residual variance: SSR / (n - k - 1), via the crossproduct of the residuals.
mod1.residuos.var <- (t(mod1.residuos) %*% mod1.residuos) / (n-k-1)
mod1.residuos.var
mod1.residuos.desvpad <- sqrt(mod1.residuos.var)
mod1.residuos.desvpad
#--- variance-covariance matrix and standard errors of the coefficients
mod1.residuos.var <- as.numeric(mod1.residuos.var)
mod1.residuos.var
sqrt(mod1.residuos.var)
# Var(beta-hat) = sigma^2 * (X'X)^{-1}; standard errors are the square
# roots of its diagonal.
cov1 <- mod1.residuos.var * (solve(t(X1) %*% X1))
cov1
diag(cov1)
erropadrao1 <- sqrt(diag(cov1))
erropadrao1
#--- estimated coefficients, test statistics and p-values
Y1 <- dados3$mort_infantil
# OLS closed form: beta-hat = (X'X)^{-1} X'y.
beta1 <- (solve(t(X1) %*% X1)) %*% (t(X1) %*% Y1) #--- coefficients
beta1
estatistica_t1 <- beta1 / erropadrao1 #--- t test statistics
estatistica_t1
# Two-sided p-values from the t distribution with n-k-1 degrees of freedom.
pvalor1 <- 2*(1 - pt(abs(estatistica_t1), n-k-1)) #--- p-values (t-Student)
pvalor1
resultados1 <- cbind(beta1, erropadrao1, estatistica_t1, pvalor1) #--- combining everything
resultados1
#--- dropping one column from the design matrix X2
head(X2) #-- before dropping
X2 <- X2[,!(colnames(X2) %in% c("as.factor(Regiao)Sudeste"))]
head(X2) #-- after dropping
#------------------------------------------------------
#---- efetuando cross-validation, AIC e BIC
#------------------------------------------------------
set.seed(2104)
trainIndex <- createDataPartition(dados3$Regiao, p = .8, list = FALSE, times = 1) #-- balanceando entre regiões
head(trainIndex)
dadosTrain <- dados3[ trainIndex,] #--- amostra de treinamento
dadosTest <- dados3[-trainIndex,] #--- amostra usada para testar a previsão
table(dadosTrain$Regiao)
table(dadosTest$Regiao)
mod1 <- lm(mort_infantil ~ renda_per_capita
+ I(renda_per_capita^2)
+ I(renda_per_capita^3)
+ indice_gini
+ salario_medio_mensal
+ perc_criancas_extrem_pobres
+ perc_criancas_pobres
+ perc_pessoas_dom_agua_estogo_inadequados
+ perc_pessoas_dom_paredes_inadequadas
+ perc_pop_dom_com_coleta_lixo, data = dadosTrain)
summary(mod1)
mod2 <- lm(mort_infantil ~ renda_per_capita
+ indice_gini
+ salario_medio_mensal
+ perc_criancas_extrem_pobres
+ perc_criancas_pobres
+ perc_pessoas_dom_agua_estogo_inadequados
+ perc_pessoas_dom_paredes_inadequadas
+ perc_pop_dom_com_coleta_lixo
+ perc_pop_rural
+ as.factor(Regiao)
+ as.factor(Regiao)*renda_per_capita, data = dadosTrain)
summary(mod2)
mod3 <- lm(mort_infantil ~ renda_per_capita
+ indice_gini
+ salario_medio_mensal
+ perc_criancas_extrem_pobres
+ perc_criancas_pobres
+ perc_pessoas_dom_agua_estogo_inadequados
+ perc_pessoas_dom_paredes_inadequadas
+ perc_pop_dom_com_coleta_lixo
+ perc_pop_rural, data = dadosTrain)
summary(mod3)
mod1.pred <- predict(mod1, newdata = dadosTest, se.fit = T)
mod2.pred <- predict(mod2, newdata = dadosTest, se.fit = T)
mod3.pred <- predict(mod3, newdata = dadosTest, se.fit = T)
mod1.pred.error <- mod1.pred$fit - dadosTest$mort_infantil
mod2.pred.error <- mod2.pred$fit - dadosTest$mort_infantil
mod3.pred.error <- mod3.pred$fit - dadosTest$mort_infantil
mod1.mspe <- mean(mod1.pred.error^2)
mod2.mspe <- mean(mod2.pred.error^2)
mod3.mspe <- mean(mod3.pred.error^2)
mod1.mspe
mod2.mspe
mod3.mspe
AIC(mod1)
AIC(mod2)
AIC(mod3)
BIC(mod1)
BIC(mod2)
BIC(mod3)
#------------------------------------------------------
#---- Best subset selection
#------------------------------------------------------
mod.full <- lm(mort_infantil ~ renda_per_capita
+ I(renda_per_capita^2)
+ I(renda_per_capita^3)
+ I(renda_per_capita^4)
+ I(renda_per_capita^5)
+ indice_gini
+ I(indice_gini^2)
+ I(indice_gini^3)
+ I(indice_gini^4)
+ I(indice_gini^5)
+ salario_medio_mensal
+ I(salario_medio_mensal^2)
+ I(salario_medio_mensal^3)
+ I(salario_medio_mensal^4)
+ I(salario_medio_mensal^5)
+ perc_criancas_extrem_pobres
+ perc_criancas_pobres
+ perc_pessoas_dom_agua_estogo_inadequados
+ perc_pessoas_dom_paredes_inadequadas
+ perc_pop_dom_com_coleta_lixo
+ perc_pop_rural
+ as.factor(Regiao)
+ as.factor(Regiao)*renda_per_capita, data = dados3)
summary(mod.full)
formula(mod.full)
bestsub <- regsubsets(formula(mod.full), data = dados3, nvmax = 50)
bestsub
summary.bestsub <- summary(bestsub)
#--- gráficos para os diversos critérios
par(mfrow = c(2,2));
par(mar = c(4,4,2,2));
plot(summary.bestsub$cp, xlab = "Número de variáveis", ylab = "Cp de Mallow",
col = "red", lty = 1, lwd = 2, type = 'o', main = "Critério Cp de Mallow")
summary.bestsub$cp
which.min(summary.bestsub$cp)
summary.bestsub$which[21,]
points(21, summary.bestsub$cp[21], pch=20, col = "blue", cex = 3.0)
plot(summary.bestsub$adjr2, xlab = "Número de variáveis", ylab = "R2 Ajustado",
col = "red", lty = 1, lwd = 2, type = 'o', main = "Critério R2 Ajustado")
summary.bestsub$adjr2
which.max(summary.bestsub$adjr2)
summary.bestsub$which[23,]
points(23, summary.bestsub$adjr2[23], pch=20, col = "blue", cex = 3.0)
plot(summary.bestsub$rsq, xlab = "Número de variáveis", ylab = "R2",
col = "red", lty = 1, lwd = 2, type = 'o', main = "Critério R2")
summary.bestsub$rsq
which.max(summary.bestsub$rsq)
summary.bestsub$which[29,]
points(29, summary.bestsub$rsq[29], pch=20, col = "blue", cex = 3.0)
plot(summary.bestsub$bic, xlab = "Número de variáveis", ylab = "BIC",
col = "red", lty = 1, lwd = 2, type = 'o', main = "Critério BIC")
summary.bestsub$bic
which.min(summary.bestsub$bic)
summary.bestsub$which[11,]
points(11, summary.bestsub$bic[11], pch=20, col = "blue", cex = 3.0)
#-- melhor modelo com o R2 ajustado
bestsub$xnames[summary.bestsub$which[23,]]
#-- melhor modelo com o BIC
bestsub$xnames[summary.bestsub$which[11,]]
#-- melhor modelo com o Cp
bestsub$xnames[summary.bestsub$which[21,]]
#-- coeficientes dos modelos melhores
coef(bestsub, 23)
coef(bestsub, 11)
coef(bestsub, 21)
#-- selecionando apenas as variáveis dos melhores modelos
dt.mat.x <- data.frame(model.matrix(mod.full))
dt.mat.x.bic <- dt.mat.x[, summary.bestsub$which[11,]]
dt.mat.x.adjr2 <- dt.mat.x[, summary.bestsub$which[23,]]
dt.mat.x.cp <- dt.mat.x[, summary.bestsub$which[21,]]
dt.mat.x.bic <- data.frame(mort_infantil = dados3$mort_infantil, dt.mat.x.bic)
dt.mat.x.adjr2 <- data.frame(mort_infantil = dados3$mort_infantil, dt.mat.x.adjr2)
dt.mat.x.cp <- data.frame(mort_infantil = dados3$mort_infantil, dt.mat.x.cp)
#-- rodando modelos com variáveis selecionadas dos melhores modelos
mod.bic <- lm(mort_infantil ~ . - X.Intercept., data = dt.mat.x.bic)
summary(mod.bic)
mod.cp <- lm(mort_infantil ~ . - X.Intercept., data = dt.mat.x.cp)
summary(mod.cp)
mod.adjr2 <- lm(mort_infantil ~ . - X.Intercept., data = dt.mat.x.adjr2)
summary(mod.adjr2)
#------------------------------------------------------
#---- Backwards, forward e stepwise selection
#------------------------------------------------------
mod.full <- lm(mort_infantil ~ renda_per_capita
+ I(renda_per_capita^2)
+ I(renda_per_capita^3)
+ I(renda_per_capita^4)
+ I(renda_per_capita^5)
+ indice_gini
+ I(indice_gini^2)
+ I(indice_gini^3)
+ I(indice_gini^4)
+ I(indice_gini^5)
+ salario_medio_mensal
+ I(salario_medio_mensal^2)
+ I(salario_medio_mensal^3)
+ I(salario_medio_mensal^4)
+ I(salario_medio_mensal^5)
+ perc_criancas_extrem_pobres
+ perc_criancas_pobres
+ perc_pessoas_dom_agua_estogo_inadequados
+ perc_pessoas_dom_paredes_inadequadas
+ perc_pop_dom_com_coleta_lixo
+ perc_pop_rural
+ as.factor(Regiao)
+ as.factor(Regiao)*renda_per_capita, data = dados3)
summary(mod.full)
step1 <- step(mod.full, direction = "backward")
summary(step1)
step2 <- step(mod.full, direction = "forward")
summary(step2)
step3 <- step(mod.full, direction = "both")
summary(step3)
formula(step3)
mod.step3 <- lm(formula = formula(step3), data = dados3)
summary(mod.step3)
#----------------------------------------------------------------------------
#---- The end
#----------------------------------------------------------------------------
|
9dca3bb2def3f47478f371251358d91e2eed36da | b7c9b940bf964a1699517bd939dcd2603f9fbaae | /plot2.R | 6943c8c360379b50144369a8004e4dcab1e1832e | [] | no_license | manuagrawal/ExData_Plotting1 | 98230d4bd10fdf36da65ae6560f26ed3f3190413 | 6a0c1b024f8018ab2598e542e80814d533a09d89 | refs/heads/master | 2020-12-07T03:50:32.628440 | 2015-06-07T01:36:53 | 2015-06-07T01:36:53 | 36,996,352 | 0 | 0 | null | 2015-06-06T21:54:21 | 2015-06-06T21:54:21 | null | UTF-8 | R | false | false | 538 | r | plot2.R | library(sqldf)
# Read only the 1-2 Feb 2007 rows, filtering during the read via SQL (sqldf).
fileName <- "household_power_consumption.txt"
rawData<-read.csv.sql(fileName, sep=";",
                      sql="select * from file where Date in ('1/2/2007','2/2/2007')")
# Parse the date, then build a full POSIX timestamp from date + time.
rawData$Date <-as.Date(rawData$Date,format="%d/%m/%Y")
rawData$Time <-strptime(paste(rawData$Date,rawData$Time,sep=" "), format="%Y-%m-%d %H:%M:%S")
rawData$Weekday <-weekdays(rawData$Date)
# Single plot panel: global active power over time as a line chart.
par(mfcol=c(1,1))
with(rawData,plot(Time,Global_active_power,type="l",ylab="Global Active Power (kilowatts)",xlab=""))
# Copy the on-screen plot to a PNG file.
dev.copy(png,file="plot2.png")
dev.off()
824b8c9ef9a1f0cea86845350ea4c6c692096283 | 1da626ffc276dddffd4484b800d4fb44eb103c20 | /R/colormapMiss.R | c800ac75fa3baf1918bbec96c1859c5d06f09085 | [] | no_license | statistikat/VIM | e6d6bb91ddbb6bf02ac3786532e22b5982f04e9e | 497b373b2be3522b1143c3ec335c0910fc521dd5 | refs/heads/master | 2023-07-10T02:22:32.723575 | 2023-06-21T13:36:33 | 2023-06-21T13:36:33 | 12,780,450 | 62 | 10 | null | 2023-08-23T10:59:17 | 2013-09-12T09:59:16 | R | UTF-8 | R | false | false | 14,564 | r | colormapMiss.R | # ---------------------------------------
# Author: Andreas Alfons, Bernd Prantner
# and Daniel Schopfhauser
# Vienna University of Technology
# ---------------------------------------
#' Colored map with information about missing/imputed values
#'
#' Colored map in which the proportion or amount of missing/imputed values in
#' each region is coded according to a continuous or discrete color scheme.
#' The sequential color palette may thereby be computed in the *HCL* or
#' the *RGB* color space.
#'
#' The proportion or amount of missing/imputed values in `x` of each
#' region is coded according to a continuous or discrete color scheme in the
#' color range defined by `col`. In addition, the proportions or numbers
#' can be shown as labels in the regions.
#'
#' If `interactive` is `TRUE`, clicking in a region displays more
#' detailed information about missing/imputed values on the console. Clicking
#' outside the borders quits the interactive session.
#'
#' @rdname colormapMiss
#' @aliases colormapMiss colormapMissLegend
#' @param x a numeric vector.
#' @param region a vector or factor of the same length as `x` giving the
#' regions.
#' @param map an object of any class that contains polygons and provides its
#' own plot method (e.g., `"SpatialPolygons"` from package `sp`).
#' @param imp_index a logical-vector indicating which values of \sQuote{x} have
#' been imputed. If given, it is used for highlighting and the colors are
#' adjusted according to the given colors for imputed variables (see
#' `col`).
#' @param prop a logical indicating whether the proportion of missing/imputed
#' values should be used rather than the total amount.
#' @param polysRegion a numeric vector specifying the region that each polygon
#' belongs to.
#' @param range a numeric vector of length two specifying the range (minimum
#' and maximum) of the proportion or amount of missing/imputed values to be
#' used for the color scheme.
#' @param n for `colormapMiss`, the number of equally spaced cut-off
#' points for a discretized color scheme. If this is not a positive integer, a
#' continuous color scheme is used (the default). In the latter case, the
#' number of rectangles to be drawn in the legend can be specified in
#' `colormapMissLegend`. A reasonably large number makes it appear
#' continuously.
#' @param col the color range (start and end) to be used. RGB colors may be
#' specified as character strings or as objects of class
#' "[colorspace::RGB()]". HCL colors need to be specified as objects
#' of class "[colorspace::polarLUV()]". If only one color is
#' supplied, it is used as end color, while the start color is taken to be
#' transparent for RGB or white for HCL.
#' @param gamma numeric; the display *gamma* value (see
#' [colorspace::hex()]).
#' @param fixup a logical indicating whether the colors should be corrected to
#' valid RGB values (see [colorspace::hex()]).
#' @param coords a matrix or `data.frame` with two columns giving the
#' coordinates for the labels.
#' @param numbers a logical indicating whether the corresponding proportions or
#' numbers of missing/imputed values should be used as labels for the regions.
#' @param digits the number of digits to be used in the labels (in case of
#' proportions).
#' @param cex.numbers the character expansion factor to be used for the labels.
#' @param col.numbers the color to be used for the labels.
#' @param legend a logical indicating whether a legend should be plotted.
#' @param interactive a logical indicating whether more detailed information
#' about missing/imputed values should be displayed interactively (see
#' \sQuote{Details}).
#' @param xleft left *x* position of the legend.
#' @param ybottom bottom *y* position of the legend.
#' @param xright right *x* position of the legend.
#' @param ytop top *y* position of the legend.
#' @param cmap a list as returned by `colormapMiss` that contains the
#' required information for the legend.
#' @param horizontal a logical indicating whether the legend should be drawn
#' horizontally or vertically.
#' @param \dots further arguments to be passed to `plot`.
#' @return `colormapMiss` returns a list with the following components:
#' - nmiss a numeric vector containing the number of missing/imputed
#' values in each region.
#' - nobs a numeric vector containing the number of observations in
#' each region.
#' - pmiss a numeric vector containing the proportion of missing
#' values in each region.
#' - prop a logical indicating whether the proportion of
#' missing/imputed values have been used rather than the total amount.
#' - range the range of the proportion or amount of missing/imputed
#' values corresponding to the color range.
#' - n either a positive integer giving the number of equally spaced
#' cut-off points for a discretized color scheme, or `NULL` for a
#' continuous color scheme.
#' - start the start color of the color scheme.
#' - end the end color of the color scheme.
#' - space a character string giving the color space (either
#' `"rgb"` for RGB colors or `"hcl"` for HCL colors).
#' - gamma numeric; the display *gamma* value (see
#' [colorspace::hex()]).
#' - fixup a logical indicating whether the colors have been
#' corrected to valid RGB values (see [colorspace::hex()]).
#' @note Some of the argument names and positions have changed with versions
#' 1.3 and 1.4 due to extended functionality and for more consistency with
#' other plot functions in `VIM`. For back compatibility, the arguments
#' `cex.text` and `col.text` can still be supplied to \code{\dots{}}
#' and are handled correctly. Nevertheless, they are deprecated and no longer
#' documented. Use `cex.numbers` and `col.numbers` instead.
#' @author Andreas Alfons, modifications to show imputed values by Bernd
#' Prantner
#' @seealso [colSequence()], [growdotMiss()],
#' [mapMiss()]
#' @references M. Templ, A. Alfons, P. Filzmoser (2012) Exploring incomplete
#' data using visualization tools. *Journal of Advances in Data Analysis
#' and Classification*, Online first. DOI: 10.1007/s11634-011-0102-y.
#' @keywords hplot
#' @export
colormapMiss <- function(x, region, map, imp_index = NULL,
                         prop = TRUE, polysRegion = 1:length(x), range = NULL,
                         n = NULL, col = c("red","orange"),
                         gamma = 2.2, fixup = TRUE, coords = NULL,
                         numbers = TRUE, digits = 2, cex.numbers = 0.8,
                         col.numbers = par("fg"), legend = TRUE,
                         interactive = TRUE, ...) {
  check_data(x)
  x <- as.data.frame(x)
  # back compatibility: accept the deprecated 'cex.text'/'col.text' names
  # supplied via '...' when the new arguments are not given explicitly
  dots <- list(...)
  if(missing(cex.numbers) && "cex.text" %in% names(dots)) {
    cex.numbers <- dots$cex.text
  }
  if(missing(col.numbers) && "col.text" %in% names(dots)) {
    col.numbers <- dots$col.text
  }
  # initializations: decide whether imputed values (rather than missing
  # values) should be visualized, based on 'imp_index'
  imputed <- FALSE
  if(!is.null(imp_index)) {
    if(any(is.na(x))) {
      imputed <- FALSE
      warning("'imp_index' is given, but there are missing values in 'x'! 'imp_index' will be ignored.", call. = FALSE)
    } else {
      # a numeric 0/1 indicator vector is converted to logical; note that
      # range() returns a vector of length 2, hence the all() wrapper
      # (a bare vector comparison inside '&&' is an error since R 4.3.0
      # and silently used only the first element before)
      if(is.numeric(imp_index) && all(range(imp_index) == c(0, 1))) {
        imp_index <- as.logical(imp_index)
      } else if(!is.logical(imp_index)) {
        stop("The missing-index of the imputed Variable must be of the type logical")
      }
      imputed <- TRUE
    }
  }
  x <- as.vector(x)
  region <- as.factor(region)
  if(!is.null(coords)) { # error messages
    if(!(inherits(coords, c("data.frame","matrix"))))
      stop("'coords' must be a data.frame or matrix")
    if(ncol(coords) != 2) stop("'coords' must be 2-dimensional")
  }
  # a map supplied by name is looked up in the global environment
  if(is.character(map)) map <- get(map, envir=.GlobalEnv)
  prop <- isTRUE(prop)
  # check colors: if 'col' was left at its default pair, pick a single end
  # color depending on whether missing or imputed values are displayed;
  # identical() is required because 'col' may have length > 1 (a bare
  # vector comparison inside '||' is an error since R 4.3.0 and silently
  # used only the first element before)
  if(!is(col, "RGB") && !is(col, "polarLUV") &&
     (!is.character(col) || length(col) == 0 ||
        identical(col, c("red","orange")))) {
    if(!imputed) col <- "red"
    else col <- "orange"
  }
  if(is.character(col)) {
    # colors given as character strings: a single color is used as end
    # color with the background color as (transparent) start color
    if(length(col) == 1) {
      start <- par("bg")
      end <- col
    } else {
      start <- col[1]
      end <- col[2]
    }
    space <- "rgb"
  } else {
    space <- if(is(col, "RGB")) "rgb" else "hcl"
    if(nrow(coords(col)) == 1) {
      if(is(col, "RGB")) {
        # RGB colors: start from the background color
        start <- par("bg")
      } else {
        # HCL colors: start from white with the same hue
        start <- polarLUV(0, 0, col@coords[1, "H"])
      }
      end <- col
    } else {
      start <- col[1,]
      end <- col[2,]
    }
  }
  # compute number and proportions of missing/imputed values per region
  if(!imputed) nmiss <- tapply(x, list(region), countNA)
  else {
    getImp <- function(x) length(which(x))
    nmiss <- tapply(unlist(imp_index), list(region), getImp)
  }
  nobs <- tapply(x, list(region), length)
  pmiss <- 100*nmiss/nobs
  # check breakpoints
  if(is.null(range)) {
    range <- c(0, if(prop) ceiling(max(pmiss)) else max(nmiss))
  } else {
    # TODO: check 'range'
  }
  # get colors for regions
  n <- rep(n, length.out=1)
  if(isTRUE(n > 1)) {
    # discretized color scheme with n equally spaced categories
    breaks <- seq(range[1], range[2], length=n+1)
    cat <- cut(if(prop) pmiss else nmiss, breaks,
               labels=FALSE, include.lowest=TRUE)
    pcol <- seq(0, 1, length=n)
    cols <- colSequence(pcol, start, end, space, gamma=gamma, fixup=fixup)
    cols <- cols[cat]
  } else {
    # continuous color scheme
    n <- NULL
    pcol <- if(prop) pmiss else nmiss
    pcol <- (pcol - range[1])/diff(range)
    cols <- colSequence(pcol, start, end, space, gamma=gamma, fixup=fixup)
  }
  cols <- cols[polysRegion]
  # strip the deprecated 'cex.text'/'col.text' arguments from '...'
  # before forwarding to plot()
  localPlot <- function(..., cex.text, col.text) plot(...)
  localPlot(map, col=cols, ...)
  if(isTRUE(numbers)) {
    # number or percentage of missings as labels for regions
    if(is.null(coords)) coords <- coordinates(map)
    labs <- if(prop) paste(round(pmiss, digits), "%", sep="") else nmiss
    plabs <- labs[polysRegion]
    plabs[duplicated(polysRegion)] <- ""  # label each region only once
    text(coords, labels=plabs, cex=cex.numbers, col=col.numbers)
  }
  # useful statistics for legend
  cmap <- list(nmiss=nmiss, nobs=nobs, pmiss=pmiss, prop=prop, range=range,
               n=n, start=start, end=end, space=space, gamma=gamma, fixup=fixup)
  if(isTRUE(legend)) {
    # place the legend in the top left corner of the plot region
    usr <- par("usr")
    xrange <- usr[1:2]
    xdiff <- usr[2] - usr[1]
    yrange <- usr[3:4]
    ydiff <- usr[4] - usr[3]
    length <- 1/3
    height <- 0.1*length
    xleft <- xrange[1] + 0.02*xdiff
    xright <- xleft + length*xdiff
    ytop <- yrange[2] - 0.02*ydiff
    ybottom <- ytop - height*ydiff
    colormapMissLegend(xleft, ybottom, xright, ytop,
                       cmap, cex.numbers=cex.numbers, col.numbers=col.numbers)
  }
  if(isTRUE(interactive)) {
    # interactively report statistics for clicked regions until the user
    # clicks outside the plot borders
    cat("Click on a region to get more information about missings.\n")
    cat("To regain use of the R console, click outside the borders.\n")
    p <- locatorVIM()
    while(!is.null(p)) {
      p <- SpatialPoints(matrix(unlist(p), ncol=2))
      poly <- over(p, map)
      ind <- polysRegion[poly]
      if(!is.na(ind)) {
        if(!imputed) label <- "missings"
        else label <- "imputed missings"
        cat(paste("\n  ", levels(region)[ind], ":", sep=""))
        cat(paste("\n  Number of ", label, ": ", nmiss[ind]))
        cat(paste("\n  Number of observations:", nobs[ind]))
        cat(paste("\n  Proportion of ", label, ": ",
                  round(pmiss[ind], digits), "%\n", sep=""))
        p <- locatorVIM()
      } else p <- NULL
    }
  }
  # return statistics invisibly
  invisible(cmap)
}
## legend
#' @export colormapMissLegend
#' @rdname colormapMiss
colormapMissLegend <- function(xleft, ybottom, xright, ytop, cmap,
                               # range, prop = FALSE, col = "red",
                               n = 1000, horizontal = TRUE, digits = 2,
                               cex.numbers = 0.8, col.numbers = par("fg"),
                               ...) {
  # back compatibility: reconstruct 'cmap' from the deprecated arguments
  # 'range', 'prop' and 'col' (supplied via '...') if it is missing
  dots <- list(...)
  dn <- names(dots)
  if(missing(cmap)) {
    if("range" %in% dn) range <- dots$range
    else stop("argument 'range' is missing, with no default")
    prop <- if("prop" %in% dn) dots$prop else FALSE
    col <- if("col" %in% dn) dots$col else "red"
    # NOTE(review): gamma=2.4 here differs from the 2.2 default used by
    # colormapMiss() -- presumably intentional for back compatibility
    # with older versions; verify against the package history
    cmap <- list(prop=prop, range=range, n=NULL, start=par("bg"),
                 end=col, space="rgb", gamma=2.4, fixup=TRUE)
  }
  # back compatibility: deprecated 'cex.text'/'col.text' names
  if(missing(cex.numbers) && "cex.text" %in% dn) cex.numbers <- dots$cex.text
  if(missing(col.numbers) && "col.text" %in% dn) col.numbers <- dots$col.text
  # initializations
  prop <- isTRUE(cmap$prop)
  range <- cmap$range
  cont <- is.null(cmap$n)  # is legend for continuous color scheme?
  n <- if(cont) n else cmap$n
  n <- rep(n, length.out=1)
  # allow to plot legend outside plot region; restore on exit
  op <- par(xpd=TRUE)
  on.exit(par(op))
  # compute steps for legend
  length <- xright - xleft
  height <- ytop - ybottom
  # compute colors for legend
  col <- colSequence(seq(0, 1, length=n), cmap$start, cmap$end,
                     cmap$space, gamma=cmap$gamma, fixup=cmap$fixup)
  # compute grid and position of legend: a continuous scheme is only
  # annotated at its endpoints, a discretized one at every cut-off point
  grid <- seq(0, 1, length=n+1)
  if(cont) {
    pos <- 0:1
    ann <- range
  } else {
    pos <- grid
    ann <- seq(range[1], range[2], length=n+1)
  }
  # fix: 'digits' must be passed by name -- positionally it would be
  # matched to format()'s 'trim' argument and the precision silently ignored
  ann <- if(prop) paste(format(ann, digits = digits), "%", sep="") else ann
  # plot legend
  # TODO: check space for labels
  if(horizontal) {
    grid <- grid*length + xleft
    if(cont) {
      # continuous scheme: borderless color strips with one outer frame
      rect(grid[-(n+1)], ybottom, grid[-1], ytop, col=col, border=NA)
      rect(xleft, ybottom, xright, ytop, border=NULL)
    } else rect(grid[-(n+1)], ybottom, grid[-1], ytop, col=col, border=NULL)
    pos <- pos*length + xleft
    # labels below the horizontal legend
    text(pos, ybottom-0.25*height, labels=ann,
         adj=c(0.5,1), cex=cex.numbers, col=col.numbers)
  } else {
    grid <- grid*height + ybottom
    if(cont) {
      rect(xleft, grid[-(n+1)], xright, grid[-1], col=col, border=NA)
      rect(xleft, ybottom, xright, ytop, border=NULL)
    } else rect(xleft, grid[-(n+1)], xright, grid[-1], col=col, border=NULL)
    pos <- pos*height + ybottom
    # labels to the right of the vertical legend
    text(xright+0.25*length, pos, labels=ann,
         adj=c(0,0.5), cex=cex.numbers, col=col.numbers)
  }
  invisible()
}
|
d8d99ac05a50bbca1bbce2ed22e5828ad18b42ff | 10c97b033b7d93d500a4dd563234eef128dc43ab | /tests/testthat/www.fleaflicker.com/api/FetchLeagueTransactions-cade24.R | 533dc1b11ecaa76dad259edfc54c5589dc890e2f | [
"MIT"
] | permissive | tonyelhabr/ffscrapr | f38e7c87bb65ddbf6e1c9736c16e56944760af46 | 4e0944da56d8890c441c4abe9c25bc2477a1e388 | refs/heads/main | 2023-03-10T08:48:01.840281 | 2020-12-16T06:19:07 | 2020-12-16T06:19:07 | 328,791,006 | 0 | 0 | NOASSERTION | 2021-01-11T23:59:24 | 2021-01-11T21:03:44 | null | UTF-8 | R | false | false | 136,652 | r | FetchLeagueTransactions-cade24.R | structure(list(
url = "https://www.fleaflicker.com/api/FetchLeagueTransactions?sport=NFL&league_id=206154&team_id=1373475&result_offset=210",
status_code = 200L, headers = structure(list(
date = "Tue, 24 Nov 2020 01:19:57 GMT",
`content-type` = "application/json;charset=utf-8", vary = "accept-encoding",
`content-encoding` = "gzip"
), class = c(
"insensitive",
"list"
)), all_headers = list(list(
status = 200L, version = "HTTP/2",
headers = structure(list(
date = "Tue, 24 Nov 2020 01:19:57 GMT",
`content-type` = "application/json;charset=utf-8",
vary = "accept-encoding", `content-encoding` = "gzip"
), class = c(
"insensitive",
"list"
))
)), cookies = structure(list(
domain = logical(0),
flag = logical(0), path = logical(0), secure = logical(0),
expiration = structure(numeric(0), class = c(
"POSIXct",
"POSIXt"
)), name = logical(0), value = logical(0)
), row.names = integer(0), class = "data.frame"),
content = charToRaw("{\"items\":[{\"timeEpochMilli\":\"1568196000000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":11557,\"nameFull\":\"Neville Hewitt\",\"nameShort\":\"N. Hewitt\",\"proTeamAbbreviation\":\"NYJ\",\"position\":\"LB\",\"nflByeWeek\":10,\"news\":[{\"timeEpochMilli\":\"1606101993000\",\"contents\":\"Hewitt racked up 11 tackles (nine solo) and a tackle for loss in Sunday's 34-28 loss to the Chargers.\",\"analysis\":\"Hewitt was one of three Jets defenders to record double-digit tackles as the Chargers dominated time of possession, joining Harvey Langi and Ashtyn Davis. The 27-year-old linebacker has already set a new career high with 85 tackles and still has six games left to build on that total, starting with a Week 12 tilt against Miami.\",\"title\":\"Climbs to career-best 85 tackles\"}],\"nameFirst\":\"Neville\",\"nameLast\":\"Hewitt\",\"proTeam\":{\"abbreviation\":\"NYJ\",\"location\":\"New York\",\"name\":\"Jets\"},\"positionEligibility\":[\"LB\",\"LB\"]},\"requestedGames\":[{\"game\":{\"id\":6414,\"away\":{\"abbreviation\":\"NYJ\",\"location\":\"New York\",\"name\":\"Jets\"},\"home\":{\"abbreviation\":\"LAC\",\"location\":\"Los Angeles\",\"name\":\"Chargers\"},\"startTimeEpochMilli\":\"1606079100000\",\"status\":\"FINAL_SCORE\",\"awayScore\":28,\"homeScore\":34,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":2.0,\"formatted\":\"2\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":9.0,\"formatted\":\"9\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":3.21,\"formatted\":\"3.2\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":3.02,\"formatted\":\"3\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.04,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.03,\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted 
Tackles\"},\"value\":{\"value\":3.21,\"formatted\":\"3.2\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":3.02,\"formatted\":\"3\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.04,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.03,\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1373393,\"name\":\"Philadelphia Fire\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373393_0_150x150.jpg\",\"initials\":\"PF\"},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":167,\"positions\":[{\"position\":{\"label\":\"LB\",\"group\":\"START\",\"eligibility\":[\"LB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":36,\"formatted\":\"36\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":8.5,\"formatted\":\"8.5\"},\"duration\":1},{\"value\":{\"value\":11.0,\"formatted\":\"11\"},\"duration\":3},{\"value\":{\"value\":10.9,\"formatted\":\"10.9\"},\"duration\":5}],\"isKeeper\":true,\"seasonTotal\":{\"value\":94.0,\"formatted\":\"94\"},\"seasonAverage\":{\"value\":10.444445,\"formatted\":\"10.44\"},\"seasonsStandartDeviation\":{\"value\":3.130888,\"formatted\":\"3.13\"},\"seasonConsistency\":\"RATING_VERY_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1568196000000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":13938,\"nameFull\":\"Shaun Dion Hamilton\",\"nameShort\":\"S. 
Hamilton\",\"proTeamAbbreviation\":\"WAS\",\"position\":\"LB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13938.png\",\"nflByeWeek\":8,\"nameFirst\":\"Shaun Dion\",\"nameLast\":\"Hamilton\",\"proTeam\":{\"abbreviation\":\"WAS\",\"location\":\"Washington\",\"name\":\"Football Team\"},\"positionEligibility\":[\"LB\",\"LB\"]},\"requestedGames\":[{\"game\":{\"id\":6306,\"away\":{\"abbreviation\":\"CIN\",\"location\":\"Cincinnati\",\"name\":\"Bengals\"},\"home\":{\"abbreviation\":\"WAS\",\"location\":\"Washington\",\"name\":\"Football Team\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":9,\"homeScore\":20,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.62,\"formatted\":\"0.6\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":0.95,\"formatted\":\"0.9\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.02,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.04,\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.62,\"formatted\":\"0.6\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":0.95,\"formatted\":\"0.9\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.02,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.04,\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":605,\"positions\":[{\"position\":{\"label\":\"LB\",\"group\":\"START\",\"eligibility\":[\"LB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":106,\"formatted\":\"106\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":5.5,\"formatted\":\"5.5\"},\"duration\":1,\"overPerforming\":true},{\"value\":{\"value\":3.5,\"formatted\":\"3.5\"},\"duration\":3},{\"value\":{\"value\":3.9,\"formatted\":\"3.9\"},\"duration\":5}],\"seasonTotal\":{\"value\":20.5,\"formatted\":\"20.5\"},\"seasonAverage\":{\"value\":3.4166667,\"formatted\":\"3.42\"},\"seasonsStandartDeviation\":{\"value\":1.7892424,\"formatted\":\"1.79\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1568196000000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":11507,\"nameFull\":\"Damiere Byrd\",\"nameShort\":\"D. 
Byrd\",\"proTeamAbbreviation\":\"NE\",\"position\":\"WR\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/11507.png\",\"news\":[{\"timeEpochMilli\":\"1606082994000\",\"contents\":\"Byrd caught six of seven targets for 132 yards and a touchdown while adding an 11-yard carry in Sunday's 27-20 loss to Houston.\",\"analysis\":\"Byrd was on the receiving end of Cam Newton's first passing touchdown to a wide receiver this season, hauling in a 42-yard deep ball in the third quarter. This season-best performance from Byrd comes on the heels of a goose egg against Baltimore, and given Newton's struggles throwing the ball coming into this game, Byrd will be tough to trust against the Cardinals in Week 12 despite the momentum from this breakout effort.\",\"title\":\"Scores long TD in loss\"},{\"timeEpochMilli\":\"1606099411000\",\"contents\":\"New England Patriots wide receiver Damiere Byrd put together his best game of the season versus the Houston Texans in Week 11. He hauled in six of his seven targets for 132 yards and one touchdown in the loss. Those 132 yards set a career high for Byrd who has not gone over 100 yards all season until now. Fantasy owners should not get too excited about this performance. 
Byrd is worth adding in deep leagues, but he still doesn't produce anything more than low flex value most weeks.\",\"url\":\"https://www.rotoballer.com/player-news/damiere-byrd-sets-career-high-in-receiving-yards/806969\",\"title\":\"Damiere Byrd Sets Career High In Receiving Yards\"}],\"nameFirst\":\"Damiere\",\"nameLast\":\"Byrd\",\"proTeam\":{\"abbreviation\":\"NE\",\"location\":\"New England\",\"name\":\"Patriots\"},\"positionEligibility\":[\"WR\"]},\"requestedGames\":[{\"game\":{\"id\":6300,\"away\":{\"abbreviation\":\"NE\",\"location\":\"New England\",\"name\":\"Patriots\"},\"home\":{\"abbreviation\":\"HOU\",\"location\":\"Houston\",\"name\":\"Texans\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":20,\"homeScore\":27,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":85.71429,\"formatted\":\"6/7\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":132.0,\"formatted\":\"132\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}}],\"statsProjected\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"2/2\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"},\"value\":{\"value\":36.26,\"formatted\":\"36.3\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.02,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.1,\"formatted\":\"0.1\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"2/2\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":36.26,\"formatted\":\"36.3\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.02,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"value\":0.1,\"formatted\":\"0.1\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1371776,\"name\":\"Winter Hill Black Shamrocks\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1371776_0_150x150.jpg\",\"initials\":\"WH\"},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":366,\"positions\":[{\"position\":{\"label\":\"WR\",\"group\":\"START\",\"eligibility\":[\"WR\"],\"colors\":[\"DRAFT_BOARD_BLUE\"]},\"ordinal\":86,\"formatted\":\"86\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":11.0,\"formatted\":\"11\"},\"duration\":1,\"overPerforming\":true},{\"value\":{\"value\":6.67,\"formatted\":\"6.67\"},\"duration\":3},{\"value\":{\"value\":7.36,\"formatted\":\"7.36\"},\"duration\":5}],\"seasonTotal\":{\"value\":54.7,\"formatted\":\"54.7\"},\"seasonAverage\":{\"value\":6.8375,\"formatted\":\"6.84\"},\"seasonsStandartDeviation\":{\"value\":4.828027,\"formatted\":\"4.83\"},\"seasonConsistency\":\"RATING_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1568196000000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":14188,\"nameFull\":\"Chris Board\",\"nameShort\":\"C. 
Board\",\"proTeamAbbreviation\":\"BAL\",\"position\":\"LB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/14188.png\",\"nflByeWeek\":7,\"nameFirst\":\"Chris\",\"nameLast\":\"Board\",\"proTeam\":{\"abbreviation\":\"BAL\",\"location\":\"Baltimore\",\"name\":\"Ravens\"},\"positionEligibility\":[\"LB\",\"LB\"]},\"requestedGames\":[{\"game\":{\"id\":6301,\"away\":{\"abbreviation\":\"TEN\",\"location\":\"Tennessee\",\"name\":\"Titans\"},\"home\":{\"abbreviation\":\"BAL\",\"location\":\"Baltimore\",\"name\":\"Ravens\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":30,\"homeScore\":24,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"LOSE\",\"awayResult\":\"WIN\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":540,\"positions\":[{\"position\":{\"label\":\"LB\",\"group\":\"START\",\"eligibility\":[\"LB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":95,\"formatted\":\"95\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":3.5,\"formatted\":\"3.5\"},\"duration\":1},{\"value\":{\"value\":4.83,\"formatted\":\"4.83\"},\"duration\":3},{\"value\":{\"value\":4.2,\"formatted\":\"4.2\"},\"duration\":5}],\"seasonTotal\":{\"value\":28.5,\"formatted\":\"28.5\"},\"seasonAverage\":{\"value\":3.5625,\"formatted\":\"3.56\"},\"seasonsStandartDeviation\":{\"value\":2.228193,\"formatted\":\"2.23\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1567470599000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":14705,\"nameFull\":\"Cody Barton\",\"nameShort\":\"C. 
Barton\",\"proTeamAbbreviation\":\"SEA\",\"position\":\"LB\",\"nflByeWeek\":6,\"nameFirst\":\"Cody\",\"nameLast\":\"Barton\",\"proTeam\":{\"abbreviation\":\"SEA\",\"location\":\"Seattle\",\"name\":\"Seahawks\"},\"positionEligibility\":[\"LB\",\"LB\"]},\"requestedGames\":[{\"game\":{\"id\":6299,\"away\":{\"abbreviation\":\"ARI\",\"location\":\"Arizona\",\"name\":\"Cardinals\"},\"home\":{\"abbreviation\":\"SEA\",\"location\":\"Seattle\",\"name\":\"Seahawks\"},\"startTimeEpochMilli\":\"1605835200000\",\"status\":\"FINAL_SCORE\",\"awayScore\":21,\"homeScore\":28,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.94,\"formatted\":\"0.9\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":-0.13,\"formatted\":\"-0.1\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.03,\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.94,\"formatted\":\"0.9\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":-0.13,\"formatted\":\"-0.1\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.03,\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":504,\"positions\":[{\"position\":{\"label\":\"LB\",\"group\":\"START\",\"eligibility\":[\"LB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":89,\"formatted\":\"89\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":1.5,\"formatted\":\"1.5\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":3.0,\"formatted\":\"3\"},\"duration\":3},{\"value\":{\"value\":6.4,\"formatted\":\"6.4\"},\"duration\":5}],\"seasonTotal\":{\"value\":33.5,\"formatted\":\"33.5\"},\"seasonAverage\":{\"value\":5.5833335,\"formatted\":\"5.58\"},\"seasonsStandartDeviation\":{\"value\":5.1753955,\"formatted\":\"5.18\"},\"seasonConsistency\":\"RATING_VERY_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1567470593000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":14737,\"nameFull\":\"Sione Takitaki\",\"nameShort\":\"S. 
Takitaki\",\"proTeamAbbreviation\":\"CLE\",\"position\":\"LB\",\"nflByeWeek\":9,\"news\":[{\"timeEpochMilli\":\"1606139985000\",\"contents\":\"Takitaki two tackles, one for a loss, and an interception return for a touchdown in Sunday's 22-17 win over Philadelphia.\",\"analysis\":\"Takitaki had the game's first score, stepping in front of Miles Sanders to snatch a lazy lob from Carson Wentz and never broke stride for a 50-yard touchdown return. It was the second-year linebacker's first interception.\",\"title\":\"Contributes defensive TD\"},{\"timeEpochMilli\":\"1606083070000\",\"contents\":\"The Cleveland Browns Defense played extremely well in a 22-17 win over the Philadelphia Eagles in Week 11. Cleveland's defense notched five sacks, a safety, and forced three turnovers in the win over a Philly offense that struggled for much of the afternoon. Fantasy managers who started Cleveland's defense were also treated to a defensive touchdown when Sione Takitaki returned an interception 50 yards for a score in the second quarter. 
Olivier Vernon led the way with three sacks in a game where...\",\"url\":\"https://www.rotoballer.com/player-news/browns-defense-comes-up-big-in-week-11/806813\",\"title\":\"Browns Defense Comes Up Big In Week 11\"}],\"nameFirst\":\"Sione\",\"nameLast\":\"Takitaki\",\"proTeam\":{\"abbreviation\":\"CLE\",\"location\":\"Cleveland\",\"name\":\"Browns\"},\"positionEligibility\":[\"LB\",\"LB\"]},\"requestedGames\":[{\"game\":{\"id\":6303,\"away\":{\"abbreviation\":\"PHI\",\"location\":\"Philadelphia\",\"name\":\"Eagles\"},\"home\":{\"abbreviation\":\"CLE\",\"location\":\"Cleveland\",\"name\":\"Browns\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":17,\"homeScore\":22,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":3.0,\"formatted\":\"3\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":1.02,\"formatted\":\"1\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":2.86,\"formatted\":\"2.9\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.02,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.02,\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":1.02,\"formatted\":\"1\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":2.86,\"formatted\":\"2.9\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.02,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.02,\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":419,\"positions\":[{\"position\":{\"label\":\"LB\",\"group\":\"START\",\"eligibility\":[\"LB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":71,\"formatted\":\"71\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":4.5,\"formatted\":\"4.5\"},\"duration\":1},{\"value\":{\"value\":3.83,\"formatted\":\"3.83\"},\"duration\":3,\"underPerforming\":true},{\"value\":{\"value\":4.9,\"formatted\":\"4.9\"},\"duration\":5}],\"seasonTotal\":{\"value\":48.0,\"formatted\":\"48\"},\"seasonAverage\":{\"value\":5.3333335,\"formatted\":\"5.33\"},\"seasonsStandartDeviation\":{\"value\":2.0275874,\"formatted\":\"2.03\"},\"seasonConsistency\":\"RATING_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1567469845000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":14500,\"nameFull\":\"Chad Beebe\",\"nameShort\":\"C. Beebe\",\"proTeamAbbreviation\":\"MIN\",\"position\":\"WR\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/14500.png\",\"nflByeWeek\":7,\"news\":[{\"timeEpochMilli\":\"1606170389000\",\"contents\":\"Beebe did not have a reception or target in Sunday's loss to Dallas. 
He played 17 snaps on offense.\",\"analysis\":\"The Vikings utilized two tight ends more with Irv Smith back from a groin injury and the game flow didn't dictate the use of three-receiver sets as much. Beebe remains the third receiver, but he could have a larger role this week if Adam Thielen misses time after landing on the COVID-19 list.\",\"title\":\"No receptions Sunday\"}],\"nameFirst\":\"Chad\",\"nameLast\":\"Beebe\",\"proTeam\":{\"abbreviation\":\"MIN\",\"location\":\"Minnesota\",\"name\":\"Vikings\"},\"positionEligibility\":[\"WR\"]},\"requestedGames\":[{\"game\":{\"id\":6309,\"away\":{\"abbreviation\":\"DAL\",\"location\":\"Dallas\",\"name\":\"Cowboys\"},\"home\":{\"abbreviation\":\"MIN\",\"location\":\"Minnesota\",\"name\":\"Vikings\"},\"startTimeEpochMilli\":\"1606080300000\",\"status\":\"FINAL_SCORE\",\"awayScore\":31,\"homeScore\":28,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"LOSE\",\"awayResult\":\"WIN\"},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"1/1\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"},\"value\":{\"value\":15.64,\"formatted\":\"15.6\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.07,\"formatted\":\"0.1\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"1/1\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":15.64,\"formatted\":\"15.6\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"value\":0.07,\"formatted\":\"0.1\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":650,\"positions\":[{\"position\":{\"label\":\"WR\",\"group\":\"START\",\"eligibility\":[\"WR\"],\"colors\":[\"DRAFT_BOARD_BLUE\"]},\"ordinal\":156,\"formatted\":\"156\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":3.1,\"formatted\":\"3.1\"},\"duration\":1},{\"value\":{\"value\":2.22,\"formatted\":\"2.22\"},\"duration\":3},{\"value\":{\"value\":2.58,\"formatted\":\"2.58\"},\"duration\":5}],\"seasonTotal\":{\"value\":15.599998,\"formatted\":\"15.6\"},\"seasonAverage\":{\"value\":2.5999997,\"formatted\":\"2.6\"},\"seasonsStandartDeviation\":{\"value\":0.86071754,\"formatted\":\"0.86\"},\"seasonConsistency\":\"RATING_VERY_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1567469705000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":13216,\"nameFull\":\"Blake Jarwin\",\"nameShort\":\"B. 
Jarwin\",\"proTeamAbbreviation\":\"DAL\",\"position\":\"TE\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13216.png\",\"nflByeWeek\":10,\"injury\":{\"typeAbbreviaition\":\"IR\",\"description\":\"Knee - ACL\",\"severity\":\"OUT\",\"typeFull\":\"Injured Reserve\"},\"nameFirst\":\"Blake\",\"nameLast\":\"Jarwin\",\"proTeam\":{\"abbreviation\":\"DAL\",\"location\":\"Dallas\",\"name\":\"Cowboys\"},\"positionEligibility\":[\"TE\"]},\"requestedGames\":[{\"game\":{\"id\":6309,\"away\":{\"abbreviation\":\"DAL\",\"location\":\"Dallas\",\"name\":\"Cowboys\"},\"home\":{\"abbreviation\":\"MIN\",\"location\":\"Minnesota\",\"name\":\"Vikings\"},\"startTimeEpochMilli\":\"1606080300000\",\"status\":\"FINAL_SCORE\",\"awayScore\":31,\"homeScore\":28,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"LOSE\",\"awayResult\":\"WIN\"},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1373973,\"name\":\"Red River Land Thunder\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373973_0_150x150.jpg\",\"initials\":\"RR\"},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":857,\"positions\":[{\"position\":{\"label\":\"TE\",\"group\":\"START\",\"eligibility\":[\"TE\"],\"colors\":[\"DRAFT_BOARD_YELLOW\"]},\"ordinal\":101,\"formatted\":\"101\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":2.7,\"formatted\":\"2.7\"},\"duration\":1},{\"value\":{\"value\":2.7,\"formatted\":\"2.7\"},\"duration\":3},{\"value\":{\"value\":2.7,\"formatted\":\"2.7\"},\"duration\":5}],\"isKeeper\":true,\"seasonTotal\":{\"value\":2.7,\"formatted\":\"2.7\"},\"seasonAverage\":{\"value\":2.7,\"formatted\":\"2.7\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1567469380000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":12247,\"nameFull\":\"Dwayne Washington\",\"nameShort\":\"D. 
Washington\",\"proTeamAbbreviation\":\"NO\",\"position\":\"RB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/12247.png\",\"nflByeWeek\":6,\"injury\":{\"typeAbbreviaition\":\"OUT\",\"description\":\"Back\",\"severity\":\"OUT\",\"typeFull\":\"Out\"},\"nameFirst\":\"Dwayne\",\"nameLast\":\"Washington\",\"proTeam\":{\"abbreviation\":\"NO\",\"location\":\"New Orleans\",\"name\":\"Saints\"},\"positionEligibility\":[\"RB\"]},\"requestedGames\":[{\"game\":{\"id\":6304,\"away\":{\"abbreviation\":\"ATL\",\"location\":\"Atlanta\",\"name\":\"Falcons\"},\"home\":{\"abbreviation\":\"NO\",\"location\":\"New Orleans\",\"name\":\"Saints\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":9,\"homeScore\":24,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RUSHER\",\"rankFantasy\":{\"ordinal\":842,\"positions\":[{\"position\":{\"label\":\"RB\",\"group\":\"START\",\"eligibility\":[\"RB\"],\"colors\":[\"DRAFT_BOARD_GREEN\"]},\"ordinal\":137,\"formatted\":\"137\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":0.9,\"formatted\":\"0.9\"},\"duration\":1},{\"value\":{\"value\":1.03,\"formatted\":\"1.03\"},\"duration\":3},{\"value\":{\"value\":1.03,\"formatted\":\"1.03\"},\"duration\":5}],\"seasonTotal\":{\"value\":3.1,\"formatted\":\"3.1\"},\"seasonAverage\":{\"value\":1.0333333,\"formatted\":\"1.03\"},\"seasonsStandartDeviation\":{\"value\":0.3399346,\"formatted\":\"0.34\"},\"seasonConsistency\":\"RATING_VERY_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1567366884000\",\"transaction\":{\"type\":\"TRANSACTION_TRADE\",\"draftPick\":{\"season\":2020,\"round\":4},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"tradeId\":4967150}},{\"timeEpochMilli\":\"1567366884000\",\"transaction\":{\"type\":\"TRANSACTION_TRADE\",\"player\":{\"proPlayer\":{\"id\":14191,\"nameFull\":\"Gus Edwards\",\"nameShort\":\"G. 
Edwards\",\"proTeamAbbreviation\":\"BAL\",\"position\":\"RB\",\"nflByeWeek\":7,\"news\":[{\"timeEpochMilli\":\"1606093246000\",\"contents\":\"Edwards logged three carries for six yards in Week 11 against the Titans.\",\"analysis\":\"Edwards was out-carried by J.K. Dobbins 15-3 and made no notable contributions to the outcome of the game. He did get one carry more than Mark Ingram, who has seen his role in the offense diminished. Even so, since Ingram returned in Week 10 against New England, Edwards has commanded only 10 rushing attempts across two contests.\",\"title\":\"Losing grip on role\"},{\"timeEpochMilli\":\"1606094086000\",\"contents\":\"Baltimore Ravens running back Gus Edwards rushed three times for six yards in Sunday's loss to the Tennessee Titans. He was on the field for just 13 snaps. Maybe it was a one-game blip, or maybe J.K. Dobbins has assumed the lead running back role in Baltimore and isn't going to give it back. Don't drop Edwards, who still has deep league value thanks to his ability to find his way into the end zone, but it might be a good idea to keep him on the bench in Week 12.\",\"url\":\"https://www.rotoballer.com/player-news/gus-edwards-nonfactor-in-week-11/806917\",\"title\":\"Gus Edwards Nonfactor In Week 11\"}],\"nameFirst\":\"Gus\",\"nameLast\":\"Edwards\",\"hasLockedPremiumContent\":true,\"proTeam\":{\"abbreviation\":\"BAL\",\"location\":\"Baltimore\",\"name\":\"Ravens\"},\"positionEligibility\":[\"RB\"]},\"requestedGames\":[{\"game\":{\"id\":6301,\"away\":{\"abbreviation\":\"TEN\",\"location\":\"Tennessee\",\"name\":\"Titans\"},\"home\":{\"abbreviation\":\"BAL\",\"location\":\"Baltimore\",\"name\":\"Ravens\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":30,\"homeScore\":24,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"LOSE\",\"awayResult\":\"WIN\"},\"stats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing 
Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":6.0,\"formatted\":\"6\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":38.75,\"formatted\":\"38.8\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":3.16,\"formatted\":\"3.2\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.16,\"formatted\":\"0.2\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":38.75,\"formatted\":\"38.8\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":3.16,\"formatted\":\"3.2\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.16,\"formatted\":\"0.2\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1374255,\"name\":\"Mushroom City 
Karts\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1374255_0_150x150.jpg\",\"initials\":\"MC\"},\"displayGroup\":\"RUSHER\",\"rankFantasy\":{\"ordinal\":253,\"positions\":[{\"position\":{\"label\":\"RB\",\"group\":\"START\",\"eligibility\":[\"RB\"],\"colors\":[\"DRAFT_BOARD_GREEN\"]},\"ordinal\":40,\"formatted\":\"40\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":9.8,\"formatted\":\"9.8\"},\"duration\":1},{\"value\":{\"value\":13.63,\"formatted\":\"13.63\"},\"duration\":3,\"overPerforming\":true},{\"value\":{\"value\":10.7,\"formatted\":\"10.7\"},\"duration\":5}],\"isKeeper\":true,\"seasonTotal\":{\"value\":75.7,\"formatted\":\"75.7\"},\"seasonAverage\":{\"value\":8.411111,\"formatted\":\"8.41\"},\"seasonsStandartDeviation\":{\"value\":4.589952,\"formatted\":\"4.59\"}},\"team\":{\"id\":1374255,\"name\":\"Mushroom City Karts\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1374255_0_150x150.jpg\",\"initials\":\"MC\"},\"tradeId\":4967150}},{\"timeEpochMilli\":\"1567366835000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":7360,\"nameFull\":\"Prince Amukamara\",\"nameShort\":\"P. 
Amukamara\",\"proTeamAbbreviation\":\"ARI\",\"position\":\"CB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/7360.png\",\"nflByeWeek\":8,\"nameFirst\":\"Prince\",\"nameLast\":\"Amukamara\",\"proTeam\":{\"abbreviation\":\"ARI\",\"location\":\"Arizona\",\"name\":\"Cardinals\"},\"positionEligibility\":[\"CB\"]},\"requestedGames\":[{\"game\":{\"id\":6299,\"away\":{\"abbreviation\":\"ARI\",\"location\":\"Arizona\",\"name\":\"Cardinals\"},\"home\":{\"abbreviation\":\"SEA\",\"location\":\"Seattle\",\"name\":\"Seahawks\"},\"startTimeEpochMilli\":\"1605835200000\",\"status\":\"FINAL_SCORE\",\"awayScore\":21,\"homeScore\":28,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1567366803000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":11211,\"nameFull\":\"Henry Anderson\",\"nameShort\":\"H. Anderson\",\"proTeamAbbreviation\":\"NYJ\",\"position\":\"IL\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/11211.png\",\"nflByeWeek\":10,\"nameFirst\":\"Henry\",\"nameLast\":\"Anderson\",\"proTeam\":{\"abbreviation\":\"NYJ\",\"location\":\"New York\",\"name\":\"Jets\"},\"positionEligibility\":[\"DE\",\"IL\"]},\"requestedGames\":[{\"game\":{\"id\":6414,\"away\":{\"abbreviation\":\"NYJ\",\"location\":\"New York\",\"name\":\"Jets\"},\"home\":{\"abbreviation\":\"LAC\",\"location\":\"Los Angeles\",\"name\":\"Chargers\"},\"startTimeEpochMilli\":\"1606079100000\",\"status\":\"FINAL_SCORE\",\"awayScore\":28,\"homeScore\":34,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":4.0,\"formatted\":\"4\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.5,\"formatted\":\"0.5\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":1.15,\"formatted\":\"1.1\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":-0.02,\"formatted\":\"-0\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.05,\"formatted\":\"0.1\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted 
Tackles\"},\"value\":{\"value\":1.15,\"formatted\":\"1.1\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":-0.02,\"formatted\":\"-0\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.05,\"formatted\":\"0.1\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1374252,\"name\":\"Central City Crusaders\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1374252_0_150x150.jpg\",\"initials\":\"CC\"},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":549,\"positions\":[{\"position\":{\"label\":\"IL\",\"group\":\"START\",\"eligibility\":[\"IL\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":82,\"formatted\":\"82\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":8.0,\"formatted\":\"8\"},\"duration\":1,\"overPerforming\":true},{\"value\":{\"value\":6.33,\"formatted\":\"6.33\"},\"duration\":3,\"overPerforming\":true},{\"value\":{\"value\":4.3,\"formatted\":\"4.3\"},\"duration\":5}],\"seasonTotal\":{\"value\":27.0,\"formatted\":\"27\"},\"seasonAverage\":{\"value\":3.375,\"formatted\":\"3.38\"},\"seasonsStandartDeviation\":{\"value\":2.619041,\"formatted\":\"2.62\"},\"seasonConsistency\":\"RATING_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1567363855000\",\"transaction\":{\"type\":\"TRANSACTION_TRADE\",\"player\":{\"proPlayer\":{\"id\":13860,\"nameFull\":\"Josey 
Jewell\",\"nameShort\":\"J. Jewell\",\"proTeamAbbreviation\":\"DEN\",\"position\":\"LB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13860.png\",\"nameFirst\":\"Josey\",\"nameLast\":\"Jewell\",\"proTeam\":{\"abbreviation\":\"DEN\",\"location\":\"Denver\",\"name\":\"Broncos\"},\"positionEligibility\":[\"LB\",\"LB\"]},\"requestedGames\":[{\"game\":{\"id\":6413,\"away\":{\"abbreviation\":\"MIA\",\"location\":\"Miami\",\"name\":\"Dolphins\"},\"home\":{\"abbreviation\":\"DEN\",\"location\":\"Denver\",\"name\":\"Broncos\"},\"startTimeEpochMilli\":\"1606079100000\",\"status\":\"FINAL_SCORE\",\"awayScore\":13,\"homeScore\":20,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":2.0,\"formatted\":\"2\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":2.29,\"formatted\":\"2.3\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":2.97,\"formatted\":\"3\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.15,\"formatted\":\"0.1\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":2.29,\"formatted\":\"2.3\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":2.97,\"formatted\":\"3\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.15,\"formatted\":\"0.1\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1373993,\"name\":\"Boomtown Sly Foxes\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373993_0_150x150.jpg\",\"initials\":\"BS\"},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":116,\"positions\":[{\"position\":{\"label\":\"LB\",\"group\":\"START\",\"eligibility\":[\"LB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":30,\"formatted\":\"30\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":10.0,\"formatted\":\"10\"},\"duration\":1},{\"value\":{\"value\":13.0,\"formatted\":\"13\"},\"duration\":3},{\"value\":{\"value\":9.4,\"formatted\":\"9.4\"},\"duration\":5}],\"seasonTotal\":{\"value\":106.5,\"formatted\":\"106.5\"},\"seasonAverage\":{\"value\":11.833333,\"formatted\":\"11.83\"},\"seasonsStandartDeviation\":{\"value\":9.809293,\"formatted\":\"9.81\"},\"seasonConsistency\":\"RATING_BAD\"},\"team\":{\"id\":1373973,\"name\":\"Red River Land Thunder\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373973_0_150x150.jpg\",\"initials\":\"RR\"},\"tradeId\":4962262}},{\"timeEpochMilli\":\"1567363855000\",\"transaction\":{\"type\":\"TRANSACTION_TRADE\",\"player\":{\"proPlayer\":{\"id\":11062,\"nameFull\":\"Todd Davis\",\"nameShort\":\"T. 
Davis\",\"proTeamAbbreviation\":\"MIN\",\"position\":\"LB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/11062.png\",\"nflByeWeek\":7,\"nameFirst\":\"Todd\",\"nameLast\":\"Davis\",\"proTeam\":{\"abbreviation\":\"MIN\",\"location\":\"Minnesota\",\"name\":\"Vikings\"},\"positionEligibility\":[\"LB\",\"LB\"]},\"requestedGames\":[{\"game\":{\"id\":6309,\"away\":{\"abbreviation\":\"DAL\",\"location\":\"Dallas\",\"name\":\"Cowboys\"},\"home\":{\"abbreviation\":\"MIN\",\"location\":\"Minnesota\",\"name\":\"Vikings\"},\"startTimeEpochMilli\":\"1606080300000\",\"status\":\"FINAL_SCORE\",\"awayScore\":31,\"homeScore\":28,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"LOSE\",\"awayResult\":\"WIN\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":761,\"positions\":[{\"position\":{\"label\":\"LB\",\"group\":\"START\",\"eligibility\":[\"LB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":137,\"formatted\":\"137\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":2.0,\"formatted\":\"2\"},\"duration\":1},{\"value\":{\"value\":1.83,\"formatted\":\"1.83\"},\"duration\":3},{\"value\":{\"value\":1.75,\"formatted\":\"1.75\"},\"duration\":5}],\"seasonTotal\":{\"value\":7.0,\"formatted\":\"7\"},\"seasonAverage\":{\"value\":1.75,\"formatted\":\"1.75\"},\"seasonsStandartDeviation\":{\"value\":0.25,\"formatted\":\"0.25\"},\"seasonConsistency\":\"RATING_VERY_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"tradeId\":4962262}},{\"timeEpochMilli\":\"1567363855000\",\"transaction\":{\"type\":\"TRANSACTION_TRADE\",\"player\":{\"proPlayer\":{\"id\":3046,\"nameFull\":\"Thomas Davis\",\"nameShort\":\"T. 
Davis\",\"proTeamAbbreviation\":\"WAS\",\"position\":\"LB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/3046.png\",\"nflByeWeek\":8,\"nameFirst\":\"Thomas\",\"nameLast\":\"Davis\",\"proTeam\":{\"abbreviation\":\"WAS\",\"location\":\"Washington\",\"name\":\"Football Team\"},\"positionEligibility\":[\"LB\",\"LB\"]},\"requestedGames\":[{\"game\":{\"id\":6306,\"away\":{\"abbreviation\":\"CIN\",\"location\":\"Cincinnati\",\"name\":\"Bengals\"},\"home\":{\"abbreviation\":\"WAS\",\"location\":\"Washington\",\"name\":\"Football Team\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":9,\"homeScore\":20,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.86,\"formatted\":\"0.9\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":1.12,\"formatted\":\"1.1\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.03,\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.86,\"formatted\":\"0.9\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":1.12,\"formatted\":\"1.1\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.03,\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1373973,\"name\":\"Red River Land Thunder\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373973_0_150x150.jpg\",\"initials\":\"RR\"},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":761,\"positions\":[{\"position\":{\"label\":\"LB\",\"group\":\"START\",\"eligibility\":[\"LB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":137,\"formatted\":\"137\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":4.0,\"formatted\":\"4\"},\"duration\":1},{\"value\":{\"value\":3.5,\"formatted\":\"3.5\"},\"duration\":3},{\"value\":{\"value\":3.5,\"formatted\":\"3.5\"},\"duration\":5}],\"isKeeper\":true,\"seasonTotal\":{\"value\":7.0,\"formatted\":\"7\"},\"seasonAverage\":{\"value\":3.5,\"formatted\":\"3.5\"},\"seasonsStandartDeviation\":{\"value\":0.5,\"formatted\":\"0.5\"},\"seasonConsistency\":\"RATING_VERY_GOOD\"},\"team\":{\"id\":1373973,\"name\":\"Red River Land Thunder\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373973_0_150x150.jpg\",\"initials\":\"RR\"},\"tradeId\":4962262}},{\"timeEpochMilli\":\"1566986400000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":13938,\"nameFull\":\"Shaun Dion Hamilton\",\"nameShort\":\"S. 
Hamilton\",\"proTeamAbbreviation\":\"WAS\",\"position\":\"LB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13938.png\",\"nflByeWeek\":8,\"nameFirst\":\"Shaun Dion\",\"nameLast\":\"Hamilton\",\"proTeam\":{\"abbreviation\":\"WAS\",\"location\":\"Washington\",\"name\":\"Football Team\"},\"positionEligibility\":[\"LB\",\"LB\"]},\"requestedGames\":[{\"game\":{\"id\":6306,\"away\":{\"abbreviation\":\"CIN\",\"location\":\"Cincinnati\",\"name\":\"Bengals\"},\"home\":{\"abbreviation\":\"WAS\",\"location\":\"Washington\",\"name\":\"Football Team\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":9,\"homeScore\":20,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.62,\"formatted\":\"0.6\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":0.95,\"formatted\":\"0.9\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.02,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.04,\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.62,\"formatted\":\"0.6\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":0.95,\"formatted\":\"0.9\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.02,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.04,\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":605,\"positions\":[{\"position\":{\"label\":\"LB\",\"group\":\"START\",\"eligibility\":[\"LB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":106,\"formatted\":\"106\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":5.5,\"formatted\":\"5.5\"},\"duration\":1,\"overPerforming\":true},{\"value\":{\"value\":3.5,\"formatted\":\"3.5\"},\"duration\":3},{\"value\":{\"value\":3.9,\"formatted\":\"3.9\"},\"duration\":5}],\"seasonTotal\":{\"value\":20.5,\"formatted\":\"20.5\"},\"seasonAverage\":{\"value\":3.4166667,\"formatted\":\"3.42\"},\"seasonsStandartDeviation\":{\"value\":1.7892424,\"formatted\":\"1.79\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1566986400000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":11191,\"nameFull\":\"Maxx Williams\",\"nameShort\":\"M. 
Williams\",\"proTeamAbbreviation\":\"ARI\",\"position\":\"TE\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/11191.png\",\"nflByeWeek\":8,\"news\":[{\"timeEpochMilli\":\"1606014939000\",\"contents\":\"Williams reeled in both of his targets for 29 yards during Thursday's 28-21 loss in Seattle.\",\"analysis\":\"Williams accounted for the Cardinals' longest play of the game, a 25-yard catch to help set up the team's first touchdown at the start of the second quarter. Otherwise, he was relatively quiet, despite pacing Arizona tight ends in offensive snap share (57 percent). Meanwhile, Dan Arnold (a four-yard TD catch on two targets, 39 percent) and practice squad callup Evan Baylis (no targets, nine percent) scooped up the remaining work given to the position group. Williams has been tending to an ankle issue for most of the season, but since his return Week 9 he's continued to lead Cards TEs in snaps on offense.\",\"title\":\"Nabs two catches Thursday\"}],\"nameFirst\":\"Maxx\",\"nameLast\":\"Williams\",\"proTeam\":{\"abbreviation\":\"ARI\",\"location\":\"Arizona\",\"name\":\"Cardinals\"},\"positionEligibility\":[\"TE\"]},\"requestedGames\":[{\"game\":{\"id\":6299,\"away\":{\"abbreviation\":\"ARI\",\"location\":\"Arizona\",\"name\":\"Cardinals\"},\"home\":{\"abbreviation\":\"SEA\",\"location\":\"Seattle\",\"name\":\"Seahawks\"},\"startTimeEpochMilli\":\"1605835200000\",\"status\":\"FINAL_SCORE\",\"awayScore\":21,\"homeScore\":28,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"2/2\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"},\"value\":{\"value\":29.0,\"formatted\":\"29\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"1/1\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":11.86,\"formatted\":\"11.9\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.08,\"formatted\":\"0.1\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"1/1\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":11.86,\"formatted\":\"11.9\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.08,\"formatted\":\"0.1\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":748,\"positions\":[{\"position\":{\"label\":\"TE\",\"group\":\"START\",\"eligibility\":[\"TE\"],\"colors\":[\"DRAFT_BOARD_YELLOW\"]},\"ordinal\":82,\"formatted\":\"82\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":7.9,\"formatted\":\"7.9\"},\"duration\":1,\"overPerforming\":true},{\"value\":{\"value\":3.95,\"formatted\":\"3.95\"},\"duration\":3},{\"value\":{\"value\":3.95,\"formatted\":\"3.95\"},\"duration\":5}],\"seasonTotal\":{\"value\":7.9,\"formatted\":\"7.9\"},\"seasonAverage\":{\"value\":3.95,\"formatted\":\"3.95\"},\"seasonsStandartDeviation\":{\"value\":3.95,\"formatted\":\"3.95\"},\"seasonConsistency\":\"RATING_VERY_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire 
Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1566986400000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":12247,\"nameFull\":\"Dwayne Washington\",\"nameShort\":\"D. Washington\",\"proTeamAbbreviation\":\"NO\",\"position\":\"RB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/12247.png\",\"nflByeWeek\":6,\"injury\":{\"typeAbbreviaition\":\"OUT\",\"description\":\"Back\",\"severity\":\"OUT\",\"typeFull\":\"Out\"},\"nameFirst\":\"Dwayne\",\"nameLast\":\"Washington\",\"proTeam\":{\"abbreviation\":\"NO\",\"location\":\"New Orleans\",\"name\":\"Saints\"},\"positionEligibility\":[\"RB\"]},\"requestedGames\":[{\"game\":{\"id\":6304,\"away\":{\"abbreviation\":\"ATL\",\"location\":\"Atlanta\",\"name\":\"Falcons\"},\"home\":{\"abbreviation\":\"NO\",\"location\":\"New Orleans\",\"name\":\"Saints\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":9,\"homeScore\":24,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing 
Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RUSHER\",\"rankFantasy\":{\"ordinal\":842,\"positions\":[{\"position\":{\"label\":\"RB\",\"group\":\"START\",\"eligibility\":[\"RB\"],\"colors\":[\"DRAFT_BOARD_GREEN\"]},\"ordinal\":137,\"formatted\":\"137\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":0.9,\"formatted\":\"0.9\"},\"duration\":1},{\"value\":{\"value\":1.03,\"formatted\":\"1.03\"},\"duration\":3},{\"value\":{\"value\":1.03,\"formatted\":\"1.03\"},\"duration\":5}],\"seasonTotal\":{\"value\":3.1,\"formatted\":\"3.1\"},\"seasonAverage\":{\"value\":1.0333333,\"formatted\":\"1.03\"},\"seasonsStandartDeviation\":{\"value\":0.3399346,\"formatted\":\"0.34\"},\"seasonConsistency\":\"RATING_VERY_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1566986400000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":13844,\"nameFull\":\"Jordan Akins\",\"nameShort\":\"J. 
Akins\",\"proTeamAbbreviation\":\"HOU\",\"position\":\"TE\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13844.png\",\"nflByeWeek\":8,\"news\":[{\"timeEpochMilli\":\"1606142316000\",\"contents\":\"Akins caught five of six targets for 83 yards in Sunday's 27-20 win over the Patriots.\",\"analysis\":\"Akins re-emerged as the team's top tight end option, amassing more targets than Darren Fells (three) and Pharaoh Brown (two) combined. The 11 targets for the position group was the most since Week 2 and could be a sign of things to come should Randall Cobb (toe) and Kenny Stills (quadriceps) miss significant time. With a short week to prepare for Thursday's meeting in Detroit, Akins could be in line for a significant workload.\",\"title\":\"Back atop TE list\"},{\"timeEpochMilli\":\"1606152803000\",\"contents\":\"BALLER MOVE: Add in 14+ Team LeaguesROSTERED: 2% of LeaguesANALYSIS: Even though Jordan Akins has just 55.5 PPR total points through Week 11, he is the TE28 of the NFL right now. That's insane. A borderline TE2 with that point total just shows how slim the position is. Akins' 7.9 PPR per game are just a bit better (TE23), but not much more, though. 
But at this point in the season and given the status of things, Akins is far from being the worst option for you at the position if you need some TE ...\",\"url\":\"https://www.rotoballer.com/jordan-akins-te-hou-week-12-waiver-wire-pickups/807072\",\"title\":\"Jordan Akins (TE, HOU) - Week 12 Waiver Wire Pickups\"}],\"nameFirst\":\"Jordan\",\"nameLast\":\"Akins\",\"proTeam\":{\"abbreviation\":\"HOU\",\"location\":\"Houston\",\"name\":\"Texans\"},\"positionEligibility\":[\"TE\"]},\"requestedGames\":[{\"game\":{\"id\":6300,\"away\":{\"abbreviation\":\"NE\",\"location\":\"New England\",\"name\":\"Patriots\"},\"home\":{\"abbreviation\":\"HOU\",\"location\":\"Houston\",\"name\":\"Texans\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":20,\"homeScore\":27,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":83.333336,\"formatted\":\"5/6\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":83.0,\"formatted\":\"83\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"2/2\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"},\"value\":{\"value\":25.25,\"formatted\":\"25.2\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.09,\"formatted\":\"0.1\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"2/2\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":25.25,\"formatted\":\"25.2\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"value\":0.09,\"formatted\":\"0.1\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1373535,\"name\":\"Winterthur Angry Ducks\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373535_0_150x150.jpg\",\"initials\":\"WA\"},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":429,\"positions\":[{\"position\":{\"label\":\"TE\",\"group\":\"START\",\"eligibility\":[\"TE\"],\"colors\":[\"DRAFT_BOARD_YELLOW\"]},\"ordinal\":36,\"formatted\":\"36\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":1.5,\"formatted\":\"1.5\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":4.83,\"formatted\":\"4.83\"},\"duration\":3},{\"value\":{\"value\":6.76,\"formatted\":\"6.76\"},\"duration\":5}],\"seasonTotal\":{\"value\":46.700005,\"formatted\":\"46.7\"},\"seasonAverage\":{\"value\":7.7833343,\"formatted\":\"7.78\"},\"seasonsStandartDeviation\":{\"value\":4.520107,\"formatted\":\"4.52\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1566381600000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":14705,\"nameFull\":\"Cody Barton\",\"nameShort\":\"C. 
Barton\",\"proTeamAbbreviation\":\"SEA\",\"position\":\"LB\",\"nflByeWeek\":6,\"nameFirst\":\"Cody\",\"nameLast\":\"Barton\",\"proTeam\":{\"abbreviation\":\"SEA\",\"location\":\"Seattle\",\"name\":\"Seahawks\"},\"positionEligibility\":[\"LB\",\"LB\"]},\"requestedGames\":[{\"game\":{\"id\":6299,\"away\":{\"abbreviation\":\"ARI\",\"location\":\"Arizona\",\"name\":\"Cardinals\"},\"home\":{\"abbreviation\":\"SEA\",\"location\":\"Seattle\",\"name\":\"Seahawks\"},\"startTimeEpochMilli\":\"1605835200000\",\"status\":\"FINAL_SCORE\",\"awayScore\":21,\"homeScore\":28,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.94,\"formatted\":\"0.9\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":-0.13,\"formatted\":\"-0.1\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.03,\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.94,\"formatted\":\"0.9\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":-0.13,\"formatted\":\"-0.1\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.03,\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":504,\"positions\":[{\"position\":{\"label\":\"LB\",\"group\":\"START\",\"eligibility\":[\"LB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":89,\"formatted\":\"89\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":1.5,\"formatted\":\"1.5\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":3.0,\"formatted\":\"3\"},\"duration\":3},{\"value\":{\"value\":6.4,\"formatted\":\"6.4\"},\"duration\":5}],\"seasonTotal\":{\"value\":33.5,\"formatted\":\"33.5\"},\"seasonAverage\":{\"value\":5.5833335,\"formatted\":\"5.58\"},\"seasonsStandartDeviation\":{\"value\":5.1753955,\"formatted\":\"5.18\"},\"seasonConsistency\":\"RATING_VERY_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1566381600000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":13821,\"nameFull\":\"Oren Burks\",\"nameShort\":\"O. 
Burks\",\"proTeamAbbreviation\":\"GB\",\"position\":\"LB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13821.png\",\"nflByeWeek\":5,\"nameFirst\":\"Oren\",\"nameLast\":\"Burks\",\"proTeam\":{\"abbreviation\":\"GB\",\"location\":\"Green Bay\",\"name\":\"Packers\"},\"positionEligibility\":[\"LB\",\"LB\"]},\"requestedGames\":[{\"game\":{\"id\":6305,\"away\":{\"abbreviation\":\"GB\",\"location\":\"Green Bay\",\"name\":\"Packers\"},\"home\":{\"abbreviation\":\"IND\",\"location\":\"Indianapolis\",\"name\":\"Colts\"},\"startTimeEpochMilli\":\"1606080300000\",\"status\":\"FINAL_SCORE\",\"awayScore\":31,\"homeScore\":34,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.54,\"formatted\":\"0.5\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":2.29,\"formatted\":\"2.3\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.54,\"formatted\":\"0.5\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":2.29,\"formatted\":\"2.3\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":540,\"positions\":[{\"position\":{\"label\":\"LB\",\"group\":\"START\",\"eligibility\":[\"LB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":96,\"formatted\":\"96\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":11.0,\"formatted\":\"11\"},\"duration\":1,\"overPerforming\":true},{\"value\":{\"value\":6.33,\"formatted\":\"6.33\"},\"duration\":3},{\"value\":{\"value\":7.0,\"formatted\":\"7\"},\"duration\":5}],\"seasonTotal\":{\"value\":28.0,\"formatted\":\"28\"},\"seasonAverage\":{\"value\":7.0,\"formatted\":\"7\"},\"seasonsStandartDeviation\":{\"value\":3.1622777,\"formatted\":\"3.16\"},\"seasonConsistency\":\"RATING_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1566381600000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":14719,\"nameFull\":\"Zach Allen\",\"nameShort\":\"Z. 
Allen\",\"proTeamAbbreviation\":\"ARI\",\"position\":\"IL\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/14719.png\",\"nflByeWeek\":8,\"injury\":{\"typeAbbreviaition\":\"IR\",\"description\":\"Ankle\",\"severity\":\"OUT\",\"typeFull\":\"Injured Reserve\"},\"nameFirst\":\"Zach\",\"nameLast\":\"Allen\",\"proTeam\":{\"abbreviation\":\"ARI\",\"location\":\"Arizona\",\"name\":\"Cardinals\"},\"positionEligibility\":[\"DE\",\"IL\"]},\"requestedGames\":[{\"game\":{\"id\":6299,\"away\":{\"abbreviation\":\"ARI\",\"location\":\"Arizona\",\"name\":\"Cardinals\"},\"home\":{\"abbreviation\":\"SEA\",\"location\":\"Seattle\",\"name\":\"Seahawks\"},\"startTimeEpochMilli\":\"1605835200000\",\"status\":\"FINAL_SCORE\",\"awayScore\":21,\"homeScore\":28,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":551,\"positions\":[{\"position\":{\"label\":\"IL\",\"group\":\"START\",\"eligibility\":[\"IL\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":86,\"formatted\":\"86\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"formatted\":\"0\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":1.5,\"formatted\":\"1.5\"},\"duration\":3,\"underPerforming\":true},{\"value\":{\"value\":2.7,\"formatted\":\"2.7\"},\"duration\":5}],\"seasonTotal\":{\"value\":26.5,\"formatted\":\"26.5\"},\"seasonAverage\":{\"value\":4.4166665,\"formatted\":\"4.42\"},\"seasonsStandartDeviation\":{\"value\":4.4009156,\"formatted\":\"4.4\"},\"seasonConsistency\":\"RATING_VERY_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1566381600000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":8460,\"nameFull\":\"Derek Wolfe\",\"nameShort\":\"D. 
Wolfe\",\"proTeamAbbreviation\":\"BAL\",\"position\":\"IL\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/8460.png\",\"nflByeWeek\":7,\"nameFirst\":\"Derek\",\"nameLast\":\"Wolfe\",\"proTeam\":{\"abbreviation\":\"BAL\",\"location\":\"Baltimore\",\"name\":\"Ravens\"},\"positionEligibility\":[\"DE\",\"IL\"]},\"requestedGames\":[{\"game\":{\"id\":6301,\"away\":{\"abbreviation\":\"TEN\",\"location\":\"Tennessee\",\"name\":\"Titans\"},\"home\":{\"abbreviation\":\"BAL\",\"location\":\"Baltimore\",\"name\":\"Ravens\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":30,\"homeScore\":24,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"LOSE\",\"awayResult\":\"WIN\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":4.0,\"formatted\":\"4\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":2.0,\"formatted\":\"2\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":1.14,\"formatted\":\"1.1\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":0.89,\"formatted\":\"0.9\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.09,\"formatted\":\"0.1\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":1.14,\"formatted\":\"1.1\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":0.89,\"formatted\":\"0.9\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.09,\"formatted\":\"0.1\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":511,\"positions\":[{\"position\":{\"label\":\"IL\",\"group\":\"START\",\"eligibility\":[\"IL\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":69,\"formatted\":\"69\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":6.5,\"formatted\":\"6.5\"},\"duration\":1},{\"value\":{\"value\":7.83,\"formatted\":\"7.83\"},\"duration\":3},{\"value\":{\"value\":6.0,\"formatted\":\"6\"},\"duration\":5}],\"seasonTotal\":{\"value\":32.5,\"formatted\":\"32.5\"},\"seasonAverage\":{\"value\":5.4166665,\"formatted\":\"5.42\"},\"seasonsStandartDeviation\":{\"value\":3.4450045,\"formatted\":\"3.45\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1566381600000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":14500,\"nameFull\":\"Chad Beebe\",\"nameShort\":\"C. Beebe\",\"proTeamAbbreviation\":\"MIN\",\"position\":\"WR\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/14500.png\",\"nflByeWeek\":7,\"news\":[{\"timeEpochMilli\":\"1606170389000\",\"contents\":\"Beebe did not have a reception or target in Sunday's loss to Dallas. 
He played 17 snaps on offense.\",\"analysis\":\"The Vikings utilized two tight ends more with Irv Smith back from a groin injury and the game flow didn't dictate the use of three-receiver sets as much. Beebe remains the third receiver, but he could have a larger role this week if Adam Thielen misses time after landing on the COVID-19 list.\",\"title\":\"No receptions Sunday\"}],\"nameFirst\":\"Chad\",\"nameLast\":\"Beebe\",\"proTeam\":{\"abbreviation\":\"MIN\",\"location\":\"Minnesota\",\"name\":\"Vikings\"},\"positionEligibility\":[\"WR\"]},\"requestedGames\":[{\"game\":{\"id\":6309,\"away\":{\"abbreviation\":\"DAL\",\"location\":\"Dallas\",\"name\":\"Cowboys\"},\"home\":{\"abbreviation\":\"MIN\",\"location\":\"Minnesota\",\"name\":\"Vikings\"},\"startTimeEpochMilli\":\"1606080300000\",\"status\":\"FINAL_SCORE\",\"awayScore\":31,\"homeScore\":28,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"LOSE\",\"awayResult\":\"WIN\"},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"1/1\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"},\"value\":{\"value\":15.64,\"formatted\":\"15.6\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.07,\"formatted\":\"0.1\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"1/1\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":15.64,\"formatted\":\"15.6\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"value\":0.07,\"formatted\":\"0.1\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":650,\"positions\":[{\"position\":{\"label\":\"WR\",\"group\":\"START\",\"eligibility\":[\"WR\"],\"colors\":[\"DRAFT_BOARD_BLUE\"]},\"ordinal\":156,\"formatted\":\"156\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":3.1,\"formatted\":\"3.1\"},\"duration\":1},{\"value\":{\"value\":2.22,\"formatted\":\"2.22\"},\"duration\":3},{\"value\":{\"value\":2.58,\"formatted\":\"2.58\"},\"duration\":5}],\"seasonTotal\":{\"value\":15.599998,\"formatted\":\"15.6\"},\"seasonAverage\":{\"value\":2.5999997,\"formatted\":\"2.6\"},\"seasonsStandartDeviation\":{\"value\":0.86071754,\"formatted\":\"0.86\"},\"seasonConsistency\":\"RATING_VERY_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1566381600000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":14958,\"nameFull\":\"Stanley Morgan\",\"nameShort\":\"S. 
Morgan\",\"proTeamAbbreviation\":\"CIN\",\"position\":\"WR\",\"nflByeWeek\":9,\"injury\":{\"typeAbbreviaition\":\"OUT\",\"description\":\"Undisclosed\",\"severity\":\"OUT\",\"typeFull\":\"Out\"},\"nameFirst\":\"Stanley\",\"nameLast\":\"Morgan\",\"proTeam\":{\"abbreviation\":\"CIN\",\"location\":\"Cincinnati\",\"name\":\"Bengals\"},\"positionEligibility\":[\"WR\"]},\"requestedGames\":[{\"game\":{\"id\":6306,\"away\":{\"abbreviation\":\"CIN\",\"location\":\"Cincinnati\",\"name\":\"Bengals\"},\"home\":{\"abbreviation\":\"WAS\",\"location\":\"Washington\",\"name\":\"Football Team\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":9,\"homeScore\":20,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RECEIVER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1566381600000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":11191,\"nameFull\":\"Maxx Williams\",\"nameShort\":\"M. Williams\",\"proTeamAbbreviation\":\"ARI\",\"position\":\"TE\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/11191.png\",\"nflByeWeek\":8,\"news\":[{\"timeEpochMilli\":\"1606014939000\",\"contents\":\"Williams reeled in both of his targets for 29 yards during Thursday's 28-21 loss in Seattle.\",\"analysis\":\"Williams accounted for the Cardinals' longest play of the game, a 25-yard catch to help set up the team's first touchdown at the start of the second quarter. Otherwise, he was relatively quiet, despite pacing Arizona tight ends in offensive snap share (57 percent). Meanwhile, Dan Arnold (a four-yard TD catch on two targets, 39 percent) and practice squad callup Evan Baylis (no targets, nine percent) scooped up the remaining work given to the position group. 
Williams has been tending to an ankle issue for most of the season, but since his return Week 9 he's continued to lead Cards TEs in snaps on offense.\",\"title\":\"Nabs two catches Thursday\"}],\"nameFirst\":\"Maxx\",\"nameLast\":\"Williams\",\"proTeam\":{\"abbreviation\":\"ARI\",\"location\":\"Arizona\",\"name\":\"Cardinals\"},\"positionEligibility\":[\"TE\"]},\"requestedGames\":[{\"game\":{\"id\":6299,\"away\":{\"abbreviation\":\"ARI\",\"location\":\"Arizona\",\"name\":\"Cardinals\"},\"home\":{\"abbreviation\":\"SEA\",\"location\":\"Seattle\",\"name\":\"Seahawks\"},\"startTimeEpochMilli\":\"1605835200000\",\"status\":\"FINAL_SCORE\",\"awayScore\":21,\"homeScore\":28,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"2/2\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":29.0,\"formatted\":\"29\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"1/1\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"},\"value\":{\"value\":11.86,\"formatted\":\"11.9\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.08,\"formatted\":\"0.1\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"1/1\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":11.86,\"formatted\":\"11.9\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"value\":0.08,\"formatted\":\"0.1\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":748,\"positions\":[{\"position\":{\"label\":\"TE\",\"group\":\"START\",\"eligibility\":[\"TE\"],\"colors\":[\"DRAFT_BOARD_YELLOW\"]},\"ordinal\":82,\"formatted\":\"82\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":7.9,\"formatted\":\"7.9\"},\"duration\":1,\"overPerforming\":true},{\"value\":{\"value\":3.95,\"formatted\":\"3.95\"},\"duration\":3},{\"value\":{\"value\":3.95,\"formatted\":\"3.95\"},\"duration\":5}],\"seasonTotal\":{\"value\":7.9,\"formatted\":\"7.9\"},\"seasonAverage\":{\"value\":3.95,\"formatted\":\"3.95\"},\"seasonsStandartDeviation\":{\"value\":3.95,\"formatted\":\"3.95\"},\"seasonConsistency\":\"RATING_VERY_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1566381600000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":6611,\"nameFull\":\"Rob Gronkowski\",\"nameShort\":\"R. Gronkowski\",\"proTeamAbbreviation\":\"TB\",\"position\":\"TE\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/6611.png\",\"nflByeWeek\":13,\"news\":[{\"timeEpochMilli\":\"1605999203000\",\"contents\":\"Gronkowski will play in Monday's game against the Rams.\",\"analysis\":\"Gronkowski sat out practice Friday for what likely was a routine rest day, and he's officially been cleared for Monday's primetime clash. 
The 31-year-old tight end took awhile to get rolling this season, but he's been impressive of late, recording 17 receptions for 234 yards and four touchdowns over the past five games.\",\"title\":\"Ready for Week 11\"},{\"timeEpochMilli\":\"1606050921000\",\"contents\":\"Weve made it to Week 11. Teams that are going to compete are becoming more and more clear while others are beginning to fade. We had 13 double-digit scoring kickers on the weekend heading into the Monday night game. Probably one of the most impressive stats was that seven kickers outscored the top two tight end scorers, Mark Andrews and Rob Gronkowski, from the week who put up 13.1 PPR pointsWe had some explosive performances and big kicks abound. Buffalo Bills kicker Tyler Bass banged home ...\",\"url\":\"https://www.rotoballer.com/week-11-kicker-streamers-starters-rankings-2020-fantasy-tiers/804799\",\"title\":\"Week 11 Kicker Streamers and Starts - 2020 Fantasy Tiers, Rankings\"}],\"nameFirst\":\"Rob\",\"nameLast\":\"Gronkowski\",\"proTeam\":{\"abbreviation\":\"TB\",\"location\":\"Tampa Bay\",\"name\":\"Buccaneers\"},\"positionEligibility\":[\"TE\"]},\"requestedGames\":[{\"game\":{\"id\":6311,\"away\":{\"abbreviation\":\"LAR\",\"location\":\"Los Angeles\",\"name\":\"Rams\"},\"home\":{\"abbreviation\":\"TB\",\"location\":\"Tampa Bay\",\"name\":\"Buccaneers\"},\"startTimeEpochMilli\":\"1606180500000\",\"status\":\"IN_PROGRESS\",\"segment\":1,\"segmentSecondsRemaining\":790,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"stateFootball\":{\"down\":2,\"distance\":8,\"fieldLine\":40,\"fieldLineAbsolute\":40,\"description\":\"2nd & 8 at TB 40\"}},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"statsProjected\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"3/3\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":43.31,\"formatted\":\"43.3\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.24,\"formatted\":\"0.2\"}}],\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"hasPossession\":true}],\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % 
Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"3/3\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":43.31,\"formatted\":\"43.3\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.24,\"formatted\":\"0.2\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1373393,\"name\":\"Philadelphia Fire\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373393_0_150x150.jpg\",\"initials\":\"PF\"},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":141,\"positions\":[{\"position\":{\"label\":\"TE\",\"group\":\"START\",\"eligibility\":[\"TE\"],\"colors\":[\"DRAFT_BOARD_YELLOW\"]},\"ordinal\":11,\"formatted\":\"11\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":14.1,\"formatted\":\"14.1\"},\"duration\":1,\"overPerforming\":true},{\"value\":{\"value\":10.8,\"formatted\":\"10.8\"},\"duration\":3},{\"value\":{\"value\":14.18,\"formatted\":\"14.18\"},\"duration\":5}],\"seasonTotal\":{\"value\":99.899994,\"formatted\":\"99.9\"},\"seasonAverage\":{\"value\":9.99,\"formatted\":\"9.99\"},\"seasonsStandartDeviation\":{\"value\":6.7991858,\"formatted\":\"6.8\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1565776800000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":14188,\"nameFull\":\"Chris 
Board\",\"nameShort\":\"C. Board\",\"proTeamAbbreviation\":\"BAL\",\"position\":\"LB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/14188.png\",\"nflByeWeek\":7,\"nameFirst\":\"Chris\",\"nameLast\":\"Board\",\"proTeam\":{\"abbreviation\":\"BAL\",\"location\":\"Baltimore\",\"name\":\"Ravens\"},\"positionEligibility\":[\"LB\",\"LB\"]},\"requestedGames\":[{\"game\":{\"id\":6301,\"away\":{\"abbreviation\":\"TEN\",\"location\":\"Tennessee\",\"name\":\"Titans\"},\"home\":{\"abbreviation\":\"BAL\",\"location\":\"Baltimore\",\"name\":\"Ravens\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":30,\"homeScore\":24,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"LOSE\",\"awayResult\":\"WIN\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":540,\"positions\":[{\"position\":{\"label\":\"LB\",\"group\":\"START\",\"eligibility\":[\"LB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":95,\"formatted\":\"95\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":3.5,\"formatted\":\"3.5\"},\"duration\":1},{\"value\":{\"value\":4.83,\"formatted\":\"4.83\"},\"duration\":3},{\"value\":{\"value\":4.2,\"formatted\":\"4.2\"},\"duration\":5}],\"seasonTotal\":{\"value\":28.5,\"formatted\":\"28.5\"},\"seasonAverage\":{\"value\":3.5625,\"formatted\":\"3.56\"},\"seasonsStandartDeviation\":{\"value\":2.228193,\"formatted\":\"2.23\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1565776800000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":11199,\"nameFull\":\"Tyler Kroft\",\"nameShort\":\"T. 
Kroft\",\"proTeamAbbreviation\":\"BUF\",\"position\":\"TE\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/11199.png\",\"nflByeWeek\":11,\"injury\":{\"typeAbbreviaition\":\"CVD\",\"description\":\"Undisclosed\",\"severity\":\"OUT\",\"typeFull\":\"COVID-19\"},\"nameFirst\":\"Tyler\",\"nameLast\":\"Kroft\",\"proTeam\":{\"abbreviation\":\"BUF\",\"location\":\"Buffalo\",\"name\":\"Bills\"},\"positionEligibility\":[\"TE\"]},\"requestedGames\":[{\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"isBye\":true,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1373970,\"name\":\"Bamenda 
Herd\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373970_0_150x150.jpg\",\"initials\":\"BH\"},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":420,\"positions\":[{\"position\":{\"label\":\"TE\",\"group\":\"START\",\"eligibility\":[\"TE\"],\"colors\":[\"DRAFT_BOARD_YELLOW\"]},\"ordinal\":34,\"formatted\":\"34\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":7.6,\"formatted\":\"7.6\"},\"duration\":1},{\"value\":{\"value\":7.37,\"formatted\":\"7.37\"},\"duration\":3},{\"value\":{\"value\":4.76,\"formatted\":\"4.76\"},\"duration\":5}],\"seasonTotal\":{\"value\":47.899998,\"formatted\":\"47.9\"},\"seasonAverage\":{\"value\":6.842857,\"formatted\":\"6.84\"},\"seasonsStandartDeviation\":{\"value\":7.035595,\"formatted\":\"7.04\"},\"seasonConsistency\":\"RATING_VERY_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}}],\"resultOffsetNext\":240}"),
date = structure(1606180797, class = c("POSIXct", "POSIXt"), tzone = "GMT"), times = c(
redirect = 0, namelookup = 3.4e-05,
connect = 3.7e-05, pretransfer = 0.000146, starttransfer = 0.049436,
total = 0.049902
)
), class = "response")
|
6e36c32624e0cee9d5e4d575796c789e216f0152 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/rucrdtw/examples/ucrdtw_fv.Rd.R | 54242e8bca18a82a68c947053d8f910acb4fff74 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 368 | r | ucrdtw_fv.Rd.R | library(rucrdtw)
### Name: ucrdtw_fv
### Title: UCR DTW Algorithm file-vector method
### Aliases: ucrdtw_fv

### ** Examples

# Locate the example data file shipped with the package
data_file <- system.file("extdata/col_sc.txt", package = "rucrdtw")
# Load the bundled example data set
data("synthetic_control")
# Use the first row of the data set as the query sequence
query_series <- synthetic_control[1, ]
# Run the query with window parameter 0.05
ucrdtw_fv(data_file, query_series, 0.05)
|
c9379a95db5f8f1bc9880186443aaefbf7cd3037 | d1cc80f311dd27b990e81b9cd9503f381b2cc2f7 | /R/sp500.R | f4b8c3611b10bb9fbe41c311a11cb6be6116cab6 | [] | no_license | yangkedc1984/FinEx | 568cf74cb32db9aa693bbdcfdf1c5f12a36736d4 | e6e85b6c6a0bc0f505401901bf21e9969ef8b9b3 | refs/heads/master | 2023-04-19T14:51:21.434676 | 2021-04-06T17:00:45 | 2021-04-06T17:00:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 684 | r | sp500.R | #' Daily returns and 5-minute realized variance for the SP500.
#'
#' Daily observations from 2000 to 2014 from the Oxford-Man Realized library (Heber et al., 2009). These time series were used in the
#' empirical analysis by Bee and Trapin (2018).
#'
#' @docType data
#'
#' @usage data(sp500)
#'
#' @format A data frame of daily observations with (at least) the columns:
#' \describe{
#'   \item{r}{daily returns}
#'   \item{rv}{5-minute realized variance}
#' }
#'
#' @references
#' Heber, G., Lunde, A., Shephard, N., and Sheppard, K. (2009). \emph{Oxford-Man Institute’s realized library}, version 0.1.
#'
#' Bee, M., and Trapin, L. (2018). Estimating and forecasting conditional risk measures with extreme value theory: A review. \emph{Risks}, 6(2), 45.
#'
#' @examples
#' data(sp500)
#' returns <- sp500$r
#' realized_variance <- sp500$rv
"sp500"
|
21584c94295f1a59607b70c532816c8dddd09ec0 | 8154338803dd323819c3956410d8a58fd86314d0 | /Scripts/best.R | a9efbfbf0215ea280d929dc2777420271c7d7482 | [] | no_license | Ghost-8D/RStudio_repo | 4d8289482afa3d8a2bbd3ec149a2635e9177d0ce | 5d0a26147803896921db76ce8f041b299d010e41 | refs/heads/master | 2022-12-05T07:31:30.833346 | 2020-08-17T12:40:12 | 2020-08-17T12:40:12 | 284,327,657 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,718 | r | best.R | ## The function reads the outcome-of-care-measures.csv file and returns a character
## best(state, outcome)
##
## Reads outcome-of-care-measures.csv and returns the name of the hospital
## with the lowest 30-day mortality for the requested outcome in the given
## state.
##
## Arguments:
##   state   - 2-character abbreviated state name
##   outcome - one of "heart attack", "heart failure" or "pneumonia"
##
## An error is raised when either argument is invalid.
##
## Ties: when several hospitals share the lowest mortality, the names are
## sorted alphabetically and the first one is returned (if "b", "c" and
## "f" are tied for best, "b" wins).
##
## Usage example:
## > best("TX", "heart failure")
## [1] "FORT DUNCAN MEDICAL CENTER"
## > best("MD", "heart attack")
## [1] "JOHNS HOPKINS HOSPITAL, THE"
best <- function(state, outcome) {
  ## Read every field as character; the mortality column is coerced to
  ## numeric below (non-numeric entries such as "Not Available" become NA).
  data_dir <- file.path("data", "rprog_data_ProgAssignment3-data")
  records <- read.csv(file.path(data_dir, "outcome-of-care-measures.csv"),
                      colClasses = "character")
  ## Validate the state argument first, then the outcome.
  if (!state %in% unique(records$State)) {
    stop("invalid state")
  }
  ## Map each supported outcome to its 30-day mortality column index;
  ## switch() returns NULL for anything unrecognised.
  mortality_col <- switch(outcome,
                          "heart attack"  = 11,
                          "heart failure" = 17,
                          "pneumonia"     = 23)
  if (is.null(mortality_col)) {
    stop("invalid outcome")
  }
  ## Build a small (hospital, state, mortality) table; the coercion
  ## warnings for non-numeric mortality entries are expected.
  mortality <- suppressWarnings(as.numeric(records[[mortality_col]]))
  hospitals <- data.frame(Hospital  = records[[2]],
                          State     = records[[7]],
                          Mortality = mortality,
                          stringsAsFactors = FALSE)
  ## Keep only complete rows belonging to the requested state.
  hospitals <- hospitals[complete.cases(hospitals) &
                           hospitals$State == state, ]
  ## Ascending mortality; ties broken by alphabetical hospital name.
  hospitals <- hospitals[order(hospitals$Mortality, hospitals$Hospital), ]
  ## Return the best hospital's name.
  hospitals$Hospital[1]
}
d71235ad2104939d9e188d761320d4dfd7e2aabe | c0340c511cff5b40b4681c4d3238d807624c0323 | /results/plane/recordBaselinePositions.R | 9ce10d6fe25d99f272b6b22d66e35d2f56e3a3cd | [] | no_license | m-hahn/grammar-optim | 5fa7ade47d2ad91f517c887ee2c65af24059069d | 07a1a80692a504bcafc8120a21c4dc9066b495ee | refs/heads/master | 2022-08-30T06:54:42.749264 | 2022-08-05T12:09:28 | 2022-08-05T12:09:28 | 156,456,167 | 13 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,865 | r | recordBaselinePositions.R | library(lme4)
library(tidyr)
library(dplyr)
library(ggplot2)

## Read one per-grammar surprisal table.  All files share the same layout;
## the "best-large" and "random2".."random5" files carry an extra FullSurp
## column that the others lack, so it is dropped (a no-op when absent) to
## keep the tables rbind-compatible.  This replaces seven near-identical
## copy-pasted read.csv() + mutate() lines.
read_surprisal <- function(path) {
  d <- read.csv(path, sep = "\t") %>% mutate(Model = as.character(Model))
  d$FullSurp <- NULL
  d
}

plane_dir <- "../../grammars/plane"
surprisal_files <- file.path(plane_dir,
                             c("plane-fixed.tsv",
                               "plane-fixed-best.tsv",
                               "plane-fixed-best-large.tsv",
                               paste0("plane-fixed-random", 2:5, ".tsv")))
dataS <- do.call(rbind, lapply(surprisal_files, read_surprisal))

dataP <- read.csv(file.path(plane_dir, "plane-parse-unified.tsv"), sep = "\t") %>%
  mutate(Model = as.character(Model))

## Average the per-sentence scores per (Language, Type, Model) and make the
## grouping columns plain character for the merge below.
dataS <- dataS %>%
  group_by(Language, Type, Model) %>%
  summarise(Surprisal = mean(Surp, na.rm = TRUE)) %>%
  as.data.frame() %>%
  mutate(Language = as.character(Language),
         Type = as.character(Type),
         Model = as.character(Model))
dataP <- dataP %>%
  group_by(Language, Type, Model) %>%
  summarise(Pars = mean(Pars, na.rm = TRUE)) %>%
  as.data.frame() %>%
  mutate(Language = as.character(Language),
         Type = as.character(Type),
         Model = as.character(Model))

## Full outer join of the surprisal and parseability summaries.
data <- merge(dataS, dataP, by = c("Language", "Model", "Type"),
              all.x = TRUE, all.y = TRUE)

## Collapse the numbered random-baseline variants into a single type label
## (previously four copy-pasted ifelse() calls, one per variant).
random_variants <- paste0("manual_output_funchead_RANDOM", 2:5)
data <- data %>%
  mutate(Type = ifelse(Type %in% random_variants,
                       "manual_output_funchead_RANDOM",
                       as.character(Type)))

## Pair every random baseline with its language's ground-truth grammar.
dataBaseline <- data %>% filter(Type == "manual_output_funchead_RANDOM")
dataGround <- data %>%
  filter(Type == "manual_output_funchead_ground_coarse_final") %>%
  select(Language, Surprisal, Pars) %>%
  rename(SurprisalGround = Surprisal) %>%
  rename(ParsGround = Pars) %>%
  mutate(EffGround = ParsGround + 0.9 * SurprisalGround) %>%
  group_by(Language)
data <- merge(dataBaseline, dataGround, by = c("Language"))
write.csv(data, file = "analyze_pareto_optimality/pareto-data.tsv")
|
d2a5b0451cf70b50e2375942bca541299146d7d3 | 240ddc7e67e3a631538813b759a2d1cb5d8cc3d7 | /plot4.R | 161a6f793a23cff04839ec230aa557c3aa85cc79 | [] | no_license | tldc01/ExData_Plotting1 | 8ebbb462c8a89e20d6b82652f70fe4eb81ec5194 | f1f7dc995882fc273e61d6bfbd85ddf5cd1defb0 | refs/heads/master | 2021-01-12T13:13:47.565620 | 2016-10-28T17:13:59 | 2016-10-28T17:13:59 | 72,157,310 | 0 | 0 | null | 2016-10-27T23:45:24 | 2016-10-27T23:45:24 | null | UTF-8 | R | false | false | 2,057 | r | plot4.R |
# Load the full dataset; every column is read as character so that the
# "?" missing-value marker survives import.
power_data <- read.table("household_power_consumption.txt", header = TRUE,
                         colClasses = c(rep("character")), sep = ";")
dim(power_data) # sanity check: confirm the whole file was read

# Restrict to the two target days (1-2 Feb 2007).
feb_data <- power_data[power_data$Date %in% c("1/2/2007", "2/2/2007"), ]

# Drop any row containing the "?" missing-value marker in any column.
feb_data <- feb_data[rowSums(feb_data == "?") == 0, ]

# Open the PNG device for the 2x2 panel figure.
png(file = "plot4.png", width = 480, height = 480)

# One time series per plotted variable.
active_power   <- ts(as.numeric(feb_data$Global_active_power))
voltage        <- ts(as.numeric(feb_data$Voltage))
sub_meter_1    <- ts(as.numeric(feb_data$Sub_metering_1))
sub_meter_2    <- ts(as.numeric(feb_data$Sub_metering_2))
sub_meter_3    <- ts(as.numeric(feb_data$Sub_metering_3))
reactive_power <- ts(as.numeric(feb_data$Global_reactive_power))

# Shared x-axis for all four panels (same tick positions and labels).
draw_day_axis <- function() {
  axis(side = 1, at = c(0, 1500, 2880), labels = c("Thu", "Fri", "Sat"),
       tick = TRUE, lwd = 1)
}

# 2x2 grid, filled row by row.
layout(matrix(1:4, 2, 2, byrow = TRUE))

# Top-left: global active power.
plot(active_power, ylab = "Global Active Power", xlab = " ", xaxt = "n")
draw_day_axis()

# Top-right: voltage.
plot(voltage, ylab = "Voltage", xlab = "datetime", xaxt = "n")
draw_day_axis()

# Bottom-left: the three sub-metering series overlaid, with a legend.
plot(sub_meter_1, ylab = "Energy sub metering", xlab = " ", xaxt = "n")
lines(sub_meter_2, col = "red")
lines(sub_meter_3, col = "blue")
draw_day_axis()
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"), bty = "n", cex = .75)

# Bottom-right: global reactive power.
plot(reactive_power, ylab = "Global_reactive_power", xlab = "datetime", xaxt = "n")
draw_day_axis()

dev.off()
|
10a40dc70d1d1d509d29995669b7aac4e3b4b5c0 | 65c734e82dd10541e8d18f8db6cea7f75df540b5 | /tests/testthat/test_compose.R | 1ffbcc6a662fbcf2b46ea6f4402e65cae76ecdd4 | [
"MIT"
] | permissive | Ilia-Kosenkov/primitiveR | d974051be941c516df5aa62dc6ffcb49787df1fb | d61a626a836f9cfc518675f92c360fa6ee3950c0 | refs/heads/master | 2021-07-20T06:08:53.486692 | 2021-01-20T03:44:49 | 2021-01-20T03:44:49 | 236,519,157 | 0 | 0 | MIT | 2021-01-20T03:44:50 | 2020-01-27T15:12:55 | R | UTF-8 | R | false | false | 814 | r | test_compose.R | context("Composition tests")
test_that("%>>% behaves correctly", {
  # Left-to-right composition of formulas, a plain function and identity.
  composed <- (~.x + 1) %>>%
    (~.x + 2) %>>%
    identity %>>%
    (function(v) v + 3)
  set.seed(1)
  input <- rnorm(1000, 1, 20)
  expect_true((input + 6) %===% composed(input))
})

test_that("%<<% behaves correctly", {
  # Right-to-left composition; the steps commute, so the result is x + 6.
  composed <- (~.x + 1) %<<%
    (~.x + 2) %<<%
    identity %<<%
    (function(v) v + 3)
  set.seed(1)
  input <- rnorm(1000, 1, 20)
  expect_true((input + 6) %===% composed(input))
})

test_that("%>>% with only two functions", {
  # Add first, then double: 2 * (x + 5).
  composed <- (~.x + 5) %>>% (~.x * 2)
  set.seed(1)
  input <- rnorm(1000, 1, 20)
  expect_true((2 * (input + 5)) %===% composed(input))
})

test_that("%<<% with only two functions", {
  # Same pipeline expressed right-to-left.
  composed <- (~.x * 2) %<<% (~.x + 5)
  set.seed(1)
  input <- rnorm(1000, 1, 20)
  expect_true((2 * (input + 5)) %===% composed(input))
})
af2f06df5287f5be26bc3059308d3364de14fd12 | b26b28874f761ea2b085db8d60c994af57d99cd5 | /man/IsoPlot.Rd | c9de03ec0178c4b47486aee40d7b629b16f53d4a | [] | no_license | mjnueda/maSigPro | de1f90885fafc23b38585bb75aec40aeee9ed89f | 000885f3103c02880fb9bd1e69f0c679e08ae580 | refs/heads/master | 2021-01-22T12:37:54.848463 | 2020-12-08T17:53:08 | 2020-12-08T17:53:08 | 102,353,792 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,195 | rd | IsoPlot.Rd | \name{IsoPlot}
\alias{IsoPlot}
\title{Plotting the isoform profiles of a specific gene by groups}
\description{
This function makes a plot with the isoforms of a specific gene splitting the different experimental groups.
}
\usage{
IsoPlot(get, name, only.sig.iso=FALSE, ylim=NULL, xlab = "Time",
ylab = "Expression value", points=TRUE, cex.main=3,cex.legend=1.5)
}
\arguments{
\item{get}{a \code{getDS} object or a cluster of flat isoforms}
\item{name}{Name of the specific gene to show in the plot}
\item{only.sig.iso}{TRUE when the plot is made only with statistically significant isoforms.}
\item{ylim}{Range of the y axis of the desired plot. If it is NULL it will be computed automatically. }
\item{xlab}{label for the x axis}
\item{ylab}{label for the y axis}
\item{points}{ TRUE to plot points and lines. FALSE to plot only lines. }
\item{cex.main}{ graphical parameter magnification to be used for main}
\item{cex.legend}{ graphical parameter magnification to be used for legend }
}
\details{
The plot can be made with all the available isoforms or only with the statistically significant ones.
}
\value{
Plot of isoform profiles of a specific gene by groups.
}
\references{
Nueda, M.J., Martorell, J., Marti, C., Tarazona, S., Conesa, A. 2018. Identification and visualization of differential isoform expression in RNA-seq time series. Bioinformatics. 34, 3, 524-526.
Nueda, M.J., Tarazona, S., Conesa, A. 2014.
Next maSigPro: updating maSigPro bioconductor package for RNA-seq time series.
Bioinformatics, 30, 2598-602.
Conesa, A., Nueda M.J., Alberto Ferrer, A., Talon, T. 2006.
maSigPro: a Method to Identify Significant Differential Expression Profiles in Time-Course Microarray Experiments.
Bioinformatics 22, 1096-1102.
}
\author{Maria Jose Nueda, \email{mj.nueda@ua.es}}
\seealso{ \code{\link{getDS}}, \code{\link{IsoModel}} }
\examples{
data(ISOdata)
data(ISOdesign)
mdis <- make.design.matrix(ISOdesign)
MyIso <- IsoModel(data=ISOdata[,-1], gen=ISOdata[,1], design=mdis, counts=TRUE)
Myget <- getDS(MyIso)
IsoPlot(Myget,"Gene1005",only.sig.iso=FALSE,cex.main=2,cex.legend=1)
}
|
08c7b4cd149cb1f68e4fff82e1ce404603911839 | 0457cde508c1d7411af4b0f3ad143f2932f6b48a | /man/vt_pal.Rd | 884e2dfbcc5edb23df827e31f8677bb74a4f48b2 | [] | no_license | bboti86/vtcolors | 56978e6341395cf07c7e928d7af4ba992c1e2672 | 2e6564dd53ec0906c7a12071583bc368016b05bd | refs/heads/main | 2023-05-08T10:30:05.068443 | 2021-05-31T23:36:22 | 2021-05-31T23:36:22 | 372,546,878 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 520 | rd | vt_pal.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VT_Color_Palette.R
\name{vt_pal}
\alias{vt_pal}
\title{Return function to interpolate a VT color palette}
\usage{
vt_pal(palette = "primary", reverse = FALSE, ...)
}
\arguments{
\item{palette}{Character name of palette in vt_palettes}
\item{reverse}{Boolean indicating whether the palette should be reversed}
\item{...}{Additional arguments to pass to colorRampPalette()}
}
\description{
Return function to interpolate a VT color palette
}
|
a466af08a1549f1fa5c472df1a4fdf57d3be0a89 | fcf46fdb7479a9f3df87446a720ca301a72888d5 | /slurm_cluster_code/power_simu_general_ttest_12.R | aba11788b93efa2a96c1dd373f4a6195b8be83b3 | [] | no_license | devcao/LOCOpath_repo | b080ac685f04f0f22f88c5667293e2e73a797045 | 2c6b0b5553e9298e6c30cf3bfe25348e31625088 | refs/heads/master | 2022-12-28T19:35:24.549883 | 2020-10-19T02:53:55 | 2020-10-19T02:53:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,998 | r | power_simu_general_ttest_12.R | .libPaths(new="~/R")
# Slurm cluster driver: power simulation for a general t-test across
# several correlation structures.  Clears the workspace and fixes the
# working directory because each Slurm task starts this script fresh.
rm(list=ls())
setwd("~/hdi_simu")
require(multcomp)
# compare_power.R is expected to define General.Test.Power (and,
# presumably, mem_used or a library call providing it -- TODO confirm).
source("~/hdi_simu/compare_power.R")
#######################################################################################################################################
# Set simulation parameters (to be done with command-line arguments)
# Execute this from within the directory containing this R script:
############################################################################
options(echo=TRUE)
args <- commandArgs(trailingOnly = TRUE)
print(args)
# Example invocation (only the first four arguments are used below):
# args <- c("1000","2","1","20","3","5","3",".92",".96",".95",".98","1")
n <- as.numeric(args[1])        # sample size
p <- as.numeric(args[2])        # number of covariates
iter <- as.numeric(args[3]) # goes from 1 to 12
beta_i = as.numeric(args[4])    # signal strength index; scaled to bb below
####
##### if not running on a cluster
#n = 100
#p = 12
#iter = 500
#B = 500
#####
###################################################
###################################################
###################################################
# Effect size: beta_i/10 added to the first coefficient.
bb = beta_i/10
# One run per correlation setting; 'weak_equl'/'equl' are string labels
# interpreted by General.Test.Power (defined in compare_power.R).
for (rho in list(0, 0.5, 0.9, 'weak_equl','equl')){
  # First coefficient perturbed by bb, next three equal to 1, rest zero.
  results = General.Test.Power(n = n, p = p, beta=c(1+bb,rep(1,3),rep(0, 8)), rho=rho, iter = iter, setting = 'dep')
  # NOTE(review): mem_used() looks like pryr::mem_used but pryr is not
  # loaded here -- verify compare_power.R attaches it.
  print(mem_used())
  # One .RData file per (p, rho, bb) combination.
  f1 = paste0("~/hdi_simu/results/eq_pc_ttest_",p,"_", 'rho',rho,'beta_',bb,".RData")
  save(results,file = f1)
}
#results = desparse.Power(n = n, p = p, beta=c(bb,rep(1,9),rep(0,p-10)), rho=0.9, iter = iter, setting = 'dep', which.covariate = 1, betaNull = 0)
#print(mem_used())
#f1 = paste0("~/hdi_path/results/SI/proj_AR09_p_",p,"_",bb,".RData")
#save(results,file = f1)
#results = desparse.Power(n = n, p = p, beta=c(bb,rep(1,9),rep(0,p-10)), rho="equl", iter = iter, setting = 'dep', which.covariate = 1, betaNull = 0)
#print(mem_used())
#f1 = paste0("~/hdi_path/results/SI/proj_Eq_p_",p,"_",bb,".RData")
#save(results,file = f1)
#results = desparse.Power(n = n, p = p, beta=c(bb,rep(1,9),rep(0,p-10)), rho="weak_equl", iter = iter, setting = 'dep', which.covariate = 1, betaNull = 0)
#print(mem_used())
#f1 = paste0("~/hdi_path/results/SI/proj_WkEq_p_",p,"_",bb,".RData")
#save(results,file = f1)
#p0=runif(10,0,2)
#results = Path.Resample.Power(n = n, p = p, beta=c(rep(1,10),bb,bb,rep(0,988)), rho=0, multiTest = TRUE, iter = iter, B = B, setting = 'dep', which.covariate = list(c(1,2,11,12)), betaNull = list(c(1,1,0,0)), parallel = TRUE, norm = norm, path.method = path.method, beta.init = beta.init)
#print(mem_used())
#f3 = paste0("~/hdi_path/results/L2.sq/Multiple_Exp_",save.name,bb,".RData")
#save(results,file = f3)
#p0 = runif(10,0,2)
#results = Path.Resample.Power(n = n, p = p, beta=c(rep(1,10),bb,bb,rep(0,988)), rho="equl", iter = iter, B = B,multiTest = TRUE, setting = 'dep', which.covariate = list(c(1,2,11,12)), betaNull = list(c(1,1,0,0)), parallel = TRUE, norm = norm, path.method = path.method, beta.init = beta.init)
#print(mem_used())
#f4 = paste0("~/hdi_path/results/L2.sq/Multiple_Equal_",save.name,bb,".RData")
#save(results,file = f4)
|
db3383eb56468676ce98de7d227e48fe453dc09b | 335079b2323f11a029f189ce2e5f672ec014925d | /man/search_design.Rd | 19f1a4c394dedbd968620ed370276bfd47a414d8 | [
"MIT"
] | permissive | JedStephens/ExpertChoice | 930aa5d9763a1300d7ecc6cafc15774677968ede | dda9602c4ce5321f04c1ef05cbebed4ab1b2059b | refs/heads/master | 2020-05-20T21:51:44.290974 | 2020-04-07T17:18:11 | 2020-04-07T17:18:11 | 185,771,415 | 14 | 2 | null | null | null | null | UTF-8 | R | false | true | 1,732 | rd | search_design.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/search_design.R
\name{search_design}
\alias{search_design}
\title{Search Full Factorial for Fractional Factorial Design}
\usage{
search_design(full_factorial, fractional_factorial_design)
}
\arguments{
\item{full_factorial}{a `data.table` generated by the `full_factorial` function}
\item{fractional_factorial_design}{a means of creating a fractional design using either orthogonal arrays or Federov. See the tutorial for examples.}
}
\value{
a `data.frame` with only the rows of your chosen fractional factorial design.
}
\description{
Returns a consistent fractional factorial design from the input fractional factorial design. The key advantage of this function is that it ensures factors are consistently coded and enhances the attributes of the output.
}
\examples{
# The use of this function depends on what the input to the argument fractional_factorial_design
# will be. See Step 4 of Practical Introduction to ExpertChoice vignette.
# Step 1
attrshort = list(condition = c("0", "1", "2"),
technical =c("0", "1", "2"),
provenance = c("0", "1"))
#Step 2
# ff stands for "full factorial"
ff <- full_factorial(attrshort)
af <- augment_levels(ff)
# af stands for "augmented factorial"
# Step 3
# Choose a design type: Federov or Orthogonal. Here an Orthogonal one is used.
nlevels <- unlist(purrr::map(ff, function(x){length(levels(x))}))
fractional_factorial <- DoE.base::oa.design(nlevels = nlevels, columns = "min34")
# Step 4! - The search_design function.
# The functional draws out the rows from the original augmented full factorial design.
colnames(fractional_factorial) <- colnames(ff)
fractional <- search_design(ff, fractional_factorial)
}
|
097440856094068913b38fad2086a90c6d3573a7 | 6e4c33dd536eff09c15eb88b8b6e85e2f9df373f | /LY354740_Rat/LY354740_Stats/scripts/figures/Fig2.R | a7af2f8289979d3586fd838acabd43ea530e7738 | [] | no_license | MariosPanayi/Marios-temp | e9a4cf6d2764d9ea815a6ca173ef154b86b3fd45 | 4dc67ef4fdfa7d08f298e3582bf6ee21c365ecc4 | refs/heads/master | 2023-03-09T12:27:07.457077 | 2023-02-24T22:24:19 | 2023-02-24T22:24:19 | 109,297,687 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,601 | r | Fig2.R | ## Packages for data organisation and plotting
library(tidyverse)
# Package for relative file paths
library(here)
# library(ggpubr)
library(cowplot)
library(ggsignif)
library(patchwork)
library(RColorBrewer)
################################################################################
## Packages for Data analysis
library(afex)
afex_options(emmeans_model = "multivariate")# use multivariate model for all follow-up tests.
library(emmeans)
# install.packages("devtools")
# devtools::install_github("crsh/papaja")
library(papaja)
library(knitr)
################################################################################
## Experiment 3
# reload data
# Load Experiment 3 raw data (food-restricted locomotor activity).
full_data <- read_csv(here("rawdata", "/LY354740_Expt3_Locomotor_FoodDep.csv"))
#####
## Data
# Build the plotting data: total activity per subject/drug/period/10-min bin,
# plus each subject's activity expressed as a proportion of their bin-0
# (baseline) activity. Only the first 12 post-baseline bins are kept.
plot_data1 <- full_data %>%
  group_by(Subj, Drug, Amph, LY, Period, bin10mins) %>%
  summarise(activity = sum(activity)) %>%
  ungroup() %>%
  group_by(Subj) %>%
  # normalise to the subject's own baseline bin (bin10mins == "0")
  mutate(activity_perc = activity/activity[bin10mins=="0"]) %>%
  ungroup() %>%
  filter(bin10mins < 13) %>%
  mutate(bin10mins = as.factor(bin10mins))
# Re order and rename levels for plotting
plot_data1$Drug <- fct_relevel(plot_data1$Drug, c("Veh_Veh", "Veh_LY", "Veh_Amph", "LY_Amph"))
levels <- c("Veh/Veh" = "Veh_Veh", "Veh/LY354740" = "Veh_LY", "Amph/Veh" = "Veh_Amph", "Amph/LY354740" = "LY_Amph")
plot_data1$Drug <- fct_recode(plot_data1$Drug, !!!levels)
#
# fillcolours <- c("No Inj" = "#FFFFFF", "Veh" = "#D9D9D9", "1 mg/kg" = "#F4A582" , "10 mg/kg" = "#B2182B")
# fillcolours <- c("Veh/Veh" = "#FFFFFF", "Amph/Veh" = "#4393C3", "Amph/LY354740" = "#252525")
fillcolours <- c("Veh/Veh" = "#FFFFFF", "Veh/LY354740" = "#FFFFFF", "Amph/Veh" = "#FFFFFF", "Amph/LY354740" = "#252525")
linecolours <- c("Veh/Veh" = "#000000", "Veh/LY354740" = "#B2182B", "Amph/Veh" = "#4393C3" , "Amph/LY354740" = "#252525")
Linetypes <- c("Veh/Veh" = "dotted", "Veh/LY354740" = "dotted", "Amph/Veh" = "solid" , "Amph/LY354740" = "solid")
pointshapes <- c("Veh/Veh" = 21, "Veh/LY354740" = 21, "Amph/Veh" = 22 , "Amph/LY354740" = 15)
highlightarea <- data.frame(x = c(0, 0, 6, 6), y = c(0,1500, 1500, 0 ))
#Note to plot the polygon first, you need to create a layer with the aes defined in ggplot(). Then when calling the polygon layer you have to specify that it shouldn't inherit the aes from the ggplot command even though different data are specified
# Plot for fun
Expt3Locoplot_10mins <- ggplot(data = plot_data1, mapping = aes(x = bin10mins, y = activity, group = Drug, colour = Drug, linetype = Drug, shape = Drug, fill = Drug)) +
geom_blank() +
geom_polygon(data=highlightarea, mapping = aes(x = as.numeric(x), y = as.numeric(y)), fill = "gray95", inherit.aes = FALSE) +
stat_summary_bin(fun.data = "mean_se", geom = "line", size = .5) +
stat_summary(fun.data = "mean_se", geom = "errorbar", width = 0.0, size = .3, linetype = 1, show.legend = FALSE) +
stat_summary_bin(fun.data = "mean_se", geom = "point", size = 2) +
# Make Pretty
scale_y_continuous( expand = expansion(mult = c(0, 0))) +
ggtitle("Food Restricted") + xlab("10 mins") + ylab("Total beam breaks") +
theme_cowplot(8) +
theme(plot.title = element_text(hjust = 0.5)) +
theme(plot.title = element_text(size=8)) +
coord_cartesian(ylim = c(0,1500)) +
theme(axis.title.x=element_text(face = "bold")) +
scale_linetype_manual(name = "Drug", values = Linetypes) +
scale_colour_manual(name = "Drug", values = linecolours, aesthetics = c("colour")) +
scale_shape_manual(name = "Drug", values = pointshapes) +
scale_fill_manual(name = "Drug", values = fillcolours) +
theme(legend.key.width=unit(1.5,"line")) +
geom_signif(y_position = c(1400),xmin = c(12.5), xmax = c(18.5), annotation = c("**"), tip_length = c(.0, .0), size = .5, vjust = .5,linetype = 1, colour = "black")
# Expt 4
full_data <- read_csv(here("rawdata", "/LY354740_Expt4_Locomotor_AdLib.csv"))
plot_data2 <- full_data %>%
group_by(Subj, Drug, Amph, LY, Period, bin10mins) %>%
summarise(activity = sum(activity)) %>%
ungroup() %>%
group_by(Subj) %>%
mutate(activity_perc = activity/activity[bin10mins=="0"]) %>%
ungroup() %>%
filter(bin10mins < 13) %>%
mutate(bin10mins = as.factor(bin10mins))
# Re order and rename levels for plotting
plot_data2$Drug <- fct_relevel(plot_data2$Drug, c("Veh_Veh", "Veh_LY", "Veh_Amph", "LY_Amph"))
levels <- c("Veh/Veh" = "Veh_Veh", "Veh/LY354740" = "Veh_LY", "Amph/Veh" = "Veh_Amph", "Amph/LY354740" = "LY_Amph")
plot_data2$Drug <- fct_recode(plot_data2$Drug, !!!levels)
fillcolours <- c("Veh/Veh" = "#FFFFFF", "Veh/LY354740" = "#FFFFFF", "Amph/Veh" = "#FFFFFF", "Amph/LY354740" = "#252525")
linecolours <- c("Veh/Veh" = "#000000", "Veh/LY354740" = "#B2182B", "Amph/Veh" = "#4393C3" , "Amph/LY354740" = "#252525")
Linetypes <- c("Veh/Veh" = "dotted", "Veh/LY354740" = "dotted", "Amph/Veh" = "solid" , "Amph/LY354740" = "solid")
pointshapes <- c("Veh/Veh" = 21, "Veh/LY354740" = 21, "Amph/Veh" = 22 , "Amph/LY354740" = 15)
highlightarea <- data.frame(x = c(0, 0, 6, 6), y = c(0,1500, 1500, 0 ))
#Note to plot the polygon first, you need to create a layer with the aes defined in ggplot(). Then when calling the polygon layer you have to specify that it shouldn't inherit the aes from the ggplot command even though different data are specified
# Plot for fun
Expt4Locoplot_10mins <- ggplot(data = plot_data2, mapping = aes(x = bin10mins, y = activity, group = Drug, colour = Drug, linetype = Drug, shape = Drug, fill = Drug)) +
geom_blank() +
geom_polygon(data=highlightarea, mapping = aes(x = as.numeric(x), y = as.numeric(y)), fill = "gray95", inherit.aes = FALSE) +
stat_summary_bin(fun.data = "mean_se", geom = "line", size = .5) +
stat_summary(fun.data = "mean_se", geom = "errorbar", width = 0.0, size = .3, linetype = 1, show.legend = FALSE) +
stat_summary_bin(fun.data = "mean_se", geom = "point", size = 2) +
# Make Pretty
scale_y_continuous( expand = expansion(mult = c(0, 0))) +
ggtitle("Ad libitum") + xlab("10 mins") + ylab("Total beam breaks") +
theme_cowplot(8) +
theme(plot.title = element_text(hjust = 0.5)) +
theme(plot.title = element_text(size=8)) +
coord_cartesian(ylim = c(0,1500)) +
theme(axis.title.x=element_text(face = "bold")) +
scale_linetype_manual(name = "Drug", values = Linetypes) +
scale_colour_manual(name = "Drug", values = linecolours, aesthetics = c("colour")) +
scale_shape_manual(name = "Drug", values = pointshapes) +
scale_fill_manual(name = "Drug", values = fillcolours) +
theme(legend.key.width=unit(1.5,"line")) +
geom_signif(y_position = c(1400),xmin = c(6.5), xmax = c(12.5), annotation = c("**"), tip_length = c(.0, .0), size = .5, vjust = .5,linetype = 1, colour = "black")
Fig2 <- (Expt3Locoplot_10mins + Expt4Locoplot_10mins) + plot_annotation(tag_levels = 'A') + plot_layout(guides = "collect")
filename = here("figures", "Fig2.png")
ggsave(filename, Fig2, width = 7.20472, height = 4/2, units = "in", dpi = 1200)
filename = here("figures", "Fig2.pdf")
ggsave(filename, Fig2, width = 7.20472, height = 4/2, units = "in")
# Experiment 5 - Amph Hunger manipulation
full_data <- read_csv(here("rawdata", "/LY354740_Expt5_Locomotor_FoodDepAmphDose.csv"))
#####
## 10 min data
plot_data3 <- full_data %>%
group_by(Subj, Feeding, Amph, bin10mins) %>%
summarise(activity = sum(activity)) %>%
ungroup() %>%
group_by(Subj) %>%
mutate(activity_perc = activity/activity[bin10mins=="0"]) %>%
ungroup() %>%
filter(bin10mins < 13) %>%
mutate(bin10mins = as.factor(bin10mins))
# Re order and rename levels for plotting
plot_data3$Amph <- fct_relevel(as.factor(plot_data3$Amph), c("0", "1", "2.5", "5"))
levels <- c("Veh" = "0", "1.0 mg/kg" = "1", "2.5 mg/kg" = "2.5", "5.0 mg/kg" = "5")
plot_data3$Amph <- fct_recode(plot_data3$Amph, !!!levels)
plot_data4 <- full_data %>%
filter(bin60mins < 3 &
bin60mins > 0) %>%
group_by(Subj, Feeding, Amph) %>%
summarise(activity = sum(activity)) %>%
ungroup()
# Re order and rename levels for plotting
plot_data4$Amph <- fct_relevel(as.factor(plot_data4$Amph), c("0", "1", "2.5", "5"))
levels <- c("Veh" = "0", "1.0 mg/kg" = "1", "2.5 mg/kg" = "2.5", "5.0 mg/kg" = "5")
plot_data4$Amph <- fct_recode(plot_data4$Amph, !!!levels)
plot_data4$Feeding <- fct_relevel(as.factor(plot_data4$Feeding), c("Ad Lib", "Food Dep"))
# # Display a specific palette
# display.brewer.pal(n = 11, name = "RdBu")
# # Display hexadecimal colour code of the palette
# brewer.pal(n = 11, name = "RdBu")
# # Red-Blue Palette
# "#67001F" "#B2182B" "#D6604D" "#F4A582" "#FDDBC7" "#F7F7F7" "#D1E5F0" "#92C5DE" "#4393C3" "#2166AC" "#053061"
fillcolours <- c("Veh.Ad Lib" = "#F7F7F7", "1.0 mg/kg.Ad Lib" = "#D1E5F0", "2.5 mg/kg.Ad Lib" = "#4393C3", "5.0 mg/kg.Ad Lib" = "#053061", "Veh.Food Dep" = "#F7F7F7", "1.0 mg/kg.Food Dep" = "#D1E5F0", "2.5 mg/kg.Food Dep" = "#4393C3", "5.0 mg/kg.Food Dep" = "#053061")
linecolours <- c("Veh.Ad Lib" = "#053061", "1.0 mg/kg.Ad Lib" = "#053061", "2.5 mg/kg.Ad Lib" = "#053061", "5.0 mg/kg.Ad Lib" = "#053061", "Veh.Food Dep" = "#053061", "1.0 mg/kg.Food Dep" = "#053061", "2.5 mg/kg.Food Dep" = "#053061", "5.0 mg/kg.Food Dep" = "#053061")
Linetypes <- c("Veh.Ad Lib" = "solid", "1.0 mg/kg.Ad Lib" = "solid", "2.5 mg/kg.Ad Lib" = "solid", "5.0 mg/kg.Ad Lib" = "solid", "Veh.Food Dep" = "dotted", "1.0 mg/kg.Food Dep" = "dotted", "2.5 mg/kg.Food Dep" = "dotted", "5.0 mg/kg.Food Dep" = "dotted")
pointshapes <- c("Veh.Ad Lib" = 21, "1.0 mg/kg.Ad Lib" = 21, "2.5 mg/kg.Ad Lib" = 21, "5.0 mg/kg.Ad Lib" = 21, "Veh.Food Dep" = 22, "1.0 mg/kg.Food Dep" = 22, "2.5 mg/kg.Food Dep" = 22, "5.0 mg/kg.Food Dep" = 22)
highlightarea <- data.frame(x = c(0, 0, 6, 6), y = c(0,6000, 6000, 0 ))
#Note to plot the polygon first, you need to create a layer with the aes defined in ggplot(). Then when calling the polygon layer you have to specify that it shouldn't inherit the aes from the ggplot command even though different data are specified
# Plot for fun
Expt5Locoplot_10mins <- ggplot(data = plot_data3, mapping = aes(x = bin10mins, y = activity, group = interaction(Amph,Feeding), colour = interaction(Amph,Feeding), linetype = interaction(Amph,Feeding), shape = interaction(Amph,Feeding), fill = interaction(Amph, Feeding))) +
geom_blank() +
geom_polygon(data=highlightarea, mapping = aes(x = as.numeric(x), y = as.numeric(y)), fill = "gray95", inherit.aes = FALSE) +
stat_summary_bin(fun.data = "mean_se", geom = "line", size = .5) +
stat_summary(fun.data = "mean_se", geom = "errorbar", width = 0.0, size = .3, linetype = 1, show.legend = FALSE) +
stat_summary_bin(fun.data = "mean_se", geom = "point", size = 2) +
# Make Pretty
scale_y_continuous(expand = expansion(mult = c(0, 0))) +
ggtitle("Amphetamine") + xlab("10 mins") + ylab("Total beam breaks") +
theme_cowplot(8) +
theme(plot.title = element_text(hjust = 0.5)) +
theme(plot.title = element_text(size=8)) +
coord_cartesian(ylim = c(0,6000)) +
theme(axis.title.x=element_text(face = "bold")) +
theme(strip.background = element_rect(fill=NA )) +
scale_linetype_manual(name = "", values = Linetypes) +
scale_colour_manual(name = "", values = linecolours, aesthetics = c("colour")) +
scale_shape_manual(name = "", values = pointshapes) +
scale_fill_manual(name = "", values = fillcolours) +
theme(legend.key.width=unit(1.5,"line"))
fillcolours <- c("Ad Lib" = "#FFFFFF",
"Food Dep" = "#4393C3")
linecolours <- c("Ad Lib" = "#4393C3",
"Food Dep" = "#4393C3")
Linetypes <- c("Ad Lib" = "solid",
"Food Dep" = "solid")
Expt5SumPlot <- plot_data4 %>%
ggplot(mapping = aes(x = Amph, y = activity, group = interaction(Amph,Feeding), fill = Feeding, colour = Feeding, linetype = Feeding)) +
stat_summary_bin(fun.data = "mean_se", geom = "bar", position = "dodge", colour="black", size = .5) +
stat_summary(fun.data = "mean_se", geom = "errorbar", position = position_dodge(width = 0.9), colour="black", width = 0, size = .5, linetype = "solid") +
# Make Pretty
scale_y_continuous( expand = expansion(mult = c(0, 0))) +
ggtitle("") + xlab("Amphetamine") + ylab("Total beam breaks \n (120 mins)") +
theme_cowplot(8) +
theme(plot.title = element_text(hjust = 0.5)) +
theme(plot.title = element_text(size=8)) +
coord_cartesian(ylim = c(0, 60000)) +
theme(axis.title.x=element_text(face = "bold")) +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
scale_linetype_manual(name = "**", values = Linetypes) +
scale_colour_manual(name = "**", values = linecolours, aesthetics = c("colour")) +
scale_fill_manual(name = "**", values = fillcolours) +
geom_signif(y_position = c(48000, 52000, 56000, 46000),xmin = c("Veh","Veh","Veh","1.0 mg/kg"), xmax = c("1.0 mg/kg","2.5 mg/kg", "5.0 mg/kg","2.5 mg/kg"), annotation = c("**", "**", "**", "**"), tip_length = c(.01, 0.01), size = .5, vjust = .5, colour = "black")
# contrast estimate ci statistic p.value
# 0_1 0 - 1 -1,284.99 $[-2,182.11$, $-387.86]$ -3.77 .002
# 0_25 0 - 2.5 -2,633.72 $[-3,530.85$, $-1,736.59]$ -7.73 < .001
# 0_5 0 - 5 -1,975.52 $[-2,872.65$, $-1,078.40]$ -5.80 < .001
# 1_25 1 - 2.5 -1,348.73 $[-2,178.25$, $-519.21]$ -4.28 < .001
# 1_5 1 - 5 -690.54 $[-1,520.06$, $138.98]$ -2.19 .136
# 25_5 2.5 - 5 658.19 $[-171.33$, $1,487.72]$ 2.09 .167
# Experiment 5 - Amph Hunger manipulation - Blood amphetamine levels
full_data <- read_csv(here("rawdata", "/LY354740_Expt5_DBS_FoodDepAmphDose.csv"))
#####
## 1st half data
plot_data5 <- full_data %>%
filter(
Time_hrs < 2
)
# Re order and rename levels for plotting
plot_data5$Amph <- fct_relevel(as.factor(plot_data5$Amph), c("1", "2.5", "5"))
levels <- c("1.0 mg/kg" = "1", "2.5 mg/kg" = "2.5", "5.0 mg/kg" = "5")
plot_data5$Amph <- fct_recode(plot_data5$Amph, !!!levels)
plot_data5$Feeding <- fct_relevel(as.factor(plot_data5$Feeding), c("Ad Lib", "Food Dep"))
plot_data5$Time_hrs <- fct_relevel(as.factor(plot_data5$Time_hrs), c("0.25", "0.5", "1"))
levels <- c("15" = "0.25", "30" = "0.5", "60" = "1")
plot_data5$Time_hrs <- fct_recode(plot_data5$Time_hrs, !!!levels)
fillcolours <- c("1.0 mg/kg.Ad Lib" = "#D1E5F0", "2.5 mg/kg.Ad Lib" = "#4393C3", "5.0 mg/kg.Ad Lib" = "#053061", "1.0 mg/kg.Food Dep" = "#D1E5F0", "2.5 mg/kg.Food Dep" = "#4393C3", "5.0 mg/kg.Food Dep" = "#053061")
linecolours <- c("1.0 mg/kg.Ad Lib" = "#053061", "2.5 mg/kg.Ad Lib" = "#053061", "5.0 mg/kg.Ad Lib" = "#053061", "1.0 mg/kg.Food Dep" = "#053061", "2.5 mg/kg.Food Dep" = "#053061", "5.0 mg/kg.Food Dep" = "#053061")
Linetypes <- c("1.0 mg/kg.Ad Lib" = "solid", "2.5 mg/kg.Ad Lib" = "solid", "5.0 mg/kg.Ad Lib" = "solid", "1.0 mg/kg.Food Dep" = "dotted", "2.5 mg/kg.Food Dep" = "dotted", "5.0 mg/kg.Food Dep" = "dotted")
pointshapes <- c("1.0 mg/kg.Ad Lib" = 21, "2.5 mg/kg.Ad Lib" = 21, "5.0 mg/kg.Ad Lib" = 21, "1.0 mg/kg.Food Dep" = 22, "2.5 mg/kg.Food Dep" = 22, "5.0 mg/kg.Food Dep" = 22)
highlightarea <- data.frame(x = c(0, 0, 6, 6), y = c(0,6000, 6000, 0 ))
#Note to plot the polygon first, you need to create a layer with the aes defined in ggplot(). Then when calling the polygon layer you have to specify that it shouldn't inherit the aes from the ggplot command even though different data are specified
# Plot for fun
Expt5DBSplot <- ggplot(data = plot_data5, mapping = aes(x = as.factor(Time_hrs), y = nM, group = interaction(Amph,Feeding), colour = interaction(Amph,Feeding), linetype = interaction(Amph,Feeding), shape = interaction(Amph,Feeding), fill = interaction(Amph, Feeding))) +
stat_summary_bin(fun.data = "mean_se", geom = "line", size = .5) +
stat_summary(fun.data = "mean_se", geom = "errorbar", width = 0.0, size = .5, linetype = 1, show.legend = FALSE) +
stat_summary_bin(fun.data = "mean_se", geom = "point", size = 2) +
# Make Pretty
scale_y_continuous(expand = expansion(mult = c(0, 0))) +
ggtitle("Amphetamine") + xlab("Mins") + ylab("Concentration (nM)") +
theme_cowplot(8) +
theme(plot.title = element_text(hjust = 0.5)) +
theme(plot.title = element_text(size=8)) +
coord_cartesian(ylim = c(0,8000)) +
theme(axis.title.x=element_text(face = "bold")) +
theme(strip.background = element_rect(fill=NA )) +
scale_linetype_manual(name = "", values = Linetypes) +
scale_colour_manual(name = "", values = linecolours, aesthetics = c("colour")) +
scale_shape_manual(name = "", values = pointshapes) +
scale_fill_manual(name = "", values = fillcolours) +
theme(legend.key.width=unit(1.5,"line"))
## Supplementary Figure 2
FigS2 <- Expt5Locoplot_10mins / (Expt5SumPlot + Expt5DBSplot) + plot_annotation(tag_levels = 'A')
filename = here("figures", "FigS2.png")
ggsave(filename, FigS2, width = 5.51181, height = 4, units = "in", dpi = 1200)
filename = here("figures", "FigS2.pdf")
ggsave(filename, FigS2, width = 5.51181, height = 4, units = "in")
#
# # R Brewer colour package
# # Display all colour blind friendly palettes
# display.brewer.all(colorblindFriendly = TRUE)
# # Display a specific palette
# display.brewer.pal(n = 11, name = "RdBu")
# # Display hexadecimal colour code of the palette
# brewer.pal(n = 11, name = "RdBu")
# # Red-Blue Palette
# "#67001F" "#B2182B" "#D6604D" "#F4A582" "#FDDBC7" "#F7F7F7" "#D1E5F0" "#92C5DE" "#4393C3" "#2166AC" "#053061"
#
# # Grey Palette
# display.brewer.pal(n = 6, name = "Greys")
# brewer.pal(n = 6, name = "Greys")
# "#F7F7F7" "#D9D9D9" "#BDBDBD" "#969696" "#636363" "#252525"
|
9ec38cf7e26a30de2d80770dac1931219957a597 | 43dbb0b85d72c75f262a87ef80865a16f54e75d8 | /graficas.R | f49c5da0b70e759432adc909f367921ea9a02e40 | [] | no_license | alfcar9/proy_SOA | 0880033ff137986f05c285730f451e257020c2c6 | 3146fa06b4dff0c3e098be254ccf2ddf6eb87319 | refs/heads/master | 2021-07-19T20:53:46.888396 | 2017-10-28T02:02:19 | 2017-10-28T02:02:19 | 107,810,114 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,920 | r | graficas.R | setwd("~/Desktop/1er_Semestre/Sistemas Opera/Proy_SOA")
## graficas.R - throughput, fairness, and latency plots for the SOA project.
##
## Fix: `cbPalette` was originally defined *after* the first plot that uses
## it; ggplot2's scale_colour_manual() forces its `values` argument when the
## scale is built, so sourcing the script top-to-bottom failed with
## "object 'cbPalette' not found". The palette definition is now hoisted
## above g1; all other statements are unchanged.
library(tidyverse)

## ---- Data load and clean-up ----
datos <- read_csv("~/Desktop/1er_Semestre/Sistemas Opera/Proy_SOA/datos2.csv")
#datos <- read_delim("~/Desktop/1er_Semestre/Sistemas Opera/Proy_SOA/datos.csv", "\t", escape_double = FALSE, trim_ws = TRUE)
#datos <- read_delim("~/Desktop/1er_Semestre/Sistemas Opera/Proy_SOA/datos2.csv", "\t", escape_double = FALSE, trim_ws = TRUE)
colnames(datos) <- c("corrida", "servidor", "tiempo_proces", "cliente", "time_stamp", "tasa", "polling")
datos <- datos %>% select(-servidor)
ndatos <- nrow(datos)
## Re-base timestamps at zero and compute inter-arrival differences;
## differences outside (0, 1000] are treated as missing.
datos$time_stamp <- datos$time_stamp - min(datos$time_stamp, na.rm = TRUE)
datos <- datos %>% mutate(dif_tiempos = c(NA, datos$time_stamp[2:ndatos] - datos$time_stamp[1:(ndatos-1)]))
datos <- datos %>% mutate(dif_tiempos = ifelse(dif_tiempos >1000 | dif_tiempos < 0, NA, dif_tiempos))

## Colour-blind-friendly palette shared by all plots (must precede g1).
cbPalette <- c("#a9a9a9", "#a9a9a9", "#a9a9a9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")

## ---- Throughput (g1) ----
## Observed output rate per polling mode and input rate; three reference
## rows are appended by hand for the 100000 setting.
throughput_df <- datos %>% group_by(polling, tasa) %>% summarise(tasa_out=max(corrida)/100)
throughput_df[nrow(throughput_df)+1,] <- c("Si",100000, 200)
throughput_df[nrow(throughput_df)+1,] <- c("No",100000, 0)
throughput_df[nrow(throughput_df)+1,] <- c("Óptimo/Fatal, resp.",100000, 0)
throughput_df$tasa <- as.double(throughput_df$tasa)
throughput_df$tasa_out <- as.double(throughput_df$tasa_out)
throughput_df <- throughput_df %>% mutate(tasainp = 101000/tasa )
throughput_df[13,3] <- 5110
throughput_df <- throughput_df %>% mutate(tasa_df = (tasainp - tasa_out))
throughput_df <- throughput_df %>% gather(metodo, valor, -tasainp, -tasa, -polling)
throughput_df <- throughput_df %>% mutate(metodo = ifelse(metodo == "tasa_out", "Tasa de sí recibidos", "Tasa no recibidos"))
g1 <- ggplot(data = throughput_df, aes(x = tasainp, y = valor, col = polling, shape = polling)) + geom_line(size = 0.3) +
  geom_point(size = 2) + labs(title = "Rendimiento", x = "Tasa de paquetes recibidos (pkts/seg)", y = "Tasa de paquetes procesados (pkts/seg)") + theme_bw() +
  ylim(0, 180) + geom_vline(xintercept = 0) + geom_hline(yintercept = 0) + facet_wrap(~metodo) +
  geom_abline(slope = 1) + scale_color_manual(values=cbPalette)
g1

## ---- Fairness across clients (g2) ----
## Share of attention each of the 101 clients received, overall and split by
## polling mode; bars within 0.15 pp of the uniform share are marked "Justo".
prop1 <- sort(sapply(1:101, function(i) datos %>% filter(cliente==i) %>% nrow()))
prop1 <- 100*(prop1/sum(prop1))
prop2 <- sort(sapply(1:101, function(i) datos %>% filter(polling == "Si", cliente==i) %>% nrow()))
prop2 <- 100*(prop2/sum(prop2))
prop3 <- sort(sapply(1:101, function(i) datos %>% filter(polling == "No", cliente==i) %>% nrow()))
prop3 <- 100*(prop3/sum(prop3))
prop <- c(prop1, prop2 ,prop3)
Atención <- factor(c(ifelse(abs(prop1-100/101) < .15, "Justo", "Injusto"),
                     ifelse(abs(prop2-100/101) < .15, "Justo", "Injusto"),
                     ifelse(abs(prop3-100/101) < .15, "Justo", "Injusto")))
clientes_df <- data_frame(cliente = rep(1:101, 3), prop = prop, Atención = Atención, Experimento = rep(c("Ambas","Con Poleo","Sin Poleo"), each = 101))
g2 <- ggplot(clientes_df, aes(x = cliente, y = prop, fill = Atención)) +
  geom_bar(stat="identity", width = 1, col = 'white', size = 0.01 ) +
  scale_y_continuous(breaks = round(seq(0, (max(prop) + 0.02), by = 0.1), 2)) +
  scale_fill_manual(values=cbPalette) +
  labs(title = "Atención por Clientes") + xlab("Clientes") + ylab("Proporción de atencion por Clientes %") +
  theme_bw() + facet_wrap(~Experimento)
g2

## ---- Latency (g3) ----
## Mean processing latency per polling mode and input rate.
throughput_df <- datos %>% group_by(polling, tasa) %>% summarise(latencia=mean(dif_tiempos, na.rm = TRUE))
throughput_df <- throughput_df %>% mutate(tasainp = 10100000/tasa)
g3 <- ggplot(data = throughput_df, aes(x = tasainp/100, y = latencia/1000, col = polling, shape = polling)) + geom_line(size = 0.3) +
  geom_point(size = 3) + labs(title = "Latencia", x = "Tasa de paquetes recibidos (pkts/seg)", y = "Latencia promedio en seg de procesamiento") +
  theme_bw() + scale_color_manual(values=cbPalette)
g3
|
a0186397c35a850020acb2711af0bc46274428e4 | ab48b43cac52dcc402d41e2bccabb8b343ddc7de | /bin/runVision | 32bf35bb914ade7d1704fd865f0a51d961630a97 | [] | no_license | deto/dotfiles | 06acbf6d92821102424bb2fd834fc64a4b757dac | 43d0a0c57890ccda0673bdf7a9d117a033a00a71 | refs/heads/master | 2023-08-04T13:47:18.590876 | 2020-03-07T01:30:29 | 2020-03-07T01:30:29 | 41,401,958 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 331 | runVision | #!/usr/bin/env Rscript
# Launch a VISION results viewer for a saved analysis object.
# Usage: runVision <rds-file> <port> [name]
# Unset DISPLAY so no X11 device is opened on headless servers.
Sys.setenv(DISPLAY="")
library(VISION)
# Positional command-line arguments: path to the saved Vision object,
# TCP port to serve on, and an optional session name.
args <- commandArgs(TRUE)
filename <- args[1]
port <- strtoi(args[2])
if (length(args) > 2){
    name <- args[3]
} else {
    name <- NULL
}
vis <- readRDS(filename)
# Allow up to 10 parallel worker processes for the viewer.
options(mc.cores=10)
# Bind to all interfaces so the viewer is reachable remotely; do not try
# to open a local browser.
viewResults(vis, host = "0.0.0.0", port = port, browser = FALSE, name=name)
| |
9fd42ace596853aaf588283b14dae002bdc09edd | 124ce63e1fbb922ed991c792a7eabee4fa4276cb | /Importação de dados.md | 81e94b84a949223c8eefbe53c0276f091130c107 | [] | no_license | analise-viz-dados-1sem-2020/hw99-analise-viz-dados-grupo02 | 38d1902fb893b70700ab02038b97d603f77aab93 | c3a3123bc0c68800f7a55b13a9c4935bafae6f3c | refs/heads/master | 2022-12-04T11:31:04.192033 | 2020-08-06T23:04:21 | 2020-08-06T23:04:21 | 277,394,971 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 113 | md | Importação de dados.md | library(readr)
TarifaMediaFornecimento <- read_csv("TarifaMediaFornecimento.csv")
View(TarifaMediaFornecimento)
|
3d4ce9c3841778fecf075d162ab45bb55d1df24b | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/Ryacas0/man/getSyms.Rd | 1edec0d43ece67999b58b7d7dabd6ce13a94f417 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 489 | rd | getSyms.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Sym2.R
\name{getSyms}
\alias{getSyms}
\title{List \code{Sym()} objects}
\usage{
getSyms(all.names = FALSE)
}
\arguments{
\item{all.names}{a logical value. If \code{TRUE}, all object names are returned. If \code{FALSE}, names which begin with a \code{.} are omitted.}
}
\description{
Lists all \code{Sym()} objects in the global environment (\code{.GlobalEnv})
}
\examples{
getSyms()
xs <- Sym("x")
getSyms()
}
|
50c543d8b33588e7ff0f466d2568f7fef5cc0d28 | f020db9cb67886a11c1b8721b04af1ba22136ed5 | /tests/testthat/test-repos.R | fab2775b7cd9bbde1f1db233312aea3abdb59230 | [] | no_license | slopp/renv | 2b5ac42ae89682f6cdb022221b53530679484982 | fd88fbe2d95a0f417a7fae4f188acdeb99691798 | refs/heads/master | 2020-04-20T22:14:33.890121 | 2019-07-04T06:53:31 | 2019-07-04T06:53:31 | 169,133,285 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 243 | r | test-repos.R |
context("Repositories")
test_that("we can query our local repository during tests", {
expected <- list.files("packages")
renv_tests_scope()
ap <- renv_available_packages(type = "source")[[1]]
expect_setequal(ap$Package, expected)
})
|
284a1111f3a1a33ec954cac52372d2c34487e673 | f54619014ace99deda9818b0fca9e72ce5a807e8 | /cachematrix.R | 311b0eeef2b9372039a08cbece294bf83561ae38 | [] | no_license | mathurabhay/ProgrammingAssignment2 | 8940da1673142504cbe539623249fdf2bfaec22d | 09914493969061fc099b30ee1f2221c472c2bdb9 | refs/heads/master | 2020-12-24T13:27:55.601715 | 2014-08-24T07:59:15 | 2014-08-24T07:59:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,448 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do.
## makeCacheMatrix creates an R object holding four functions:
## 1. set the matrix value
## 2. get the matrix value
## 3. set the inverse
## 4. get the inverse
## Its purpose is to store a matrix together with its cached inverse,
## so the inverse only has to be computed once.
## makeCacheMatrix: create a special "matrix" object that can cache its
## inverse.
##
## m - the matrix to wrap (defaults to an empty 1x1 matrix)
##
## Returns a list of four accessor functions closing over `m` and `im`
## (the cached inverse):
##   setmatrixvalue(ymat) - store a new matrix and invalidate the cache
##   get()                - return the stored matrix
##   setinverse(solve)    - store a computed inverse in the cache
##   getinverse()         - return the cached inverse (NULL if not yet set)
makeCacheMatrix <- function(m = matrix()) {
  ## Cached inverse; NULL means "not computed yet".
  im <- NULL
  ## Replace the stored matrix and reset the cache. Bug fix: the original
  ## wrote `im <- null`, which references an undefined object and, even as
  ## NULL, would only bind a local variable — leaving a stale inverse in
  ## the cache. Superassignment (`<<-`) updates the enclosing environment.
  setmatrixvalue <- function(ymat) {
    m <<- ymat
    im <<- NULL
  }
  get <- function() m
  setinverse <- function(solve) im <<- solve
  getinverse <- function() im
  list(setmatrixvalue = setmatrixvalue, get = get,
       setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve functions takes a matrix a returns its inverse. This is done by using solve function in R
## cachesolve first checks if inverse of matrix is already calculated, if so it returns the cached value else
## it would freshly calculate the inverse of the matrix
## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix, computing it only on the first call.
##
## x   - object created by makeCacheMatrix; assumed to hold an invertible
##       square matrix
## ... - further arguments passed on to solve()
##
## Returns the inverse of the stored matrix. On a cache hit a message is
## emitted and the stored inverse is returned without recomputation.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  im <- x$getinverse()
  if (!is.null(im)) {
    message("getting inverse matrix")
    return(im)
  }
  data <- x$get()
  ## Bug fix: solve(data) alone is the inverse; the original computed
  ## solve(data) %*% data, which is just the identity matrix.
  im <- solve(data, ...)
  x$setinverse(im)
  im
}
|
5cbae83e7077a10c474bc42247ff10708414db14 | 09c8652e27a002177042556c2be2b24654d949b0 | /exp. 4 4 cause (3 ratings)/exp_3_four_cause.R | 015144de906289b127e8f19589879b2c834fe2f7 | [] | no_license | dtbenton/backwards_block_submission | c2347e0364447d203080db2766f321a118f0c6cd | 786c9c0bd8ede7d4b0c7ed26600addc8e2582abe | refs/heads/master | 2022-02-17T11:00:30.636309 | 2019-09-11T21:01:54 | 2019-09-11T21:01:54 | 146,052,115 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 28,492 | r | exp_3_four_cause.R | ########################################################
########################################################
########################################################
############# #############
############# EXPERIMENT 4 SCRIPT #############
############# #############
########################################################
########################################################
########################################################
# load all relevant libraries:
library(lme4)
library(nlme)
library(boot)
library(car)
library(reshape2)
library(ggplot2)
library(ez)
library(plyr)
library(ggsignif)
library(lsr)
library(sjmisc)
library(sjstats)
library(BayesFactor)
options(scipen=9999)
# DATA CLEAN UP AND RESTRUCTURING #
D = read.csv(file.choose(), header = TRUE, stringsAsFactors = FALSE)
D = D[c(1:20),]
D_tall = reshape(D, varying = 4:51, v.names = "measure", timevar = "condition",
idvar = "ID",
direction = "long")
D_tall$measure = as.numeric(D_tall$measure)
D_tall$sex = as.factor(D_tall$sex)
D_tall$condition = as.factor(D_tall$condition)
D_tall = D_tall[order(D_tall$ID),]
# ADD A CONDITION NAME COLUMN
D_tall$condition_names = as.factor(rep(1:4, each = 12, times = 20))
D_tall$condition_names = revalue(x = as.factor(D_tall$condition_names),
c("1" = "BB", "2"="IS", "3" = "1C",
"4" = "2C"))
# ADD A CONDITION ORDER COLUMN
D_tall$condition_order = revalue(x = as.factor(D_tall$condition),
c("1" = "1234", "2"="2413", "3"="3142", "4"="4321"))
# ADD A 'PHASE' COLUMN
D_tall$phase = as.factor(rep(1:3, each = 4, times = 80))
D_tall$phase = revalue(x = as.factor(D_tall$phase),
c("1" = "Pre", "2"="Mid", "3" = "Post"))
# RENAME SEX COLUMN
D_tall$sex = revalue(x = as.factor(D_tall$sex),
c("1" = "M", "2"="F"))
# OBJECT COLUMN
D_tall$objects = as.factor(rep(1:4, times = 240))
D_tall$objects = revalue(x = as.factor(D_tall$objects),
c("1" = "A", "2"="B", "3"="C", "4"="D"))
# REORDER COLUMNS'
D_tall$condition = NULL
D_tall$row.names = NULL
D_tall = D_tall[,c(1,2,3,5,6,7,4)]
########################################################
############# #############
############# Assumption Checks #############
############# #############
########################################################
# NORMALITY CHECK
# plot norm plots for each condition
par(mfrow=c(2,2))
for (ii in c("BB","IS","1C","2C"))hist(D_tall$measure[D_tall$condition_names==ii], breaks=5)
par(mfrow=c(1,1))
# get p-values for multi-variate norm test
# Shapiro-Wilk normality p-values, one per condition.
# Bug fix: the original preallocated an *unnamed* numeric vector of length 4
# and then assigned into it by character index ("BB", ...), which appends
# four new named elements and leaves the original four zeros in place
# (final length 8). Preallocating a named vector makes shapiro.ps[i]
# address real slots.
conds = c("BB","IS","1C","2C")
shapiro.ps = setNames(rep(NA_real_, length(conds)), conds)
for(i in conds) {
  shap.calc = shapiro.test(D_tall$measure[D_tall$condition_names==i])
  shapiro.ps[i] = shap.calc$p.value
}
# EQUAL VARIANCE CHECK
#box plots
boxplot(D_tall$measure~D_tall$condition_names)
# formal test of equal variance
leveneTest(D_tall$measure, as.factor(D_tall$condition_names), center=median) # used 'median' because it's a better measure of central tendency given the non-normality
# ASSUMPTION CHECK SUMMARY
# Based on the analyses above, there is a clear violation of the multi-variate normality and
# the homoskedasticity assumptions.
# Violations were indicated by a p-value of less than .005 for 22 of the 24 tests.
# Conventional parametric tests, therefore, are not appropriate, and so subsequent
# confidence intervals will be estimated using boostrapping and p-values will be
# obtained using permutation testing. Planned comparisons were also conducted using
# permutation tests.
########################################################
########################################################
########################################################
############# #############
############# Models #############
############# #############
########################################################
########################################################
########################################################
########################
### GLOBAL FUNCTIONS ###
########################
# PERMUTATION FUNCTION
# Permutation test for the mean difference between two cells of D_tall
# (condition s1 / phase p1 / object o1  vs  condition s2 / phase p2 / object o2).
# All six arguments are character labels, so callers must quote them.
# Returns c(observed difference,
#           two-sided p, complement of the two-sided p,
#           one-sided p (greater), one-sided p (less)).
# NOTE: the null distribution shuffles the full measure column, exactly as
# in the original analysis; only the p-value computation is corrected here.
perm_func = function(s1,s2,p1,p2,o1,o2){ # s=condition name; p=phase; o=objects
  set.seed(2018)    # reproducible permutations
  n_perm = 4000
  b = rep(0, n_perm)
  for(i in 1:n_perm){
    x = sample(D_tall$measure)    # shuffle ratings across all cells
    dif = x[D_tall$condition_names==s1 & D_tall$phase==p1 & D_tall$objects==o1] -
      x[D_tall$condition_names==s2 & D_tall$phase==p2 & D_tall$objects==o2]
    b[i] = mean(dif)
  }
  # observed mean difference between the two cells
  bb_diff = mean(D_tall$measure[D_tall$condition_names==s1 & D_tall$phase==p1 & D_tall$objects==o1]-
                   D_tall$measure[D_tall$condition_names==s2 & D_tall$phase==p2 & D_tall$objects==o2])
  # BUG FIX: a two-sided p-value must compare |b| against |bb_diff|.
  # The old code compared |b| against the *signed* bb_diff, which returns
  # p = 1 whenever the observed difference is negative (see the recorded
  # outputs of exactly 1.000 / 0.000 throughout this script).
  c(bb_diff,
    sum(abs(b) >= abs(bb_diff))/n_perm,   # two-sided p
    sum(abs(b) <  abs(bb_diff))/n_perm,   # complement of the two-sided p
    sum(b > bb_diff)/n_perm,              # one-sided p (greater)
    sum(b < bb_diff)/n_perm)              # one-sided p (less)
}
# BOOTSTRAP FUNCTION
# Single-factor bootstrap function
# Bootstrap mean and normal-approximation 95% CI for one cell of D_tall
# (condition s, phase p, object o). Returns c(estimate, lower, upper).
global_boot = function(s,p,o){
  set.seed(2018)    # reproducible resamples
  # Statistic: mean rating of the selected cell within the resampled rows.
  # FIX: the old version passed a meaningless `data = D_tall` argument to
  # mean() (mean() has no such argument; it was silently swallowed by `...`)
  # and declared an unused `formula` parameter.
  boot_fit = function(data, b){
    d = data[b, ]
    mean(d$measure[d$condition_names==s & d$phase==p & d$objects==o])
  }
  boot_obj = boot(D_tall, boot_fit, R=4000)
  c(boot_obj$t0,
    boot_obj$t0 - 1.96*sd(boot_obj$t),
    boot_obj$t0 + 1.96*sd(boot_obj$t))
}
# BOOTSTRAP FUNCTION
# Condition-difference bootstrap function
# Bootstrap estimate and normal-approximation 95% CI for the *difference*
# between two cells of D_tall. Returns c(estimate, lower, upper).
global_boot_2 = function(s1,s2,p1,p2,o1,o2){
  set.seed(2018)    # reproducible resamples
  # Statistic: difference of the two cell means within the resampled rows.
  # FIX: removed the meaningless `data = D_tall` arguments to mean() and
  # the unused `formula` parameter (same defects as in global_boot).
  boot_fit = function(data, b){
    d = data[b, ]
    mean(d$measure[d$condition_names==s1 & d$phase==p1 & d$objects==o1]) -
      mean(d$measure[d$condition_names==s2 & d$phase==p2 & d$objects==o2])
  }
  boot_obj = boot(D_tall, boot_fit, R=4000)
  c(boot_obj$t0,
    boot_obj$t0 - 1.96*sd(boot_obj$t),
    boot_obj$t0 + 1.96*sd(boot_obj$t))
}
############################
### PRELIMINARY ANALYSES ###
############################
# DETERMINING WHETHER 'SEX' OR 'TEST TRIAL ORDER' INTERACTED WITH ANY OF THE
# REMAINING FACTORS.
prelim_analysis = lme(measure~(sex+condition_names+objects+phase)^4,
random=~1|ID,
data=D_tall)
anova.lme(prelim_analysis)
########################################################
#### CONTROL CONDITITION ANALYSES ####
########################################################
# NOTE THAT FORMAL ANALYSIS WERE NOT INCLUDED IN THE MANUSCRIPT FOR EXPERIMENT 3.
# HOWEVER, THE CODE WILL BE KEPT HERE IN CASE I'M REQUIRED TO REPORT THEM IN THE
# REVIEW OF THE MS.
#####################################################################################
# CONDITION (1C vs 2C) x OBJECT (A vs B) x PHASE (Pre vs Mid vs Post) OMNIBUS ANOVA #
#####################################################################################
# create a data frame in which the 1C condition is subsetted
one__and_two_cause_subset = subset(D_tall, ! condition_names %in% c("BB","IS")) # creating a smaller
# data set by removing the
# BB and IS conditions.
# 1C condition
lme_one__and_two_cause_subset = lme(measure~(condition_names+phase+objects)^3,
random=~1|ID,
data=one__and_two_cause_subset)
# omnibus ANOVA
anova.lme(lme_one__and_two_cause_subset)
#######################
# ONE-CAUSE CONDITION #
#######################
#### A RATINGS AND MEASURES ####
# Apre:
global_boot("1C","Pre","A")
# Amid:
global_boot("1C","Mid","A")
# Apost:
global_boot("1C","Post","A")
# Apre vs Amid
perm_func("1C","1C","Pre","Mid","A","A")
global_boot_2("1C","1C","Pre","Mid","A","A")
# Apre vs Apost
perm_func("1C","1C","Pre","Post","A","A")
global_boot_2("1C","1C","Pre","Post","A","A")
# Amid vs Apost
perm_func("1C","1C","Mid","Post","A","A")
global_boot_2("1C","1C","Mid","Post","A","A")
#### B RATINGS AND MEASURES ####
# Bpre:
global_boot("1C","Pre","B")
# Bmid:
global_boot("1C","Mid","B")
# Bpost:
global_boot("1C","Post","B")
# Bpre vs Bmid
perm_func("1C","1C","Pre","Mid","B","B")
global_boot_2("1C","1C","Pre","Mid","B","B")
# Bpre vs Bpost
perm_func("1C","1C","Pre","Post","B","B")
global_boot_2("1C","1C","Pre","Post","B","B")
# Bmid vs Bpost
perm_func("1C","1C","Mid","Post","B","B")
global_boot_2("1C","1C","Mid","Post","B","B")
#### C RATINGS AND MEASURES ####
# Cpre:
global_boot("1C","Pre","C")
# Cmid:
global_boot("1C","Mid","C")
# Cpost:
global_boot("1C","Post","C")
# Cpre vs Cmid
perm_func("1C","1C","Pre","Mid","C","C")
global_boot_2("1C","1C","Pre","Mid","C","C")
# Cpre vs Cpost
perm_func("1C","1C","Pre","Post","C","C")
global_boot_2("1C","1C","Pre","Post","C","C")
# Cmid vs Cpost
perm_func("1C","1C","Mid","Post","C","C")
global_boot_2("1C","1C","Mid","Post","C","C")
#######################
# TWO-CAUSE CONDITION #
#######################
#### A RATINGS AND MEASURES ####
# Apre:
global_boot("2C","Pre","A")
# Amid:
global_boot("2C","Mid","A")
# Apost:
global_boot("2C","Post","A")
# Apre vs Amid
perm_func("2C","2C","Pre","Mid","A","A")
global_boot_2("2C","2C","Pre","Mid","A","A")
# Apre vs Apost
perm_func("2C","2C","Pre","Post","A","A")
global_boot_2("2C","2C","Pre","Post","A","A")
# Amid vs Apost
perm_func("2C","2C","Mid","Post","A","A")
global_boot_2("2C","2C","Mid","Post","A","A")
#### B RATINGS AND MEASURES ####
# Bpre:
global_boot("2C","Pre","B")
# Bmid:
global_boot("2C","Mid","B")
# Bpost:
global_boot("2C","Post","B")
# Bpre vs Bmid
perm_func("2C","2C","Pre","Mid","B","B")
global_boot_2("2C","2C","Pre","Mid","B","B")
# Bpre vs Bpost
perm_func("2C","2C","Pre","Post","B","B")
global_boot_2("2C","2C","Pre","Post","B","B")
# Bmid vs Bpost
perm_func("2C","2C","Mid","Post","B","B")
global_boot_2("2C","2C","Mid","Post","B","B")
#### C RATINGS AND MEASURES ####
# Cpre:
global_boot("2C","Pre","C")
# Cmid:
global_boot("2C","Mid","C")
# Cpost:
global_boot("2C","Post","C")
# Cpre vs Cmid
perm_func("2C","2C","Pre","Mid","C","C")
global_boot_2("2C","2C","Pre","Mid","C","C")
# Cpre vs Cpost
perm_func("2C","2C","Pre","Post","C","C")
global_boot_2("2C","2C","Pre","Post","C","C")
# Cmid vs Cpost
perm_func("2C","2C","Mid","Post","C","C")
global_boot_2("2C","2C","Mid","Post","C","C")
#####################################################
#### MAIN CONDITITION ANALYSES ####
#####################################################
################
# IS CONDITION #
################
# create a data frame in which the IS condition is subsetted
IS_subset = subset(D_tall, ! condition_names %in% c("BB","1C", "2C"))
# 1C condition
IS_subset_lme = lme(measure~(phase+objects)^2,
random=~1|ID,
data=IS_subset)
# omnibus ANOVA
anova.lme(IS_subset_lme)
#######################
# PLANNED COMPARISONS #
#######################
#### A RATINGS AND MEASURES ####
# Apre:
# Mean: 53.5; 95%CI[48.8,62.19]
global_boot("IS","Pre","A")
# Amid:
# Mean: 78.15; 95%CI[71.46,84.84]
global_boot("IS","Mid","A")
# Apost:
# Mean: 8.25; 95%CI[-1.86,18.36]
global_boot("IS","Post","A")
# Apre vs Amid
perm_func("IS","IS","Pre","Mid","A","A")
# -24.650 1.000 0.000 0.996 0.004
global_boot_2("IS","IS","Pre","Mid","A","A")
# -24.65000 -35.59173 -13.70827
# Apre vs Apost
perm_func("IS","IS","Pre","Post","A","A")
# 45.25 0.00 1.00 0.00 1.00
global_boot_2("IS","IS","Pre","Post","A","A")
# 45.25000 31.86622 58.63378
# Amid vs Apost
perm_func("IS","IS","Mid","Post","A","A")
# 69.9 0.0 1.0 0.0 1.0
global_boot_2("IS","IS","Mid","Post","A","A")
# 69.90000 57.58002 82.21998
#### B RATINGS AND MEASURES ####
# Bpre:
# Mean: 51.7500; 95%CI[42.8935,60.6065]
global_boot("IS","Pre","B")
# 51.7500 42.8935 60.6065
# Bmid:
# Mean: 64.40000; 95%CI[51.92206,76.87794]
global_boot("IS","Mid","B")
# 64.40000 51.92206 76.87794
# Bpost:
# Mean: 99.50000; 95%CI[98.52844,100.47156]
global_boot("IS","Post","B")
# 99.50000 98.52844 100.47156
# Bpre vs Bmid
perm_func("IS","IS","Pre","Mid","B","B")
# -12.65000 1.00000 0.00000 0.93725 0.06175
global_boot_2("IS","IS","Pre","Mid","B","B")
#-12.650000 -27.765826 2.465826
# Bpre vs Bpost
perm_func("IS","IS","Pre","Post","B","B")
# -47.75 1.00 0.00 1.00 0.00
global_boot_2("IS","IS","Pre","Post","B","B")
# -47.75000 -56.67078 -38.829227
# Bmid vs Bpost
perm_func("IS","IS","Mid","Post","B","B")
# -35.1 1.0 0.0 1.0 0.0
global_boot_2("IS","IS","Mid","Post","B","B")
# -35.10000 -47.61257 -22.58743
#### C RATINGS AND MEASURES ####
# Cpre:
# Mean: 44.75000; 95%CI[38.16464,51.33536]
global_boot("IS","Pre","C")
# 44.75000 38.16464 51.33536
# Cmid:
# Mean: 47.25000; 95%CI[38.87933,55.62067]
global_boot("IS","Mid","C")
# 47.25000 38.87933 55.62067
# Cpost:
# Mean: 49.75000; 95%CI[46.53611,52.96389]
global_boot("IS","Post","C")
# 49.75000 46.53611 52.96389
# Cpre vs Cmid
perm_func("IS","IS","Pre","Mid","C","C")
# -2.50000 1.00000 0.00000 0.61650 0.37775
global_boot_2("IS","IS","Pre","Mid","C","C")
# -2.500000 -13.248049 8.248049
# Cpre vs Cpost
perm_func("IS","IS","Pre","Post","C","C")
# -5.00000 1.00000 0.00000 0.71075 0.28425
global_boot_2("IS","IS","Pre","Post","C","C")
# -5.000000 -12.269478 2.269478
# Cmid vs Cpost
perm_func("IS","IS","Mid","Post","C","C")
# 3-2.5000 1.0000 0.0000 0.6015 0.3920
global_boot_2("IS","IS","Mid","Post","C","C")
# -2.500000 -11.491956 6.491956
#### D RATINGS AND MEASURES ####
# Dpre:
# Mean: 58.25000; 95%CI[51.34072,65.15928]
global_boot("IS","Pre","D")
# 58.25000 51.34072 65.15928
# Dmid:
# Mean: 52.25000; 95%CI[45.73075,58.76925]
global_boot("IS","Mid","D")
# 52.25000 45.73075 58.76925
# Dpost:
# Mean: 51.0000; 95%CI[47.9345,54.0655]
global_boot("IS","Post","D")
# 51.0000 47.9345 54.0655
# Dpre vs Dmid
perm_func("IS","IS","Pre","Mid","D","D")
# 6.00000 0.45250 0.53650 0.22950 0.76525
# BUG FIX: the bootstrap CI below was called with objects "C","C" under the
# "Dpre vs Dmid" heading (copy-paste from the C section); it must compare D.
global_boot_2("IS","IS","Pre","Mid","D","D")
# (previously recorded output belonged to the C-vs-C call; rerun to record
# the D-vs-D interval)
# Dpre vs Dpost
perm_func("IS","IS","Pre","Post","D","D")
# 7.25000 0.38400 0.60850 0.19425 0.80175
global_boot_2("IS","IS","Pre","Post","D","D")
# 7.2500000 -0.3149281 14.8149281
# Dmid vs Dpost
perm_func("IS","IS","Mid","Post","D","D")
# 1.25000 0.86950 0.11175 0.43800 0.55225
global_boot_2("IS","IS","Mid","Post","D","D")
# 1.250000 -5.948724 8.448724
#### BAYES FACTOR TO COMPARE PRE- AND MID RATINGS OF OBJECT B IN THE IS CONDITION ####
IS_subset_2 = subset(IS_subset, ! phase %in% c("Post"))
IS_subset_3 = subset(IS_subset_2, ! objects %in% c("A","C","D"))
# define the null and alternative models #
lm.null = lme(measure~1, random=~1|ID, data=IS_subset_3)
lm.alt = lme(measure~phase, random=~1|ID, data=IS_subset_3)
#obtain BICs for the null and alternative models
null.bic = BIC(lm.null)
alt.bic = BIC(lm.alt)
# compute the BF01 - this is the BF whose value is interpreted as the evidence in favor of the null (e.g., if the BF01 = 2.6, this means that there is 2.6 times as much evidence for the null than for the alternative or the evidence is 2.6:1 in favor of the null)
BF01 = exp((alt.bic - null.bic)/2) # this yields a BF that is interpreted as the evidence in favor of the null; it's critical that the alt.bic comes first otherwise your interpretation of the resulting BF value will be incorrect
BF10 = 1/BF01
## MORE APPROPRIATE METHOD FOR COMPUTING A BAYES' FACTOR ##
## COMPARING B-PRE AND B-MID IN THE IS CONDITION ##
x = D_tall$measure[D_tall$condition_names=="IS" & D_tall$objects=="B" & D_tall$phase=="Pre"]
y = D_tall$measure[D_tall$condition_names=="IS" & D_tall$objects=="B" & D_tall$phase=="Mid"]
BF_bb_B_prepost = ttestBF(x=x,y=y,paired=TRUE)
BF_bb_B_prepost
################
# BB CONDITION #
################
# create a data frame in which the IS condition is subsetted
BB_subset = subset(D_tall, ! condition_names %in% c("IS","1C", "2C"))
# 1C condition
BB_subset_lme = lme(measure~(phase+objects)^2,
random=~1|ID,
data=BB_subset)
# omnibus ANOVA
anova.lme(BB_subset_lme)
#######################
# PLANNED COMPARISONS #
#######################
#### A RATINGS AND MEASURES ####
# Apre:
# Mean: 50.2500; 95%CI[45.2578,55.2422]
global_boot("BB","Pre","A")
# 50.2500 45.2578 55.2422
# Amid:
# Mean: 71.00000; 95%CI[64.64633,77.35367]
global_boot("BB","Mid","A")
# 71.00000 64.64633 77.35367
# Apost:
# Mean: 99.75000; 95%CI[99.25304,100.24696]
global_boot("BB","Post","A")
# 92.95833 99.25304 100.24696
# Apre vs Amid
perm_func("BB","BB","Pre","Mid","A","A")
# -20.75000 1.00000 0.00000 0.99325 0.00675
global_boot_2("BB","BB","Pre","Mid","A","A")
# -20.7500 -28.7261 -12.7739
# Apre vs Apost
perm_func("BB","BB","Pre","Post","A","A")
# -49.5 1.0 0.0 1.0 0.0
global_boot_2("BB","BB","Pre","Post","A","A")
# -49.50000 -54.51708 -44.48292
# Amid vs Apost
perm_func("BB","BB","Mid","Post","A","A")
# -28.7500 1.0000 0.0000 0.9995 0.0005
global_boot_2("BB","BB","Mid","Post","A","A")
# -28.75000 -35.12404 -22.37596
#### B RATINGS AND MEASURES ####
# Bpre:
# Mean: 50.90000; 95%CI[44.56488,57.23512]
global_boot("BB","Pre","B")
# 50.90000 44.56488 57.23512
# Bmid:
# Mean: 63.40000; 95%CI[54.73142,72.06858]
global_boot("BB","Mid","B")
# 63.40000 54.73142 72.06858
# Bpost:
# Mean: 50.25000; 95%CI[37.71325,62.78675]
global_boot("BB","Post","B")
# 50.25000 37.71325 62.78675
# Bpre vs Bmid
perm_func("BB","BB","Pre","Mid","B","B")
# -12.5000 1.0000 0.0000 0.9340 0.0635
global_boot_2("BB","BB","Pre","Mid","B","B")
# -12.500000 -23.271454 -1.728546
# Bpre vs Bpost
perm_func("BB","BB","Pre","Post","B","B")
# 0.6500 0.9370 0.0600 0.4655 0.5330
global_boot_2("BB","BB","Pre","Post","B","B")
# 0.65000 -13.36115 14.66115
# Bmid vs Bpost
perm_func("BB","BB","Mid","Post","B","B")
# 13.15000 0.10725 0.89225 0.05350 0.94600
global_boot_2("BB","BB","Mid","Post","B","B")
# 13.150000 -2.159301 28.459301
#### BAYES FACTOR TO COMPARE POST- AND MID RATINGS OF OBJECT B IN THE IS CONDITION ####
BB_subset_2 = subset(BB_subset, ! phase %in% c("Pre"))
BB_subset_3 = subset(BB_subset_2, ! objects %in% c("A","C"))
# define the null and alternative models #
lm.null = lme(measure~1, random=~1|ID, data=BB_subset_3)
lm.alt = lme(measure~phase, random=~1|ID, data=BB_subset_3)
#obtain BICs for the null and alternative models
null.bic = BIC(lm.null)
alt.bic = BIC(lm.alt)
# compute the BF01 - this is the BF whose value is interpreted as the evidence in favor of the null (e.g., if the BF01 = 2.6, this means that there is 2.6 times as much evidence for the null than for the alternative or the evidence is 2.6:1 in favor of the null)
BF01 = exp((alt.bic - null.bic)/2) # this yields a BF that is interpreted as the evidence in favor of the null; it's critical that the alt.bic comes first otherwise your interpretation of the resulting BF value will be incorrect
BF10 = 1/BF01
## MORE APPROPRIATE METHOD FOR COMPUTING A BAYES' FACTOR ##
## COMPARING B-MID AND B-POST IN THE BB CONDITION ##
x2 = D_tall$measure[D_tall$condition_names=="BB" & D_tall$objects=="B" & D_tall$phase=="Mid"]
y2 = D_tall$measure[D_tall$condition_names=="BB" & D_tall$objects=="B" & D_tall$phase=="Post"]
mean(x2)
mean(y2)
BF_bb_B_prepost = ttestBF(x=x2,y=y2,paired=TRUE)
BF_bb_B_prepost
#### C RATINGS AND MEASURES ####
# Cpre:
# Mean: 50.0000; 95%CI[44.6435,55.3565]
global_boot("BB","Pre","C")
# 50.0000 44.6435 55.3565
# Cmid:
# Mean: 51.25000; 95%CI[44.41516,58.08484]
global_boot("BB","Mid","C")
# 51.25000 44.41516 58.08484
# Cpost:
# Mean: 51.15000; 95%CI[45.86878,56.43122]
global_boot("BB","Post","C")
# 51.15000 45.86878 56.43122
# Cpre vs Cmid
perm_func("BB","BB","Pre","Mid","C","C")
# -1.25000 1.00000 0.00000 0.55450 0.44025
global_boot_2("BB","BB","Pre","Mid","C","C")
# -1.250000 -9.957799 7.457799
# Cpre vs Cpost
perm_func("BB","BB","Pre","Post","C","C")
# -1.1500 1.0000 0.0000 0.5595 0.4390
global_boot_2("BB","BB","Pre","Post","C","C")
# -1.1500 -8.6424 6.3424
# Cmid vs Cpost
perm_func("BB","BB","Mid","Post","C","C")
# 0.10000 0.99000 0.00875 0.50175 0.49750
global_boot_2("BB","BB","Mid","Post","C","C")
# 0.100000 -8.604116 8.804116
#### D RATINGS AND MEASURES ####
# Dpre:
# Mean: 53.30000; 95%CI[46.58798,60.01202]
global_boot("BB","Pre","D")
# 53.30000 46.58798 60.01202
# Dmid:
# Mean: 50.50000; 95%CI[46.76551,54.23449]
global_boot("BB","Mid","D")
# 50.50000 46.76551 54.23449
# Dpost:
# Mean: 47.55000; 95%CI[41.99166,53.10834]
global_boot("BB","Post","D")
# 47.55000 41.99166 53.10834
# Dpre vs Dmid
perm_func("BB","BB","Pre","Mid","D","D")
# 2.80000 0.72400 0.27325 0.37125 0.62775
global_boot_2("BB","BB","Pre","Mid","D","D")
# 2.800000 -4.939667 10.539667
# Dpre vs Dpost
perm_func("BB","BB","Pre","Post","D","D")
# 5.7500 0.4815 0.5090 0.2510 0.7440
global_boot_2("BB","BB","Pre","Post","D","D")
# 5.750000 -2.980983 14.480983
# Dmid vs Dpost
perm_func("BB","BB","Mid","Post","D","D")
# 2.95000 0.74150 0.25675 0.37450 0.62450
global_boot_2("BB","BB","Mid","Post","D","D")
# 2.950000 -3.732969 9.632969
###################################################################
# COMPARE POST-RATING OF A and B BETWEEN THE BB AND IS CONDITIONS #
###################################################################
# OBJECT A POST RATINGS ACROSS THE BB AND IS CONDITIONS #
# Apost_IS:
# Mean: 8.25; 95%CI[-1.86,18.36]
global_boot("IS","Post","A")
# Apost_BB:
# Mean: 99.75; 95%CI[99.25,100.25]
global_boot("BB","Post","A")
# BUG FIX: this section compares object A across conditions, but the old
# calls passed "B","B", merely duplicating the object-B comparison below.
perm_func("IS","BB","Post","Post","A","A")
global_boot_2("IS","BB","Post","Post","A","A")
# OBJECT B POST RATINGS ACROSS THE BB AND IS CONDITIONS #
perm_func("IS","BB","Post","Post","B","B")
# 49.25 0.00 1.00 0.00 1.00
global_boot_2("IS","BB","Post","Post","B","B")
# 49.25000 36.67355 61.82645
################################################################
################################################################
################################################################
############# #############
############# OMNIBUS FIGURE #############
############# #############
################################################################
################################################################
################################################################
condition_barplot = ggplot(D_tall, aes(objects, measure, fill = phase)) # create the bar graph with test.trial.2 on the x-axis and measure on the y-axis
condition_barplot + stat_summary(fun.y = mean, geom = "bar", position = "dodge", colour = "black") + # add the bars, which represent the means and the place them side-by-side with 'dodge'
stat_summary(fun.data=mean_cl_boot, geom = "errorbar", position = position_dodge(width=0.90), width = 0.2) + # add errors bars
ylab("ratings (scale: 0-100)") + # change the label of the y-axis
facet_wrap(~condition_names, scales = 'free') + # scales='free' ensures that each blot has x labels
theme_bw() + # remove the gray background
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black")) + # remove the major and minor grids
scale_y_continuous(expand = c(0, 0)) + # ensure that bars hit the x-axis
coord_cartesian(ylim=c(0, 110)) +
theme_classic() +
scale_fill_manual(values = c("white","gray68", "black")) +
theme(strip.text = element_text(colour = 'black', size = 12)) + # this changes the size and potentially weight of the facet labels
theme(axis.title=element_text(size="12"),axis.text=element_text(size=12)) +
theme(legend.box.background = element_rect(), legend.box.margin = margin(6, 6, 6, 6)) +
theme(legend.text = element_text(size = 12)) +
annotate("segment", x=-Inf, xend=Inf, y=-Inf, yend=-Inf) + # this adds a vertical & horizontal line to each plot
annotate("segment", x=-Inf, xend=-Inf, y=-Inf, yend=Inf) + # ditto
theme(legend.title=element_blank()) +
labs(x = "Test trials")
##############################################################################
##############################################################################
##############################################################################
############# #############
############# INDIVIDUAL DIFFERENCE FIGURE #############
############# #############
##############################################################################
##############################################################################
##############################################################################
condition_barplot = ggplot(D_tall, aes(objects, measure, fill = phase)) # create the bar graph with test.trial.2 on the x-axis and measure on the y-axis
condition_barplot + stat_summary(fun.y = mean, geom = "bar", position = "dodge", colour = "black") + # add the bars, which represent the means and the place them side-by-side with 'dodge'
stat_summary(fun.data=mean_cl_boot, geom = "errorbar", position = position_dodge(width=0.90), width = 0.2) + # add errors bars
ylab("ratings (scale: 0-100)") + # change the label of the y-axis
facet_wrap(condition_names, labeller = label_wrap_gen(multi_line=FALSE)) + # scales='free' ensures that each blot has x labels
theme_bw() + # remove the gray background
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black")) + # remove the major and minor grids
scale_y_continuous(expand = c(0, 0)) + # ensure that bars hit the x-axis
coord_cartesian(ylim=c(0, 110)) +
theme_classic() +
scale_fill_manual(values = c("white","gray68", "black")) +
theme(strip.text = element_text(colour = 'black', size = 12)) + # this changes the size and potentially weight of the facet labels
theme(axis.title=element_text(size="12"),axis.text=element_text(size=12)) +
theme(legend.box.background = element_rect(), legend.box.margin = margin(6, 6, 6, 6)) +
theme(legend.text = element_text(size = 12)) +
annotate("segment", x=-Inf, xend=Inf, y=-Inf, yend=-Inf) + # this adds a vertical & horizontal line to each plot
annotate("segment", x=-Inf, xend=-Inf, y=-Inf, yend=Inf) + # ditto
theme(legend.title=element_blank()) +
labs(x = "Test trials")
# FOR THE BB CONDITION ONLY
condition_barplot = ggplot(BB_subset, aes(objects, measure, fill = phase)) # create the bar graph with test.trial.2 on the x-axis and measure on the y-axis
condition_barplot + stat_summary(fun.y = mean, geom = "bar", position = "dodge", colour = "black") + # add the bars, which represent the means and the place them side-by-side with 'dodge'
stat_summary(fun.data=mean_cl_boot, geom = "errorbar", position = position_dodge(width=0.90), width = 0.2) + # add errors bars
ylab("ratings (scale: 0-100)") + # change the label of the y-axis
facet_wrap(~ID, scales = 'free') + # scales='free' ensures that each blot has x labels
theme_bw() + # remove the gray background
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black")) + # remove the major and minor grids
scale_y_continuous(expand = c(0, 0)) + # ensure that bars hit the x-axis
coord_cartesian(ylim=c(0, 110)) +
theme_classic() +
scale_fill_manual(values = c("white","gray68", "black")) +
theme(strip.text = element_text(colour = 'black', size = 12)) + # this changes the size and potentially weight of the facet labels
theme(axis.title=element_text(size="12"),axis.text=element_text(size=12)) +
theme(legend.box.background = element_rect(), legend.box.margin = margin(6, 6, 6, 6)) +
theme(legend.text = element_text(size = 12)) +
annotate("segment", x=-Inf, xend=Inf, y=-Inf, yend=-Inf) + # this adds a vertical & horizontal line to each plot
annotate("segment", x=-Inf, xend=-Inf, y=-Inf, yend=Inf) + # ditto
theme(legend.title=element_blank()) +
labs(x = "Test trials")
|
da4c9463aff382c2ea310c205b0769b34c2cdc48 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/clusterGeneration/examples/simClustDesign.Rd.R | 40e677f56bfc522a34700ee91676b66448e6ef8f | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 496 | r | simClustDesign.Rd.R | library(clusterGeneration)
### Name: simClustDesign
### Title: DESIGN FOR RANDOM CLUSTER GENERATION WITH SPECIFIED DEGREE OF
### SEPARATION
### Aliases: simClustDesign
### Keywords: cluster
### ** Examples
## Not run:
##D tmp<-simClustDesign(numClust=3,
##D sepVal=c(0.01,0.21),
##D sepLabels=c("L","M"),
##D numNonNoisy=4,
##D numOutlier=0,
##D numReplicate=2,
##D clustszind=2)
## End(Not run)
|
17905f69197d7c10694630dcaecb3bbcb5906099 | 10ce9ea816c88d34c0e66b955d5f039fc6617ed9 | /run_analysis.R | dae20b75729395e7dc04eb990ec7117d60fff100 | [] | no_license | rabeekraja/ProgrammingAssignment4 | 4a7c05beb110e33b3453d0be43015b2a07fd5f9e | 06b75866851ec02047790ce549d11b933f5fa7a8 | refs/heads/master | 2020-09-07T11:26:31.856985 | 2019-11-11T14:54:38 | 2019-11-11T14:54:38 | 220,764,533 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,874 | r | run_analysis.R | #This R script does following steps
#Step #1:Merges the training and the test sets to create one data set.
#Step #2:Extracts only the measurements on the mean and standard deviation for each measurement.
# Step #3:Appropriately labels the data set with descriptive variable names.
#Step #4 :Uses descriptive activity names to name the activities in the data set
#Step #5:From the data set in step 4, creates a second, independent tidy data set with the
#average of each variable for each activity and each subject.
# Set current working directory
setwd("C:/rlib")
#Load Libraries
# check if plyr package is installed
if (!"plyr" %in% installed.packages()) {
install.packages("plyr")
}
library(plyr)
## Data download and unzip
# fileName to store in local drive
fileN <- "UCIDataSets.zip"
# URL to download zip file data
dataUrl <- "http://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# Directory/folder name that datasets available
dir <- "UCI HAR Dataset"
# Fetch the zipped dataset once and unpack it once.
#
# fileName: local path for the downloaded archive
# url:      remote location of the archive
# dirName:  folder the archive is expected to unpack into
#
# Both steps are skipped when their output already exists, so the function
# is safe to call repeatedly.
extract_data <- function(fileName, url, dirName) {
  # Binary mode ("wb") matters on Windows: text mode would rewrite line
  # endings and corrupt the zip payload.
  if (!file.exists(fileName)) {
    download.file(url, destfile = fileName, mode = "wb")
  }
  # Unpack into the working directory only if the target folder is missing.
  if (!file.exists(dirName)) {
    unzip(fileName, files = NULL, exdir = ".")
  }
}
#Call function extract data
extract_data(fileN,dataUrl,dir)
##Data Reading
#Read test text Data into variables
subject_test_data <- read.table("UCI HAR Dataset/test/subject_test.txt")
#print(count(subject_test_data))
X_test_data <- read.table("UCI HAR Dataset/test/X_test.txt")
#print(count(X_test_data))
y_test_data <- read.table("UCI HAR Dataset/test/y_test.txt")
#print(count(y_test_data))
#Read training text Data into variables
subject_train_data <- read.table("UCI HAR Dataset/train/subject_train.txt")
#print(count(subject_train_data))
X_train_data <- read.table("UCI HAR Dataset/train/X_train.txt")
#print(count(X_train_data))
y_train_data <- read.table("UCI HAR Dataset/train/y_train.txt")
#print(count(y_train_data))
#Read labels text Data into variable
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt")
#print(count(activity_labels))
# Read features
features <- read.table("UCI HAR Dataset/features.txt")
#print(count(features))
## Data Analysis Steps
# Step 1. Merge test & training data into single set.
#rbind here to append all the rows from test and all rows from training
# datasets into single set.
mergedDataSet <- rbind(X_train_data,X_test_data)
# Step 2. Extract Mean & Standard Deviation measurements.
# vector of mean,std data.
#mean_Std_Vector <- grep("mean()|std()", features[, 2])
mean_Std_Vector <-grep("mean\\(\\)|std\\(\\)", features[, 2])
#print(mean_Std_Vector)
mergedDataSet <- mergedDataSet[,mean_Std_Vector]
# 3. Label data set with proper activity names.
# Create features without () by using global replace.
replacedFeatureName <- sapply(features[, 2], function(x) {gsub("[()]", "",x)})
names(mergedDataSet) <- replacedFeatureName[mean_Std_Vector]
# combine test and train of subject data and activity data, give descriptive lables
subject <- rbind(subject_train_data, subject_test_data)
names(subject) <- 'subject'
activity <- rbind(y_train_data, y_test_data)
names(activity) <- 'activity'
# combine subjects, activities, and sub data set to create final data set.
mergedDataSet <- cbind(subject,activity, mergedDataSet)
# 4. Uses descriptive activity names to name the activities in the data set
# group the activity column of dataSet and rename.
activity_group <- factor(mergedDataSet$activity)
levels(activity_group) <- activity_labels[,2]
mergedDataSet$activity <- activity_group
names(mergedDataSet)<-gsub("^t", "Time", names(mergedDataSet))
names(mergedDataSet)<-gsub("^f", "Frequency", names(mergedDataSet))
names(mergedDataSet)<-gsub("Acc", "Accelerometer", names(mergedDataSet))
names(mergedDataSet)<-gsub("Gyro", "Gyroscope", names(mergedDataSet))
names(mergedDataSet)<-gsub("Mag", "Magnitude", names(mergedDataSet))
names(mergedDataSet)<-gsub("BodyBody", "Body", names(mergedDataSet))
names(mergedDataSet)<-gsub("[()]", "", names(mergedDataSet))
# 5. tidy data set with the average of each variable.
# gather data for subjects, activities.
finalData <- aggregate(. ~subject + activity, mergedDataSet, mean)
# BUG FIX: the old line indexed `tidydata`, a variable that is never
# defined anywhere in this script (the line would abort the run);
# sort the aggregated data frame itself.
finalData <- finalData[order(finalData$subject, finalData$activity),]
#print(finalData)
# write the tidy data to the current directory as "tidied_final_data.txt"
write.table(finalData, "tidied_final_data.txt", sep = ",",row.names = FALSE)
write.csv(finalData, "tidy_final_data.csv", row.names=FALSE)
a2342193ac985e1e285af17cef66265a73727dd8 | ae64c28434067ca6fc976998b1e995546810a7d8 | /microbenchmarks/att/determinant.R | 5a37be73654c949b18260daee9f846a061670d6b | [] | no_license | bedatadriven/renjin-benchmarks | 285b54e4a292a902b2277e39ad4d69c3db9dff1c | d3a3ebddb0e375eaf7fa3960eae6b1efd39bf1da | refs/heads/master | 2021-04-12T04:24:36.559310 | 2018-08-23T14:12:03 | 2018-08-23T14:12:03 | 31,985,741 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 89 | r | determinant.R |
# Benchmark fixture: 6,250,000 standard-normal draws reshaped via `dim<-`
# into a 2500 x 2500 matrix for the determinant benchmark below.
a <- rnorm(2500*2500)
dim(a) <- c(2500, 2500)
# Benchmark body: computes the determinant of the global 2500 x 2500
# matrix `a` and returns it.
run <- function() {
  det(a)
}
7142faafd4228ed7e335e8edf456a89a9b4f5686 | 43a688d2c8f4fa45ac8569f48e94cc1213ddebe5 | /R/CheckRho.R | 7f0be9dde3be68eeaae6816b51f6763619dd85e3 | [] | no_license | cran/MaskJointDensity | a0c5316667697e7f5b79b9a6820c160f5f202db0 | 929b9710f19e2552d6a7e629a27cf3b85e5bffa2 | refs/heads/master | 2020-03-18T06:41:41.441830 | 2018-05-22T11:13:25 | 2018-05-22T11:13:25 | 134,410,302 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 662 | r | CheckRho.R | CheckRho <-
function(x1,x2, mu1,mu2,s1,s2, Srho12, G_Point7,GH_Quadrature ){#x1 is a sample from population1
# and x2 is a sample from population2. They are used to
# create kernel density functions (ks::kde).
# mu1/mu2, s1/s2: means and SDs used to standardize each margin.
# Srho12: candidate correlation of the underlying bivariate normal.
# G_Point7, GH_Quadrature: the 7 quadrature nodes and weights
# (presumably already scaled for the standard normal -- TODO confirm).
fhat1<-ks::kde(x=x1,binned=TRUE)
fhat2<-ks::kde(x=x2, binned=TRUE)
#Uphi_1<-pnorm(G_point7)
# Uphi_2<-pnorm(star_rho12*G_point7+sqrt(1-star_rho12^2)*)
g<-0
m<-7
# Double 7x7 quadrature sum. The second normal deviate is constructed as
# Srho12*u + sqrt(1-Srho12^2)*v so the pair has correlation Srho12; each
# normal is mapped through pnorm and the kernel quantile function
# (ks::qkde), then standardized by (mu, s) before taking the product.
for(l in 1:m){
for(k in 1:m){
g<-g+GH_Quadrature[l]*GH_Quadrature[k]*((ks::qkde(pnorm(G_Point7[l]),fhat1)-mu1)/s1)*((ks::qkde(pnorm(Srho12*G_Point7[l]+sqrt(1-Srho12^2)*G_Point7[k]),fhat2)-mu2)/s2)
}
}
# quadrature approximation of the induced product-moment correlation term
return(g)
}
|
11bc28de18d768bbb2a430d98dfebc59f08c483f | 69fd7ecf68a14529ad4f282cbafaf6a53564f3a3 | /ErdemSert.R | 21e8a2a72a21d33fe4455982824602985b8c5e7b | [] | no_license | erdemsert/Applied-Data-Analysis-Lab-Project | e380a5ce41e393a0b673025098ecb24163c1b883 | f147136720b9ce373e6f67aeb5a8ed7063a654a2 | refs/heads/main | 2023-02-03T13:52:27.132488 | 2020-12-14T08:51:45 | 2020-12-14T08:51:45 | 321,287,241 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,563 | r | ErdemSert.R | dataset <- read.csv("Life_expectancy_dataset.csv")
# Quick sanity checks: preview the data and count missing values.
head(dataset)
sum(is.na(dataset))
library(ggplot2)
library(dplyr)
#First Graph
#This graph shows the Overall Life Expectancy by looking at each continent and using mutate function to find the result
# Per-continent mean male/female life expectancy, restricted to the six
# named continents.
SortedWithContinent <- dataset %>% group_by(Continent)%>%
select(Male.Life,Female.Life)%>%
summarise(avgMale=mean(Male.Life),avgFemale=mean(Female.Life))%>%
filter(Continent=="Europe"|Continent=="Asia"|
Continent=="Africa"|Continent=="North America"|
Continent=="Oceania"|Continent=="South America")
# NOTE(review): bar labels are hard-coded strings; this assumes the
# summarise() output keeps this exact row order -- verify if data changes.
numbers <- c("61.8","73.6","79.1","76.3","74.3","75.1")
# NOTE(review): mutate() is also given the table itself as an extra
# argument (SortedWithContinent) -- looks redundant; confirm output.
SortedWithContinent%>%
mutate(SortedWithContinent,OverallLifeOfEachContinent=(avgMale+avgFemale)/2)%>%
ggplot2::ggplot(aes(Continent,OverallLifeOfEachContinent,fill=Continent))+
geom_bar(position="dodge", stat="identity", colour="black", width=0.3)+
ggtitle("Overall Life Expectancy in Each Continent")+
geom_text(aes(label=numbers), size = 4, fontface = "bold", vjust=-0.2) +
theme(legend.title = element_text(size=14,hjust = 0.5),
legend.position = "right",
legend.text = element_text(size = 11),
plot.title = element_text(color = "Red",size = 14,face = "bold",hjust = 0.5),
axis.title.x = element_text(size = 12),
axis.title.y = element_text(size = 12),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10))+
xlab("Continents")+
ylab("Overall Life Expectancy")
#Second Graph
# Hard-coded labels for the male-only chart (same row-order caveat).
values <- c("60.1","71.0","76.0","73.9","71.6","72.2")
#This graph shows the average of Male Life Expectancy in each Continent.
dataset %>% group_by(Continent)%>%summarise(avgMale=mean(Male.Life))%>%
ggplot2::ggplot(aes(Continent,avgMale,fill=Continent))+
geom_bar(position="dodge", stat="identity", colour="black", width=0.3)+
ggtitle("Average of Male Life Expectancy in Each Continent")+
geom_text(aes(label=values), size = 4, fontface = "bold", vjust=-0.2) +
theme(legend.title = element_text(size=14,hjust = 0.5),
legend.position = "right",
legend.text = element_text(size = 11),
plot.title = element_text(color = "Red",size = 14,face = "bold",hjust = 0.5),
axis.title.x = element_text(size = 12),
axis.title.y = element_text(size = 12),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10))+
xlab("Countries")+
ylab("Male Life Expectancy")
#Third Graph
#This graph shows that 5 Countries that have less Overall Life expectancy than the other countries
# Lollipop chart of the 5 countries with the lowest mean overall life
# expectancy (top_n with a negative n keeps the bottom 5).
dataset %>%
dplyr::group_by(Country)%>%
dplyr::summarise(avgOverall=mean(Overall.Life))%>%arrange(desc(avgOverall))%>%
top_n(-5,avgOverall)%>%
ggplot2::ggplot(aes(Country,avgOverall))+
geom_segment( aes(x=Country, xend=Country, y=0, yend=avgOverall)) +
geom_point( size=5, color="red", fill=alpha("orange", 0.3), alpha=0.7, shape=21, stroke=2)+
ggtitle("5 Country that have worst Overall Life Expectancy")+
theme( plot.title = element_text(color = "Red",size = 14,face = "bold",hjust = 0.5),
legend.title = element_text(size=0),
legend.position = "right",
legend.text = element_text(size = 15, color = "Black"),
axis.title.x = element_text(size = 12),
axis.title.y = element_text(size = 12),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10))+
xlab("Countries")+
ylab("Overall Life Expectancy")
|
263eb406f10251dc91f64202a0066c279b7b26cb | 0a9c82670288e18cf0ddf47a70eb84650dccc7d7 | /Elongation_Rate_Shiny.R | 2040d2a1c3e32fbd188a757c2f8ba824b30ffc3f | [] | no_license | sheridar/Elongation_Rate_Shiny | a5e3c361ef962a5b359d2c3cca835c0014ea17c5 | 5aafedc0b1c75baf94cae07a260119342cd3bf0f | refs/heads/master | 2020-03-17T02:01:20.706666 | 2019-02-11T23:47:15 | 2019-02-11T23:47:15 | 133,175,240 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 33,939 | r | Elongation_Rate_Shiny.R |
# Install required packages ----
# Packages required by the Shiny app.
req_packages <- c(
  "depmixS4", "DT",
  "shiny", "tidyverse",
  "magrittr", "rlang"
)
# Install any packages that are not yet present, then attach them all.
avail_packages <- installed.packages()[, "Package"]
missing_packages <- req_packages[!(req_packages %in% avail_packages)]
# Explicit length test instead of implicit truthiness.
if (length(missing_packages) > 0) {
  install.packages(missing_packages)
}
# Attach each package; TRUE (not the reassignable alias `T`) and direct
# iteration over names instead of an index loop.
for (pkg in req_packages) {
  library(pkg, character.only = TRUE)
}
# Define UI for data upload app ----
ui <- fluidPage(
tags$head(
tags$style(HTML("
@import url('https://fonts.googleapis.com/css?family=Roboto:900');
h1 {
font-family: 'Roboto', sans-serif;
font-weight: 500;
font-size: 400%;
line-height: 1.1;
color: #cb181d;
}
"))
),
headerPanel("Elongation rate calculator"),
column(6,
fluidRow(
column(8,
fileInput(
"file_1", "Timecourse data",
accept = c("text/tsv", ".bed")
)
),
column(4,
numericInput(
"time_1", "Time (min)",
10, min = 0, max = 999
)
)
),
fluidRow(
column(8,
fileInput(
"file_2", label = NULL,
accept = c("text/tsv", ".bed")
)
),
column(4,
numericInput(
"time_2", label = NULL,
20, min = 1, max = 999
)
)
),
fluidRow(
column(8,
fileInput(
"control", "Control data",
accept = c("text/tsv", ".bed")
)
),
column(4,
numericInput(
"win_min", "Window min",
1, min = 1, max = 200
)
)
),
fluidRow(
column(8,
fileInput(
"gene_list", "Gene list",
accept = c("text/tsv", ".txt")
)
),
column(4,
numericInput(
"win_max", "Window max",
200, min = 1, max = 200
)
)
),
fluidRow(
div(
column(2, actionButton("runAnalysis", "RUN")),
column(2, downloadButton("download", "Export")),
column(2, offset = 1, actionButton("createPlot", "Plot")),
column(2, checkboxInput("HMMcheckbox", "HMM", value = TRUE)),
column(2, checkboxInput("simpleCheckbox", "Simple", value = FALSE)),
style = "height: 75px; background-color: white;"
)
)
),
column(6,
div(
DT::dataTableOutput("rateTable"),
style = "font-size: 75%; text-overflow: ellipsis"
)
),
fluidRow(
#column(9, plotOutput(width = 925, "metaPlot")),
column(9, plotOutput("metaPlot")),
column(3, plotOutput("boxPlot"))
)
)
# Define server logic to read selected file ----
server <- function(input, output) {
#session$onSessionEnded(stopApp)
options(shiny.maxRequestSize = 500 * 1024 ^ 2)
tryCatch(
{
####################################
# Create table of elongation rates #
####################################
tablesOut <- eventReactive(input$runAnalysis, ignoreInit = T, {
################
# Input values #
################
file_1 <- input$file_1
file_2 <- input$file_2
con_path <- input$control$datapath
file1_path <- input$file_1$datapath
file2_path <- input$file_2$datapath
genes <- input$gene_list
genes_path <- input$gene_list$datapath
time_1 <- input$time_1
time_2 <- input$time_2
win_min <- input$win_min
win_max <- input$win_max
req(file_1)
req(file_2)
req(time_1)
req(time_2)
################
# Import files #
################
col_names <- c(
"chrom", "start",
"end", "name",
"win_id", "strand",
"count"
)
file_list <- list(con_path, file1_path, file2_path)
# df_list <- map(file_list, function(x) read_tsv(x, col_names))
df_list <- map(file_list, ~ read_tsv(.x, col_names))
gene_list <- read_tsv(genes_path, col_names[1:4])
name_list <- list("tm_con", "tm_1", "tm_2")
names(df_list) <- name_list
################
# Merge tables #
################
# Merge the control and timecourse count tables into one long table.
#
# input:     named list of window-count data frames (tm_con, tm_1, tm_2).
# gene_list: bed-style gene table (chrom, start, end, name).
# win_min/win_max: first and last window id to keep for every gene.
# merge_by:  character vector of key columns used to join the samples.
#
# Returns a long-format table with one row per gene/window/sample
# (columns: name, Length, win_id, win_len, kb_dist, key, count).
DRB_merge <- function(input, gene_list, win_min, win_max, merge_by) {

  # Append a kb_dist column: cumulative distance (kb) of each window
  # from the TSS, i.e. the summed length of all earlier windows.
  calc_kb <- function(input, id_col, len_col) {
    input_sort <- input %>%
      ungroup() %>%
      arrange(!!sym(id_col))
    lens <- c(input_sort[[len_col]])
    kb_tot <- 0
    kb_list <- vector("double", length(lens))
    for (i in seq_along(lens)) {
      kb_list[i] <- kb_tot
      kb_tot <- kb_tot + lens[i]
    }
    kb_list <- tibble(kb_dist = kb_list)
    bind_cols(input_sort, kb_list)
  }

  # Rename each table's "count" column after the sample it came from
  # (tm_con/tm_1/tm_2) so the tables can be joined side by side.
  add_key <- function(input) {
    res <- list()
    for (i in seq_along(input)) {
      x <- input[i]
      y <- data.frame(x)
      # data.frame() prefixes column names with the list element name;
      # strip that prefix, then rename "count" to the sample key.
      new_names <- str_replace(colnames(y), str_c(names(x), "."), "")
      colnames(y) <- str_replace(new_names, "count", names(x))
      res <- c(res, list(y))
    }
    res
  }

  # Join the keyed tables on the shared columns, dropping genes that are
  # missing from any sample.
  tbl_merge <- function(input, ...) {
    # BUG FIX: was add_key(res), which ignored the `input` argument and
    # only worked by accidentally capturing `res` from the enclosing
    # environment via lexical scoping.
    tbl_list <- add_key(input)
    purrr::reduce(tbl_list, function(x, y) {
      left_join(x, y, ...)
    }) %>%
      na.omit()
  }

  tbl_names <- names(input)

  # Gene length in kb, rounded to one decimal place.
  # BUG FIX: `digits = 1` was previously a second mutate() argument
  # (creating a stray `digits` column) instead of an argument to round().
  genes <- gene_list %>%
    mutate(Length = round((end - start) / 1000, digits = 1)) %>%
    dplyr::select(name, Length)

  # For each sample: keep the requested window range and compute each
  # window's distance from the TSS.
  res <- map(input, ~ {
    res <- .x %>%
      left_join(genes, by = "name") %>%
      na.omit() %>%
      dplyr::select(-strand) %>%
      group_by(name) %>%
      filter(
        win_id >= win_min,
        win_id <= win_max
      ) %>%
      # Keep only genes that cover the full window range and have signal.
      filter(
        min(win_id) == win_min,
        max(win_id) == win_max,
        sum(count) > 0
      ) %>%
      ungroup() %>%
      # Re-zero window ids and express window lengths in kb.
      mutate(
        win_id = win_id - win_min,
        win_len = (end - start) / 1000
      ) %>%
      group_by(name) %>%
      nest() %>%
      mutate(
        data = map(data, ~calc_kb(.x, id_col = "win_id", len_col = "win_len"))
      ) %>%
      unnest() %>%
      ungroup() %>%
      dplyr::select(name, Length, win_id, win_len, kb_dist, count)
    res
  })

  # Join samples into one wide table, then gather to long format.
  # (gather_ is soft-deprecated in favor of pivot_longer; kept for
  # compatibility with the surrounding code.)
  res <- tbl_merge(res, by = merge_by) %>%
    gather_("key", "count", tbl_names)
  res
}
df_merge <- DRB_merge(
df_list, gene_list,
win_min, win_max,
merge_by = c(
"name", "Length",
"win_id", "win_len",
"kb_dist"
))
####################
# Normalize tables #
####################
# Function to normalize signal
DRB_norm <- function(input, win_tot = 60) {
# Remove windows that are past pAS
res <- input %>%
filter(kb_dist < Length) %>%
group_by(key, name) %>%
mutate(win_count = n()) %>%
filter(win_count >= win_tot) %>%
ungroup() %>%
# Merge windows
mutate(
mutate_num = round(win_count / win_tot),
win_id = floor(win_id / mutate_num)
) %>%
mutate(count = count * win_len) %>%
group_by(key, name, win_id) %>%
mutate(
count = sum(count),
win_len = sum(win_len),
kb_dist = min(kb_dist)
) %>%
unique() %>%
mutate(count = count / win_len) %>%
ungroup() %>%
dplyr::select(name, key, win_id, kb_dist, count) %>%
# Add pseudo count
group_by(key, name) %>%
mutate(zero = ifelse(count == 0, T, F)) %>%
group_by(key, name, zero) %>%
mutate(min_count = min(count)) %>%
group_by(key, name) %>%
mutate(count = ifelse(count == 0, max(min_count) / 2, count)) %>%
ungroup() %>%
dplyr::select(-zero, -min_count) %>%
# Internally normalize signal
group_by(key, name) %>%
mutate(count = count / sum(count)) %>%
ungroup() %>%
# Normalize by -DRB signal
separate(key, sep = "_", into = c("treatment", "tm")) %>%
spread(tm, count) %>%
#gather(tm, count, -name, -win_id, -treatment, -con) %>%
gather(tm, count, -name, -win_id, -kb_dist, -treatment, -con) %>%
mutate(count = count / con) %>%
dplyr::select(-con) %>%
unite(key, treatment, tm, sep = "_") #%>%
# Bin values using a range of 0 - 1.0 and step size of 0.025
# group_by(name, key) %>%
# mutate(max_count = max(count)) %>%
# ungroup() %>%
# mutate(
# count = count / max_count,
# count = floor(count / 0.025) / 40
# ) %>%
# dplyr::select(-max_count)
res
}
df_norm <- DRB_norm(df_merge, win_tot = 60)
#############################
# Identify wave coordinates #
#############################
# Locate the leading edge of the transcription wave for every gene and
# timepoint by fitting a two-state HMM to the normalized window counts.
#
# input: long table with columns key, name, win_id, kb_dist, count.
#
# Returns a table (key, name, wave_edge) where wave_edge is the distance
# (kb) of the last window that ends a run of five consecutive state-1
# calls. Genes where the fit fails, where only one state is found, or
# where the edge falls in the final five windows are dropped.
find_HMM_waves <- function(input) {
  res <- input %>%
    group_by(key, name) %>%
    nest() %>%
    mutate(
      data = map(data, ~ {
        df_sort <- .x %>% arrange(win_id)
        kb_dist <- df_sort$kb_dist
        # Edges called within the last 5 windows are treated as artifacts.
        kb_max <- kb_dist[(length(kb_dist) - 5)]
        counts <- df_sort$count

        trstart_vals <- c(0.7, 0.2, 0.002, 0.3)
        HMMmod <- depmix(response = counts ~ 1, data = data.frame(counts),
                         nstates = 2, trstart = trstart_vals)

        # BUG FIX: success was previously detected with exists("HMMfit"),
        # which can match a stale object in an enclosing environment.
        # Capture the fit (or NULL on failure) directly from tryCatch().
        HMMfit <- tryCatch(
          fit(HMMmod, emc = em.control(rand = FALSE)),
          error = function(e) {
            cat("ERROR :", conditionMessage(e), "\n")
            NULL
          }
        )
        if (is.null(HMMfit)) {
          return(NA)
        }

        summary(HMMfit)
        HMMstate <- posterior(HMMfit)$state

        wave_edge <- NA
        if (HMMstate %>% unique() %>% length() == 2) {
          # Record the farthest window ending five consecutive state-1
          # calls (states are 1/2, so a window sum of 5 means all 1s).
          for (i in seq_along(HMMstate)) {
            if (i > 4) {
              sum_state <- sum(HMMstate[(i - 4):i])
              if (sum_state == 5) {
                wave_edge <- kb_dist[i]
              }
            }
          }
        }

        if (is.na(wave_edge)) {
          wave_edge
        } else if (wave_edge < kb_max) {
          wave_edge
        } else {
          NA
        }
      })
    ) %>%
    unnest() %>%
    na.omit() %>%
    dplyr::rename(wave_edge = data)
  res
}
HMM_coords <- find_HMM_waves(df_norm)
# Function to find waves using arbitrary cutoff.
#
# input:  long table with columns name, key, win_id, kb_dist, count.
# sd_lim: number of standard deviations above the background mean a
#         window must exceed to count as wave signal.
#
# Returns a table (name, key, wave_edge) with the distance (kb) of the
# farthest above-threshold window; genes whose edge falls at the very
# first or very last window are dropped.
find_simple_waves <- function(input, sd_lim = 10) {
res <- input %>%
group_by(name, key) %>%
# Treat the last 5 windows of each gene as "background", the rest as "data".
mutate(
win_max = max(win_id),
win_min = win_max - 5,
win_type = ifelse(win_id <= win_min, "data", "background")
) %>%
group_by(name, key, win_type) %>% # Calculated mean count and sd for each timepoint
mutate(
mean_count = mean(count),
sd_count = sd(count),
limit = mean_count + (sd_lim * sd_count)
) %>%
group_by(name, key) %>%
# Apply the data-region threshold to every window up to win_min.
mutate(limit = ifelse(win_id <= win_min, min(limit), limit)) %>%
filter(count > limit) %>% # Identified highest bin where the count is greater than the limit
arrange(desc(win_id)) %>%
dplyr::slice(1) %>%
ungroup() %>%
# Discard edges at the gene boundaries (first or last window).
filter(
win_id > 0,
win_id < win_max
) %>%
dplyr::select(name, key, "wave_edge" = kb_dist)
res
}
simple_coords <- find_simple_waves(df_norm, sd_lim = 10)
##############################
# Calculate elongation rates #
##############################
# Function to calculate elongation rates
calc_rates <- function(input, time_1, time_2, prefix, win_min = 1, win_max = 200) {
# Function to extract gene symbols from dataframe
extract_gene_symbol <- function(input) {
# Function to extract gene symbol from string
get_last_name <- function(gene_string) {
res <- str_split(gene_string, "\\|")
str_len <- length(res[[1]])
res <- res[[1]] [[str_len]]
res
}
gene_names <- input %>%
dplyr::select(name)
other_data <- input %>%
dplyr::select(-name)
gene_matrix <- as.matrix(gene_names)
new_names <- map(gene_matrix, get_last_name)
new_names <- data.frame(name = as.matrix(new_names)) %>%
mutate(name = as.character(name))
res <- bind_cols(new_names, other_data)
}
# Calculate distance traveled
tm <- time_2 - time_1
# Calculate elongation rate
rate_table <- input %>%
spread(key, wave_edge) %>%
na.omit() %>%
filter(tm_2 > tm_1) %>%
mutate(
rate = (tm_2 - tm_1) / tm,
rate = round(rate, digits = 1),
long_name = name
) %>%
filter(rate > 0) %>%
dplyr::select(long_name, name, tm_1, tm_2, rate)
# Extract gene symbols
rate_table <- extract_gene_symbol(rate_table)
# Update column names
tm1_name <- str_c(prefix, time_1, "min", sep = " ")
tm2_name <- str_c(prefix, time_2, "min", sep = " ")
rate_name <- str_c(prefix, " rate (kb/min)")
col_names <- c(
"Name", "Long_name",
tm1_name, tm2_name,
rate_name
)
colnames(rate_table) <- col_names
rate_table
}
HMM_rates <- calc_rates(HMM_coords, time_1, time_2, prefix = "HMM", win_min, win_max)
simple_rates <- calc_rates(simple_coords, time_1, time_2, prefix = "Simple", win_min, win_max)
merged_rates <- left_join(HMM_rates, simple_rates, by = c("Long_name", "Name")) %>%
na.omit()
# Attach the per-gene rate columns to the window-level metaplot table,
# joining on the full gene name; rows without a computed rate are dropped.
merge_tbls <- function(meta_tbl, rate_tbl) {
  renamed <- rename(meta_tbl, Long_name = name)
  joined <- left_join(renamed, rate_tbl, by = "Long_name")
  na.omit(joined)
}
HMM_meta_rates <- merge_tbls(df_merge, HMM_rates)
simple_meta_rates <- merge_tbls(df_merge, simple_rates)
merged_meta_rates <- merge_tbls(df_merge, merged_rates)
list(HMM_meta_rates, simple_meta_rates, merged_meta_rates)
})
####################################
# Output table of elongation rates #
####################################
# Collapse a window-level rate table to one row per gene by dropping the
# per-window columns and de-duplicating what remains.
simplify_rate_tbls <- function(input) {
  per_gene <- dplyr::select(input, -win_id, -win_len, -kb_dist, -key, -count)
  unique(per_gene)
}
# Output table
output$rateTable <- DT::renderDataTable(
if (input$HMMcheckbox == TRUE && input$simpleCheckbox == FALSE) {
HMM_rates <- simplify_rate_tbls( tablesOut() [[1]] )
datatable(HMM_rates,
options = list(
columnDefs = list(list(visible = F, targets = c(1)))
),
selection = list(mode = "multiple")
)
} else if (input$HMMcheckbox == FALSE && input$simpleCheckbox == TRUE) {
simple_rates <- simplify_rate_tbls( tablesOut() [[2]] )
datatable(simple_rates,
options = list(
columnDefs = list(list(visible = F, targets = c(1)))
),
selection = list(mode = "multiple")
)
} else if (input$HMMcheckbox == TRUE && input$simpleCheckbox == TRUE) {
merged_rates <- simplify_rate_tbls( tablesOut() [[3]] )
datatable(merged_rates,
options = list(
columnDefs = list(list(visible = F, targets = c(1)))
),
selection = list(mode = "multiple")
)
}
)
# Download table
output$download <- downloadHandler(
filename = function() {
str_c("data-", Sys.Date(), ".txt")
},
content = function(file) {
if (input$HMMcheckbox == TRUE && input$simpleCheckbox == FALSE) {
HMM_rates <- simplify_rate_tbls( tablesOut() [[1]] )
write_tsv(HMM_rates, path = file)
} else if (input$HMMcheckbox == FALSE && input$simpleCheckbox == TRUE) {
simple_rates <- simplify_rate_tbls( tablesOut() [[2]] )
write_tsv(simple_rates, path = file)
} else if (input$HMMcheckbox == TRUE && input$simpleCheckbox == TRUE) {
merged_rates <- simplify_rate_tbls( tablesOut() [[3]] )
write_tsv(merged_rates, path = file)
}
}
)
###################
# Create metaplot #
###################
# Reactive to create metaplots
metaplotOut <- eventReactive(input$createPlot, ignoreInit = T, {
# Input times
time_1 <- input$time_1
time_2 <- input$time_2
tm1_name <- str_c(time_1, " min")
tm2_name <- str_c(time_2, " min")
# Input tables
if (input$HMMcheckbox == TRUE && input$simpleCheckbox == FALSE) {
meta_rates_tbl <- tablesOut() [[1]]
} else if (input$HMMcheckbox == FALSE && input$simpleCheckbox == TRUE) {
meta_rates_tbl <- tablesOut() [[2]]
} else if (input$HMMcheckbox == TRUE && input$simpleCheckbox == TRUE) {
meta_rates_tbl <- tablesOut() [[3]]
}
# Function to simplify metaplot tables
simplify_meta_tbls <- function(input) {
input %>%
dplyr::select(Long_name, key, kb_dist, count) %>%
rename(win_id = kb_dist)
}
# Simplify metaplot and rate tables
meta_tbl <- simplify_meta_tbls(meta_rates_tbl)
rate_tbl <- simplify_rate_tbls(meta_rates_tbl)
# Function to create metaplots
DRB_metaplot <- function(meta_in, rate_in) {
# Function to calculate mean signal
DRB_mean <- function(input, strand = F, relFreq = F) {
if (strand == T) {
res <- input %>%
separate(key, sep = "_", into = c("key", "rep", "strand", "type")) %>%
unite(key, key, rep, type, sep = "_")
}
else res <- input
if (relFreq == T) {
res <- res %>%
group_by(key, name) %>%
mutate(count = count / sum(count)) %>%
ungroup()
}
if (strand == T) {
res <- res %>%
separate(key, sep = "_", into = c("key", "rep", "type")) %>%
unite(key, key, rep, strand, type, sep = "_")
}
res <- res %>%
group_by(key, win_id) %>%
summarize(count = mean(count)) %>%
ungroup()
res
}
# Function to create metaplots
create_metaplots <- function(
input,
plot_title = NULL,
sub_title = NULL,
y_title = NULL,
waves,
line_type = 2,
text_pos,
plot_colors = c("#41ab5d", "#cb181d", "#225ea8")
) {
# Wave labels
wave_labels <- map(waves, function(input) {
str_c(input, " kb")
})
meta_plot <- input %>%
ggplot(aes(win_id, count, color = Timepoint)) +
geom_line(size = 3) +
scale_color_manual(values = plot_colors) +
labs(
subtitle = sub_title,
x = "Distance from TSS (kb)",
y = y_title
) +
annotate("text",
x = waves[[1]] + 5,
y = text_pos,
label = wave_labels[[1]],
size = 6,
color = plot_colors[2]
) +
annotate("text",
x = waves[[2]] + 5,
y = text_pos,
label = wave_labels[[2]],
size = 6,
color = plot_colors[3]
) +
theme_classic() +
theme(
strip.background = element_blank(),
plot.title = element_text(size = 35, face = "bold"),
plot.subtitle = element_text(size = 20),
axis.title = element_text(size = 20, face = "bold"),
axis.line = element_line(size = 2),
axis.ticks = element_line(size = 2),
axis.ticks.length = unit(10, units = "point"),
axis.text = element_text(size = 15, color = "black"),
legend.title = element_text(size = 20, face = "bold"),
legend.text = element_text(size = 18),
legend.text.align = 0,
legend.background = element_blank(),
legend.position = c(0.8, 0.8)
) +
geom_vline(
xintercept = waves[1:2],
size = 1, linetype = line_type,
color = plot_colors[2:3]
)
if (length(waves) == 4) {
meta_plot <- meta_plot +
geom_vline(
xintercept = waves[3:4],
size = 1, linetype = 3,
color = plot_colors[2:3]
) +
annotate("text",
x = waves[[3]] + 5,
y = text_pos * 0.8,
label = wave_labels[[3]],
size = 6,
color = plot_colors[2]
) +
annotate("text",
x = waves[[4]] + 5,
y = text_pos * 0.8,
label = wave_labels[[4]],
size = 6,
color = plot_colors[3]
)
}
if (!is.null(plot_title[[1]])) {
meta_plot <- meta_plot + labs(title = plot_title)
}
meta_plot
}
# Wave coordinates
wave_1 <- round( mean( as.numeric( rate_in [, 4] )), digits = 1)
wave_2 <- round( mean( as.numeric( rate_in [, 5] )), digits = 1)
waves <- c(wave_1, wave_2)
rate <- as.numeric( rate_in [, 6] )
mean_rate <- round( mean( rate ), digits = 1)
med_rate <- round( median( rate ), digits = 1)
if (input$HMMcheckbox == TRUE && input$simpleCheckbox == TRUE) {
sim_wave_1 <- round( mean( as.numeric( rate_in [, 7] )), digits = 1)
sim_wave_2 <- round( mean( as.numeric( rate_in [, 8] )), digits = 1)
sim_waves <- c(sim_wave_1, sim_wave_2)
sim_rate <- as.numeric( rate_in [, 9] )
sim_mean_rate <- round( mean( sim_rate ), digits = 1 )
sim_med_rate <- round( median( sim_rate ), digits = 1 )
waves <- c(waves, sim_waves)
mean_rate <- mean( c( mean_rate, sim_mean_rate ))
med_rate <- mean( c( med_rate, sim_med_rate ))
}
# Plot data
meta_mean <- DRB_mean(meta_in)
plot_data <- meta_mean %>%
mutate(
key = ifelse(key == "tm_1", tm1_name, key),
key = ifelse(key == "tm_2", tm2_name, key),
key = ifelse(key == "tm_con", "Control", key),
key = fct_relevel(key, c("Control", tm1_name, tm2_name))
) %>%
rename(Timepoint = key)
# Coordinates for plot labels
max_y <- plot_data %>%
mutate(max_value = max(count)) %>%
dplyr::select(max_value) %>%
unique()
max_x <- plot_data %>%
mutate(max_value = max(win_id)) %>%
dplyr::select(max_value) %>%
unique()
wave_text_y <- as.numeric(max_y) * 0.9
rate_text_x <- as.numeric(max_x) * 0.745
rate_text_y <- as.numeric(max_y) * 0.5
# Changed line type depending on wave-calling method
if (input$HMMcheckbox == FALSE && input$simpleCheckbox == TRUE) {
line_type <- 3
} else {
line_type <- 2
}
# Created metaplots
if (nrow(rate_in) == 1) {
create_metaplots(
plot_data,
plot_title = as.character( rate_in[, "Name"] ),
sub_title = str_c(mean_rate, " kb/min"),
y_title = "",
waves = waves,
line_type = line_type,
text_pos = wave_text_y
)
} else {
create_metaplots(
plot_data,
sub_title = "",
y_title = "Average Signal",
waves = waves,
line_type = line_type,
text_pos = wave_text_y
) +
annotate("text",
x = rate_text_x,
y = rate_text_y,
label = str_c("Mean: ", mean_rate, " kb/min\nMedian: ", med_rate, " kb/min"),
size = 6.5,
hjust = 0
)
}
}
# Reactive to retrieve info for selected genes
rateTable_selected <- reactive({
ids <- input$rateTable_rows_selected
long_name <- rate_tbl [ids, 1]
gene_symbol <- rate_tbl [ids, 3]
list(long_name, gene_symbol)
})
# Create metaplot for selected genes
if (!is.null(input$rateTable_rows_selected)) {
# Selected genes
gene_long <- data.frame( "Long_name" = rateTable_selected() [[1]] )
gene_symbol <- as.character( rateTable_selected() [[2]] )
meta_tbl <- meta_tbl %>%
semi_join(gene_long, by = "Long_name")
rate_tbl <- rate_tbl %>%
semi_join(gene_long, by = "Long_name")
}
DRB_metaplot(meta_tbl, rate_tbl)
})
# Output metaplot
output$metaPlot <- renderPlot(metaplotOut())
##################
# Create boxplot #
##################
boxplotOut <- eventReactive(input$createPlot, ignoreInit = T, {
# Function to create boxplot
DRB_boxplot <- function(input, plot_colors) {
input %>%
ggplot(aes(rate_key, rate)) +
geom_boxplot(size = 2, color = plot_colors) +
labs(
title = "",
x = "",
y = "Elongation Rate (kb/min)"
) +
theme_classic() +
theme(
legend.key.size = element_blank(),
strip.background = element_blank(),
axis.title = element_text(size = 20, face = "bold"),
axis.line = element_line(size = 2),
axis.ticks = element_line(size = 2),
axis.ticks.length = unit(10, units = "point"),
axis.text = element_text(size = 15, color = "black"),
legend.title = element_blank(),
legend.text = element_blank(),
legend.background = element_blank()
)
}
# Plot colors
plot_colors <- "#cb181d"
# Input tables
if (input$HMMcheckbox == TRUE && input$simpleCheckbox == FALSE) {
rate_tbl <- simplify_rate_tbls( tablesOut() [[1]] ) %>%
gather(rate_key, rate, `HMM rate (kb/min)`)
} else if (input$HMMcheckbox == FALSE && input$simpleCheckbox == TRUE) {
rate_tbl <- simplify_rate_tbls( tablesOut() [[2]] ) %>%
gather(rate_key, rate, `Simple rate (kb/min)`)
} else if (input$HMMcheckbox == TRUE && input$simpleCheckbox == TRUE) {
plot_colors <- c("#cb181d", "#225ea8")
rate_tbl <- simplify_rate_tbls( tablesOut() [[3]] ) %>%
gather(rate_key, rate, `HMM rate (kb/min)`, `Simple rate (kb/min)`)
}
rate_tbl <- rate_tbl %>%
dplyr::select(Long_name, rate_key, rate) %>%
mutate(rate_key = str_replace(rate_key, "rate \\(kb/min\\)", ""))
# Reactive to retrieve info for selected genes
rateTable_selected <- reactive({
ids <- input$rateTable_rows_selected
long_name <- rate_tbl [ids, 1]
gene_symbol <- rate_tbl [ids, 3]
list(long_name, gene_symbol)
})
# Create boxplots for selected genes
if (!is.null(input$rateTable_rows_selected)) {
# Selected genes
gene_long <- data.frame( "Long_name" = rateTable_selected() [[1]] )
gene_symbol <- as.character( rateTable_selected() [[2]] )
select_rates <- rate_tbl %>% semi_join(gene_long, by = "Long_name")
rate_names <- rate_tbl %>% dplyr::select(Long_name, rate_key)
select_tbl <- rate_names %>% left_join(select_rates)
if ( length(gene_symbol) < 21 ) {
DRB_boxplot(rate_tbl, plot_colors) +
geom_jitter(data = select_tbl, aes(rate_key, rate), color = "#41ab5d", width = 0.1, height = 0, size = 4)
} else {
DRB_boxplot(select_tbl, plot_colors)
}
} else {
DRB_boxplot(rate_tbl, plot_colors)
}
})
# Output boxplot
output$boxPlot <- renderPlot(boxplotOut())
},
# Return a safeError if a parsing error occurs
error = function(e) {
stop(safeError(e))
}
)
}
# Create Shiny app ----
shinyApp(ui, server)
|
cfb1164183ea41a37e29cb240c9b2e48895a18cb | a2539afceaadf3ef919d22f05381254c9ea3ce20 | /man/genus.Rd | 5723f7f416785241336607cf203a81d181a798c9 | [] | no_license | cstubben/genomes | a9e3e47907738e973c132c47e830cfed571202d6 | d0bf9f999931aa17926d90bdee569746f17160e8 | refs/heads/master | 2021-01-22T02:42:56.916047 | 2018-09-18T21:10:32 | 2018-09-18T21:10:32 | 9,374,826 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 467 | rd | genus.Rd | \name{genus}
\alias{genus}
\title{ Extract the genus name }
\description{
Extracts the genus name from a scientific name
}
\usage{
genus(x)
}
\arguments{
\item{x}{ A vector of scientific names }
}
\details{ Removes single quotes, brackets and candidate qualifiers. }
\value{
A vector of genus names
}
%\references{}
\author{ Chris Stubben }
%\note{ }
\seealso{ \code{\link{species}} }
\examples{
genus("[Bacillus] selenitireducens")
}
\keyword{ methods }
|
22831b1479e781948d24ee4fa25f8420ca492a55 | 516b6af59e9445054ce5d25ad9f9e50401f34330 | /plot2.R | df2c55a52ef8512dedb4d92201bead7085ad916c | [] | no_license | BuffaloFan32/ExData_Plotting1 | cf45fdea853650ba77e172fdb9890c933e93c52a | 248b74577e9dbe1a4f1cfd55e4b60c7db2b36134 | refs/heads/master | 2020-12-03T09:46:50.945570 | 2020-01-01T23:06:48 | 2020-01-01T23:06:48 | 231,271,800 | 0 | 0 | null | 2020-01-01T22:42:23 | 2020-01-01T22:42:23 | null | UTF-8 | R | false | false | 1,195 | r | plot2.R | library(lubridate)
# NOTE: removed `rm(list = ls())` -- wiping the global environment from a
# script is an anti-pattern and can destroy a caller's workspace.

# Download and unzip the UCI household power-consumption data.
# Skip the download when the extracted file is already present so the
# script can be re-run without re-fetching the archive.
urll <- "https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip"
destfile <- paste0(getwd(), "/", "exploratoryweek1.zip")
if (!file.exists("household_power_consumption.txt")) {
  download.file(urll, destfile)
  unzip("exploratoryweek1.zip", exdir = getwd(), list = FALSE, overwrite = TRUE)
}
path <- "./"

# Read the power data (semicolon-separated; keep strings as characters --
# spelled FALSE, not the reassignable alias F).
power <- read.table(paste0(path, "household_power_consumption.txt"),
                    header = TRUE, sep = ";", stringsAsFactors = FALSE)

# Keep only 1-2 Feb 2007. The source file is chronological, so a single
# %in% subset yields the same rows/order as the original two-step rbind.
power2d <- power[power$Date %in% c("1/2/2007", "2/2/2007"), ]
power2d$Global_active_power <- as.numeric(power2d$Global_active_power)

# Combine date and time columns into a single date-time vector.
datetime <- with(power2d, dmy(Date) + hms(Time))
power2d <- cbind(datetime, power2d)

# Line chart of Global Active Power, defined once and rendered both to
# plot2.png and to an on-screen device (avoids the duplicated plot code).
plot_global_active_power <- function() {
  plot(type = "l", Global_active_power ~ datetime, power2d,
       ylab = "Global Active Power (kilowatts)",
       xlab = "")
}
png(paste0(path, "plot2.png"), width = 480, height = 480, units = "px")
plot_global_active_power()
dev.off()
x11()
plot_global_active_power()
|
4b9a9470367595050e6d59f55974f105dbfd7a1a | a5d1975f45ed63de4f6ed733823d1b13fbc6755e | /Rpackage/test/groupTest.R | a052ec66e53c52180eb1ccdaa47a33411955eeaf | [
"MIT"
] | permissive | xieguigang/visualbasic.R | ce865f314cfca04e556cfc48fa0b50a2ec2c126a | d91613a72f722616ec342873b8acfb2d6fd2b7f2 | refs/heads/master | 2022-02-27T17:08:53.160878 | 2022-02-07T05:30:30 | 2022-02-07T05:30:30 | 81,850,763 | 0 | 0 | null | 2018-06-26T02:34:49 | 2017-02-13T17:17:08 | R | UTF-8 | R | false | false | 1,194 | r | groupTest.R | #Region "Microsoft.ROpen::5ca1a99c8058f7104427612ea8aad09d, test\groupTest.R"
# Summaries:
# mz.grouping <- function(mz, assert) {...
#End Region
# Benchmark setup: 100k uniform random "m/z" values in [0, 100] and a
# tolerance predicate (two values belong together when within 0.3).
seq <- runif(100000, min=0, max=100);
assert <- function(x , y) abs(x - y) <= 0.3;
# Time the compiled/vectorized implementation (numeric.group is defined
# elsewhere in this package) and print the first resulting group.
print( system.time({
print( numeric.group(seq, assert)[[1]]);
}));
#' Group unique m/z values by a pairwise tolerance predicate
#'
#' Greedy single-pass grouping: each not-yet-absorbed unique value seeds a
#' group and absorbs every remaining unique value for which
#' \code{assert(value, seed)} is \code{TRUE}.
#'
#' @param mz numeric vector of m/z values (duplicates allowed).
#' @param assert function(x, y) returning a logical; \code{TRUE} when the two
#'   values belong to the same group.
#' @return A named list keyed by each group's seed value. Each element contains
#'   the seed followed by all absorbed unique values (the seed therefore
#'   appears twice, matching the original implementation's output).
mz.grouping <- function(mz, assert) {
	# Work on unique values only; absorbed entries are flagged with -1.
	mz.unique <- unique(mz);
	mz.groupKey <- list();
	# seq_along() (instead of 1:length()) makes the empty-input case safe:
	# the original 1:length(mz.unique) iterated over c(1, 0) and crashed.
	for (i in seq_along(mz.unique)) {
		seed <- mz.unique[i];
		# Skip values already taken out by an earlier group.
		if (seed != -1) {
			# The seed itself is a member of its own group.
			members <- seed;
			for (j in seq_along(mz.unique)) {
				if (assert(mz.unique[j], seed)) {
					members <- c(members, mz.unique[j]);
					mz.unique[j] = -1; # prevent re-absorption by later seeds
				}
			}
			mz.groupKey[[as.character(seed)]] <- members;
		}
	}
	mz.groupKey;
}
# Time the plain-R reference implementation above ...
print(system.time({
print( mz.grouping(seq, assert)[[1]]);
}));
# ... and time the package implementation again for a side-by-side comparison.
print( system.time({
print( numeric.group(seq, assert)[[1]]);
}));
|
fd730c09655ba6b7c35b64992344d79131ca1169 | 2e627e0abf7f01c48fddc9f7aaf46183574541df | /PBStools/man/dtget.Rd | 4b73c5bb57414911bc358ed44b54a1eadc506760 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | pbs-software/pbs-tools | 30b245fd4d3fb20d67ba243bc6614dc38bc03af7 | 2110992d3b760a2995aa7ce0c36fcf938a3d2f4e | refs/heads/master | 2023-07-20T04:24:53.315152 | 2023-07-06T17:33:01 | 2023-07-06T17:33:01 | 37,491,664 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,160 | rd | dtget.Rd | \name{dtget}
\alias{dtget}
\alias{dtcall}
\alias{dtprint}
\alias{dtput}
\alias{dlisp}
\title{
Get/Print Objects From or Put Objects Into Temporary Work Environment for PBSdata
}
\description{
These functions are wrappers to the PBSmodelling accessor functions that
get/print objects from or put objects into a temporary work environment,
in this case \code{.PBSdataEnv}.
}
\usage{
dtget(...)
dtcall(...)
dtprint(...)
dtput(...)
dlisp(...)
}
\arguments{
\item{...}{For \code{dtget} through to \code{dtput}, the only free argument is: \cr
\code{x} -- name (with or without quotes) of an object to retrieve or store
in the temporary environment; cannot be represented by a variable. \cr
Fixed arguments: \code{penv = parent.frame(), tenv = .PBSdataEnv} \cr
See \code{\link[PBSmodelling]{tget}} for additional information.
For \code{dlisp}, there is only one fixed argument: \cr
\code{pos = .PBSdataEnv} \cr
All other arguments are available -- see \code{\link[PBSmodelling]{lisp}} }
}
\details{
These accessor functions were developed as a response to the CRAN
repository policy statement: \dQuote{Packages should not modify the
global environment (user's workspace).}
}
\value{
Objects are retrieved from or sent to the temporary working
environment to/from the place where the function(s) are called.
Additionally, \code{dtcall} invisibly returns the object without
transferring, which is useful when the object is a function that the
user may wish to call, for example, \code{dtcall(myfunc)()}.
}
\references{
CRAN Repository Policy:
\url{https://cran.r-project.org/web/packages/policies.html}
}
\author{
\href{mailto:rowan.haigh@dfo-mpo.gc.ca}{Rowan Haigh}, Program Head -- Offshore Rockfish\cr
Pacific Biological Station (PBS), Fisheries & Oceans Canada (DFO), Nanaimo BC\cr
\emph{locus opus}: Institute of Ocean Sciences (IOS), Sidney BC\cr
Last modified \code{Rd: 2020-10-08}
}
\seealso{
\code{\link[PBSmodelling]{tget}} and \code{\link[PBSmodelling]{lisp}} in \pkg{PBSmodelling}
}
\keyword{manip}
\keyword{environment}
|
9c72e07fabd84a6b4292cbbccfe525ec25931ec0 | 2099a2b0f63f250e09f7cd7350ca45d212e2d364 | /DUC-Dataset/Summary_m200_R/D117.M.200.html.R | e7be6af994b94753fb41bcf92e4581cc2ed9fd2b | [] | no_license | Angela7126/SLNSumEval | 3548301645264f9656b67dc807aec93b636778ef | b9e7157a735555861d2baf6c182e807e732a9dd6 | refs/heads/master | 2023-04-20T06:41:01.728968 | 2021-05-12T03:40:11 | 2021-05-12T03:40:11 | 366,429,744 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,578 | r | D117.M.200.html.R | <html>
<head>
<meta name="TextLength" content="SENT_NUM:12, WORD_NUM:187">
</head>
<body bgcolor="white">
<a href="#0" id="0">'Moon Tiger': A Dark Horse Takes the Booker.</a>
<a href="#1" id="1">They were chosen from a list of 113 books published in the United Kingdom in 1989.</a>
<a href="#2" id="2">_ ``Restoration,'' British author Rose Tremain's story of Robert Merivel, a favorite of King Charles II who married the monarch's youngest mistress.</a>
<a href="#3" id="3">"When she accepted her prize for "Moon Tiger," Penelope Lively looked surprised and pleased.</a>
<a href="#4" id="4">Last year the prize was won by Ben Okri, for his book The Famished Road.</a>
<a href="#5" id="5">Shades of Saint Therese of Lisieux.</a>
<a href="#6" id="6">``Everyone who loves good books benefits from this, surely,'' he added.</a>
<a href="#7" id="7">Another notable spinoff is the Booker harpies, readers intent on digesting all six titles, then vigorously disputing the choices, which never please everyone.</a>
<a href="#8" id="8">Most damaging of all was the verdict of the booksellers, who dismissed the shortlist as narcoleptic.</a>
<a href="#9" id="9">Unsworth's massive novel about the 1750s slave trade came out early this year to widespread critical praise.</a>
<a href="#10" id="10">Social, cultural and psychological reasons play a part.</a>
<a href="#11" id="11">Like Rebecca, the popular novel by du Maurier's granddaughter, it is a natural equivalent of what the current Booker Prize seeks to create: a combination of the classic and the bestseller.</a>
</body>
</html> |
d513907cc6dcf16b29150a523bd59d6afbffabfb | fed4409da9801ce1ca986b1814631acb6c8c8aed | /splitdoor/man/breakup_est_by_treatment_group.Rd | ad87b22c0eaf7396a989f5a2d3af521b7f6ab8f9 | [
"MIT"
] | permissive | amit-sharma/splitdoor-causal-criterion | 6b7684b9f752b77aaa3844311d336603249d4421 | 28e22817023e51b4c91205ef4519b4cbd62bf9b6 | refs/heads/master | 2021-01-12T05:02:54.684611 | 2019-12-16T01:08:31 | 2019-12-16T01:08:31 | 77,838,086 | 15 | 5 | MIT | 2019-12-16T01:08:32 | 2017-01-02T14:12:40 | R | UTF-8 | R | false | true | 712 | rd | breakup_est_by_treatment_group.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aggregate_causal_estimates.R
\name{breakup_est_by_treatment_group}
\alias{breakup_est_by_treatment_group}
\title{Aggregates estimates for (time-period, treatment) pairs over treatment groups. Useful when treatments can be classified in a fixed number of groups.}
\usage{
breakup_est_by_treatment_group(ctr_df, estimate_colname)
}
\arguments{
\item{estimate_colname}{Name of the column in \code{ctr_df} that holds the causal estimates to be aggregated.}
}
\value{
A data.frame containing mean causal estimates for each treatment group.
}
\description{
Aggregates estimates for (time-period, treatment) pairs over treatment groups. Useful when treatments can be classified in a fixed number of groups.
}
|
58147dbaae29177b1e0267307738908b8b270c55 | 06236b8badc177928591179402d03b2e754c9239 | /R Stuff/BasicOperationsOfADataFrame.R | d566cb3fbb67f67274b60728465d6b6941bf8b04 | [] | no_license | brett-davi5/R-Studio-and-GGPlotMap | 0662ff1ae9de7b20c1cf97ca4f1bc7dd91cf5b57 | 5669795e27c6a86eac6b7fd1f642f144aeffc081 | refs/heads/master | 2021-01-22T22:28:08.616091 | 2017-05-29T21:13:49 | 2017-05-29T21:13:49 | 92,775,776 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 595 | r | BasicOperationsOfADataFrame.R | #Basic Operations of a Data Frame
# Tutorial script: basic subsetting and column operations on the `stats`
# data frame (assumed loaded earlier in the session; has columns
# Birth.rate and Internet.users).
stats[1:10,] #look at the data of the first 10 rows
#this is subsetting
stats[3:9,]
stats[c(4, 100),]
#Remember how the square brackets work
stats[1,] #return first row
is.data.frame(stats[1,]) #no need for drop=F
stats[,1] #not a data frame
is.data.frame(stats[,1])
# drop=F keeps the single-column result as a data frame instead of a vector
stats[,1,drop=F]
is.data.frame(stats[,1,drop=F])
#multiply columns (element-wise, vectorized)
head(stats)
stats$Birth.rate*stats$Internet.users
#add column holding the product
head(stats)
stats$MyCalculations <- stats$Birth.rate*stats$Internet.users
stats
#remove a column by assigning NULL
stats$MyCalculations <- NULL
|
1427b2f7b09564381e99d51623c290a21f9bdb91 | 42deb94948bd04300274d517aa8e3033c8dd3df2 | /R/plotMS.R | e384442779022d74d12a3817673a49f3781b78cf | [] | no_license | cran/pedometrics | fe0940a0af6ceb02f9910d955ce6af72da94a5dc | 7329722f174c88e86620b21a81c1affc0e42b93f | refs/heads/master | 2022-07-03T13:49:28.737562 | 2022-06-19T05:10:02 | 2022-06-19T05:10:02 | 22,748,555 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,049 | r | plotMS.R | #' Model series plot
#'
#' @description
#' Produce a graphical output to examine the effect of using different model specifications (design)
#' on the predictive performance of these models (a model series). Devised to access the results of
#' [pedometrics::buildModelSeries()] and [pedometrics::statsMS()], but can be easily adapted to
#' work with any model structure and performance measure.
#'
#' @param obj Object of class `data.frame`, generally returned by [pedometrics::statsMS()],
#' containing:
#'
#' 1. a series of performance statistics of several models, and
#' 2. the design information of each model.
#'
#' See \sQuote{Details} for more information.
#'
#' @param grid Vector of integer values or character strings indicating the columns of the
#' `data.frame` containing the design data which will be gridded using the function
#' [lattice::levelplot()]. See \sQuote{Details} for more information.
#'
#' @param line Character string or integer value indicating which of the performance statistics
#' (usually calculated by [pedometrics::statsMS()]) should be plotted using the function
#' [lattice::xyplot()]. See \sQuote{Details} for more information.
#'
#' @param ind Integer value indicating for which group of models the mean rank is to be calculated.
#' See \sQuote{Details} for more information.
#'
#' @param type Vector of character strings indicating some of the effects to be used when plotting
#' the performance statistics using [lattice::xyplot()]. Defaults to `type = c("b", "g")`. See
#' [lattice::panel.xyplot()] for more information on how to set this argument.
#'
#' @param pch Vector with two integer values specifying the symbols to be used to plot points. The
#' first sets the symbol used to plot the performance statistic, while the second sets the symbol
#' used to plot the mean rank of the indicator set using argument `ind`. Defaults to
#' `pch = c(20, 2)`. See [graphics::points()] for possible values and their interpretation.
#'
#' @param size Numeric value specifying the size of the symbols used for plotting the mean rank of
#' the indicator set using argument `ind`. Defaults to `size = 0.5`. See [grid::grid.points()] for
#' more information.
#'
#' @param arrange Character string indicating how the model series should be arranged, which can be
#' in ascending (`"asc"`) or descending (`"desc"`, default) order.
# See [plyr::arrange()] for more information.
#'
#' @param color Vector defining the colors to be used in the grid produced by function
#' [lattice::levelplot()]. If `color = NULL`, defaults to `color = cm.colors(n)`, where `n` is the
#' number of unique values in the columns defined by argument `grid`. See [grDevices::cm.colors()]
#' to see how to use other color palettes.
#'
#' @param xlim Numeric vector of length 2, giving the x coordinates range. If `xlim = NULL` (which
#' is the recommended value), defaults to `xlim = c(0.5, dim(obj)[1] + 0.5)`. This is, so far, the
#' optimum range for adequate plotting.
#'
#' @param ylab Character vector of length 2, giving the y-axis labels. When `obj` is a `data.frame`
#' returned by [pedometrics::statsMS()], and the performance statistic passed to argument
#' `line` is one of those calculated by [pedometrics::statsMS()] (`"candidates"`, `"df"`, `"aic"`,
#' `"rmse"`, `"nrmse"`, `"r2"`, `"adj_r2"`, or `"ADJ_r2"`), the function tries to automatically
#' identify the correct `ylab`.
#'
#' @param xlab Character vector of unit length, the x-axis label. Defaults `xlab = "Model ranking"`.
#'
#' @param at Numeric vector indicating the location of tick marks along the x axis (in native
#' coordinates).
#'
#' @param ... Other arguments for plotting, although most of these have no been tested. Argument
#' `asp`, for example, is not effective since the function automatically identifies the best aspect
#' for plotting based on the dimensions of the design data.
#'
#' @details
#' This section gives more details about arguments `obj`, `grid`, `line`, `arrange`, and `ind`.
#'
#' \subsection{obj}{
#' The argument `obj` usually constitutes a `data.frame` returned by [pedometrics::statsMS()].
#' However, the user can use any `data.frame` object as far as it contains the two basic units of
#' information needed:
#' \enumerate{
#' \item design data passed with argument `grid`
#' \item performance statistic passed with argument `line`
#' }
#' }
#' \subsection{grid}{
#' The argument `grid` indicates the _design_ data which is used to produce the grid output in the
#' top of the model series plot. By _design_ we mean the data that specify the structure of each
#' model and how they differ from each other. Suppose that eight linear models were fit using three
#' types of predictor variables (`a`, `b`, and `c`). Each of these predictor variables is available
#' in two versions that differ by their accuracy, where `0` means a less accurate predictor
#' variable, while `1` means a more accurate predictor variable. This yields 2^3 = 8 total possible
#' combinations. The _design_ data would be of the following form:
#'
#' \verb{
#' > design
#' a b c
#' 1 0 0 0
#' 2 0 0 1
#' 3 0 1 0
#' 4 1 0 0
#' 5 0 1 1
#' 6 1 0 1
#' 7 1 1 0
#' 8 1 1 1
#' }
#' }
#' \subsection{line}{
#' The argument `line` corresponds to the performance statistic that is used to arrange the models
#' in ascending or descending order, and to produce the line output in the bottom of the model
#' series plot. For example, it can be a series of values of adjusted coefficient of determination,
#' one for each model:
#'
#' \verb{
#' adj_r2 <- c(0.87, 0.74, 0.81, 0.85, 0.54, 0.86, 0.90, 0.89)
#' }
#' }
#' \subsection{arrange}{
#' The argument `arrange` automatically arranges the model series according to the performance
#' statistics selected with argument `line`. If `obj` is a `data.frame` returned by
#' [pedometrics::statsMS()], then the function uses standard arranging approaches. For most
#' performance statistics, the models are arranged in descending order. The exception is when
#' `"r2"`, `"adj_r2"`, or `"ADJ_r2"` are used, in which case the models are arranged in ascending
#' order. This means that the model with lowest value appears in the leftmost side of the model
#' series plot, while the models with the highest value appears in the rightmost side of the plot.
#'
#' \verb{
#' > arrange(obj, adj_r2)
#' id a b c adj_r2
#' 1 5 1 0 1 0.54
#' 2 2 0 0 1 0.74
#' 3 3 1 0 0 0.81
#' 4 4 0 1 0 0.85
#' 5 6 0 1 1 0.86
#' 6 1 0 0 0 0.87
#' 7 8 1 1 1 0.89
#' 8 7 1 1 0 0.90
#' }
#'
#' This results suggest that the best performing model is that of `id = 7`, while the model of
#' `id = 5` is the poorest one.
#' }
#' \subsection{ind}{
#' The model series plot allows to see how the design influences model performance. This is achieved
#' mainly through the use of different colors in the grid output, where each unique value in the
#' _design_ data is represented by a different color. For the example given above, one could try to
#' see if the models built with the more accurate versions of the predictor variables have a better
#' performance by identifying their relative distribution in the model series plot. The models
#' placed at the rightmost side of the plot are those with the best performance.
#'
#' The argument `ind` provides another tool to help identifying how the design, more specifically
#' how each variable in the _design_ data, influences model performance. This is done by simply
#' calculating the mean ranking of the models that were built using the updated version of each
#' predictor variable. This very same mean ranking is also used to rank the predictor variables and
#' thus identify which of them is the most important.
#'
#' After arranging the `design` data described above using the adjusted coefficient of
#' determination, the following mean rank is obtained for each predictor variable:
#'
#' \verb{
#' > rank_center
#' a b c
#' 1 5.75 6.25 5.25
#' }
#'
#' This result suggests that the best model performance is obtained when using the updated version
#' of the predictor variable `b`. In the model series plot, the predictor variable `b` appears in
#' the top row, while the predictor variable `c` appears in the bottom row.
#' }
#' @return
#' An object of class `"trellis"` consisting of a model series plot.
#'
#' @references
#' Deepayan Sarkar (2008). _Lattice: Multivariate Data Visualization with R._ Springer, New York.
#' ISBN 978-0-387-75968-5.
#'
#' Roger D. Peng (2008). _A method for visualizing multivariate time series data._ Journal of
#' Statistical Software. v. 25 (Code Snippet), p. 1-17.
#'
#' Roger D. Peng (2012). _mvtsplot: Multivariate Time Series Plot._ R package version 1.0-1.
#' <https://CRAN.R-project.org/package=mvtsplot>.
#'
#' A. Samuel-Rosa, G. B. M. Heuvelink, G. de Mattos Vasques, and L. H. C. dos Anjos, Do more
#' detailed environmental covariates deliver more accurate soil maps?, _Geoderma_, vol. 243–244,
#' pp. 214–227, May 2015, doi: 10.1016/j.geoderma.2014.12.017.
#'
#' @author Alessandro Samuel-Rosa \email{alessandrosamuelrosa@@gmail.com}
#'
#' @section Dependencies:
# The __plyr__ package, provider of tools for splitting, applying and combining data in R, is
# required for [pedometrics::plotModelSeries()] to work. The development version of the __plyr__
# package is available on <https://github.com/hadley/plyr> while its old versions are available on
# the CRAN archive at <https://cran.r-project.org/src/contrib/Archive/plyr/>.
#'
#' The __grDevices__ package, provider of graphics devices and support for colours and fonts in R,
#' is required for [pedometrics::plotModelSeries()] to work.
#'
#' The __grid__ package, a rewrite of the graphics layout capabilities in R, is required for
#' [pedometrics::plotModelSeries()] to work.
#'
#' @note
#' Some of the solutions used to build this function were found in the source code of the R-package
#' __mvtsplot__. As such, the author of that package, Roger D. Peng \email{rpeng@@jhsph.edu}, is
#' entitled \sQuote{contributors} to the R-package __pedometrics__.
#'
#' @section Warning:
#' Use the original functions [lattice::xyplot()] and [lattice::levelplot()] for higher
#' customization.
#'
#' @seealso [lattice::xyplot()] [lattice::levelplot()]
#'
#' @examples
# if (all(require(plyr), require(grDevices), require(grid))) {
#' if (all(require(grDevices), require(grid))) {
#' # This example follows the discussion in section "Details"
#' # Note that the data.frame is created manually
#' id <- c(1:8)
#' design <- data.frame(a = c(0, 0, 1, 0, 1, 0, 1, 1),
#' b = c(0, 0, 0, 1, 0, 1, 1, 1),
#' c = c(0, 1, 0, 0, 1, 1, 0, 1))
#' adj_r2 <- c(0.87, 0.74, 0.81, 0.85, 0.54, 0.86, 0.90, 0.89)
#' obj <- cbind(id, design, adj_r2)
#' p <- plotModelSeries(obj, grid = c(2:4), line = "adj_r2", ind = 1,
#' color = c("lightyellow", "palegreen"),
#' main = "Model Series Plot")
#' }
#' @keywords hplot
#' @importFrom stats update
# FUNCTION #########################################################################################
#' @export
#' @rdname plotModelSeries
plotModelSeries <-
function(obj, grid, line, ind, type = c("b", "g"), pch = c(20, 2), size = 0.5, arrange = "desc",
color = NULL, xlim = NULL, ylab = NULL, xlab = NULL, at = NULL, ...) {
# check if suggested packages are installed
if (!requireNamespace("grDevices")) stop("grDevices package is missing")
# if (!requireNamespace("lattice")) stop("lattice package is missing")
if (!requireNamespace("grid")) stop("grid package is missing")
# if (!requireNamespace("plyr")) stop("plyr package is missing")
# check function arguments: the four data arguments are mandatory and typed
if (missing(obj)) {
stop("'obj' is a mandatory argument")
}
if (missing(grid)) {
stop("'grid' is a mandatory argument")
}
if (missing(line)) {
stop("'line' is a mandatory argument")
}
if (missing(ind)) {
stop("'ind' is a mandatory argument")
}
if (!inherits(obj, "data.frame")) {
stop("'obj' should be of class data.frame")
}
if (!inherits(grid, c("integer", "character", "numeric"))) {
stop("'grid' should be an integer value or a character string")
}
if (!inherits(line, c("integer", "character", "numeric"))) {
stop("'line' should be an integer value or a character string")
}
if (!inherits(ind, c("integer", "numeric")) || round(ind) != ind) {
stop("'ind' should be an integer value")
}
# when 'line' is a column index, a y-axis label can only be derived
# automatically for the performance statistics produced by statsMS()
if (inherits(line, c("integer", "numeric"))) {
nam0 <- c("candidates", "df", "aic", "rmse", "nrmse", "r2", "adj_r2", "ADJ_r2")
nam1 <- colnames(obj)[line]
if (!any(colnames(obj)[line] == nam0)) {
stop(paste0("'ylab' should be provided for performance statistics '", nam1, "'"))
}
}
if (!missing(xlab)) {
if (length(xlab) != 1) {
stop("'xlab' should have length equal to 1")
}
}
if (!missing(ylab)) {
if (length(ylab) != 2) {
stop("'ylab' should have length equal to 2")
}
}
if (length(type) != 2) {
stop("'type' should have length equal to 2")
}
if (length(pch) != 2) {
stop("'pch' should have length equal to 2")
}
# prepare data: resolve 'line' to a column name, then sort the models.
# R^2-type statistics are ranked ascending-by-value via decreasing order
# here (best model ends up on the right of the plot); everything else
# is sorted the other way around.
if (inherits(line, "numeric")) {
line <- colnames(obj)[line]
}
if (any(line == c("r2", "adj_r2", "ADJ_r2"))) {
# obj <- plyr::arrange(obj, plyr::desc(obj[, line]))
idx_arrange <- order(obj[[line]], decreasing = TRUE)
obj <- obj[idx_arrange, ]
} else {
# obj <- plyr::arrange(obj, obj[, line])
idx_arrange <- order(obj[[line]], decreasing = FALSE)
obj <- obj[idx_arrange, ]
}
# design matrix for the levelplot grid; x = model rank, y = statistic
grid <- as.matrix(obj[, grid])
x <- seq(1, dim(obj)[1], 1)
y <- as.numeric(obj[, line])
# default x-axis tick positions: every 5 models below 100, every 10 above
if (missing(at)) {
if (max(x) < 100) {
m <- round(max(x) / 10) * 10
at <- c(1, seq(5, m, 5))
} else {
m <- round(max(x) / 10) * 10
at <- c(1, seq(10, m, by = 10))
}
}
# one color per unique value found in the design data
if (missing(color)) {
color <- grDevices::cm.colors(length(unique(as.numeric(grid))))
}
if (missing(xlim)) {
xlim <- c(0.5, dim(obj)[1] + 0.5)
}
if (missing(xlab)) {
xlab <- "Model ranking"
}
# derive the two y-axis labels (statistic panel + design panel)
if (missing(ylab)) {
if (inherits(line, "numeric")) {
line <- colnames(obj)[line]
}
if (line == "candidates") {
yl <- "Candidate predictors"
}
if (line == "df") {
yl <- "Degrees of freedom"
}
if (line == "aic") {
yl <- "AIC"
}
if (line == "rmse") {
yl <- "RMSE"
}
if (line == "nrmse") {
yl <- "NRMSE"
}
if (line == "r2") {
yl <- expression(paste0(R^2))
}
if (any(line == c("adj_r2", "ADJ_r2"))) {
yl <- expression(paste0("Adjusted ", R^2))
}
ylab <- list(c(yl, "Design"))
}
# mean rank of the models that use level 'ind' of each design variable;
# used both to order the grid rows and to place the marker points
rank_center <- rep(NA, dim(grid)[2])
for (i in seq_along(rank_center)) {
rank_center[i] <- mean(cbind(x, grid)[, 1][which(cbind(x, grid)[, i + 1] == ind)])
}
grid <- grid[, order(rank_center, decreasing = TRUE)]
# bottom panel: performance statistic; top panel: design grid
p1 <- lattice::xyplot(
y ~ x, xlim = rev(grDevices::extendrange(xlim, f = 0)), type = type, pch = pch[1],
scales = list(y = list(rot = 0), x = list(at = at)))
p2 <- lattice::levelplot(
grid, colorkey = FALSE, xlim = rev(grDevices::extendrange(xlim, f = 0)),
col.regions = color, scales = list(y = list(rot = 90)),
panel = function (...) {
lattice::panel.levelplot(...)
grid::grid.points(x = sort(rank_center, decreasing = TRUE),
seq(1, dim(grid)[2], 1), pch = pch[2], size = grid::unit(size, "char"))
})
# Print plot: stack the two trellis objects into one figure
update(c(p1, p2), layout = c(1, 2), xlab = xlab,
ylab = ylab, aspect = c((dim(grid)[2] * 2) / dim(grid)[1]),
par.settings = list(layout.heights = list(panel = c(0.5, 0.5))), ...)
}
#' @export
#' @rdname plotModelSeries
plotMS <- plotModelSeries
|
e206610f6916ea470c6afdb351756f9ebb2ce388 | 89b074cdd8067f899a2a1179d487aeb52468e964 | /man/hiervis.Rd | a3f6203f1a0e9b25702d20bbc4343c49324bcd56 | [] | no_license | willCode2Surf/hiervis | 417beea08bf0d1a38d125539f0a5ea33378f0bc7 | 12fda7dc13df83c7d0dc708e8752e5423bdf015b | refs/heads/master | 2020-05-21T01:58:58.187010 | 2019-01-17T05:20:46 | 2019-01-17T05:20:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,262 | rd | hiervis.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hiervis.R
\name{hiervis}
\alias{hiervis}
\title{Create a hierarchical visualization from tabular data and data.frames}
\usage{
hiervis(data, vis = NULL, width = NULL, height = NULL,
elementId = NULL, nameField = "name", valueField = "value",
pathSep = NULL, parentField = NULL, stat = "count",
vis.opts = list(transitionDuration = 350, showNumbers = TRUE,
numberFormat = ",d", treeColors = TRUE, treemapHier = TRUE,
sunburstLabelsRadiate = FALSE, circleNumberFormat = ".2s", linkColorChild
= FALSE, sankeyMinHeight = NULL))
}
\arguments{
\item{data}{tabular data or data.frame}
\item{vis}{One of "sankey", "sunburst", "partition", "treemap".}
\item{width}{width of widget}
\item{height}{height of widget}
\item{elementId}{elementId}
\item{nameField}{field in data that has the name or ID}
\item{valueField}{field in data that has quantitative values}
\item{pathSep}{path separator in name field, e.g. "/"}
\item{parentField}{field in data that has the parent name or ID}
\item{stat}{a statistic to calculate the value, e.g. "count"}
\item{vis.opts}{additional parameters given to the javascript hiervis function}
}
\description{
This function can create a variety of interactive d3 visualizations from tables and
data.frames.
}
\details{
- tabular data can be used directly without extra arguments
- For data.frames or matrices with a path (e.g. "A/B/C"), specify
nameField, pathSep and valueField
- For data.frames or matrices with parent and child fields, specify
nameField and parentField
}
\examples{
data(Titanic)
## Tabular data does not need any extra arguments
hiervis(Titanic, "sankey")
hiervis(HairEyeColor, "vertical sankey")
## For data.frames with a path (e.g. A/B/C), supply nameField, pathSep and valueField
hiervis(d3_modules, "sunburst", nameField = "path", pathSep = "/", valueField = "size")
## For data.frames with parent and child field, supply nameField and parentField
data <- data.frame(name = c("Root Node", "Node A", "Node B", "Leaf Node A.1", "Leaf Node A.2"),
parent = c(NA, "Root Node", "Root Node", "Node A", "Node A"))
hiervis(data, "sankey", nameField = "name", parentField = "parent", stat = "count")
}
|
94f65a5741d71b76faae66954367099db82746e3 | 4d190962d6358fa2980fb7cfc32b9d488b1371d5 | /T-test and corresponding p-value - Complete dataset.R | 909a8f9919bd8818a31bda7f0701424a09c81e8c | [] | no_license | bcsdevries/Research-Major-Osteoporotic-Fracture-Risk-calculator | 97726a72b7c410b3ec5327fa9c419360390a9a90 | 2d187d2ca8a1b30d60d701bdcd24f9946e1430b8 | refs/heads/master | 2022-04-14T20:32:22.785733 | 2020-04-15T09:43:46 | 2020-04-15T09:43:46 | 255,870,758 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,338 | r | T-test and corresponding p-value - Complete dataset.R | # T-test and corresponding p-values for ANOVA plot
# Two-sample t-test computed from summary statistics only.
# m1, m2: sample means; s1, s2: sample standard deviations; n1, n2: sample
# sizes; m0: hypothesized difference of means under H0; equal.variance:
# when TRUE, use the pooled-variance test, otherwise Welch's test.
# Returns a named numeric vector:
#   "Difference of means", "Std Error", "t", "p-value" (two-sided).
t.test2 <- function(m1, m2, s1, s2, n1, n2, m0 = 0, equal.variance = FALSE)
{
  if (equal.variance) {
    # pooled standard deviation, scaled by the sample sizes
    pooled_var <- ((n1 - 1) * s1^2 + (n2 - 1) * s2^2) / (n1 + n2 - 2)
    se <- sqrt(pooled_var * (1 / n1 + 1 / n2))
    df <- n1 + n2 - 2
  } else {
    v1 <- s1^2 / n1
    v2 <- s2^2 / n2
    se <- sqrt(v1 + v2)
    # Welch-Satterthwaite degrees of freedom
    df <- (v1 + v2)^2 / (v1^2 / (n1 - 1) + v2^2 / (n2 - 1))
  }
  tstat <- (m1 - m2 - m0) / se
  out <- c(m1 - m2, se, tstat, 2 * pt(-abs(tstat), df))
  names(out) <- c("Difference of means", "Std Error", "t", "p-value")
  out
}
# Pairwise Welch t-tests between model performance summaries
# (mean, variance/SD, n = 10 runs each) for Cox, RSF (MICE-imputed),
# RSF (regular) and ANN models. Note: the third argument of t.test2 is an
# SD, so variances are wrapped in sqrt(); RSF-regular's 0.01352235 is
# already an SD and passed as-is.
Cox_RSF_MICE <- t.test2(0.6966725, 0.6878885, sqrt(0.0002840391), sqrt(0.0003295021), 10, 10)
Cox_RSF_regular <- t.test2(0.6966725, 0.6874454, sqrt(0.0002840391), 0.01352235, 10, 10)
Cox_ANN <- t.test2(0.6966725, 0.6696976,sqrt(0.0002840391),sqrt(0.001562243),10,10)
RSF_MICE_regular <- t.test2(0.6874454, 0.6878885, 0.01352235, sqrt(0.0003295021), 10, 10)
RSF_MICE_ANN <- t.test2(0.6696976, 0.6878885, sqrt(0.001562243), sqrt(0.0003295021), 10, 10)
RSF_regular_ANN <- t.test2(0.6874454, 0.6696976, 0.01352235, sqrt(0.001562243), 10, 10)
# Collect all pairwise results into one matrix (one column per comparison)
ANOVA_outcome <- cbind(Cox_RSF_MICE, Cox_RSF_regular, Cox_ANN,
RSF_MICE_regular, RSF_MICE_ANN, RSF_regular_ANN)
|
89ba611214649ba9f7a159f3164254ef7bcfc8bd | b0401403a3259ce2bf43a0d17fdbbf91b9a48c0a | /R/plot_scale.R | bec7d3e15df24b3dd2cc9ea069a1fc9586376daa | [] | no_license | cran/terra | e619f8d942189daef2f8436d14bdcbe6c89a6d79 | e3ea767455d46d961fc82c8fcb3aef424c9ecad7 | refs/heads/master | 2023-06-23T13:05:17.593647 | 2023-06-23T10:20:02 | 2023-06-23T11:21:46 | 248,760,535 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,033 | r | plot_scale.R |
# Heuristic: does the current plot look like lon/lat (degree) coordinates?
# TRUE when the x user-coordinate range and the y axis tick range both fall
# inside generously padded geographic bounds.
..assume_lonlat <- function(pr) {
	if (pr$usr[1] <= -300) return(FALSE)
	if (pr$usr[2] >= 300) return(FALSE)
	if (pr$yaxp[1] <= -200) return(FALSE)
	pr$yaxp[2] < 200
}
# Work out a scale-bar length for the current plot.
# pr: list with the plot's user coordinates in $usr; lonlat: TRUE when the
# plot is in degrees; d: optional user-supplied distance (km when lonlat,
# map units otherwise). Returns c(length in plot units, labelled distance).
.get_dd <- function(pr, lonlat, d=NULL) {
	if (!lonlat) {
		if (is.null(d)) {
			# default to ~1/6 of the x range, rounded to one significant digit
			d <- (pr$usr[2] - pr$usr[1]) / 6
			ndigits <- floor(log10(d)) + 1
			d <- round(d, 1 - ndigits)
		}
		return(c(d, d))
	}
	midlat <- mean(pr$usr[3:4])
	if (is.null(d)) {
		# geodesic length of ~1/6 of the x range at the plot's central latitude,
		# converted to km and rounded to a multiple of 5 (minimum 1 km)
		dx <- (pr$usr[2] - pr$usr[1]) / 6
		d <- as.vector(distance(cbind(0, midlat), cbind(dx, midlat), TRUE))
		d <- max(1, 5 * round(d/5000))
	}
	# degrees of longitude spanned by d km at the central latitude
	dest <- .destPoint(cbind(0, midlat), d * 1000)
	c(dest[1, 1], d)
}
# Resolve a position specifier to plot coordinates.
# xy: either a numeric coordinate pair (returned as a 1-row matrix) or a
# keyword such as "bottomleft"; dx/dy: size of the item being placed, used to
# keep it inside the plot; pr: list with the plot's $usr extent; defpos:
# keyword used when xy is NULL; caller: name reported in the error message.
# Keyword positions are inset by 5% of the plot range on each axis.
.get_xy <- function(xy, dx=0, dy=0, pr, defpos="bottomleft", caller="") {
	if (is.null(xy)) {
		xy <- defpos
	}
	# explicit coordinates pass through unchanged (as a 1-row matrix)
	if (!is.character(xy)) {
		return( cbind(xy[1], xy[2]) )
	}
	xy <- tolower(xy)
	xr <- pr$usr[2] - pr$usr[1]
	yr <- pr$usr[4] - pr$usr[3]
	padx <- 0.05 * xr
	pady <- 0.05 * yr
	pos <- switch(xy,
		bottom      = c(pr$usr[1] + 0.5 * xr - 0.5 * dx, pr$usr[3] + pady + dy),
		bottomleft  = c(pr$usr[1] + padx, pr$usr[3] + pady + dy),
		bottomright = c(pr$usr[2] - padx - dx, pr$usr[3] + pady + dy),
		topright    = c(pr$usr[2] - padx - dx, pr$usr[4] - pady - dy),
		top         = c(pr$usr[1] + 0.5 * xr - 0.5 * dx, pr$usr[4] - pady - dy),
		topleft     = c(pr$usr[1] + padx, pr$usr[4] - pady - dy),
		left        = c(pr$usr[1] + padx, pr$usr[3] + 0.5 * yr - 0.5 * dy),
		right       = c(pr$usr[2] - padx - dx, pr$usr[3] + 0.5 * yr - 0.5 * dy),
		error(caller, 'xy must be a coordinate pair (two numbers) or one of "bottomleft", "bottom", "bottomright", topleft", "top", "topright"')
	)
	pos
}
# Destination point along a great circle on a spherical earth.
# p: matrix of lon/lat pairs in degrees; d: distance in metres; b: bearing in
# degrees (default due east); r: sphere radius in metres (WGS84 semi-major
# axis). Returns a matrix of lon/lat pairs in degrees, with longitude
# normalized to [-180, 180).
.destPoint <- function (p, d, b=90, r=6378137) {
	deg2rad <- pi / 180
	lambda1 <- p[, 1] * deg2rad
	phi1 <- p[, 2] * deg2rad
	theta <- b * deg2rad
	phi2 <- asin(sin(phi1) * cos(d/r) + cos(phi1) * sin(d/r) * cos(theta))
	lambda2 <- lambda1 + atan2(sin(theta) * sin(d/r) * cos(phi1),
		cos(d/r) - sin(phi1) * sin(phi2))
	# wrap longitude into [-pi, pi)
	lambda2 <- (lambda2 + pi) %% (2 * pi) - pi
	cbind(lon2 = lambda2, lat2 = phi2) / deg2rad
}
# Draw a north-arrow glyph and its label on the current plot with text().
# x, y: glyph position; asp: plot aspect ratio (x range / y range, used when
# sizing the rotated glyph's bounding box); label: text, typically "N";
# type: 0 uses the user-supplied glyph string, 1..11 select a predefined
# unicode arrow/symbol; user: glyph string for type 0; angle: rotation of the
# glyph in degrees; cex/srt/xpd/...: forwarded to graphics::text().
add_N <- function(x, y, asp, label, type=0, user="", angle=0, cex=1, srt=0, xpd=TRUE, ...) {
type <- type[1]
# map 'type' to a unicode glyph; unknown values fall through to the default
if (type == 0) { symbol = user[1]
} else if (type == 2) { symbol = "\u27A2"
} else if (type == 3) { symbol = "\u2799"
} else if (type == 4) { symbol = "\u27B2"
} else if (type == 5) { symbol = "\u27BE"
} else if (type == 6) { symbol = "\u27B8"
} else if (type == 7) { symbol = "\u27BB"
} else if (type == 8) { symbol = "\u27B5"
} else if (type == 9) { symbol = "\u279F"
} else if (type == 10) { symbol = "\u261B"
} else if (type == 11) { symbol = "\u2708"
} else { symbol = "\u2629"}
# glyphs point east in the font, so rotate by (90 - angle) to point north;
# the airplane glyph (11) points northeast and gets a smaller magnification
if (type == 11) {
rangle <- 45 - angle
mcex <- 1.5
} else {
rangle <- 90 - angle
mcex <- 3
}
text(x, y, symbol, cex=cex*mcex, srt=rangle, xpd=xpd, ...)
# bounding box of the (rotated) glyph, used to offset the label
# NOTE(review): strwidth/strheight use cex*3 even for type 11, where the
# glyph was drawn at cex*1.5 — confirm whether that is intended
xs <- graphics::strwidth(symbol,cex=cex*3)
ys <- graphics::strheight(symbol,cex=cex*3)
b <- pi * angle / 180
# width/height of the axis-aligned box enclosing the glyph rotated by angle
rxs <- (abs(xs * cos(b)) + abs(ys * sin(b)))# / asp
rys <- (abs(xs * sin(b)) + abs(ys * cos(b)))# * asp
# xoff <- (rxs - xs) / 2
# yoff <- rys + 0.05 * graphics::strheight(label,cex=cex)
xoff = 0.1 * rxs
yoff = 0.8 * rys * max(0.5, abs(cos(angle)))
# types 4 and 10 place the label inside/below the glyph with a halo;
# all other types place it above with plain text
if (type == 4) {
.halo(x+xoff, y-0.2*yoff, label, cex = cex, srt = srt, xpd = xpd, ...)
} else if (type == 10) {
.halo(x+xoff, y-yoff, label, cex = cex, srt = srt, xpd = xpd, ...)
} else {
text(x+xoff, y+yoff, label, cex = cex, srt = srt, xpd = xpd, ...)
}
}
# Add a north arrow to the current map plot.
# xy: position (coordinate pair or keyword such as "topright"; NULL defaults
# to "topright"); type: 1 draws a line arrow with arrows(), any other value
# (or a character glyph) is delegated to add_N(); label: text next to the
# arrow; angle: arrow direction in degrees from north; d: arrow half-length
# in plot units (defaults to 7% of the y range); head: arrowhead length;
# xpd: allow drawing outside the plot region; ...: graphical parameters.
north <- function(xy=NULL, type=1, label="N", angle=0, d, head=0.1, xpd=TRUE, ...) {
pr <- graphics::par()
# use the clipped plot extent recorded by terra, not the raw device extent
pr$usr <- unlist(get.clip()[1:4])
pa <- c(pr$usr[2] - pr$usr[1], pr$usr[4] - pr$usr[3])
asp <- pa[2]/pa[1]
if (missing(d)) {
d <- 0.07 * pa[2]
}
xy <- .get_xy(xy, 0, d, pr, "topright", caller="arrow")
# a character 'type' is a user-supplied glyph for add_N()
if (inherits(type, "character")) {
usertype <- type
type = 0
} else {
type <- round(type)
usertype <- ""
}
if (type == 1) {
# endpoints of the arrow, rotated by 'angle' around xy
if (angle != 0) {
b <- angle * pi / 180;
p2 <- xy + c(d * sin(b), d * cos(b))
b <- b + pi
p1 <- xy + c(d * sin(b), d * cos(b))
# clamp the horizontal span so a near-horizontal arrow does not
# grow wider than d (in aspect-corrected units)
if ((p2[1] - p1[1]) > (d/asp)) {
m <- xy[1] #p1[1] + (p2[1] - p1[1]) / 2
slope = (p2[2] - p1[2])/(p2[1] - p1[1])
newx <- m - 0.5 * d / asp
p1[2] <- p1[2] + (newx-p1[1]) * slope
p1[1] <- newx
newx <- m + 0.5 * d / asp
p2[2] <- p2[2] - (p2[1]-newx) * slope
p2[1] <- newx
}
} else {
p1 <- xy - c(0,d)
p2 <- xy + c(0,d)
}
# white "halo" arrow drawn first, slightly wider than the real arrow.
# Bug fix: the original computed list(...)$lwd + 2 first, which yields
# numeric(0) (not NULL) when 'lwd' is absent, so the is.null() fallback
# could never trigger. Check for NULL before adding.
lwd <- list(...)$lwd
if (is.null(lwd)) lwd <- 1
lwd <- lwd + 2
graphics::arrows(p1[1], p1[2], p2[1], p2[2], length=head, lwd=lwd, col="white", xpd=xpd)
graphics::arrows(p1[1], p1[2], p2[1], p2[2], length=head, xpd=xpd, ...)
if (label != "") {
# haloed label at the arrow's midpoint; pass hw only when the caller
# did not supply one (hw is consumed from ... by .halo)
if (is.null(list(...)$hw)) {
.halo(xy[1], xy[2], label, hw=.2, xpd=xpd, ... )
} else {
.halo(xy[1], xy[2], label, xpd=xpd, ... )
}
}
} else {
add_N(xy[1], xy[2], asp=asp, label=label, angle=angle, type=type, user=usertype, xpd=xpd, ...)
}
}
# Add a scale bar to the current map plot.
#
# d: bar length in map units (estimated when missing); xy: anchor position
# (default near the bottom-left of the clip region); type: "line" for a
# simple labelled line, "bar" for a black/white checked bar; divs: number of
# divisions; below: caption printed under the bar; lonlat: treat coordinates
# as lon/lat (taken from the clip info when NULL); scaleby: divisor applied
# to d when printing labels; halo: draw line/text with a white outline;
# ticks: FALSE, TRUE (automatic height) or an explicit tick height.
sbar <- function(d, xy=NULL, type="line", divs=2, below="", lonlat=NULL, labels, adj=c(0.5, -1), lwd=2, xpd=TRUE, ticks=FALSE, scaleby=1, halo=TRUE, ...){
  stopifnot(type %in% c("line", "bar"))
  pr <- graphics::par()
  clp <- get.clip()
  pr$usr <- unlist(clp[,1:4])
  if (is.null(lonlat)) {
    # fifth element of the clip info flags geographic coordinates
    lonlat <- isTRUE(clp[[5]])
  }
  if (missing(d)) {
    labels <- NULL
    d <- NULL
  }
  # .get_dd returns the bar length in plot units (dd) and map units (d)
  dd <- .get_dd(pr, lonlat, d)
  d <- dd[2]
  dd <- dd[1]
  xy <- .get_xy(xy, dd, 0, pr, "bottomleft", caller="sbar")
  if (type == "line") {
    if (halo) {
      # slightly wider white line drawn first as an outline
      lines(matrix(c(xy[1], xy[2], xy[1]+dd, xy[2]), byrow=T, nrow=2), lwd=lwd+1, xpd=xpd, col="white")
    }
    lines(matrix(c(xy[1], xy[2], xy[1]+dd, xy[2]), byrow=T, nrow=2), lwd=lwd, xpd=xpd, ...)
    if (missing(labels) || is.null(labels)) {
      # default labels: 0 / midpoint / full length (scaled)
      ds <- d / scaleby
      if (divs > 2) {
        labels <- c(0, round(ds/2, 1), ds)
      } else {
        labels <- paste(ds)
      }
    }
    if (missing(adj)) {
      adj <- c(0.5, -0.2-lwd/20 )
    }
    tadd <- 0
    if (!isFALSE(ticks)) {
      if (isTRUE(ticks)) {
        # automatic tick height scaled to the plot's aspect ratio
        tadd <- dd / (15 * diff(pr$usr[1:2]) / diff(pr$usr[3:4]))
      } else {
        tadd <- ticks
      }
      if (length(labels) == 1) {
        xtick <- c(xy[1], xy[1]+dd)
      } else {
        xtick <- c(xy[1], xy[1]+dd/2, xy[1]+dd)
      }
      for (i in 1:length(xtick)) {
        lines(rbind(c(xtick[i], xy[2]), c(xtick[i], xy[2]+tadd)), lwd=ceiling(lwd/2), ...)
      }
    }
    tadd <- max(0, tadd)
    # a single label is printed at the end of the bar, start/middle left blank
    if (length(labels) == 1) labels =c("", labels, "")
    if (halo) {
      .halo(xy[1], xy[2]+tadd,labels=labels[1], xpd=xpd, adj=adj, ...)
      .halo(xy[1]+0.5*dd, xy[2]+tadd,labels=labels[2], xpd=xpd, adj=adj,...)
      .halo(xy[1]+dd, xy[2]+tadd,labels=labels[3], xpd=xpd, adj=adj,...)
    } else {
      text(xy[1], xy[2]+tadd,labels=labels[1], xpd=xpd, adj=adj, ...)
      text(xy[1]+0.5*dd, xy[2]+tadd,labels=labels[2], xpd=xpd, adj=adj,...)
      text(xy[1]+dd, xy[2]+tadd,labels=labels[3], xpd=xpd, adj=adj,...)
    }
    xy[2] <- xy[2] - dd/10
  } else if (type == "bar") {
    stopifnot(divs > 0)
    if (missing(adj)) {
      adj <- c(0.5, -1 )
    }
    # bar thickness is proportional to its length
    lwd <- dd / 25
    if (divs==2) {
      # two alternating white/black segments
      half <- xy[1] + dd / 2
      graphics::polygon(c(xy[1], xy[1], half, half), c(xy[2], xy[2]+lwd, xy[2]+lwd, xy[2]), col="white", xpd=xpd)
      graphics::polygon(c(half, half, xy[1]+dd, xy[1]+dd ), c(xy[2], xy[2]+lwd, xy[2]+lwd, xy[2]), col="black", xpd=xpd)
      if (missing(labels) || is.null(labels)) {
        labels <- c("0", "", d/scaleby)
      }
      text(xy[1], xy[2],labels=labels[1], xpd=xpd, adj=adj,...)
      text(xy[1]+0.5*dd, xy[2],labels=labels[2], xpd=xpd, adj=adj,...)
      text(xy[1]+dd, xy[2],labels=labels[3], xpd=xpd, adj=adj,...)
    } else {
      # four alternating quarters
      q1 <- xy[1] + dd / 4
      half <- xy[1] + dd / 2
      q3 <- xy[1] + 3 * dd / 4
      end <- xy[1] + dd
      graphics::polygon(c(xy[1], xy[1], q1, q1), c(xy[2], xy[2]+lwd, xy[2]+lwd, xy[2]), col="white", xpd=xpd)
      graphics::polygon(c(q1, q1, half, half), c(xy[2], xy[2]+lwd, xy[2]+lwd, xy[2]), col="black", xpd=xpd)
      graphics::polygon(c(half, half, q3, q3 ), c(xy[2], xy[2]+lwd, xy[2]+lwd, xy[2]), col="white", xpd=xpd)
      graphics::polygon(c(q3, q3, end, end), c(xy[2], xy[2]+lwd, xy[2]+lwd, xy[2]), col="black", xpd=xpd)
      if (missing(labels) || is.null(labels)) {
        ds <- d / scaleby
        labels <- c("0", round(0.5*ds), ds)
      }
      text(xy[1], xy[2], labels=labels[1], xpd=xpd, adj=adj, ...)
      text(half, xy[2], labels=labels[2], xpd=xpd, adj=adj,...)
      text(end, xy[2],labels=labels[3], xpd=xpd, adj=adj,...)
    }
  }
  if (below != "") {
    # flip the vertical adjustment so the caption sits below the bar
    adj[2] <- -adj[2]
    text(xy[1]+(0.5*dd), xy[2], xpd=xpd, labels=below, adj=adj,...)
  }
}
|
6171e6ff55ebec54c401049513af8330eea74f47 | 160d17e91f168f19d41fdbda99e58ed42467e2f5 | /Package/R/sfc.getIndex.R | 7946d1fb79f114fb224f84b7d83c4b5149484d78 | [
"MIT"
] | permissive | andersonjames492/PKSFC | fa981c0d4bd3d4ef4d96b4297b8fe9f31f956532 | c32ef687dddd1f3b4c8d9d2f66bd00ca1aa95395 | refs/heads/master | 2023-03-20T18:45:34.882228 | 2020-03-15T14:37:10 | 2020-03-15T14:37:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 918 | r | sfc.getIndex.R | #' Find index.
#'
#' Find the indexes for sfc object inputs.
#'
#' @param model an sfc object.
#' @param var the variable that you want the index for, if applicable.
#' @param eq the equation that you want the index for, if applicable.
#' @param end the endogenous variable that you want the index for, if applicable.
#' @return the required index.
#'
#' @author Antoine Godin
# Look up the row index of a variable, an equation or an endogenous variable
# in an sfc model object. Returns -1 when the name is not found; errors when
# none of var/eq/end is supplied.
sfc.getIndex <- function(model = stop("Need a model"), var = NA, eq = NA, end = NA) {
  # shared lookup: match `name` against the first column of `tbl`
  find_row <- function(tbl, name) {
    hit <- which(tbl[, 1] == name, arr.ind = TRUE)
    if (length(hit) == 0) {
      return(-1)
    }
    hit
  }
  if (!is.na(var)) {
    return(find_row(model$variables, var))
  }
  if (!is.na(eq)) {
    return(find_row(model$equations, eq))
  }
  if (!is.na(end)) {
    return(find_row(model$endogenous, end))
  }
  stop("Need either a variable (var), and endogenous (end) or an equation (eq)!")
}
|
62917a40e67ea37dc18081aaa3bf886ffad34b6a | 4f3e2e15b00b160f9290300e3a03b17d5fc436ef | /man/plot.PriorGen.Rd | 65999686c2188d93f34fc1056903f29b6f233884 | [] | no_license | cran/PriorGen | e5f6b11677647ce4d805359ac2b944fd3f3500fd | 8dd3418fdd7f7b134bdef02a650825b7bda6e571 | refs/heads/master | 2023-04-16T10:42:57.628562 | 2023-04-03T09:50:11 | 2023-04-03T09:50:11 | 135,155,295 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,130 | rd | plot.PriorGen.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/findbeta_plot.R
\name{plot.PriorGen}
\alias{plot.PriorGen}
\title{The findbeta plot function}
\usage{
\method{plot}{PriorGen}(x, ...)
}
\arguments{
\item{x}{An object of type findbeta produced by one of
the other PriorGen functions.}
\item{...}{More basic plot arguments}
}
\description{
A function that plots any object of the class findbeta.
}
\examples{
## Example 1
## Based on the available literature the mean value for the
## sensitivity of a test is expected to be generally low and
## its variance not that low but not that much neither.
res_abs_1 <- findbeta_abstract(
themean.cat = "Low",
thevariance.cat = "Average"
)
plot(res_abs_1,
main = "Plot of the findbeta_abstract function",
lwd = 3, ylim = c(0, 7)
)
## Example 2
## Hierarchical prior
res_mult_1 <- findbetamupsi(
themean = 0.10, percentile = 0.79,
lower.v = TRUE, percentile.value = 0.26, psi.percentile = 0.95,
percentile.median = 0.28, percentile95value = 0.3
)
plot(res_mult_1,
main = "Plot of the findbetamupsi function",
lwd = 3, ylim = c(0, 7)
)
}
|
367e4818bc37e71b54617b9d2fbc39b2b3acc491 | e08c4bfb8fbae97a7dc1f278ebc0db5ee3fcb5a1 | /man/NeoRun_data.Rd | 49b69bf2625a183286d8c282cb69fae6f79861d2 | [] | no_license | 419kfj/EpsonView | 2dea2540dfc60b6f22160b6079cbd687f436ebd7 | 5769bf2e2b609e195923ee2a91e94c6844f9baf0 | refs/heads/master | 2023-07-18T07:54:55.810541 | 2021-09-06T04:56:42 | 2021-09-06T04:56:42 | 278,850,960 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 354 | rd | NeoRun_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NeoRun.R
\name{NeoRun_data}
\alias{NeoRun_data}
\title{Convert backup data to dataframe list.}
\usage{
NeoRun_data(fname, runmemo = NULL)
}
\arguments{
\item{fname}{CSV file name}
\item{runmemo}{memo to add to the dataframe}
}
\description{
Convert backup data to dataframe list.
}
|
2f31f3128977f2cc90ae46df2f6ad33d8fa30ddf | 27764bf8769ba3d9fe3d0da4af3f5c3215bfe2d5 | /server.R | 6cc6f29c0c52a7b49aaeee3374dab6f062529146 | [] | no_license | patterd2/shiny-server | 5ebecb3951ac4fae30663052ce8e151e24ad897b | 50e7646f1037b6a70c6ccdf2b61d0abc9c5d4c4a | refs/heads/master | 2021-01-10T05:26:59.815070 | 2020-03-20T15:22:10 | 2020-03-20T15:22:10 | 43,453,401 | 0 | 0 | null | 2020-03-20T15:22:11 | 2015-09-30T19:12:55 | null | UTF-8 | R | false | false | 18,710 | r | server.R | library(shiny)
library(wordcloud)
library(stringr)
library(knitr)
library(selectr)
library(data.table)
library(RCurl)
library(RJSONIO)
library(plyr)
require(ggplot2)
library(sp)
library(tm)
library(quanteda)
library(NLP)
library(devtools)
dev_mode(on=TRUE)
library(gender)
library(jsonlite)
library(scales)
######################################################################################
# Count male/female classifications for a team based on first names.
# First names are taken from profile_name and classified with
# gender::gender() (U.S. SSA method, birth years 1900-2012).
# Returns c(number_of_males, number_of_females).
gender_profile <- function(team_profiles) {
  first_names <- word(team_profiles$profile_name, 1)
  inferred <- gender(first_names, method = "ssa", years = c(1900, 2012))
  c(sum(inferred$gender == "male"), sum(inferred$gender == "female"))
}
######################################################################################
# Capitalize the first letter of every space-separated word in a string.
simpleCap <- function(x) {
  words <- strsplit(x, " ", fixed = TRUE)[[1]]
  capitalized <- paste0(toupper(substring(words, 1, 1)), substring(words, 2))
  paste(capitalized, collapse = " ")
}
######################################################################################
# Coerce every column of `data` to character and replace NA with "".
# Returns a data.frame of character columns with the same dimensions.
clean_data <- function(data) {
  as_char <- sapply(data, as.character)
  as_char[is.na(as_char)] <- ""
  as.data.frame(as_char)
}
#######################################################################################
# For each column of `data`, compute the percentage of entries equal to "".
# Returns a two-column data.frame: field name and "% empty <company>".
# NOTE(review): `company` is a free variable resolved from the calling
# environment at run time, not an argument -- confirm it is defined wherever
# this helper is called (it is assigned inside renderDataTable in server.R).
check_empty <- function(data) {
  percentage_empty <- as.data.frame(colnames(data))
  for (i in 1:ncol(data)) {
    # share of rows whose value in column i is the empty string
    percentage_empty[i,2] <- 100*length(data[data[,i]=="",i])/length(data[,i])
  }
  colnames(percentage_empty) <- c("field",paste("% empty"," ",company,sep = ""))
  return(percentage_empty)
}
########################################################################################
# Normalize the college-name columns (14-18): lower-case the text and strip
# all punctuation. Other columns are left untouched.
stem_colleges <- function(team_profiles) {
  for (col in 14:18) {
    team_profiles[, col] <- gsub("[[:punct:]]", "", tolower(team_profiles[, col]))
  }
  return(team_profiles)
}
########################################################################################
# Subset `data` to the rows whose current_title matches any of the given
# keywords (case-insensitive). Returns a message string when nothing matches.
identify_team <- function(data, team_keywords) {
  is_member <- rep(FALSE, nrow(data))
  for (keyword in team_keywords) {
    is_member <- is_member | grepl(keyword, data$current_title, ignore.case = TRUE)
  }
  team_profiles <- data[is_member, ]
  if (nrow(team_profiles) == 0) {
    return("Keywords not found! Team is empty.")
  }
  team_profiles
}
#########################################################################################
# Normalize the free-text degree columns (4-8) onto "phd", "master" or
# "bachelor". Substitutions run in a fixed order that matters (e.g. the
# "bsc " rule must fire before the bare "bs" rule).
stem_degrees <- function(team_profiles) {
  rules <- list(
    c("masters", "master"),
    c("ms ", "master"),
    c("ma ", "master"),
    c("mba ", "master"),
    c("mphil", "master"),
    c("msc ", "master"),
    c("bsc ", "bachelor"),
    c("btech", "bachelor"),
    c("ba ", "bachelor"),
    c("bs", "bachelor"),
    c("bachelors", "bachelor"),
    c("ab", "bachelor"),
    c("dphil", "phd")
  )
  for (col in 4:8) {
    degree <- gsub("[[:punct:]]", "", tolower(team_profiles[, col]))
    for (rule in rules) {
      degree <- gsub(rule[1], rule[2], degree)
    }
    # collapse anything still mentioning a level onto the canonical label
    degree[grepl("phd", degree)] <- "phd"
    degree[grepl("master", degree)] <- "master"
    degree[grepl("bachelor", degree)] <- "bachelor"
    team_profiles[, col] <- degree
  }
  return(team_profiles)
}
############################################################################
# Determine each team member's highest degree from the degree columns (4-8)
# and tabulate the counts. Precedence: PhD > Master > Bachelor > Other.
# Returns a data.frame with columns `highest_degree` and `Freq`.
#
# Fixes over the original: scalar ifelse() chains replaced with plain
# if/else, `1:nrow()` (which iterates c(1, 0) on empty input) replaced with
# seq_len(), and `max(grepl(...)) > 0` (which warns on empty input) replaced
# with any().
degree_count <- function(team_profiles) {
  # classify one person's set of degree strings
  classify_row <- function(row_values) {
    vals <- rapply(row_values, as.character)
    if (any(grepl("phd", vals, ignore.case = TRUE))) {
      "PhD"
    } else if (any(grepl("master", vals, ignore.case = TRUE))) {
      "Master"
    } else if (any(grepl("bachelor", vals, ignore.case = TRUE))) {
      "Bachelor"
    } else {
      "Other"
    }
  }
  highest_degree <- character(nrow(team_profiles))
  for (ii in seq_len(nrow(team_profiles))) {
    highest_degree[ii] <- classify_row(team_profiles[ii, 4:8])
  }
  # keep the original data.frame-then-table shape so the output column is
  # named "highest_degree"
  highest_degree <- as.data.frame(highest_degree)
  return(as.data.frame(table(highest_degree)))
}
#############################################################################
# Split the comma-separated `skills` column into individual skills,
# normalize them (lower-case, strip punctuation, drop one leading space,
# capitalize the first letter) and tabulate frequencies.
skill_analysis <- function(team_profiles) {
  skills <- unlist(strsplit(as.character(team_profiles$skills), ",", fixed = TRUE))
  skills <- gsub("[[:punct:]]", "", tolower(skills))
  # drop a single leading space left over from ", skill" splits
  lead <- substr(skills, 1, 1)
  rest <- substr(skills, 2, nchar(skills))
  skills <- paste0(ifelse(lead == " ", "", lead), rest)
  # capitalize the first character; keep the variable named `temp` so the
  # returned data.frame column is called "temp", as before
  temp <- paste0(toupper(substr(skills, 1, 1)), substr(skills, 2, nchar(skills)))
  as.data.frame(table(temp))
}
##############################################################################
# Harmonize the free-text field-of-study columns (9-13) onto canonical
# labels. Matching is done on the lower-cased text; rules apply in order.
#
# Fix: the "computational" rule previously assigned lower-case
# "computer science", which did not merge with the "Computer Science" label
# used by every other rule and split the field counts downstream.
stem_fields <- function(team_profiles) {
  for (i in 1:5) {
    team_profiles[,8+i] <- tolower(team_profiles[,8+i])
    team_profiles[grepl("engineering",team_profiles[,8+i]),8+i] <- "Engineering"
    team_profiles[grepl("math",team_profiles[,8+i]),8+i] <- "Math/statistics"
    team_profiles[grepl("statistics",team_profiles[,8+i]),8+i] <- "Math/statistics"
    team_profiles[grepl("physics",team_profiles[,8+i]),8+i] <- "Physics"
    team_profiles[grepl("computer",team_profiles[,8+i]),8+i] <- "Computer Science"
    team_profiles[grepl("systems",team_profiles[,8+i]),8+i] <- "Computer Science"
    team_profiles[grepl("computational",team_profiles[,8+i]),8+i] <- "Computer Science"
    team_profiles[grepl("market",team_profiles[,8+i]),8+i] <- "Marketing"
    team_profiles[grepl("business",team_profiles[,8+i]),8+i] <- "Finance/economics"
    team_profiles[grepl("computing",team_profiles[,8+i]),8+i] <- "Computer Science"
    team_profiles[grepl("financial",team_profiles[,8+i]),8+i] <- "Finance/economics"
    team_profiles[grepl("finance",team_profiles[,8+i]),8+i] <- "Finance/economics"
    team_profiles[grepl("economics",team_profiles[,8+i]),8+i] <- "Finance/economics"
    team_profiles[grepl("account",team_profiles[,8+i]),8+i] <- "Finance/economics"
    team_profiles[grepl("data",team_profiles[,8+i]),8+i] <- "Finance/economics"
  }
  return(team_profiles)
}
#################################################################################
# Parse the "(X years Y months)" duration embedded in the `d1` column and
# return the duration in YEARS for every non-empty entry.
#
# Improvements over the original: year/month numbers are extracted with a
# regex, so multi-digit durations ("10 years", "11 months") parse correctly
# instead of reading a single hard-coded character position; entries with no
# parenthesised duration yield NA instead of erroring. Entries containing an
# "l" (e.g. "less than a year") still map to 5 months, as before.
time_in_role <- function(team_profiles) {
  raw <- as.character(team_profiles$d1)
  raw <- raw[raw != ""]
  months <- numeric(length(raw))
  for (i in seq_along(raw)) {
    s <- gsub(" ", "", raw[i])
    s <- gsub("s", "", s)  # drop plural "s": "years"/"months" -> "year"/"month"
    # keep only the first parenthesised chunk, e.g. "(3year4month)"
    par_groups <- regmatches(s, gregexpr("\\(.*?\\)", s))[[1]]
    if (length(par_groups) == 0) {
      months[i] <- NA  # no "(...)" duration present; original code errored here
      next
    }
    s <- gsub("[\\(\\)]", "", par_groups[1])
    if (grepl("l", s, ignore.case = TRUE)) {
      months[i] <- 5  # "less than a year": use the average, 5 months
    } else {
      yr <- regmatches(s, regexpr("[0-9]+(?=year)", s, perl = TRUE))
      mo <- regmatches(s, regexpr("[0-9]+(?=month)", s, perl = TRUE))
      months[i] <- (if (length(yr) > 0) as.numeric(yr) * 12 else 0) +
        (if (length(mo) > 0) as.numeric(mo) else 0)
    }
  }
  return(months / 12)
}
##################################################################################
# Define server logic
# Shiny server: loads the "<company>_DS.csv" team export selected in the UI
# and renders a company summary table plus skill/tenure/education/gender plots.
shinyServer(function(input, output) {
  # Read and clean the CSV for the selected company (reactive on input$company)
  dataInput <- reactive({
    filename <- paste(as.character(tolower(input$company)),"_DS.csv",sep = "")
    #filename <- paste(as.character(input$company),"_DS.csv",sep = "")
    data <- read.csv(filename)
    #data$X <- NULL
    data <- clean_data(data)
    #keywords <- c("data science","data scientist","data mining","machine learning")
    #team_profiles <- identify_team(data,keywords)
  })
  # Company summary table: Glassdoor API info plus a team-size bucket
  output$summary <- renderDataTable({
    team_profiles <- dataInput()
    company <- as.character(tolower(input$company))
    start_request <- "http://api.glassdoor.com/api/api.htm?v=1&format=json&t.p=44360&t.k=bBEoaEGjpLk&action=employers&q="
    end_request <- "&userip=79.97.106.19&useragent=Mozilla/%2F4.0"
    api_request <- paste(start_request,company,end_request,sep = "")
    # bucket the (upscaled) team size into a label
    if (nrow(team_profiles)*1.2 <= 10) {
      size_team = "Small (<10)"
    }
    if (nrow(team_profiles)*1.2 > 10) {
      size_team = "Medium (10-25)"
    }
    if (nrow(team_profiles)*1.2 > 25) {
      size_team = "Large (25-50)"
    }
    if (nrow(team_profiles)*1.2 > 50) {
      size_team = "Very Large (>50)"
    }
    # fall back to placeholder values when the Glassdoor request fails
    z <- try(fromJSON(api_request),silent = TRUE)
    ifelse(inherits(z,"try-error"),{
      closeAllConnections()
      company_summary <- data.frame(Industry=" no information",
      EmployeeRating=" none",
      Description=" none",Ratings=" no ratings",DataScience=size_team)},
      {
        industry <- z$response$employers$industry[1]
        rating <- z$response$employers$overallRating[1]
        rating_desc <- z$response$employers$ratingDescription[1]
        num_ratings <- z$response$employers$numberOfRatings[1]
        company_summary <- data.frame(Industry=industry,
        EmployeeRating=rating,
        Description=rating_desc,Ratings=num_ratings,DataScience=size_team)
      })
    company_summary
  }, options = list(ordering=0,searching=FALSE,paging=FALSE,scrollCollapse=TRUE,info=FALSE))
  # Top-10 team skills, horizontal bar chart
  output$skills <- renderPlot({
    team_profiles <- dataInput()
    # skill analysis
    skill_freq <- skill_analysis(team_profiles)
    colnames(skill_freq) <- c("skill","Freq")
    skill_freq$skill <- factor(skill_freq$skill, levels = unique(skill_freq$skill[order(skill_freq$Freq)]))
    skill_freq <- skill_freq[with(skill_freq, order(-Freq)),]
    # create plot object
    ggplot(skill_freq[1:10,],aes(x=skill,y=Freq)) + geom_bar(colour="black", fill="#53C253",stat = 'identity') + coord_flip()+ ggtitle("Team Skills") + theme(title=element_text(colour="white"),
    axis.title.x=element_blank(),
    axis.text.x=element_text(colour="white"),
    axis.text.y=element_text(colour="white"),
    axis.title.y=element_blank(),
    plot.background=element_blank())
  },bg="transparent")
  # Histogram of time spent in the current position (years)
  output$time_in_company <- renderPlot({
    team_profiles <- dataInput()
    # time in company analysis
    time <- data.frame(time_months=time_in_role(team_profiles))
    #team <- as.data.frame(rep("Data Science",length(time)))
    #colnames(team) <- c("team")
    #duration <- as.data.frame(cbind(time,team))
    p <- ggplot(time,aes(time_months)) + geom_histogram(colour="black",fill="#53C253",alpha = 1,position = 'identity',binwidth=0.25)
    p + ggtitle("Duration in Current Position (years)") + theme(title=element_text(colour="white"),plot.background=element_blank(),axis.title.x=element_text(colour="white"),axis.title.y=element_text(colour="white"),axis.text.x=element_text(colour="white"),axis.text.y=element_text(colour="white")) + xlab("Time (in years)") + ylab("Number of Employees") + scale_y_continuous(breaks=pretty_breaks())+ scale_x_continuous(breaks=pretty_breaks())
  },bg="transparent")
  # Word cloud of the most frequent universities (2-4 word n-grams matched
  # against the reference list in unis_clean.csv)
  output$colleges <- renderPlot({
    team_profiles <- dataInput()
    #team_profiles <- stem_colleges(team_profiles)
    mytext <- do.call("rbind", list(as.character(team_profiles[,14]), as.character(team_profiles[,15]),
    as.character(team_profiles[,16]),as.character(team_profiles[,17]),as.character(team_profiles[,18])))
    x=tokenize(toLower(mytext), removePunct = TRUE, ngrams = 2)
    y=tokenize(toLower(mytext), removePunct = TRUE, ngrams = 3)
    z=tokenize(toLower(mytext), removePunct = TRUE, ngrams = 4)
    x <- mapply(c,x, y, SIMPLIFY=FALSE)
    x <- mapply(c,x, z, SIMPLIFY=FALSE)
    x <- unlist(x)
    x <- as.data.frame(table(x))
    x$x <- gsub("_"," ",x$x)
    unis <- read.csv("unis_clean.csv")
    x <- x[x$x %in% unis$x,]
    number_degrees <- x[with(x,order(-Freq)),]
    number_degrees[grepl("massachusetts institute of technology",number_degrees$x,ignore.case = TRUE),1] <- "MIT"
    number_degrees$x <- sapply(number_degrees$x,simpleCap)
    pal2 <- brewer.pal(8,"Dark2")
    wordcloud(number_degrees$x,number_degrees$Freq,use.r.layout=FALSE,fixed.asp = FALSE,
    random.order = FALSE,rot.per = 0,max.words = 6,scale = c(3,1),colors = pal2)
  },bg="transparent")
  # Bar chart of the highest degree obtained per team member
  output$highest_degree <- renderPlot({
    team_profiles <- dataInput()
    #team_profiles <- stem_degrees(team_profiles)
    #highest degrees analysis
    count_highest_degrees <- degree_count(team_profiles)
    colnames(count_highest_degrees) <- c("highest_degree","Freq")
    count_highest_degrees$highest_degree <- factor(count_highest_degrees$highest_degree, levels = count_highest_degrees$highest_degree[order(count_highest_degrees$Freq)])
    ggplot(count_highest_degrees,aes(x=highest_degree,y=Freq,fill=highest_degree)) + geom_bar(colour="black", fill="#53C253",stat = 'identity') + coord_flip() + ggtitle("Highest Degree Obtained")+ theme(title=element_text(colour="white"),
    axis.title.x=element_blank(),
    axis.title.y=element_blank(),
    axis.text.x=element_text(colour="white"),
    axis.text.y=element_text(colour="white"),
    plot.background=element_blank())
  },bg="transparent")
  # Top-4 fields of study, by number of degrees
  output$field_of_study <- renderPlot({
    team_profiles <- dataInput()
    #team_profiles <- stem_fields(team_profiles)
    x <- unlist(team_profiles[,9:13])
    fields <- as.data.frame(table(x))
    colnames(fields) <- c("field","Freq")
    fields <- fields[fields$field!="",]
    fields <- fields[with(fields, order(-Freq)),]
    fields <- fields[1:4,]
    fields$field <- sapply(as.character(fields$field),simpleCap)
    fields$field <- factor(fields$field, levels = fields$field[order(fields$Freq)])
    ggplot(fields,aes(x=field,y=Freq,fill=field)) + geom_bar(colour="black", fill="#53C253",stat = 'identity') + coord_flip() + ggtitle("Fields of Study (by number of degrees)") + theme(title=element_text(colour="white"),
    axis.title.x=element_blank(),
    axis.title.y=element_blank(),
    axis.text.x=element_text(colour="white"),
    axis.text.y=element_text(colour="white"),
    plot.background=element_blank())
  },bg="transparent")
  # Pie chart of the male/female split inferred from first names
  output$gender <- renderPlot({
    team_profiles <- dataInput()
    values=gender_profile(team_profiles)
    labels=c("Male", "Female")
    colors=c("#df691a","#53C253")
    percent_str <- paste(round(values/sum(values),2)*100, "%", sep="")
    values <- data.frame(val = values, Type = labels, percent=percent_str )
    pie <- ggplot(values, aes(x = "", y = val, fill = Type)) +
    geom_bar(colour="black",stat="identity",width = 1) +
    geom_text(aes(y = val/2 + c(0, cumsum(val)[-length(val)]), label = percent), size=10) + ggtitle("Gender Split Percentages")
    pie + coord_polar(theta = "y")+ scale_fill_manual(values = c("#df691a","#53C253"))+ theme(title=element_text(colour="white"),
    panel.grid = element_blank(),
    axis.title.x=element_blank(),
    axis.title.y=element_blank(),
    axis.text=element_blank(),
    axis.ticks=element_blank(),
    plot.background=element_blank())
  },bg="transparent")
})
|
ec5a98a27cf7db7e03e987a761a2bda339ce3633 | 67e5a48ce8f6641d2d76c850b946035a5ab6c86a | /tests/testthat/test-NAs.R | 4ae7d37f2d9b803789a4acded92b7dc354d8db94 | [] | no_license | cran/scModels | 71aa7a1048a256887a7ca6e4730d9177c0e591ed | ba14d424c697de7a21f22f10407afbab9c5aaf7d | refs/heads/master | 2023-01-28T18:45:54.758065 | 2023-01-24T07:20:02 | 2023-01-24T07:20:02 | 205,960,963 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 926 | r | test-NAs.R |

# NA handling for the d/p/q/r functions of the `pb` distribution: any NA in
# the inputs should yield NA. For the RNG, an NA sample size is an error,
# while NA parameters produce NA values with a warning.
test_that("NA parameters in density function", {
  expect_true(is.na(dpb(NA, 5, 3, 20)))
  expect_true(is.na(dpb(1, NA, 3, 20)))
  expect_true(is.na(dpb(1, 5, NA, 20)))
  expect_true(is.na(dpb(1, 5, 3, NA)))
})
test_that("NA parameters in distribution function", {
  expect_true(is.na(ppb(NA, 5, 3, 20)))
  expect_true(is.na(ppb(2, NA, 3, 20)))
  expect_true(is.na(ppb(2, 5, NA, 20)))
  expect_true(is.na(ppb(2, 5, 3, NA)))
})
test_that("NA parameters in quantile function", {
  expect_true(is.na(qpb(NA, 5, 3, 20)))
  expect_true(is.na(qpb(0.2, NA, 3, 20)))
  expect_true(is.na(qpb(0.2, 5, NA, 20)))
  expect_true(is.na(qpb(0.2, 5, 3, NA)))
})
test_that("NA parameters in RNG function", {
  expect_error(rpb(NA, 5, 3, 20))
  expect_warning(expect_true(is.na(rpb(1, NA, 3, 20))))
  expect_warning(expect_true(is.na(rpb(1, 5, NA, 20))))
  expect_warning(expect_true(is.na(rpb(1, 5, 3, NA))))
})
|
7ba4cc8fa0b4bf2e0c5fbb16b5b49e58b1204a8b | 490a085d04a2174222c54a3b4664fccb23dc4a3d | /R/functions.R | b8f1cc9f1bd39bf1af8d30bc1b48947a7a33306b | [] | no_license | sdechaumet/ramopls | 648cefd02f4bd8303e0198110e628edfbb633a90 | 9bcaf7d7a79ca8ffd083e8f59f7bba39a3f8c4ba | refs/heads/master | 2023-07-09T16:54:12.587969 | 2021-01-23T13:59:40 | 2021-01-23T13:59:40 | 332,223,156 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 82,312 | r | functions.R | utils::globalVariables(c("."))
# Package load hook: remind the user that the kopls backend must be
# installed separately, with the two supported installation routes.
.onAttach <- function(libname, pkgname) {
  startup_note <- paste(
    "To use this package, kopls functionalities must be installed.",
    "Please install `kopls` by one of the following method:",
    " - Use the internal function: `rAMOPLS::install_kopls()`",
    " - Download the original source code from `http://kopls.sourceforge.net/download.shtml` and compile it manually with `devtools::install()`",
    sep = "\n"
  )
  packageStartupMessage(startup_note)
}
#' Install the kopls package included in rAMOPLS
#'
#' This function try to install the kopls package from rAMOPLS.
#' It needs Rtools and devtools to run
#'
#' @param ... Argument passed to install
#'
#' @examples
#' install_kopls()
#'
#' @export
# Install the bundled kopls sources shipped inside rAMOPLS (inst/package/
# kopls.zip): unzip to a temporary folder and build with devtools::install().
# `...` is forwarded to devtools::install(). Requires devtools (and, on
# Windows, Rtools).
install_kopls <- function(...) {
  ## Check Rtools env
  if (!requireNamespace("devtools", quietly = TRUE)) {
    stop("Package \"devtools\" needed for this function to work, please install it using install.packages('devtools').",
    call. = FALSE)
  }
  # NOTE(review): these hard-coded paths target a Windows Rtools 3.x layout;
  # on other platforms they are inert -- confirm they are still needed.
  Sys.setenv(PATH = paste("C:/Rtools/bin", Sys.getenv("PATH"), sep=";"))
  Sys.setenv(BINPREF = "C:/Rtools/mingw_$(WIN)/bin/")
  ## Unzip package in temporary folder
  temp_dir_path <- tempdir()
  zip::unzip(file.path(system.file("package", package = "rAMOPLS"), "kopls.zip"), exdir = temp_dir_path)
  ## Install from unzipped temporary folder
  devtools::install(temp_dir_path, quick = T, ...)
  ## Remove temporary folder
  # NOTE(review): unlink() on a directory without recursive = TRUE does not
  # remove it, and tempdir() is the session temp dir -- confirm intent.
  unlink(temp_dir_path)
  # final sanity check: was kopls actually installed?
  if (!requireNamespace("kopls")) {
    warning("kopls installation from local file has failed. You can install it directly from the original authors: \n
http://kopls.sourceforge.net/download.shtml")
  } else {message("kopls was successfully installed")}
}
#' Return factor names and interaction
#'
#' Return the names of all the studied factors (+ interactions) of the given dataframe.
#'
#' @param data_factors Dataframe to study
#' @param studied_factors String of the studied factors indexes according to their column number
#'
#' @examples
#' M <- data.frame(matrix(nrow = 3, ncol = 3, 1:9))
#' colnames(M) <- c('Dose','Time','Age')
#' fun_factor_names(M,'1,2,23')
#'
#' @export
# Resolve the comma-separated index string `studied_factors` (e.g. "1,2,23")
# into factor names taken from the columns of `data_factors`. A two-digit
# code denotes the interaction of the two referenced columns ("Dose x Time").
# Codes with more than two digits raise an error.
#
# Rewritten in plain base R: the original magrittr pipe chain (`%>% {}`)
# added a hard dependency and obscured what is a simple split-and-convert.
fun_factor_names <- function(data_factors, studied_factors) {
  factor_names <- c()
  # "1,2,23" -> c(1, 2, 23)
  codes <- as.numeric(unlist(strsplit(as.character(studied_factors), ",")))
  for (l in codes) {
    n_digits <- nchar(l)
    if (n_digits == 1) {
      # single factor: take its column name directly
      factor_names <- c(factor_names, colnames(data_factors)[l])
    } else if (n_digits == 2) {
      # two-digit code: interaction between the two referenced columns
      factor1_name <- colnames(data_factors)[as.numeric(substr(l, 1, 1))]
      factor2_name <- colnames(data_factors)[as.numeric(substr(l, 2, 2))]
      factor_names <- c(factor_names, paste(factor1_name, 'x', factor2_name))
    } else {
      stop("only 2 factors interaction")
    }
  }
  return(factor_names)
}
#' Load and format data
#'
#' @param datamatrix Datamatrix as a matrix, data.frame or data.table
#' @param samplemetadata Metadata on samples
#' @param factor_names Column(s) name(s) from samplemetadata to use
#'
#' @return a list of two objects: dataset and factors
#' @export
#'
fun_load_data <- function(datamatrix, samplemetadata, factor_names) {
  Data <- list()
  # Coerce the data matrix to a plain matrix. For data.frame / data.table
  # inputs, a non-numeric first column is assumed to hold the sample IDs
  # and becomes the rownames.
  Data$dataset <- switch(class(datamatrix)[[1]],
  "matrix" = {datamatrix},
  "data.frame" = {
    ## Check if first column is numeric
    if (is.numeric(datamatrix[, 1][[1]])) {
      # Return as matrix
      temp <- as.matrix(datamatrix)
      temp
    } else {
      temp <- as.matrix(datamatrix[, -1])
      ## add rownames
      rownames(temp) <- datamatrix[, 1][[1]]
      temp
    }
  },
  "data.table" = {
    ## Check if first column is numeric
    if (is.numeric(datamatrix[, 1][[1]])) {
      # Return as matrix
      temp <- as.matrix(datamatrix)
      temp
    } else {
      temp <- as.matrix(datamatrix[, -1])
      ## add rownames
      rownames(temp) <- datamatrix[, 1][[1]]
      temp
    }
  },
  stop("datamatrix must be a matrix, a data.frame or a data.table")
  )
  # Fail fast if any column of the datamatrix contains missing values
  if (data.table(Data$dataset) %>% .[, lapply(.SD, function(x) {any(is.na(x))})] %>% {any(. == T)}) {
    stop("NA are not allowed in the datamatrix, check and treat the NA before AMOPLS analysis")
  }
  ## Format samplemetadata
  ### Search sampleid in samplemetadata
  samplemetadata <- as.data.table(samplemetadata, keep.rownames = T)
  # sample-ID column(s) = those whose values all match the dataset rownames.
  # NOTE(review): if several columns qualify, the first is used implicitly
  # via [[1]] below -- confirm that is the intended behaviour.
  sampleid_col <- samplemetadata %>% .[, lapply(.SD, function(x) {all(x %in% rownames(Data$dataset))})] %>% {which(. == T)}
  if (length(sampleid_col) == 0) {
    stop("No column in samplemetadata corresponds to the sample IDs.")
  }
  ## Check factor_names
  if (!any(factor_names %in% colnames(samplemetadata))) {
    stop("Some factor_names are not present in samplemetadata, check samplemetadata column names")
  }
  # Factor matrix with rownames set to the sample IDs
  Data$factors <- samplemetadata[, factor_names, with = F] %>% as.matrix(., rownames = samplemetadata[, sampleid_col, with = F][[1]])
  return(Data)
}
#' Get Row Repeats
#'
#' Return the unique row patterns from a given matrix and the indices of the corresponding repeated rows.
#'
#' @param mat Matrix to study
#'
#' @return \item{result}{The single row patterns and the lists of the corresponding indices for each pattern}
#'
#' @examples
#' M <- matrix(nrow = 3, ncol = 3, 1:9)
#' colnames(M) <- c('Dose','Time','Age')
#' rAMOPLS:::fun_GetRowRepeats(M)
#'
fun_GetRowRepeats <- function(mat) {
  # Accept vectors by promoting them to a one-column matrix
  if (!is.matrix(mat)) {
    mat <- matrix(mat)
  }
  result <- list()
  no.rows <- dim(mat)[1]
  no.cols <- dim(mat)[2]
  # row.patterns accumulates each distinct row once; indices.per.pattern
  # records which input rows exhibit each pattern
  result$row.patterns <- matrix(nrow = 0, ncol = no.cols)
  no.patterns.found <- 0
  result$indices.per.pattern <- list()
  fun_IsIdenticalIgnoreNames <- function(x, y) {
    # a new function to check for identical matrices was needed, one that ignores column and row names
    x.nameless <- c(x)
    y.nameless <- c(y)
    if (length(x.nameless) != length(y.nameless)) {
      return(FALSE)
    }
    for (i in 1:length(x.nameless)) {
      if (x.nameless[i] != y.nameless[i]) {
        return(FALSE)
      }
    }
    return(TRUE)
  }
  for (r in 1:no.rows) {
    # go through all the rows in the input matrix, and check whether that row-pattern was already discovered before
    pattern.number <- which(apply(result$row.patterns, 1, fun_IsIdenticalIgnoreNames, y = mat[r, ]) ==
    TRUE) # which pattern does this row match, if any
    if (length(pattern.number) == 0) {
      # if the row does not match a previous pattern, then add it to the list of patterns
      result$row.patterns <- rbind(result$row.patterns, mat[r, ])
      no.patterns.found <- no.patterns.found + 1
      result$indices.per.pattern[[no.patterns.found]] <- r
    } else {
      # the row does match a previous pattern, therefore remember the index of this row as an occurence of that pattern in the matrix
      result$indices.per.pattern[[pattern.number]] <-
      c(result$indices.per.pattern[[pattern.number]], r)
    }
  }
  # Order patterns by the number formed by concatenating their entries.
  # NOTE(review): this assumes factor levels are numeric codes; multi-digit
  # or non-numeric levels may not sort as intended -- confirm upstream coding.
  factors.order <- sort(as.numeric(apply(result$row.patterns, 1, paste, collapse = "")), index.return = TRUE)$ix # sort the patterns by numerical order
  result$indices.per.pattern <- result$indices.per.pattern[factors.order]
  result$row.patterns <- result$row.patterns[factors.order, , drop = FALSE]
  return(result)
}
#' Get Equation Element
#'
#' Internal function : compute, for one factor (or interaction), the matrix of
#' group means of the dataset over every combination of the involved levels.
#'
#' @param model Dataset matrix to study
#' @param evaluation String : index name of the factors to study - ex:'1,2,12'
#' @param previous.model Use of a previous model independant from the data - NULL by default
#'
#' @return \code{s} List of results containing :
#' @return \item{level.combinations}{List containing all the levels combinations of the factor and the corresponding row indices}
#' @return \item{means.matrix}{Means matrix of the dataset on all the level combinations related to the selected factor (or interactions)}
#'
fun_GetEquationElement <- function(model, evaluation, previous.model) {
  s <- list()
  # Reuse the level/row bookkeeping of a previous model when supplied,
  # otherwise recompute it from the study-design columns under evaluation.
  if (is.null(previous.model)) {
    s$level.combinations <- fun_GetRowRepeats(model$general$factors[, evaluation, drop = FALSE])
  } else {
    s$level.combinations <- previous.model[[paste(evaluation, collapse = "")]]$level.combinations
  }
  # One row per observation; each row receives the mean profile of its group.
  s$means.matrix <- matrix(nrow = dim(model$general$data)[1],
                           ncol = dim(model$general$data)[2])
  n_patterns <- dim(s$level.combinations$row.patterns)[1]
  for (p in seq_len(n_patterns)) {
    rows_in_group <- s$level.combinations$indices.per.pattern[[p]]
    group_mean <- colMeans(model$general$data[rows_in_group, , drop = FALSE])
    for (r in rows_in_group) {
      s$means.matrix[r, ] <- group_mean
    }
  }
  return(s)
}
#' Function to undersample a dataset
#'
#' This function will randomly take n observation in each unique groups of Data$factors
#' with n equal to the smallest subgroup.
#'
#' @inheritParams fun_AMOPLS
#'
#' @return a list with subsampled datamatrix and their corresponding factors
fun_balance_data <- function(Data) {
  # Design matrix: one row per observation, one column per factor
  Factors <- as.matrix(Data$factors[, colnames(Data$factors)])
  Factors_id <- as.matrix(unique(Factors)) # one row per unique level combination (subgroup)
  Pattern_indices <- list()  # row indices belonging to each subgroup
  Length_indices <- list()   # size of each subgroup
  for (l in 1:dim(Factors_id)[1]) {
    # l <- 2
    # Rows whose factor levels exactly match the l-th unique combination
    line_indices <- apply(Factors, 1, identical, Factors_id[l, ]) %>% which()
    Pattern_indices[[l]] <- line_indices
    Length_indices[[l]] <- length(line_indices)
  }
  # Target size for every subgroup: the smallest subgroup
  m <- min(unlist(Length_indices))
  # NOTE(review): the first `matrix()` argument is ignored (nrow = 0); these are
  # just 0-row accumulators with the right number of columns.
  # NOTE(review): this reads Data$data while other functions in this file
  # (e.g. fun_pre_processing) read Data$dataset - confirm which slot callers
  # actually provide; the output is written back to Data$dataset.
  Data_balanced <- matrix(m, nrow = 0, ncol = dim(Data$data)[2])
  Factors_balanced <- matrix(m, nrow = 0, ncol = dim(Data$factors)[2])
  if (all(Length_indices == m)) {
    message('Data already balanced')
  } else {
    # Randomly keep m observations from every subgroup
    for (l in 1:dim(Factors_id)[1]) {
      Pattern_indices[[l]] <- sample(Pattern_indices[[l]], m)
      Data_balanced <- rbind(Data_balanced, Data$data[Pattern_indices[[l]], ])
      Factors_balanced <- rbind(Factors_balanced, Data$factors[Pattern_indices[[l]], ])
    }
    Data$dataset <- Data_balanced
    Data$factors <- Factors_balanced
  }
  return(Data)
}
#' Check if data are balanced in selected factors
#'
#' @inheritParams run_AMOPLS
#' @inheritParams fun_AMOPLS
#' @return TRUE if the data are balanced, FALSE otherwise.
#' @export
fun_is_balanced <- function(Data, factor_names, interaction_level){
  factor_index <- which(factor_names %in% colnames(Data$factors))
  # All factor-index combinations up to the requested interaction order
  # (order 0 = pure effects, order 1 adds pairwise interactions, ...)
  combos <- unlist(
    lapply(0:interaction_level, function(int) {
      utils::combn(factor_index, int + 1, simplify = FALSE)
    }),
    recursive = FALSE
  )
  names_fac <- character(length(combos))
  balanced <- logical(length(combos))
  for (k in seq_along(combos)) {
    col_sel <- colnames(Data$factors)[combos[[k]]]
    names_fac[k] <- paste(col_sel, collapse = " x ")
    # A term is balanced when every subgroup has the same sample count
    counts <- as.data.table(Data$factors)[, .(sple_nb = .N), by = col_sel]
    balanced[k] <- length(unique(counts$sple_nb)) == 1
  }
  if (!all(balanced)) {
    message("Data are unbalanced in:")
    message(paste(names_fac[!balanced], collapse = "\n"))
    return(F)
  } else {
    return(T)
  }
}
#' Data pre-processing
#'
#' Data pre-processing step before the ANOVA decomposition : the data matrix is
#' centred (and optionally scaled), design factors are coerced to numeric level
#' codes, and the evaluation order of the studied effects is established.
#'
#' @inheritParams run_AMOPLS
#' @inheritParams fun_AMOPLS
#'
#' @return \code{s$general} List of results containing general information about the pre-processed dataset :
#' @return \item{Nb_compo_ortho}{Number of orthogonal components}
#' @return \item{studied_factors}{String of the indices of the studied factors}
#' @return \item{equation.elements}{List of numeric indices of the studied factors}
#' @return \item{order.to.evaluate.ee}{List of numeric indices corresponding to the order of evaluation of the factors under study}
#' @return \item{data}{Pre-processed dataset to use in the model}
#' @return \item{ssq.mean}{Mean sum of square of the dataset}
#' @return \item{ssq}{Sum of square of the dataset}
#' @return \item{factors}{Pure factors dataset}
#' @return \item{factor_names}{List of the names of the factors under study (interactions included)}
#' @return \item{factors_data}{Pure factors + interactions dataset}
#' @return \item{PCA}{Principal component analysis on the main dataset}
fun_pre_processing <- function(Data,
                               Nb_compo_ortho,
                               equation.elements,
                               scaling,
                               only.means.matrix = FALSE) {
  s <- list()
  s$general <- list("Nb_compo_ortho" = Nb_compo_ortho,
                    "studied_factors" = equation.elements)
  # Data processing : centred, and unit-variance scaled when `scaling` is TRUE
  dataAdjusted <- MetStaT.ScalePip(Data$dataset,
                                   center = TRUE,
                                   scale = scaling,
                                   quietly = TRUE)
  s$general$data <- dataAdjusted$data
  # Format factors : non-numeric design columns are mapped to integer level codes
  if (!is.numeric(Data$factors)) {
    # message("The supplied factors are not numeric. Converting levels to numeric values")
    temp <- apply(Data$factors, 2, function(x) {as.numeric(as.factor(x))})
    rownames(temp) <- rownames(Data$factors)
    colnames(temp) <- colnames(Data$factors)
    Data$factors <- temp
  }
  s$general$factors <- Data$factors
  # "1,2,12" -> list(1, 2, c(1, 2)) : each element lists the design columns involved
  if (is.character(equation.elements)) {
    equation.elements <- lapply(strsplit(strsplit(equation.elements, split = ",")[[1]], split =
                                           ""), as.numeric)
  }
  # Validate that every referenced factor index exists in the design matrix
  for (ee in equation.elements) {
    for (f in ee)
      if (f > dim(Data$factors)[2] ||
          f < 1) {
        stop(paste("Factor ", f, " is beyond scope of study-design", sep = ""))
      }
  }
  if (nrow(Data$dataset) != nrow(Data$factors)) {
    stop(
      paste(
        "Number of rows in data (",
        dim(Data$dataset)[1],
        ") and study design (",
        dim(Data$factors)[1],
        ") do not match",
        sep = ""
      )
    )
  }
  ## Establishment of the order of study of the factors, by increasing complexity :
  ## main effects first, then interactions (sorts "1" < "2" < "12" numerically)
  order.to.evaluate.ee <- sort(
    as.numeric(
      unlist(
        lapply(equation.elements, paste, collapse = "")
      )), index.return = TRUE)$ix
  s$general$equation.elements <- equation.elements
  s$general$order.to.evaluate.ee <- order.to.evaluate.ee
  # Sum of squares of the removed offset. NOTE(review): `rep(vector, n)`
  # recycles the whole centring vector n times, i.e. n * sum((center/scale)^2).
  s$general$ssq.mean <- sum(rep(dataAdjusted$center.vector / dataAdjusted$scale.vector ,nrow(Data$dataset)) ^ 2)
  # Total sum of squares of the RAW (un-adjusted) dataset
  s$general$ssq <- sum(Data$dataset ^ 2)
  s$general$factor_names <- fun_factor_names(s$general$factors, s$general$studied_factors)
  ## Clustering of data in one matrix for score plots
  # pure effect factors :
  s$general$factors_data <- fun_factors_data(s$general$factors, s$general$studied_factors)
  ## PCA on the dataset :
  if (!only.means.matrix) {
    s$general$PCA <- MetStat.PCA.Calculate(s$general$data)
  }
  return(s)
}
#' Return the factors_data slot
#'
#' Build, for each studied term (pure effect or interaction), one column of
#' per-observation group labels obtained by crossing the involved factor
#' columns (e.g. levels "1" and "2" become "1.2").
#'
#' @param factors The samplemetadata with observations groups
#' @param studied_factors The factors under study (comma-separated indices, e.g. "1,2,12")
#'
#' @return Return a data.table with observations groups for each factors under study and their interactions
fun_factors_data <- function(factors, studied_factors) {
  factor_names <- fun_factor_names(factors, studied_factors)
  # One term per comma-separated entry: "1,2,12" -> "1", "2", "12"
  # (the original also built `factors %>% data.table()` here, but that value
  # was immediately overwritten - dead code removed)
  term_list <- strsplit(as.character(studied_factors), ",")[[1]]
  columns <- mapply(function(term, term_name) {
    # Each digit of the term is a column index in `factors`
    level_cols <- lapply(seq_len(nchar(term)), function(pos) {
      col_ind <- as.numeric(substr(term, pos, pos))
      factors[, col_ind]
    })
    # Cross the levels into a single grouping label per observation
    output <- data.table(interaction(level_cols, sep = "."))
    setnames(output, term_name)
  }, term_list, factor_names, SIMPLIFY = FALSE)
  # One column per term, bound side by side
  return(Reduce(cbind, columns))
}
#' @title Permutation setting function (optional)
#' @description Local permutation of the selected matrix, according to the selected element - cf J.Boccard et al. (2016)
#'
#' @param s List corresponding to the dataset to study
#' @param ee Internal parameter corresponding to the factor element under study
#' @param perm_t Element to permute
#'
#' @return \item{s$general$dataset}{Permuted dataset}
#' @export
fun_perm_settings <- function(s, ee, perm_t) {
  ee.name <- paste(s$general$equation.elements[[ee]],collapse="")
  ## Permute only when ee IS the pure effect named by perm_t
  ## (nchar(perm_t) < 2 <-> pure effect, e.g. '1' or '2'; interactions like '12' are handled elsewhere)
  if (ee.name == perm_t & nchar(perm_t)<2){
    Permterm <- colnames(s$general$factors)[as.integer(perm_t)] # Permterm : studied factor name - ex: Dose.Group
    # All design columns EXCEPT the permuted one: the permutation is local,
    # i.e. performed within each level combination of the other factors
    Perm_factors <- s$general$factors[, -as.integer(perm_t), drop = F]
    # Perm_factors <- as.matrix(s$general$factors) %>% {.[grepl(Permterm, colnames(.)), drop = F]} # loses the column name
    factors_id <- unique(Perm_factors) # unique level combinations of the non-permuted factors
    Permuted_factor <- matrix(0, 1,dim(s$general$factors)[1])
    s$general$permuted_factors <- matrix(0, dim(s$general$factors)[1],dim(s$general$factors)[2])
    ## For each level combination of the factors other than Permterm :
    for (l in 1:dim(factors_id)[1]){
      # l <- 1
      # line_indices <- apply(Perm_factors, 1, identical, factors_id[l, , drop = F]) %>% which()
      # Rows belonging to this level combination (matched via a crossed label)
      line_indices <- which(interaction(data.frame(Perm_factors)) %in% interaction(data.frame(factors_id)[l,]))
      randperm_indices <- sample(line_indices)
      # For each permuted indices dataset : permutation on the column corresponding to ee
      for (k in 1:length(line_indices)){
        # k <- 1
        Permuted_factor[line_indices[k]] <- s$general$factors[randperm_indices[k],Permterm]
        s$general$permuted_factors[line_indices[k],] <- s$general$factors[randperm_indices[k],]
      }
    }
    Permuted_factor <- t(Permuted_factor)
    # Overwrite the permuted factor column in the study design
    s$general$factors[,Permterm] <- Permuted_factor
  }
  return(s)
}
#' @title ANOVA decomposition
#' @description ANOVA decomposition of the dataset according to the selected factors and their interactions.
#'
#' @param s List containing general information about the dataset and the factors - output of fun_pre_processing
#' @param only.means.matrix 'FALSE' by default
#' @param perm_t Term to permute if Perm = 'TRUE'
#'
#' @return \code{s$decompo_ANOVA} List of results of the ANOVA decomposition for each studied factor (including interactions + residuals) :
#' @return \item{residuals}{Residuals matrix}
#' @return \item{factor_index}{List of results related to the selected factor (interactions) :}
#' \itemize{
#' \item \code{level.combinations} Main results on the level
#' \item \code{means.matrix} Mean average matrix on every level combination of the selected factor
#' \item \code{svd} Result of the Singular Value Decomposition on the means matrix - cf fun_svd_extraction
#' \item \code{means.matrix_res} Mean average + residuals matrix on every level combination of the selected factor}
fun_decompo_ANOVA <- function(s,
                              only.means.matrix = FALSE,
                              perm_t = NULL) {
  # only.means.matrix <- F
  # perm_t <- "1"
  # Data <- list("dataset" = as.matrix(liver.toxicity$gene[, 1:20]),
  # "factors" = as.matrix(data.table(liver.toxicity$treatment)[, .(Dose = Dose.Group, Time = Time.Group)]))
  # Nb_compo_ortho <- 1
  # equation.elements <- "1,2,12"
  # scaling <- F
  # s <-fun_pre_processing(Data = Data,
  # s = list(),
  # Nb_compo_ortho = Nb_compo_ortho,
  # equation.elements = equation.elements,
  # scaling = scaling,
  # only.means.matrix = only.means.matrix
  # )
  s$decompo_ANOVA <- list()
  s$decompo_ANOVA$residuals <- s$general$data # Residuals matrix (placeholder; replaced by temp_remainder at the end)
  ## For each factor (or interactions) i under study, in order of increasing complexity :
  temp_remainder <- s$general$data
  for (ee in s$general$order.to.evaluate.ee) {
    # ee <- s$general$order.to.evaluate.ee[[1]]
    ## Permutation on a pure effect factor or interaction - depending on perm_t
    if (!is.null(perm_t)) {s <- fun_perm_settings(s, ee, perm_t)}
    ## Calculation of the mean submatrix <X_i>, related to the average on the samples of every level of the i factor
    # <X_i> : in new.equation.element
    # reductions : for an interaction (ij) -> selection of the associated pure effects factors (i,j)
    new.equation.element <- fun_GetEquationElement(s, s$general$equation.elements[[ee]], previous.model = NULL) # $means.matrix in new.equation.element
    ## For an interaction (ij) : the pure effect submatrices are removed from <X_ij> :
    ## <X_int(i,j)> = <X_ij> - <X_i> - <X_j>
    ## (pure effects were evaluated first, so their means matrices already exist)
    if (length(s$general$equation.elements[[ee]]) > 1) {
      for (r in s$general$equation.elements[[ee]]) {
        new.equation.element$means.matrix <- new.equation.element$means.matrix - s$decompo_ANOVA[[c(paste(r, collapse = ""))]]$means.matrix
      }
    }
    # for (r in reductions) {
    # new.equation.element$means.matrix <- new.equation.element$means.matrix - s$decompo_ANOVA[[c(paste(r, collapse = ""))]]$means.matrix
    # }
    ## Under permutation, interaction matrices are destroyed by shuffling their rows
    if (nchar(s$general$equation.elements[ee]) > 1 & !is.null(perm_t)) {
      # print('interaction')
      new.equation.element$means.matrix <- new.equation.element$means.matrix[sample(1:nrow(new.equation.element$means.matrix)[1]), ]
    }
    ## Residual matrix :
    # For each factor i : <X_i> is removed from the dataset
    # s$decompo_ANOVA$residuals : residuals matrix
    #
    # For each factor i under study : Singular Value Decomposition (PCA) on <X_i> - result in s$new.equation.element$svd
    # Selection of the non-zero eigen values and vectors selected : fix or factor-dependent threshold
    if (!only.means.matrix) {
      temp_remainder <- temp_remainder - new.equation.element$means.matrix
      new.equation.element <- fun_svd_extraction(new.equation.element, threshold = 0.0001)
    }
    ## Results in s$'ee.name' - ee.name : name of the selected factor
    ee.name <- paste(s$general$equation.elements[[ee]], collapse = "")
    s$general$ee.names <- c(s$general$ee.names, ee.name)
    s$decompo_ANOVA[[ee.name]] <- new.equation.element
  }
  # What is left after removing every effect matrix is the residual matrix
  s$decompo_ANOVA$residuals <- temp_remainder
  s$general$ee.names <- c(s$general$ee.names, 'residuals')
  # Residual-augmented effect matrices <X_i + residuals>, used downstream
  for (ee.name in s$general$ee.names[s$general$ee.names != 'residuals']) {
    s$decompo_ANOVA[[ee.name]]$means.matrix_res <- s$decompo_ANOVA[[ee.name]]$means.matrix + s$decompo_ANOVA$residuals
  }
  return(s)
}
#' @title SVD extraction
#' @description Singular Value Decomposition (PCA) extraction for the given matrix and selection of the non-zero eigen values and vectors.
#'
#' @param new.equation.element Matrix to study
#' @param threshold Threshold for the non-zero eigen values selection
#'
#' @return \item{svd}{List of results corresponding to :}
#' \itemize{
#' \item \code{d, v, var.explained, t} Results from the SVD : cf PCA.Calculate
#' \item \code{non_zero_eigen_vect} Non-zero singular eigen vectors from the SVD results
#' \item \code{non_zero_eigen_val} Non-zero singular eigen values from the SVD results}
#' @references From MetStaT : PCA.Calculate
#' @export
fun_svd_extraction <- function(new.equation.element, threshold) {
  # SVD of the averaged matrix for this pure effect or interaction
  svd_res <- MetStat.PCA.Calculate(new.equation.element$means.matrix)
  # Keep only components whose singular value exceeds the threshold
  keep <- svd_res$d > threshold
  svd_res$non_zero_eigen_vect <- as.matrix(svd_res$t[, keep])
  svd_res$non_zero_eigen_val <- as.matrix(svd_res$d[keep])
  new.equation.element$svd <- svd_res
  return(new.equation.element)
}
#' ANOVA PCA
#'
#' Principal Component Analysis on every residual-augmented experimental submatrix from ANOVA decomposition (interactions included).
#'
#' @inheritParams fun_outputs
#'
#' @return \item{s$ANOVA_PCA}{List of results including for each factor (+ interactions and residuals) the main results of the PCA}
#'
#' @references From MetStaT : PCA.Calculate
fun_ANOVA_PCA <- function(s) {
  s$ANOVA_PCA <- list()
  # One PCA per residual-augmented effect matrix; the pure residual
  # matrix itself is not analysed here.
  for (effect in s$general$ee.names[s$general$ee.names != 'residuals']) {
    s$ANOVA_PCA[[effect]]$pca <- MetStat.PCA.Calculate(s$decompo_ANOVA[[effect]]$means.matrix_res)
  }
  return(s)
}
#' Multiblock clustering
#'
#' Clustering of the ANOVA-decomposed experimental submatrices and the non-zero eigen vectors from the SVD analysis.
#'
#' @details The number of predictive components is imposed by the previous SVD - it corresponds to the number of non-zero eigen vectors.
#'
#' @inheritParams fun_outputs
#' @return \code{general$Nb_compo_pred} Number of predictive components
#' @return \code{Multiblock} List including all the results from the Multiblock clustering :
#' \itemize{
#' \item \code{Wmat} Sum of the association matrices of all ANOVA submatrices
#' \item \code{AMat} AMat matrix for each factor (+ interactions and residuals) - AMat_i = <X_i+res>*t<X_i+res> (normalized - Frobenius norm)
#' \item \code{Y} Clustering of all the non-zero eigenvectors from the SVD of the ANOVA-decomposed matrices}
fun_Multiblock <- function(s) {
  n_obs <- dim(s$general$data)[1]
  effect_names <- s$general$ee.names[s$general$ee.names != 'residuals']
  # Sample-association matrix X %*% t(X), scaled to unit Frobenius norm
  normalized_association <- function(X) {
    XXt <- X %*% t(X)
    XXt / norm(XXt, 'F')
  }
  s$Multiblock$W_mat <- matrix(0, nrow = n_obs, ncol = n_obs)
  s$Multiblock$Y <- NULL
  # W_mat accumulates the normalized association matrices of every effect...
  for (ee.name in effect_names) {
    s$Multiblock$AMat[[ee.name]] <- normalized_association(s$decompo_ANOVA[[ee.name]]$means.matrix_res)
    s$Multiblock$W_mat <- s$Multiblock$W_mat + s$Multiblock$AMat[[ee.name]]
  }
  # ...plus the one of the pure residual matrix
  s$Multiblock$AMat[['residuals']] <- normalized_association(s$decompo_ANOVA$residuals)
  s$Multiblock$W_mat <- s$Multiblock$W_mat + s$Multiblock$AMat[['residuals']]
  # Y clusters the non-zero eigenvectors of every effect side by side
  for (ee.name in effect_names) {
    s$Multiblock$Y <- Reduce("cbind", list(s$Multiblock$Y, s$decompo_ANOVA[[ee.name]]$svd$non_zero_eigen_vect))
  }
  # One predictive component per clustered eigenvector
  s$general$Nb_compo_pred <- ncol(s$Multiblock$Y)
  return(s)
}
#' K-OPLS training function
#'
#' Application of the K-OPLS model training function to the multiblock dataset from kopls package.
#' The fit is retried with a progressively smaller tolerance while the solver
#' raises collinearity errors, and stops at the first successful fit.
#'
#' @inheritParams koplsModel_custom
#'
#' @return Results from the K-OPLS model function, or NULL if every attempt failed
fun_kopls <- function(K, Y, A, nox) {
  result <- NULL
  for (j in 1:100) {
    # Tolerance shrinks by a factor 1e-5 on every retry
    tol <- 10^-(5*j)
    result <- tryCatch(
      koplsModel_custom(K = K,
                        Y = Y,
                        A = A,
                        nox = nox,
                        preProcK = "mc",
                        preProcY = "mc",
                        tol = tol),
      error = function (e) {e})
    if (inherits(result, "error")) {
      message("Collinerarity problem in solve function, setting tolerance to: ", tol)
    } else {
      # BUGFIX: the original assigned `j <- 100`, which does NOT terminate an R
      # `for` loop - the model was refit 100 times with the tolerance
      # collapsing to 10^-500. Stop at the first successful fit instead.
      break
    }
  }
  if (inherits(result, "error")) {
    message(result)
    return(NULL)
  } else {
    return(result)
  }
}
#' Plot R2Y and p-value for each orthogonal models
#'
#' Builds a bar chart of R2Y per candidate number of orthogonal components,
#' annotated with the permutation-based p-value, to help select the number of
#' orthogonal components.
#'
#' @inheritParams fun_outputs
#'
#' @import ggplot2
#' @import data.table
#' @import magrittr
#'
#' @export
fun_plot_ortho <- function(s) {
  # s <- result
  ## check if there is multiple result
  # Declare NSE column names as NULL to silence R CMD check notes
  `R2Y p-value` <- Ortho_nb <- R2Y <- R2Y_pval <- Iteration <- x <- y <- NULL
  if (!is.list(s)) {stop("Perform run_AMOPLS with multiple nb_compo_orthos")}
  # One summary row per fitted model: final R2Y, mean permutation p-value,
  # number of orthogonal components and number of permutation iterations
  lapply(s, function(x) {
    # x <- s[[1]]
    data.table("R2Y" = x$kOPLS$R2Yhat %>% {.[length(.)]},
               "R2Y_pval" = x$output$Permutation_result %>% {.[, sum(`R2Y p-value`) / .N]},
               "Ortho_nb" = x$general$Nb_compo_ortho,
               "Iteration" = x$output$Permutation_result$PermNb %>% unique())
  }) %>%
    rbindlist(use.names = T, idcol = "rn") %>% {
      # Bar height = R2Y; text above each bar = permutation p-value
      ggplot(., aes(Ortho_nb, R2Y)) +
        geom_bar(stat = "identity", color = "black", fill = "grey") +
        theme_bw() +
        ylim(0,1) +
        geom_text(aes(label = formatC(R2Y_pval, digits = 3, format = "f")), vjust = -0.5) +
        labs(title = "R2Y and p-value for orthogonal component selection",
             subtitle = paste0("p-value calculated from ", .[, unique(Iteration)], " iterations"),
             x = "Number of orthogonal component",
             y = "R2Y")
    }
}
#' RSS score
#' Internal function : Calculation of the Relative Sum of Squares (RSS) + Sum of Squares (SSQ) scores.
#'
#' @inheritParams fun_outputs
#'
#' @return \code{SSQ} SSQ score table for each experimental submatrix (+ interactions and residuals)
#' @return \code{RSS} RSS score table for each factor (+ interactions and residuals)
#' @export
fun_Sum_of_Squares <- function(s) {
  all_names <- s$general$ee.names
  # Sum of squares of each effect matrix; the residual term uses the
  # residual matrix of the ANOVA decomposition
  ssq_vals <- vapply(all_names, function(nm) {
    if (nm == 'residuals') {
      sum(s$decompo_ANOVA$residuals ^ 2)
    } else {
      sum(s$decompo_ANOVA[[nm]]$means.matrix ^ 2)
    }
  }, numeric(1))
  # Relative sum of squares: each term's share of the total
  ssq_tot <- sum(ssq_vals)
  rss_vals <- ssq_vals / ssq_tot
  SSQ <- data.table(t(ssq_vals))
  names(SSQ) <- all_names
  RSS <- data.table(t(rss_vals))
  names(RSS) <- all_names
  return(list(SSQ, RSS))
}
#' Block saliences
#' Internal function : Calculation of the contribution of each factor on every component (predictive + orthogonal).
#'
#' @inheritParams fun_outputs
#'
#' @return \code{block_saliences} Table of block saliences for each factor (row) and component (column) - raw
#' @return \code{block_saliences_norm} Table of block saliences for each factor (row) and component (column) - normalized
#' @export
fun_block_saliences <- function(s) {
  n_pred <- s$general$Nb_compo_pred
  n_ortho <- s$general$Nb_compo_ortho
  effect_names <- s$general$ee.names
  # One row per effect (residuals included), one column per component
  block_saliences <- matrix(0, nrow = length(effect_names), ncol = n_pred + n_ortho)
  for (row_idx in seq_along(effect_names)) {
    assoc <- s$Multiblock$AMat[[effect_names[row_idx]]]
    # Quadratic form t(score) %*% AMat %*% score: how much of the effect's
    # association structure each predictive score carries...
    for (d in seq_len(n_pred)) {
      score <- as.matrix(s$kOPLS$T[, d])
      block_saliences[row_idx, d] <- t(score) %*% assoc %*% score
    }
    # ...and each orthogonal score
    for (o in seq_len(n_ortho)) {
      score <- as.matrix(s$kOPLS$To[, o])
      block_saliences[row_idx, n_pred + o] <- t(score) %*% assoc %*% score
    }
  }
  # Normalisation: saliences of each component (column) sum to 1
  block_saliences_norm <- sapply(seq_len(n_pred + n_ortho), function(comp) {
    block_saliences[, comp] / sum(block_saliences[, comp])
  })
  return(list(block_saliences, block_saliences_norm))
}
#' Most influent factor per component
#'
#' Internal function : Return for each component the index of most influent factor (corresponding to the maximum of block_saliences among all the factors + residuals).
#'
#' @inheritParams fun_outputs
#'
#' @return \code{Most_influent_factor} 1-row matrix of indexes of the most influent factor for each component (predictive + orthogonal)
#'
#' @export
fun_Most_influent_factor <- function(s) {
  n_compo <- s$general$Nb_compo_pred + s$general$Nb_compo_ortho
  # For each component (column of the normalized saliences), pick the row
  # index (= factor) with the largest salience
  winners <- vapply(seq_len(n_compo), function(k) {
    which.max(s$outputs$block_saliences_norm[, k])
  }, numeric(1))
  return(matrix(winners, nrow = 1))
}
#' RSR score
#'
#' Internal function : Calculation of the Residual Structure Ratio (RSR) score.
#'
#' @inheritParams fun_outputs
#'
#' @return \code{RSR} Table of the RSR score for each factor (+ interactions and residuals)
#' @export
fun_RSR <- function(s) {
  s$outputs$block_saliences <- fun_block_saliences(s)[[1]]
  bs <- s$outputs$block_saliences
  # Column of the first orthogonal component; the last salience row is the residuals
  first_ortho <- s$general$Nb_compo_pred + 1
  residual_salience <- bs[dim(bs)[1], first_ortho]
  # RSR = residual salience / effect salience, on the first orthogonal component
  ratios <- vapply(seq_along(s$general$ee.names), function(fact) {
    residual_salience / bs[fact, first_ortho]
  }, numeric(1))
  RSR <- data.table(t(ratios))
  setnames(RSR, s$general$ee.names)
  return(RSR)
}
#' X-score calculation
#'
#' Internal function : Clustering of the x-scores from the kOPLS::kOPLSModel function (predictive and orthogonal).
#'
#' @inheritParams fun_outputs
#'
#' @return \code{x-scores} Matrix of the X-scores from the kOPLS model for every predictive and orthogonal component
#' @references From kopls : koplsModel
#' @export
fun_xscores <- function(s) {
  # Predictive scores first, orthogonal scores appended after
  return(cbind(s$kOPLS$T, s$kOPLS$To))
}
#' X-loadings
#'
#' Internal function : Calculation of the x-loadings corresponding to the contribution of each variable on the components.
#' For every component, the loading is the projection of the most-influent
#' effect's matrix onto that component's score vector, normalised by the
#' score's squared norm.
#'
#' @inheritParams fun_outputs
#'
#' @return \code{x_loadings} Table corresponding to the X-loadings for each variable (row) and component (column) - predictive + orthogonal
#' @export
fun_xloadings <- function(s) {
  n_pred <- s$general$Nb_compo_pred
  n_ortho <- s$general$Nb_compo_ortho
  x_loadings <- matrix(0, nrow = dim(s$general$data)[2], ncol = n_pred + n_ortho)
  # Matrix of the effect driving a given component (residuals use the residual matrix)
  driving_matrix <- function(compo_col) {
    driver <- s$general$ee.names[s$outputs$Most_influent_factor[, compo_col]]
    if (driver != 'residuals') {
      s$decompo_ANOVA[[driver]]$means.matrix_res
    } else {
      s$decompo_ANOVA$residuals
    }
  }
  # Predictive components
  for (d in 1:n_pred) {
    t_vec <- as.matrix(s$kOPLS$T[, d])
    x_loadings[, d] <- t(driving_matrix(d)) %*% t_vec / as.numeric(t(t_vec) %*% t_vec)
  }
  # Orthogonal components.
  # BUGFIX vs original: (1) the most influent factor is looked up in column
  # n_pred + o (not o); (2) the projection and its normalisation both use the
  # orthogonal scores To[, o] (the original divided by t(T[, o]) %*% T[, o]);
  # (3) the residuals branch wrote to column o, clobbering a predictive column.
  for (o in 1:n_ortho) {
    to_vec <- as.matrix(s$kOPLS$To[, o])
    x_loadings[, n_pred + o] <- t(driving_matrix(n_pred + o)) %*% to_vec / as.numeric(t(to_vec) %*% to_vec)
  }
  return(x_loadings)
}
#' Y-loadings
#'
#' Internal function : Calculation of the loadings related to the Y matrix.
#'
#' @inheritParams fun_outputs
#'
#' @return \code{y_loadings} Y-loadings matrix defined as described in T. Mehmood et al.
#' @references T. Mehmood et al. (2012)
#' @export
fun_yloadings <- function(s) {
  n_comp <- dim(s$kOPLS$T)[2]
  # Loading i = projection of Y column i onto score vector i,
  # normalised by the score's squared norm
  loadings <- vapply(seq_len(n_comp), function(i) {
    t_i <- s$kOPLS$T[, i]
    as.numeric(t(s$Multiblock$Y[, i]) %*% t_i) / as.numeric(t(t_i) %*% t_i)
  }, numeric(1))
  return(matrix(loadings, nrow = 1))
}
#' SSa score
#'
#' Internal function : Calculation of the SSa score. SSa : variance of Y explained by the a-th component - in the calculation of the VIP formula
#'
#' @inheritParams fun_outputs
#'
#' @return \code{SSa} score for each component a (wrapped in a 1-element list)
#' @export
fun_SSa <- function(s) {
  Tmat <- s$kOPLS$T
  q <- s$outputs$y_loadings
  # Diagonal of (T'T)(q'q): per-component explained variance of Y
  SSa <- diag((t(Tmat) %*% Tmat) %*% (t(q) %*% q))
  return(list(SSa))
}
#' VIP score
#' Internal function : Calculation of the Variable Importance in Projection
#' (VIP) score per variable and per studied effect, from the components that
#' each effect dominates.
#'
#' @inheritParams fun_outputs
#'
#' @return \code{VIP} VIP Table scores for each variable (row) and factor (column) - interactions included
#' @references T. Mehmood et al. (2012) - DOI : 188 (2012) 62-69
#' @export
fun_VIP <- function(s) {
  # ### Selection of the related components with Most_influent_factor - ex: factor Dose <-> tp2, tp4
  # ### Calculation of the SSa score for each of those components - ex: SS2, SS4
  # For each variable j :
  # Calculation on every a-component of the contribution of the variable of j on component a : ex: W2j, W4j
  # formula : Waj = t(Xi_res) * Y (cf Mehmood et al. 2012)
  # with
  # Xi_res : i factor related matrix (cf block_saliences calculation)
  # Y : clustered eigen vectors matrix
  # For a factor a, for the variable j : VIP_a(j) = sum[a-components]{SSa*Waj}/sum[a-components]{SSa} - ex: VIP_a(j) = (SS2*W2j + SS4*W4j) / (SS2 +SS4)
  # s <- result$orthoNb_1
  names <- data.table(colnames(s$general$data))  # variable ids, column V1
  p_var <- nrow(names)                           # number of variables
  X <- s$general$data
  ## new
  # One VIP column per studied effect (residuals excluded, hence length - 1)
  VIP_n <- lapply(1:(length(s$general$ee.names) - 1), function(i) {
    # i <- 2
    ## [Update 07/01/2020] Calculate only on predictive component
    ## Removed
    # pred_compos <- which(s$outputs$Most_influent_factor[, 1:s$general$Nb_compo_pred] %>% {.[-length(.)]} == i)
    # Components (all but the last) dominated by effect i
    pred_compos <- which(s$outputs$Most_influent_factor %>% {.[-length(.)]} == i)
    if (length(pred_compos) == 0) {
      # Effect i dominates no component: VIP is NA for every variable
      VIP_term <- cbind(names, NA)
      setnames(VIP_term, c("id", s$general$factor_names[i]))
      return(VIP_term)
    }
    # Scores (Term), weights (W) and Y-loadings (Q) of the selected components
    Term <- c()
    W <- c()
    Q <- c()
    for (p in pred_compos) {
      q <- t(s$kOPLS$Up) %*% s$kOPLS$T[, p] / as.numeric(t(s$kOPLS$T[, p]) %*% s$kOPLS$T[, p])
      u <- s$kOPLS$Up %*% q / as.numeric(t(q) %*% q)
      w <- t(X) %*% u / as.numeric(t(u) %*% u)
      w <- w / norm(w, '2')
      Term <- cbind(Term, s$kOPLS$T[, p])
      W <- cbind(W, w)
      Q <- cbind(Q, q)
    }
    Q <- t(as.matrix(Q))
    # SS: variance of Y explained by each selected component
    SS <- diag(t(Term) %*% Term %*% Q %*% t(Q))
    # VIP of every variable j: SS-weighted squared weights, scaled by p_var
    VIP <- lapply(1:p_var, function(j) {
      # j <- 1
      weight <- c()
      for (p in 1:length(pred_compos)) {
        weight <- cbind(weight, (W[j, p] / norm(as.matrix(W[, p]), '2')) ^ 2)
      }
      weight <- t(weight)
      q <- SS %*% weight
      # VIP_term <- sqrt(p_var * q / sum(SS)) ## Not needed since erased in the next step ?
      VIP_term <- p_var * q / sum(SS)
      # VIP_term <- data.table(names[j, 1], VIP_term)
      # setnames(VIP_term, c("id", s$general$factor_names[i]))
      return(VIP_term)
    }) %>%
      unlist() %>%
      {data.table("id" = names, .)}
    setnames(VIP, c("id", s$general$factor_names[i]))
    return(VIP)
  }) %>%
    {Reduce(function(z, w) {merge(z, w, by = "id", all = T)}, .)}
  # Restore the original variable order.
  # NOTE(review): this keyed row lookup relies on merge() having set "id" as
  # the key of the merged data.table - confirm with the data.table version in use.
  VIP_n <- VIP_n[names$V1]
  return(VIP_n)
}
#' @title Outputs wrapper
#' @description Wrapper function to cluster all the outputs of the AMOPLS model.
#' @param s List containing all the information from the AMOPLS model
#' @return \code{s$outputs} Main outputs of AMOPLS corresponding to :
#' \itemize{
#' \item \code{SSQ} Sum of squares scores
#' \item \code{RSS} Relative sum of squares scores
#' \item \code{block_saliences} Block saliences scores
#' \item \code{block_saliences_norm} Normalized block saliences scores
#' \item \code{Most_influent_factor} Most influent factor per component
#' \item \code{RSR} Residual Structure Ratio score
#' \item \code{x_scores} x-scores from K-OPLS
#' \item \code{x_loadings} x-loadings from K-OPLS
#' \item \code{y_loadings} y-loadings from K-OPLS
#' \item \code{SSa} SSa scores
#' \item \code{VIP} VIP score
#' \item \code{R2Y} R2Y of the fitted K-OPLS model}
fun_outputs <- function(s) {
  # Each helper is called once and its paired results reused
  # (the original called fun_Sum_of_Squares and fun_block_saliences twice each)
  ssq_rss <- fun_Sum_of_Squares(s)
  s$outputs$SSQ <- ssq_rss[[1]]
  s$outputs$RSS <- ssq_rss[[2]]
  saliences <- fun_block_saliences(s)
  s$outputs$block_saliences <- saliences[[1]]
  s$outputs$block_saliences_norm <- saliences[[2]]
  # The following depend on block_saliences_norm being set above
  s$outputs$Most_influent_factor <- fun_Most_influent_factor(s)
  s$outputs$RSR <- fun_RSR(s)
  s$outputs$x_scores <- fun_xscores(s)
  s$outputs$x_loadings <- fun_xloadings(s)
  s$outputs$y_loadings <- fun_yloadings(s)
  s$outputs$SSa <- fun_SSa(s)
  s$outputs$VIP <- fun_VIP(s)
  s$outputs$R2Y <- s$kOPLS$R2Yhat[2]
  return(s)
}
#' @title AMOPLS wrapper
#' @description Wrapper function to process all the steps for the AMOPLS model :
#' pre-processing, ANOVA decomposition, ANOVA-PCA, multiblock clustering and
#' K-OPLS fitting.
#'
#' @param Data List of 2 numeric matrices - Data$dataset : raw data; Data$factors : factors matrix
#' @param equation.elements String with column indices containing factors and interactions to study; ex: "1,2,12"
#' @param scaling Should scaling be performed : 'TRUE' or 'FALSE'
#' @param only.means.matrix Should the means matrix only be returned : 'TRUE' or 'FALSE'
#' @param use.previous.model Should a previous model be used :'TRUE' or 'FALSE'
#' @param Nb_compo_ortho Number of orthogonal component
#' @param perm_t ... to permute
#'
#' @return \code{s} List containing all the information about the AMOPLS model, organized in 6 groups :
#' \itemize{
#' \item \code{general} General information about the parameters
#' \item \code{decompo_ANOVA} Outcomes from the ANOVA decomposition : experimental submatrices (+ residuals) and svd
#' \item \code{ANOVA_PCA} Outcomes from the ANOVA-PCA (for ANOVA-PCA model)
#' \item \code{Multiblock} Outcomes from the Multiblock_clustering function : multiblock X and Y-matrices
#' \item \code{kOPLS} Outcomes from the kOPLS Model - cf kopls::koplsModels
#' \item \code{outcomes} Main outcomes from AMOPLS - cf fun_outputs for details}
fun_AMOPLS <- function(Data,
                       equation.elements = "1",
                       scaling = FALSE,
                       only.means.matrix = FALSE,
                       use.previous.model = NULL,
                       Nb_compo_ortho = 1,
                       perm_t = NULL) {
  # Step 1: centre/scale the data and set up the study design
  model <- fun_pre_processing(
    Data = Data,
    Nb_compo_ortho = Nb_compo_ortho,
    equation.elements = equation.elements,
    scaling = scaling,
    only.means.matrix = only.means.matrix
  )
  # Step 2: ANOVA decomposition into effect submatrices + residuals
  model <- fun_decompo_ANOVA(
    s = model,
    only.means.matrix = only.means.matrix,
    perm_t = perm_t
  )
  # Step 3: PCA of each residual-augmented effect matrix
  model <- fun_ANOVA_PCA(s = model)
  # Step 4: multiblock clustering (W_mat, AMat, Y)
  model <- fun_Multiblock(s = model)
  # Step 5: fit the K-OPLS model on the multiblock matrices
  model$kOPLS <- fun_kopls(model$Multiblock$W_mat,
                           model$Multiblock$Y,
                           model$general$Nb_compo_pred,
                           model$general$Nb_compo_ortho)
  # fun_outputs is the most expensive task and is performed only on demand
  return(model)
}
#' Function to calculate permutation
#'
#' Fits one AMOPLS model on data where the effect \code{perm_t} has been
#' permuted, and extracts the permuted effect's RSR / RSS scores plus the
#' model R2Y for p-value computation.
#'
#' @param iter Number of iterations to compute
#' @inheritParams run_AMOPLS
#' @inheritParams fun_AMOPLS
#'
fun_temp_perm <- function(Data, equation.elements, scaling, Nb_compo_ortho, perm_t, iter) {
  P_Results <- fun_AMOPLS(Data = Data,
                          equation.elements = equation.elements,
                          scaling = scaling,
                          only.means.matrix = FALSE,
                          use.previous.model = NULL,
                          Nb_compo_ortho = Nb_compo_ortho,
                          perm_t = perm_t)
  if (is.null(P_Results)) {return(NULL)}
  factors_element <- strsplit(P_Results$general$studied_factors, ",") %>% unlist()
  # BUGFIX: the original used which(perm_t %in% factors_element), a length-1
  # logical, so column 1 was always selected whatever effect was permuted.
  # We want the position of perm_t among the studied effects.
  # NOTE(review): assumes the RSR/RSS column order matches the order of the
  # effects in `studied_factors` - confirm for designs where the string is not
  # already in sorted evaluation order.
  perm_col <- which(factors_element %in% perm_t)
  output <- data.table(Iter = iter,
                       RSR = fun_RSR(P_Results)[, perm_col, with = F],
                       RSS = fun_Sum_of_Squares(P_Results)[[2]][, perm_col, with = F],
                       R2Y = P_Results$kOPLS$R2Yhat %>% {.[length(.)]})
  setnames(output, c("Iter", "RSR", "RSS", "R2Y"))
  return(output)
}
#' Wrapper to run AMOPLS models
#'
#' This function is a wrapper to perform AMOPLS model with permutation and
#' subsampling if data are unbalanced.
#'
#' @param scaling Logical for unit variance scaling of the data before running the model
#' @param nb_perm Number of permutation for each effect to compute p-values
#' @param nb_compo_orthos Number of orthogonal component to model
#' @param parallel Number of processes to run in parallel using future and furrr (FALSE or NULL to disable)
#' @param debug Logical to run a logger with debug messages
#' @param datamatrix The datamatrix with observations id in the first column (observations x variables)
#' @param samplemetadata The observations metadata with groups and levels (the first column must be the observations id)
#' @param factor_names Name of the column in samplemetadata to use for effect decomposition
#' @param interaction_level Order of interaction to consider (0 = pure effect only, 1 first order interaction between each effect)
#' @param subsampling Number of subsampling to perform if the data are unbalanced
#'
#' @import data.table
#' @import magrittr
#' @importFrom stats median
#'
#' @return \code{s} List containing all the information about the AMOPLS model, organized in 2 groups :
#' \itemize{
#' \item \code{general} General information about the parameters
#' \item \code{output} Main outcomes from AMOPLS}
#' @export
#'
#' @examples
#'result <- run_AMOPLS(datamatrix = data_Ruiz2017$datamatrix,
#' samplemetadata = data_Ruiz2017$samplemetadata,
#' factor_names = c("Exposure time", "Dose"))
run_AMOPLS <- function(datamatrix,
                       samplemetadata,
                       factor_names,
                       interaction_level = 1,
                       scaling = TRUE,
                       nb_perm = 100,
                       subsampling = NULL,
                       nb_compo_orthos = 1:3,
                       parallel = FALSE,
                       debug = FALSE) {
  # datamatrix = data_Ruiz2017$datamatrix
  # samplemetadata = data_Ruiz2017$samplemetadata
  # factor_names = c("Exposure time", "Dose")
  ## Dummy bindings to silence R CMD check notes about NSE variables.
  Effect <- Iter <- Iteration <- Ortho <- Ortho_nb <- PermNb <- R2Y <- `p-value` <- R2Y_pval <- V_scores <- V_sign <- availableWorkers <- combn <- `cor.test` <- density <- id <- layout <- plot <- rn <- str <- tp_calc <- value <- variable <- variableid <- x <- y <- NULL
  if (debug) {
    requireNamespace("logger")
    logger::log_appender(logger::appender_console)
    ## Fix: the original passed a local placeholder `DEBUG` (NULL) here;
    ## the intended value is the logger package's DEBUG threshold constant.
    logger::log_threshold(logger::DEBUG)
  }
  if (debug) {logger::log_info("Starting function")}
  ## Format data
  ### Check all column are numeric
  Data <- fun_load_data(datamatrix, samplemetadata, factor_names)
  factor_index <- which(colnames(Data$factors) %in% factor_names)
  ## Generate formula with interaction levels
  equation.elements <- sapply(0:interaction_level, function(int) {
    temp <- utils::combn(factor_index, int + 1, simplify = FALSE)
    sapply(temp, paste, collapse = "")
  }) %>% unlist() %>% paste(., collapse = ",")
  factors_element <- strsplit(equation.elements, ",") %>% unlist()
  nb_studied_factors <- length(factors_element)
  if (debug) {logger::log_info("Factors to study: {paste(factors_element, collapse = ', ')}")}
  ## Check if data are balanced
  if (!fun_is_balanced(Data, factor_names = factor_names, interaction_level = interaction_level)) {
    if (is.null(subsampling)) {
      stop("Data are unbalanced, set the subsampling argument to run subsampling stratification.")
    } else {
      message("Data are unbalanced, running stratified subsampling.")
      subsampling <- as.numeric(subsampling)
    }
  } else {
    subsampling <- 1
  }
  ## Run original model
  if (debug) {logger::log_info("Calculate full model for {length(nb_compo_orthos)} orthogonal components: ")}
  res_subsampling <- lapply(seq_len(subsampling), function(zrf) {
    # zrf <- 1
    ## Balance the data only if subsampling is > 1
    if (subsampling > 1) {
      message("Run sub-sampling: ", zrf)
      temp_data <- fun_balance_data(Data)
    } else {
      temp_data <- Data
    }
    ## Unpermuted (reference) model, one per orthogonal-component setting
    result_original <- lapply(seq_along(nb_compo_orthos), function(x) {
      # x <- 1
      output <- fun_AMOPLS(Data = temp_data,
                           equation.elements = equation.elements,
                           scaling = scaling,
                           only.means.matrix = FALSE,
                           use.previous.model = NULL,
                           Nb_compo_ortho = nb_compo_orthos[[x]],
                           perm_t = NULL)
      if (is.null(output)) {stop("Resolve the collinearity problems in the data")} else {
        output <- fun_outputs(output)
      }
      if (debug) {logger::log_info("Ortho {nb_compo_orthos[x]}: R2Y={output$outputs$R2Y %>% formatC(., digits = 2)} Cp={ncol(output$outputs$block_saliences_norm)-1}")}
      output$outputs$summary <- data.table("Effect" = output$general$ee.names,
                                           Iter = 0,
                                           RSR = t(output$outputs$RSR),
                                           RSS = t(output$outputs$RSS),
                                           R2Y = t(output$kOPLS$R2Yhat %>% {.[length(.)]}))
      setnames(output$outputs$summary, c("Effect", "Iter", "RSR", "RSS", "R2Y"))
      return(output)
    })
    ## Application of AMOPLS for permuted data - for each factor + interaction - and calculation of the scores :
    ## Create iteration arguments
    iter_template <- CJ("Effect" = factors_element, "PermI" = 1:nb_perm, "Ortho" = nb_compo_orthos)
    iter_template[, Iter := 1:.N]
    apply_it <- nrow(iter_template)
    if (debug) {logger::log_info("Running {nb_perm} permutations for each factor and ortho cp: {nb_perm} x {nb_studied_factors} x {length(nb_compo_orthos)} = {nb_perm*nb_studied_factors*length(nb_compo_orthos)}")}
    if (!is.null(parallel) && !isFALSE(parallel)) {
      ## Fix: "furr" -> "furrr" in the messages, and each package is checked
      ## once with quietly = TRUE (the original called requireNamespace twice).
      if (!requireNamespace("future", quietly = TRUE)) {
        stop("You need to install future and furrr packages to use parallelisation.")
      }
      if (!requireNamespace("furrr", quietly = TRUE)) {
        stop("You need to install future and furrr packages to use parallelisation.")
      }
      if (is.numeric(parallel)) {
        ## Fix: future::multiprocess is deprecated/defunct; multisession is
        ## the documented replacement.
        future::plan(future::multisession, workers = parallel)
      } else {
        future::plan(future::multisession, workers = (length(future::availableWorkers()) - 1))
      }
      temp <- furrr::future_map(1:apply_it, function(x) {
        # x <- 1
        temp_effect <- iter_template[Iter == x, Effect]
        temp_ortho <- iter_template[Iter == x, Ortho]
        P_Results <- fun_temp_perm(Data = temp_data,
                                   equation.elements = equation.elements,
                                   scaling = scaling,
                                   Nb_compo_ortho = temp_ortho,
                                   perm_t = temp_effect,
                                   iter = x)
        return(P_Results)
      }, .progress = TRUE)
    } else {
      pb <- progress::progress_bar$new(format = "[:bar] :current/:total (:percent) :eta", total = apply_it)
      pb$tick(0)
      temp <- lapply(1:apply_it, function(x) {
        # x <- 1
        # message(x)
        pb$tick()
        if (debug) {logger::log_trace("Perm: {x}, Effect: {iter_template[x, Effect]}, Ortho: {iter_template[x, Ortho]}")}
        temp_effect <- iter_template[Iter == x, Effect]
        temp_ortho <- iter_template[Iter == x, Ortho]
        P_Results <- fun_temp_perm(Data = temp_data,
                                   equation.elements = equation.elements,
                                   scaling = scaling,
                                   Nb_compo_ortho = temp_ortho,
                                   perm_t = temp_effect,
                                   iter = x)
        return(P_Results)
      })
    }
    temp_dt <- temp %>%
      rbindlist() %>%
      merge(., iter_template, by = "Iter")
    ## P-value calculation: fraction of permuted statistics >= original
    output_pval <- lapply(seq_along(nb_compo_orthos), function(cp) {
      # cp <- 1
      lapply(c("RSS", "RSR", "R2Y"), function(x) {
        # x <- "R2Y"
        lapply(iter_template[, unique(Effect)], function(effect) {
          # effect <- iter_template[, unique(Effect)][[2]]
          subset_perm <- temp_dt[Ortho == nb_compo_orthos[[cp]] & Effect == effect, x, with = FALSE][[1]]
          subset_ori <- unlist(result_original[[cp]]$outputs$summary[Effect == effect, x, with = FALSE])
          temp_subset <- length(which(subset_perm >= subset_ori))/length(subset_perm)
          # A zero p-value is reported as the resolution limit 1/nb_perm
          if (temp_subset == 0) {temp_subset <- 1/length(subset_perm)}
          effect_name <- data.table(Effect_name = result_original[[cp]]$general$factor_names[which(factors_element %in% effect)])
          output <- data.table(effect, effect_name, temp_subset)
          setnames(output, c("Effect", "Effect_name", paste0(x, "_pvalue")))
          return(output)
        }) %>% rbindlist(use.names = TRUE)
      }) %>% {Reduce(function(z, w) {merge(z, w, by = c("Effect", "Effect_name"))}, .)}
    })
    output_pval <- lapply(output_pval, function(x) {
      x[, PermNb := nb_perm]
      x[, Effect := factor(Effect, levels = factors_element)]
      x[order(Effect)]
    })
    output <- mapply(function(x, y, z) {
      # x <- result_original[[1]]
      # y <- output_pval[[1]]
      x$outputs$Permutation_result <- list("summary" = y,
                                           "details" = z)
      return(x)
    }, result_original, output_pval, split(temp_dt, temp_dt$Ortho), SIMPLIFY = FALSE)
    names(output) <- paste0("orthoNb_", nb_compo_orthos)
    return(output)
  })
  ## Fix: also guard against parallel = NULL here -- the original condition
  ## (`!isFALSE(parallel)`) called future::plan() even when future was never
  ## loaded because parallelisation was disabled with NULL.
  if (!is.null(parallel) && !isFALSE(parallel)) {
    if (inherits(future::plan(), c("multisession", "multiprocess"))) {future::plan(future::sequential)}
  }
  ## Aggregate subsampled models using median as in Boccard et al., 2019
  ### Extract data to combine in each subsampled results
  output <- lapply(res_subsampling, function(w) {
    # w <- res_subsampling[[1]]
    lapply(w, function(z) {
      # z <- w[[1]]
      temp_dt <- z
      output <- list(
        "general" = temp_dt$general[c("data",
                                      "factors",
                                      "ssq",
                                      "Nb_compo_pred",
                                      "Nb_compo_ortho",
                                      "ee.names",
                                      "studied_factors",
                                      "equation.elements",
                                      "order.to.evaluate.ee",
                                      "factor_names",
                                      "factors_data")],
        "decompo_ANOVA" = temp_dt$decompo_ANOVA,
        "kOPLS" = list("R2Yhat" = temp_dt$kOPLS$R2Yhat %>% {.[length(.)]}),
        "output" = list(
          "x_loadings" = fun_xloadings(temp_dt),
          "x_scores" = fun_xscores(temp_dt),
          "block_saliences_norm" = fun_block_saliences(temp_dt)[[2]],
          "RSS" = fun_get_RSS(temp_dt),
          "RSR" = fun_get_RSR(temp_dt),
          "SSQ" = fun_Sum_of_Squares(temp_dt)[[2]],
          "Permutation_result" = temp_dt$outputs$Permutation_result["summary"],
          "VIP" = fun_VIP(temp_dt),
          "Summary" = fun_AMOPLS_summary(temp_dt)
        )
      )
      return(output)
    })
  })
  ## Combine the subsampled models for each orthogonal component
  results_combined <- lapply(1:unique(sapply(output, length)), function(z) {
    # z <- 1
    temp_orthon <- lapply(output, function(w) {w[[z]]})
    ## Calculate median
    ### Need to check scores and loadings orientation of each component (may be arbitrarly reversed between models)
    #### SCORES
    if (length(temp_orthon) <= 1) {
      x <- temp_orthon[[1]]
      output_scores <- data.table("id" = rownames(x$general$factors), x$output$x_scores) %>% as.matrix(., rownames = "id")
    } else {
      output_scores <- lapply(temp_orthon, function(x) {
        # x <- temp_orthon[[1]]
        data.table("id" = rownames(x$general$factors), x$output$x_scores)
      }) %>%
        rbindlist(use.names = TRUE, fill = TRUE, idcol = "rn")
      ## Check component correlation between each models
      scores_sign <- sapply(1:(ncol(output_scores)-2), function(x) {
        # x <- 1
        var_col <- names(output_scores)[x+2]
        col_sel <- c("id", "rn", var_col)
        output_scores[, col_sel, with = FALSE] %>% dcast(., id ~ rn, value.var = var_col) %>% {.[, -1][, lapply(.SD, function(z) {if(stats::cor.test(z, .[, 2][[1]])$estimate < 0) {return(-1)} else {return(1)}} %>% round(., 1))]}
      }) %>% as.data.table(keep.rownames = 'rn')
      scores_sign[, rn := as.numeric(rn)]
      output_scores <- lapply(1:(ncol(output_scores)-2), function(x) {
        # x <- 1
        var_col <- names(output_scores)[x+2]
        ## Reverse axes with negative correlation by component
        temp_merge <- merge(output_scores[, .(id, rn, "V_scores" = get(var_col))],
                            scores_sign[, .(rn, "V_sign" = get(var_col))], by = "rn")
        row_nb <- temp_merge[, .N]
        temp_merge[, tp_calc := ifelse(any(is.na(V_scores), is.na(V_sign)), NA, as.numeric(V_scores) * as.numeric(V_sign)), by = 1:row_nb]
        ## Calculate median
        output <- temp_merge[, median(tp_calc), by = "id"]
        setnames(output, c("id", var_col))
        return(output)
      }) %>% {Reduce(function(x, y) {merge(x, y, by = "id", all = TRUE)}, .)} %>%
        {.[rownames(Data$dataset)]} %>%
        as.matrix(rownames = "id")
    }
    ## LOADINGS
    if (length(temp_orthon) <= 1) {
      x <- temp_orthon[[1]]
      output_loadings <- data.table("id" = colnames(x$general$data), x$output$x_loadings) %>% as.matrix(., rownames = "id")
    } else {
      output_loadings <- lapply(temp_orthon, function(x) {
        # x <- temp_orthon[[1]]
        data.table("id" = colnames(x$general$data), x$output$x_loadings)
      }) %>%
        rbindlist(use.names = TRUE, fill = TRUE, idcol = "rn")
      ## Check component correlation between each models
      loadings_sign <- sapply(1:(ncol(output_loadings)-2), function(x) {
        # x <- 1
        var_col <- names(output_loadings)[x+2]
        col_sel <- c("id", "rn", var_col)
        output_loadings[, col_sel, with = FALSE] %>% dcast(., id ~ rn, value.var = var_col) %>% {.[, -1][, lapply(.SD, function(z) {if(stats::cor.test(z, .[, 2][[1]])$estimate < 0) {return(-1)} else {return(1)}} %>% round(., 1))]}
      }) %>%
        as.data.table(keep.rownames = 'rn')
      loadings_sign[, rn := as.numeric(rn)]
      output_loadings <- lapply(1:(ncol(output_loadings)-2), function(x) {
        # x <- 1
        var_col <- names(output_loadings)[x+2]
        ## Reverse axes with negative correlation by component
        temp_merge <- merge(output_loadings[, .(id, rn, "V_scores" = get(var_col))],
                            loadings_sign[, .(rn, "V_sign" = get(var_col))], by = "rn")
        row_nb <- temp_merge[, .N]
        temp_merge[, tp_calc := ifelse(any(is.na(V_scores), is.na(V_sign)), NA, as.numeric(V_scores) * as.numeric(V_sign)), by = 1:row_nb]
        ## Calculate median
        output <- temp_merge[, median(tp_calc), by = "id"]
        setnames(output, c("id", var_col))
        return(output)
      }) %>% {Reduce(function(x, y) {merge(x, y, by = "id", all = TRUE)}, .)} %>%
        {.[colnames(Data$dataset)]} %>%
        as.matrix(rownames = "id")
    }
    output_saliences <- lapply(temp_orthon, function(x) {
      # x <- temp_orthon[[3]]
      data.table("id" = c(x$general$factor_names[x$general$order.to.evaluate.ee], "residual"), x$output$block_saliences_norm)
    }) %>%
      rbindlist(use.names = TRUE, fill = TRUE) %>%
      {.[, lapply(.SD, median), keyby = "id"]} %>%
      as.matrix(rownames = "id")
    output_RSS <- lapply(temp_orthon, function(x) {
      # x <- temp_orthon[[3]]
      x$output$RSS
    }) %>%
      rbindlist(use.names = TRUE, fill = TRUE) %>%
      {.[, lapply(.SD, median), keyby = c("Effect", "Effect Name")]}
    output_RSR <- lapply(temp_orthon, function(x) {
      # x <- temp_orthon[[3]]
      x$output$RSR
    }) %>%
      rbindlist(use.names = TRUE, fill = TRUE) %>%
      {.[, lapply(.SD, median), keyby = c("Effect", "Effect Name")]}
    output_SSQ <- lapply(temp_orthon, function(x) {
      # x <- temp_orthon[[3]]
      x$output$SSQ
    }) %>%
      rbindlist(use.names = TRUE, fill = TRUE) %>%
      {.[, lapply(.SD, median)]}
    output_Perm <- lapply(temp_orthon, function(x) {
      # x <- temp_orthon[[3]]
      x$output$Permutation_result$summary
    }) %>%
      rbindlist(use.names = TRUE, fill = TRUE) %>%
      {.[, lapply(.SD, median), keyby = c("Effect", "Effect Name")]}
    output_R2Y <- sapply(temp_orthon, function(x) {
      # x <- temp_orthon[[1]]
      x$kOPLS$R2Yhat
    }) %>%
      median()
    output_VIP <- lapply(temp_orthon, function(x) {
      # x <- temp_orthon[[1]]
      x$output$VIP
    }) %>%
      rbindlist(use.names = TRUE, fill = TRUE) %>%
      {.[, lapply(.SD, median), keyby = c("id")]}
    output_Summary <- lapply(temp_orthon, function(x) {
      # x <- temp_orthon[[1]]
      x$output$Summary
    }) %>%
      rbindlist(use.names = TRUE, fill = TRUE) %>%
      {.[, lapply(.SD, function(x) {median(x, na.rm = TRUE)}), keyby = c("Effect", "Effect Name")]}
    #### DEV
    output_residuals <- lapply(temp_orthon, function(x) {
      # x <- temp_orthon[[1]]
      x$decompo_ANOVA$residuals %>% as.data.table(keep.rownames = "sampleid")
    }) %>%
      rbindlist(use.names = TRUE, fill = TRUE) %>%
      {.[, lapply(.SD, function(x) {median(x, na.rm = TRUE)}), keyby = c("sampleid")]} %>%
      {as.data.frame(., row.names = "sampleid")}
    output_decompoANOVA <- lapply(1:(length(temp_orthon[[1]]$decompo_ANOVA) - 1), function(y) {
      # y <- 1
      lapply(temp_orthon, function(x) {
        # x <- temp_orthon[[1]]
        x$decompo_ANOVA %>%
          {.[which(!names(.) %in% "residuals")]} %>% .[[y]] %>% .[["means.matrix_res"]] %>%
          as.data.table(keep.rownames = "sampleid")
      }) %>%
        rbindlist(use.names = TRUE, fill = TRUE) %>%
        {.[, lapply(.SD, function(x) {median(x, na.rm = TRUE)}), keyby = c("sampleid")]} %>%
        as.data.frame(., row.names = "sampleid")
    })
    names(output_decompoANOVA) <- names(temp_orthon[[1]]$decompo_ANOVA) %>% {.[!. == "residuals"]}
    factors_data <- fun_factors_data(Data$factors, temp_orthon[[1]]$general$studied_factors)
    return(
      list(
        "general" = c(Data, list("factors_data" = factors_data), temp_orthon[[1]]$general %>% {.[!names(.) %in% c("dataset", "factors")]}),
        "decompo_ANOVA" = c(list("residuals" = output_residuals), output_decompoANOVA),
        "kOPLS" = list("R2Yhat" = output_R2Y),
        "output" = list(
          "x_loadings" = output_loadings,
          "x_scores" = output_scores,
          "block_saliences_norm" = output_saliences,
          "RSS" = output_RSS,
          "RSR" = output_RSR,
          "SSQ" = output_SSQ,
          "R2Y" = output_R2Y,
          "Permutation_result" = output_Perm,
          "Summary" = output_Summary,
          "VIP" = output_VIP
        )
      )
    )
  })
  names(results_combined) <- names(output[[1]])
  return(results_combined)
}
#' @title Score plot
#' @description Score plot of the x-scores from AMOPLS results, according to the selected factor and components.
#'
#' @param fact Studied factor
#' @param t_1 First component to project the x-scores (default: component with the highest block salience for `fact`)
#' @param t_2 Second component to project the x-scores (default: second-best component for `fact`)
#' @inheritParams fun_outputs
#'
#' @return 2D score plot of the x-scores from AMOPLS results according to the 2 selected components. Every datapoint is colored according to its factor-level. Every color group is surrounded by a convex hull.
#'
#' @import ggplot2
#' @import magrittr
#' @import data.table
#' @import ggpubr
#'
#' @references From grDevices chull
#' @export
fun_score_plot <- function(s, fact, t_1 = NULL, t_2 = NULL) {
  # s <- result_optimal
  # fact <- "Dose"
  x <- y <- NULL
  nb <- which(rownames(s$output$block_saliences_norm) == fact)
  nb_compo <- (s$general$Nb_compo_pred + 1)
  ## Default components: the two predictive components carrying the largest
  ## normalized block saliences for `fact` (the column at `nb_compo` is
  ## excluded from the max).
  if (all(is.null(t_1), is.null(t_2))) {
    t_1 <- which(s$output$block_saliences_norm[nb, ] == max(s$output$block_saliences_norm[nb,-c(nb_compo)]))
    t_2 <- which(s$output$block_saliences_norm[nb, ] == max(s$output$block_saliences_norm[nb,-c(t_1, nb_compo)]))
  }
  temp_scores <- as.data.table(s$output$x_scores, keep.rownames = "sampleid")[, c(1, t_1+1, t_2+1), with = FALSE]
  temp_plot <- data.table(temp_scores, s$general$factors_data)
  setnames(temp_plot, 2:3, c("x", "y"))
  ## Convex hulls :
  find_hull <- function(df) {df[grDevices::chull(df$x, df$y),]}
  # NOTE(review): `|` keeps rows where at least one coordinate is non-NA;
  # chull() likely needs BOTH non-NA (`&`) -- confirm before changing.
  hulls <- temp_plot[!is.na(x) | !is.na(y)][, find_hull(.SD), by = fact]
  ## Fix: the original called labs() twice and the second call silently
  ## overrode the first (except for `color`); the two calls are merged here,
  ## producing the same final plot with no dead code.
  sp <- temp_plot %>%
    ggplot2::ggplot(aes(x, y, color = factor(get(fact)))) +
    geom_vline(xintercept = 0, linetype = 2) +
    geom_hline(yintercept = 0, linetype = 2) +
    geom_point() +
    geom_polygon(
      data = hulls,
      alpha = 0.2,
      aes(fill = factor(get(fact))),
      show.legend = FALSE
    ) +
    theme_bw() +
    labs(
      title = "AMOPLS score plot",
      subtitle = paste0("Colored by factor: ", fact),
      x = paste("tp", t_1),
      y = paste("tp", t_2),
      color = fact
    )
  return(sp)
}
#' Generate optimal score plots
#'
#' Builds one score plot per studied factor, each using the two components
#' with the highest block saliences for that factor, and arranges them in a
#' near-square grid.
#'
#' @inheritParams fun_outputs
#'
#' @import ggplot2
#' @import magrittr
#' @import data.table
#' @import ggpubr
#'
#' @export
#'
fun_plot_optimal_scores <- function(s) {
  # s <- result_optimal
  n_factors <- length(s$general$factor_names)
  # Square-ish grid large enough for every factor
  grid_dim <- ceiling(sqrt(n_factors))
  plot_list <- lapply(s$general$factor_names, function(fct) {
    fun_score_plot(s, fct, NULL, NULL) + labs(title = NULL)
  })
  ggpubr::ggarrange(plotlist = plot_list, ncol = grid_dim, nrow = grid_dim, align = "hv")
}
#' Loading plots
#'
#' Loading plot according to the selected components, highlighting the top
#' VIP variables for the selected factor.
#'
#' @param fact Studied factor
#' @param t_1 First component to project the loadings (default: best component for `fact`)
#' @param t_2 Second component to project the loadings (default: second-best component)
#' @param VIP_nb Number of VIP to highlight in red (top n; default 10, or fewer if fewer variables)
#' @inheritParams fun_outputs
#'
#' @import ggplot2
#' @import magrittr
#' @import data.table
#'
#' @return 2D loading plot from the AMOPLS results.
#' @export
fun_loading_plot <- function(s,
                             fact,
                             t_1 = NULL,
                             t_2 = NULL,
                             VIP_nb = NULL) {
  variableid <- x <- y <- id <- NULL
  nb <- which(rownames(s$output$block_saliences_norm) == fact)
  nb_compo <- (s$general$Nb_compo_pred + 1)
  ## Default components: highest block saliences for the factor
  if (all(is.null(t_1), is.null(t_2))) {
    t_1 <- which(s$output$block_saliences_norm[nb, ] == max(s$output$block_saliences_norm[nb,-c(nb_compo)]))
    t_2 <- which(s$output$block_saliences_norm[nb, ] == max(s$output$block_saliences_norm[nb,-c(t_1, nb_compo)]))
  }
  temp_plot <- as.data.table(s$output$x_loadings, keep.rownames = "variableid")[, c(1, t_1+1, t_2+1), with = FALSE]
  setnames(temp_plot, 2:3, c("x", "y"))
  if (is.null(VIP_nb)) {
    ## If null, show 10 variable names if available
    if (temp_plot[, length(unique(variableid))] > 10) {
      VIP_nb <- 10
    } else {
      VIP_nb <- temp_plot[, length(unique(variableid))]
    }
  }
  sp <- temp_plot %>% {
    ggplot2::ggplot(., aes(x, y)) +
      geom_vline(xintercept = 0, linetype = 2) +
      geom_hline(yintercept = 0, linetype = 2) +
      geom_point(alpha = 0.6) +
      # Highlight and label the top VIP_nb variables for the selected factor
      geom_point(data = .[variableid %in% s$output$VIP[, c("id", fact), with = FALSE][order(-get(fact))][1:VIP_nb, id]], color = "red") +
      geom_text(data = .[variableid %in% s$output$VIP[, c("id", fact), with = FALSE][order(-get(fact))][1:VIP_nb, id]], aes(label = variableid), color = "red", vjust = -0.5) +
      labs(
        ## Fix: the title previously said "AMOPLS score plot" (copy/paste
        ## from fun_score_plot) although this is a loading plot.
        title = "AMOPLS loading plot",
        subtitle = paste0("Factor: ", fact),
        caption = paste0("In red: the top ", VIP_nb, " VIPs"),
        x = paste("tp", t_1),
        y = paste("tp", t_2)
      ) +
      theme_bw()
  }
  return(sp)
}
#' Generate optimal loading plot
#'
#' Builds one loading plot per studied factor, each using the two best
#' components for that factor, and arranges them in a near-square grid.
#'
#' @inheritParams fun_loading_plot
#' @inheritParams fun_outputs
#'
#' @import ggplot2
#' @import magrittr
#' @import data.table
#' @import ggpubr
#' @export
fun_plot_optimal_loadings <- function(s, VIP_nb = NULL) {
  # s <- result_optimal
  n_factors <- length(s$general$factor_names)
  # Square-ish grid large enough for every factor
  grid_dim <- ceiling(sqrt(n_factors))
  plot_list <- lapply(s$general$factor_names, function(fct) {
    fun_loading_plot(s, fct, NULL, NULL, VIP_nb = VIP_nb) + labs(title = NULL)
  })
  ggpubr::ggarrange(plotlist = plot_list, ncol = grid_dim, nrow = grid_dim, align = "hv")
}
#' Plot VIP
#'
#' This function plots the VIP2 of all variables for each factor, ordered by
#' decreasing importance for `main_factor`.
#'
#' @param main_factor String to set the main factor used to order the plot
#' @param debugL Boolean to activate the debug mode
#' @inheritParams fun_outputs
#' @inheritParams fun_loading_plot
#'
#' @import ggplot2
#' @import magrittr
#' @import data.table
#'
#' @export
fun_plot_VIPs <- function(s, main_factor = NULL, VIP_nb = NULL, debugL = FALSE) {
  # s <- result_optimal
  # main_factor <- "Dose"
  str <- id <- variable <- value <- NULL
  temp_factors <- colnames(s$output$VIP[, -1])
  if (is.null(main_factor)) {
    main_factor <- temp_factors[[1]]
  } else if (!main_factor %in% temp_factors) {stop("The main_factor wasn't found in the dataset")}
  data_vips <- as.data.table(s$output$VIP)
  nb_variables <- data_vips[, .N]
  if (is.null(VIP_nb)) {
    VIP_nb <- nb_variables
  }
  if (identical(VIP_nb, "Force")) {
    ## Fix: VIP_nb = "Force" previously crashed at `1:VIP_nb` (character
    ## cannot be coerced); it now plots every variable as documented.
    VIP_nb <- nb_variables
  } else if (VIP_nb >= 200) {
    message("There are a high number of variables (", VIP_nb, ") filter the 200 most significant. To force all variable, set VIP_nb argument to 'Force'.")
    VIP_nb <- 200
  }
  ## Never request more rows than available (avoids NA padding rows)
  VIP_nb <- min(VIP_nb, nb_variables)
  if (debugL) {message("Main fac: ", main_factor)}
  if (debugL) {message("Main fac (str): ", str(main_factor))}
  if (debugL) {message("VIP nb: ", VIP_nb)}
  if (debugL) {message("data_vips colnames: ", paste(names(data_vips), collapse = ", "))}
  ## Filter the most significant variables for the considered factor
  if (debugL) {message("data_vips (class): ", class(data_vips))}
  VIP <- data_vips[order(-get(main_factor))][1:VIP_nb]
  # Reorder variables order by decreasing order
  VIP[, id := factor(id, levels = unique(VIP$id))]
  # Melt data
  plot_data <- VIP %>% melt(id.vars = "id")
  ## Set factor order (first is factor of interest)
  plot_data[, variable := factor(variable, levels = rev(union(main_factor, temp_factors)))]
  ggplot2::ggplot(data = plot_data, aes(x = id, y = value, fill = variable)) +
    geom_bar(stat = "identity", col = 'black') +
    labs(title = "Variable Important in the Projection (VIP2)",
         subtitle = paste0("By decreasing order of importance for factor: ", main_factor),
         x = '',
         y = bquote(~VIP^2)) +
    theme_bw() +
    theme(legend.position = c("right"),
          axis.text.x = element_text(angle = 60, hjust = 1),
          legend.title = element_blank(),
          legend.key.size = unit(0.8, "cm"),
          legend.text = element_text(size = 11, hjust = 0.3, face = 'bold'))
}
#' Function to get RSS
#'
#' Extracts the residual structure ratio numerator (RSS) per effect from a
#' raw AMOPLS result and returns it with human-readable effect names.
#'
#' @inheritParams fun_outputs
#'
#' @return A data.table with columns "Effect", "Effect Name" and "RSS".
#' @export
fun_get_RSS <- function(s) {
  Effect <- NULL
  # Transpose so effects become rows; row names land in column "Effect".
  temp <- fun_Sum_of_Squares(s)[[2]] %>% t() %>% {as.data.table(., keep.rownames = "Effect")}
  # NOTE(review): `ee.names == Effect` is a vectorized comparison between the
  # effect codes and the Effect column; it relies on both vectors being in
  # the same order and of compatible lengths (incl. the residuals row) --
  # confirm against fun_Sum_of_Squares()'s column layout.
  temp[, "Effect Name" := c(s$general$factor_names, "residuals")[s$general$ee.names == Effect]]
  setnames(temp, c("Effect", "RSS", "Effect Name"))
  setcolorder(temp, c("Effect", "Effect Name", "RSS"))
  return(temp)
}
#' Function to get RSR
#'
#' Extracts the relative salience ratio (RSR) per effect from a raw AMOPLS
#' result and returns it with human-readable effect names.
#'
#' @inheritParams fun_outputs
#'
#' @return A data.table with columns "Effect", "Effect Name" and "RSR".
#' @export
fun_get_RSR <- function(s) {
  Effect <- NULL
  # Transpose so effects become rows; row names land in column "Effect".
  temp <- fun_RSR(s) %>% t() %>% {as.data.table(., keep.rownames = "Effect")}
  # NOTE(review): same aligned-order assumption as in fun_get_RSS() -- the
  # vectorized `ee.names == Effect` comparison must match row for row.
  temp[, "Effect Name" := c(s$general$factor_names, "residuals")[s$general$ee.names == Effect]]
  setnames(temp, c("Effect", "RSR", "Effect Name"))
  setcolorder(temp, c("Effect", "Effect Name", "RSR"))
  return(temp)
}
#' Function to get normalized block contribution
#'
#' Returns the normalized block saliences of a raw AMOPLS result (note:
#' reads `s$outputs`, i.e. the per-model outputs from fun_outputs(), not the
#' combined `s$output` structure) as a data.table with one row per effect
#' and one column per predictive (Tp) and orthogonal (To) component.
#'
#' @inheritParams fun_outputs
#'
#' @return A data.table with columns "Effect", "Effect Name", Tp1..TpN, To1..ToM.
#' @export
fun_get_blockcontrib <- function(s) {
  Effect <- NULL
  temp <- s$outputs$block_saliences_norm %>% as.data.table() %>% {data.table("Effect" = s$general$ee.names, .)}
  # NOTE(review): vectorized `ee.names == Effect` comparison -- assumes the
  # salience rows are in ee.names order; confirm for the residuals row.
  temp[, "Effect Name" := c(s$general$factor_names, "residuals")[s$general$ee.names == Effect]]
  setcolorder(temp, c("Effect", "Effect Name"))
  setnames(temp, c("Effect", "Effect Name", paste0("Tp", 1:s$general$Nb_compo_pred), paste0("To", 1:s$general$Nb_compo_ortho)))
  return(temp)
}
#' Function to get permutation results
#'
#' Returns the permutation summary table with human-readable column names.
#'
#' @inheritParams fun_outputs
#'
#' @return A data.table with columns "Effect", "Effect Name", the three
#'   permutation p-values and the number of permutations ("PermNb").
#' @export
fun_get_perm <- function(s) {
  ## Fix: copy() first -- setnames() modifies a data.table by reference, so
  ## the original silently renamed the columns of the table stored inside
  ## the caller's `s` object.
  temp <- data.table::copy(s$outputs$Permutation_result$summary)
  setnames(temp, c("Effect", "Effect Name", "RSS p-value", "RSR p-value", "R2Y p-value", "PermNb"))
  return(temp)
}
#' Summary of AMOPLS results
#'
#' This function retrieves different levels of summary from the output of AMOPLS.
#'
#' @param type String to select the summary to return (All, RSS, RSR, Permutation or Block contrib)
#' @inheritParams fun_outputs
#'
#' @export
fun_AMOPLS_summary <- function(s, type = c("All", "RSS", "RSR", "Permutation", "Block contrib")) {
  selected <- type[[1]]
  if (selected == "RSS") {
    return(fun_get_RSS(s))
  }
  if (selected == "RSR") {
    return(fun_get_RSR(s))
  }
  if (selected == "Permutation") {
    return(fun_get_perm(s))
  }
  if (selected == "Block contrib") {
    return(fun_get_blockcontrib(s))
  }
  # Default ("All" or any other value): merge the four tables by effect.
  tables <- list(fun_get_RSS(s),
                 fun_get_RSR(s),
                 fun_get_perm(s),
                 fun_get_blockcontrib(s))
  Reduce(function(x, y) {merge(x, y, by = c("Effect", "Effect Name"), all = TRUE)}, tables)
}
### Remove MetStat dependency
#' Function from MetStat package
#'
#' Copy of the MetStat function to remove partial dependency. Centers and/or
#' scales the columns of a numeric matrix and returns the transformed data
#' together with the centering/scaling vectors that were applied.
#'
#' @param x.input The data matrix that needs to be scaled.
#' @param center Boolean (or numeric vector of column centers). If TRUE the data will also be centered per column (the mean of each column will become zero).
#' @param scale This argument defines which type of scaling is to be applied. With the default value of TRUE, the data is autoscaled ("stdev"). When set to "pareto", pareto scaling is applied. A numeric vector of column divisors is also accepted.
#' @param quietly Boolean. If TRUE, no intermediate text output concerning the centering and scaling methods is returned.
#'
#' @return A list with elements \code{data} (transformed matrix),
#'   \code{center.vector}, \code{scale.vector} and a textual \code{description}.
#' @export
MetStaT.ScalePip <- function (x.input, center = TRUE, scale = TRUE, quietly = FALSE) {
  ## Suppress coercion warnings, restoring the caller's setting even if the
  ## coercion below fails. Fix: the original left warn = -1 active when the
  ## coercion errored, and unconditionally reset warn to 0 afterwards,
  ## clobbering any non-default setting of the caller.
  old_warn <- options(warn = -1)
  on.exit(options(old_warn), add = TRUE)
  no.col.x.input <- ncol(x.input)
  if (is.null(no.col.x.input)) {
    # Plain vectors are treated as a single-column matrix
    no.col.x.input <- 1
  }
  tryCatch({
    x <- matrix(as.numeric(x.input), ncol = no.col.x.input)
  }, error = function(ex) {
    stop(ex)
  })
  colnames(x) <- colnames(x.input)
  rownames(x) <- rownames(x.input)
  # Re-enable warnings for the remainder of the function, as before
  options(old_warn)
  x.scaled <- list()
  nc <- ncol(x)
  ## Normalize center/scale arguments ("true"/"false" strings accepted)
  if (is.null(center))
    center <- FALSE
  if (is.character(center) && center == "true")
    center <- TRUE
  if (is.character(center) && center == "false")
    center <- FALSE
  if (is.character(scale) && scale == "true")
    scale <- TRUE
  if (is.character(scale) && scale == "false")
    scale <- FALSE
  center.description <- center
  if (is.logical(center)) {
    if (center) {
      center.description <- "Around mean. "
      center <- colMeans(x, na.rm = TRUE)
      x <- sweep(x, 2L, center, check.margin = FALSE)
    }
    else {
      x.scaled$description <- paste(x.scaled$description,
                                    "Not centered. ", sep = "")
      not.centered <- matrix(rep(0, nc), nrow = 1)
      colnames(not.centered) <- colnames(x)
      x.scaled$center.vector <- not.centered
    }
  }
  else if (is.numeric(center) && (length(center) == nc)) {
    center.description <- "Manual input by user used. "
    x <- sweep(x, 2L, center, check.margin = FALSE)
  }
  else {
    stop("length of 'center' must equal the number of columns of 'x'")
  }
  if (is.numeric(center)) {
    x.scaled$description <- paste(x.scaled$description, "Centered: ",
                                  center.description, sep = "")
    center <- matrix(center, nrow = 1)
    colnames(center) <- colnames(x)
    x.scaled$center.vector <- center
  }
  if (is.null(scale))
    scale <- FALSE
  if (is.logical(scale)) {
    if (scale) {
      # TRUE means autoscaling
      scale <- "stdev"
    }
  }
  scale.description <- scale
  if (is.logical(scale)) {
    x.scaled$description <- paste(x.scaled$description, "Not scaled. ",
                                  sep = "")
    not.scaled <- matrix(rep(1, nc), nrow = 1)
    colnames(not.scaled) <- colnames(x)
    x.scaled$scale.vector <- not.scaled
  }
  else if (is.character(scale)) {
    scale <- tolower(scale)
    if (scale == "stdev" || scale == "auto") {
      # Autoscaling: divide each column by its standard deviation
      f <- function(v) {
        v <- v[!is.na(v)]
        sqrt(sum(v^2)/max(1, length(v) - 1L))
      }
    }
    else if (scale == "pareto") {
      # Pareto scaling: divide by the square root of the standard deviation
      f <- function(v) {
        v <- v[!is.na(v)]
        sqrt(sqrt(sum(v^2)/max(1, length(v) - 1L)))
      }
    }
    else {
      ## Fix: an unknown method previously failed later with the cryptic
      ## error "object 'f' not found".
      stop("Unknown scaling method: ", scale)
    }
    scale <- apply(x, 2L, f)
    x <- sweep(x, 2L, scale, "/", check.margin = FALSE)
  }
  else if (is.numeric(scale) && length(scale) == nc) {
    scale.description <- "Manual input by user used."
    x <- sweep(x, 2L, scale, "/", check.margin = FALSE)
  }
  else {
    stop("length of 'scale' must equal the number of columns of 'x'")
  }
  if (is.numeric(scale)) {
    x.scaled$description <- paste(x.scaled$description, "Scaled: ",
                                  scale.description, ".", sep = "")
    scale <- matrix(scale, nrow = 1)
    colnames(scale) <- colnames(x)
    x.scaled$scale.vector <- scale
  }
  x.scaled$data <- x
  if (!quietly) {
    print(x.scaled$description)
  }
  x.scaled
}
#' Principal component analysis via singular value decomposition
#'
#' Copy of the MetStat PCA helper, kept here to remove the partial dependency.
#'
#' @param data A datamatrix (sample x variables)
#'
#' @return The `svd()` result list augmented with `var.explained` (proportion
#'   of variance carried by each component) and the score matrix `t`; the
#'   left singular vectors `u` are dropped after computing the scores.
#' @export
MetStat.PCA.Calculate <- function (data) {
  decomposition <- svd(data)
  # Squared singular values are proportional to the variance per component
  component_variance <- decomposition$d^2
  decomposition$var.explained <- component_variance / sum(component_variance)
  # Scores: U * diag(d)
  # NOTE(review): diag() on a length-1 `d` builds an identity matrix of that
  # size -- same single-component edge case as the original.
  decomposition$t <- decomposition$u %*% diag(decomposition$d)
  decomposition$u <- NULL
  decomposition
}
#' Custom koplsModel function with tol param
#'
#' Copy of kopls::koplsModel exposing the `tol` argument of solve(), to work
#' around the "system is computationally singular" error raised on
#' near-singular score cross-products.
#'
#' @inheritParams base::solve
#' @inheritParams kopls::koplsModel
#'
koplsModel_custom <- function(K, Y, A, nox, preProcK = "mc", preProcY = "mc", tol = 1e-20) {
  if (!requireNamespace("kopls", quietly = TRUE)) {
    stop("Package \"kopls\" needed for this function to work. Please install it using install_kopls()",
         call. = FALSE)
  }
  n <- ncol(K)
  I <- diag(rep(1, n))
  ## Optional mean-centering of the training kernel
  if (preProcK == "mc") {
    Kmc <- kopls::koplsCenterKTrTr(K)
  } else {
    Kmc <- K
  }
  ## `K` is reused as a (nox+1) x (nox+1) matrix-of-kernels holding the
  ## successively deflated kernel matrices.
  K <- matrix(list(), ncol = nox + 1, nrow = nox + 1)
  K[1, 1] <- list(Kmc)
  Y.old <- Y
  scale.params <- list()
  if (preProcY == "mc" | preProcY == "uv" | preProcY == "pareto") {
    scale.params <- kopls::koplsScale(Y, center = "mc", scale = ifelse(preProcY == "mc", "none", preProcY))
    Y <- scale.params$x
  }
  to <- list()
  co <- list()
  so <- list()
  toNorm <- list()
  Tp <- list()
  Cp <- list()
  Bt <- list()
  ## Predictive weights from the SVD of Y' K Y
  tmp <- svd(t(Y) %*% K[1, 1][[1]] %*% Y, nu = A, nv = A)
  Cp <- tmp$u
  if (A > 1) {
    Sp <- diag(tmp$d[1:A])
    Sps <- diag(tmp$d[1:A]^(-1/2))
  } else {
    Sp <- tmp$d[1]
    Sps <- tmp$d[1]^(-1/2)
  }
  Up <- Y %*% Cp
  ## Extract `nox` Y-orthogonal components, deflating the kernel each time
  if (nox > 0) {
    for (i in 1:nox) {
      Tp[[i]] <- t(K[1, i][[1]]) %*% Up %*% Sps
      solve_res <- solve(t(Tp[[i]]) %*% Tp[[i]], tol = tol)
      Bt[[i]] <- solve_res %*% t(Tp[[i]]) %*% Up
      tmp <- svd(t(Tp[[i]]) %*% (K[i, i][[1]] - Tp[[i]] %*% t(Tp[[i]])) %*% Tp[[i]], nu = 1, nv = 1)
      co[[i]] <- tmp$u
      so[[i]] <- tmp$d[1]
      to[[i]] <- (K[i, i][[1]] - Tp[[i]] %*% t(Tp[[i]])) %*% Tp[[i]] %*% co[[i]] %*% so[[i]]^(-1/2)
      toNorm[[i]] <- c(sqrt(t(to[[i]]) %*% to[[i]]))
      to[[i]] <- to[[i]]/toNorm[[i]]
      K[1, i + 1][[1]] <- K[1, i][[1]] %*% (I - to[[i]] %*% t(to[[i]]))
      K[i + 1, i + 1][[1]] <- (I - to[[i]] %*% t(to[[i]])) %*% K[i, i][[1]] %*% (I - to[[i]] %*% t(to[[i]]))
    }
  }
  Tp[[nox + 1]] <- t(K[1, nox + 1][[1]]) %*% Up %*% Sps
  ## Fix: the final solve() now also honours `tol` -- the original only
  ## passed it inside the deflation loop, so this call could still fail with
  ## the very singularity error the wrapper exists to avoid.
  Bt[[nox + 1]] <- solve(t(Tp[[nox + 1]]) %*% Tp[[nox + 1]], tol = tol) %*% t(Tp[[nox + 1]]) %*% Up
  sstotY <- sum(sum(Y * Y))
  ## Fix: local renamed from `F` (which masked the FALSE shorthand) to Fmat
  Fmat <- Y - Up %*% t(Cp)
  R2Y <- 1 - sum(sum(Fmat * Fmat))/sstotY
  EEprime <- K[nox + 1, nox + 1][[1]] - Tp[[nox + 1]] %*% t(Tp[[nox + 1]])
  sstotK <- sum(diag(K[1, 1][[1]]))
  R2X <- NULL
  R2XO <- NULL
  R2XC <- NULL
  R2Yhat <- NULL
  ## Cumulative explained-variance statistics per component
  for (i in 1:(nox + 1)) {
    rss <- sum(diag(K[i, i][[1]] - Tp[[i]] %*% t(Tp[[i]])))
    R2X <- c(R2X, 1 - rss/sstotK)
    rssc <- sum(diag(K[1, 1][[1]] - Tp[[i]] %*% t(Tp[[i]])))
    R2XC <- c(R2XC, 1 - rssc/sstotK)
    rsso <- sum(diag(K[i, i][[1]]))
    R2XO <- c(R2XO, 1 - rsso/sstotK)
    Yhat <- Tp[[i]] %*% Bt[[i]] %*% t(Cp)
    R2Yhat <- c(R2Yhat, 1 - sum(sum((Yhat - Y)^2))/sstotY)
  }
  model <- list()
  model$Cp <- Cp
  model$Sp <- Sp
  model$Sps <- Sps
  model$Up <- Up
  model$Tp <- Tp
  model$T <- as.matrix(Tp[[nox + 1]])
  model$co <- co
  model$so <- so
  model$to <- to
  if (nox > 0) {
    model$To <- matrix(nrow = nrow(model$T), ncol = nox,
                       data = unlist(to), byrow = FALSE)
  }
  else {
    model$To <- NULL
  }
  model$toNorm <- toNorm
  model$Bt <- Bt
  model$A <- A
  model$nox <- nox
  model$K <- K
  model$EEprime <- EEprime
  model$sstot_K <- sstotK
  model$R2X <- R2X
  model$R2XO <- R2XO
  model$R2XC <- R2XC
  model$sstot_Y <- sstotY
  model$R2Y <- R2Y
  model$R2Yhat <- R2Yhat
  model$preProc <- list()
  model$preProc$K <- preProcK
  model$preProc$Y <- preProcY
  model$preProc$paramsY <- scale.params
  class(model) <- "kopls"
  return(model)
}
#' Get summary results of run_AMOPLS
#'
#' Returns the combined summary table, with rows ordered (via a keyed
#' data.table subset) by the model's effect names.
#'
#' @inheritParams fun_outputs
#'
#' @import magrittr
#'
#' @export
fun_get_summary <- function(s) {
  s$output$Summary[s$general$ee.names]
}
|
ad2c91d09f02e279c0675ad1e51df4cf9a888f5b | 27138259677acb2efd76acb001d69abdc1437f6d | /exdata/ExData_Plotting2/project2.R | 4b71e34a8b52f873ba53db82620fe0fb220fd6ca | [] | no_license | wangwf/datasciencecoursera | 84c79110fae7b66ab9c95d0194b3691f093a3729 | 9e45325718b4ef4a20de73b3f141137025842122 | refs/heads/master | 2016-09-05T08:49:32.978612 | 2015-02-28T00:55:17 | 2015-02-28T00:55:17 | 18,976,279 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,780 | r | project2.R | downloadFiles<-function(
dataURL="https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
){
if(!file.exists("./data/Source_Classification_Code.rds")){
dir.create("./data")
temp <-tempfile()
download.file(dataURL, temp, method="curl")
unzip(temp,exdir="./data/")
## rename dir-name ""UCI HAR Dataset" to "UCI_HAR_Dataset"
# mv UCI\ HAR\ Dataset/ UCI_HAR_Dataset
# file.rename("UCI HAR Dataset", "UCI_HAR_Dataset")
unlink(temp)
}else{
message("data already downloaded.")
}
}
# Load the NEI PM2.5 emissions summary and the EPA source classification table.
# NOTE(review): downloadFiles() above unzips into ./data/ while these paths
# read from the working directory -- confirm the files were moved or that the
# working directory matches before sourcing this script.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("./Source_Classification_Code.rds")
#
# 1. Have total emissions from PM2.5 decreased in the United States from 1999 to 2008?
#
png("plot1.png")
# Total emissions summed over all sources, per year (whole US, no fips filter)
NEI_by_year <- tapply(NEI$Emissions, NEI$year, sum)
# Fix: the title previously said "Baltimore City" although question 1 (and
# this unsubset data) covers the entire United States.
plot(NEI_by_year, type="b", xlab="year", ylab="PM2.5 Emissions", main="Total PM2.5 Emissions in the United States", xaxt="n")
# Label the x axis with the actual years instead of index positions
axis(1, at=1:4, labels = rownames(NEI_by_year), col.axis="blue", las=0)
dev.off()
#
# 2. Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips == "24510") from 1999 to 2008?
#
# Earlier tapply/plot approach, kept for reference:
#NEI_Baltimore <- NEI[NEI$fips=="24510",]
#NEI_Baltimore_year <- tapply(NEI_Baltimore$Emissions, NEI_Baltimore$year, sum)
#plot(NEI_Baltimore_year, type="b", xlab="year", ylab="Emissions",main="PM2.5 Emissions in the Baltimore City, Maryland",xaxt="n")
#axis(1,at=1:4,labels = rownames(NEI_Baltimore_year), col.axis="blue",las=0)
# Yearly totals for Baltimore City only (fips 24510)
NEI_Baltimore_year <- aggregate(Emissions ~year, subset(NEI, fips=="24510"), sum)
png("plot2.png")
plot(NEI_Baltimore_year, type="b", main="PM2.5 Emissions in the Baltimore City, Maryland")
dev.off()
#
# 3. Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad) variable,
# which of these four sources have seen decreases in emissions from 1999–2008 for Baltimore City?
# Which have seen increases in emissions from 1999–2008? Use the ggplot2 plotting system to make a plot answer this question.
#
library(ggplot2)
# FIX: NEI_Baltimore was only defined in a commented-out line above, so the
# aggregate() call below failed with "object 'NEI_Baltimore' not found".
NEI_Baltimore <- subset(NEI, fips == "24510")
# Yearly emission totals per source type for Baltimore City.
NEI_B_yt <- aggregate(Emissions ~ year + type, NEI_Baltimore, sum)
png("plot3.png")
qplot(year, Emissions, data=NEI_B_yt, geom=c("point","smooth"), method="loess", col=type)
dev.off()
# FIX: a second, stray dev.off() used to follow here; with no graphics device
# open it raised "cannot shut down device 1 (the null device)".
# 4. Across the United States, how have emissions from coal combustion-related sources changed from 1999–2008?
png("plot4.png")
# Coal-related source-classification codes, matched on SCC.Level.Three
# containing "coal" or "Lignite" (case-insensitive).
SCC_coal_comb <- SCC[grepl("coal", SCC$SCC.Level.Three, ignore.case=TRUE) |
                         grepl("Lignite", SCC$SCC.Level.Three, ignore.case=TRUE),]
# Keep only NEI records whose SCC code is coal-related, then total per year.
NEI_coal <- NEI[NEI$SCC %in%SCC_coal_comb$SCC,]
NEI_coal_y <- aggregate(Emissions~ year, NEI_coal,sum)
# Emissions are divided by 1e3, hence the "(Kilo tons)" axis label.
plot(NEI_coal_y$Emissions/1e3~NEI_coal_y$year, type="b", xlab="year", ylab="PM2.5 Emissions (Kilo tons)",
     main="Emissions of PM2.5 per year of coal cumbustors -USA")
# NOTE(review): this stricter filter (combustion in Level.One AND coal/lignite
# in Level.Three) overwrites SCC_coal_comb AFTER the plot is drawn, so it has
# no effect on plot4 -- it looks like it was meant to replace the filter
# above.  Confirm the intended subset before reusing this script.
SCC_coal_comb <- SCC[
    grepl("combustion", SCC$SCC.Level.One, ignore.case=TRUE) &
        (grepl("coal", SCC$SCC.Level.Three, ignore.case=TRUE) |
             grepl("lignite", SCC$SCC.Level.Three, ignore.case=TRUE)), ]
dev.off()
# 5. How have emissions from motor vehicle sources changed from 1999–2008 in Baltimore City?
png("plot5.png")
# Motor-vehicle ("ON-ROAD") records for Baltimore City only, totalled per year.
NEI_Baltimore_onRoad <- NEI[(NEI$fips=="24510" & NEI$type=="ON-ROAD"),]
NEI_Baltimore_onRoad_year <- aggregate(Emissions ~ year, NEI_Baltimore_onRoad, sum)
# FIX: the axis label previously said "(Kilo tons)" although, unlike plot 4,
# the values here are NOT divided by 1e3; NEI emissions are reported in tons.
plot(NEI_Baltimore_onRoad_year$Emissions~NEI_Baltimore_onRoad_year$year,
     type="b", xlab="year", ylab="PM2.5 Emissions (tons)",
     main="Emissions of PM2.5 from motor vehicle source in Baltimore")
dev.off()
# 6. Compare emissions from motor vehicle sources in Baltimore City with emissions from motor vehicle sources in Los Angeles County,
# California (fips == "06037"). Which city has seen greater changes over time in motor vehicle emissions?
# Motor-vehicle ("ON-ROAD") records for the two counties, totalled per year
# and county.
NEI_onRoad <- NEI[((NEI$fips=="24510"| NEI$fips=="06037") & NEI$type=="ON-ROAD"),]
NEI_onRoad_y <-aggregate(Emissions ~year+fips, NEI_onRoad, sum)
# Recode the raw fips codes as readable factor labels for the plot legend.
NEI_onRoad_y$fips <- as.factor(NEI_onRoad_y$fips)
levels(NEI_onRoad_y$fips)[levels(NEI_onRoad_y$fips)=="24510"] <- "Baltimore, MD"
levels(NEI_onRoad_y$fips)[levels(NEI_onRoad_y$fips)=="06037"] <- "Los Angeles, CA"
png("plot6.png")
# Linear trend per county; colour distinguishes the two counties.
qplot(year,Emissions, data=NEI_onRoad_y, geom=c("point","smooth"),method="lm",col=fips,
      main="Motor vehicle emissions from Baltimore City and Los Angeles County")
dev.off()
|
c65327e08e09617b0f7e454dbb7eef6b3aeb3803 | 43b23a49d0cd9b9f2badaeea1680dd3d704ab9b8 | /Model_Output_Analyses/EcologicalIntegrity/ScholarlyStudies_2019/birds/bird_exploration_glms.R | 74785fc98a1dcfdec30f4537d7c2d5da662f97b7 | [] | no_license | LacherIara/git_CLI_LULC_11-16-17 | f796e86d4e7c0eb469f1b69254c71167db4843b5 | fda70aa45afb45c96a86bcf32d572e9e5bebf3b6 | refs/heads/master | 2022-05-14T06:19:02.439035 | 2022-05-04T17:14:34 | 2022-05-04T17:14:34 | 111,033,766 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 24,050 | r | bird_exploration_glms.R |
###################################################################################################
# Bird community GLM exploration
#
# For each land-use/land-cover (LULC) class -- development, forest, grass,
# crop -- and each buffer radius (250 m, 500 m, 1 km, 5 km), fit a
# single-predictor Poisson GLM of a bird response (Abundance or SpRichness)
# against percent cover, plot all four radii together with Poisson-GLM
# smooths, and annotate the panel with each model's p-value and AIC.
# This replaces eight nearly identical copy-pasted sections with one
# parameterised helper; all titles, axis limits and output file names are
# byte-identical to the originals.
###################################################################################################

setwd("I:/EI_data/plots2016/7-26-19")
bird.data <- read.csv("U:/CLI/Field Surveys/Birds/CLI_Birds_Environmental_6-17-19.csv")

library(ggplot2)
library(MuMIn)

# Quick look at the distribution of both response variables.
hist(bird.data$Abundance)
hist(bird.data$SpRichness)

# All-subsets model selection (MuMIn::dredge) of Abundance against the four
# buffer-radius columns of each LULC class.
# NOTE(review): columns are selected by position (103 = Abundance,
# 105-120 = LULC percent covers) -- confirm these indices against the
# current CSV layout before rerunning.
data.dredge.cro <- bird.data[,c(103,105,106,107,108)]
cro.models <- glm(Abundance ~ ., data=data.dredge.cro)
dd.cro <- dredge(cro.models)

data.dredge.gra <- bird.data[,c(103,117,118,119,120)]
gra.models <- glm(Abundance ~ ., data=data.dredge.gra)
dd.gra <- dredge(gra.models)

data.dredge.for <- bird.data[,c(103,113,114,115,116)]
for.models <- glm(Abundance ~ ., data=data.dredge.for)
dd.for <- dredge(for.models)

data.dredge.dev <- bird.data[,c(103,109,110,111,112)]
dev.models <- glm(Abundance ~ ., data=data.dredge.dev)
dd.dev <- dredge(dev.models)

# Column names of the four buffer radii for one LULC prefix, e.g.
# "dev" -> c("dev_pct250", "dev_pct500", "dev_pct1k", "dev_pct5k").
radius_cols <- function(lulc) {
  paste0(lulc, "_pct", c("250", "500", "1k", "5k"))
}

# Legend labels, in the same order as radius_cols().
radius_labels <- c("250m", "500m", "1000m", "5000m")

# Fit one Poisson GLM per radius and return the annotation text
# "<radius>: p=<p>, AIC =<AIC>" for all four radii, with values rounded to
# 3 digits (matching the original annotation layout exactly).
fit_label <- function(response, lulc) {
  metrics <- vapply(radius_cols(lulc), function(xcol) {
    fit <- glm(reformulate(xcol, response), data = bird.data, family = "poisson")
    s <- summary(fit)
    c(s$coefficients[2, 4], s$aic)   # slope p-value, model AIC
  }, numeric(2))
  metrics <- round(as.vector(metrics), digits = 3)
  paste0(radius_labels[1], ": p=", metrics[1], ", AIC =", metrics[2], "\n",
         radius_labels[2], ": p=", metrics[3], ", AIC =", metrics[4], "\n",
         radius_labels[3], ": p=", metrics[5], ", AIC =", metrics[6], "\n",
         radius_labels[4], ": p=", metrics[7], ", AIC =", metrics[8])
}

# Point + Poisson-GLM-smooth layers for one radius.  Implemented as a
# function so each layer captures its own column name; a plain for-loop
# building aes() in place would leave every layer bound to the last column.
radius_layers <- function(response, xcol, radius_label) {
  list(
    geom_point(data = bird.data,
               aes(y = .data[[response]], x = .data[[xcol]],
                   color = radius_label)),
    geom_smooth(method = "glm",
                se = TRUE,
                method.args = list(family = "poisson"),
                fullrange = FALSE,
                data = bird.data,
                aes(y = .data[[response]], x = .data[[xcol]],
                    color = radius_label))
  )
}

# Build, display and save one panel for a response/LULC combination.
#   response  : "Abundance" or "SpRichness"
#   lulc      : column prefix -- "dev", "for", "gra" or "cro"
#   xlab_text : x-axis label
#   title_text: plot title (kept identical to the original titles)
#   y_max     : upper y-axis limit
#   label_y   : y position of the p-value/AIC annotation
#   out_file  : png file name passed to ggsave()
plot_birds_glm <- function(response, lulc, xlab_text, title_text,
                           y_max, label_y, out_file) {
  cols <- radius_cols(lulc)
  p <- ggplot()
  for (i in seq_along(cols)) {
    p <- p + radius_layers(response, cols[i], radius_labels[i])
  }
  p <- p +
    xlab(xlab_text) +
    ggtitle(title_text) +
    xlim(0, 1.0) +
    ylim(0, y_max) +
    scale_color_manual(name = "Radius",
                       values = c("250m" = "red", "500m" = "orange",
                                  "1000m" = "darkgreen", "5000m" = "blue"),
                       breaks = c("250m", "500m", "1000m", "5000m")) +
    theme(legend.position = c(1, 0),
          legend.justification = c(1, 0)) +
    annotate(geom = "text", x = 0.8, y = label_y,
             label = fit_label(response, lulc))
  print(p)  # display on the active device, as the original top-level calls did
  ggsave(p, file = out_file)
}

# Abundance panels (y limit 225, annotation at y = 200).
plot_birds_glm("Abundance", "dev", "Percent Development", "Birds: Abundance, Dev", 225, 200, "birds.dev.abundance.png")
plot_birds_glm("Abundance", "for", "Percent Forest",      "Birds: Abundance, for", 225, 200, "birds.for.abundance.png")
plot_birds_glm("Abundance", "gra", "Percent Grass",       "Birds: Abundance, gra", 225, 200, "birds.gra.abundance.png")
plot_birds_glm("Abundance", "cro", "Percent Crop",        "Birds: Abundance, cro", 225, 200, "birds.cro.abundance.png")

# Species-richness panels (y limit 50, annotation at y = 45).
plot_birds_glm("SpRichness", "dev", "Percent Development", "Birds: SpRichness, Dev", 50, 45, "birds.dev.sprichness.png")
plot_birds_glm("SpRichness", "for", "Percent Forest",      "Birds: SpRichness, for", 50, 45, "birds.for.sprichness.png")
plot_birds_glm("SpRichness", "gra", "Percent Grass",       "Birds: SpRichness, gra", 50, 45, "birds.gra.sprichness.png")
plot_birds_glm("SpRichness", "cro", "Percent Crop",        "Birds: SpRichness, cro", 50, 45, "birds.cro.sprichness.png")
|
8aa22228af5722aebde3d56633240105d65bb803 | a85e87c6a413530b71d7d72e3c75886b24c82590 | /Segregate_images.r | 9e0faf375bef4b4116d8211d48d3f51ab2ce2781 | [] | no_license | jananiigiridhar/Yelp-Image-Classification | 200ea08a5351805fbf9343eb97f10e484d4e9f98 | ca1c1b470a93aff3669346a8f310a7750c668352 | refs/heads/master | 2021-06-29T17:27:43.259555 | 2017-09-04T04:02:43 | 2017-09-04T04:02:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,771 | r | Segregate_images.r | # install.packages("stringr")
# Segregate Yelp training photos into one sub-folder per business, using the
# photo-id -> business-id mapping CSV.
#
# install.packages("stringr")
# install.packages("tools")
library(stringr)
library(tools)

# Source folder with the flat photo dump, target folder for the per-business
# sub-folders, and the CSV that maps each photo id to its business id.
source_path <- "C:/Users/vineeth raghav/Downloads/Uconn/R Proj/train_photos/train_photos"
target_path <- "C:/Users/vineeth raghav/Downloads/Uconn/R Proj/train_photos/Processed_Images"
mapping_file <- "C:/Users/vineeth raghav/Downloads/Uconn/R Proj/train_photo_to_biz_ids/train_photo_to_biz_ids.csv"
# NOTE(review): extension is upper-case ".JPG"; on a case-sensitive file
# system the photos may be named ".jpg" -- confirm before running.
file_extn <- '.JPG'
except_folders <- c("Archives")

setwd(source_path)
getwd()

# Ensure both paths end with "/" so plain paste0() concatenation works.
source_path <- ifelse(str_sub(source_path, -1) == "/", source_path, paste0(source_path, "/"))
target_path <- ifelse(str_sub(target_path, -1) == "/", target_path, paste0(target_path, "/"))

# Read the photo-id -> business-id mapping.
source_file <- read.csv(mapping_file)

# One folder per distinct business id.
folder_names <- unique(source_file$business_id)

for (folder_name in folder_names) {
  if (!(folder_name %in% except_folders)) {
    # Create the business folder; silently reused if it already exists.
    dir.create(file.path(target_path, folder_name), showWarnings = FALSE)
    # Photo ids belonging to this business (first column of the CSV).
    file_ids <- source_file[source_file$business_id == folder_name, 1]
    print(paste(length(file_ids), "Number of files found for the folder", folder_name))
    # FIX: the original looped over 1:nrow(...), which runs once with an NA
    # file name when a business has zero photos (1:0 == c(1, 0)).
    # file.rename() is vectorised, so all photos of this business can be
    # moved (not copied) in a single call.
    file.rename(paste0(source_path, file_ids, file_extn),
                paste0(target_path, folder_name, "/", file_ids, file_extn))
  }
}
aa2896cf7aabece52ef9a63240c1cfc78608a7d5 | 5e28bdc8e8aa84d5a919a92be655cfb381e179f0 | /man/yadirGetSiteLinks.Rd | d778c1c007b7f6df847beb06afe01c645be45d29 | [] | no_license | grkhr/ryandexdirect | bde9248bca476ab852dfc39dbbf2cb2c84883f75 | 1d7adf3ad657ac16aa83f4d1520ffcd816d2cd2f | refs/heads/master | 2020-09-22T11:46:38.738297 | 2019-12-10T12:10:23 | 2019-12-10T12:10:23 | 160,968,163 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,050 | rd | yadirGetSiteLinks.Rd | \name{yadirGetSiteLinks}
\alias{yadirGetSiteLinks}
\title{Get sitelinks (quick links) from Yandex Direct}
\description{yadirGetSiteLinks returns the sets of sitelinks (quick links) that meet the specified criteria.}
\usage{
yadirGetSiteLinks(Login = NULL, Token = NULL)
}
\arguments{
  \item{Login}{character, your login at Yandex Direct, required}
  \item{Token}{character, your Yandex Direct API token, required}
  \item{AgencyAccount}{your agency account login, if you get statistics from a client account}
  \item{TokenPath}{path to the directory where your credential data is saved}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{data frame with SiteLinks parameters, id, title, href and description }
\author{Alexey Seleznev}
\examples{
# To get the sets of quick links for a client account, use
library(ryandexdirect)
my_fast_links <- yadirGetSiteLinks(Login = "login")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
a23c506eb8002fda264c107f2a5f4d0ed9b2bc94 | 1ae11a3faf6e06edd46b6cbb00924f18dd77a09e | /R/ghTreeGusfield.R | 145fa7d9a49bc6007a2842079420e4f39bcb3694 | [] | no_license | cran/optrees | d443b36e21d21f9ec1538a5e451cce721ceda1ae | 98121be6a97b7fd3ed856587da78795fc0ce70b3 | refs/heads/master | 2016-09-05T09:01:25.249779 | 2014-09-01T00:00:00 | 2014-09-01T00:00:00 | 23,565,281 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,924 | r | ghTreeGusfield.R | #-----------------------------------------------------------------------------#
# optrees Package #
# Minimum Cut Tree Problems #
#-----------------------------------------------------------------------------#
# ghTreeGusfield --------------------------------------------------------------
#' Gomory-Hu tree with the Gusfield's algorithm
#'
#' Given a connected weighted and undirected graph, the \code{ghTreeGusfield}
#' function builds a Gomory-Hu tree with the Gusfield's algorithm.
#'
#' @details The Gomory-Hu tree was introduced by R. E. Gomory and T. C. Hu in
#' 1961. Given a connected weighted and undirected graph, the Gomory-Hu tree
#' is a weighted tree that contains the minimum s-t cuts for all s-t pairs
#' of nodes in the graph. Gomory and Hu also developed an algorithm to find it
#' that involves maximum flow searchs and nodes contractions.
#'
#' In 1990, Dan Gusfield proposed a new algorithm that can be used to find a
#' Gomory-Hu tree without nodes contractions and simplifies the implementation.
#'
#' @param nodes vector containing the nodes of the graph, identified by a
#' number that goes from \eqn{1} to the order of the graph.
#' @param arcs matrix with the list of arcs of the graph. Each row represents
#' one arc. The first two columns contain the two endpoints of each arc and the
#' third column contains their weights.
#'
#' @return \code{ghTreeGusfield} returns a list with:
#' \itemize{
#'   \item \code{tree.nodes}: vector containing the nodes of the Gomory-Hu tree.
#'   \item \code{tree.arcs}: matrix containing the list of arcs of the Gomory-Hu tree.
#'   \item \code{stages}: number of stages required.
#' }
#'
#' @references R. E. Gomory, T. C. Hu. Multi-terminal network flows. Journal
#' of the Society for Industrial and Applied Mathematics, vol. 9, 1961.
#'
#' Dan Gusfield (1990). "Very Simple Methods for All Pairs Network Flow
#' Analysis". SIAM J. Comput. 19 (1): 143-155.
#'
#' @seealso A more general function \link{getMinimumCutTree}.
ghTreeGusfield <- function(nodes, arcs) {
  # Gusfield's algorithm: the Gomory-Hu tree is grown one node per outer
  # iteration, attaching node i to the tree built from nodes 1..(i-1).
  # Start the tree with the first node only; arcsT1 rows hold
  # (endpoint1, endpoint2, weight, capacity).
  nodesT1 <- nodes[1]
  arcsT1 <- matrix(ncol = 4)[-1, ]
  # Iterate adding one arc between node i and one node of the tree
  for (i in 2:length(nodes)) {
    # Work on copies of the tree built so far while locating the node
    # that i must attach to
    nodesT <- nodesT1
    arcsT <- arcsT1
    # Shrink the candidate component until a single node remains
    while (length(nodesT) > 1) {
      # Search a-b arc with minimum weight (ties broken by first index)
      min.arc <- which(arcsT[, 3] == min(arcsT[, 3]))[1]
      # This arc has the weight of the minimum a-b cut in the original graph
      a <- arcsT[min.arc, 1]
      b <- arcsT[min.arc, 2]
      # Remove the arc by making it an arc with zero capacity
      arcsT[min.arc, 4] <- 0
      # Duplicate arcs in both directions and order them to find the cut
      arcsT2 <- rbind(arcsT, matrix(c(arcsT[, 2], arcsT[, 1],
                                      arcsT[, 3], arcsT[, 4]), ncol = 4))
      arcsT2 <- arcsT2[order(arcsT2[, 1], arcsT2[, 2]), ]
      # Removing the a-b arc splits the tree into two components Ta and Tb
      TaTbCut <- findstCut(nodesT, arcsT2, a, b)
      # Extract nodes and arcs of the two components
      nodesTa <- TaTbCut$s.cut
      arcsTa <- matrix(arcsT[which(arcsT[, 1] %in% nodesTa
                             & arcsT[, 2] %in% nodesTa), ], ncol = 4)
      nodesTb <- TaTbCut$t.cut
      arcsTb <- matrix(arcsT[which(arcsT[, 1] %in% nodesTb
                             & arcsT[, 2] %in% nodesTb), ], ncol = 4)
      # Compute the corresponding minimum a-b cut in the original graph
      # (findMinCut) to decide which side node i falls on
      abCut <- findMinCut(nodes, arcs, source.node = a, sink.node = b)
      # Keep only the component that contains node i
      if (i %in% abCut$s.cut) {
        nodesT <- nodesTa
        arcsT <- arcsTa
      } else {
        nodesT <- nodesTb
        arcsT <- arcsTb
      }
    }
    # nodesT now holds exactly one node: the attachment point for node i
    # (the next line merely evaluates it; it has no side effect)
    nodesT
    # Compute the minimum cut between node i and its attachment node
    ikCut <- findMinCut(nodes, arcs, source.node = nodesT, sink.node = i)
    iCut <- ikCut$s.cut
    kCut <- ikCut$t.cut
    ikFlow <- ikCut$max.flow
    # Connect the tree node with node i, weighted by the minimum cut value
    # (iCut/kCut are extracted above but unused below)
    nodesT1 <- c(nodesT1, i)
    arcsT1 <- rbind(arcsT1, c(nodesT, i, ikFlow, ikFlow))
  }
  # Remove the capacities column, keeping (ept1, ept2, weight)
  tree.arcs <- arcsT1[, -4]
  # Order arcs by their endpoints
  tree.arcs <- tree.arcs[order(tree.arcs[, 1], tree.arcs[, 2]), ]
  # Column names
  colnames(tree.arcs) <- c("ept1", "ept2", "weight")
  # Build output
  output <- list("tree.nodes" = nodes, "tree.arcs" = tree.arcs,
                 "stages" = length(nodes))
  return(output)
}
#-----------------------------------------------------------------------------# |
51f3eeecc54618f9ef36f969b8913a1a4f89b74f | fac25dea844ebcd772e68175d62c6be4282cf42f | /R/ilc-summary.r | 3bcc10753b7a872f3137727be96221c99db70f6b | [] | no_license | andybega/jpr-forecasting-lessons | 60c086699e5bb7245802c075effbadfc73c5d4de | 9e77db54f9afa92871798bc7289a8d7d7efd7c44 | refs/heads/master | 2021-01-11T17:27:28.188829 | 2017-01-27T13:06:18 | 2017-01-27T13:06:18 | 62,795,120 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,933 | r | ilc-summary.r | #
# Figures 1 (a) and (b): summaries of past ILCs
#
figure1a <- function() {
  #
  # Figure 1 (a): world map of ILC counts per country since 1991,
  # written to figures/ilc-map.jpeg.
  #
  library("cshapes")
  library("dplyr")
  library("RColorBrewer")
  library("lubridate")
  source("R/utilities/prettyc.r")
  load("data/ilc-data-2015-08.rda")
  # Aggregate ILC counts by country (Gleditsch-Ward code)
  ilc_by_country <- ilc_data %>%
    filter(date >= "1991-01-01") %>%
    group_by(gwcode) %>%
    dplyr::summarize(ilcs = sum(ilc)) %>%
    as.data.frame
  # Plot
  dpi <- 400
  jpeg("figures/ilc-map.jpeg", width=3*dpi, height=1.26*dpi, pointsize=20)
  data <- ilc_by_country
  id <- "gwcode"
  x <- "ilcs"
  nval <- length(unique(data[, x]))
  # Country borders as of 2012; join the ILC counts onto the map data
  world <- cshp(date=as.Date("2012-01-01"))
  world@data <- data.frame(world@data, data[match(world@data[, 'GWCODE'], data[, id]), ])
  # Set fill colors: darker red = more ILCs, grey = no data.
  # is.na() already returns a logical, so no "== T" comparison is needed;
  # T/F shorthands replaced with TRUE/FALSE throughout.
  colorpal <- rev(brewer.pal(nval, 'Reds'))
  colors <- ifelse(is.na(world@data[, x]), '#B0B0B0', colorpal[match(world@data[, x], sort(unique(world@data[, x]), decreasing=TRUE))])
  # Plot map
  par(mar=c(1, 1, 1, 1))
  plot(world, col='gray30', border='gray30', lwd=1)
  plot(world, col=colors, border=FALSE, add=TRUE)
  # Legend
  legend.text <- c('No data', rev(unlist(dimnames(table(world@data[, x])))))
  legend(x=-170, y=0, legend=legend.text, fill=c('#B0B0B0', colorpal),
         bty='n')
  dev.off()
  invisible(NULL)
}
figure1a()
figure1b <- function() {
  #
  # Figure 1 (b): ILCs per year with a smoothed trend,
  # written to figures/ilc-by-year.jpeg.
  #
  library("dplyr")
  library("ggplot2")  # ggplot()/ggsave() below require ggplot2 attached
  library("lubridate")
  source("R/utilities/prettyc.r")
  load("data/ilc-data-2015-08.rda")
  # Aggregate ILC counts by calendar year, keeping 1991 onward
  ilc_by_yr <- ilc_data %>%
    mutate(year = year(date)) %>%
    group_by(year) %>%
    dplyr::summarize(ilcs = sum(ilc)) %>%
    filter(year >= 1991)
  p <- ggplot(ilc_by_yr, aes(x = year, y = ilcs)) + geom_point() +
    stat_smooth() +
    labs(x = "Year", y = "ILCs") +
    theme_bw()
  ggsave(plot=p, file="figures/ilc-by-year.jpeg", width=7, height=2, dpi=400)
  invisible(NULL)
}
figure1b()
|
d3ea323b4a883794427a09ec7338e03a78ff2a84 | 998fb13dfe557146f9d6702a8d5697e1cdfafeac | /Plot4.R | 7e029320598cb2ae3d6eb060fed4b3f23e6fa7fd | [] | no_license | mattmogit/ExData_Plotting1 | 1576d8508e66de6009d96a6df7f5d4afae752044 | 8960aac5d12f05ba1e4095ae90dd13269cee37dc | refs/heads/master | 2020-12-03T10:30:30.881349 | 2015-01-11T20:21:34 | 2015-01-11T20:21:34 | 29,101,382 | 0 | 0 | null | 2015-01-11T18:31:27 | 2015-01-11T18:31:25 | null | UTF-8 | R | false | false | 1,088 | r | Plot4.R | #-----------------------------p4
data <- read.csv("E:/Downloads/household_power_consumption.txt", header=T, sep=';', na.strings="?", stringsAsFactors=F, comment.char="", quote='\"')
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
sub <- subset(data, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data)
datetime <- paste(as.Date(sub$Date), sub$Time)
sub$Datetime <- as.POSIXct(datetime)
png(filename = "plot4.png", bg = "white")
par(mfrow=c(2,2))
with(sub, {
plot(Global_active_power~Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="")
plot(Voltage~Datetime, type="l", ylab="Voltage (volt)", xlab="")
plot(Sub_metering_1~Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(Global_reactive_power~Datetime, type="l", ylab="Global Rective Power (kilowatts)",xlab="")
})
dev.off()
rm(sub)
|
38cb07b954c4d8ff1ebf613b35914fabef1744b3 | f567f750c98b0b1a0e9b2f3077975029d36bf5be | /S_select-plot.R | b20996b3a0b11da30da362f5dc787800d432de04 | [] | no_license | xansantos/selR-select | cf63768f680369948bf0f0a80a9fb42aa35b56db | 4596414cb97fb51b983a2e9726ee8cc30db8fc14 | refs/heads/master | 2020-06-07T05:49:53.969979 | 2019-06-20T15:30:15 | 2019-06-20T15:30:15 | 192,941,064 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,329 | r | S_select-plot.R |
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
###
### F_select-plot: Visualization of the average curve
### predicted by paired-gear selective models class SELECT
### Main function (V3)
### Juan Santos - 10.2014
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Arguments:
# mod: Model from class selR::select models.
# nome: Main name of the plot
# Dir if not NULL, figure will be sunk to the given directory
# Dependencies:
#No dependencies
#names(mod1)
#[1] "fun" "Betas" "l" "p.phi" "L" "phi" "modelhood" "aic" "aicc"
P_select<-function(mod,nome,Dir){
# Plots the average catch-sharing curve predicted by a paired-gear
# selectivity model of class SELECT.
# Args:
#   mod:  fitted model list; this function uses elements "fun", "Betas"
#         (named values split/l50/sr), "l", "p.phi", "phi" (and "L" in the
#         PDF branch -- see note below)
#   nome: base name of the output PDF (without extension)
#   Dir:  if not NULL, the figure is written to <Dir>/<nome>.pdf;
#         otherwise it is drawn on the current graphics device
# Extract the model label and a summary string of the fitted parameters
fun_sel<-mod[["fun"]]
par_sel<-paste("Split= ",round(mod[["Betas"]]["split"],2)," L50= ", round(mod[["Betas"]]["l50"],2)," SR= ", round(mod[["Betas"]]["sr"],2),sep="")
if(!is.null(Dir)){
# NOTE(review): setwd() changes the working directory and never restores
# it, so the caller's wd is left pointing at Dir after this call
setwd(Dir)
pdf(paste(nome,".pdf",sep=""),width=12, height=9)
par(cex=2,cex.axis=1.5,cex.main=1.5,cex.lab=1.5,mar= c(5, 4, 4, 1),mfrow=c(1,1))
# Empty plot frame. NOTE(review): this branch uses xlim=range(L) while the
# on-screen branch below uses range(l) -- confirm which is intended
with(mod, plot(phi~l,type="n",bty="n",col=2,ylim=c(0,1),xlim=range(L),ylab="Catch sharing ",xlab="length (cm)"))
# Reference line at 0.5 = equal catch sharing between gears
abline(h=.5,lty=3,lwd=2,col="darkgreen")
# Observed proportions (points) and fitted curve (line)
with(mod, points(p.phi~l,pch=21,col=2,cex=3,bg="darkgrey"))
with(mod, lines(phi~l,type="l",lwd=3,col=2))
# Annotations: parameter summary, reference-line label, model name
mtext(par_sel, 3, line=-0.2,cex=2,col="darkgrey")
mtext("Equal catch sharing", 3, line=-17.5,cex=1,col="darkgreen",adj=0)
mtext(paste("model:", fun_sel,sep=""),3,line=0,cex=1.5,adj=0.05,padj=2,col="darkgrey",outer=T)
dev.off()
} else {
# Same plot drawn on the active graphics device (no file output)
par(cex=2,cex.axis=1.5,cex.main=1.5,cex.lab=1.5,mar= c(5, 5, 4, 1),mfrow=c(1,1))
with(mod, plot(phi~l,type="n",bty="n",col=2,ylim=c(0,1),xlim=range(l),ylab= expression(paste("Catch comparison ", phi, "(l)")),
xlab="length (cm)"))
abline(h=.5,lty=3,lwd=2,col="darkgreen")
with(mod, points(p.phi~l,pch=21,col=2,cex=3,bg="darkgrey"))
with(mod, lines(phi~l,type="l",lwd=3,col=2))
mtext(par_sel, 3, line=-0.2,cex=2,col="darkgrey")
mtext("Equal catch sharing", 3, line=-17.5,cex=1,col="darkgreen",adj=0)
mtext(paste("model:", fun_sel,sep=""),3,line=0,cex=1.5,adj=0.05,padj=2,col="darkgrey",outer=T)
}}
|
df09675afd1ef326b612eb4ce17547c6203c1f68 | de1db178b5315998fcd0e7a24d0788c1a1f5af9b | /Model/TrailingStopV3.R | 1805fad38c23f27285b44ce4647ece5a89ecb8ec | [] | no_license | fapri/main-model | fbe86eaeaa466d313566602906219240df6fb255 | 98d846130bd868db1fcbd6e8687a36ce70647bfb | refs/heads/master | 2021-06-25T00:55:23.801801 | 2020-10-06T20:22:02 | 2020-10-06T20:22:02 | 170,916,184 | 1 | 0 | null | 2020-09-30T15:17:33 | 2019-02-15T19:18:15 | R | UTF-8 | R | false | false | 10,916 | r | TrailingStopV3.R | # Corn and Soybean
# Trailing Stop
# threeDayTrigger = function(currentDayPercentile, p1, p2, p3){
# # Case 1
# if(currentDayPercentile < p1){
# return(TRUE)
# }
# # Case 2
# else if(currentDayPercentile == p1 && p1 < p2 && p2 == p3){
# return(TRUE)
# }
# # Case 3
# else if(currentDayPercentile == p2 && p1 = p2 && p2 < p3){
# return(TRUE)
# }
# else{
# return(FALSE)
# }
# }
# # Percentile Drops
# fivePercentDrop = function(marketingYear, row){
# price = marketingYear$Price[row]
#
# p3 = marketingYear$Percentile[row - 3]
# p2 = marketingYear$Percentile[row - 2]
# p1 = marketingYear$Percentile[row - 1]
# currentDayPercentile = marketingYear$Percentile[row]
#
# if(threeDayTrigger(currentDayPercentile, p1, p2, p3)){
# if (currentDayPercentile == 60){
# #55
# seventy = marketingYear[row, which(names(marketingYear) == "70th")]
# base = seventy * 0.99
# if(price <= base){
# return(TRUE)
# } else{
# return(FALSE)
# }
# } else if(currentDayPercentile == 70) {
# #65
# eighty = marketingYear[row, which(names(marketingYear) == "80th")]
# base = eighty * 0.99
# if(price <= base){
# return(TRUE)
# } else{
# return(FALSE)
# }
# } else if(currentDayPercentile == 80) {
# #75
# ninety = marketingYear[row, which(names(marketingYear) == "90th")]
# base = ninety * 0.99
# if(price <= base){
# return(TRUE)
# } else{
# return(FALSE)
# }
# } else if(currentDayPercentile == 90) {
# #85
# ninetyFive = marketingYear[row, which(names(marketingYear) == "95th")]
# base = ninetyFive * 0.99
# if(price <= base){
# return(TRUE)
# } else{
# return(FALSE)
# }
# }
# } else{
# return(FALSE)
# }
# }
# Creates new marketing year where baselines are lowered by 1%
adjustMarketingYear = function(cropObject){
  # Returns a copy of cropObject[["Marketing Year"]] in which every
  # threshold column from "Baseline" through "95th" (assumed contiguous,
  # as in the original) is lowered by 1%, and Percentile is re-classified
  # against the lowered thresholds:
  #   Price > 95th -> 95, >= 90th -> 90, ..., >= Baseline -> 50, else 0.
  #
  # Args:
  #   cropObject: list holding a "Marketing Year" data frame with columns
  #               Price, Baseline, 60th, 70th, 80th, 90th, 95th
  # Returns:
  #   the adjusted marketing-year data frame
  marketingYearAdj = cropObject[["Marketing Year"]]
  baselineCol = which(names(marketingYearAdj) == "Baseline")
  ninetyFifthCol = which(names(marketingYearAdj) == "95th")
  # Lower all threshold columns by 1%
  marketingYearAdj[baselineCol:ninetyFifthCol] =
    (marketingYearAdj[baselineCol:ninetyFifthCol] * 0.99)
  # Vectorized re-classification (replaces the original row loop; unlike
  # 1:nrow(), this also handles a zero-row data frame correctly)
  price = marketingYearAdj$Price
  marketingYearAdj$Percentile =
    ifelse(price > marketingYearAdj$`95th`, 95,
    ifelse(price >= marketingYearAdj$`90th`, 90,
    ifelse(price >= marketingYearAdj$`80th`, 80,
    ifelse(price >= marketingYearAdj$`70th`, 70,
    ifelse(price >= marketingYearAdj$`60th`, 60,
    ifelse(price >= marketingYearAdj$Baseline, 50, 0))))))
  return(marketingYearAdj)
}
if(type == "corn"){
  # Attach the 1%-adjusted marketing year to each corn crop year
  # (seq_along instead of 1:length: an empty list now iterates zero times)
  for(i in seq_along(Corn_CropYearObjects)){
    Corn_CropYearObjects[[i]][["Marketing Year Adjusted"]] = adjustMarketingYear(Corn_CropYearObjects[[i]])
  }
} else if(type == "soybean"){
  # Attach the 1%-adjusted marketing year to each soybean crop year
  for(i in seq_along(Soybean_CropYearObjects)){
    Soybean_CropYearObjects[[i]][["Marketing Year Adjusted"]] = adjustMarketingYear(Soybean_CropYearObjects[[i]])
  }
}
# Checks if currentDayPercentile is a trailing stop trigger
isTrailingStop = function(previousDayPercentile, currentDayPercentile) {
  # TRUE when yesterday's percentile band was at least the 70th and today's
  # band dropped below it -- i.e. a trailing-stop trigger.
  # (Reassignable T/F shorthands replaced with the comparison's own
  # TRUE/FALSE result.)
  return(previousDayPercentile >= 70 && previousDayPercentile > currentDayPercentile)
}
# Checks cases where the baseline updates
isTrailingStopSpecial = function(pricePreviousPercentileBelow, currentPrice) {
  # TRUE when the current price has fallen to (or below) the threshold one
  # percentile band beneath the previous day's band; used at month
  # boundaries where the baselines update.
  # (Reassignable T/F shorthands replaced with the comparison's own
  # TRUE/FALSE result.)
  return(currentPrice <= pricePreviousPercentileBelow)
}
# Finds all of the trailing stop triggers for a given crop year
trailingStopTrigger = function(cropYear, featuresObject) {
  # Scans the marketing year day-by-day and records every trailing-stop
  # style trigger: regular trailing stop, month-boundary special case,
  # end-of-year trailing stop, ten-day high, and all-time high.
  #
  # Args:
  #   cropYear: list with 'Marketing Year' and 'Marketing Year Adjusted'
  #             data frames (Date, Price, Percentile, percentile columns)
  #             and 'Pre/Post Interval' (intervalPre / intervalPost)
  #   featuresObject: list providing '95% of Ten Day High' and
  #                   'All Time High' thresholds
  # Returns:
  #   cropYear with a 'TS Triggers' data frame added
  #   (Date, Previous Percentile, Percentile, Type)
  trailingStopTriggers = data.frame()
  marketingYear = cropYear[['Marketing Year']]
  marketingYearAdj = cropYear[['Marketing Year Adjusted']]
  # End-of-year trailing-stop (EYTS) window: first June day of the final
  # calendar year through the last date of the marketing year
  june = which(month(mdy(marketingYear$Date)) == 6)
  juneOC = which(year(mdy(marketingYear$Date[june])) == year(mdy(marketingYear$Date[nrow(marketingYear)])))
  EYTSInterval = interval(head(mdy(marketingYear$Date[june[juneOC]]), 1), mdy(marketingYear$Date[nrow(marketingYear)]))
  for(row in 2:nrow(marketingYear)) {
    # Special case for Feb -> March (baselines update at this boundary)
    # Functions on adjusted marketing year
    if (month(mdy(marketingYearAdj$Date[row])) == 3 && month(mdy(marketingYearAdj$Date[row - 1])) == 2){
      if(marketingYearAdj$Percentile[row - 1] != 95 && marketingYearAdj$Percentile[row - 1] >= 70) {
        # Column holding the price one percentile band below yesterday's band
        if(marketingYearAdj$Percentile[row - 1] == 70) previousPercentileBelow = "60th"
        if(marketingYearAdj$Percentile[row - 1] == 80) previousPercentileBelow = "70th"
        if(marketingYearAdj$Percentile[row - 1] == 90) previousPercentileBelow = "80th"
        if(marketingYearAdj$Percentile[row - 1] == 95) previousPercentileBelow = "90th"
        pricePreviousPercentileBelow = marketingYearAdj[row, previousPercentileBelow]
        # Translate the column label back into its numeric percentile
        if(previousPercentileBelow == "60th") previousPercentileBelow = 60
        if(previousPercentileBelow == "70th") previousPercentileBelow = 70
        if(previousPercentileBelow == "80th") previousPercentileBelow = 80
        if(previousPercentileBelow == "90th") previousPercentileBelow = 90
        # Trigger when today's price fell to/below the lower band boundary
        if(isTrailingStopSpecial(pricePreviousPercentileBelow, marketingYearAdj$Price[row])) {
          trailingStopTriggers = rbind(trailingStopTriggers, data.frame("Date" = marketingYearAdj$Date[row],
                                                                        "Previous Percentile" = marketingYearAdj$Percentile[row - 1],
                                                                        "Percentile" = previousPercentileBelow,
                                                                        "Type" = "Trailing Stop Special"))
        }
      }
    }
    # Special case for Aug -> Sept: skipped entirely
    # Functions on adjusted marketing year
    else if (month(mdy(marketingYearAdj$Date[row])) == 9 && month(mdy(marketingYearAdj$Date[row - 1])) == 8){
      next
    }
    # Regular trailing stop (outside the EYTS window); at most one trigger
    # per 7 days. Functions on adjusted marketing year.
    else if(isTrailingStop(marketingYearAdj$Percentile[row - 1], marketingYearAdj$Percentile[row]) && !(mdy(marketingYearAdj$Date[row]) %within% EYTSInterval)) {
      if(nrow(trailingStopTriggers) == 0 || difftime((mdy(marketingYearAdj$Date[row])), mdy(trailingStopTriggers$Date[nrow(trailingStopTriggers)])) >= 7){
        trailingStopTriggers = rbind(trailingStopTriggers, data.frame("Date" = marketingYearAdj$Date[row],
                                                                      "Previous Percentile" = marketingYearAdj$Percentile[row - 1],
                                                                      "Percentile" = marketingYearAdj$Percentile[row],
                                                                      "Type" = "Trailing Stop"))
      }
    }
    # End-of-year trailing stop (inside the EYTS window); functions on the
    # raw marketing year.
    # BUG FIX: the guard previously read `!nrow(trailingStopTriggers) == 0`,
    # which parses as `(!nrow(...)) == 0` -- it failed when the trigger
    # table was still empty and skipped the 7-day spacing check when it was
    # not. It now mirrors the guard of the regular trailing stop above.
    else if(isTrailingStop(marketingYear$Percentile[row - 1], marketingYear$Percentile[row]) && (mdy(marketingYear$Date[row]) %within% EYTSInterval)) {
      if(nrow(trailingStopTriggers) == 0 || difftime((mdy(marketingYear$Date[row])), mdy(trailingStopTriggers$Date[nrow(trailingStopTriggers)])) >= 7){
        trailingStopTriggers = rbind(trailingStopTriggers, data.frame("Date" = marketingYear$Date[row],
                                                                      "Previous Percentile" = marketingYear$Percentile[row - 1],
                                                                      "Percentile" = marketingYear$Percentile[row],
                                                                      "Type" = "End of Year Trailing Stop"))
      }
    }
    # Ten-day-high trigger; functions on the raw marketing year
    else if (isTenDayHigh(mdy(marketingYear$Date[row]), marketingYear$Price[row], marketingYear$Percentile[row],
                          cropYear$`Pre/Post Interval`$intervalPre, cropYear$`Pre/Post Interval`$intervalPost,
                          featuresObject$`95% of Ten Day High`, MY = FALSE)) {
      trailingStopTriggers = rbind(trailingStopTriggers, data.frame("Date" = marketingYear$Date[row],
                                                                    "Previous Percentile" = marketingYear$Percentile[row - 1],
                                                                    "Percentile" = marketingYear$Percentile[row],
                                                                    "Type" = "Ten Day High"))
    }
    # All-time-high trigger; functions on the raw marketing year
    else if (isAllTimeHigh(mdy(marketingYear$Date[row]), marketingYear$Price[row], marketingYear$Percentile[row],
                           cropYear$`Pre/Post Interval`$intervalPre, cropYear$`Pre/Post Interval`$intervalPost,
                           featuresObject$`95% of Ten Day High`, featuresObject$`All Time High`, MY = FALSE)) {
      trailingStopTriggers = rbind(trailingStopTriggers, data.frame("Date" = marketingYear$Date[row],
                                                                    "Previous Percentile" = marketingYear$Percentile[row - 1],
                                                                    "Percentile" = marketingYear$Percentile[row],
                                                                    "Type" = "All Time High"))
    }
  }
  cropYear[['TS Triggers']] = trailingStopTriggers
  return(cropYear)
}
if(type == "corn"){
  # Get the trailing stop triggers for each corn crop year and convert
  # the trigger dates from character to Date
  # (seq_along instead of 1:length: an empty list iterates zero times;
  # the old comment wrongly mentioned "price objective triggers")
  for(i in seq_along(Corn_CropYearObjects)) {
    Corn_CropYearObjects[[i]] = trailingStopTrigger(Corn_CropYearObjects[[i]], Corn_FeaturesObject)
    Corn_CropYearObjects[[i]]$`TS Triggers`$Date = mdy(Corn_CropYearObjects[[i]]$`TS Triggers`$Date)
  }
}
if(type == "soybean"){
  # Get the trailing stop triggers for each soybean crop year and convert
  # the trigger dates from character to Date
  for(i in seq_along(Soybean_CropYearObjects)) {
    Soybean_CropYearObjects[[i]] = trailingStopTrigger(Soybean_CropYearObjects[[i]], Soybean_FeaturesObject)
    Soybean_CropYearObjects[[i]]$`TS Triggers`$Date = mdy(Soybean_CropYearObjects[[i]]$`TS Triggers`$Date)
  }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.