blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d52cd58c9cac807c6a1cf3fddc66b9bd56941ba3 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ACSWR/examples/ps.Rd.R | 56d432008cefa89318ed1b0179d08d87b3e0151b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 607 | r | ps.Rd.R | library(ACSWR)
### Name: ps
### Title: Simulated Sample from Poisson Distribution
### Aliases: ps
### Keywords: maximum likelihood estimator
### ** Examples
## Plot the Poisson score function S(lambda) for each simulated sample
## (column) of the ACSWR 'ps' data set.
data(ps)
# per-sample size; captured lexically by poisson_score_fn below
n <- 10
sample_means <- colMeans(ps)
# Score function of the Poisson model: S(theta) = n * (xbar - theta) / theta
poisson_score_fn <- function(theta,xbar) n*(xbar-theta)/theta
# grid of candidate lambda values to evaluate the score over
theta <- seq(from=2,to=8,by=0.2)
# draw the score curve for the first sample's mean
plot(theta,sapply(theta,poisson_score_fn,xbar=sample_means[1]),"l",xlab=
expression(lambda),ylab=expression(S(lambda)),ylim=c(-5,15))
title(main="B: Score Function Plot of the Poisson Model")
# overlay the curves for samples 2..20
# (assumes ps has at least 20 columns -- TODO confirm)
for(i in 2:20)
lines(theta,sapply(theta,poisson_score_fn,xbar=sample_means[i]),"l")
# reference lines: vertical at lambda = 4, horizontal at S(lambda) = 0
abline(v=4)
abline(h=0)
|
757c0eae40220204847dd33d6bf7bd43ecc018bf | e1f069cb783bb03230f0680a722a425cac31d7a3 | /Project/nn_v2.r | c42922af4ed9ed0cdedbeef546e822e38de45982 | [] | no_license | iZome/MachineLearning-1 | 9cda4d974e5f274173c468deee784f9499fb1b8c | 4c23d2a1da4aea7f1679e3993c32ae6a2e9cdfac | refs/heads/master | 2021-08-15T11:29:29.948639 | 2017-11-17T18:53:35 | 2017-11-17T18:53:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,233 | r | nn_v2.r | rm(list=ls())
# Setup: load the digit data, recode labels to a binary odd/even outcome,
# split into train/test, and drop near-zero-variance pixel columns.
library(mxnet)
library(caret)

input <- read.csv("Train_Digits_20171108.csv")
# Recode the 0-9 digit label to odd (1) / even (0).
input$Digit <- input$Digit %% 2
input$Digit <- as.factor(input$Digit)

# Split into training (80%) and test (20%) sets.
# BUG FIX: the original `sample(nrow(input) * 0.8)` merely permuted the
# indices 1..0.8n, so the "test" set was always the last 20% of the file.
# Draw a genuinely random 80% subset of all rows instead. (Also avoid
# shadowing base::sample with a variable called `sample`.)
train_idx <- sample(nrow(input), size = floor(0.8 * nrow(input)))
train <- input[train_idx, ]
test <- input[-train_idx, ]

# Identify near-zero-variance predictors (mostly-constant pixels) on the
# training set only, then drop them from both sets.
nzr <- nearZeroVar(train[, -1], saveMetrics = TRUE, freqCut = 10000/1, uniqueCut = 1/7)
cutvar <- rownames(nzr[nzr$nzv == TRUE, ])
var <- setdiff(names(train), cutvar)
train <- train[, var]
test <- test[, var]
# Multi-layer perceptron symbol: 784 -> 128 -> 64 -> 10 with ReLU
# activations and a softmax output. Layer names ('fc1', 'relu1', ...)
# are kept identical to the original so saved parameters still bind.
get_mlp <- function() {
  net <- mx.symbol.Variable('data')
  net <- mx.symbol.FullyConnected(data = net, name = 'fc1', num_hidden = 128)
  net <- mx.symbol.Activation(data = net, name = 'relu1', act_type = "relu")
  net <- mx.symbol.FullyConnected(data = net, name = 'fc2', num_hidden = 64)
  net <- mx.symbol.Activation(data = net, name = 'relu2', act_type = "relu")
  net <- mx.symbol.FullyConnected(data = net, name = 'fc3', num_hidden = 10)
  # softmax output is the last expression and therefore the return value
  mx.symbol.SoftmaxOutput(data = net, name = 'softmax')
}
# LeNet-style CNN symbol: two conv -> tanh -> max-pool stages followed
# by two fully-connected layers and a 10-class softmax output. The
# driver below pairs it with input shape c(28, 28, 1).
get_lenet <- function() {
data <- mx.symbol.Variable('data')
# first conv stage: 20 5x5 filters, tanh, then 2x2 max-pool (stride 2)
conv1 <- mx.symbol.Convolution(data=data, kernel=c(5,5), num_filter=20)
tanh1 <- mx.symbol.Activation(data=conv1, act_type="tanh")
pool1 <- mx.symbol.Pooling(data=tanh1, pool_type="max",
kernel=c(2,2), stride=c(2,2))
# second conv stage: 50 5x5 filters, tanh, then 2x2 max-pool (stride 2)
conv2 <- mx.symbol.Convolution(data=pool1, kernel=c(5,5), num_filter=50)
tanh2 <- mx.symbol.Activation(data=conv2, act_type="tanh")
pool2 <- mx.symbol.Pooling(data=tanh2, pool_type="max",
kernel=c(2,2), stride=c(2,2))
# first fully-connected layer (500 hidden units) on the flattened maps
flatten <- mx.symbol.Flatten(data=pool2)
fc1 <- mx.symbol.FullyConnected(data=flatten, num_hidden=500)
tanh3 <- mx.symbol.Activation(data=fc1, act_type="tanh")
# second fully-connected layer: one unit per class
fc2 <- mx.symbol.FullyConnected(data=tanh3, num_hidden=10)
# softmax output / loss; returned as the last expression
lenet <- mx.symbol.SoftmaxOutput(data=fc2, name='softmax')
lenet
}
# Factory returning a data-iterator builder. The returned closure
# captures `data_shape` (c(784) for the MLP, c(28, 28, 1) for LeNet)
# and builds MNIST train/validation iterators from the parsed CLI args.
get_iterator <- function(data_shape) {
get_iterator_impl <- function(args) {
data_dir = args$data_dir
# NOTE(review): download_() is not defined anywhere in this file -- it
# comes from the upstream mxnet example scripts; confirm it is sourced
# before running with a local (non-URL) data dir.
if (!grepl('://', args$data_dir))
download_(args$data_dir)
# flat = TRUE feeds flattened 784-vectors; a 3-element shape means
# image tensors for the CNN.
flat <- TRUE
if (length(data_shape) == 3) flat <- FALSE
train = mx.io.MNISTIter(
image = paste0(data_dir, "train-images-idx3-ubyte"),
label = paste0(data_dir, "train-labels-idx1-ubyte"),
input_shape = data_shape,
batch_size = args$batch_size,
shuffle = TRUE,
flat = flat)
val = mx.io.MNISTIter(
image = paste0(data_dir, "t10k-images-idx3-ubyte"),
label = paste0(data_dir, "t10k-labels-idx1-ubyte"),
input_shape = data_shape,
batch_size = args$batch_size,
flat = flat)
# last expression of the closure: the list of both iterators
ret = list(train=train, value=val)
}
get_iterator_impl
}
# Parse command-line options for the training script and return the
# parsed argument list.
# NOTE(review): ArgumentParser() comes from the 'argparse' package,
# which is never loaded in this file -- add library(argparse) (or call
# argparse::ArgumentParser) before parse_args() is used.
parse_args <- function() {
parser <- ArgumentParser(description='train an image classifer on mnist')
parser$add_argument('--network', type='character', default='mlp',
choices = c('mlp', 'lenet'),
help = 'the cnn to use')
parser$add_argument('--data-dir', type='character', default='mnist/',
help='the input data directory')
parser$add_argument('--gpus', type='character',
help='the gpus will be used, e.g "0,1,2,3"')
parser$add_argument('--batch-size', type='integer', default=128,
help='the batch size')
parser$add_argument('--lr', type='double', default=.05,
help='the initial learning rate')
parser$add_argument('--mom', type='double', default=.9,
help='momentum for sgd')
parser$add_argument('--model-prefix', type='character',
help='the prefix of the model to load/save')
parser$add_argument('--num-round', type='integer', default=10,
help='the number of iterations over training data to train the model')
parser$add_argument('--kv-store', type='character', default='local',
help='the kvstore type')
parser$parse_args()
}
# Driver: parse CLI options, then select the network symbol and the
# matching input shape.
args = parse_args()
if (args$network == 'mlp') {
data_shape <- c(784)
net <- get_mlp()
} else {
data_shape <- c(28, 28, 1)
net <- get_lenet()
}
# NOTE(review): the script ends here -- `net`, `data_shape` and
# get_iterator() are constructed but no training call is present in
# this file.
|
bcafba605e6e8f304b0fc51f4312d9d15306f27b | fe864041ef16b3cab0202f96e204e2c8af7f5950 | /DHARMa/man/testZeroInflation.Rd | 5efbce4e744cd1bedf5e2752c04f2cb291f5533a | [] | no_license | PasqualeDente/DHARMa | 2bfa352829c442569df08065d5e15a4fff0c58bd | 525d04ba22c6d146c38c4f403a7ca7f01e36a17a | refs/heads/master | 2021-01-01T15:56:06.470762 | 2017-05-09T08:17:58 | 2017-05-09T08:17:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 944 | rd | testZeroInflation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/testsResiduals.R
\name{testZeroInflation}
\alias{testZeroInflation}
\title{Tests for zero-inflation}
\usage{
testZeroInflation(simulationOutput, plot = T, alternative = "more")
}
\arguments{
\item{simulationOutput}{an object with simulated residuals created by \code{\link{simulateResiduals}}}
\item{plot}{whether to plot output}
\item{alternative}{whether to test for 'more', 'less', or 'both' more or less zeros in the observed data}
}
\description{
This function compares the observed number of zeros with the zeros expected from simulations.
}
\details{
Plots the expected distribution of the number of zeros, obtained from the simulations, against the observed number of zeros.
}
\seealso{
\code{\link{testUniformity}}, \code{\link{testSimulatedResiduals}}, \code{\link{testTemporalAutocorrelation}}, \code{\link{testSpatialAutocorrelation}}, \code{\link{testOverdispersion}}, \code{\link{testOverdispersionParametric}}
}
|
ddac7f3c4a73a9aca90ffa54bc75add2815d1af4 | a01970998d9ecd2a59d64d4b23889267b32f14cf | /manuscript/scripts/data_s1_ad.R | 2d06fb95e3daa849666146e16a6564ea2fd70607 | [] | no_license | kgweisman/dimkid | 5ff19c13ff56c4e9a1d3a4a886ae59d1135d1bd9 | b03281ac5cd1ab5c5f57a87339d211289cd522ee | refs/heads/master | 2021-07-17T17:25:48.247433 | 2021-07-10T20:03:27 | 2021-07-10T20:03:27 | 60,885,387 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,799 | r | data_s1_ad.R | # STUDY 1: ADULTS
# Study 1 (adults): read the anonymized survey data, tidy it, reshape
# to wide format, and mean-impute missing responses.
# NOTE(review): this file uses several superseded dplyr/tidyr verbs
# (mutate_at()/funs()/spread()); they still work but funs() warns under
# dplyr >= 0.8 -- consider across()/pivot_wider() when convenient.
# read in & tidy data
d1_ad <- read.csv("./anonymized_data/study1_adults_anonymized.csv") %>%
mutate(study = "Study 1: Adults",
age_group = "adults") %>%
# strip trailing whitespace from the demographic fields
mutate_at(vars(ethnicity, religionChild, religionNow),
funs(gsub(" +$", "", .))) %>%
# add *_cat versions: any multi-word answer is recoded as "multi"
mutate_at(vars(ethnicity, religionChild, religionNow),
funs(cat = case_when(grepl(" ", as.character(.)) ~ "multi",
TRUE ~ .))) %>%
select(study, subid:country_selfrep, age_group, ends_with("_cat")) %>%
rename(character = charName,
cap_abbrev = capacity,
response_num = responseNum) %>%
# shorten long capacity wordings into compact "..." labels
mutate(capWording = as.character(capWording),
capacity = case_when(
grepl("--", capWording) ~ gsub(" --.*$", "...", capWording),
grepl("close by or far away", capWording) ~ "sense...far away",
grepl("understand how somebody else is feeling", capWording) ~
"understand how someone...feeling",
TRUE ~ capWording)) %>%
distinct()
# clean data: drop implausibly fast responses below 250 (rt presumably
# in ms -- TODO confirm); rows with missing rt are kept
d1_ad <- d1_ad %>%
filter(rt >= 250 | is.na(rt))
# make wideform: one row per subject x character, one column per capacity
d1_ad_wide <- d1_ad %>%
mutate(subid_char = paste(subid, character, sep = "_")) %>%
select(subid_char, capacity, response_num) %>%
spread(capacity, response_num) %>%
column_to_rownames("subid_char")
# impute missing values using the mean by character and capacity
d1_ad_wide_i <- d1_ad_wide %>%
rownames_to_column("subid_char") %>%
mutate(subid = gsub("_.*$", "", subid_char),
character = gsub("^.*_", "", subid_char)) %>%
group_by(character) %>%
mutate_at(vars(-c(subid, character, subid_char)),
funs(replace(., which(is.na(.)), mean(., na.rm = T)))) %>%
ungroup() %>%
select(-subid, -character) %>%
column_to_rownames("subid_char")
|
9b50e7d9de71fabd6cce067872d0b1c578f6e3bf | 6c5d30f9722967d9a40f76a4397728efe9b52d4c | /R/scale_colour_eyp.R | ea8107e1b4575ef9550eccde192935640fa39738 | [] | no_license | tim-dim/rlytics | 1944fc713a558462f1a349c06b1ee0752f1d0945 | 526e575f287b23850656fb130243443c0dbe26d9 | refs/heads/master | 2021-01-19T05:18:34.283457 | 2019-02-28T16:05:40 | 2019-02-28T16:05:40 | 100,576,586 | 1 | 1 | null | 2018-05-14T08:44:42 | 2017-08-17T07:48:46 | R | UTF-8 | R | false | false | 648 | r | scale_colour_eyp.R | #' EYP Color Palette (Discrete) and Scales
#'
#' An 8-color qualitative discrete palette built from the EYP colour
#' scheme; hex values are taken from \code{ggthemes_data$eyp}
#' (see references), plus matching ggplot2 colour/fill scales.
#'
#' @rdname eyp
#' @references
#' Umrechnung EYP-Farbschema in Hex-Werte.pptx
#'
#'
#' @export
#' @inheritParams ggplot2::scale_colour_hue
#' @family colour
#' @example inst/examples/ex-eyp.R
eyp_pal <- function() {
manual_pal(unname(ggthemes_data$eyp))
}
# Discrete colour scale mapped to the EYP palette.
#' @rdname eyp
#' @export
scale_colour_eyp <- function(...) {
discrete_scale("colour", "eyp", eyp_pal(), ...)
}
# US-spelling alias for scale_colour_eyp().
#' @rdname eyp
#' @export
scale_color_eyp <- scale_colour_eyp
# Discrete fill scale mapped to the EYP palette.
#' @rdname eyp
#' @export
scale_fill_eyp <- function(...) {
discrete_scale("fill", "eyp", eyp_pal(), ...)
}
|
cf411699726955a7a80ef47cea3a91c3d4f4988a | 0a32ba09b10150aac5c37e4a0dd0e01655cca62b | /man/CAFdist.Rd | 25b5f49a27a695cba3efa0dc72bfe9ee94334aa3 | [] | no_license | sn248/RcppCAF | 64ac3aff4d7e85c22cb80ef216aacede9ec6e926 | 83776df8e5c846db393e6f4805d3ea042d1110f7 | refs/heads/master | 2021-01-16T22:50:28.385972 | 2015-10-06T16:52:33 | 2015-10-06T16:52:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,148 | rd | CAFdist.Rd | \name{CAFdist}
\alias{CAFdist}
\title{Parallel Distance Matrix Computation}
\description{
This function computes and returns the distance matrix computed by
using the specified distance measure to compute the distances between
the rows of a data matrix, mimicking the structure and return value
of the built in \link{dist} function.
}
\usage{
CAFdist(x, method = "euclidean", diag = FALSE, upper = FALSE, p = 2, cores = 1)
}
\arguments{
\item{x}{a numeric matrix, data frame or \code{"dist"} object.}
\item{method}{the distance measure to be used. This must be one of
\code{"euclidean"}, \code{"maximum"}, \code{"manhattan"},
\code{"canberra"}, \code{"binary"} or \code{"minkowski"}.
Any unambiguous substring can be given.}
\item{diag}{logical value indicating whether the diagonal of the
distance matrix should be printed by \code{print.dist}.}
\item{upper}{logical value indicating whether the upper triangle of the
distance matrix should be printed by \code{print.dist}.}
\item{p}{The power of the Minkowski distance.}
\item{cores}{The number of CAF workers among which computation should be
distributed.}
}
\details{
See \code{\link{dist}} for additional details.
}
\value{
\code{CAFdist} returns an object of class \code{"dist"}.
The lower triangle of the distance matrix stored by columns in a
vector, say \code{do}. If \code{n} is the number of
observations, i.e., \code{n <- attr(do, "Size")}, then
for \eqn{i < j \le n}, the dissimilarity between (row) i and j is
\code{do[n*(i-1) - i*(i-1)/2 + j-i]}.
The length of the vector is \eqn{n*(n-1)/2}, i.e., of order \eqn{n^2}.
See \code{\link{dist}} for additional details.
}
\references{
Becker, R. A., Chambers, J. M. and Wilks, A. R. (1988)
\emph{The New S Language}.
Wadsworth & Brooks/Cole.
Mardia, K. V., Kent, J. T. and Bibby, J. M. (1979)
\emph{Multivariate Analysis.} Academic Press.
Borg, I. and Groenen, P. (1997)
\emph{Modern Multidimensional Scaling. Theory and Applications.}
Springer.
}
\seealso{
\code{\link{dist}}.
}
\examples{
x <- matrix(rnorm(100), nrow = 5)
CAFdist(x, cores = 2)
}
|
9aca5a2dcb4449587d624a1d5e09a714885bf68b | cddc85b8555390e5c0fbc81a3c500529a6a2599c | /beta_div.R | ec32125ad0cee51ea2fa570002797bc1592a9221 | [] | no_license | guigotoe/16Srlib | 5c045a05744651519918f19495e6bf76b1ccde62 | 688ef5b76936ae7050fbf74e81b2078db28e1865 | refs/heads/master | 2020-12-25T14:23:32.002348 | 2016-10-20T10:20:27 | 2016-10-20T10:20:27 | 67,235,337 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,174 | r | beta_div.R | ####################################################
# By Guillermo Torres PhD.c #
# Institute of Clinical Molecular Biology (IKMB) #
# Christian-Albrechts-Universitat zu Kiel (CAU) #
####################################################
# Last update: September 2016
# Created: September 2016
#
# This is written as part of 16S - Mangrove analysis, but could be
# split to serve different purposes.
####################################################
# Prepare and filters the data from mothur.
# How to use:
# Rscript beta_div.R /path/dataF.rds association_variable co-variants/path/outfolder/
# Rscript beta_div.R ~/16Srlib_test/results/dataF.rds Salinity Limo,Arena ~/16Srlib_test/results/
#* requirements *#
# Return the absolute, normalized path of the currently running script:
# - launched via Rscript: recovered from the --file= command-line arg;
# - source()'d in RStudio: from `fileName` in the top calling frame;
# - source()'d at the R console: from `ofile` in the top calling frame.
get_script_path <- function() {
cmdArgs = commandArgs(trailingOnly = FALSE)
needle = "--file="
match = grep(needle, cmdArgs)
if (length(match) > 0) {
# Rscript: strip the --file= prefix to recover the script path
return(normalizePath(sub(needle, "", cmdArgs[match])))
} else {
ls_vars = ls(sys.frames()[[1]])
if ("fileName" %in% ls_vars) {
# Source'd via RStudio
return(normalizePath(sys.frames()[[1]]$fileName))
} else {
# Source'd via R console
return(normalizePath(sys.frames()[[1]]$ofile))
}
}
}
# Locate this script's directory and source the shared toolbox.
script.basename <- dirname(get_script_path())
toolbox <- paste(sep="/", script.basename, "toolbox.R")
# NOTE(review): the absolute path below overrides the computed location
# above, tying the script to one machine -- remove the hard-coded path
# (and the commented Mac variant) to restore portability.
toolbox <- '/home/torres/Documents/Projects/Metagenome/r_scripts/16Srlib/toolbox.R'
#toolbox <- "/Users/guillermotorres/Documents/Proyectos/Doctorado/16Srlib/toolbox.R"
source(toolbox)
p <- '/home/torres/ikmb_storage/projects/16Srlib_test/'
#p <- '/Users/guillermotorres/Documents/Proyectos/Doctorado/16Srlib_test/'
# packages() is defined in toolbox.R -- presumably loads/installs the
# listed libraries; TODO confirm.
packages(c("metagenomeSeq","reshape2"))
###### end ######
#* input *
# Inputs are hard-coded here; the commented commandArgs() calls show
# the intended CLI usage documented in the header above.
f <- paste(p,'results/dataF.rds',sep='') #commandArgs()[6] # paste(p,'results/dataF.rds',sep='') #
vs <- 'Salinity' #commandArgs()[7]# 'Salinity' #
#vs <- unlist(strsplit(vs,','))
cf <- ''# commandArgs()[8]# # ,CT,NT,Ca,K,Mg,Na,CICE,Cu,S,P,Fe,Mn,Zn,B,Arcilla,Limo,Arena'
cf <- unlist(strsplit(cf,','))
#th <- 0.90
o <- paste(p,'results/',sep='') #commandArgs()[9] # paste(p,'results/',sep='') #
## ##
# Load the filtered data object produced by the upstream prep step.
df <- readRDS(f)
|
ee2b1948992877432e009c86ef897b569cc0d4d7 | 7c1c4f8059bfa81f25950412b5d24dc8a84ef399 | /edit_text_variables.R | 87c7beed9ab183ba3ebf5b984d62d7fa4594b214 | [] | no_license | JackJianKuang/data_analysis_note | a509c5a780952733b70e2d20f2def1acbc70989c | f7fab5ee9e559896e8053ade12353cff5ab404e9 | refs/heads/master | 2021-01-10T16:27:16.270492 | 2015-12-04T05:29:08 | 2015-12-04T05:29:08 | 46,682,452 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 946 | r | edit_text_variables.R | if (!file.exists("./data")) {
# create ./data on first run (the enclosing if-guard begins above)
dir.create("./data")
}
# Notes on editing text variables, using the Baltimore speed-camera CSV.
# The "# [1] ..." comment lines below are captured console output.
fileUrl <- "https://data.baltimorecity.gov/api/views/dz54-2aru/rows.csv?accessType=DOWNLOAD"
download.file(fileUrl, destfile = "./data/cameras.csv")
cameraData <- read.csv("./data/cameras.csv")
# Fixing character vectors
## tolower(), toupper(): normalize the case of column names
names(cameraData)
# [1] "address"      "direction"    "street"       "crossStreet"  "intersection"
# [6] "Location.1"
tolower(names(cameraData))
# [1] "address"      "direction"    "street"       "crossstreet"  "intersection"
# [6] "location.1"
## strsplit(): split names on a literal dot (regex-escaped as \\.)
splitNames = strsplit(names(cameraData), "\\.")
splitNames[[5]]
# [1] "intersection"
splitNames[[6]]
# [1] "Location" "1"
## sapply(): apply a first-element extractor over the split list
splitNames[[6]][1]
# [1] "Location"
firstElement <- function(x){x[1]}
sapply(splitNames, firstElement)
# [1] "address"      "direction"    "street"       "crossStreet"  "intersection"
# [6] "Location"
## sub()
# NOTE(review): section stub -- no sub() example follows in this file.
|
0bc597001c237ed9dc8cba867edff802c0efc622 | 511687b0dcfcc27ff4f0bfe403b89dad04b74e46 | /rankhospital.R | c21f4abd45bf893ab3b8258e50c6f72ab1c0f567 | [] | no_license | skirmer/R_Programming | a3501b65834d03a70e06279b43809e93a643bba8 | 61d9060a01061f9cde28554b189f2c7ebd59621c | refs/heads/master | 2021-01-22T05:24:34.842808 | 2015-07-24T18:00:10 | 2015-07-24T18:00:10 | 39,648,368 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,341 | r | rankhospital.R | # Assignment3 Part 2
# Assignment 3, Part 2 (continued): build the outcm_short data frame
# that rankhospital() below reads from the calling environment.
#Get into the right directory
getwd()
# setwd('Assignment3')
#open up the source data; "Not Available" entries become NA on read
hospitals <- read.csv('hospital-data.csv')
outcomes <- read.csv('outcome-of-care-measures.csv', header=TRUE,
na.strings="Not Available")
#Create a subset of the original datasets to make it easier to work with
# (hosp_short is not used by rankhospital() in this file)
hosp_short <- subset(hospitals, select = c(Hospital.Name, City, State))
outcm_short <- subset(outcomes, select = c(Hospital.Name, City, State, Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack, Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure, Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia))
#Rename the outcome columns to be more useful
colnames(outcm_short)[4]<- "HeartAttackMort"
colnames(outcm_short)[5]<- "HeartFailMort"
colnames(outcm_short)[6]<- "PneumoniaMort"
# Rank hospitals within a state by 30-day mortality for a condition.
#
# Args:
#   ST:      two-letter state abbreviation (validated against a fixed list).
#   disease: one of "heart attack", "heart failure", "pneumonia".
#   num:     rank to return -- a number, "best" (rank 1) or "worst" (last).
# Returns:
#   The hospital name (character) at the requested rank, with ties broken
#   alphabetically by name; NA if the rank is out of range.
# Reads the pre-built `outcm_short` data frame from the calling environment.
#
# Fixes vs. the original: order() is given a vector via [[ ]] instead of a
# one-column data frame; the `<<-` global side effects are replaced with
# local variables; keyword checks happen before numeric comparison (the
# original relied on "best" <= n being FALSE by string coercion); an
# out-of-range rank now returns a real NA rather than the string "NA".
rankhospital <- function(ST, disease, num) {
  # Map the condition to its mortality column; anything else is an error.
  outcome <- switch(disease,
    "heart attack" = 'HeartAttackMort',
    "heart failure" = 'HeartFailMort',
    "pneumonia" = 'PneumoniaMort',
    stop("invalid outcome")
  )
  # Validate the state abbreviation.
  states <- c("AK","AL","AZ","AR", "CA", "CO", "CT","DE", "FL", "GA", "HI",
              "ID","IL","IN","IA","KS","KY","LA","ME",
              "MD","MA","MI","MN", "MS","MO",
              "MT","NE","NV","NH","NJ", "NM",
              "NY","NC","ND", "OH","OK","OR", "PA", "RI",
              "SC","SD", "TN", "TX","UT", "VT","VA", "WA",
              "WV","WI", "WY")
  if (!ST %in% states)
    stop("invalid state")
  # Keep this state's hospitals that have a recorded rate for the
  # condition, then rank by rate ascending, ties broken by name.
  filtered <- outcm_short[outcm_short$State == ST, ]
  filtered <- filtered[!is.na(filtered[[outcome]]), ]
  ranked <- filtered[order(filtered[[outcome]], filtered$Hospital.Name), ]
  n_ranked <- nrow(ranked)
  # Resolve the requested rank.
  if (identical(num, "best")) {
    choice <- 1L
  } else if (identical(num, "worst")) {
    choice <- n_ranked
  } else if (is.numeric(num) && num >= 1 && num <= n_ranked) {
    choice <- num
  } else {
    # rank out of range (or not a recognised keyword/number)
    return(NA_character_)
  }
  as.character(ranked[choice, "Hospital.Name"])
}
rankhospital("MD","heart attack", "worst") |
e91b64cde6d93342e13d5e8cfb0efe69d708e249 | 8886a2a44be4e9eee6fff0e8868de9ef73cee5a5 | /R/mixpack-package.r | 894abbafcab7122355ce084021d9468bcd7bfc90 | [] | no_license | cran/mixpack | cf0bfb57d453d1125adc7b09cb0dd24163baa422 | 698333c0c4aeb113f4b952f0b6bb0a36b8dcdbe4 | refs/heads/master | 2021-01-10T13:14:56.851207 | 2017-01-27T09:18:30 | 2017-01-27T09:18:30 | 48,084,139 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 63 | r | mixpack-package.r | #' mixpack.
#'
#' Package-level documentation stub: documenting \code{NULL} with
#' \code{@name} and \code{@docType package} is the roxygen2 idiom for
#' attaching help to the package itself rather than to a function.
#' @name mixpack
#' @docType package
NULL
|
020ad2520a240f5cb9f99d8c69ea13336641cc7e | 2b4e13cf56d27be0e3e791a9c59d1c43665df452 | /R/GetBounds.R | d84cc3c653cba58b8ef45ded1ae9bfc32a0ab050 | [] | no_license | IanMadlenya/rcss | dc16f85956cc9f8c05881965225d12107c183a79 | f6463a8a218c72ddbb773db14b956b82aab72d57 | refs/heads/master | 2020-05-24T18:07:57.996818 | 2017-01-27T22:12:16 | 2017-01-27T22:12:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 621 | r | GetBounds.R | ## Copyright 2015 <Jeremy Yee> <jeremyyee@outlook.com.au>
## Obtaining confidence intervals after performing diagnostic checking
################################################################################
# Two-sided (1 - alpha) confidence bounds from primal/dual path values.
#
# Args:
#   duality:  list with 3-d arrays `primal` and `dual`; paths run along
#             the third dimension.
#   alpha:    significance level (e.g. 0.05 for a 95% interval).
#   position: index into the second dimension to evaluate.
# Returns:
#   c(lower, upper): primal mean minus its normal-theory standard-error
#   margin, and dual mean plus its margin.
GetBounds <- function(duality, alpha, position) {
  n_path <- dim(duality$primal)[3]
  z <- qnorm(1 - alpha / 2)
  primal_paths <- duality$primal[1, position, ]
  dual_paths <- duality$dual[1, position, ]
  lower <- mean(primal_paths) - z * sd(primal_paths) / sqrt(n_path)
  upper <- mean(dual_paths) + z * sd(dual_paths) / sqrt(n_path)
  c(lower, upper)
}
|
c1e4c73a0dc33333e470ee9a09db25ca4ad122fc | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/RandomFields/examples/RFgetModelInfo.Rd.R | 149db9a0e7b5f0c81ce8c74e8f9568b2fa73ed31 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 656 | r | RFgetModelInfo.Rd.R | library(RandomFields)
### Name: RFgetModelInfo
### Title: Information on RMmodels
### Aliases: RFgetModelInfo RFgetModelInfo_register RFgetModelInfo_model
### Keywords: spatial
### ** Examples
## Extracted example: query model information from the RandomFields
## simulation registers.
## Don't show:
StartExample()
## End(Don't show)
RFoptions(seed=0) ## *ANY* simulation will have the random seed 0; set
## RFoptions(seed=NA) to make them all random again
# Simulate with storing=TRUE so the model stays in the register, then
# query the register with no arguments.
model <- RMexp(scale=4, var=2) + RMnugget(var=3) + RMtrend(mean=1)
z <- RFsimulate(model, 1:4, storing=TRUE)
RFgetModelInfo()
# Passing a model object (here with NA parameters still to be
# estimated) queries information about that model directly.
model <- RMwhittle(scale=NA, var=NA, nu=NA) + RMnugget(var=NA)
RFgetModelInfo(model)
## Don't show:
FinalizeExample()
## End(Don't show)
|
6d42aac92957f0a30321524532a862b6dfc993c2 | d365fec6f30c1e294aa99e19b0a17fd0eeafbb96 | /tests/testthat/test-scrape_entry.R | 99b0659185d5a7a295e43bda370e6bf6e71c4687 | [] | no_license | sefabey/eksiR | 3cc91da12a523fac8ead67ef795bf9e00437ed11 | d7eb350a8a7a9bd9972c49f6ed557899bdd34c88 | refs/heads/master | 2020-03-27T07:22:08.044721 | 2018-09-22T12:43:30 | 2018-09-22T12:43:30 | 146,185,282 | 2 | 0 | null | 2018-09-21T20:56:01 | 2018-08-26T13:59:41 | R | UTF-8 | R | false | false | 1,438 | r | test-scrape_entry.R | context("eksi_scrape_entry ")
# Tests for eksi_scrape_entry().
# NOTE(review): these tests presumably perform live network access
# (eksi_scrape_entry looks like a web scraper -- confirm), so they will
# be slow and fragile offline; consider skip_on_cran()/skip_if_offline().
test_that("scraping an existing entry works as expected", {
# test 1=====
# NOTE(review): loading all of tidyverse inside a test is heavy; only
# select() is used here.
library(tidyverse)
result1 <- eksi_scrape_entry(1) %>%
select(text)# first ever written eksi entry from the year 1999, 'pena'
# reference fixture recorded in test_data/result1.rds
check1 <- readRDS('test_data/result1.rds') %>%
select(text)#
expect_equal(result1,check1)
})
test_that("scraping a removed entry works as expected", {
# test 2=====
result2 <- eksi_scrape_entry(2) # a deleted/removed entry
# expected: a one-row tibble of NAs carrying the HTTP 404 error message
check2 <- tibble::tibble(id="2", text=NA, date_time=NA, author_name=NA, author_id=NA, favourite_count=NA, title_text=NA, title_id=NA, title_slug=NA, error_message='Error in open.connection(x, "rb") : HTTP error 404.
')
expect_equal(result2,check2)
})
test_that("exporting an existing entry to a .csv file works as expected", {
# test 3======
# tests 2 aspects:
# 1. Exported csv file has the correct name.
# 2. Exported csv file has the correct size.
# NOTE(review): asserting on file.size() is brittle (line endings,
# encoding, upstream content changes); comparing parsed contents would
# be more robust. The exported file is also left behind afterwards.
eksi_scrape_entry(76805451, export_csv = T)
result3 <- file.size("eksi_entry_no_76805451.csv")
check3 <- 676
expect_equal(result3, check3)
})
test_that("exporting deleted/removed entry to a csv file file works as expected", {
# test 4======
# tests 2 aspects:
# 1. Exported csv file has the correct name.
# 2. Exported csv file has the correct size.
# NOTE(review): same file-size brittleness as test 3.
eksi_scrape_entry(3, export_csv = T)
result4 <- file.size("eksi_entry_no_3.csv")
check4 <- 184
expect_equal(result4, check4)
})
|
8e2ec8e555020a065737a95b7ceb93e605dd883e | 30897ac2c0439caa254c09b06f6d7028da2e227a | /man/geom_volcano_text.Rd | 8fa2c272b6bd00da010ab105f6c5fbc3d1edc3a1 | [] | no_license | xiayh17/RNAseqStat2 | 41e824cbddf6cc717ded754f8333234af7a2c20a | 4ae8e3a7a3062c3be0ecfea7811320c7f17bfc17 | refs/heads/master | 2023-05-23T22:27:06.868461 | 2023-05-10T12:47:34 | 2023-05-10T12:47:34 | 458,684,900 | 9 | 6 | null | null | null | null | UTF-8 | R | false | true | 621 | rd | geom_volcano_text.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Visualization_DEGvolcano.R
\name{geom_volcano_text}
\alias{geom_volcano_text}
\title{A preset geom of \code{\link[ggrepel]{geom_text_repel}}}
\usage{
geom_volcano_text(
data,
mapping = NULL,
nudge_x = NULL,
nudge_y = -1,
hjust = 0,
size = 1.8,
direction = "y",
segment.size = 0.1,
segment.linetype = 6,
max.overlaps = 10,
max.iter = 1e+06,
max.time = 10,
min.segment.length = 0,
fontface = "bold",
family = "Times",
...
)
}
\value{
geom
}
\description{
A preset geom of \code{\link[ggrepel]{geom_text_repel}}
}
|
0a79a46a5473b93d7f7a833c433da913457ffa99 | 999b24cc5fa218e80af339f4952e6a62d561f781 | /ggplots.R | 8d062851040cf50dadd1c561d55ec088fbb18a18 | [] | no_license | ShivKumar95/Learning-R | 81d0fef098a724ef9be6c2ba0d75aa1cd02f3841 | ea36c1d9453e3a645470eaf50c794f042963e085 | refs/heads/master | 2022-07-17T02:18:59.493185 | 2020-05-21T06:00:23 | 2020-05-21T06:00:23 | 261,987,271 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,604 | r | ggplots.R | # ggplot
# Tutorial notes: building a ggplot2 scatterplot of US gun murders
# (dslabs::murders) layer by layer.
# components to remember
# 1. Data: The data set being summarized
# 2. Geometry: The type of plot (scatterplot, boxplot, histogram, qqplot, smooth density, etc.)
# 3. Aesthetics mapping: Variables mapped to visual cues, such as x-axis and y-axis values and color
# There are additional components such as scale, labels, title, legend, theme/style
library(dslabs)
library(ggplot2)
library(tidyverse)
library(dplyr)
data(murders)
murders %>% ggplot()
# note that we get a blank grey background as we haven't added objects or layers
p<- ggplot(data = murders)
# adding points to predefined ggplot objects
# First thing to add is geometry
# we are going to do a scatter plot; geom_point is used
# aes is a function inside that specifies the aesthetics
p + geom_point(aes(population/10^6, total))
# Note that now we have a scatter plot with x and y axis
# adding text layer
p + geom_point(aes(population/10^6, total))+ geom_text(aes(population/10^6,total, label = abb))
# we observe that the abbreviation labels now cover the points
# we can manually change the size of the points
p + geom_point(aes(population/10^6, total), size = 3)+ geom_text(aes(population/10^6,total, label = abb))
# now we face another problem: the abbreviations are not visible
# we can use the nudge_x argument to move the labels sideways a bit
p + geom_point(aes(population/10^6, total), size = 3)+ geom_text(aes(population/10^6,total, label = abb), nudge_x = 1)
# we can simplify the code using a global aesthetic
p <- ggplot(data = murders,aes(population/10^6,total, label = abb))
p + geom_point( size = 3) + geom_text(nudge_x = 1.5)
# a local aesthetic overrides the global one
# change in the scale
# log transformation of the x axis and y axis
p + geom_point( size = 3) + geom_text(nudge_x = 0.075) + scale_x_log10() + scale_y_log10()
# notice that after the log transform the nudge has to be readjusted
# Add labels and titles
p + geom_point( size = 3) + geom_text(nudge_x = 0.075) + scale_x_log10() + scale_y_log10() + xlab("Population in million (log scale)") + ylab("Total number of murders (log scale)") + ggtitle("US Gun Murders in 2010")
# to add the labels on the x axis and y axis we use xlab() and ylab()
# to add the title we use the function ggtitle()
# since the code is very long, redefine p with everything except geom_point
p <- p + geom_text(nudge_x = 0.075) + scale_x_log10() + scale_y_log10() + xlab("Population in million (log scale)") + ylab("Total number of murders (log scale)") + ggtitle("US Gun Murders in 2010")
# adding colour to the points: making all points blue
p + geom_point(size = 3, color = "blue")
# but the goal is to have color correspond to the region of each state
p + geom_point(aes(col = region), size = 3)
# the states are now differentiated by region
# note that ggplot automatically adds a legend
# adding a line with the average murder rate
# define a variable r for the overall rate (murders per million people)
r <- murders %>% summarise(rate = sum(total)/sum(population)*10^6) %>% pull(rate)
# adding the average line (log10(r) intercept because both axes are logged)
p + geom_point(aes(col = region), size = 3) + geom_abline(intercept = log10(r))
# slope is default of 1
# we now observe that the line is passing through the points
# changing line color and characteristics (dashed, dark grey, drawn under points)
p <- p + geom_point(aes(col = region), size = 3) + geom_abline(intercept = log10(r), lty = 2, color = "darkgrey")
# Changing the Legend
p
# further styling is available via these add-on packages
library(ggthemes)
library(ggrepel)
|
f6aded6c7eb08d67123eecc1a4a03a3f56f0643c | 3a41e94e107c49296c95b855606d8f27b175a66a | /02A.2mis_prg1_181K_data check.R | 15d37c8bb60c640ec679ad546315b556d2e88407 | [] | no_license | arnelpocsedio/MSc-Plant-Breeding-Massey-University | aea361c4f42315a5c9082d811b08b44143f2ae21 | 51c3ff078d0aaa97e530e9877c37cb277a52dc8e | refs/heads/master | 2023-06-21T09:40:08.533630 | 2021-03-25T08:00:15 | 2021-03-25T08:00:15 | 351,351,372 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,687 | r | 02A.2mis_prg1_181K_data check.R | #'Data quality check of the initial vcf file
#'violin plots of read depth
#'heat map of missingness and allele
#'PCoA looking at (non)clustering of pop/gen, batch and checks
#'
#'Missing data
#'
#'Set up and load the data
# NOTE(review): setwd() to a user-specific absolute path makes this
# script non-portable; prefer project-relative paths.
setwd("~/Thesis2/Data analysis/Data")
library(vcfR)
prg.vcf <- read.vcfR("arnel_out.recode.vcf") #189K snps
show(prg.vcf)#189K
#'Prepare data: convert the VCF to a genlight object for poppr/adegenet
library(poppr)
prg1.gl<- vcfR2genlight(prg.vcf) #7789 non-biallelic snps removed
# mark all samples as diploid
ploidy(prg1.gl)<-2
#'subset batch and pop
#'
#'population info: the first 3 characters of each sample name encode
#'the population (e.g. "P42" / "P46", used as prg_pop$P42 etc. below)
pop(prg1.gl)<-substr(indNames(prg1.gl),1,3)
#'batch information
batch1<-c("P42_A1", "P42_A2", "P42_A3", "P42_A4", "P42_A5",
"P42_A6", "P46_A7", "P46_A8", "P46_A9", "P46_A10",
"P46_A11", "P42_E3_02", "P42_B1", "P42_B2", "P42_B4",
"P42_B5", "P42_B6", "P46_B7", "P46_B8", "P46_B9",
"P46_B10", "P46_B11", "P46_B12", "P42_C1", "P42_C2",
"P42_C3", "P42_C4", "P42_C5", "P42_C6", "P46_C7",
"P46_C8", "P46_C9", "P46_C10", "P46_C11", "P46_C12",
"P42_D1","P42_D2", "P42_D3", "P42_D4", "P42_D5",
"P42_D6", "P46_D7", "P46_D8", "P46_D9", "P46_A6_02",
"P46_D11", "GA66_SQ2718", "P46_A6_01", "P42_E2",
"P42_E3_01", "P42_E4", "P42_E5", "P42_E6", "P46_E7",
"P46_E8", "P46_E9", "P46_E10", "P46_E11", "P46_E12",
"P42_F1", "P42_F2", "P42_F3", "P42_F4", "P42_F5", "P42_F6",
"P46_F7", "P46_F8", "P46_F9", "P46_F10", "P46_F11",
"P46_F12", "P42_G1", "P42_G2", "P42_G3", "P42_G4",
"P42_G5", "P42_G6", "P46_G7", "P46_G8", "P46_G9",
"P46_G10", "P46_G11", "P46_G12", "P42_H1", "P42_H2",
"P42_H3", "P42_H4", "P42_H5", "P42_H6", "P46_H7",
"P46_H8", "P46_H9", "P46_H10", "P46_H11", "P46_H12")
#'treat batch info as population: clone the genlight object and relabel
#'pop as sequencing batch (B1 = samples listed in batch1, B2 = the rest)
prg1v2.gl<-prg1.gl
pop(prg1v2.gl)<-ifelse(indNames(prg1v2.gl) %in% batch1,"B1","B2")
#'separate data into population and batches
#'
prg_pop<-seppop(prg1.gl)
names(prg_pop)
prg_bat<-seppop(prg1v2.gl)
names(prg_bat)
#missing data heatmap: all-black glPlot per subset so only gaps show
glPlot(prg1.gl, col = c("black", "black", "black"), legend = FALSE)#whole data
glPlot(prg_pop$P42, col = c("black", "black", "black"), legend = FALSE)
glPlot(prg_pop$P46, col = c("black", "black", "black"), legend = FALSE)
glPlot(prg_bat$B1, col = c("black", "black", "black"), legend = FALSE)
glPlot(prg_bat$B2, col = c("black", "black", "black"), legend = FALSE)
#alleles heatmap: three genotype classes coloured from a Set1 palette
library(RColorBrewer)
mycol<-brewer.pal(9,"Set1")
glPlot(prg_pop$P46, col = mycol[c(3, 2, 1)])
#clean mem
# NOTE(review): none of the objects removed below (gt, prg_b1.gl, ...)
# is created in this script -- each rm() will only warn "object not
# found". Also, rm(prg.VCF) does not match the object actually created
# above, which is `prg.vcf` (lower case).
rm(gt)
rm(prg_b1.gl)
rm(prg_b2.gl)
rm(prg_b1.VCF)
rm(prg_b2.VCF)
rm(prg_p42.gl)
rm(prg_p46.gl)
rm(prg_p42.VCF)
rm(prg_p46.VCF)
rm(prg.VCF)
gc() |
68a4b49de2e84a86d62276f8765589a9f8e8160a | 5c9f9e93106a21bb1c1937c9dd972c2ad21ae4cc | /R/print.countgmifs.R | 6f91f54a3d34eab869ffd8c85d6c3e04cc318222 | [] | no_license | cran/countgmifs | f308da0c8baf395cfb6a2f671eab24f7b6113826 | f0b61cd6fc04b5845d0f84fae318cdf89fe34644 | refs/heads/master | 2020-03-27T03:55:49.565073 | 2020-01-08T13:20:02 | 2020-01-08T13:20:02 | 145,899,296 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 360 | r | print.countgmifs.R | #' Print the Contents of a Count GMIFS Fitted Object.
#'
#' This function prints the names of the list objects from an \code{countgmifs} fitted model
#' @param object an \code{countgmifs} fitted object.
#' @param dots other arguments.
#' @keywords methods
#' @export
#' @examples
#' print.countgmifs()
print.countgmifs <-
function(x, ...) {
print(names(x))
} |
e50c02494637537a5ce5fcc2663e77cf2660b5be | 5bde2a086c2bafc585e94459bbab2fdb65cc8b55 | /R/classVarImp.R | 0d757cf412dae64aef2eec85201b41efb1972600 | [] | no_license | cran/fscaret | 39d7c662a7fba40b4952017036f1ffca4a9971e8 | 1c56431c2fb5106d560999e5197797fc589a2409 | refs/heads/master | 2021-01-01T06:18:37.237720 | 2018-05-08T07:15:17 | 2018-05-08T07:15:17 | 17,696,153 | 1 | 4 | null | 2015-12-05T19:07:38 | 2014-03-13T04:44:47 | R | UTF-8 | R | false | false | 7,908 | r | classVarImp.R |
classVarImp <- function(model, xTrain, yTrain, xTest, fitControl, myTimeLimit, no.cores, lk_col, supress.output){
resultVarImpListCombCLASS <- NULL
resultVarImpListCombCLASS <- list()
myTimeLimitSet <- myTimeLimit
fitControlSet <- fitControl
lk_col <- lk_col
supress.output <- supress.output
no.cores <- no.cores
classVarPred <- function(funcClassPred) {
#Print out function names
cat("----------------------------------------\n")
cat("Calculating: ", funcClassPred,"\n")
cat("----------------------------------------\n")
if(.Platform$OS.type != "windows"){
outfile<-paste(tempdir(),"/",(lk_col-1),"in_default_CLASSControl_", paste(funcClassPred),".RData",sep="")
outfileImp<-paste(tempdir(),"/",(lk_col-1),"in_default_CLASSControl_VarImp_", paste(funcClassPred),".txt",sep="")
} else if(.Platform$OS.type == "windows") {
outfile<-paste(tempdir(),"\\",(lk_col-1),"in_default_CLASSControl_", paste(funcClassPred),".RData",sep="")
outfileImp<-paste(tempdir(),"\\",(lk_col-1),"in_default_CLASSControl_VarImp_", paste(funcClassPred),".txt",sep="")
}
#start feature selection method
timer1 <- proc.time()
if(.Platform$OS.type !="windows"){
if(supress.output==TRUE){
# Supress output
sink("/dev/null")
res <- invisible(try(timeout(train(xTrain,yTrain, method=funcClassPred, trControl=fitControlSet),seconds=myTimeLimitSet),silent=TRUE))
sink()
} else {
res <- try(timeout(train(xTrain,yTrain, method=funcClassPred, trControl=fitControlSet),seconds=myTimeLimitSet),silent=TRUE)
}
} else {
if(supress.output==TRUE){
# Supress output
sink("NUL")
res <- try(train(xTrain,yTrain, method=funcClassPred, trControl=fitControlSet),silent=TRUE)
sink()
} else {
res <- try(train(xTrain,yTrain, method=funcClassPred, trControl=fitControlSet),silent=TRUE)
}
}
timer2 <- proc.time() - timer1
variableImportanceRes <- try(varImp(res$finalModel),silent=TRUE)
resultVarImpListCombCLASS[funcClassPred] <- try(list(variableImportanceRes),silent=TRUE)
cat("----------------------------------------\n")
cat("",funcClassPred,"\n")
cat("----------------------------------------\n")
cat("Elapsed time: ",timer2,"\n")
# cat("Variable importance: \n")
if((class(res) != "try-error")&&(class(variableImportanceRes) != "try-error")){
# save results
try(save(res, file=outfile),silent=TRUE)
try(write.table(variableImportanceRes, col.names=TRUE, row.names=TRUE, quote=FALSE, sep="\t", file=outfileImp),silent=TRUE)
} else if(class(res) != "try-error"){
yPred <- try(predict(res,xTest),silent=TRUE)
if(class(yPred)!="try-error"){
variableImportanceRes <- try(filterVarImp(xTest,yPred,nonpara=TRUE),silent=TRUE)
if(class(variableImportanceRes)!="try-error") {
resultVarImpListCombCLASS[funcClassPred] <- try(list(variableImportanceRes),silent=TRUE)
try(write.table(variableImportanceRes,col.names=TRUE, row.names=TRUE, quote=FALSE, sep="\t", file=outfileImp))
} else if(class(variableImportanceRes)=="try-error") {
print("Predicting variable importance (first try) has failed!")
resultVarImpListCombCLASS[funcClassPred] <- try(list(NA),silent=TRUE)
}
}
if((class(res)!="try-error") && (class(variableImportanceRes) != "try-error")){
# save results
try(save(res, file=outfile),silent=TRUE)
try(write.table(variableImportanceRes,col.names=TRUE, row.names=TRUE, quote=FALSE, sep="\t", file=outfileImp))
}
if ((class(res)!="try-error") && (class(variableImportanceRes) == "try-error")){
variableImportanceRes <- try(varImp(res),silent=TRUE)
resultVarImpListCombCLASS[funcClassPred] <- try(list(variableImportanceRes$importance),silent=TRUE)
# save results
try(save(res, file=outfile),silent=TRUE)
try(write.table(variableImportanceRes$importance,col.names=TRUE, row.names=TRUE, quote=FALSE, sep="\t", file=outfileImp))
}
} else if (class(variableImportanceRes) == "try-error"){
variableImportanceRes <- try(varImp(res),silent=TRUE)
if(class(variableImportanceRes)!="try-error") {
resultVarImpListCombCLASS[funcClassPred] <- try(list(variableImportanceRes$importance),silent=TRUE)
# save results
try(save(res, file=outfile),silent=TRUE)
try(write.table(variableImportanceRes$importance,col.names=TRUE, row.names=TRUE, quote=FALSE, sep="\t", file=outfileImp))
} else if(class(variableImportanceRes)!="try-error"){
yPred <- try(predict(res,xTest),silent=TRUE)
if(class(yPred)!="try-error"){
variableImportanceRes <- try(filterVarImp(xTest,yPred,nonpara=TRUE),silent=TRUE)
if(class(variableImportanceRes)!="try-error") {
resultVarImpListCombCLASS[funcClassPred] <- try(list(variableImportanceRes),silent=TRUE)
try(write.table(variableImportanceRes,col.names=TRUE, row.names=TRUE, quote=FALSE, sep="\t", file=outfileImp))
} else if(class(variableImportanceRes)=="try-error") {
print("Predicting variable importance (second try) has failed!")
resultVarImpListCombCLASS[funcClassPred] <- try(list(NA),silent=TRUE)
}
}
} else if(class(res)=="try-error"){
print("Building model has failed or reached timelimit!")
resultVarImpListCombCLASS[funcClassPred] <- try(list(NA),silent=TRUE)
}
} else {
print("Predicting variable importance (third try) has failed!")
resultVarImpListCombCLASS[funcClassPred] <- try(list(NA),silent=TRUE)
}
# Last check to get all possible varImp
tmpSum <- sum(variableImportanceRes$importance[,1])
if(tmpSum==0){
variableImportanceRes <- try(varImp(res),silent=TRUE)
resultVarImpListCombCLASS[funcClassPred] <- try(list(variableImportanceRes$importance),silent=TRUE)
try(write.table(variableImportanceRes$importance,col.names=TRUE, row.names=TRUE, quote=FALSE, sep="\t", file=outfileImp))
}
if(class(variableImportanceRes)!="try-error"){
# Print out variable importance
# try(print(variableImportanceRes),silent=TRUE)
cat("Variable importance has been calculated!", "\n")
} else if(class(variableImportanceRes)=="try-error"){
print("Predicting variable importance (fourth try) has failed!")
resultVarImpListCombCLASS[funcClassPred] <- try(list(NA),silent=TRUE)
}
if((file.exists(paste(outfile))) || file.exists(outfileImp)){
# general check for files RData and VarImp.txt
if(((file.exists(paste(outfile)))==FALSE) || (file.exists(outfileImp)==FALSE)){
try(file.remove(paste(outfile)))
try(file.remove(paste(outfileImp)))
} else {
print("RData and VarImp.txt files exists!")
print("Variable importance:")
print(variableImportanceRes)
}
}
}
if (.Platform$OS.type == "windows"){
# # Windows parallel implementation
#
# # Spawn child processes using fork()
# cl <- makeCluster(no.cores)
#
# # Export objects to the cluster
# clusterExport(
# cl=cl,
# varlist=c("myTimeLimitSet", "fitControlSet", "lk_col", "supress.output",
# "mySystem", "no.cores", "xTrain","yTrain", "funcClassPred", "fitControlSet")
# ,envir=environment())
#
# # Run function
# resultVarImpListCombCLASS[model] <- parLapply(cl, model, classVarPred)
#
# # Stop cluster and kill child processes
# stopCluster(cl)
resultVarImpListCombCLASS[model] <- lapply(model,classVarPred)
} else {
# POSIX OS parallel implementation
resultVarImpListCombCLASS[model] <- mclapply(model,classVarPred,
mc.preschedule=FALSE, mc.cores=no.cores, mc.cleanup=FALSE)
}
# Return variable importance or NULL
for(i in 1:length(resultVarImpListCombCLASS)){
if(class(resultVarImpListCombCLASS[i])=="try-error"){
resultVarImpListCombCLASS[i] <- NULL
}
}
return(resultVarImpListCombCLASS)
}
|
c0bdf61c46545da6d0b7e9d8f94c5d68ef4897dd | 6badea3fb8aa18e620b582a2fee9b595462ca588 | /man/geom_two_group_within.Rd | 67457d69c444b5af79a4bb19c6d267f30c64a0ba | [] | no_license | dstanley4/ggpsyc | e2a0acee544ced1d90b40a576a4085df41abc65d | a91a33e868f32af6012f4b75e1901cecfe3c24f4 | refs/heads/master | 2020-06-04T15:43:38.360448 | 2019-06-26T14:50:40 | 2019-06-26T14:50:40 | 192,087,754 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 372 | rd | geom_two_group_within.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_combine.R
\name{geom_two_group_within}
\alias{geom_two_group_within}
\title{Create a plot for paired groups t-test as per Introduction to the New Statistics}
\usage{
geom_two_group_within()
}
\description{
Create a plot for paired groups t-test as per Introduction to the New Statistics
}
|
8bb789bd4d0d078c15a037edf2d5e87401c9ae8c | cf95921d48afc1cb174cbdbd25f19b3528b2d393 | /R/interactive_plot.R | e2d47cc08150e8da54279474da47f5fa7a99a6f8 | [] | no_license | tbstockton/gisdtR | 0a4dd21c38305c4abd21363a83e303c1bfe04640 | 1e27801b53c0f363c4953f8a06116c46ef938b7e | refs/heads/master | 2021-01-21T19:05:32.299497 | 2017-05-23T00:40:46 | 2017-05-23T00:40:46 | 92,110,364 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,829 | r | interactive_plot.R | interactive_plot.fn = function(data.obj=gsd.df,gsd.version = "v2.009 CaseC",
response="MaxI129_SectorB_20ky[pCi/L]",
predictor="Reducingmiddle_concrete_kds_redKd_Dist[I][mL/g]",
group=NULL,
ylab="Max I129 SectorB 20ky [pCi/L]",xlab="I Reducing middle concrete Kd [mL/g]",
legend.columns=3,pd.col="darkblue",
#response.colors = c("goldenrod","yellowgreen","palegreen","whitesmoke")
response.colors = c("gray75","gray62","gray50","gray37","gray25")
){
options(stringsAsFactors=F)
library("RSVGTipsDevice");library("RSvgDevice");library("Cairo")
load(file=paste("Rdata_v2/",gsub("\\[.*\\]","",response),"_",gsd.version,"_endpoint.gbm.Rdata",sep=""))
endpoint.gbm$sa = plot.pd.gbm(endpoint.gbm,gsd.df,nVar=grep(gsub("`|\\[|\\]|/","",predictor),gsub("`|\\[|\\]|/","",summary(endpoint.gbm,plotit=F)$var)),res=1000)
#poly.x = signif(c(range(data.obj[,predictor]),rev(range(data.obj[,predictor]))),poly.x.signif)
poly.x = c(floor(min(data.obj[,predictor])),ceiling(max(data.obj[,predictor])))
#poly.x = c(min(pretty(data.obj[,predictor])),max(pretty(data.obj[,predictor])))
response.quan = quantile(data.obj[,response])
response.hist = hist(data.obj[,predictor],plot=F)
response.hist$y = (response.hist$counts)/max(range(response.hist$counts)) * diff(range(data.obj[,response]))+min(data.obj[,response])
response.hist$at = pretty(seq(min(response.hist$count),max(response.hist$count),length=4))
response.hist$at = (response.hist$at)/max(range(response.hist$at)) * diff(range(data.obj[,response]))+min(data.obj[,response])
group.cols=rainbow(length(unique(data.obj[,group])))
point.cols = data.frame(group=sort(unique(data.obj[,group])),cols=group.cols)
data.obj = merge(data.obj,point.cols,by.x=group,by.y="group")
devSVGTips(file="pd.svg",height=8,width=15,toolTipMode = 2, toolTipFontSize = 9)
#dev.off();windows();browser()
par(mai=c(3,1.5,1,5.0))
plot(data.obj[,predictor],data.obj[,response],type="n",las=1,ylab=paste(ylab,"\n"),xlab=xlab)
axis(side=4,las=2,
at=response.hist$at,
labels=pretty(seq(min(response.hist$count),max(response.hist$count),length=4)),
)
ylab.histo = gsub("\\[.*\\]","",xlab)
setSVGShapeToolTip(title="Histogram", desc=ylab.histo)
mtext(text=gsub("\\[.*\\]","",xlab),side=4,line=4)
setSVGShapeToolTip(title="Histogram", desc=ylab.histo)
mtext(text="Count",side=4,line=5.5)
# lapply(1:(length(response.quan)-1),function(i){
# setSVGShapeToolTip(title="Percentile", desc=c("0-25","25-50","50-75","75-100")[i])
# rect(min(poly.x),response.quan[i],max(poly.x),response.quan[i+1],col=response.colors[i],border=0)
# })
for(i in 1:length(response.hist$y)){
setSVGShapeToolTip(title=xlab, desc1=paste(response.hist$breaks[i],"to",response.hist$breaks[i+1]),desc2=paste("count=",response.hist$count[i]))
rect(response.hist$breaks[i],response.quan[1],response.hist$breaks[i+1],response.hist$y[i],col="gray90",border="gray75")
}
if(is.null(endpoint.gbm$realization)) endpoint.gbm$realization = 1:length(endpoint.gbm$fit)
# label each point
for(i in 1:nrow(data.obj)){
setSVGShapeToolTip(paste("Realization: ",endpoint.gbm$realization[i],sep=""),
desc1=paste(ylab,": ",signif(data.obj[i,response],3),sep=""),
desc2=paste(xlab,": ",signif(data.obj[i,predictor],3),sep="")
)
points(data.obj[i,predictor],data.obj[i,response],col=data.obj[i,"cols"],pch=16,cex=.5)
}
for(i in 1:length(response.quan)){
setSVGShapeToolTip(title=ylab, desc1=paste("Percentile",c("min","25","50","75","max")[i]))
lines(x=c(min(poly.x),max(poly.x)),c(response.quan[i],response.quan[i]),col=response.colors[i],lty=3,lwd=3)
}
lines(endpoint.gbm$sa$x,endpoint.gbm$sa$y,type="l",lwd=4,col=pd.col)
# legend
uni.groups = paste(". ",group,": ",sort(unique(data.obj[,group])),sep="")
setSVGShapeToolTip(title="Realizations", desc1=paste(nrow(data.obj),"Simulated values from the GoldSim model"),desc2=paste("colored by",group))
mtext(uni.groups,side=1,line=seq(5.0,5+(length(uni.groups)-1)*1.5,by=1.5),adj=0.25,col=group.cols,font=3)
setSVGShapeToolTip(title="Partial Dependence", desc1=paste("Change in the",ylab),desc2=paste("wrt a change in",xlab))
mtext("---- Partial Dependence",side=1,line=5,adj=0.75,col=pd.col,font=2)
setSVGShapeToolTip(title="Percentiles", desc=ylab)
mtext(paste(ylab,"Percentiles"),side=1,line=6.5,adj=1,col="black",font=2)
for(i in 1:length(response.quan)){
setSVGShapeToolTip(title="Quantiles", desc=ylab)
mtext(c("----- min","----- 25th","----- 50th","----- 75th","----- max")[i],side=1,line=c(8.0,9.5,11,12.5,14.0)[i],adj=0.70,col=response.colors[i],font=2)
}
dev.off()
browseURL(paste("file://",getwd(),"/pd.svg",sep=""),browser="C:/Program Files/Mozilla Firefox/firefox.exe")
}
|
61e4cd35f0a40dec77af4f0f90bcadc937f9c986 | a28f6e0a0b5e0a41d5c8b49c8b2df2203fa9ff43 | /R/R_reference.r | b322c448d0f3d2ff958cd83891849e11c1927aa6 | [] | no_license | stanleysfang/code_reference | 7005dba9ec6cf1c45f32d29f0c06ee36bda8dd6d | de0b399d97a045734de14d36bc6d8da8932e27c6 | refs/heads/master | 2021-09-08T16:47:26.101827 | 2021-09-06T18:25:05 | 2021-09-06T18:25:05 | 248,426,805 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 132 | r | R_reference.r |
#### Common Commands ####
#### Installing R kernel for Jupyter Notebook ####
install.packages('IRkernel')
IRkernel::installspec() |
2f6af48212bf1f38e5e57775c5d7c0c266b30009 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/etm/R/plot.etmCIF.R | c003bc39deb187a3e9645f6215e46844d83448f0 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,499 | r | plot.etmCIF.R | plot.etmCIF <- function(x, which.cif, xlim, ylim,
ylab = "Cumulative Incidence", xlab = "Time",
col = 1, lty, lwd = 1, ci.type = c("none", "bars", "pointwise"),
ci.fun = "cloglog", ci.col = col, ci.lty = 3,
legend = TRUE, legend.pos, curvlab, legend.bty = "n",
pos.ci = 27, ci.lwd = 3,
...) {
if (!inherits(x, "etmCIF")) {
stop("'x' must be of class 'etmCIF'")
}
ci.type <- match.arg(ci.type)
tr.choice <- paste(x[[1]]$trans[, 1], x[[1]]$trans[, 2])
l.x <- NCOL(x$X)
n.trans <- length(tr.choice)
if (missing(which.cif)) {
tr.choice <- paste(0, x$failcode, sep = " ")
} else {
tr.choice <- paste(0, which.cif, sep = " ")
## A small test on tr.choice
ref <- sapply(1:length(x[[1]]$state.names), function(i) {
paste(x[[1]]$state.names, x[[1]]$state.names[i])
})
ref <- matrix(ref)
if (sum(tr.choice %in% ref == FALSE) > 0)
stop("Argument 'which.cif' and causes of failure must match")
}
n.what <- length(tr.choice)
max.time <- max(sapply(x[1:l.x], function(ll) {
max(ll$time)
}))
if (missing(ylim)) ylim <- c(0, 1)
if (missing(xlim)) xlim <- c(0, max.time)
if (missing(lty)) {
lty <- seq_len(n.what * l.x)
}
else if (length(lty) < (l.x * n.what)) {
lty <- lty * rep(1, l.x * n.what)
}
if (length(col) < l.x * n.what)
col <- col * rep(1, l.x * n.what)
conf.int <- if (ci.type == "pointwise") TRUE else FALSE
if (ci.type != "none") {
if (missing(ci.col)) {
ci.col <- col
} else {
if (length(ci.col) < (l.x * n.what)) {
ci.col <- ci.col * rep(1, l.x * n.what)
}
}
if (missing(ci.lty)) {
ci.lty <- lty
} else {
if (length(ci.lty) < (l.x * n.what)) {
ci.lty <- ci.lty * rep(1, l.x * n.what)
}
}
}
plot(xlim, ylim, xlab = xlab, ylab = ylab,
xlim = xlim, ylim = ylim, type = "n", ...)
summx <- lapply(x[1:l.x], summary, ci.fun = ci.fun)
if (length(pos.ci) < l.x) pos.ci <- rep(pos.ci, l.x)
for (i in seq_len(l.x)) {
for (j in seq_along(tr.choice)) {
lines(x[[i]], tr.choice = tr.choice[j],
col = col[j + (i - 1) * n.what], lty = lty[j + (i - 1) * n.what],
lwd = lwd, conf.int = conf.int,...)
if (ci.type == "bars") {
ind <- findInterval(pos.ci[i], summx[[i]][[tr.choice[j]]]$time)
graphics::segments(pos.ci[i], summx[[i]][[tr.choice[j]]]$lower[ind],
pos.ci[i], summx[[i]][[tr.choice[j]]]$upper[ind],
lwd = ci.lwd, col = ci.col[j + (i - 1) * n.what],
lty = ci.lty[j + (i - 1) * n.what],...)
}
}
}
if (legend) {
if (missing(legend.pos)) {
legend.pos <- "topleft"
}
if (missing(curvlab)) {
cdc <- sapply(strsplit(sub("\\s", "|", tr.choice), "\\|"),
"[", 2)
## cdc <- sapply(strsplit(tr.choice, " "), "[", 2)
if (l.x == 1) {
curvlab <- paste("CIF ", cdc, sep = "")
} else {
if (length(cdc) == 1) {
curvlab <- paste("CIF ", cdc, "; ", rownames(x$X), "=", x$X, sep = "")
} else {
curvlab <- as.vector(sapply(seq_along(x$X), function(j){
paste("CIF ", cdc, "; ", rownames(x$X), "=", x$X[j], sep = "")
}))
}
}
}
if (is.list(legend.pos)) legend.pos <- unlist(legend.pos)
if (length(legend.pos) == 1) {
xx <- legend.pos
yy <- NULL
}
if (length(legend.pos) == 2) {
xx <- legend.pos[1]
yy <- legend.pos[2]
}
args <- list(...)
ii <- pmatch(names(args),
names(formals("legend")[-charmatch("bty",names(formals("legend")))]))
do.call("legend", c(list(xx, yy, curvlab, col=col, lty=lty, lwd = lwd, bty = legend.bty),
args[!is.na(ii)]))
}
invisible()
}
|
b768d2668985ff6bf66793ca6238d85f7b74f39a | 818dd3954e873a4dcb8251d8f5f896591942ead7 | /Monika/clustering.R | 271408a2c66b6481bc730e7ebe1314537ab49434 | [] | no_license | DannyArends/HU-Berlin | 92cefa16dcaa1fe16e58620b92e41805ebef11b5 | 16394f34583e3ef13a460d339c9543cd0e7223b1 | refs/heads/master | 2023-04-28T07:19:38.039132 | 2023-04-27T15:29:29 | 2023-04-27T15:29:29 | 20,514,898 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,801 | r | clustering.R | setwd("D:/Edrive/Horse/DNA/SNPchip_Monika_Feb19")
mdata <- readLines("HorseBreeds_SNPchip.txt")
splitted <- lapply(mdata, strsplit, "\t")
header <- splitted[1:3]
splitted <- splitted[-c(1:3)]
nRow <- length(splitted)
nCol <- length(splitted[[1]][[1]]) - 5
mData <- matrix(NA, nRow, nCol)
nItems <- length(splitted[[1]][[1]])
for(x in 1:nRow){
mData[x,] <- splitted[[x]][[1]][6:nItems]
}
horses <- unique(header[[2]][[1]][6:nItems])
myData <- matrix(NA, nRow, length(horses))
colnames(myData) <- horses
rownames(myData) <- gsub("_", "-", unlist(lapply(lapply(splitted, unlist), "[",1 )))
colnames(mData) <- header[[2]][[1]][6:nItems]
for(cX in seq(1, 94, 2)){
myData[,colnames(mData)[cX]] <- paste0(mData[,cX],mData[,(cX+1)])
}
for(cX in 95:ncol(mData)){
myData[,colnames(mData)[cX]] <- mData[,cX]
}
myMap <- cbind(gsub("_", "-", unlist(lapply(lapply(splitted, unlist), "[",1 ))),
unlist(lapply(lapply(splitted, unlist), "[",2)),
unlist(lapply(lapply(splitted, unlist), "[",3)),
unlist(lapply(lapply(splitted, unlist), "[",4)),
unlist(lapply(lapply(splitted, unlist), "[",5)))
colnames(myMap) <- c("ID", "Index", "Chr", "Position", "SNP")
write.table(myData, "genotypes.txt", sep = "\t", quote=FALSE)
write.table(myMap, "map.txt", sep = "\t", quote=FALSE, row.names=FALSE)
genotypes <- read.csv("genotypes.txt", sep = "\t", row.names=1, na.strings=c("", "NA", "--"), colClasses="character")
map <- read.csv("map.txt", sep = "\t", row.names = 1)
### Add peterson data
setwd("D:/Edrive/Horse/DNA/Petersen2013/raw")
for(mfile in list.files(path = ".", pattern = "csv.gz")){
splitted <- strsplit(readLines(gzfile(mfile)), ",")
markers <- unlist(lapply(splitted, "[", 1))
horses <- unlist(lapply(splitted, "[", 2))
GTs <- paste0(unlist(lapply(splitted, "[", 5)), unlist(lapply(splitted, "[", 6)))
for (horse in na.omit(unique(horses))) {
genotypes <- cbind(genotypes, cHorse = NA)
colnames(genotypes)[which(colnames(genotypes) == "cHorse")] <- horse
cHorseData <- which(horses == horse)
cHorseGTs <- GTs[cHorseData]
names(cHorseGTs) <- markers[cHorseData]
genotypes[, horse] <- cHorseGTs[rownames(genotypes)]
cat("Done horse: ", horse, "\n")
}
}
genotypes[genotypes == "--"] <- NA
setwd("D:/Edrive/Horse/DNA/SNPchip_Monika_Feb19")
write.table(genotypes, "genotypes_peterson_merged.txt", sep = "\t", quote=FALSE)
opposite <- function(x){
if(any(is.na(x))) return("")
ret <- NULL
for(e in x){
if(e == "A") ret <- c(ret, "T")
if(e == "C") ret <- c(ret, "G")
if(e == "G") ret <- c(ret, "C")
if(e == "T") ret <- c(ret, "A")
}
return(ret)
}
# Fix direction of alleles in Peterson data
unfixable <- c()
for(x in 1:nrow(genotypes)) {
alleles_monika <- sort(as.character(na.omit(unique(unlist(strsplit(as.character(genotypes[x,1:92]), ""))))))
alleles_peterson <- sort(as.character(na.omit(unique(unlist(strsplit(as.character(genotypes[x,93:ncol(genotypes)]), ""))))))
if(length(alleles_monika) == 2 && length(alleles_peterson) == 2 && !all(alleles_monika == alleles_peterson)){
nAlleles <- opposite(alleles_peterson)
genotypes[x, 93:ncol(genotypes)] <- gsub(alleles_peterson[2], nAlleles[2], gsub(alleles_peterson[1], nAlleles[1], genotypes[x,93:ncol(genotypes)]))
}else{
cat(x, " ", length(alleles_monika), " ", length(alleles_peterson), "\n")
}
}
write.table(genotypes, "genotypes_peterson_merged_and_flipped.txt", sep = "\t", quote=FALSE)
noSeg <- c()
numgeno <- matrix(NA, nrow(genotypes), ncol(genotypes), dimnames=list(rownames(genotypes), colnames(genotypes)))
for(x in 1:nrow(genotypes)) {
alleles <- sort(as.character(na.omit(unique(unlist(strsplit(as.character(genotypes[x,]), ""))))))
if(length(alleles) == 2) {
HomA <- paste0(alleles[1],alleles[1])
Het0 <- paste0(alleles[1],alleles[2])
Het1 <- paste0(alleles[2],alleles[1])
HomB <- paste0(alleles[2],alleles[2])
numgeno[x, genotypes[x,] == HomA] <- 0
numgeno[x, genotypes[x,] == Het0] <- 1
numgeno[x, genotypes[x,] == Het1] <- 1
numgeno[x, genotypes[x,] == HomB] <- 2
} else if(length(alleles) < 2) {
cat("No seg at",x,"\n")
noSeg <- c(noSeg, x)
} else {
noSeg <- c(noSeg, x)
cat("[WARNING] ", x,"more then 2 alleles present (", length(alleles), ") shouldnt have happened after flipping:",alleles,"\n")
}
}
numgeno <- numgeno[-noSeg, ]
write.table(numgeno, "numgeno_peterson_merged.txt", sep = "\t", quote=FALSE)
map <- map[-noSeg, ]
write.table(map, "numgeno_peterson_merged_map.txt", sep = "\t", quote=FALSE)
setwd("D:/Edrive/Horse/DNA/SNPchip_Monika_Feb19")
numgeno <- read.table("numgeno_peterson_merged.txt", sep = "\t")
map <- read.table("numgeno_peterson_merged_map.txt", sep = "\t")
monika <- 1:92
caspian <- grep("CS_", colnames(numgeno)) # Caspian
colnames(numgeno)[caspian] <- paste0("Caspian ", 1:length(caspian))
tuva <- grep("Tu", colnames(numgeno)) # Tuva
colnames(numgeno)[tuva] <- paste0("Tuva ", 1:length(tuva))
mongolian <- grep("_MON", colnames(numgeno)) # Mongolian
colnames(numgeno)[mongolian] <- paste0("Mongolian ", 1:length(mongolian))
exmoor <- grep("EX_", colnames(numgeno)) # Exmoor
colnames(numgeno)[exmoor] <- paste0("Exmoor ", 1:length(exmoor))
thoroughbred <- grep("_TB", colnames(numgeno)) # Thoroughbred
colnames(numgeno)[thoroughbred] <- paste0("Thoroughbred ", 1:length(thoroughbred))
arabian <- grep("ARR", colnames(numgeno)) # Arabian
colnames(numgeno)[arabian] <- paste0("Arabian ", 1:length(arabian))
akhalteke <- grep("AH_", colnames(numgeno)) # Akhal_Teke
colnames(numgeno)[akhalteke] <- paste0("AkhalTeke ", 1:length(akhalteke))
# Columns of interest
cOI <- c(monika, caspian, tuva, mongolian, exmoor, thoroughbred, arabian, akhalteke)
n <- 2500
r <- 50
sS <- (nrow(numgeno) / n)
distances <- vector("list", r)
for(x in 1:r){
distances[[x]] <- dist(t(numgeno[sample(nrow(numgeno), n), cOI]), method="manhattan")
cat("Done", x, "/", r, "\n")
}
dSum <- (distances[[1]] * sS)
for(x in 2:r){
dSum <- dSum + (distances[[x]] *sS)
}
dAvg <- dSum / r
library(dendextend) # library(circlize)
library(RColorBrewer)
clustering <- hclust(dAvg)
ordering <- clustering$labels[clustering$order]
mdendrogram <- as.dendrogram(clustering)
mcolors = brewer.pal(7, "Paired")
names(mcolors) <- c("Caspian", "Tuva", "Mongolian", "Exmoor", "Thoroughbred", "Arabian", "AkhalTeke")
labelCol <- function(x){
if(is.leaf(x)){
label <- strsplit(attr(x, "label"), " ")[[1]][1]
attr(x, "nodePar") <- list(lab.col = mcolors[label], pch=NA) # Set the label color based on the strain
#attr(x, "label") <- label
}
return(x)
}
dendrocol <- dendrapply(mdendrogram, labelCol)
op <- par(cex=0.7)
plot(dendrocol)
circlize_dendrogram(dendrocol, labels_track_height=NA)
plot(dendrocol) |
aee7eee8697b3d2b7719cf3eb2f0bc4cc955a49a | 92f9858a118acf0921b252da7ea86a76ea9c0a73 | /bootstrap/initial/software/utilities/SAN_Age0_consitecy_area1_and_2.R | c0f7b9f1fe9ae41a0aaa99ca166dddbea40dcc84 | [] | no_license | Scruff92/Sandeel_TAF_DTU | 2275723cdcb001bc6abf2a55da59b890a9a5817b | 869e1e0350a8f12c570e42bb3353309b8e48797a | refs/heads/master | 2020-09-14T15:55:54.935498 | 2020-01-29T15:42:48 | 2020-01-29T15:42:48 | 223,175,558 | 0 | 1 | null | 2019-11-21T14:49:34 | 2019-11-21T13:03:21 | Smarty | UTF-8 | R | false | false | 1,029 | r | SAN_Age0_consitecy_area1_and_2.R |
d11<-sub('area-2','area-1',data.path) # path to sandeel area 1
d1 <- read.table(file.path(d11,'summary_table_raw.out') ,header=TRUE, sep="", na.strings="NA", dec=".", strip.white=TRUE)
d2 <- read.table(file.path(data.path,"summary_table_raw.out"),header=TRUE, sep="", na.strings="NA", dec=".", strip.white=TRUE)
ly <- (dim(d1)[1])-1
d1$Rec<-d1$Rec/1000000 # billions
d2$Rec<-d2$Rec/1000000 # billions
fil<-'ag0_area_1_area2_consitency'
#newplot(dev="screen",filename=fil,nox=1,noy=1,Portrait=F,w8=5,w11=5);
par(mar=c(5,4,4,5)+.1)
plot(d1$Rec,d2$Rec, pch = 21, cex = 1, bg = "black",
xlab = "Area 1 recruits (bill)", ylab = "Area 2 recruits (bill)",cex.lab = 1.1, cex.axis = 1.1)
points(d1$Rec[ly],d2$Rec[ly], pch = 21, cex = 1, bg = "red")
fit <- lm(d2$Rec~d1$Rec)
abline(fit$coefficients[1],fit$coefficients[2])
r2 <- round(summary(fit)$r.squared,2)
text(max(d1$Rec,na.rm=T)*0.1,max(d2$Rec,na.rm=T)*0.9,paste("R-squared =",r2),pos=4)
#savePlot(filename = file.path(mdDir,fil),type = "png")
|
e9d5cb1d0c052f43d25290aae9d7937bca2a427b | 983d5d1a6b95050d2d68df08de6a4726f7891d8a | /code/functions/hex-color.R | 564a096e5f517b13944aa904118f35ab9114058f | [] | no_license | CoraBai/Workout02 | dfafe76e414dfa211a573e762add7f5494595c43 | 18687842fcd665beb6282033bd902cbf70a2baed | refs/heads/master | 2020-04-04T13:12:07.676312 | 2018-11-03T21:58:11 | 2018-11-03T21:58:11 | 155,952,579 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 796 | r | hex-color.R | #' @title Is Hex
#' @description test if the input string is a hex color without alpha transparency
#' @param string x
#' @return logical value of if the input is a hex color without alpha transparency
is_hex = function(x){
if(!(is.character(x) && length(x) ==1)){
stop("invalid input; a string was expected")
}
else{
return(grepl("^#(\\d|[a-f]){6}$", x, ignore.case = TRUE))
}
}
#' @title Is Hex Alpha
#' @description test if the input string is a hex color with alpha transparency
#' @param string x
#' @return logical value of if the input is a hex color with alpha transparency
is_hex_alpha = function(x){
if(!(is.character(x) && length(x) ==1)){
stop("invalid input; a string was expected")
}
else{
return(grepl("^#(\\d|[a-f]){8}$", x, ignore.case = TRUE))
}
} |
b515173d335c26ad1ec4f397dd9a56c7cdbbd0a7 | f5511623880f9baaf7549fd7c44dcf3f3417d927 | /test.R | 222ec49dc985eb88b25bab19ed1a832f9bd89b9a | [] | no_license | dvdcprague/Movie_Project | bcecd779f64584080db9c77d2ed61041ae9e093e | 333637249fb6deab0481dc3f032887409183faae | refs/heads/master | 2021-07-19T16:49:50.251169 | 2020-04-24T22:56:44 | 2020-04-24T22:56:44 | 141,197,171 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 50,776 | r | test.R | # Load required libraries
require(data.table)
# library(sqldf)
library(dplyr)
library(DT)
library(rCharts)
library(ggplot2)
library(ggcorrplot)
library(tidyr)
library(reshape2)
library(lubridate)
library(GGally)
library(plyr)
library(gridExtra)
library(Matrix)
library(glmnet)
# Read data
load("movies_merged")
data = movies_merged[movies_merged$Type=='movie',]
#explore data
table(data$Year) # 1888 ~ 2018
colnames(data)
str_to_num = function(s){
if (grepl('h', s) && grepl('min', s)) {
hour = as.numeric(unlist(strsplit(s, ' '))[1])
min = as.numeric(unlist(strsplit(s, ' '))[3])
return(60*hour+min)
}
else if (grepl('h', s) && !grepl('min', s)) {
hour = as.numeric(unlist(strsplit(s, ' '))[1])
return(60*hour)
}
else if (!grepl('h', s) && grepl('min', s)) {
min = as.numeric(unlist(strsplit(s, ' '))[1])
return(min)
}
else {
return(NA)
}
}
str_to_sum = function(s){
l = unlist(strsplit(s, "[^0-9]+"))
l = l[l != '']
result = sum(as.numeric(l))
return(result)
}
convert = function(n){
if (n == 0) {
return('no nominations or awards')
}
else if (n > 12) {
return('many nominations or awards')
}
else {
return('some nominations or awards')
}
}
## Helper functions
#' Aggregate dataset by year
#'
#' @param dt data.table
#' @param minYear
#' @param maxYear
#' @return data.table
#'
groupByYear <- function(dt, minYear, maxYear) {
result <- dt %>% filter(Year >= minYear, Year <= maxYear)
return(result)
}
#' histogram plot
#'
#' @param dt data.table
#' @param feature
plotRuntime <- function(dt) {
dt$Runtime = unname(sapply(dt$Runtime, str_to_num))
#histogram to show the distribution of runtime value
ggplot(dt, aes(x=Runtime)) + geom_histogram(binwidth=20, fill = "blue", alpha=0.3) + xlab("Runtime in Minutes") + ylab('Count') +
theme_bw(base_size = 20)
}
# Histogram of total award wins/nominations per title (parsed from the
# Awards text via str_to_sum). Titles with zero wins/nominations are
# excluded and the x-axis is clipped to [0, 100].
plotNoOfWins <- function(dt) {
  awarded <- dt
  awarded$wins_or_nominations <- unname(sapply(awarded$Awards, str_to_sum))
  nonzero <- subset(awarded, awarded$wins_or_nominations != 0)
  ggplot(nonzero, aes(x = wins_or_nominations)) +
    geom_histogram(binwidth = 20, fill = "blue", alpha = 0.3) +
    xlab("Number of Wins/Nominations") +
    ylab('Count') +
    coord_cartesian(xlim = c(0, 100)) +
    theme_bw(base_size = 20)
}
# Scatter plot of runtime (converted to minutes) against release year.
plotRuntimeAndYear <- function(dt) {
  dt$Runtime <- unname(sapply(dt$Runtime, str_to_num))
  ggplot(dt, aes(x = Year, y = Runtime)) +
    geom_point(size = 2, color = 'blue', alpha = 0.3) +
    labs(x = "Year", y = "Runtime") +
    theme_bw(base_size = 20)
}
# Scatter plot of runtime (converted to minutes) against budget.
plotRuntimeAndBudget <- function(dt) {
  dt$Runtime <- unname(sapply(dt$Runtime, str_to_num))
  ggplot(dt, aes(x = Budget, y = Runtime)) +
    geom_point(size = 2, color = 'blue', alpha = 0.3) +
    labs(x = "Budget", y = "Runtime") +
    theme_bw(base_size = 20)
}
# Bar chart of the number of titles per genre, sorted descending.
# A title listing multiple genres is counted once in each genre.
plotTitleAndGenres <- function(dt) {
#parse each text string in Genre into a binary vector (one 0/1 indicator column per genre)
movies = dt[dt$Genre != 'N/A',]
movies$Genre = strsplit(movies$Genre, ', ')
movies_long = unnest(movies, Genre)
movies_wide = dcast(movies_long, Title ~ Genre, function(x) 1, fill = 0)
movies = merge(dt, movies_wide) #add the binary vector to the original dataframe
#plot the distribution of title counts across different genres
# NOTE(review): columns 40:67 are assumed to be the 28 genre indicator
# columns appended by the merge above — positional indexing is fragile if
# the input schema changes; confirm against the actual column layout.
movies_count = data.frame(Genre=names(movies[,40:67]), Count=colSums(movies[,40:67]))
movies_count$Proportion = (movies_count$Count/sum(movies_count$Count))*100
ggplot(movies_count, aes(reorder(Genre, Count), Count)) + geom_bar(stat='identity',fill = "blue", alpha=0.3) +
coord_flip() + ylab('Count of Titles') + xlab('Genres') + theme_bw(base_size = 20)
}
# Boxplots of gross revenue for the 10 most common genres, ordered by
# descending median gross (box width proportional to group size).
plotGrossAndGenres <- function(dt) {
#parse each text string in Genre into a binary vector
movies = dt[dt$Genre != 'N/A',]
movies$Genre = strsplit(movies$Genre, ', ')
movies_long = unnest(movies, Genre)
movies_wide = dcast(movies_long, Title ~ Genre, function(x) 1, fill = 0)
movies = merge(dt, movies_wide) #add the binary vector to the original dataframe
#plot the distribution of gross revenue across top 10 genres
movies_gross = movies[!is.na(movies$Gross),]
# NOTE(review): column 38 is assumed to be Gross and 40:67 the genre
# indicators appended by the merge above — positional indexing is fragile;
# confirm against the actual column layout.
DF = movies_gross[c(38, 40:67)] #create a subset of original dataframe which contains only the Gross column and the indicator variables of Genres
DF1 = melt(DF, id.vars="Gross")
DF2 = subset(DF1, value>0)
# keep only the ten most frequent genres, stacked long-form
DF3 = rbind(subset(DF2, variable == 'Drama'), subset(DF2, variable == 'Comedy'), subset(DF2, variable == 'Short'), subset(DF2, variable == 'Romance'), subset(DF2, variable == 'Action'), subset(DF2, variable == 'Crime'), subset(DF2, variable == 'Thriller'), subset(DF2, variable == 'Documentary'), subset(DF2, variable == 'Adventure'), subset(DF2, variable == 'Animation'))
ggplot(DF3, aes(reorder(variable, -Gross, median), Gross)) + geom_boxplot(varwidth=T, fill="blue", alpha=0.3) +
coord_flip() + scale_x_discrete("Genres") + ylab(label="Gross") + theme_bw(base_size = 20)
}
# Scatter plot of gross revenue against release month (1-12).
plotGrossAndMonth <- function(dt) {
# build per-genre 0/1 indicator columns (merged back in below)
movies = dt[dt$Genre != 'N/A',]
movies$Genre = strsplit(movies$Genre, ', ')
movies_long = unnest(movies, Genre)
movies_wide = dcast(movies_long, Title ~ Genre, function(x) 1, fill = 0)
#No discrepancy is found between Year and Release, so no rows are removed.
movies_remove_final = dt
movies_remove_final$Released_Month = month(movies_remove_final$Released)#create a release month column (lubridate::month)
movies_release = subset(merge(movies_remove_final, movies_wide), !is.na(Gross))#add the genre indicator variables to dataframe movies_remove_final, and remove the rows where Gross is NA
#plot shows the relationship between Gross and Released Month
ggplot(movies_release, aes(x=Released_Month, y=Gross)) + geom_point(size=2, color='blue', alpha=0.3) + ylab(label="Gross Revenue") +
xlab("Release Month") + theme_bw(base_size = 20)
}
# Faceted scatter plots of gross revenue vs. release month, one facet per
# genre, stacked into three vertical panels (10 + 10 + 8 genres).
plotGrossAndMonthAndGenres <- function(dt) {
# build per-genre 0/1 indicator columns (merged back in below)
movies = dt[dt$Genre != 'N/A',]
movies$Genre = strsplit(movies$Genre, ', ')
movies_long = unnest(movies, Genre)
movies_wide = dcast(movies_long, Title ~ Genre, function(x) 1, fill = 0)
#No discrepancy is found between Year and Release, so no rows are removed.
movies_remove_final = dt
movies_remove_final$Released_Month = month(movies_remove_final$Released)#create a release month column (lubridate::month)
movies_release = subset(merge(movies_remove_final, movies_wide), !is.na(Gross))#add the genre indicator variables to dataframe movies_remove_final, and remove the rows where Gross is NA
#create a dataframe subset that contains Gross, Released Month, and Genre indicator variables
# NOTE(review): columns 38 and 40:68 are assumed to be Gross, Released_Month
# and the genre indicators in the merged frame — positional indexing is
# fragile; confirm against the actual column layout.
DF_release = movies_release[c(38, 40:68)]
DF_release1 = melt(DF_release, id.vars=c("Gross", 'Released_Month'))
DF_release2 = subset(DF_release1, value>0)
# split the 28 genres into three groups of 10/10/8 for readable facets
DF_release3 = rbind(subset(DF_release2, variable == 'Drama'), subset(DF_release2, variable == 'Comedy'), subset(DF_release2, variable == 'Short'), subset(DF_release2, variable == 'Romance'), subset(DF_release2, variable == 'Action'), subset(DF_release2, variable == 'Crime'), subset(DF_release2, variable == 'Thriller'), subset(DF_release2, variable == 'Documentary'), subset(DF_release2, variable == 'Adventure'), subset(DF_release2, variable == 'Animation'))
DF_release4 = rbind(subset(DF_release2, variable == 'Horror'), subset(DF_release2, variable == 'Family'), subset(DF_release2, variable == 'Mystery'), subset(DF_release2, variable == 'Sci-Fi'), subset(DF_release2, variable == 'Fantasy'), subset(DF_release2, variable == 'Musical'), subset(DF_release2, variable == 'Western'), subset(DF_release2, variable == 'Music'), subset(DF_release2, variable == 'Biography'), subset(DF_release2, variable == 'War'))
DF_release5 = rbind(subset(DF_release2, variable == 'History'), subset(DF_release2, variable == 'Sport'), subset(DF_release2, variable == 'Adult'), subset(DF_release2, variable == 'Film-Noir'), subset(DF_release2, variable == 'Reality-TV'), subset(DF_release2, variable == 'Talk-Show'), subset(DF_release2, variable == 'News'), subset(DF_release2, variable == 'Game-Show'))
#plot shows the relationship between Gross and Released Month for different genres,
#since there are 28 different genres, plot them in a single facet will be too crowded. So
#Three separate facet plots are created.
# p1 <- qplot(x=Released_Month, y=Gross, facets=variable~., data=DF_release3, main='Gross Revenue vs. Release Month')
# p2 <- qplot(x=Released_Month, y=Gross, facets=variable~., data=DF_release4, main='Gross Revenue vs. Release Month')
# p3 <- qplot(x=Released_Month, y=Gross, facets=variable~., data=DF_release5, main='Gross Revenue vs. Release Month')
p1 <- ggplot(DF_release3, aes(x=Released_Month, y=Gross)) + geom_point(size=2, color='blue', alpha=0.3) + ylab(label="Gross") +
facet_grid(variable~.) + xlab("Released Month") + theme_bw(base_size = 20)
p2 <- ggplot(DF_release4, aes(x=Released_Month, y=Gross)) + geom_point(size=2, color='blue', alpha=0.3) + ylab(label="Gross") +
facet_grid(variable~.) + xlab("Released Month") + theme_bw(base_size = 20)
p3 <- ggplot(DF_release5, aes(x=Released_Month, y=Gross)) + geom_point(size=2, color='blue', alpha=0.3) + ylab(label="Gross") +
facet_grid(variable~.) + xlab("Released Month") + theme_bw(base_size = 20)
grid.arrange(p1, p2, p3, ncol=1)
}
# Scatter plot of IMDb vote counts against IMDb rating.
plotImdbVotesAndRating <- function(dt) {
  ggplot(dt, aes(x = imdbRating, y = imdbVotes)) +
    geom_point(size = 2, color = 'blue', alpha = 0.3) +
    labs(x = "IMDb Rating", y = "IMDb Votes") +
    theme_bw(base_size = 20)
}
# Scatter plot of Rotten Tomatoes review counts against critic rating.
plotTomatoRatingAndReviews <- function(dt) {
  ggplot(dt, aes(x = tomatoRating, y = tomatoReviews)) +
    geom_point(size = 2, color = 'blue', alpha = 0.3) +
    labs(x = "Rotten Tomato Critic Rating", y = "Number of Reviews") +
    theme_bw(base_size = 20)
}
# Boxplots of gross revenue by award category ("no"/"some"/"many"
# nominations or awards, as binned by convert()), ordered by descending
# median gross. Box width is proportional to group size.
plotGrossAndAward <- function(dt) {
  awarded <- dt
  awarded$wins_or_nominations <- unname(sapply(awarded$Awards, str_to_sum))
  awarded$award_category <- unname(sapply(awarded$wins_or_nominations, convert))
  category_wide <- dcast(awarded, Title ~ award_category, function(x) 1, fill = 0)
  awarded <- merge(awarded, category_wide)
  ggplot(awarded, aes(reorder(award_category, -Gross, median), Gross)) +
    geom_boxplot(varwidth = TRUE, fill = "blue", alpha = 0.3) +
    coord_flip() +
    scale_x_discrete("Award Category") +
    ylab(label = "Gross") +
    theme_bw(base_size = 20)
}
# Bar chart of the number of movies released per decade (1880-2020).
plotYear <- function(dt) {
  binned <- dt
  # bin Year into decade intervals (plyr::count tallies rows per bin)
  binned$decade <- cut(binned$Year, seq(1880, 2020, 10), labels = c('1880-1890', '1890-1900', '1900-1910','1910-1920', '1920-1930', '1930-1940', '1940-1950', '1950-1960', '1960-1970', '1970-1980', '1980-1990', '1990-2000', '2000-2010', '2010-2020'))
  decade_counts <- count(binned, c('decade'))
  ggplot(data = decade_counts, aes(y = freq, x = decade)) +
    geom_bar(stat = "identity", width = 0.5, position = "identity", fill = "blue", alpha = 0.3) +
    guides(fill = FALSE) +
    xlab("Decades") +
    ylab("Number of movies") +
    theme_bw(base_size = 20) +
    theme(axis.text.x = element_text(angle = 45, hjust = 1))
}
# Bar chart of title counts for the 50 most frequently credited directors.
plotDirector <- function(dt) {
  with_gross <- dt[!is.na(dt$Gross), ]
  with_gross$Profit <- with_gross$Gross - with_gross$Budget
  categorical <- with_gross[, !sapply(with_gross, is.numeric)]
  categorical$Profit <- with_gross$Profit
  # keep only the text columns judged potentially predictive of Profit
  keep_cols <- c('Title', 'Rated', 'Released', 'Runtime', 'Genre', 'Director',
                 'Writer', 'Actors', 'Language', 'Country', 'Awards',
                 'tomatoImage', 'Production', 'Profit')
  categorical <- categorical[, names(categorical) %in% keep_cols]
  # one 0/1 indicator column per director, prefixed "Director_"
  categorical <- categorical[categorical$Director != 'N/A', ]
  categorical$Director <- strsplit(categorical$Director, "(\\s)?,(\\s)?")
  long_form <- unnest(categorical, Director)
  long_form$Director <- paste0("Director_", gsub("\\s", "_", long_form$Director))
  wide_form <- dcast(long_form, Title ~ Director, function(x) 1, fill = 0)
  number_of_directors <- rowSums(wide_form[, -1])  # per-title director count (not plotted)
  # tally titles per director and plot the top 50
  director_counts <- data.frame(Director = names(wide_form[-1]), Count = colSums(wide_form[, -1]))
  director_counts <- director_counts[order(-director_counts$Count), ]
  ggplot(director_counts[1:50, ], aes(reorder(Director, Count), Count)) +
    geom_bar(stat = 'identity', fill = "blue", alpha = 0.3) +
    coord_flip() +
    ylab('Count of Titles') +
    xlab('Director') +
    theme_bw(base_size = 20)
}
# Bar chart of title counts for the 50 most frequently credited actors.
plotActor <- function(dt) {
  with_gross <- dt[!is.na(dt$Gross), ]
  with_gross$Profit <- with_gross$Gross - with_gross$Budget
  categorical <- with_gross[, !sapply(with_gross, is.numeric)]
  categorical$Profit <- with_gross$Profit
  # keep only the text columns judged potentially predictive of Profit
  keep_cols <- c('Title', 'Rated', 'Released', 'Runtime', 'Genre', 'Director',
                 'Writer', 'Actors', 'Language', 'Country', 'Awards',
                 'tomatoImage', 'Production', 'Profit')
  categorical <- categorical[, names(categorical) %in% keep_cols]
  # one 0/1 indicator column per actor, prefixed "Actor_"
  categorical <- categorical[categorical$Actors != 'N/A', ]
  categorical$Actors <- strsplit(categorical$Actors, "(\\s)?,(\\s)?")
  long_form <- unnest(categorical, Actors)
  long_form$Actors <- paste0("Actor_", gsub("\\s", "_", long_form$Actors))
  wide_form <- dcast(long_form, Title ~ Actors, function(x) 1, fill = 0)
  number_of_actors <- rowSums(wide_form[, -1])  # per-title cast size (not plotted)
  # tally titles per actor and plot the top 50
  actor_counts <- data.frame(Actors = names(wide_form[-1]), Count = colSums(wide_form[, -1]))
  actor_counts <- actor_counts[order(-actor_counts$Count), ]
  ggplot(actor_counts[1:50, ], aes(reorder(Actors, Count), Count)) +
    geom_bar(stat = 'identity', fill = "blue", alpha = 0.3) +
    coord_flip() +
    ylab('Count of Titles') +
    xlab('Actor') +
    theme_bw(base_size = 20)
}
# Correlation heatmap (lower triangle, hierarchically ordered) of the
# rating/review metrics and Gross, using complete cases only.
plotPairwiseCor <- function(dt) {
  rating_cols <- c("imdbRating", "imdbVotes", "tomatoMeter", "tomatoRating",
                   "tomatoReviews", "tomatoFresh", "tomatoRotten",
                   "tomatoUserMeter", "tomatoUserRating", "tomatoUserReviews",
                   "Gross")
  complete_rows <- na.omit(dt[, rating_cols])
  corr <- round(cor(complete_rows), 1)
  ggcorrplot(corr, hc.order = TRUE,
             type = "lower",
             lab = TRUE,
             lab_size = 3,
             method = "circle",
             colors = c("tomato2", "white", "springgreen3"),
             ggtheme = theme_bw)
}
# Grid of 13 scatter plots: Profit (Gross - Budget, movies from 2000 on)
# against each numeric predictor, each annotated with its correlation.
# NOTE(review): the "cor = ..." annotation strings are hard-coded, so they
# are only valid for the dataset they were computed on — recompute if the
# input changes.
plotProfit <- function(dt) {
movies <- dt[!is.na(dt$Gross),]
#add the Profit column
movies$Profit <- movies$Gross - movies$Budget
#remove all movies released prior to 2000
movies <- movies[movies$Year >= 2000,]
#drop gross, domestic_gross, and boxoffice columns
movies <- movies[ , !(names(movies) %in% c('Gross', 'Domestic_Gross', 'BoxOffice'))]
#keep only the numeric columns
movies_numeric <- movies[, sapply(movies, is.numeric)]
#convert Metascore to numeric and add it to movies_numeric
movies_numeric <- cbind(as.numeric(movies$Metascore), movies_numeric)
colnames(movies_numeric)[1] <- 'Metascore'
#since Year and Date columns are almost identical, drop the Date column
movies_numeric <- movies_numeric[, names(movies_numeric) != 'Date']
#explore the relationships between profit and all 13 variables
p1 <- ggplot(movies_numeric, aes(x=Metascore, y=Profit)) + geom_point(size=2, color='blue', alpha=0.3) + theme_bw(base_size = 20) + annotate("text", label = "cor = 0.19", x = 25, y = 1.8e+09, color = "red", size = 8)
p2 <- ggplot(movies_numeric, aes(x=Year, y=Profit)) + geom_point(size=2, color='blue', alpha=0.3) + theme_bw(base_size = 20) + annotate("text", label = "cor = 0.13", x = 2005, y = 1.8e+09, color = "red", size = 8)
p3 <- ggplot(movies_numeric, aes(x=imdbRating, y=Profit)) + geom_point(size=2, color='blue', alpha=0.3) + theme_bw(base_size = 20) + annotate("text", label = "cor = 0.23", x = 5.0, y = 1.8e+09, color = "red", size = 8)
p4 <- ggplot(movies_numeric, aes(x=imdbVotes, y=Profit)) + geom_point(size=2, color='blue', alpha=0.3) + theme_bw(base_size = 20) + annotate("text", label = "cor = 0.66", x = 500000, y = 1.8e+09, color = "red", size = 8)
p5 <- ggplot(movies_numeric, aes(x=tomatoMeter, y=Profit)) + geom_point(size=2, color='blue', alpha=0.3) + theme_bw(base_size = 20) + annotate("text", label = "cor = 0.19", x = 25, y = 1.8e+09, color = "red", size = 8)
p6 <- ggplot(movies_numeric, aes(x=tomatoRating, y=Profit)) + geom_point(size=2, color='blue', alpha=0.3) + theme_bw(base_size = 20) + annotate("text", label = "cor = 0.21", x = 5.0, y = 1.8e+09, color = "red", size = 8)
p7 <- ggplot(movies_numeric, aes(x=tomatoReviews, y=Profit)) + geom_point(size=2, color='blue', alpha=0.3) + theme_bw(base_size = 20) + annotate("text", label = "cor = 0.51", x = 100, y = 1.8e+09, color = "red", size = 8)
p8 <- ggplot(movies_numeric, aes(x=tomatoFresh, y=Profit)) + geom_point(size=2, color='blue', alpha=0.3) + theme_bw(base_size = 20) + annotate("text", label = "cor = 0.44", x = 100, y = 1.8e+09, color = "red", size = 8)
p9 <- ggplot(movies_numeric, aes(x=tomatoRotten, y=Profit)) + geom_point(size=2, color='blue', alpha=0.3) + theme_bw(base_size = 20) + annotate("text", label = "cor = 0.13", x = 80, y = 1.8e+09, color = "red", size = 8)
p10 <- ggplot(movies_numeric, aes(x=tomatoUserMeter, y=Profit)) + geom_point(size=2, color='blue', alpha=0.3) + theme_bw(base_size = 20) + annotate("text", label = "cor = 0.24", x = 25, y = 1.8e+09, color = "red", size = 8)
p11 <- ggplot(movies_numeric, aes(x=tomatoUserRating, y=Profit)) + geom_point(size=2, color='blue', alpha=0.3) + theme_bw(base_size = 20) + annotate("text", label = "cor = 0.30", x = 2, y = 1.8e+09, color = "red", size = 8)
p12 <- ggplot(movies_numeric, aes(x=tomatoUserReviews, y=Profit)) + geom_point(size=2, color='blue', alpha=0.3) + theme_bw(base_size = 20) + annotate("text", label = "cor = 0.22", x = 1e+07, y = 1.8e+09, color = "red", size = 8)
p13 <- ggplot(movies_numeric, aes(x=Budget, y=Profit)) + geom_point(size=2, color='blue', alpha=0.3) + theme_bw(base_size = 20) + annotate("text", label = "cor = 0.64", x = 1e+08, y = 1.8e+09, color = "red", size = 8)
grid.arrange(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, ncol=4, nrow=4)
}
# Learning-curve plot for a linear model Profit ~ all numeric features.
# For each training fraction in 5%..95% (step 5%), the data is randomly
# split 10 times (fixed seeds for reproducibility) and the mean train/test
# MSE is plotted against training-set size.
plotLRNumeric <- function(dt) {
movies <- dt[!is.na(dt$Gross),]
#add the Profit column
movies$Profit <- movies$Gross - movies$Budget
#remove all movies released prior to 2000
movies <- movies[movies$Year >= 2000,]
#drop gross, domestic_gross, and boxoffice columns
movies <- movies[ , !(names(movies) %in% c('Gross', 'Domestic_Gross', 'BoxOffice'))]
##code for assignment 1
#keep only the numeric columns
movies_numeric <- movies[, sapply(movies, is.numeric)]
#convert Metascore to numeric and add it to movies_numeric
movies_numeric <- cbind(as.numeric(movies$Metascore), movies_numeric)
colnames(movies_numeric)[1] <- 'Metascore'
#since Year and Date columns are almost identical, drop the Date column
movies_numeric <- movies_numeric[, names(movies_numeric) != 'Date']
final_MSE_train <- NULL
final_MSE_test <- NULL
f <- seq(0.05, 0.95, by = 0.05)
for (fraction in f) {
#divide data into training and test sets
smp_size <- floor(fraction * nrow(movies_numeric))
all_MSE_train <- NULL
all_MSE_test <- NULL
#repeat the random partition of dataset 10 times (fixed seeds => reproducible)
for (n in c(12, 47, 35, 67, 85, 91, 55, 102, 219, 49)) {
set.seed(n)
train_ind <- sample(seq_len(nrow(movies_numeric)), size = smp_size)
train <- movies_numeric[train_ind,]
test <- movies_numeric[-train_ind,]
mylm <- lm(Profit ~., train)
MSE_train <- mean(residuals(mylm)^2)
all_MSE_train <- rbind(all_MSE_train, MSE_train)
test_fitted <- predict(mylm, newdata=test[,names(test) != 'Profit'])
MSE_test <- mean((test$Profit-test_fitted)^2, na.rm = TRUE)
all_MSE_test <- rbind(all_MSE_test, MSE_test)
}
# average the 10 splits for this training fraction
final_MSE_train <- rbind(final_MSE_train, mean(all_MSE_train))
final_MSE_test <- rbind(final_MSE_test, mean(all_MSE_test))
}
training_set_size <- floor(f * nrow(movies_numeric))
MSE1 <- data.frame(cbind(training_set_size, final_MSE_train, final_MSE_test))
colnames(MSE1)[2] <- 'MSE_Train'
colnames(MSE1)[3] <- 'MSE_Test'
MSE11 <- melt(MSE1, id.vars = 'training_set_size')
ggplot(MSE11, aes(x=training_set_size, y=value, colour=variable)) + geom_line(size=2) +
geom_point(size=4, color='blue', alpha=0.3) + ylab(label="MSE") +
xlab("Size of Training Set") + theme_bw(base_size = 20)
}
# Learning-curve plot for a linear model of Profit on TRANSFORMED numeric
# features: power transforms chosen by correlation, pairwise products with
# Budget^2, and one binned indicator. Same train/test protocol as
# plotLRNumeric (fractions 5%..95%, 10 fixed-seed splits each).
plotLRTrans <- function(dt) {
movies <- dt[!is.na(dt$Gross),]
#add the Profit column
movies$Profit <- movies$Gross - movies$Budget
#remove all movies released prior to 2000
movies <- movies[movies$Year >= 2000,]
#drop gross, domestic_gross, and boxoffice columns
movies <- movies[ , !(names(movies) %in% c('Gross', 'Domestic_Gross', 'BoxOffice'))]
##code for assignment 1
#keep only the numeric columns
movies_numeric <- movies[, sapply(movies, is.numeric)]
#convert Metascore to numeric and add it to movies_numeric
movies_numeric <- cbind(as.numeric(movies$Metascore), movies_numeric)
colnames(movies_numeric)[1] <- 'Metascore'
#since Year and Date columns are almost identical, drop the Date column
movies_numeric <- movies_numeric[, names(movies_numeric) != 'Date']
# X = predictors, Y = response (Profit)
X <- movies_numeric[,!grepl("Profit", colnames(movies_numeric))]
X <- apply(X, 2, as.numeric)
Y <- as.matrix(movies_numeric$Profit)
#create transformed variables data frame based on highest correlation result
X <- as.data.frame(X)
X$Year <- X$Year^3
X$imdbRating <- X$imdbRating^3
X$tomatoRating <- X$tomatoRating^3
X$tomatoReviews <- X$tomatoReviews^3
X$tomatoFresh <- X$tomatoFresh^2
X$tomatoRotten <- X$tomatoRotten^3
X$tomatoUserMeter <- X$tomatoUserMeter^2
X$tomatoUserRating <- X$tomatoUserRating^3
X$tomatoUserReviews <- X$tomatoUserReviews^(1/3)
X$Budget <- X$Budget^2
# NOTE(review): this rename list must stay in sync with the column order of
# movies_numeric — positional renaming is fragile if the schema changes.
colnames(X) <- c('Metascore', 'Year^3', 'imdbRating^3', 'imdbVotes', 'tomatoMeter', 'tomatoRating^3', 'tomatoReviews^3', 'tomatoFresh^2', 'tomatoRotten^3', 'tomatoUserMeter^2', 'tomatoUserRating^3', 'tomatoUserReviews^(1/3)', 'Budget^2')
#based on intuition and the fact that imdbVotes and Budget have the highest correlations with Profit
#create product of two variables
X$`imdbVotesBudget^2` <- X$imdbVotes * X$`Budget^2`
X$`tomatoReviews^3Budget^2` <- X$`tomatoReviews^3` * X$`Budget^2`
X$`imdbRating^3Budget^2` <- X$`imdbRating^3` * X$`Budget^2`
X$`tomatoRating^3Budget^2` <- X$`tomatoRating^3` * X$`Budget^2`
X$`tomatoMeterBudget^2` <- X$tomatoMeter * X$`Budget^2`
X$`tomatoFresh^2Budget^2` <- X$`tomatoFresh^2` * X$`Budget^2`
X$`tomatoUserMeter^2Budget^2` <- X$`tomatoUserMeter^2` * X$`Budget^2`
X$`tomatoUserRating^3Budget^2` <- X$`tomatoUserRating^3` * X$`Budget^2`
X$`MetascoreBudget^2` <- X$Metascore * X$`Budget^2`
#in the plot of tomatoUserReviews and Profit, a clear separation of data can be seen. so a
#new variable is_tomatoUserReviews_smaller_than_5M is added which is derived from binning the tomatoUserReviews variable
X$is_tomatoUserReviews_smaller_than_5M <- ifelse(movies_numeric$tomatoUserReviews < 5e+06, 1, 0)
movies_numeric_transformed <- cbind(movies$Title, X, Y)
names(movies_numeric_transformed)[1] <- 'Title'
names(movies_numeric_transformed)[ncol(movies_numeric_transformed)] <- 'Profit'
movies_numeric_transformed <- na.omit(movies_numeric_transformed)
#divide the data into training and test set, train the model using selected transformed variables
final_MSE_train <- NULL
final_MSE_test <- NULL
f <- seq(0.05, 0.95, by = 0.05)
for (fraction in f) {
#divide data into training and test sets
smp_size <- floor(fraction * nrow(movies_numeric_transformed))
all_MSE_train <- NULL
all_MSE_test <- NULL
#repeat the random partition of dataset 10 times (fixed seeds => reproducible)
for (n in c(12, 47, 35, 67, 85, 91, 55, 102, 219, 49)) {
set.seed(n)
train_ind <- sample(seq_len(nrow(movies_numeric_transformed)), size = smp_size)
train <- movies_numeric_transformed[train_ind,-1]
test <- movies_numeric_transformed[-train_ind,-1]
mylm <- lm(Profit~., train)
MSE_train <- mean(residuals(mylm)^2)
all_MSE_train <- rbind(all_MSE_train, MSE_train)
test_fitted <- predict(mylm, newdata=test[,names(test) != 'Profit'])
MSE_test <- mean((test$Profit-test_fitted)^2)
all_MSE_test <- rbind(all_MSE_test, MSE_test)
}
# average the 10 splits for this training fraction
final_MSE_train <- rbind(final_MSE_train, mean(all_MSE_train))
final_MSE_test <- rbind(final_MSE_test, mean(all_MSE_test))
}
training_set_size <- floor(f * nrow(movies_numeric_transformed))
MSE2 <- data.frame(cbind(training_set_size, final_MSE_train, final_MSE_test))
colnames(MSE2)[2] <- 'MSE_Train'
colnames(MSE2)[3] <- 'MSE_Test'
MSE21 <- melt(MSE2, id.vars = 'training_set_size')
ggplot(MSE21, aes(x=training_set_size, y=value, colour=variable)) + geom_line(size=2) +
geom_point(size=4, color='blue', alpha=0.3) +
ylab(label="MSE") +
xlab("Size of Training Set") + theme_bw(base_size = 20)
}
# Learning-curve plot for a linear model of Profit on engineered
# CATEGORICAL features (genre/director/actor/writer/rating/production
# indicators, counts, release month, parsed runtime and awards).
# Same train/test protocol as plotLRNumeric.
#
# NOTE(review): this function reads `count`, `count1`, `count2` and
# `count3` which are NOT defined in this scope — `count`/`count1` are
# locals of plotDirector/plotActor and are never exported, and
# `count2`/`count3` do not appear anywhere in this file. As written this
# will fail with "object not found" unless those objects happen to exist
# in the global environment — confirm before relying on it.
plotLRCate <- function(dt) {
movies <- dt[!is.na(dt$Gross),]
#add the Profit column
movies$Profit <- movies$Gross - movies$Budget
#remove all movies released prior to 2000
movies <- movies[movies$Year >= 2000,]
#drop gross, domestic_gross, and boxoffice columns
movies <- movies[ , !(names(movies) %in% c('Gross', 'Domestic_Gross', 'BoxOffice'))]
#keep only the non-numeric columns
movies_nonnum <- movies[, !sapply(movies, is.numeric)]
movies_nonnum$Profit <- movies$Profit
#based on observation and intuition, choose the columns that may be useful for predicting Profit
movies_nonnum <- movies_nonnum[, names(movies_nonnum) %in% c('Title', 'Rated', 'Released','Runtime','Genre','Director', 'Writer', 'Actors', 'Language', 'Country','Awards', 'tomatoImage', 'Production', 'Profit')]
#convert a Runtime string into a numeric value in minutes
#code from project part I
# NOTE(review): this local definition shadows the top-level str_to_num.
str_to_num <- function(s){
if (grepl('h', s) && grepl('min', s)) {
hour = as.numeric(unlist(strsplit(s, ' '))[1])
min = as.numeric(unlist(strsplit(s, ' '))[3])
return(60*hour+min)
}
else if (grepl('h', s) && !grepl('min', s)) {
hour <- as.numeric(unlist(strsplit(s, ' '))[1])
return(60*hour)
}
else if (!grepl('h', s) && grepl('min', s)) {
min <- as.numeric(unlist(strsplit(s, ' '))[1])
return(min)
}
else {
return(NA)
}
}
movies_nonnum$Runtime = unname(sapply(movies_nonnum$Runtime, str_to_num))
#end code from project part I
#convert Awards string to total number of wins and nominations
#code from project part I
# NOTE(review): shadows the top-level str_to_sum AND differs from it —
# this version maps 'N/A' to NA, the global version maps it to 0.
str_to_sum <- function(s){
if (s == 'N/A') {
return(NA)
}
else {
l <- unlist(strsplit(s, "[^0-9]+"))
l <- l[l != '']
result <- sum(as.numeric(l))
return(result)
}
}
movies_nonnum$Awards <- unname(sapply(movies_nonnum$Awards, str_to_sum))
#end code from project part I
#convert Released to Released_Month (Released Year information is captured in numeric variables, month information was shown
#to be related to Gross in project part I)
movies_nonnum$Released <- month(movies_nonnum$Released)
colnames(movies_nonnum)[3] <- 'Released_Month'
#parse each text string in Genre into a binary vector
#code from project part I
movies_nonnum <- movies_nonnum[movies_nonnum$Genre != 'N/A',]
movies_nonnum$Genre <- strsplit(movies_nonnum$Genre, ', ')
movies_long <- unnest(movies_nonnum, Genre)
movies_long$Genre <- paste0("Genre_", gsub("\\s","_",movies_long$Genre))
movies_wide <- dcast(movies_long, Title ~ Genre, function(x) 1, fill = 0)
movies_nonnum <- merge(movies_nonnum, movies_wide)
#end code from project part I
#parse each text string in Director into a binary vector
movies_nonnum <- movies_nonnum[movies_nonnum$Director != 'N/A',]
movies_nonnum$Director <- strsplit(movies_nonnum$Director, "(\\s)?,(\\s)?")
movies_long1 <- unnest(movies_nonnum, Director)
movies_long1$Director <- paste0("Director_", gsub("\\s","_",movies_long1$Director))
movies_wide1 <- dcast(movies_long1, Title ~ Director, function(x) 1, fill = 0)
number_of_directors <- rowSums(movies_wide1[,-1])
#consider the top 123 directors where count of titles >= 5
# NOTE(review): `count` is not defined in this function — see header note.
count$Director <- as.character(count$Director)
movies_wide1_top <- movies_wide1[,c('Title',count$Director[1:123])]
#add a 124th variable,Director_others, if there is no other directors, Director_others equals to 0, else, it equals to 1
movies_wide1_top$number_of_top_directors <- rowSums(movies_wide1_top[,-1])
movies_wide1_top <- cbind(movies_wide1_top, number_of_directors)
movies_wide1_top$Director_others <- ifelse((movies_wide1_top$number_of_directors-movies_wide1_top$number_of_top_directors) == 0, 0, 1)
movies_wide1_top <- movies_wide1_top[, !colnames(movies_wide1_top) %in% c('number_of_directors','number_of_top_directors')]
movies_nonnum <- merge(movies_nonnum, movies_wide1_top)
#parse each text string in Actor into a binary vector
movies_nonnum <- movies_nonnum[movies_nonnum$Actors != 'N/A',]
movies_nonnum$Actors <- strsplit(movies_nonnum$Actors, "(\\s)?,(\\s)?")
movies_long2 <- unnest(movies_nonnum, Actors)
movies_long2$Actors <- paste0("Actor_", gsub("\\s","_",movies_long2$Actors))
movies_wide2 <- dcast(movies_long2, Title ~ Actors, function(x) 1, fill = 0)
number_of_actors <- rowSums(movies_wide2[,-1])
#consider the top 167 actors where count of titles > 10
# NOTE(review): `count1` is not defined in this function — see header note.
count1$Actors <- as.character(count1$Actors)
movies_wide2_top <- movies_wide2[,c('Title',count1$Actors[1:167])]
#add a 168th variable,Actor_others, if there is no other actors, Actor_others equals to 0, else, it equals to 1
movies_wide2_top$number_of_top_actors <- rowSums(movies_wide2_top[,-1])
movies_wide2_top <- cbind(movies_wide2_top, number_of_actors)
movies_wide2_top$Actor_others <- ifelse((movies_wide2_top$number_of_actors-movies_wide2_top$number_of_top_actors) == 0, 0, 1)
movies_wide2_top <- movies_wide2_top[, !colnames(movies_wide2_top) %in% c('number_of_actors','number_of_top_actors')]
movies_nonnum <- merge(movies_nonnum, movies_wide2_top)
#parse each text string in Writer into a binary vector
movies_nonnum <- movies_nonnum[movies_nonnum$Writer != 'N/A',]
movies_nonnum$Writer <- strsplit(movies_nonnum$Writer, "(\\s)?,(\\s)?")
movies_long3 <- unnest(movies_nonnum, Writer)
movies_long3$Writer <- gsub("\\s*\\([^\\)]+\\)","",movies_long3$Writer)
movies_long3$Writer <- paste0("Writer_", gsub("\\s","_",movies_long3$Writer))
movies_wide3 <- dcast(movies_long3, Title ~ Writer, function(x) 1, fill = 0)
number_of_writers <- rowSums(movies_wide3[,-1])
#consider the top 129 writers where count of titles > 5
# NOTE(review): `count2` is not defined anywhere in this file — see header note.
count2$Writer <- as.character(count2$Writer)
movies_wide3_top <- movies_wide3[,c('Title',count2$Writer[1:129])]
#add a 130th variable,Writer_others, if there is no other writers, Writer_others equals to 0, else, it equals to 1
movies_wide3_top$number_of_top_writers <- rowSums(movies_wide3_top[,-1])
movies_wide3_top <- cbind(movies_wide3_top, number_of_writers)
movies_wide3_top$Writer_others <- ifelse((movies_wide3_top$number_of_writers-movies_wide3_top$number_of_top_writers) == 0, 0, 1)
movies_wide3_top <- movies_wide3_top[, !colnames(movies_wide3_top) %in% c('number_of_writers','number_of_top_writers')]
movies_nonnum <- merge(movies_nonnum, movies_wide3_top)
#convert Language to number of languages
movies_nonnum$Language <- strsplit(movies_nonnum$Language, ', ')
movies_nonnum$Language <- sapply(movies_nonnum$Language, length)
#convert Country to number of countries
movies_nonnum$Country <- strsplit(movies_nonnum$Country, ', ')
movies_nonnum$Country <- sapply(movies_nonnum$Country, length)
#convert Rated into a binary vector
movies_nonnum <- movies_nonnum[movies_nonnum$Rated != 'N/A',]
# NOTE(review): this local `convert` shadows the top-level convert() and
# has entirely different semantics (merges 'UNRATED' into 'NOT RATED').
convert <- function(s) {
if(s == 'UNRATED') {
s <- 'NOT RATED'
return(s)
}
else {
return(s)
}
}
movies_nonnum$Rated <- sapply(movies_nonnum$Rated, convert)
movies_nonnum_wide <- dcast(movies_nonnum, Title ~ Rated, function(x) 1, fill = 0)
movies_nonnum <- merge(movies_nonnum, movies_nonnum_wide)
#convert tomatoImage into a binary vector
movies_nonnum <- movies_nonnum[movies_nonnum$tomatoImage != 'N/A',]
movies_nonnum_wide1 <- dcast(movies_nonnum, Title ~ tomatoImage, function(x) 1, fill = 0)
movies_nonnum <- merge(movies_nonnum, movies_nonnum_wide1)
#convert Production into a binary vector
movies_nonnum <- movies_nonnum[movies_nonnum$Production != 'N/A',]
movies_wide4 <- dcast(movies_nonnum, Title ~ Production, function(x) 1, fill = 0)
#consider the top 49 productions where count of titles >= 10
# NOTE(review): `count3` is not defined anywhere in this file — see header note.
count3$Production <- as.character(count3$Production)
movies_wide4_top <- movies_wide4[,c('Title',count3$Production[1:49])]
#add a 50th variable,Production_others, if production is not in top productions, Writer_others equals to 1, else, it equals to 0
movies_wide4_top$Production_others <- ifelse(movies_nonnum$Production %in% count3$Production[1:49], 0, 1)
movies_nonnum <- merge(movies_nonnum, movies_wide4_top)
#drop the Genre, Director, and Actors columns
movies_nonnum <- movies_nonnum[,!colnames(movies_nonnum) %in% c('Genre','Director','Actors','Rated','Writer','tomatoImage','Production')]
##code for assignment 4
movies_nonnum_lm <- movies_nonnum[, names(movies_nonnum) != 'Title']
final_MSE_train <- NULL
final_MSE_test <- NULL
f <- seq(0.05, 0.95, by = 0.05)
for (fraction in f) {
#divide data into training and test sets
smp_size <- floor(fraction * nrow(movies_nonnum_lm))
all_MSE_train <- NULL
all_MSE_test <- NULL
#repeat the random partition of dataset 10 times (fixed seeds => reproducible)
for (n in c(12, 47, 35, 67, 85, 91, 55, 102, 219, 49)) {
set.seed(n)
train_ind <- sample(seq_len(nrow(movies_nonnum_lm)), size = smp_size)
train <- movies_nonnum_lm[train_ind,]
test <- movies_nonnum_lm[-train_ind,]
mylm <- lm(Profit ~., train)
MSE_train <- mean(residuals(mylm)^2)
all_MSE_train <- rbind(all_MSE_train, MSE_train)
test_fitted <- predict(mylm, newdata=test[,names(test) != 'Profit'])
MSE_test <- mean((test$Profit-test_fitted)^2, na.rm = TRUE)
all_MSE_test <- rbind(all_MSE_test, MSE_test)
}
# average the 10 splits for this training fraction
final_MSE_train <- rbind(final_MSE_train, mean(all_MSE_train))
final_MSE_test <- rbind(final_MSE_test, mean(all_MSE_test))
}
training_set_size <- floor(f * nrow(movies_nonnum_lm))
MSE4 <- data.frame(cbind(training_set_size, final_MSE_train, final_MSE_test))
colnames(MSE4)[2] <- 'MSE_Train'
colnames(MSE4)[3] <- 'MSE_Test'
MSE41 <- melt(MSE4, id.vars = 'training_set_size')
ggplot(MSE41, aes(x=training_set_size, y=value, colour=variable)) + geom_line(size=2) +
geom_point(size=4, color='blue', alpha=0.3) +
ylab(label="MSE") +
xlab("Size of Training Set") + coord_cartesian(ylim=c(0,5e+16)) + theme_bw(base_size = 20)
}
# plotLRAll: end-to-end pipeline that predicts movie Profit from the combined
# numeric + categorical feature set and plots train/test MSE learning curves.
#
# Steps, in order (statement order matters -- rows are filtered repeatedly):
#   1. derive Profit, keep movies from 2000 on, drop the gross columns;
#   2. build transformed numeric features (powers/roots chosen earlier by
#      correlation screening) plus Budget interaction terms;
#   3. one-hot encode categorical fields (Genre, Director, Actors, Writer,
#      Rated, tomatoImage, Production), keeping only "top" levels;
#   4. fit cross-validated LASSO models (cv.glmnet) over training fractions
#      0.05..0.95 x 10 fixed random seeds; collect mean train/test MSE;
#   5. plot this question's MSE curves together with MSE1/MSE2/MSE4 from
#      earlier questions.
#
# NOTE(review): relies on objects that must already exist in the calling
# environment: `count`, `count1`, `count2`, `count3` (title counts per
# director/actor/writer/production), `high_cor()`, and the data frames
# `MSE1`, `MSE2`, `MSE4` from earlier questions; also assumes tidyr
# (unnest), reshape2 (dcast/melt), lubridate (month), Matrix, glmnet and
# ggplot2 are attached. Confirm before calling.
#
# @param dt raw movie data frame (expects Gross, Budget, Year, Title, ... columns)
# @return a ggplot object comparing MSE curves across questions
plotLRAll <- function(dt) {
movies <- dt[!is.na(dt$Gross),]
#add the Profit column
movies$Profit <- movies$Gross - movies$Budget
#remove all movies released prior to 2000
movies <- movies[movies$Year >= 2000,]
#drop gross, domestic_gross, and boxoffice columns
movies <- movies[ , !(names(movies) %in% c('Gross', 'Domestic_Gross', 'BoxOffice'))]
##code for assignment 1
#keep only the numeric columns
movies_numeric <- movies[, sapply(movies, is.numeric)]
#convert Metascore to numeric and add it to movies_numeric
movies_numeric <- cbind(as.numeric(movies$Metascore), movies_numeric)
colnames(movies_numeric)[1] <- 'Metascore'
#since Year and Date columns are almost identical, drop the Date column
movies_numeric <- movies_numeric[, names(movies_numeric) != 'Date']
X <- movies_numeric[,!grepl("Profit", colnames(movies_numeric))]
X <- apply(X, 2, as.numeric)
Y <- as.matrix(movies_numeric$Profit)
#create transformed variables data frame based on highest correlation result
#(the exponents below hard-code the findings of the earlier correlation screen)
X <- as.data.frame(X)
X$Year <- X$Year^3
X$imdbRating <- X$imdbRating^3
X$tomatoRating <- X$tomatoRating^3
X$tomatoReviews <- X$tomatoReviews^3
X$tomatoFresh <- X$tomatoFresh^2
X$tomatoRotten <- X$tomatoRotten^3
X$tomatoUserMeter <- X$tomatoUserMeter^2
X$tomatoUserRating <- X$tomatoUserRating^3
X$tomatoUserReviews <- X$tomatoUserReviews^(1/3)
X$Budget <- X$Budget^2
#column names record the transformation applied; order must match movies_numeric
colnames(X) <- c('Metascore', 'Year^3', 'imdbRating^3', 'imdbVotes', 'tomatoMeter', 'tomatoRating^3', 'tomatoReviews^3', 'tomatoFresh^2', 'tomatoRotten^3', 'tomatoUserMeter^2', 'tomatoUserRating^3', 'tomatoUserReviews^(1/3)', 'Budget^2')
#based on intuition and the fact that imdbVotes and Budget have the highest correlations with Profit
#create product of two variables
X$`imdbVotesBudget^2` <- X$imdbVotes * X$`Budget^2`
X$`tomatoReviews^3Budget^2` <- X$`tomatoReviews^3` * X$`Budget^2`
X$`imdbRating^3Budget^2` <- X$`imdbRating^3` * X$`Budget^2`
X$`tomatoRating^3Budget^2` <- X$`tomatoRating^3` * X$`Budget^2`
X$`tomatoMeterBudget^2` <- X$tomatoMeter * X$`Budget^2`
X$`tomatoFresh^2Budget^2` <- X$`tomatoFresh^2` * X$`Budget^2`
X$`tomatoUserMeter^2Budget^2` <- X$`tomatoUserMeter^2` * X$`Budget^2`
X$`tomatoUserRating^3Budget^2` <- X$`tomatoUserRating^3` * X$`Budget^2`
X$`MetascoreBudget^2` <- X$Metascore * X$`Budget^2`
#in the plot of tomatoUserReviews and Profit, a clear separation of data can be seen. so a
#new variable is_tomatoUserReviews_smaller_than_5M is added which is derived from binning the tomatoUserReviews variable
X$is_tomatoUserReviews_smaller_than_5M <- ifelse(movies_numeric$tomatoUserReviews < 5e+06, 1, 0)
movies_numeric_transformed <- cbind(movies$Title, X, Y)
names(movies_numeric_transformed)[1] <- 'Title'
names(movies_numeric_transformed)[ncol(movies_numeric_transformed)] <- 'Profit'
movies_numeric_transformed <- na.omit(movies_numeric_transformed)
#keep only the non-numeric columns
movies_nonnum <- movies[, !sapply(movies, is.numeric)]
movies_nonnum$Profit <- movies$Profit
#based on observation and intuition, choose the columns that may be useful for predicting Profit
movies_nonnum <- movies_nonnum[, names(movies_nonnum) %in% c('Title', 'Rated', 'Released','Runtime','Genre','Director', 'Writer', 'Actors', 'Language', 'Country','Awards', 'tomatoImage', 'Production', 'Profit')]
#convert a Runtime string into a numeric value in minutes
#code from project part I
#(handles "Xh Ymin", "Xh", "Ymin"; anything else becomes NA)
str_to_num <- function(s){
if (grepl('h', s) && grepl('min', s)) {
hour = as.numeric(unlist(strsplit(s, ' '))[1])
min = as.numeric(unlist(strsplit(s, ' '))[3])
return(60*hour+min)
}
else if (grepl('h', s) && !grepl('min', s)) {
hour <- as.numeric(unlist(strsplit(s, ' '))[1])
return(60*hour)
}
else if (!grepl('h', s) && grepl('min', s)) {
min <- as.numeric(unlist(strsplit(s, ' '))[1])
return(min)
}
else {
return(NA)
}
}
movies_nonnum$Runtime = unname(sapply(movies_nonnum$Runtime, str_to_num))
#end code from project part I
#convert Awards string to total number of wins and nominations
#code from project part I
#(sums every number embedded in the Awards text; 'N/A' becomes NA)
str_to_sum <- function(s){
if (s == 'N/A') {
return(NA)
}
else {
l <- unlist(strsplit(s, "[^0-9]+"))
l <- l[l != '']
result <- sum(as.numeric(l))
return(result)
}
}
movies_nonnum$Awards <- unname(sapply(movies_nonnum$Awards, str_to_sum))
#end code from project part I
#convert Released to Released_Month (Released Year information is captured in numeric variables, month information was shown
#to be related to Gross in project part I)
movies_nonnum$Released <- month(movies_nonnum$Released)
colnames(movies_nonnum)[3] <- 'Released_Month'
#parse each text string in Genre into a binary vector
#code from project part I
movies_nonnum <- movies_nonnum[movies_nonnum$Genre != 'N/A',]
movies_nonnum$Genre <- strsplit(movies_nonnum$Genre, ', ')
movies_long <- unnest(movies_nonnum, Genre)
movies_long$Genre <- paste0("Genre_", gsub("\\s","_",movies_long$Genre))
movies_wide <- dcast(movies_long, Title ~ Genre, function(x) 1, fill = 0)
movies_nonnum <- merge(movies_nonnum, movies_wide)
#end code from project part I
#parse each text string in Director into a binary vector
movies_nonnum <- movies_nonnum[movies_nonnum$Director != 'N/A',]
movies_nonnum$Director <- strsplit(movies_nonnum$Director, "(\\s)?,(\\s)?")
movies_long1 <- unnest(movies_nonnum, Director)
movies_long1$Director <- paste0("Director_", gsub("\\s","_",movies_long1$Director))
movies_wide1 <- dcast(movies_long1, Title ~ Director, function(x) 1, fill = 0)
number_of_directors <- rowSums(movies_wide1[,-1])
#consider the top 123 directors where count of titles >= 5
# NOTE(review): `count` is a global data.frame of director title counts
# supplied by the session; the assignment below only changes a local copy.
count$Director <- as.character(count$Director)
movies_wide1_top <- movies_wide1[,c('Title',count$Director[1:123])]
#add a 124th variable,Director_others, if there is no other directors, Director_others equals to 0, else, it equals to 1
movies_wide1_top$number_of_top_directors <- rowSums(movies_wide1_top[,-1])
movies_wide1_top <- cbind(movies_wide1_top, number_of_directors)
movies_wide1_top$Director_others <- ifelse((movies_wide1_top$number_of_directors-movies_wide1_top$number_of_top_directors) == 0, 0, 1)
movies_wide1_top <- movies_wide1_top[, !colnames(movies_wide1_top) %in% c('number_of_directors','number_of_top_directors')]
movies_nonnum <- merge(movies_nonnum, movies_wide1_top)
#parse each text string in Actor into a binary vector
movies_nonnum <- movies_nonnum[movies_nonnum$Actors != 'N/A',]
movies_nonnum$Actors <- strsplit(movies_nonnum$Actors, "(\\s)?,(\\s)?")
movies_long2 <- unnest(movies_nonnum, Actors)
movies_long2$Actors <- paste0("Actor_", gsub("\\s","_",movies_long2$Actors))
movies_wide2 <- dcast(movies_long2, Title ~ Actors, function(x) 1, fill = 0)
number_of_actors <- rowSums(movies_wide2[,-1])
#consider the top 167 actors where count of titles > 10
# NOTE(review): `count1` is the analogous global table of actor title counts.
count1$Actors <- as.character(count1$Actors)
movies_wide2_top <- movies_wide2[,c('Title',count1$Actors[1:167])]
#add a 168th variable,Actor_others, if there is no other actors, Actor_others equals to 0, else, it equals to 1
movies_wide2_top$number_of_top_actors <- rowSums(movies_wide2_top[,-1])
movies_wide2_top <- cbind(movies_wide2_top, number_of_actors)
movies_wide2_top$Actor_others <- ifelse((movies_wide2_top$number_of_actors-movies_wide2_top$number_of_top_actors) == 0, 0, 1)
movies_wide2_top <- movies_wide2_top[, !colnames(movies_wide2_top) %in% c('number_of_actors','number_of_top_actors')]
movies_nonnum <- merge(movies_nonnum, movies_wide2_top)
#parse each text string in Writer into a binary vector
movies_nonnum <- movies_nonnum[movies_nonnum$Writer != 'N/A',]
movies_nonnum$Writer <- strsplit(movies_nonnum$Writer, "(\\s)?,(\\s)?")
movies_long3 <- unnest(movies_nonnum, Writer)
#strip parenthetical credits such as "(screenplay)" before building names
movies_long3$Writer <- gsub("\\s*\\([^\\)]+\\)","",movies_long3$Writer)
movies_long3$Writer <- paste0("Writer_", gsub("\\s","_",movies_long3$Writer))
movies_wide3 <- dcast(movies_long3, Title ~ Writer, function(x) 1, fill = 0)
number_of_writers <- rowSums(movies_wide3[,-1])
#consider the top 129 writers where count of titles > 5
# NOTE(review): `count2` is the analogous global table of writer title counts.
count2$Writer <- as.character(count2$Writer)
movies_wide3_top <- movies_wide3[,c('Title',count2$Writer[1:129])]
#add a 130th variable,Writer_others, if there is no other writers, Writer_others equals to 0, else, it equals to 1
movies_wide3_top$number_of_top_writers <- rowSums(movies_wide3_top[,-1])
movies_wide3_top <- cbind(movies_wide3_top, number_of_writers)
movies_wide3_top$Writer_others <- ifelse((movies_wide3_top$number_of_writers-movies_wide3_top$number_of_top_writers) == 0, 0, 1)
movies_wide3_top <- movies_wide3_top[, !colnames(movies_wide3_top) %in% c('number_of_writers','number_of_top_writers')]
movies_nonnum <- merge(movies_nonnum, movies_wide3_top)
#convert Language to number of languages
movies_nonnum$Language <- strsplit(movies_nonnum$Language, ', ')
movies_nonnum$Language <- sapply(movies_nonnum$Language, length)
#convert Country to number of countries
movies_nonnum$Country <- strsplit(movies_nonnum$Country, ', ')
movies_nonnum$Country <- sapply(movies_nonnum$Country, length)
#convert Rated into a binary vector
movies_nonnum <- movies_nonnum[movies_nonnum$Rated != 'N/A',]
#normalize the two spellings of the unrated category before one-hot encoding
convert <- function(s) {
if(s == 'UNRATED') {
s <- 'NOT RATED'
return(s)
}
else {
return(s)
}
}
movies_nonnum$Rated <- sapply(movies_nonnum$Rated, convert)
movies_nonnum_wide <- dcast(movies_nonnum, Title ~ Rated, function(x) 1, fill = 0)
movies_nonnum <- merge(movies_nonnum, movies_nonnum_wide)
#convert tomatoImage into a binary vector
movies_nonnum <- movies_nonnum[movies_nonnum$tomatoImage != 'N/A',]
movies_nonnum_wide1 <- dcast(movies_nonnum, Title ~ tomatoImage, function(x) 1, fill = 0)
movies_nonnum <- merge(movies_nonnum, movies_nonnum_wide1)
#convert Production into a binary vector
movies_nonnum <- movies_nonnum[movies_nonnum$Production != 'N/A',]
movies_wide4 <- dcast(movies_nonnum, Title ~ Production, function(x) 1, fill = 0)
#consider the top 49 productions where count of titles >= 10
# NOTE(review): `count3` is the analogous global table of production counts.
count3$Production <- as.character(count3$Production)
movies_wide4_top <- movies_wide4[,c('Title',count3$Production[1:49])]
#add a 50th variable,Production_others, if production is not in top productions, Writer_others equals to 1, else, it equals to 0
movies_wide4_top$Production_others <- ifelse(movies_nonnum$Production %in% count3$Production[1:49], 0, 1)
movies_nonnum <- merge(movies_nonnum, movies_wide4_top)
#drop the Genre, Director, and Actors columns
movies_nonnum <- movies_nonnum[,!colnames(movies_nonnum) %in% c('Genre','Director','Actors','Rated','Writer','tomatoImage','Production')]
#Profit is kept on the numeric side only, so drop it here before the merge
movies_nonnum <- movies_nonnum[, names(movies_nonnum) != 'Profit']
movies_all <- merge(movies_numeric_transformed, movies_nonnum)
movies_all <- na.omit(movies_all)
#in project part I, it was found that movies with budget greater than 1e+08 are mostly
#Action, Adventure, and Comedy. create interaction features based on this insight
# NOTE(review): `Budget^2` already holds squared budgets, so the 1e+08
# threshold below is applied to the squared value -- confirm this is intended.
movies_all$`is_budget_greater_than_1e+08` <- ifelse(movies_all$`Budget^2` > 1e+08, 1, 0)
movies_all$`is_budget_greater_than_1e+08*Genre_Action` <- movies_all$`is_budget_greater_than_1e+08` * movies_all$Genre_Action
movies_all$`is_budget_greater_than_1e+08*Genre_Adventure` <- movies_all$`is_budget_greater_than_1e+08` * movies_all$Genre_Adventure
movies_all$`is_budget_greater_than_1e+08*Genre_Comedy` <- movies_all$`is_budget_greater_than_1e+08` * movies_all$Genre_Comedy
#explore the correlations between transformed variables and profit
# NOTE(review): inside a function these results are discarded (no
# auto-printing); the transformations below hard-code earlier findings.
high_cor(movies_all$Released_Month, movies_all$Profit)
high_cor(movies_all$Runtime, movies_all$Profit)
high_cor(movies_all$Language, movies_all$Profit)
high_cor(movies_all$Country, movies_all$Profit)
high_cor(movies_all$Awards, movies_all$Profit)
#based on the correlation results, create new transformed variables
movies_all$`log(Released_Month)` <- log(movies_all$Released_Month)
movies_all$`Runtime^3` <- movies_all$Runtime^3
movies_all$`log(Language)` <- log(movies_all$Language)
movies_all$`Country^2` <- movies_all$Country^2
movies_all$`Awards^(1/3)` <- movies_all$Awards^(1/3)
movies_all <- movies_all[!names(movies_all) %in% c("Released_Month", "Runtime","Language","Country","Awards")]
#sparse design matrix for glmnet; Title/Profit are excluded from predictors
X_all <- Matrix(as.matrix(movies_all[!names(movies_all) %in% c("Title", "Profit")]), sparse = TRUE)
Y_all <- movies_all$Profit
final_MSE_train <- NULL
final_MSE_test <- NULL
f <- seq(0.05, 0.95, by = 0.05)
for (fraction in f) {
#divide data into training and test sets
smp_size <- floor(fraction * nrow(movies_all))
all_MSE_train <- NULL
all_MSE_test <- NULL
#repeat the random partition of dataset 10 times
#(fixed seeds make every run of this function reproducible)
for (n in c(29, 35, 67, 85, 102, 219, 175, 199, 143, 139)) {
set.seed(n)
train_ind <- sample(seq_len(nrow(movies_all)), size = smp_size)
X_train <- X_all[train_ind,]
X_test <- X_all[-train_ind,]
Y_train <- Y_all[train_ind]
Y_test <- Y_all[-train_ind]
#use LASSO to select from a large number of variables
cvfit <- cv.glmnet(X_train,
                   Y_train,
                   family = "gaussian", ## linear regression
                   alpha = 1, ## select Lasso
                   type.measure = "mse", ## train to minimize mse
                   nfolds = 5) ## 5-folds cross-validation
train_fitted <- predict(cvfit, newx = X_train, s = "lambda.min")
MSE_train <- mean((train_fitted-Y_train)^2)
all_MSE_train <- rbind(all_MSE_train, MSE_train)
test_fitted <- predict(cvfit, newx = X_test, s = "lambda.min")
MSE_test <- mean((Y_test-test_fitted)^2)
all_MSE_test <- rbind(all_MSE_test, MSE_test)
}
final_MSE_train <- rbind(final_MSE_train, mean(all_MSE_train))
final_MSE_test <- rbind(final_MSE_test, mean(all_MSE_test))
}
training_set_size <- floor(f * nrow(movies_all))
MSE <- data.frame(cbind(training_set_size, final_MSE_train, final_MSE_test))
colnames(MSE)[2] <- 'MSE_Train_q5'
colnames(MSE)[3] <- 'MSE_Test_q5'
MSE$training_set_size <- seq(0.05, 0.95, by = 0.05)
colnames(MSE)[1] <- 'Percentage of Training Set Size'
#combine all the MSE results from previous questions, and make comparison in one plot
# NOTE(review): MSE1/MSE2/MSE4 are globals from earlier questions; the
# renames below only affect local copies inside this function.
colnames(MSE1)[2] <- 'MSE_Train_q1'
colnames(MSE1)[3] <- 'MSE_Test_q1'
colnames(MSE2)[2] <- 'MSE_Train_q2'
colnames(MSE2)[3] <- 'MSE_Test_q2'
colnames(MSE4)[2] <- 'MSE_Train_q4'
colnames(MSE4)[3] <- 'MSE_Test_q4'
MSE_all <- cbind(MSE,MSE1[-1], MSE2[-1], MSE4[-1])
MSE_all_1 <- melt(MSE_all, id.vars = 'Percentage of Training Set Size')
#the ggplot object is the function's return value
ggplot(MSE_all_1, aes(x=`Percentage of Training Set Size`, y=value, colour=variable)) + geom_line(size=2) +
geom_point(size=4, color='blue', alpha=0.3) +
ylab(label="MSE") +
xlab("Percentage of Training Set") + coord_cartesian(ylim=c(0,5e+16)) + theme_bw(base_size = 20)
}
|
cc6b594ce8266753191501372ffea531c4931d81 | 01a7679dcb4e444a071da478e4f4a988b9808754 | /engagement_correlations.R | 05fb93f8a9eba3c37e7e845b07042dad5a4d3011 | [] | no_license | nripeshtrivedi/Engagement_Analysis | 866da2f1eea4df5ce306e65cb49b64ba0130bf1f | 2c6c3ecf618206eb65f2fece142a42c7cb7b8bab | refs/heads/master | 2020-12-24T07:03:17.908226 | 2016-11-20T07:30:20 | 2016-11-20T07:30:20 | 73,382,787 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,986 | r | engagement_correlations.R | library(ggplot2)
library(scales)
source('../multiplot.r')

## Scatterplots of per-member engagement metrics against message_rate, each
## with a linear fit on log-log axes, assembled into one multi-panel figure.
## NOTE(review): expects `members` (member-level data frame), `logged_members`
## (row selector) and ggplot2 to be provided by an upstream script;
## `multiplot()` comes from ../multiplot.r.

## Filter away rows where signupDate, lastLogin or signupDistressLevel is NA
members <- members[!duplicated(members$memID),]
members <- unique(members)
m <- members[logged_members,]
m <- m[!is.na(m$signupDate),]
m <- m[!is.na(m$lastLogin),]
m <- m[!is.na(m$signupDistressLevel),]

## 75/25 train/test split.
## NOTE(review): `train`/`test` are not used below; kept for downstream scripts.
s <- sample(seq_len(nrow(m)), nrow(m) * 0.75, replace = FALSE)
train <- m[s,]
test <- m[setdiff(seq_len(nrow(m)), s),]
test <- na.omit(test)

## Engagement columns of interest (kept for reference by later analyses)
cols <- c("coins","groupSupportMsgs","forumUpvotes","growthPoints","compassionHearts",
          "signupDate","lastLogin",
          "signupDistressLevel","numMsgUser","numLogins","convRequests","forumViews","helpViews",
          "pageViewsApp","pageViewsWeb","message_rate")

## ggplot themes. Renamed from `t`/`tyn` so base::t is no longer masked:
## theme_y_labeled keeps y-axis text/strips, theme_y_blank hides them.
theme_y_labeled <- theme(axis.text.x = element_blank(),
                         axis.title.x=element_blank(),
                         axis.text.y = element_text(colour = 'black', size = 12),
                         axis.title.y = element_blank(),
                         strip.text.y = element_text(size = 16, hjust = 0.5, vjust = 0.5, face = 'bold'),
                         title = element_text(size=34),
                         legend.position="none")
theme_y_blank <- theme(axis.text.x = element_blank(),
                       axis.title.x=element_blank(),
                       axis.text.y = element_blank(),
                       axis.title.y = element_blank(),
                       strip.text.y = element_blank(),
                       title = element_text(size=34),
                       legend.position="none")

## One panel per metric: positive values only, log2 y-axis, red lm smooth.
## (The duplicated geom_smooth() layers the original had in `conv` and
## `nlogin` were removed -- they redrew an identical fit on top of itself.)
c <- ggplot(m[which(m$coins > 0 & m$message_rate > 0),],aes(x=coins, y=message_rate)) + geom_point(cex=3) + geom_smooth(method="lm",col="red",cex=4)+ scale_y_continuous(limits=c(0.005,128),trans=log2_trans()) + scale_x_log10() +
  theme_bw() + theme_y_labeled + ggtitle("Number of coins")
g <- ggplot(m[which(m$groupSupportMsgs > 0 & m$message_rate > 0),],aes(x=groupSupportMsgs, y=message_rate)) + geom_point(cex=3) + geom_smooth(method="lm",col="red",cex=4)+ scale_y_continuous(limits=c(0.005,128),trans=log2_trans()) + scale_x_continuous(trans=log2_trans()) + theme_bw() +
  theme_y_labeled + ggtitle("Number of group messages")
gr <- ggplot(m[which(m$growthPoints > 0 & m$message_rate > 0),],aes(x=growthPoints, y=message_rate)) + geom_point(cex=3) + geom_smooth(method="lm",col="red",cex=4)+ scale_y_continuous(limits=c(0.005,128),trans=log2_trans()) + scale_x_continuous(trans=log2_trans()) + theme_bw() +
  theme_y_blank + ggtitle("Growth points")
ch <- ggplot(m[which(m$compassionHearts > 0 & m$message_rate > 0),],aes(x=compassionHearts, y=message_rate)) + geom_point(cex=3) + geom_smooth(method="lm",col="red",cex=4)+ scale_y_continuous(limits=c(0.005,128),trans=log2_trans()) + scale_x_continuous(trans=log2_trans()) + theme_bw() +
  theme_y_blank + ggtitle("Compassion hearts")
su <- ggplot(m[which(m$signupDate > 0 & m$message_rate > 0),],aes(x=signupDate, y=message_rate)) + geom_point(cex=3) + geom_smooth(method="lm",col="red",cex=4)+ scale_y_continuous(limits=c(0.005,128),trans=log2_trans()) + scale_x_continuous(trans=log2_trans()) + theme_bw() +
  theme_y_blank + ggtitle("Signup Date")
login <- ggplot(m[which(m$lastLogin > 0 & m$message_rate > 0),],aes(x=lastLogin, y=message_rate)) + geom_point(cex=3) + geom_smooth(method="lm",col="red",cex=4)+ scale_y_continuous(limits=c(0.005,128),trans=log2_trans()) + scale_x_continuous(trans=log2_trans(),limits=c(1.402E09,1.416E09)) + theme_bw() +
  theme_y_blank + ggtitle("Last login")
d <- ggplot(m[which(m$signupDistressLevel > 0 & m$message_rate > 0),],aes(x=signupDistressLevel, y=message_rate)) + geom_point(cex=3) + geom_smooth(method="lm",col="red",cex=4)+ scale_y_continuous(limits=c(0.005,128),trans=log2_trans()) + scale_x_continuous(trans=log2_trans()) + theme_bw() +
  theme_y_blank + ggtitle("Distress level")
conv <- ggplot(m[which(m$convRequests > 0 & m$message_rate > 0),],aes(x=convRequests, y=message_rate)) + geom_point(cex=3) + geom_smooth(method="lm",col="red",cex=4) + scale_y_continuous(limits=c(0.005,128),trans=log2_trans()) + scale_x_continuous(trans=log2_trans()) + theme_bw() +
  theme_y_blank + ggtitle("Conversation requests sent")
nlogin <- ggplot(m[which(m$numLogins > 0 & m$message_rate > 0),],aes(x=numLogins, y=message_rate)) + geom_point(cex=3) + geom_smooth(method="lm",col="red",cex=4) + scale_y_continuous(limits=c(0.005,128),trans=log2_trans()) + scale_x_continuous(trans=log2_trans()) + theme_bw() +
  theme_y_blank + ggtitle("Number of Logins")
f <- ggplot(m[which(m$forumUpvotes > 0 & m$message_rate > 0),],aes(x=forumUpvotes, y=message_rate)) + geom_point(cex=3) + geom_smooth(method="lm",col="red",cex=4)+ scale_y_continuous(limits=c(0.005,128),trans=log2_trans()) + scale_x_continuous(trans=log2_trans()) + theme_bw() +
  theme_y_blank + ggtitle("Number of forum upvotes")
fview <- ggplot(m[which(m$forumViews > 0 & m$message_rate > 0),],aes(x=forumViews, y=message_rate)) + geom_point(cex=3) + geom_smooth(method="lm",col="red",cex=4)+ scale_y_continuous(limits=c(0.005,128),trans=log2_trans()) + scale_x_continuous(trans=log2_trans()) + theme_bw() +
  theme_y_blank + ggtitle("Number of forum views")
numPosts <- ggplot(m[which(m$numForumPosts > 0 & m$message_rate > 0),],aes(x=numForumPosts, y=message_rate)) + geom_point(cex=3) + geom_smooth(method="lm",col="red",cex=4)+ scale_y_continuous(limits=c(0.005,128),trans=log2_trans()) + scale_x_continuous(trans=log2_trans()) + theme_bw() +
  theme_y_blank + ggtitle("Forum posts")
plot(numPosts)
aview <- ggplot(m[which(m$helpViews > 0 & m$message_rate > 0),],aes(x=helpViews, y=message_rate)) + geom_point(cex=3) + geom_smooth(method="lm",col="red",cex=4)+ scale_y_continuous(limits=c(0.005,128),trans=log2_trans()) + scale_x_continuous(trans=log2_trans()) + theme_bw() +
  theme_y_blank + ggtitle("Number of help article views")
pvapp <- ggplot(m[which(m$pageViewsApp > 0 & m$message_rate > 0),],aes(x=pageViewsApp, y=message_rate)) + geom_point(cex=3) + geom_smooth(method="lm",col="red",cex=4)+ scale_y_continuous(limits=c(0.005,128),trans=log2_trans()) + scale_x_continuous(trans=log2_trans()) + theme_bw() +
  theme_y_blank + ggtitle("Number of Pages viewed (iOS)")
pvweb <- ggplot(m[which(m$pageViewsWeb > 0 & m$message_rate > 0),],aes(x=pageViewsWeb, y=message_rate)) + geom_point(cex=3) + geom_smooth(method="lm",col="red",cex=4)+ scale_y_continuous(limits=c(0.005,128),trans=log2_trans()) + scale_x_continuous(trans=log2_trans()) + theme_bw() +
  theme_y_blank + ggtitle("Number of pages viewed (web)")

# Each page is written at 64x13 inches
pdf("2col.pdf", width=64, height=13)
multiplot(c,g,gr,ch,su,login,d,conv,nlogin,numPosts,f,fview,aview,pvapp,pvweb,cols=8)
dev.off()
# Same figure again under a different filename (kept for compatibility)
pdf("scatter.pdf",width=64,height=13)
multiplot(c,g,gr,ch,su,login,d,conv,nlogin,numPosts,f,fview,aview,pvapp,pvweb,cols=8)
dev.off()
png("scatter.png",width = 6400,height=1300)
multiplot(c,g,gr,ch,su,login,d,conv,nlogin,numPosts,f,fview,aview,pvapp,pvweb,cols=8)
dev.off()
# Clean up everything except m (removed separately) and train/test
remove(c,g,gr,ch,su,login,d,conv,nlogin,numPosts,f,fview,aview,pvapp,pvweb,s,cols,theme_y_labeled,theme_y_blank)
remove(m)
|
8662ef5a66abcca44559b147987b7c0d8b742146 | 57744ab6fedc2d4b8719fc51dce84e10189a0a7f | /rrdfqb/R/qb.add.prefixlist.to.store.R | 54e9d22867717c083be68b98b5ff07e2541b6cc0 | [] | no_license | rjsheperd/rrdfqbcrnd0 | 3e808ccd56ccf0b26c3c5f80bec9e4d1c83e4f84 | f7131281d5e4a415451dbd08859fac50d9b8a46d | refs/heads/master | 2023-04-03T01:00:46.279742 | 2020-05-04T19:10:43 | 2020-05-04T19:10:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 751 | r | qb.add.prefixlist.to.store.R | ##' Add prefixes to a rrdf store and returns list of prefixes
##'
##' Side effect: each prefix/namespace pair in \code{prefixes} is registered
##' in \code{store} via \code{add.prefix}.
##' @param store A rrdf store; if NULL the prefixes are not added anywhere
##' @param prefixes A data.frame with columns \code{prefix} and \code{namespace}
##' @return \code{TRUE} (always; kept for backward compatibility -- the
##' original never returned the documented prefix list)
##' TODO(mja): this should be changed - at least the naming, or use another data structure
##' @export qb.add.prefixlist.to.store
qb.add.prefixlist.to.store <- function(store = NULL, prefixes) {
  # Hoisted out of the loop: with no store there is nothing to register.
  if (!is.null(store)) {
    # seq_len() yields an empty loop for a zero-row data.frame, where the
    # previous 1:nrow(prefixes) iterated over c(1, 0) and passed NA values
    # to add.prefix().
    for (i in seq_len(nrow(prefixes))) {
      # Use as.character to typecast (guards against factor columns)
      add.prefix(store,
                 as.character(prefixes[i, "prefix"]),
                 as.character(prefixes[i, "namespace"]))
    }
  }
  return(TRUE)
}
|
0253f12b6393c312fd4ae7be09d3e4a91b12e3ef | d08e69198fbd60086aa35d765c7675006d06cf3f | /man/GetBiplotScales.Rd | 36324a104c2327f7be67ee28a07eaa6fcc4b60c3 | [] | no_license | villardon/MultBiplotR | 7d2e1b3b25fb5a1971b52fa2674df714f14176ca | 9ac841d0402e0fb4ac93dbff078170188b25b291 | refs/heads/master | 2023-01-22T12:37:03.318282 | 2021-05-31T09:18:20 | 2021-05-31T09:18:20 | 97,450,677 | 3 | 2 | null | 2023-01-13T13:34:51 | 2017-07-17T08:02:54 | R | UTF-8 | R | false | false | 2,212 | rd | GetBiplotScales.Rd | \name{GetBiplotScales}
\alias{GetBiplotScales}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Calculates the scales for the variables on a linear biplot
}
\description{
Calculates the scales for the variables on a linear prediction biplot
There are several types of scales and values that can be shown on the graphical representation. See details.
}
\usage{
GetBiplotScales(Biplot, nticks = 4, TypeScale = "Complete", ValuesScale = "Original")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Biplot}{
Object of class PCA.Biplot
}
\item{nticks}{
Number of ticks for the biplot axes
}
\item{TypeScale}{
Type of scale to use : "Complete", "StdDev" or "BoxPlot"
}
\item{ValuesScale}{
Values to show on the scale: "Original" or "Transformed"
}
}
\details{
The function calculates the points on the biplot axes where the scales should be placed.
There are three types of scales when the transformations of the raw data are made by columns:
"Complete": Covers the whole range of the variable using the number of ticks specified in "nticks". A smaller number of points could be shown if some fall outside the range of the scatter.
"StdDev": The mean +/- 1, 2 and 3 times the standard deviation. A smaller number of points could be shown if some fall outside the range of the scatter.
"BoxPlot": Median, 25 and 75 percentiles, maximum and minimum values are shown. The extremes of the interquartile range are connected with a thicker line. A smaller number of points could be shown if some fall outside the range of the scatter.
There are two kinds of values that can be shown on the biplot axis:
"Original": The values before transformation. Only makes sense when the transformations are for each column.
"Transformed": The values after transformation, for example, after standardization.
Although the function is public, the end user will not normally use it.
}
\value{
A list with the following components:
\item{Ticks}{A list containing the ticks for each variable}
\item{Labels}{A list containing the labels for each variable}
%% ...
}
\author{
Jose Luis Vicente Villardon
}
\examples{
data(iris)
bip=PCA.Biplot(iris[,1:4])
GetBiplotScales(bip)
}
|
30090b2a135c88073008fcf48a70cda4610b88c0 | ed0651dc37740c45798b4c6ff7c34e5937da989d | /BBS_Hierarchical_Model.R | 5c978b6683772fca61ae82ffce1545a55126d82b | [] | no_license | Tglaser178/BBS_Analysis | a013d6a3752ac937fb8c6f2a812cef75ddc918bc | e389ea01386b81735a1018fb0f885eb7c760ce96 | refs/heads/master | 2022-07-17T21:21:39.804527 | 2020-05-12T20:28:13 | 2020-05-12T20:28:13 | 250,376,040 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,673 | r | BBS_Hierarchical_Model.R | ##############################################################################
# Analysis of bbs data to get trends by BCR
# Created by T. Glaser April 09 2020
# Based on code sent by Jim Sarroco for WIWA BBS analysis (wiwa_network_bbs_model1.R)
# which was in turn based on code from J. Sauer
##############################################################################
rm(list=ls())
library("ff") # to read in large BBS data set
library("rgeos")
library("rgdal")
library("maptools")
library("sp")
library("jagsUI")
library('tidyverse')
SPECIES="Wilson's Warbler"
YEAR=c(1967:2018)
OUTFILE="data/outBBS/BBS_WIWA_BCR.RData"
out<-list()
#-----------------------------------------------------------------------------
# 1 - Read in and process BBS data.
#-----------------------------------------------------------------------------
### read in filtered data for species
bbs.dat <- read.csv("./data/BBS_WIWA_BCR.filtered.csv")
#Create unique values for filtered BCRs
BCR=unique(bbs.dat$BCR)
### Create unique route field
bbs.dat$Route <- factor(do.call(paste, list(bbs.dat$BCR, bbs.dat$Route, sep=".")))
# select years of interest
#bbs.dat <- bbs.dat[bbs.dat$Year>(YEAR[1]-1) & bbs.dat$Year<(YEAR[2]+1),]
# Save number of routes to "out" list
out$nRoutes<-length(unique(bbs.dat$Route))
### Observer data
bbs.dat$Rte <- factor(do.call(paste, list(bbs.dat$State, bbs.dat$Route, sep=".")))
# Create Observer ID based on route x observer
bbs.dat$obs <- factor(do.call(paste, list(bbs.dat$Obs, bbs.dat$Rte, sep=".")))
# create first year indicator variable
bbs.dat$fy <- ifelse(duplicated(bbs.dat$ObsN), 0, 1)
# fill in zeros for years where route counted with no observations of species
bbs.dat$SpeciesTotal[is.na(bbs.dat$SpeciesTotal)] <- 0
# Extract Coordinates
bbs.locs <- bbs.dat[,c("Longitude","Latitude")]
#-----------------------------------------------------------------------------
# 2 - Read in and process spatial data, match to BBS data
#-----------------------------------------------------------------------------
### Read in regions = BCR shapefile
REGIONS <- readOGR(dsn="./data/bcr_terrestrial_shape",layer="BCR_Terrestrial_master_International")
#regions.proj=proj4string(REGIONS)
#project to Albers equal area so areas calculated will be in sq meters
REGIONS<-spTransform(REGIONS,CRS("+proj=aea +ellps=WGS84 +lat_1=29.5 +lat_2=45.5"))
# Apply area to each filtered BCR
bbs.BCR=BCR
BCR=data.frame(BCR=bbs.BCR, AreaBCR=sapply(1:length(BCR),function(i){gArea(subset(REGIONS,BCR==BCR[i]),byid=T)}))
# Merge BCR areas with bbs dataframe
BCR$BCR=as.factor(bbs.BCR)
AreaBCR.df=as.data.frame(tapply(BCR$AreaBCR,BCR$BCR,sum,simplify=T))
names(AreaBCR.df)<-c("AreaBCR")
AreaBCR.df$AreaBCR<-row.names(AreaBCR.df)
BCR=merge(BCR,AreaBCR.df,by.x="BCR")
bbs.dat=merge(bbs.dat,BCR,by.x="BCR")
bbs.dat$BCR<-factor(bbs.dat$BCR,levels=as.character(unique(bbs.dat$BCR))) # remove strata from which ther are no observations
#-----------------------------------------------------------------------------
# Write BBS model to file. Code provided by J. Sauer. (via Jim Sarocco)
#-----------------------------------------------------------------------------
source("LinkSauerModel.R")
#-----------------------------------------------------------------------------
# 4 - prep/bundle data for bbs trend model, set inits, run model
#-----------------------------------------------------------------------------
### Set Parameters for BBS Model
count <- bbs.dat$SpeciesTotal            # response: birds counted per route-year
ncounts <- length(bbs.dat$SpeciesTotal)  # number of count records
obser <- bbs.dat$ObsN
obser <- as.numeric(factor(bbs.dat$ObsN))  # recode observer ids to 1..nobservers (overwrites the line above)
nobservers <- length(unique(obser))
firstyr <- bbs.dat$fy                    # indicator: observer's first year of service
year <- as.numeric(factor(bbs.dat$Year)) # year recoded to 1..nyears
nyears <- length(unique(year))
strat <- as.numeric(factor(bbs.dat$BCR)) # stratum index (one stratum per BCR)
nstrata <- length(unique(strat))
# Set area weight for BCRs
# One AreaBCR value per stratum, ordered by BCR to line up with `strat`.
aw <- unique(subset(bbs.dat, select = c(BCR, AreaBCR)))
aw <- aw[order(aw$BCR),]
areaweight <- aw$AreaBCR
# calculate z weights
# nonzeroweight = proportion of routes in each BCR on which the species was
# ever detected (Link & Sauer's "z" weight).
rte.all <- read.csv("./data/routes.csv")
rte.all$Rte <- factor(do.call(paste, list(rte.all$BCR, rte.all$Route, sep=".")))  # unique route id = BCR.Route
#rte.all <- rte.all[rte.all$Year>(YEAR[1]-1) & rte.all$Year<(YEAR[2]+1),]
rte.all=rte.all[sapply(rte.all$BCR,function(x){any(x==BCR$BCR)}),]  # keep only routes inside the modelled BCRs
BCR.df=data.frame(BCR=as.factor(bbs.BCR))
rte.all=merge(rte.all,BCR.df,by="BCR")
Rte <- read.csv("./data/routes.csv")
rte.sum <- aggregate(Rte~BCR, rte.all, length) # use BCR scale here
names(rte.sum)[2] <- "tot.rtes"
# Routes on which the species was detected at least once
spec.rte.sum <- aggregate(Rte~BCR, unique(subset(bbs.dat, select=c(Rte, BCR))), length)
names(spec.rte.sum)[2] <- "detec.rtes"
wts <- merge(spec.rte.sum, rte.sum)
wts$nonzeroweight <- wts$detec.rtes/wts$tot.rtes  # detected routes / total routes
wts<-wts[order(wts$BCR),]                         # order must match `strat` coding
nonzeroweight <- wts$nonzeroweight
### bundle data:
# FIX: the script previously built jags.data twice back-to-back; the first
# version computed nYears as length(YEAR) - YEAR[1] (wrong when YEAR holds
# calendar years) and was immediately overwritten by the second. Only the
# correct computation is kept -- the resulting jags.data is identical.
# nYears  = span of the study period in years
# fixedyear = midpoint year used as the trend reference in the Link-Sauer model
nYears<-YEAR[2]-YEAR[1]
jags.data <- list(count=count, year = year, obser=obser, nyears=nyears, firstyr=firstyr, ncounts=ncounts, strat=strat,
                  nobservers=nobservers, nstrata=nstrata, areaweight=areaweight, nonzeroweight=nonzeroweight, fixedyear=round(nYears/2))
# Initial values
# One precision/intercept per stratum; flat starting point for all chains.
inits <- function(){
  list(tauyear=rep(1,nstrata),taunoise=1,tauobs=1,beta=rep(0,nstrata),strata=rep(0,nstrata),eta=0)
}
# Parameters monitored
parameters <- c("eta", "N", "sdnoise", "sdobs", "CompIndex", "Bbar")
# Set MCMC to desired settings
# NOTE(review): ni=50 / nb=10 / nc=1 are smoke-test settings only -- far too
# short for converged posteriors in this model; increase before real runs.
ni <- 50
nt <- 3
nb <- 10
nc <- 1
print("Calling JAGS")
bbs.out <- jags(jags.data, inits, parameters, "bbs_model_13.txt",n.chains = nc, n.thin = nt, n.iter = ni, n.burnin = nb, parallel=TRUE)
# Save the fit together with the stratum labels, areas, and year range needed
# to interpret it later.
bbs.BCR=levels(bbs.dat$BCR)
bbs.years=YEAR
bbs.areas<-as.data.frame(aw,row.names=1:nrow(aw))
save(bbs.out,bbs.BCR,bbs.areas,bbs.years,file=OUTFILE)
# Commented-out post-processing kept for reference: annual indices, credible
# intervals, and per-region trends derived from the posterior draws.
# years<-seq(1967,2018)
# reg.counts<-matrix(NA,length(years),11)
# reg.ci.025<-matrix(NA,length(years),11)
# reg.ci.975<-matrix(NA,length(years),11)
# for(i in 1:length(years)) reg.counts[i,]<-apply(bbs.out$sims.list$n[,,i],2,mean)
# for(i in 1:length(years)) reg.ci.025[i,]<-apply(bbs.out$sims.list$n[,,i],2,quantile, probs=c(0.025))
# for(i in 1:length(years)) reg.ci.975[i,]<-apply(bbs.out$sims.list$n[,,i],2,quantile, probs=c(0.975))
# reg.trends <- 100*((bbs.out$sims.list$n[,,17]/bbs.out$sims.list$n[,,1])^(1/16)-1)
# reg.tr.mn <- apply(reg.trends, 2, mean)
# reg.tr.ci <- apply(reg.trends, 2, quantile, probs=c(0.025, 0.975))
#
#
# reg.tr.df <- data.frame(Reg = levels(bbs.dat$Reg), mn = reg.tr.mn, t(reg.tr.ci))
|
# Return the name of the hospital in `state` with the lowest 30-day mortality
# rate for the given `outcome`.
#
# Args:
#   state:   two-letter state abbreviation, matched against data$State.
#   outcome: one of "heart attack", "heart failure", "pneumonia".
#   data:    outcome data frame (all columns character). Defaults to reading
#            the course CSV; injectable so the function can be tested without
#            touching the file system (backward compatible -- existing
#            two-argument calls behave exactly as before).
#
# Returns the hospital name(s) with the minimum rate (ties all returned, as in
# the original which()-based lookup). Stops with "invalid state" /
# "invalid outcome" on bad input.
best <- function (state, outcome,
                  data = read.csv("data/outcome-of-care-measures.csv", colClasses="character")){
  ## Check that state is valid
  isostate <- which(data$State == state)
  if (length(isostate) == 0) stop("invalid state")
  ## Map each supported outcome to its mortality-rate column.
  ## This replaces the original triplicated if/else branches, which repeated
  ## the same min-rate lookup for each outcome.
  outcome_cols <- c(
    "heart attack"  = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
    "heart failure" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
    "pneumonia"     = "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia"
  )
  if (!outcome %in% names(outcome_cols)) stop("invalid outcome")
  ## Return hospital name(s) in that state with lowest 30-day death rate.
  ## as.numeric() turns "Not Available" into NA (with a coercion warning,
  ## matching the original behaviour); NAs are ignored by min(na.rm=TRUE).
  nameVector <- data$Hospital.Name[isostate]
  pV <- as.numeric(data[[outcome_cols[[outcome]]]][isostate])
  isobest <- which(pV == min(pV, na.rm = TRUE))
  return(nameVector[isobest])
}
6ba1fc9d98df6436660b022ddf7f8e2fb577dbd7 | 05884bd8afb3222aec86c6a2b363e67ed3c64590 | /toolbox/R/spiderBDA.R | c44c25937616b2c4e5371ac20c6d3a8fbb8e4f68 | [] | no_license | nmarticorena/mineria_datos | bcfbea31e6de6f292e4404068b360638ab8a3cbb | 6e3f22c2fb79fe551a5d8c94136f495638088813 | refs/heads/master | 2020-03-09T00:36:28.806062 | 2018-06-14T03:12:35 | 2018-06-14T03:12:35 | 128,492,056 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,226 | r | spiderBDA.R | #'Spider charts BDA
#'
#'Make awesome spider charts
#'@param centroids dataframe, centroids to plot as rows and variables as columns.
#'@param show.max boolean, show the maximum values of each variable in the plot?
#'@param show.values boolean, show the centroid's value of each variable in the plot?
#'@param show.axis boolean, show X and Y axis in plots?
#'@param color character, color of the chart like \code{"orange"} or \code{"#234567"}.
#'@param label.digits integer, number of digits after point for labels with the values.
#'@param label.size numeric, label size.
#'@param nudge.x numeric, nudge for the labels in X-axis.
#'@param nudge.y numeric > 0, nudge for the y-axis. The maximum's and centroid's labels moves in opposite directions to not overlap themself.
#'@param legend.position the position of legends ("none", "left", "right", "bottom", "top", or two-element numeric vector).
#'@param legend.text.size numeric, legend text size.
#'@param many.vars boolean, if \code{TRUE}, then it plots with different shapes to make it easier to identify each variable.
#'@param multiplot boolean, print all plots together ?
#'@param multiplot.file character, path where to store the multiplot as a PDF, like "../plots/multiplot.pdf". By default it takes \code{""}, so nothing is stored.
#'@param multiplot.cols integer, number of columns to make the multiplot
#'@param multiplot.layout matrix, custom layout for the multiplot.
#'@details It is recommended to try multiple combinations of parameters before storing any multiplot, to get better plots.
#'@author Martin Vicencio
#'@return It return a list of ggplots.
#'@examples
#' centroids=data.frame(rnorm(9),rnorm(9),rnorm(9),rnorm(9),rnorm(9),rnorm(9),rnorm(9),rnorm(9))
#' names(centroids)=letters[1:8]
#' a=spiderBDA(centroids,show.max=F,show.values=F,nudge.x=0.1,nudge.y=0.1,legend.position = "bottom",legend.text.size = 7,multiplot = T, multiplot.layout=rbind(c(1,1,2,3),c(1,1,4,5),c(6,7,8,9)))
#'
#'
#'
#' a$'1'
#'
#'@export
spiderBDA = function(centroids,
                     show.max = T,
                     show.values = T,
                     show.axis = F,
                     color = "orange",
                     label.digits = 3,
                     label.size = 3,
                     nudge.x = 0.25,
                     nudge.y = 0.05,
                     legend.position = c(0.75, 0.05),
                     legend.text.size= 8,
                     many.vars=F,
                     multiplot= F,
                     multiplot.file = "",
                     multiplot.cols = 2,
                     multiplot.layout=NULL) {
  # safeLibrary() and multiplot() are project helpers defined elsewhere.
  safeLibrary(ggplot2)
  safeLibrary(plyr)
  # Order variables alphabetically so every chart uses the same axis layout.
  centroids=centroids[,order(colnames(centroids))]
  edges = ncol(centroids)               # one spider-chart spoke per variable
  maxradius = colwise(max)(centroids)   # per-variable max, used to normalise
  minradius = colwise(min)(centroids)   # per-variable min
  # Spoke angles, clockwise starting from "12 o'clock" (pi/2).
  angles = seq(pi * (5 * edges - 4) / (2 * edges), pi / 2, by = -2 * pi /edges)
  label = t(round(maxradius, digits = label.digits))
  # Outer ring: one point per variable at radius 1, labelled with its max.
  max_points = data.frame(x = cos(angles),
                          y = sin(angles),
                          label = label)
  max_points["Var"] = row.names(max_points)
  # Push labels outward: sign of the nudge follows the quadrant of each point.
  nudgex = (as.integer(max_points$x >= 0) * 2 - 1) * nudge.x
  nudgey = (as.integer(max_points$y >= 0) * 2 - 1) * nudge.y
  # Spoke template anchored at the origin (radius filled in per centroid).
  center = data.frame(
    x = rep(0, times = edges),
    y = rep(0, times = edges),
    angles = angles,
    radius = rep(0, times = edges)
  )
  # Cycle through point shapes in groups of 5 when there are many variables.
  if(many.vars){
    shapes=rep(1:ceiling(edges/5),times=5)
    shapes=shapes[1:edges]}
  # One plot per centroid row (plyr::alply keeps them in a list).
  plots = alply(centroids, 1, function(values){
    # Min-max normalise this centroid so every spoke lives in [0, 1].
    row = t((values - minradius) / (maxradius - minradius))
    dimnames(row)[[2]] = "radius"
    center_aux=center
    # NOTE(review): center_aux receives the centroid radii but is never used;
    # geom_spoke() below is fed `center`, whose radius column is all zeros, so
    # the spokes are drawn with zero length. Likely intended data=center_aux.
    center_aux[, "radius"] = row
    points = data.frame(row * cos(angles), row * sin(angles), t(round(values, digits = label.digits)))
    names(points) = c("x", "y", "label")
    # NOTE(review): ifelse() on a scalar with side-effecting brace blocks works
    # here (only the taken branch is evaluated, and `plot` is assigned in the
    # enclosing function environment) but a plain if/else would be clearer.
    ifelse(many.vars,
           {plot=ggplot()+geom_point(data = max_points,mapping = aes(x=x,y=y,colour=Var),size=2,shape=shapes)},
           {plot=ggplot()+geom_point(data = max_points,mapping = aes(x=x,y=y,colour=Var),size=4)})
    # Filled polygon = the centroid's profile; spokes radiate from the origin.
    plot=plot+geom_polygon(data = points ,mapping = aes(x=x,y=y),alpha=0.3,fill=color)+
      geom_spoke(data=center,aes(x=x,y=y,angle=angles,radius=radius),color=color)+
      theme_bw()+theme(legend.position = legend.position,legend.direction = "horizontal",legend.title = element_blank(),legend.text = element_text(size = legend.text.size))+
      xlim(-1.25,1.25)+ylim(-1.25,1.25)
    if(!show.axis){plot=plot+theme(axis.title = element_blank(),axis.ticks = element_blank(),axis.text = element_blank())}
    # Maximum labels are nudged outward, value labels inward (-nudgey).
    if(show.max){plot=plot+geom_label(data=max_points,aes(x=x,y=y,label=label),size=label.size,nudge_x = nudgex,nudge_y = nudgey,label.size = label.size/15)}
    if(show.values){plot=plot+geom_label(data=points,aes(x=x,y=y,label=label),size=label.size,nudge_x = nudgex,nudge_y = -nudgey,fill=color,label.size = label.size/15)}
    return(plot)
  })
  # Optionally arrange every chart on one page (and write a PDF if a file
  # path was supplied).
  if(multiplot){
    multiplot(plotlist = plots,
              file = multiplot.file,
              cols = multiplot.cols,
              layout = multiplot.layout)}
  return(plots)
}
97c5e22beed5917e2190cdc8036be853d146af38 | f2bf9e707872a91d60461dd6dfd551221e13bf9d | /code/merge_matches_with_ess.R | 4ada24a54e97d9a954dec8280bc86539828ec84b | [] | no_license | jvieroe/IntMatches | 3654ca1942bb126bf45c9f408f041514f44d5167 | 671e66bfb03be6011ddd5f19a74a278c4f336199 | refs/heads/main | 2023-07-03T00:50:56.826704 | 2021-08-10T16:57:00 | 2021-08-10T16:57:00 | 394,699,669 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,283 | r | merge_matches_with_ess.R | # ---------------------------------------------------------
# Preamble
# ---------------------------------------------------------
# NOTE(review): rm(list=ls()) and an absolute setwd() are script-level
# anti-patterns (they wipe the user's session and break portability); kept
# as-is because the rest of the script relies on relative paths from here.
rm(list=ls())
library(rio)
library(tidyverse)
library(knitr)
library(haven)
library(lubridate)
library(survival)
library(janitor)
library(tidylog)
library(fixest)
setwd("/Users/jeppeviero/Dropbox/03 Football/IntMatches")
# Load data
# ---------------------------------------------------------
games_full <- rio::import("data/IntMatches1872_2021.Rdata") %>%
rename(cntry_games = cntry,
year_games = year) #%>%
#tibble()
ess_full <- rio::import("/Users/jeppeviero/Library/Mobile Documents/com~apple~CloudDocs/Data/ESS/ess_surveys.Rdata") %>%
tibble() %>%
mutate(ess_id = row_number()) %>%
select(-c(bdate_long, edate_short, edate_long))
ess_full <- ess_full %>%
rename(cntry_ess = cntry) %>%
mutate(cntry = case_when(cntry_ess == "AL" ~ "Albania",
cntry_ess == "AT" ~ "Austria",
cntry_ess == "BE" ~ "Belgium",
cntry_ess == "BG" ~ "Bulgaria",
cntry_ess == "AT" ~ "Austria",
cntry_ess == "CH" ~ "Switzerland",
cntry_ess == "CY" ~ "Cyprus",
cntry_ess == "CZ" ~ "Czech Republic",
cntry_ess == "DE" ~ "Germany",
cntry_ess == "DK" ~ "Denmark",
cntry_ess == "EE" ~ "Estonia",
cntry_ess == "ES" ~ "Spain",
cntry_ess == "FI" ~ "Finland",
cntry_ess == "FR" ~ "France",
cntry_ess == "GB" ~ "United Kingdom", # no national teams
cntry_ess == "GR" ~ "Greece",
cntry_ess == "HR" ~ "Croatia",
cntry_ess == "HU" ~ "Hungary",
cntry_ess == "IE" ~ "Ireland",
cntry_ess == "IL" ~ "Israel",
cntry_ess == "IS" ~ "Iceland",
cntry_ess == "IT" ~ "Italy",
cntry_ess == "LT" ~ "Lithuania",
cntry_ess == "LU" ~ "Luxembourg",
cntry_ess == "LV" ~ "Latvia",
cntry_ess == "ME" ~ "Montenegro",
cntry_ess == "NL" ~ "Netherlands",
cntry_ess == "NO" ~ "Norway",
cntry_ess == "PL" ~ "Poland",
cntry_ess == "PT" ~ "Portugal",
cntry_ess == "RO" ~ "Romania",
cntry_ess == "RS" ~ "Serbia",
cntry_ess == "SE" ~ "Sweden",
cntry_ess == "SI" ~ "Slovenia",
cntry_ess == "SK" ~ "Slovakia",
cntry_ess == "TR" ~ "Turkey",
cntry_ess == "UA" ~ "Ukraine",
cntry_ess == "RO" ~ "Romania",
cntry_ess == "RU" ~ "Russia",
cntry_ess == "XK" ~ "Kosovo"))
ess_full <- ess_full %>%
filter(cntry %in% games_full$cntry_games)
# ---------------------------------------------------------
# Prep data
# ---------------------------------------------------------
# Slim respondent table (interview date, country, id) sorted by date.
ess <- ess_full %>%
  select(bdate_short,
         cntry,
         ess_id) %>%
  arrange(bdate_short)
games <- games_full %>%
  arrange(date)
# ---------------------------------------------------------
# Match respondents to nearest game
# https://stackoverflow.com/questions/23342647/how-to-match-by-nearest-date-from-two-data-frames
# ---------------------------------------------------------
cntry_seq <- unique(ess$cntry)
cntry_seq
cntry_list <- list()
# ----- Match to nearest game
# For each country, pair every respondent with the temporally CLOSEST game
# (in either direction -- past or future).
for (i in seq_along(cntry_seq)) {
  # Subset data
  print(cntry_seq[i])
  df1 <- ess %>%
    filter(cntry == cntry_seq[i])
  df2 <- games %>%
    filter(cntry_games == cntry_seq[i])
  # Calculate time to nearest game
  # temp[r, g] = interview date of respondent r minus date of game g.
  # NOTE(review): the anonymous function's argument `i` shadows the loop
  # index; harmless here, but a different name would be clearer.
  temp <- outer(df1$bdate_short, df2$date, "-")
  ind_temp <- apply(temp, 1, function(i) which.min(abs(i)))
  # Merge with ESS data
  # Attach the matched game's row to each respondent (row-aligned cbind).
  df3 <- df1 %>%
    cbind(., df2[ind_temp,]) %>%
    tibble()
  cntry_list[[i]] <- df3
}
# ----- Unpack data
ess_games <- bind_rows(cntry_list)
rm(df1,
   df2,
   df3,
   cntry_list)
# time_diff > 0: interviewed after the game; < 0: before.
ess_games <- ess_games %>%
  mutate(time_diff = bdate_short - date) %>%
  mutate(time_diff = as.numeric(time_diff)) %>%
  mutate(abs_time_diff = abs(time_diff))
# ---------------------------------------------------------
# Match respondents to nearest PREVIOUS game (on/before the interview date)
# https://stackoverflow.com/questions/23342647/how-to-match-by-nearest-date-from-two-data-frames
# ---------------------------------------------------------
# ----- Create duplicated match data with "_before"-suffixed column names so
# the matched game's variables do not clash with the nearest-game columns.
col_names <- colnames(games)
col_names_before <- paste0(col_names, "_before")
games_before <- games %>%
  rename_with(~ col_names_before[which(col_names == .x)], .cols = col_names)
colnames(games_before)
# ----- Match to nearest previous game
cntry_list <- list()
for (i in seq_along(cntry_seq)) {
  # ----- Subset data
  print(cntry_seq[i])
  df1 <- ess %>%
    filter(cntry == cntry_seq[i])
  df2 <- games_before %>%
    filter(cntry_games_before == cntry_seq[i])
  # ----- Calculate time to nearest previous game
  # FIX: refer to the renamed column explicitly. The original used df2$date,
  # which only resolved to date_before through data.frame `$` partial
  # matching -- it would silently return NULL on a tibble (games_before is
  # only a data.frame because the tibble() step upstream is commented out).
  temp <- outer(df1$bdate_short, df2$date_before, "-")
  temp[temp < 0] <- NA   # games after the interview are not "previous"
  # NOTE(review): if a respondent predates every game (all-NA row),
  # which.min() returns integer(0) and the cbind below will misalign; none of
  # the current data seems to hit this, but worth guarding.
  ind_temp <- apply(temp, 1, function(i) which.min(abs(i)))
  # ----- Merge with ESS data
  df3 <- df1 %>%
    cbind(., df2[ind_temp,]) %>%
    tibble()
  cntry_list[[i]] <- df3
}
# ----- Unpack data
ess_games_before <- bind_rows(cntry_list)
rm(df1,
   df2,
   df3,
   cntry_list)
# Days elapsed since the most recent previous game (always >= 0 by design).
ess_games_before <- ess_games_before %>%
  mutate(time_diff_before = bdate_short - date_before) %>%
  mutate(time_diff_before = as.numeric(time_diff_before)) %>%
  mutate(abs_time_diff_before = abs(time_diff_before))
rm(ess)
# Join both match tables (nearest game + nearest previous game) back onto the
# full survey data by respondent id / interview date / country.
ess <- ess_full %>%
  tidylog::left_join(.,
                     ess_games,
                     by = c("ess_id", "bdate_short", "cntry")) %>%
  tidylog::left_join(.,
                     ess_games_before,
                     by = c("ess_id", "bdate_short", "cntry"))
# country-x-match identifier (e.g. fixed-effects grouping variable).
ess <- ess %>%
  mutate(cntry_match = paste(cntry,
                             match_id,
                             sep = "_"))
ess <- ess %>%
  mutate(cntry_match = factor(cntry_match))
# Quick visual sanity check of the date bookkeeping (not kept).
temp <- ess %>%
  select(bdate_short,
         date,
         date_before,
         time_diff,
         time_diff_before,
         abs_time_diff,
         abs_time_diff_before)
rm(temp)
# after = 1 if interviewed after the matched game, 0 if before, NA if same day.
ess <- ess %>%
  mutate(after = ifelse(time_diff > 0, 1, 0)) %>%
  mutate(after = ifelse(time_diff == 0, NA, after)) %>%
  mutate(after = factor(after))
tabyl(ess$result)
class(ess$result)
# Match result as factor; goal difference from the matched game.
ess <- ess %>%
  mutate(result = factor(result)) %>%
  mutate(goal_diff = goals_for - goals_against)
# ---------------------------------------------------------
# Export data
# ---------------------------------------------------------
getwd()
save(ess,
     file = "data/DataMerged.Rdata")
rm(list=ls())
|
# plot2.R -- Exploratory Data Analysis, plot 2:
# Global Active Power over time for 2007-02-01 / 2007-02-02, saved to plot2.png.
# Read the header row separately, then the 2880 data rows (2 days x 1440
# minutes) located at skip=66637 in the raw file.
names_df <- read.table("household_power_consumption.txt",nrows=1,sep=";",na.strings="NA")
df <- read.table("household_power_consumption.txt",skip=66637,nrows=2880,sep=";",na.strings="NA")
names(df) <- names_df[1,]
View(df)
df$Date <- as.Date(df$Date,"%d/%m/%Y")
# BUG FIX: the original also ran df$Time <- as.Date(df$Time,"%h:%m:%s"), which
# yields NA (as.Date cannot parse a time of day, and %h/%s are not valid
# conversion specifications), so every DateTime below became NA and the plot
# was empty. Time is kept as the raw "HH:MM:SS" string instead.
df$DateTime <- strptime(paste(df$Date, df$Time), format="%Y-%m-%d %H:%M:%S")
# BUG FIX: dropped par(fin=c(480,480)) -- `fin` is the figure size in INCHES,
# so 480x480 is out of range and errors; the png device opened by dev.copy()
# is already 480x480 pixels by default.
# (Also fixed the axis-label typo "Killowatts" -> "kilowatts".)
plot(df$DateTime,df$Global_active_power,main="",xlab="",ylab="Global Active Power (kilowatts)",type='l')
dev.copy(png,"plot2.png")
dev.off()
|
# utf-8 encoding
# @title:       Meta-analysis
# @author:      Alan Ho
# @date:        17-11-2018
# @description: Generates the forest plot + all associated statistics
# (This header was previously a bare top-level string literal; R evaluates
#  and discards such strings, so plain comments are the idiomatic form.)

# LIBRARIES ----------------------------------------------------------
library(meta)      # metagen(), meta::forest()
library(metafor)   # rma(), regtest(), trimfill()
library(metaviz)
library(rlist)     # list.append()
# For computing the trim-and-fill analysis and the Egger-type regression test
# for funnel plot asymmetry. (Previously documented with a bare string
# literal, which R evaluates and discards; converted to comments.)
#
# @param df data.frame with one row per study, containing the effect size `d`
#   and its sampling variance `vd` (column names hard-coded below).
# @return named list:
#   rma             - random-effects model fitted by metafor::rma()
#   regtest         - full regression-test object (model = "lm")
#   regtest_results - tibble with the test's z statistic and p value
#   trim            - trim-and-fill adjusted model
#   trim_results    - tibble with the adjusted estimate, its z and p
diagnostics = function(df){
  res <- rma(d, vd, data = df)
  regtest.mod = regtest(res, model = "lm")
  rtf <- trimfill(res)
  list(
    'rma' = res,
    'regtest' = regtest.mod,
    'regtest_results' = tibble(
      z = regtest.mod$zval,
      p = regtest.mod$pval
    ),
    'trim' = rtf,
    'trim_results' = tibble(
      estimate = rtf$beta[1,1],
      z = rtf$zval,
      p = rtf$pval
    )
  )
}
# Generates a forest plot with subgroup analyses included. (Previously
# documented with a bare string literal; converted to a comment.)
#
# @param results unused -- kept for backward compatibility with existing calls.
# @param df data.frame with columns d, vd, author, year, type.
# Side effect: draws the forest plot on the current graphics device.
# NOTE(review): bylab is passed df$type (a full vector) where meta::forest
# expects subgroup label text -- confirm the intended labels appear.
generate_forestplot = function(results, df){
  # Random-effects (REML) meta-analysis of Cohen's d, grouped by `type`.
  meta_results = metagen(
    TE=d,
    seTE=sqrt(vd),
    studlab = paste(author, year),
    byvar = type,
    data = df,
    sm = "Cohen's d",
    title = 'Meta-Analysis',
    method.tau = 'REML')
  meta::forest(meta_results,
               slab = paste(df$author, sep = ", "),
               ilab = df$year,
               leftcols = c('author','year'),
               ilab.xpos = -5,
               xlab = 'Symptom Reduction (relative to control/baseline)',
               comb.random = T,
               comb.fixed = F,
               subgroup = T,
               print.I2 = T,
               print.subgroup.labels=T,
               bylab = df$type)
}
# FREQUENTIST RESULTS ----------------------------------------------------------
# Random-effects (REML) meta-analysis of Cohen's d over the full data set,
# with `type` as the subgroup variable; studies labelled "author year".
meta_results = metagen(
  TE=d,
  seTE=sqrt(vd),
  studlab = paste(author, year),
  byvar = type,
  data = perinatal_data,
  sm = "Cohen's d",
  title = 'Meta-Analysis',
  method.tau = 'REML')
# Trim-and-fill publication-bias results for the overall data set and each
# outcome subset. Each element names a data frame in the calling environment
# (fetched with get()); 'perinatal_data' is relabelled "Overall".
trim_fill.results = list(
  'perinatal_data',
  'depression',
  'anxiety',
  'worry'
) %>%
  map(function(df){
    labels = df
    if(df == 'perinatal_data') labels = 'Overall'
    res = diagnostics(get(df))$trim_results %>%
      mutate(Subgroup = labels) %>%
      select(Subgroup, everything())
  }) %>%
  reduce(bind_rows)
# NOTE(review): the bare string below is evaluated and discarded; it only
# serves as a visual section label in the script.
"Subgroup Analysis Results"
# Combine the six within-subgroup random-effects statistics with I^2, then
# reshape to one row per subgroup (rep(..., 7) = 7 statistics per subgroup).
subgroup_analysis = list.append(
  summary(meta_results )$within.random[1:6],
  summary(meta_results )$I2.w[[1]]
) %>%
  enframe() %>%
  unnest() %>%
  mutate(
    name = ifelse(name == "", 'I2', name),
    groups = rep(meta_results$bylevs, 7)
  ) %>%
  spread(name, value) %>%
  select(groups, TE, lower, upper, z, p, I2)
7097f69d47047a851284d988039dc2fc0ac3778a | fe87f6ab9a32b3bfbe20f5ffa852cbd338fc19c0 | /Intro_to_Programming/Week_3_In_Class_Codes.R | 8b60c122f3a30edb2c033ebc83fbff1b5a9e52ed | [] | no_license | priyaankasaxena/Notes | 42208f587629dddff490e8e1289a4d3139cb9b1b | 33bdd733ee0e296808e6122fcb0b80d61509d688 | refs/heads/master | 2020-07-12T02:47:57.659088 | 2019-04-10T17:03:49 | 2019-04-10T17:03:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,259 | r | Week_3_In_Class_Codes.R | # Comparison of logicals
# ===== Relational operators (class notes; run interactively) =====
# Comparison of logicals (TRUE coerces to 1, FALSE to 0)
TRUE == FALSE
# Comparison of numerics
-6 * 14 != 17 - 101
# Comparison of character strings (lexicographic, locale-dependent)
"useR" == "user"
# Compare a logical with a numeric
TRUE == 1
#2. Greater and Less Than
# Comparison of numerics
-6 * 5 + 2 >= -10 + 1
# Comparison of character strings
"raining" <= "raining dogs"
# Comparison of logicals
TRUE > FALSE
# NOTE(review): stray token -- evaluating `Instagram` errors with
# "object not found" unless such an object exists in the session.
Instagram
# The linkedin and instagram vectors have already been created as follows
linkedin <- c(16, 9, 13, 5, 2, 17, 14)
instagram <- c(17, 7, 5, 16, 8, 13, 14)
# Popular days
linkedin > 15
# Quiet days
linkedin <= 5
# LinkedIn more popular than instagram
linkedin > instagram
# NOTE(review): `views` and `last` below are never defined in this file;
# define them interactively before running these lines.
# When does views equal 13?
views == 13
# When is views less than or equal to 14?
views <= 14
##########Logical Operators
# `|` and `&` are element-wise (vector) operators.
# Is last under 5 or above 10?
last < 5 | last > 10
# Is last between 15 (exclusive) and 20 (inclusive)?
last > 15 & last <= 20
# linkedin exceeds 10 but instagram below 10
linkedin > 10 & instagram < 10
# When were one or both visited at least 12 times?
linkedin >= 12 | instagram >= 12
# When is views between 11 (exclusive) and 14 (inclusive)?
views > 11 & views <= 14
# NOTE(review): attach() is discouraged -- it masks names silently; li_df$hp
# is used explicitly below anyway.
attach(mtcars)
li_df<-mtcars
power <- li_df$hp
# Build a logical vector, TRUE if value in second is extreme: extremes
extremes <- power > 200 | power < 50
# Count the number of TRUEs in extremes (TRUE sums as 1)
sum(extremes)
# List of cars that are in the extremes list; beware that the row index is
# the model of the car
li_df[extremes,]
#a<-which(li_df$hp==extremes)
###IF STATEMENT RECAP
# Variables related to your last day of recordings
medium <- "LinkedIn"
num_views <- 14
# Control structure for medium (if conditions must be scalar)
if (medium == "LinkedIn") {
  print("Showing LinkedIn information")
} else if (medium == "instagram") {
  # Add code to print correct string when condition is TRUE
  print("Showing instagram information")
} else {
  print("Unknown medium")
}
# NOTE(review): rm(list=ls()) wipes the workspace, including num_views
# defined above -- the next if() block then errors unless num_views is
# re-created first.
rm(list=ls())#ls(all.names=TRUE)
gc()
# Control structure for num_views
if (num_views >= 16) {
  print("You're popular!")
} else if (num_views <= 15 & num_views > 10) {
  # Add code to print correct string when condition is TRUE
  # (for scalar conditions, && would be the idiomatic operator)
  print("Your number of views is average")
} else {
  print("Try to be more visible!")
}
#### Final Practice
li <- 15
ins <- 9
# Code the control-flow construct: sms doubles when both are high, halves
# when both are low, plain sum otherwise.
if (li >= 15 & ins >= 15) {
  sms <- 2 * (li + ins)
} else if (li < 10 & ins < 10) {
  sms <- 0.5 * (li + ins)
} else {
  sms <- li + ins
}
###FOR LOOP
nyc <- list(pop = 8405837,
            boroughs = c("Manhattan", "Bronx", "Brooklyn", "Queens", "Staten Island"),
            capital = FALSE)
# Loop version 1: iterate over elements directly
for (info in nyc) {
  print(info)
}
# Loop version 2: iterate over indices
# NOTE(review): 1:length(x) misbehaves for empty x (gives c(1, 0));
# seq_along(nyc) is the safe idiom.
for (i in 1:length(nyc)) {
  print(nyc[[i]])
}
# Add if statement with break
# NOTE(review): break/next are only valid inside a loop -- these two snippets
# are meant to be pasted into a loop body, not run at top level.
if (li > 16) {
  print("This is ridiculous, I'm outta here!")
  break
}
# Add if statement with next
if (li < 5) {
  print("This is too embarrassing!")
  next
}
print(li)
####Break Practice
# dont forget to Initialize ucount
# Print out rcount
tweet <- "Without data, you are just another person with an opinion!" #E. Deming
chars <- strsplit(tweet, split = "")[[1]]   # split into single characters
ucount <- 0
# Finish the for loop: count "u"s, stop at the first "!"
for (char in chars) {
  if (char == "u") {
    ucount <- ucount + 1
  }
  if (char == "!") {
    break
  }
}
# Print out rcount
ucount
##WHILE LOOP
# Initialize the speed variable
speed <- 60
# Code the while loop: keep decelerating until speed is at most 30
while (speed > 30) {
  print("Slow down!")
  speed <- speed - 7
}
# Print out the speed variable
speed
# Print the resulting sms to the console
# NOTE(review): sms comes from the "Final Practice" section above and must
# still exist in the workspace for this line to work.
sms
#EXERCISE 2
# Initialize the speed variable
speed <- 60
# Extend/adapt the while loop: brake harder when speed exceeds 48
while (speed > 30) {
  print(paste("Your speed is", speed))
  if (speed > 48) {
    print("Slow down big time!")
    speed <- speed - 11
  } else {
    print("Slow down!")
    speed <- speed - 6
  }
}
# Same loop, but abort entirely when speed exceeds 80
speed <- 88
while (speed > 30) {
  print(paste("Your speed is", speed))
  # Break the while loop when speed exceeds 80
  if (speed > 80) {
    break
  }
  if (speed > 48) {
    print("Slow down big time!")
    speed <- speed - 11
  } else {
    print("Slow down!")
    speed <- speed - 6
  }
}
#Exercise 4.
# Initialize i as 1
i <- 1
# Code the while loop: print multiples of 3, stop at the first divisible by 8
while (i <= 10) {
  print(3 * i)
  if ( (3 * i) %% 8 == 0) {
    break
  }
  i <- i + 1
}
getwd()
|
704e68c86d2f8dd52a746c3209e53230fe45c9dc | 394b0b27a68e590165d0dfb9243e7b2d5deaf4d5 | /man/batchFlattenSelf.Rd | 5b3b59e9d1b4913197c75cdc0b346a2726c20671 | [
"MIT"
] | permissive | NastashaVelasco1987/zoomGroupStats | 5b414b28e794eecbb9227d4b1cd81d46b00576e4 | 8f4975f36b5250a72e5075173caa875e8f9f368d | refs/heads/main | 2023-05-05T18:23:17.777533 | 2021-05-24T16:08:23 | 2021-05-24T16:08:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,422 | rd | batchFlattenSelf.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/batchFlattenSelf.R
\name{batchFlattenSelf}
\alias{batchFlattenSelf}
\title{Wrapper to run the flattenSelf function on a batch of meetings}
\usage{
batchFlattenSelf(inputData, inputType, meetingId, speakerId, gapLength)
}
\arguments{
\item{inputData}{data.frame with multiple meetings from batchProcessZoomOutput (either transcript or chat)}
\item{inputType}{character indicating 'transcript' or 'chat'}
\item{meetingId}{character name of the variable containing the meeting identifier}
\item{speakerId}{character name of the variable containing the speaker identifier}
\item{gapLength}{integer giving the number of seconds for marking distinct turns by the same speaker. Consecutive utterances by the same speaker of greater than or equal to this value will be treated as different conversational turns.}
}
\value{
a data.frame that is the same format as inputData, but where the observations are the new, compressed conversational turns.
}
\description{
Wrapper to run the flattenSelf function on a batch of meetings
}
\examples{
newChat = batchFlattenSelf(inputData = sample_chat_processed,
inputType="chat", meetingId = "batchMeetingId",
speakerId="userName", gapLength=120)
newTranscript = batchFlattenSelf(inputData = sample_transcript_processed,
inputType="transcript", meetingId = "batchMeetingId",
speakerId="userName", gapLength=120)
}
|
36d64888a049ddfd1fae0cb11e53ecc3ef5968a4 | 51267717b05c34e4df3a66ca9958d9d94d490f2a | /DatosMiercoles_week10.R | 1f479423e109a8ee969fd6c0b2e6cf5d61da7455 | [] | no_license | gonzalofichero/DatosDeMiercoles | 969d4a9eadea8874d6b4544ba6df797182d1ae1d | 8195c76022e864806d9f3151a424bada3dfe3d98 | refs/heads/master | 2020-05-15T09:02:23.121987 | 2019-06-26T20:32:06 | 2019-06-26T20:32:06 | 182,169,806 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,090 | r | DatosMiercoles_week10.R | # Cargo las librerías a utilizar
# Load the libraries used below
library(tidyverse)
library(ggplot2)
library(readr)
library(stringr)
# Read the data (wine ratings)
rating_vinos <- readr::read_csv("vinos.csv")
glimpse(rating_vinos)
# Convert the categorical columns to factors
rating_vinos$pais <- as.factor(rating_vinos$pais)
rating_vinos$provincia <- as.factor(rating_vinos$provincia)
rating_vinos$region_1 <- as.factor(rating_vinos$region_1)
rating_vinos$variedad <- as.factor(rating_vinos$variedad)
#####################
# Exploration
# Price-vs-score relationship by country (log-linear fit; prices capped
# at 1000 to drop extreme outliers)
rating_vinos %>%
  #filter(pais %in% c("Argentina","Chile","Uruguay","Estados Unidos")) %>%
  filter(pais %in% c("Argentina","Chile")) %>%
  filter(precio < 1000) %>%
  ggplot(aes(x=precio, y=puntos, color=pais)) +
  geom_point(alpha = 0.2) +
  stat_smooth(method = 'lm', formula = y ~ log(x))
# Boxplot of scores by country
rating_vinos %>%
  filter(pais %in% c("Argentina","Chile")) %>%
  filter(precio < 1000) %>%
  ggplot(aes(x=pais, y=puntos, color=pais)) +
  geom_boxplot()
# Grape-variety frequency for Argentina (used to decide the white list below)
freq_variedad <- rating_vinos %>%
  filter(pais == "Argentina") %>%
  group_by(variedad) %>%
  summarise(n = n()) %>%
  arrange(desc(n))
# Flag each Argentine wine as white ("blanco"), rose ("rosado"),
# sparkling ("espumante") or -- by default -- red ("tinto")
vino_arg <- rating_vinos %>%
  filter(pais == "Argentina") %>%
  mutate(tipo = case_when(variedad %in% c("Chardonnay","Torrontés",
                                          "Sauvignon Blanc","Ensamblaje Blanco",
                                          "Pinot Grigio","Viognier",
                                          "Sémillon","Moscato",
                                          "Pinot Gris","Chardonnay-Viognier",
                                          "Chenin Blanc-Chardonnay","Chenin Blanc",
                                          "Chardonnay-Sauvignon","Riesling",
                                          "Chardonnay-Semillon","Gewürztraminer",
                                          "Moscatel","Tocai",
                                          "Trebbiano") ~ "blanco",
                          variedad %in% c("Rosé","Rosado") ~ "rosado",
                          variedad == "Champagne Ensamblaje" ~ "espumante",
                          TRUE ~ "tinto"))
# Plot the price/quality relationship by wine type
vino_arg %>%
  ggplot(aes(x=precio, y=puntos, color=tipo)) +
  geom_jitter(alpha=0.1, height = 0.5) +
  stat_smooth(method = 'lm', formula = y ~ log(x))
# Fit score = f(log(price)) separately for reds and whites
red <- vino_arg %>%
  filter(tipo == "tinto")
red_class <- lm(puntos ~ log(precio), data = red)
white <- vino_arg %>%
  filter(tipo == "blanco")
white_class <- lm(puntos ~ log(precio), data = white)
# Predicted score given price (fitted values on the same data)
red$puntaje_estimado <- predict(red_class, red)
white$puntaje_estimado <- predict(white_class, white)
# Recombine reds and whites
vino_arg_estimado <- rbind(red,white)
# Classify each wine by whether it scores above or below its prediction.
# NOTE(review): the label strings look inverted relative to the condition
# ("Debajo de lo Esperado" = below expected, assigned when puntos >=
# estimate); verify which reading of "expected" was intended.
vino_arg_estimado <- vino_arg_estimado %>%
  mutate(clase_esperada = case_when(puntos >= puntaje_estimado ~ "01 Debajo de lo Esperado",
                                    puntos < puntaje_estimado ~ "02 Arriba de los Esperado",
                                    TRUE ~ "Sin Clasificar"))
# Plot, coloured by the over/under-performance class
vino_arg_estimado %>%
  ggplot(aes(x=precio, y=puntos, color=as.factor(clase_esperada))) +
  geom_point(alpha=0.2, size=2)
# Rank the red and white wines (within their over/under-performance class,
# by descending score) so the top performers for their price can be labelled.
# NOTE(review): the original comment said "top 10" but the labels below use
# order < 6, i.e. the top 5.
blanco_order <- vino_arg_estimado %>%
  filter(tipo == "blanco") %>%
  arrange(clase_esperada, desc(puntos))
blanco_order$order_t <- NA
# FIX: seq_len(nrow(.)) instead of 1:length(.$puntos) -- identical for
# non-empty data but safe when the frame is empty (1:0 yields c(1, 0)).
blanco_order$order_b <- seq_len(nrow(blanco_order))
tinto_order <- vino_arg_estimado %>%
  filter(tipo == "tinto") %>%
  arrange(clase_esperada, desc(puntos))
tinto_order$order_t <- seq_len(nrow(tinto_order))
tinto_order$order_b <- NA
# Put the two halves back together
vino_arg_order <- rbind(blanco_order, tinto_order)
# Label text ("name - winery") only for the top-5 of each colour; NA ranks
# fall through to the empty-string default of case_when.
vino_arg_order <- vino_arg_order %>%
  mutate(label_b = case_when(order_b < 6 ~ str_c(nombre, vina, sep=" - "),
                             TRUE ~ ""),
         label_t = case_when(order_t < 6 ~ str_c(nombre, vina, sep=" - "),
                             TRUE ~ ""))
# Plotting (ggrepel keeps the labels from overlapping)
library(ggrepel)
vino_arg_order %>%
  #filter(n > 10) %>%
  ggplot(aes(x=precio ,y=puntos, color=tipo)) +
  geom_point(alpha = 0.15) +
  scale_fill_manual(values=c("maroon3", "lightgoldenrod"))+
  #geom_label_repel(aes(label=label_b), size=2.5, colour="limegreen") +
  geom_label_repel(aes(label=label_t), size=2, colour="indianred3") +
  xlab("Precio") + ylab("Puntaje Wine Spectator")
|
20020979732e4510b297628b30cf38ab7bc6f9bf | 3b603bda5876a277d40c782d823d9ba87bf9a300 | /man/compute_equivrelation.Rd | 762412c701518241923a69aa5f160fb11f3d8fa3 | [] | no_license | KonstantinRK/tempnetwork | 65815a7c05c0253a8cb73a7a932acea00368840a | 58897c2b8bcb19b938f031ee6e1a990cb1dd9480 | refs/heads/master | 2020-11-27T11:24:14.780416 | 2019-12-21T11:41:15 | 2019-12-21T11:41:15 | 229,419,117 | 3 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,789 | rd | compute_equivrelation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tempnetwork-getter.R
\name{compute_equivrelation}
\alias{compute_equivrelation}
\title{Computes a default equivalence relation under the assumption that the vertex sets are identical.
That is, it starts by creating a list indexed by the names found in the parameter "defaultNameList".
Furthermore, each entry will be populated by a named list with it's names being the one's found in the "pointNameList" parameter
and with its values corresponding index of said list in the primary list.
E.g. compute_equivrelation(c("a","b","c","d"), c("t1","t2","t3")) ==
list("a"= list("t1"=a ,"t2"=a ,"t3"=a),"b"= list("t1"=b ,"t2"=b ,"t3"=b) ,"c"= list("t1"=c ,"t2"=c ,"t3"=c), "d"= list("t1"=d ,"t2"=d ,"t3"=d))}
\usage{
compute_equivrelation(defaultNameList, pointNameList)
}
\arguments{
\item{defaultNameList}{A vector containing strings representing temporal vertex names, as well as the vertex names of each graph in some graph list.}
\item{pointNameList}{A vector containing strings representing the point names of an tempflow.}
}
\value{
An equivalence relation as described above.
}
\description{
Computes a default equivalence relation under the assumption that the vertex sets are identical.
That is, it starts by creating a list indexed by the names found in the parameter "defaultNameList".
Furthermore, each entry will be populated by a named list with it's names being the one's found in the "pointNameList" parameter
and with its values corresponding index of said list in the primary list.
E.g. compute_equivrelation(c("a","b","c","d"), c("t1","t2","t3")) ==
list("a"= list("t1"=a ,"t2"=a ,"t3"=a),"b"= list("t1"=b ,"t2"=b ,"t3"=b) ,"c"= list("t1"=c ,"t2"=c ,"t3"=c), "d"= list("t1"=d ,"t2"=d ,"t3"=d))
}
|
c39cbe6d46059983023f0376133a3df8a3f702d6 | 9a6b362e947082f68fa190f7bb9f67c0f57f869d | /man/complete_time.Rd | 1ae31d12b28da0e7dabf0a2ea37c92ff1dab9add | [] | no_license | WillemSleegers/eyepatch | 4b26974b87f05de128a7faa2edf7fcebb57d3e78 | 1a06b168089ef0632a04ec76b74995c1852cc9bc | refs/heads/master | 2021-08-14T23:14:48.878488 | 2021-08-02T07:53:13 | 2021-08-02T07:53:13 | 172,603,004 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,220 | rd | complete_time.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/complete_time.R
\name{complete_time}
\alias{complete_time}
\title{Complete time}
\usage{
complete_time(data, time, difference)
}
\arguments{
\item{data}{The data frame containing the timestamps.}
\item{difference}{The supposed difference between two timestamps.}
\item{time}{The timestamp column in the data frame.}
}
\description{
\code{complete_time} adds missing timestamps to a data frame.
}
\details{
Completes a timestamp column by turning implicit missing values into
explicit missing values, followed by a linear interpolation between missing
values.
The data frame can be grouped using dplyr's \code{group_by}.
}
\examples{
library(dplyr)
# Example 1: Simple data
df <- tibble(
timestamp = 1:30,
trial = rep(1:3, each = 10),
event = rep(c(rep("baseline", 3), rep("event", 7)), times = 3)
)
# Remove some random observations, creating implicit missing values
set.seed(2020)
df <- slice_sample(df, n = 20) \%>\%
arrange(trial, timestamp)
# Add missing rows
df <- complete_time(df, timestamp, difference = 1)
# Example 2: Realistic data
missing_complete <- complete_time(missing, timestamp, difference = 1000/60)
}
|
e840b7cd657bec5d250a8420a560b6f47ad08410 | bd142e7cf42dba3058791b4b4b80577f31c15641 | /R/print_plotSmooth.R | e6cc0cc6a86c3955c614a820b12b65e99a1802c9 | [] | no_license | valnoacco/mgcViz | 5a8b3e1651219e3ca4c1e13dfae2385bc86064ee | d50e8e4cc1f59b59397c4a696a2c012ecf8ab0a6 | refs/heads/master | 2020-03-24T07:17:19.099233 | 2018-06-30T17:31:25 | 2018-06-30T17:31:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 423 | r | print_plotSmooth.R | #'
#' Printing plots of smooth effects
#'
#' @description This method prints objects of class \code{plotSmooth}.
#' @param x an object of class \code{plotSmooth}.
#' @param ... currently unused.
#' @return Returns \code{NULL}, invisibly.
#' @name print.plotSmooth
#' @rdname print.plotSmooth
#' @export print.plotSmooth
#' @export
#'
print.plotSmooth <- function(x, ...) {
  # Printing a plotSmooth object just means printing the ggplot it wraps;
  # the wrapper contributes no output of its own.
  print(x$ggObj)
  invisible(NULL)
}
fbf7552308f46446fccbcb4008bf1743d99fd562 | 6a28ba69be875841ddc9e71ca6af5956110efcb2 | /Numerical_Methods_For_Engineers_by_S._C._Chapra_And_R._P._Canale/CH6/EX6.11/Ex6_11.R | 8f349e7c1d98b15cce2842bebc83ea82a12f22ac | [] | permissive | FOSSEE/R_TBC_Uploads | 1ea929010b46babb1842b3efe0ed34be0deea3c0 | 8ab94daf80307aee399c246682cb79ccf6e9c282 | refs/heads/master | 2023-04-15T04:36:13.331525 | 2023-03-15T18:39:42 | 2023-03-15T18:39:42 | 212,745,783 | 0 | 3 | MIT | 2019-10-04T06:57:33 | 2019-10-04T05:57:19 | null | UTF-8 | R | false | false | 413 | r | Ex6_11.R | u <- function(x,y) {
x^2+x*y-10
}
v <- function(x,y) {
y+3*x*y^2-57
}
x=1.5
y=3.5
e<-c(100, 100)
while (e[1]>0.0001 & e[2]>0.0001){
J=matrix(data = c(2*x+y, x, 3*y^2, 1+6*x*y),nrow = 2,ncol = 2,byrow = TRUE)
deter=det(J)
u1=u(x,y)
v1=v(x,y)
x=x-((u1*J[2,2]-v1*J[1,2])/deter)
y=y-((v1*J[1,1]-u1*J[2,1])/deter)
e[1]=abs(2-x)
e[2]=abs(3-y)
}
bracket<-c(x, y)
cat(bracket) |
8366180dff837459fdcff003d9cba0d03d7ec69c | 6af188c45ed21260e94f099f06318f50fbe319e4 | /packages.R | c8b39011765541d18423ff27cc1f53fb6fa3d93d | [] | no_license | rmflight/mapart | 1a88d78e423870d0911557d103a0b291d10ca359 | 7f2b437f9dbf0ff8faf3709f1fa0152c7e2e25f9 | refs/heads/main | 2023-03-16T22:04:41.026186 | 2021-03-22T00:41:22 | 2021-03-22T00:41:22 | 350,159,066 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 185 | r | packages.R | ## library() calls go here
library(drake)
library(osmdata)
library(dplyr)
library(ggplot2)
library(tigris)
library(sf)
library(jsonlite)
library(purrr)
library(osrm)
library(lubridate)
|
e5614ba187ca9797cf0a130a2ee07e603c2e082f | da240952753caf3a3b79e777b1bfe24140aaba86 | /ZAnc/plot_slope_elev.R | 45dabaae865f834d77ff24a46eff7c3674762cd7 | [] | no_license | cooplab/hilo | ea5ea9d472ee7cf2cab17aa83e8f568c54fce34c | 64483aaf0abd40d25846969b8732e07abf9b7667 | refs/heads/master | 2023-08-18T13:03:07.458675 | 2021-09-20T20:12:10 | 2021-09-20T20:12:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,958 | r | plot_slope_elev.R | #!/usr/bin/env Rscript
library(dplyr)
library(ggplot2)
# this script plots slope from lm ancestry ~ elev across the genome,
# and calculates FDRs to show outlier loci
# load variables from Snakefile
# prefix = "HILO_MAIZE55_PARV50"
# K = 3
zea = snakemake@params[["zea"]]
# zea = "maize"
colors_file = snakemake@input[["colors"]]
# colors_file = "colors.R"
fdr_file = snakemake@input[["fdr"]]
# fdr_file = paste0("ZAnc/results/", prefix, "/K", K, "/Ne10000_yesBoot/", zea, ".lmElev.fdr.RData")
fit_file = snakemake@input[["fit"]]
# fit_file = paste0("ZAnc/results/", prefix, "/K", K, "/Ne10000_yesBoot/", zea, ".lmElev.fit.RData")
meta_file = snakemake@input[["meta_pop"]]
# meta_file = paste0("local_ancestry/results/ancestry_hmm/", prefix, "/K", K, "/Ne10000_yesBoot/anc/", zea, ".pop.meta.RData")
sites_file = snakemake@input[["sites"]]
# sites_file = paste0("local_ancestry/results/thinnedSNPs/", prefix, "/K", K, "/whole_genome.var.sites")
genome_file = snakemake@input[["genome"]]
# genome_file = "data/refMaize/Zea_mays.AFPv4.dna.chr.autosome.lengths"
centromeres_file = snakemake@input[["centromeres"]]
# centromeres_file = "data/refMaize/centromere_positions_v4.txt"
png_out = snakemake@output[["png"]]
# png_out = paste0("ZAnc/plots/", prefix, "_K", K, "_Ne10000_yesBoot_", zea, "_slope_elev.png")
rds = snakemake@output[["rds"]]
# rds = paste0("ZAnc/plots/", prefix, "_K", K, "_Ne10000_yesBoot_", zea, ".lmElev.plot.rds")
# load data: palette (col_maize_mex_parv), FDRs, fits (envWeights) and
# population metadata objects used below
source(colors_file)
load(fdr_file)
load(fit_file)
load(meta_file)
# load centromere positions
# (T/F replaced with TRUE/FALSE: T and F are ordinary, reassignable bindings)
centromeres <- read.table(centromeres_file, header = TRUE, stringsAsFactors = FALSE,
                          sep = "\t")
# load chromosome lengths and derive cumulative genome-wide coordinates
genome <- read.table(genome_file, header = FALSE, stringsAsFactors = FALSE,
                     sep = "\t") %>%
  data.table::setnames(c("chr", "length")) %>%
  dplyr::mutate(chr_end = cumsum(length),
                # start of each chromosome = cumulative end of the previous
                # one; lag() is also safe for a single-row table, unlike the
                # old chr_end[1:(nrow(.) - 1)] indexing (1:0 goes backwards)
                chr_start = dplyr::lag(chr_end, default = 0)) %>%
  left_join(.,
            centromeres %>%
              dplyr::group_by(chr) %>% # chr9 has 2 segments that map to the centromere region, so take an approximate midpoint using both pieces
              summarise(cent_mid = 10^6 * (max(end) + min(start))/2), # convert from Mb to bp
            by = "chr") %>%
  dplyr::mutate(centromere = chr_start + cent_mid) # cumulative centromere positions
# and site/position information for SNPs with ancestry calls
sites <- read.table(sites_file, header = FALSE, stringsAsFactors = FALSE,
                    sep = "\t") %>%
  data.table::setnames(c("chr", "pos", "major", "minor")) %>%
  left_join(., genome, by = "chr") %>%
  dplyr::mutate(pos_cum = chr_start + pos) # get cumulative chromosomal position
# outlier plot, whole genome: per-SNP slope of ancestry ~ elevation, with the
# 5% FDR threshold (solid) and the genome-wide mean (dashed) marked
p_elev <- bind_cols(sites, fits) %>%
  mutate(even_chr = ifelse(chr %% 2 == 0, "even", "odd"), # alternate chr colors
         zea = zea) %>%
  ggplot(., aes(pos_cum, envWeights,
                color = even_chr)) +
  geom_hline(yintercept = filter(FDRs, FDR == 0.05)$threshold, linetype = "solid", color = "#00BFC4") +
  geom_point(size = .1) +
  geom_hline(yintercept = mean(fits$envWeights), color = "black", linetype = "dashed") +
  xlab("bp position on chromosomes (total length = 2.3Gb)") +
  ylab("slope ancestry ~ elev") +
  ylim(c(-0.7, 1.2)) +
  scale_colour_manual(values = c(odd = "darkgrey",
                                 even = unname(col_maize_mex_parv[zea]))) +
  scale_x_continuous(label = genome$chr,
                     breaks = genome$centromere,
                     expand = expansion(mult = c(0, 0),
                                        add = c(0, 0))) +
  # BUG FIX: the complete theme must come *before* theme() tweaks — the old
  # order (theme() then theme_classic()) let theme_classic() overwrite the
  # legend.position setting
  theme_classic() +
  theme(legend.position = "none") +
  ggtitle(paste("Sympatric", zea, "- Change in mexicana ancestry over 1 km elevation gain")) +
  guides(color = FALSE)  # FALSE, not the reassignable F
# p_elev
ggsave(plot = p_elev,
       file = png_out,
       height = 3, width = 12,
       units = "in", dpi = 300,
       device = "png")
# also save the ggplot as an R object for downstream reuse (e.g. multi-panel figures)
saveRDS(object = p_elev, file = rds)
|
2056525132ca0f2f83f93b7b4f86c17ea919e2c2 | 2995eba3e39521845263e55777beb80cb702b60c | /Excel_help.R | 4e6f98af803a82025610b7b7916511291edc1d84 | [] | no_license | VictorMartinezRech/TFG_R | 074c669e603d217ebd28b5991da4b25d97ed2d9d | 62b3d29d26f1b877889d1634cf657d2148a7e511 | refs/heads/main | 2023-06-07T11:41:45.891096 | 2021-06-27T22:24:43 | 2021-06-27T22:24:43 | 380,693,535 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 138 | r | Excel_help.R | #Citation packages
citation("RDota2")
#Check data
install.packages("RDota2")
library(RDota2)
data()
data("B00data")
B00.teams |
0a002f3d51e027d708b06fe38fb44d722cbd70c0 | a3c78700a65f10714471a0d307ab984e8a71644d | /apps/api/R/general.R | 5f5c9ec36b2bbdadbc7517c5a27ae4358263275b | [
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | PecanProject/pecan | e42a8a6a0fc9c0bb624e0743ab891f6cf131ed3f | ce327b92bf14498fa32fcf4ef500a7a5db5c9c6c | refs/heads/develop | 2023-08-31T23:30:32.388665 | 2023-08-28T13:53:32 | 2023-08-28T13:53:32 | 6,857,384 | 187 | 217 | NOASSERTION | 2023-09-14T01:40:24 | 2012-11-25T23:48:26 | R | UTF-8 | R | false | false | 987 | r | general.R | #* Function to be executed when /api/ping endpoint is called
#* If successful connection to API server is established, this function will return the "pong" message
#* @return Mapping containing response as "pong"
#* @author Tezan Sahu
ping <- function(req) {
  # Health-check endpoint for /api/ping: always answers "pong". `req` is
  # accepted to satisfy the plumber handler signature but is not inspected.
  list(request = "ping", response = "pong")
}
#* Function to get the status & basic information about the Database Host
#* @return Details about the database host
#* @author Tezan Sahu
status <- function() {
  # Status endpoint: database-host details plus PEcAn build information taken
  # from environment variables (each falling back to "unknown" when unset).
  env_or_default <- function(name, default = "unknown") {
    val <- Sys.getenv(name)
    if (identical(val, "")) default else val
  }
  host <- PEcAn.DB::dbHostInfo(global_db_pool)
  host$authentication_required <- env_or_default("AUTH_REQ")
  list(
    host_details = host,
    pecan_details = list(
      version = env_or_default("PECAN_VERSION"),
      branch = env_or_default("PECAN_GIT_BRANCH"),
      gitsha1 = env_or_default("PECAN_GIT_CHECKSUM")
    )
  )
}
64ba13ed5552b797929364dcfe767a31b86a716a | b2d394dbc7335cccaf559d9d6fcc4c2272cc8e25 | /R/underscoreReplacements.R | a1bda65dfc7deb6e4f3536d22d1a22a7429b7989 | [] | no_license | NanaAkwasiAbayieBoateng/replyr | 7e52bfceb96b5dba9f068b4d824abf90d26d7333 | 7d4f2ed8924c4a4f463ea1bf768713fd953bdb88 | refs/heads/master | 2021-07-25T07:37:07.734022 | 2017-11-07T15:31:01 | 2017-11-07T15:31:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,736 | r | underscoreReplacements.R |
# replacements for a few of the underbar/underscore forms form dplyr 0.5 and earlier
#' Rename a column
#'
#' @param .data data object to work on
#' @param ... force later arguments to bind by name
#' @param newName character new column name
#' @param oldName character old column name
#'
#' @examples
#'
#' d <- data.frame(Sepal_Length= c(5.8,5.7),
#' Sepal_Width= c(4.0,4.4),
#' Species= 'setosa', rank=c(1,2))
#' replyr_rename(d, newName = 'family', oldName = 'Species')
#'
#' @export
#'
replyr_rename <- function(.data,
                          ...,
                          newName, oldName) {
  # `...` exists only to force newName/oldName to be passed by name; any
  # actual extra arguments are an error.
  if(length(list(...))>0) {
    stop("replyr::replyr_rename unexpected arguments")
  }
  newName <- as.character(newName)
  oldName <- as.character(oldName)
  if((length(newName)!=1)||(length(oldName)!=1)) {
    stop("replyr::replyr_rename newName and oldName must be length 1 character vectors")
  }
  # Renaming a column to itself is a no-op; only substitute when they differ.
  if(newName!=oldName) {
    REPLYR_PRIVATE_NEWNAME <- NULL # declare not an unbound name
    REPLYR_PRIVATE_OLDNAME <- NULL # declare not an unbound name
    # wrapr::let substitutes the placeholder symbols with the user-supplied
    # column names before evaluating the dplyr::rename expression, giving a
    # standard-evaluation interface over dplyr's NSE verb.
    wrapr::let(
      c(REPLYR_PRIVATE_NEWNAME=newName,
        REPLYR_PRIVATE_OLDNAME=oldName),
      strict = FALSE,
      .data <- dplyr::rename(.data,
                             REPLYR_PRIVATE_NEWNAME = REPLYR_PRIVATE_OLDNAME)
    )
  }
  .data
}
#' arrange by a single column
#'
#' @param .data data object to work on
#' @param colname character column name
#' @param descending logical if true sort descending (else sort ascending)
#'
#' @examples
#'
#' d <- data.frame(Sepal_Length= c(5.8,5.7),
#' Sepal_Width= c(4.0,4.4))
#' replyr_arrange(d, 'Sepal_Length', descending= TRUE)
#'
#' @export
#'
replyr_arrange <- function(.data, colname, descending = FALSE) {
  # Sort `.data` on a single column given by name (standard evaluation);
  # wrapr::let substitutes the placeholder so dplyr::arrange sees a bare
  # column symbol.
  colname <- as.character(colname) # remove any names
  REPLYR_PRIVATE_NEWNAME <- NULL # declare not an unbound name
  # NOTE(review): this local identity `desc` (declared for R CMD check) also
  # shadows dplyr::desc in the expression's evaluation environment — confirm
  # that descending sorts still work on local data frames.
  desc <- function(.) {.} # declare not an unbound name
  if(descending) {
    wrapr::let(
      c(REPLYR_PRIVATE_NEWNAME=colname),
      .data <- dplyr::arrange(.data,
                              desc(REPLYR_PRIVATE_NEWNAME))
    )
  } else {
    wrapr::let(
      c(REPLYR_PRIVATE_NEWNAME=colname),
      .data <- dplyr::arrange(.data,
                              REPLYR_PRIVATE_NEWNAME)
    )
  }
  .data
}
#' group_by columns
#'
#' See also: \url{https://gist.github.com/skranz/9681509}
#'
#' @param .data data object to work on
#' @param colnames character column name (can be a vector)
#'
#' @examples
#'
#' d <- data.frame(Sepal_Length= c(5.8,5.7),
#' Sepal_Width= c(4.0,4.4),
#' Species= 'setosa')
#' replyr_group_by(d, 'Species')
#'
#' @export
#'
replyr_group_by <- function(.data, colnames) {
  # Group `.data` by the columns named in the character vector `colnames`,
  # replacing (not adding to) any existing grouping.
  .data <- dplyr::ungroup(.data) # make sure no other grouping
  colnames <- as.character(colnames) # remove any names
  if(length(colnames)>1) {
    # Multi-column case: the group_by call is built as text and evaluated.
    # NOTE(review): eval(parse(text = ...)) breaks for non-syntactic column
    # names and is fragile in general; a substitution-based approach (as in
    # the single-column branch) would be safer.
    expr <- paste('dplyr::group_by( .data ,',
                  paste(colnames, collapse=', '),
                  ')')
    .data <- eval(parse(text= expr))
  } else {
    REPLYR_PRIVATE_NEWNAME <- NULL # declare not an unbound name
    wrapr::let(
      c(REPLYR_PRIVATE_NEWNAME= colnames), # strip off any outside names
      .data <- dplyr::group_by(.data,
                               REPLYR_PRIVATE_NEWNAME)
    )
  }
  .data
}
#' select columns
#'
#' @param .data data object to work on
#' @param colnames character column names
#'
#' @examples
#'
#' d <- data.frame(Sepal_Length= c(5.8,5.7),
#' Sepal_Width= c(4.0,4.4),
#' Species= 'setosa', rank=c(1,2))
#' replyr_select(d, c('Sepal_Length', 'Species'))
#'
#' @export
#'
replyr_select <- function(.data, colnames) {
  # Keep only the columns named in the character vector `colnames`.
  # NOTE(review): one_of() is used unqualified — this requires dplyr to be
  # attached (or one_of to be imported) at run time.
  dplyr::select(.data, one_of(colnames))
}
|
41c3bc3544a0c43e26283343b04cea2bac764122 | ea0635e26b53007212f87178aad1ed0ee378623e | /code/tools.R | 114f7dfc072169fdfb4e8a2f6293af7081779798 | [] | no_license | mqwu/project-fitcar | 5c505c5f2a19b485d1c36a65795cc8bbfd9329ba | 3aed3d8950ded26a34298352f94327e4db4e8308 | refs/heads/master | 2021-05-07T00:41:39.285889 | 2017-12-07T21:49:46 | 2017-12-07T21:49:46 | 110,168,091 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,922 | r | tools.R |
#--------------------------------------------------------
# Tools: collection of useful functions
#--------------------------------------------------------
load_libs <- function(requiredPackages) {
  # Attach every package named in `requiredPackages`, first installing (with
  # dependencies) any that are absent from the local library.
  #
  # Args:
  #   requiredPackages: character vector of package names to attach; missing
  #                     ones trigger an automatic install.packages() call
  installed <- installed.packages()[, "Package"]
  to_install <- requiredPackages[!(requiredPackages %in% installed)]
  if (length(to_install) > 0) {
    install.packages(to_install, dependencies = TRUE)
  }
  for (pkg in requiredPackages) {
    library(pkg, character.only = TRUE)
  }
}
plot_HistDensity <- function(x, title){
  # Plot a histogram (on the probability scale) with a kernel density curve
  # overlaid, then copy the plot to "<title>_hist.png" in the working
  # directory.
  # Args:
  #   x: numeric vector (NAs dropped before plotting)
  #   title: plot title; also used as the output-file prefix
  # Side effects:
  #   draws on the active device; writes "<title>_hist.png" via dev.copy()
  x <- x[!is.na(x)] # rm NA records
  hist(x, prob=TRUE, col="grey", nclass=50, main=NA) # prob=TRUE for probabilities not counts
  lines(density(x, adjust=1), col="blue", lty="solid", lwd=2) # add a density est
  title(main=title)
  dev.copy(png,paste0(title,"_hist.png"))
  dev.off()
}
plot_Box <- function(d, x="1", y, title=""){
  # Boxplot of numeric column `y`, optionally grouped by categorical column
  # `x` (both supplied as column-name strings), saved to "<title>_box.png".
  #
  # Args:
  #   d: data frame
  #   x: grouping column name; the default "1" draws one ungrouped box
  #   y: numeric column name
  #   title: plot title and output-file prefix
  # Returns:
  #   the ggplot object, invisibly; side effect: writes "<title>_box.png"
  #
  # BUG FIX: the old code ran filter_(!is.na(y)) on the column-name *string*
  # (always TRUE), so NA rows were never removed; subset on the column itself.
  valid <- d[!is.na(d[[y]]), , drop = FALSE]
  p <- ggplot(valid, aes_string(x = x, y = y)) +
    geom_boxplot(aes_string(fill = x)) +
    ggtitle(title)
  # BUG FIX: the theme layers were added to `p` but the result was discarded,
  # so ggsave() fell back to the unthemed "last plot"; assign and pass `p`.
  if (x == "1") { # single-level grouping: hide the meaningless x axis/legend
    p <- p + theme(axis.title.x = element_blank(),
                   axis.text.x = element_blank(),
                   axis.ticks.x = element_blank(),
                   plot.title = element_text(hjust = 0.5),
                   legend.position = "none")
  } else { # multi-level grouping: just center the title
    p <- p + theme(plot.title = element_text(hjust = 0.5))
  }
  ggsave(paste0(title, "_box.png"), plot = p)
  invisible(p)
}
plot_StackingProp <- function(d, x="1", y, title=""){
  # Stacked-proportion bar chart of `y` within each level of `x` (column
  # names given as strings), saved to "<title>_stackingProp.png".
  #
  # Args:
  #   d: data frame; x: grouping column name; y: counted column name;
  #   title: legend title and output-file prefix
  # Returns:
  #   the ggplot object, invisibly; side effect: writes the png file
  #
  # BUG FIX: the built ggplot was previously discarded, so ggsave() relied on
  # whatever "last plot" happened to be; capture it and pass it explicitly.
  p <- ggplot(data = d) +
    geom_bar(mapping = aes_string(x = x, fill = y), position = "fill") +
    guides(fill = guide_legend(title = title))
  ggsave(paste0(title, "_stackingProp.png"), plot = p)
  invisible(p)
}
plot_BarCount <- function(d, x="1", y, title=""){
  # Side-by-side (dodged) bar chart of counts of `y` within each level of
  # `x` (column names given as strings), saved to "<title>_barcount.png".
  #
  # Args:
  #   d: data frame; x: main categorical column; y: categorical column
  #   within x; title: output-file prefix
  # Returns:
  #   the ggplot object, invisibly; side effect: writes the png file
  #
  # BUG FIX: capture the ggplot and hand it to ggsave() instead of relying
  # on the "last plot" (the built plot was previously discarded).
  p <- ggplot(data = d) +
    geom_bar(mapping = aes_string(x = x, fill = y), position = "dodge")
  ggsave(paste0(title, "_barcount.png"), plot = p)
  invisible(p)
}
plotRFVarImp <- function(rf.mod){
  # Plot variable importance of a RF model
  #
  # Args:
  #   rf.mod: a fitted random-forest model object; must be accepted by
  #           importance() (e.g. randomForest) — importance() is called
  #           unqualified, so its package must be attached
  #
  # Returns:
  #   Two Plots: 1. based on pred accuracy 2. based on gini index
  # Importance data: first column = predictor names, last two columns are
  # assumed to be mean-decrease-accuracy and mean-decrease-Gini.
  dat <- data.frame(rownames(importance(rf.mod)), round(importance(rf.mod),2))
  names(dat)[c(1, ncol(dat)-1, ncol(dat))] <- c("Predictor","mda","mdg")
  rownames(dat) <- NULL
  pred.acc <- select(dat, Predictor, mda) # mean decrease in accuracy
  pred.gini <- select(dat, Predictor, mdg) # mean decrease in gini
  # Helper: horizontal bar chart of one importance measure, normalized so the
  # top variable scores 100; prints the ggplot as a side effect.
  importancePlot <- function(d,ylb,fontsize){
    fontsize <- as.numeric(fontsize)
    # NOTE(review): decreasing=T uses the reassignable T; prefer TRUE.
    d <- d[order(d[,2],decreasing=T),]
    d$Predictor <- factor(as.character(d$Predictor),levels=rev(as.character(d$Predictor)))
    rownames(d) <- NULL
    d[,2] <- d[,2]/abs(max(d[,2])) * 100 # normalize relative to the variable with maximum score
    abs.min <- abs(min(d[,2]))
    g1 <- ggplot(data=d,aes_string(x="Predictor",y=ylb,group="Predictor")) +
      geom_bar(stat="identity", colour="#d62d20", fill="#d62d20") + theme_grey(base_size=fontsize)
    #geom_bar(stat="identity", colour="#639f89", fill="#639f89") + theme_grey(base_size=fontsize)
    #if(ylb=="mda") g1 <- g1 + labs(y="Mean decrease in accuracy")
    #else if(ylb=="mdg") g1 <- g1 + labs(y="Mean decrease in Gini")
    g1 <- g1 + labs(y="Variable Importance") # Simplify for presentation purpose
    g1 <- g1 + theme(axis.title=element_text(size=25,face="bold"),
                     axis.text.x=element_text(angle=0,hjust=1,vjust=0.4,colour='black'),
                     axis.text.y= element_text(colour='black', size=25)) +
      geom_hline(yintercept=abs.min,linetype="dashed",colour="black") + coord_flip()
    print(g1)
  }
  importancePlot(d=pred.acc, ylb="mda", 20)
  importancePlot(d=pred.gini, ylb="mdg", 20)
}
|
8b12f377fe83306aae5e92057c4e68ec4c98dbf9 | a81e1ca6fe4c13be28d29f639ec768b51d016501 | /R/get_estimates.R | 26ca6b2ece96d32c58b01d6eabef4d394d937e13 | [] | no_license | cran/bain | d5f139bd8e644ffc339f8e6eea9c702cddfa5329 | 5bfb947c1569788eb959f5b45a39126b7f9b0ed6 | refs/heads/master | 2021-12-25T13:33:41.105293 | 2021-12-06T12:20:02 | 2021-12-06T12:20:02 | 169,492,749 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,184 | r | get_estimates.R | #' @importFrom utils tail
rename_function <- function(text){
  # Convert a character vector of parameter names into syntactically legal R
  # names by replacing special characters with "___"-delimited markers (the
  # inverse transformation is reverse_rename_function()).
  fulltext <- paste(text, collapse = "")
  # NOTE(review): new_names/names_est are assigned but never used below.
  new_names <- names_est <- text
  #if(grepl("[\\(\\)]", fulltext)){
  #  text <- gsub("\\(", "___O___", text)
  #  text <- gsub("\\)", "___C___", text)
  #}
  text[text == "(Intercept)"] <- "Intercept"
  # Interaction terms: replace : with the ___X___ marker.
  if(grepl(":", fulltext)){
    text <- gsub(":", "___X___", text)
  }
  # Simplify t.test()-style names ("mean of x", "mean of the differences").
  if(grepl("mean of ", fulltext)){
    text <- gsub("mean of the differences", "difference", text)
    text <- gsub("mean of ", "", text)
  }
  # If any variables are subsetted from data.frames: remove the df part of the name
  remove_df <- sapply(text, grepl, pattern = "[\\]\\$]+", perl = TRUE)
  if(any(remove_df)){
    text[remove_df] <- sapply(text[remove_df], function(x){
      tmp_split <- strsplit(x, "[\\]\\$]+", perl = TRUE)[[1]]
      if(length(tmp_split)==1){
        x
      } else {
        tail(tmp_split, 1)
      }
    })
  }
  # NOTE(review): any ":" was already rewritten to ___X___ above, so this
  # ___text___ replacement appears to be dead code — confirm.
  text <- gsub(":", "___text___", text)
  # lavaan operators: thresholds, loadings, covariances, intercepts, regressions.
  text <- gsub("\\|", "___thres___", text)
  text <- gsub("=~", "___by___", text)
  text <- gsub("~~", "___w___", text)
  text <- gsub("~1", "___int___", text)
  text <- gsub("~", "___on___", text)
  text
}
reverse_rename_function <- function(x){
  # Undo the placeholder substitutions introduced by rename_function(),
  # restoring interaction colons and lavaan-style operators.
  replacements <- c(
    "___X___"     = ":",
    "___thres___" = "|",
    "___by___"    = "=~",
    "___w___"     = "~~",
    "___int___"   = "~1",
    "___on___"    = "~"
  )
  for (marker in names(replacements)) {
    x <- gsub(marker, replacements[[marker]], x, fixed = TRUE)
  }
  x
}
#' @importFrom utils tail
rename_estimate <- function(estimate){
  # Validate and normalize the names of a named estimate vector so they can
  # be referenced in bain() hypothesis strings:
  #   * "(Intercept)" -> "Intercept"
  #   * t.test-style names ("mean of x", "mean of the differences") are
  #     simplified when there are fewer than 3 estimates
  #   * data.frame accessors ("df$col", "df[['col']]") are reduced to the
  #     final column name
  #   * interaction colons become "___X___" so the names stay syntactic
  # Errors when the vector is unnamed or a parsed name is not a legal R name.
  names_est <- names(estimate)
  # Validate before touching the names (the old code checked after use).
  if(is.null(names_est)){
    stop("The 'estimates' supplied to bain() were unnamed. This is not allowed, because estimates are referred to by name in the 'hypothesis' argument. Please name your estimates.")
  }
  new_names <- names_est
  # Direct logical replacement instead of the old match()-based subassignment.
  new_names[new_names == "(Intercept)"] <- "Intercept"
  # BUG FIX: removed a leftover browser() call that dropped every invocation
  # into the interactive debugger.
  if(length(new_names) < 3){
    new_names <- gsub("mean of the differences", "difference", new_names)
    new_names <- gsub("mean of ", "", new_names)
  }
  # If any variables are subsetted from data.frames: remove the df part of the name
  remove_df <- sapply(new_names, grepl, pattern = "[\\]\\$]+", perl = TRUE)
  if(any(remove_df)){
    new_names[remove_df] <- sapply(new_names[remove_df], function(x){
      tmp_split <- strsplit(x, "[\\]\\$]+", perl = TRUE)[[1]]
      if(length(tmp_split)==1){
        x
      } else {
        tail(tmp_split, 1)
      }
    })
  }
  # Any interaction terms: replace : with _X_
  new_names <- gsub(":", "___X___", new_names)
  legal_varnames <- sapply(new_names, grepl, pattern = "^[a-zA-Z\\.][a-zA-Z0-9\\._]{0,}$")
  if(!all(legal_varnames)){
    stop("Could not parse the names of the 'estimates' supplied to bain(). Estimate names must start with a letter or period (.), and can be a combination of letters, digits, period and underscore (_).\nThe estimates violating these rules were originally named: ", paste("'", names_est[!legal_varnames], "'", sep = "", collapse = ", "), ".\nAfter parsing by bain, these parameters are named: ", paste("'", new_names[!legal_varnames], "'", sep = "", collapse = ", "), call. = FALSE)
  }
  names(estimate) <- new_names
  estimate
}
#' @title Get estimates from a model object
#' @description Get estimates from a model object.
#' This convenience function allows you to see that coefficients are properly
#' extracted, note how their names will be parsed, and inspect their values.
#' @param x A model object.
#' @param ... Parameters passed to and from other functions.
#' @return An object of class 'model_estimates'
#' @examples
#' \dontrun{
#' # Example 1
#' m_tt <- t.test(iris$Sepal.Length[1:20], iris$Sepal.Length[21:40])
#' get_estimates(m_tt)
#' # Example 2
#' m_lm <- lm(Sepal.Length ~., iris)
#' get_estimates(m_lm)
#' }
#' @rdname get_estimates
#' @export
#' @keywords internal
get_estimates <- function(x, ...){
  # S3 generic: dispatches on the class of the fitted model object `x`.
  UseMethod("get_estimates", x)
}
#' @method get_estimates matrix
#' @export
get_estimates.matrix <- function(x, ...){
  # Extract the cells of a correlation matrix as a named estimate vector
  # ("row_with_col"); Sigma is NULL because no sampling covariance is known.
  looks_like_cor <- nrow(x) == ncol(x) && all(x^2 <= 1) && all(diag(x) == 1)
  if (!looks_like_cor) {
    stop("Attempted to get_estimates from a matrix, but the matrix does not appear to be a correlation matrix.")
  }
  if (is.null(rownames(x)) || is.null(colnames(x))) {
    warning("Running get_estimates on a (correlation) matrix without rownames or colnames. The names of the extracted estimates will be generated automatically.")
    dimnames(x) <- rep(list(paste0("V", seq_len(nrow(x)))), 2)
  }
  cells <- as.data.frame.table(x)
  estimate <- setNames(cells$Freq, paste0(cells$Var1, "_with_", cells$Var2))
  structure(list(estimate = estimate, Sigma = NULL),
            class = "model_estimates",
            analysisType = "correlation")
}
#' @method get_estimates lm
#' @export
get_estimates.lm <- function(x, ...){
  # Pull the coefficient vector and its sampling variance-covariance matrix
  # from a fitted linear model; tagged so downstream bain code knows the
  # source of the estimates.
  structure(
    list(estimate = coef(x), Sigma = vcov(x)),
    class = "model_estimates",
    analysisType = "lm"
  )
}
#' @method get_estimates t_test
#' @export
get_estimates.t_test <- function(x, ...){
  # Estimates from bain's t_test object; relies on coef()/vcov() methods for
  # class "t_test" defined elsewhere in the package.
  out <- list(estimate = coef(x),
              Sigma = vcov(x))
  # Normalize the names produced by t.test() ("mean of x", "mean of the
  # differences", "mean difference") to short parameter names.
  nams <- gsub("mean difference", "difference", names(out$estimate), fixed = TRUE)
  nams <- gsub("mean of the differences", "difference", nams, fixed = TRUE)
  nams <- gsub("mean of ", "", nams, fixed = TRUE)
  # Older per-method renaming, kept for reference:
  # if(x$method == "One Sample t-test"){
  #   nams <- "x"
  # } else if (x$method == "Paired t-test"){
  #   nams <- "difference"
  # } else if (x$method == "Welch Two Sample t-test"){
  #   nams <- gsub("mean of ", "", names(out$estimate), fixed = TRUE)
  # } else {names(rval$estimate) <- c("x","y")}
  names(out$estimate) <- nams
  class(out) <- "model_estimates"
  attr(out, "analysisType") <- "htest"
  out
}
#' @method get_estimates lavaan
#' @export
get_estimates.lavaan <- function(x, standardize = FALSE, ...){
  # Delegates to lav_get_estimates() (defined elsewhere in the package),
  # forwarding the original call's arguments, then renames the "x" element
  # to "estimate" for consistency with the other methods.
  cl <- as.list(match.call()[-1])
  out <- do.call(lav_get_estimates, cl)
  names(out)[which(names(out) == "x")] <- "estimate"
  class(out) <- "model_estimates"
  attr(out, "analysisType") <- "lavaan"
  out
}
#' @method get_estimates htest
get_estimates.htest <- function(x, ...) {
  # Deliberate dead end: stats::t.test() drops the group-specific variances
  # and sample sizes that bain needs, so direct the user to bain::t_test().
  msg <- "To be able to run get_estimates on an object returned by t.test(), you must first load the 'bain' package, and then conduct your t.test. The standard t.test does not return group-specific variances and sample sizes, which are required by get_estimates. The 'bain' package contains a function, t_test(), which does return this necessary information."
  stop(msg)
}
#' @title Label estimates from a model object
#' @description Label estimates from a model object, before passing it on to the
#' \code{\link{bain}} function.
#' @param x A model object for which a \code{\link{bain}} method exists.
#' @param labels Character vector. New labels (in order of appearance) for the
#' model object in \code{x}. If you are unsure what the estimates in \code{x}
#' are, first run \code{\link{get_estimates}}.
#' @param ... Parameters passed to and from other functions.
#' @return A model object of the same class as x.
#' @seealso get_estimates bain
#' @rdname label_estimates
#' @keywords internal
label_estimates <- function(x, labels, ...){
  # Currently a no-op that returns the model object unchanged; the S3
  # dispatch is disabled (see the commented-out UseMethod call below), so
  # `labels` is ignored.
  x
  #UseMethod("label_estimates", x)
}
#' @method label_estimates lm
label_estimates.lm <- function(x, labels, ...){
  # Relabel an lm object's coefficients, model-frame columns and formula with
  # user-supplied names (one label per coefficient, in order of appearance).
  # NOTE: only reachable if the disabled dispatch in label_estimates() is
  # re-enabled.
  if(length(x$coefficients) != length(labels)) stop("The length of the vector of 'labels' must be equal to the length of the vector of coefficients in the model. To view the vector of coefficients, use 'get_estimates()'.")
  # Skip the first label if the model has an intercept term.
  if(grepl("^\\(?Intercept\\)?$", names(x$coefficients)[1])){
    current_label <- 2
  } else {
    current_label <- 1
  }
  names(x$coefficients) <- labels
  # Now, process the data
  variable_types <- sapply(x$model, class)
  # Walk the predictor columns (column 1 is the response).
  # NOTE(review): 2:length(variable_types) misbehaves for an intercept-only
  # model (length 1 gives the sequence 2:1) — confirm that case cannot occur.
  for(thisvar in 2:length(variable_types)){
    if(variable_types[thisvar] == "factor"){
      # Factors consume one label per level and are coerced to ordered.
      x$model[[thisvar]] <- ordered(x$model[[thisvar]], labels = labels[current_label:(current_label+length(levels(x$model[[thisvar]]))-1)])
      current_label <- current_label + length(levels(x$model[[thisvar]]))
      #fac_name <- names(x$model)[thisvar]
      #fac_levels <- levels(x$model[[thisvar]])
      #which_coef <- match(paste0(fac_name, fac_levels), names(x$coefficients))
      #fac_levels[which(!is.na(which_coef))] <- labels[which_coef[!is.na(which_coef)]]
      #x$model[[fac_name]] <- ordered(x$model[[fac_name]], labels = fac_levels)
    } else {
      # Numeric predictors: substitute the new name into the stored formula
      # and rename the model-frame column; one label consumed.
      #x$call$formula[3] <- gsub(paste0("\\b", names(x$model)[thisvar], "\\b"), labels[current_label], x$call$formula[3])
      #substitute(x$call$formula, list(names(x$model)[thisvar] = labels[current_label]))
      x$call$formula <- do.call("substitute", list(x$call$formula,
                                                   setNames(list(as.name(labels[current_label])), names(x$model)[thisvar])
                                                   )
                                )
      names(x$model)[thisvar] <- labels[current_label]
      current_label <- current_label+1
    }
  }
  # get_estimates() is invoked invisibly, apparently as a validity check of
  # the relabeled object (its return value is discarded).
  invisible(get_estimates(x))
  x
}
#' @method label_estimates t_test
# Relabel the (group) estimates stored in a bain t_test object.
label_estimates.t_test <- function(x, labels, ...){
names(x$estimate) <- labels
# NOTE(review): the result of get_estimates(x) is discarded, so invisible()
# is a no-op on it; the call presumably exists for its validation side
# effects — confirm.
invisible(get_estimates(x))
x
}
#' @method label_estimates htest
# Guard method: a plain stats::t.test() result (class "htest") lacks the
# group-specific variances and sample sizes bain needs, so attempting to
# label it is always an error pointing the user at bain's t_test().
label_estimates.htest <- function(x, labels, ...) {
  msg <- paste0(
    "To be able to run bain on the results of an object returned by ",
    "t_test(), you must first load the 'bain' package, and then conduct ",
    "your t_test. The standard t_test does not return group-specific ",
    "variances and sample sizes, which are required by bain. When you load ",
    "the bain package, the standard t_test is replaced by a version that ",
    "does return this necessary information."
  )
  stop(msg)
}
#' @method print model_estimates
#' @export
print.model_estimates <- function(x,
                                  digits = 3,
                                  na.print = "", ...){
  # Render the stored estimates in fixed (non-scientific) notation with the
  # requested number of decimal places, then print without quotes.
  formatted <- formatC(x$estimate, digits = digits, format = "f")
  print(formatted, quote = FALSE)
}
|
b699290ce2f42f9f4323f4100367b0350fd0c53d | eeb8dea0dc1ff39545996395f8d4d36290edfb8e | /man/sgdm.best.Rd | debe74fbc9cdab68b98223027296f3e0173ada28 | [] | no_license | steppebird/sparsegdm | e71c66fbf11aee1fe61b0e86f655ebb9a216e938 | 16280b2b7bd2c1e08c460f1e01a15a37cb49894e | refs/heads/master | 2021-04-30T08:19:37.879407 | 2017-02-23T09:18:01 | 2017-02-23T09:18:01 | 121,371,197 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,290 | rd | sgdm.best.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sgdm.best.R
\name{sgdm.best}
\alias{sgdm.best}
\title{Retrieves the best SGDM model, SCCA canonical components or SCCA canonical vectors, as resulting from the SGDM parameter estimation}
\usage{
sgdm.best(perf.matrix, predData, bioData, output = "m", k = 10, geo = F)
}
\arguments{
\item{perf.matrix}{Performance matrix as output from \code{sgdm.train} function.}
\item{predData}{Predictor dataset ("predData" format).}
\item{bioData}{Biological dataset ("bioData" format).}
\item{output}{Type of output: "m" = gdm model; "c" = sparse canonical components; "v" = sparse canonical vectors; Set as "m" per default.}
\item{k}{Number of sparse canonical components to be calculated; set to 10 by default.}
\item{geo}{only needed if output = "m"; optional use of geographical distance as predictor in GDM model, set as FALSE per default}
}
\value{
Returns a GDM model, the sparse canonical components, or the sparse canonical vectors, depending on the output defined. The default is \code{output} = "m", which returns a GDM model object.
}
\description{
This function retrieves the best SGDM model, SCCA canonical components or SCCA canonical vectors, as resulting from the SGDM parameter estimation with the \code{sgdm.train} function.
The parameter pair with the lowest RMSE value is selected to run the SCCA on the biological and predictor datasets. If \code{output} = "m", it delivers the GDM model built on the extracted SCCA components; if \code{output} = "c", it delivers the SCCA components that result in the best GDM model; and if \code{output} = "v", it delivers the SCCA canonical vectors used to transform the predictor data into the canonical components.
It requires a performance matrix as resulting from the \code{sgdm.train} function, a predictor dataset ("predData" format), a biological dataset ("bioData" format), the type of output, the number of components to be extracted in the SCCA and the optional use of geographical distance as predictor variable in the GDM.
This current implementation only allows biological data in the format 1 using abundance values, as described in the \code{gdm} package.
For more details relating to "bioData" and "predData" data formats, check \code{gdm} package.
}
|
b203e5cd1fed36182e857ed4d5bca89d16cefc29 | 7f386b1678e1a09050ade6b571cb0718e4c0f072 | /man/junit_reporter.Rd | ec5432b2443815977abd448fc323e4fe5656a120 | [] | no_license | MultiBUGS/multibugstests | cd7ba608b00d3cc47fa713d58bedd1bb03c58c2e | 47609931fe0fa40dae4a4e493f9d540cd22ee71b | refs/heads/master | 2020-04-12T10:26:36.572757 | 2020-02-18T15:02:45 | 2020-02-18T15:02:45 | 162,429,920 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 510 | rd | junit_reporter.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/junit.R
\name{junit_reporter}
\alias{junit_reporter}
\title{Report result of test for junit}
\usage{
junit_reporter(type, ...)
}
\arguments{
\item{type}{One of \code{"pre"}, \code{"post"} and \code{"wrapup"}
specifying the stage that needs reporting.}
}
\value{
The function \code{\link{junit_reporter_pre}},
\code{\link{junit_reporter_post}} or \code{\link{junit_reporter_wrapup}}
}
\description{
Report result of test for junit
}
|
6cc534b466fd4ad09ec1cd09b736caa44bedb525 | 1ca95bcfeb6c33a310544efbab4dcf326dca30d7 | /man/today.Rd | b5295e5cbb04ee1ef4b68d89329df6ec9637d4ab | [
"MIT"
] | permissive | reconhub/epirisk | ea180c4802b29a7b2cbde0eaf29802b0adff4187 | b4de6b6fea543b068bcb0a14c17ee53272a63ff4 | refs/heads/master | 2020-05-03T03:21:06.311933 | 2019-03-29T16:47:26 | 2019-03-29T16:47:26 | 178,395,646 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 240 | rd | today.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/today.R
\name{today}
\alias{today}
\title{Return current date}
\usage{
today()
}
\description{
This function returns the current date, as a \code{Date} object.
}
|
b0c0e05bfa97b8a97204d758c5831f721232c02b | 2aad4a718def7f616426e80b0bb7b38c34b97384 | /naivebayes.R | 61bcb799601681113988ef9b8dd49242aed7eda6 | [] | no_license | anniepan21/Machine-Learning-in-R | 1997239caba861f4038ff6af522d50c53cf048b9 | 5aa2e85e0aabda52e8c94a6ea44508d1f9d27ec5 | refs/heads/master | 2021-08-07T16:16:08.236098 | 2017-11-08T14:31:32 | 2017-11-08T14:31:32 | 106,716,224 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,383 | r | naivebayes.R | ### Lab – Chapter 4 Probabilistic Learning – Classification Using Naïve Bayes
### Read data
eegIDRecord_raw <- read.csv("eegIDRecord.csv")
str(eegIDRecord_raw)
### Normalize all 12 columns
normalize <- function(x) { return ((x - min(x)) / (max(x) - min(x))) }
eegIDRecord_n <- as.data.frame(lapply(eegIDRecord_raw[1:12],normalize))
### Discretize values in each column
discretize <- function(x){ifelse(x<0.5,0,1)}
eegIDRecord_n_d <- as.data.frame(lapply(eegIDRecord_n[1:12],discretize))
### Create training and test data
test_data <- runif(518,min=1,max=2518)
eegIDRecord_test <- eegIDRecord_n_d [ test_data,]
eegIDRecord_train <- eegIDRecord_n_d [-test_data,]
### Save the labels
eegIDRecord_train$meditation <- factor(eegIDRecord_train$meditation)
eegIDRecord_test$meditation <- factor(eegIDRecord_test$meditation)
train_label <- eegIDRecord_train$meditation
test_label <- eegIDRecord_test$meditation
eegIDRecord_test2 <- eegIDRecord_test [,-3]
eegIDRecord_train2 <- eegIDRecord_train [,-3]
### Training model on the data
library(e1071)
library(gmodels)
classifier <- naiveBayes(eegIDRecord_train2,train_label,laplace = 1)
p <- predict(classifier,eegIDRecord_test2,type="class")
### CrossTable
CrossTable(p, test_label,
prop.chisq = FALSE, prop.t = FALSE, prop.r = FALSE,
dnn = c('predicted', 'actual'))
|
bcfeda9d3b37fff3cfc45ac14a4005ffe9a41c01 | 351fe3d417a5cb5f5b3c8c9d291e6f55df93ec19 | /analysis/two_part_mixed_with_smote_category_code.R | 7486ec1149766a28201226958590ce004a48d2de | [
"MIT"
] | permissive | desaip2468/deguri | 7b47378027d3f51284eb9d1bbc421a7c5dbeb585 | a75c2a6f40674e0e91d27c88f3e12b2bc0ed8d47 | refs/heads/master | 2020-05-02T05:37:13.152683 | 2019-10-02T05:54:36 | 2019-10-02T05:54:36 | 177,775,748 | 3 | 0 | null | 2019-06-13T08:55:11 | 2019-03-26T11:35:21 | HTML | UTF-8 | R | false | false | 1,536 | r | two_part_mixed_with_smote_category_code.R | # select, filter, %>%
library(dplyr)
## Loading DMwR to balance the unbalanced class
library(DMwR)
## Train test split
library(caTools)
data <- read.csv('data/payments_ppdb_app_g2_category_code_aggregated.csv')
x <- data %>% filter(age >= 50 & approval_real_price_sum_by_by_approval_type_LT01 < 800000)
target <- x %>% select(approval_real_price_sum_by_by_approval_type_LT01)
category_code_data <- x %>% select(matches("^category_code_\\d{1,2}_count$")) %>% select(-contains('17'))
category_code <- cbind(target, category_code_data) %>% mutate(non_zero = approval_real_price_sum_by_by_approval_type_LT01 != 0)
## Train test split
category_code_split <- sample.split(category_code$non_zero, SplitRatio = 0.90)
category_code_train <- category_code %>% filter(category_code_split)
category_code_test <- category_code %>% filter(!category_code_split)
m1 <- glm(non_zero ~ ., data = category_code_train %>% select(-approval_real_price_sum_by_by_approval_type_LT01), family = binomial(link = logit))
m2 <- glm(approval_real_price_sum_by_by_approval_type_LT01 ~ ., data = category_code_train %>% filter(non_zero) %>% select(-non_zero), family = poisson(link = log))
## Predict the Values
predict_m1 <- predict(m1, category_code_test %>% select(-approval_real_price_sum_by_by_approval_type_LT01), type = 'response')
predict_m2 <- predict(m2, category_code_test %>% filter(non_zero) %>% select(-non_zero), type = 'response')
formula <- 'approval_real_price_sum_by_by_approval_type_LT01 ~ .' %>% as.formula
summary(model_category_code)
|
fd2b3fd263a63cf61b30b014d20262daa16ae735 | 145e98e2c62ff472b7f20cab718f772d911fa6ee | /PBSawatea/man/plt.quantBio.Rd | dcdaf5fccd9f9f9a866ff7cb4f39c7f18faa03a0 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | pbs-software/pbs-awatea | f69546fedd94945bba4162ea36b88c7a10adc0c3 | a895bb969a3e24ab55dd854dcccd73252af0eddb | refs/heads/master | 2023-07-09T20:16:48.449213 | 2023-07-04T17:30:58 | 2023-07-04T17:30:58 | 37,491,610 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,404 | rd | plt.quantBio.Rd | \name{plt.quantBio}
\alias{plt.quantBio}
\alias{plt.quantBioBB0}
\title{
Plot Quantile Boxes of MCMC and Projected Biomass
}
\description{
Plots MCMC and projected biomass as quantile boxes, the former in black, the latter in red.
}
\usage{
plt.quantBio(obj, projObj=NULL, policy=NULL,
p=tcall(quants5), xyType="lines", lineType=c(3,2,1,2,3),
refLines=NULL, xLim=NULL, yLim=NULL,
userPrompt=FALSE, save=TRUE, yaxis.lab="Spawning biomass", lang="e")
plt.quantBioBB0(obj, projObj=NULL, policy=NULL,
p = tcall(quants5), xyType="quantBox", lineType=c(3,2,1,2,3),
delta=0.25, lwd=0.75, refLines=NULL, xLim=NULL, yLim=NULL,
userPrompt=FALSE, save=TRUE, main="", cex.main="",
tcl.val=-0.1, xaxis.by=1, yaxis.by=10000, cex.axis=1, cex.lab=1,
xaxis.lab="Year", yaxis.lab= "Spawning biomass", lang="e")
}
\arguments{
\item{obj}{\code{list} -- an Awatea MCMC object (\emph{e.g.}, \code{currentMCMC}).}
\item{projObj}{\code{list} -- an Awatea projected biomass object (\emph{e.g.}, \code{currentProj}).}
\item{policy}{\code{numeric} -- vector specifying catch policy.}
\item{p}{\code{numeric} -- quantiles to use from the biomass samples.}
\item{xyType}{\code{character} -- string specifying type of plot.}
\item{lineType}{\code{numeric} -- vector of line types for the quantiles if \code{xyType="lines"}.}
\item{delta}{\code{numeric} -- x-offset for plotting boxes.}
\item{lwd}{\code{numeric} -- line width device (see \code{'par'}).}
\item{refLines}{\code{numeric} -- reference points.}
\item{xLim}{\code{numeric} -- limits of the x-axis.}
\item{yLim}{\code{numeric} -- limits of the y-axis.}
\item{userPrompt}{\code{logical} -- if \code{TRUE} prompts user before figure is drawn.}
\item{save}{\code{logical} -- if \code{TRUE} save figure as a raster file \code{.png}.}
\item{main}{\code{character} -- string specifying a title for the plot.}
\item{cex.main}{\code{numeric} -- font size for figure title.}
\item{tcl.val}{\code{numeric} -- tick length.}
\item{xaxis.by}{\code{numeric} -- tick mark intervals for x-axis.}
\item{yaxis.by}{\code{numeric} -- tick mark intervals for y-axis.}
\item{cex.axis}{\code{numeric} -- size of labels for the axis ticks}
\item{cex.lab}{\code{numeric} -- size of labels along the axes}
\item{xaxis.lab}{\code{character} -- label for x-axis.}
\item{yaxis.lab}{\code{character} -- label for y-axis.}
\item{lang}{\code{character} -- a letter that denotes the language for output: currently only \code{"e"} (english) or \code{"f"} (french).}
}
\value{
List of the reconstructed (MCMC) and projected results.
}
\author{
\href{mailto:andrew.edwards@dfo-mpo.gc.ca}{Andrew Edwards}, Research Scientist, PBS, DFO, Nanaimo BC
\href{mailto:rowan.haigh@dfo-mpo.gc.ca}{Rowan Haigh}, Program Head -- Offshore Rockfish\cr
Pacific Biological Station (PBS), Fisheries & Oceans Canada (DFO), Nanaimo BC\cr
\emph{locus opus}: Regional Headquarters (RHQ), Vancouver BC\cr
Last modified \code{Rd: 2023-06-28}
}
\note{
\code{plt.quantBioBB0} performs similarly as for \code{plt.quantBio} but uses
\eqn{B_t/B_0}{Bt/B0} instead of \eqn{B_t}{Bt}.
}
\seealso{
\code{\link[PBSawatea]{plotVBcatch}},
\code{\link[PBSawatea]{plotBVBnorm}}, \code{\link[PBSawatea]{plotRmcmcPOP}}
}
\keyword{hplot}
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
1ff350e857f9033bf16eb676afc2a61c9ffe76a5 | b0e9772ea69ac06e39cade7ac2d60fc256317eef | /inst/scripts/population.R | ef3f94a4df42394e231dce8b47fb05fed6545437 | [] | no_license | UCLouvain-CBIO/rWSBIM1207 | 69d302f70d05592c1abce3979c6260cecc1ec372 | 43745eae061dbb193d39a2f1f58a74d02fe29fec | refs/heads/master | 2023-07-20T19:00:59.404868 | 2023-07-19T07:09:48 | 2023-07-19T07:09:48 | 164,664,929 | 0 | 1 | null | 2019-02-16T21:19:34 | 2019-01-08T14:21:51 | R | UTF-8 | R | false | false | 1,754 | r | population.R | ## Data downloaded from the Belgian statbel.fgov.be page describing
## the population density on 2023-07-18.
##
## https://statbel.fgov.be/fr/themes/population/structure-de-la-population/densite-de-la-population#figures
library(readxl)
library(tidyverse)
## sheet names
sheets <- readxl::excel_sheets("../extdata/Population_par_commune.xlsx")
years <- substring(sheets, nchar(sheets) - 3, nchar(sheets))
filenames <- paste0("../extdata/population_BE_", years, ".csv")
pop <- lapply(seq_along(sheets),
function(i) {
sheet <- sheets[i]
## assumes that missing values only in non-records
pop <- read_xlsx("../extdata/Population_par_commune.xlsx", skip = 1, sheet = i) |>
na.omit() |>
janitor::clean_names()
## fix ’
pop[[2]] <- gsub("’", "'", pop[[2]])
pop$annee <- years[i]
write_csv(pop, file = filenames[i])
pop
})
## ## testing
## x <- read_csv(list.files("../extdata", pattern = "_BE_", full.names = TRUE))
## lieux <- x[[2]][1:4]
## x |>
## filter(lieu_de_residence %in% lieux) |>
## filter(annee > 1990) |>
## ggplot(aes(x = annee, y = total,
## colour = lieu_de_residence)) +
## geom_line() +
## geom_point()
## x |>
## pivot_longer(names_to = "variable",
## values_to = "value",
## 3:5) |>
## filter(lieu_de_residence %in% lieux) |>
## filter(annee > 1990) |>
## ggplot(aes(x = annee, y = value,
## colour = variable)) +
## geom_line() +
## geom_point() +
## facet_wrap(~ lieu_de_residence, scale = "free_y")
|
13f3dce69caa8dede9fc26b6985c67ae882676b5 | 3ca1380eefc7eaba25fc6149e32ea8fc088babf8 | /man/butter_classify.Rd | f04e0f9cf2b7ded77363ce8fa9f0e93e256b0add | [
"MIT"
] | permissive | skannan4/singleCellNet | a52341f029d5c7f74c36a0ffd28989a9ee557eff | 2c91d3a05bbbb5857c6af9a4cc691124b977b1ca | refs/heads/master | 2020-04-19T07:11:07.169109 | 2019-01-28T22:58:50 | 2019-01-28T22:58:50 | 168,039,964 | 0 | 0 | MIT | 2019-01-28T21:24:54 | 2019-01-28T21:24:54 | null | UTF-8 | R | false | true | 378 | rd | butter_classify.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/butter.R
\name{butter_classify}
\alias{butter_classify}
\title{classify}
\usage{
butter_classify(washedDat, classList)
}
\arguments{
\item{washedDat}{result of wash()}
\item{classList}{result of pipeButter()}
}
\value{
matrix of classifier results
}
\description{
classify, wrapper to sc_classify
}
|
806002cb3e1696993e201f3cf8d4b0e93706dde4 | 2fe9c1959aec00d2cfd7f9ec417a28ba5c92a9f9 | /sw/FIgraph/FIG/man/FItable.Rd | cdb1b9993d4f6f5d31204b2f084e04e57470d044 | [] | no_license | BitlDku/home | 0cd8cbf0c9d75e30d0a965a49668bb72a2831e34 | 6593826c872fb72891fcaa7c6030de635dc38d1d | refs/heads/master | 2022-03-10T05:59:33.976156 | 2022-02-15T07:52:03 | 2022-02-15T07:52:03 | 154,793,577 | 2 | 1 | null | 2021-07-26T11:40:39 | 2018-10-26T07:19:22 | HTML | UTF-8 | R | false | true | 1,575 | rd | FItable.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FItable.R
\name{FItable}
\alias{FItable}
\title{Create a feature interaction table}
\usage{
FItable(
model,
train,
target.name,
grid = 50,
task = "regression",
interaction_type = "OH2",
all.class = TRUE
)
}
\arguments{
\item{model}{A prediction model (Classification or regression)}
\item{train}{Training dataset (data frame) that is used to building model}
\item{target.name}{Name of target label name in train dataset}
\item{grid}{Number of grid to calculate partial dependency function. Default is 50)}
\item{task}{Prediction task. "regression" (default) or "classification".}
\item{interaction_type}{Type of feature interaction algorithms. One of "OH2" (default), "FRIEDMAN","GREENWELL".}
\item{all.class}{When task is classification, Feature importance can be calculated for overall classed (all.class==TRUE) or for each class (all.class==FALSE).}
}
\value{
[list] feature interaction & feature importance table
}
\description{
This function create a feature interaction table.
}
\examples{
library("FIG")
# for regression
data("Boston", package = "MASS")
model <- lm(medv ~ ., data = Boston)
FIobj1 <- FItable(model, train=Boston, target.name="medv", grid=50,
task="regression", interaction_type="OH2")
print(FIobj1)
# for classification
library(e1071)
model2 <- svm(Species~., data=iris)
FIobj2 <- FItable(model2, train=iris, target.name="Species", grid=50,
task="classification", interaction_type="OH2", all.class=F)
print(FIobj2)
}
|
c264337ca38fcff0a77192a9109d6a759b9022a6 | acca4ebb9fec1728a5a9004193b98b830c0c74ac | /r10_control.r | b430feb94fc7018b201b864b30f4ee14bc9a5921 | [] | no_license | Minki96/lab-r | 8e43bcff537319511e6a2694bd0afb885370333b | c274088237e99057f8c9fa6b2e6b6bb98b686948 | refs/heads/master | 2022-06-17T06:10:15.525771 | 2020-05-06T01:48:18 | 2020-05-06T01:48:18 | 261,624,227 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,253 | r | r10_control.r | # Control Statement (제어문)
# 1) 조건문
# if(조건식) {
# 조건식이 참일 때 실행 할 문장들.
# } else {
# 조건식이 거짓일 때 실행 할 문장들.
# }
x <- 11
if (x > 0) {
print("Good") # 0보다 클때
} else if (x < 0) {
print("Bad") # 0보다 작을 때
} else {
print("zero") # 그 외 나머지
}
v <- c(1, -2, 0)
v =! 0
# >, <, --, !=
# if - else 문에서 조건식에 벡터가 사용되면,
# 벡터의 첫번째 원소만 조건 검사에 사용됨.
# ifelse(조건식, 참일 때 값, 거짓일 때 값)
t <- ifelse( v > 0, "YES" ,
ifelse(v < 0,"NO", "zero"))
t
# data/csv_exam.csv 파일에서 데이터 프레임 생성.
exam_test <- read.csv("data/csv_exam.csv")
is.data.frame(exam_test)
# 벡터 프레임에 세과목 총점/평균 컬럼(변수)을 추가.
exam_test$sum <- exam_test$math + exam_test$english + exam_test$science
exam_test$mean <- exam_test$sum / 3
# 데이터 프레임에 grade 컬럼(변수)를 추가
# - 평균 >= 90이면, "A"
# - 평균 >= 60이면, "B"
# - 평균 < 60이면, "F"
exam_test$grade <- ifelse(exam_test$mean >= 80, "A", ifelse(exam_test$mean >= 60,"B","F"))
exam_test
# 2) 반복문
|
d6f95e364273dc9381ea9fc14485974eba948912 | 98882947bd6b74ac8645a3d1c8843d953f1a808c | /FeatSel.R | d9d5d3371429e46cf55ed22792504a0fdc5db054 | [] | no_license | dakshasrani/Horcruxes | 393be9851af6710b783ed3ec837da6cd094d1107 | 9628689060ff1ba225de38298a34994d5862dd08 | refs/heads/master | 2021-01-22T01:55:11.421432 | 2015-03-13T19:49:34 | 2015-03-13T19:49:34 | 32,130,047 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 539 | r | FeatSel.R | library(CORElearn)
estReliefF <- attrEval(Cover_Type ~ ., train1, estimator="DistAUC")
Elevation, Soil_Type, Horizontal_Distance_To_Roadways, Wilderness_Area,
Horizontal_Distance_To_Fire_Points,
Horizontal_Distance_To_Hydrology, Hillshade_3pm, Hillshade_9am, Cover_Type))
test1 <- subset(test, select=c(Elevation, Soil_Type, Horizontal_Distance_To_Roadways, Wilderness_Area,
Horizontal_Distance_To_Fire_Points,
Horizontal_Distance_To_Hydrology, Hillshade_3pm, Hillshade_9am)) |
84e6b798102977c98d181a018247f72ad092d4f6 | 16d40a7332a4bcfebcc2d21842f0fc71cef36e46 | /Análisis Exploratorio de Datos/Clase 1/Clase 1.R | 5b361be781ead8e10002e2c0187c973b0e8dac7c | [] | no_license | primo1594/Maestr-a-en-Ciencia-de-Datos | bce42981d6d2dadf9c2947bc6b4e21f554542a81 | 3f2c10db047f5a349661731fddaea640bd9e1e0e | refs/heads/master | 2020-04-16T07:57:16.657613 | 2019-05-11T15:52:23 | 2019-05-11T15:52:23 | 165,405,351 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,001 | r | Clase 1.R | data<-read.table("custdata.tsv", sep = '\t', header = TRUE)
head(data)
plot(data)
summary(data)
library(dplyr)
data2<-filter(data,data$health.ins==F)
summary(data2)
library(ggplot2)
ggplot(data, aes(x=" ",y=data$income))+
geom_boxplot()+
geom_jitter(width=0.2)+ylab("yearly income in thousands")
+xlab("# of customers")
ggplot(data,aes(x=data$age))+
geom_histogram()+xlab("Customer age")+
geom_density()+
geom_vline(aes(xintercept=mean(age)),color="blue",linetype="dashed",size=1)
?geom_density
intervalosIncome = c(100,1000,10000,100000)
sapply(intervalosIncome,log10)
library(scales)
ggplot(data)+geom_density(aes(x<-income))+scale_x_log10(breaks=c(100,1000,10000,100000),labels=dollar)+annotation_logticks(sides="bt")
ggplot(data)+geom_density(aes(x<-log10(income)))
tabla<-table(data$sex)
prop.table(tabla)
# Return the most frequent value (statistical mode) of a vector.
# Ties are broken in favour of the value that appears first in `v`.
getmode <- function(v) {
  distinct_vals <- unique(v)
  counts <- tabulate(match(v, distinct_vals))
  distinct_vals[which.max(counts)]
}
# Modal state of residence, using getmode() defined above.
getmode(data$state.of.res)
# Bar chart of marital status.
ggplot(data)+geom_bar(aes(x=data$marital.stat),fill="gray")
|
872bcc4a8742be2cb457433a24017b7b0a53bc6b | a1241d111c801c927dc800722e82efd2329c1474 | /man/visit_occurrence_data.Rd | c17516deafe2936bae931a0914db045e0c235dd1 | [] | no_license | zkzofn/GEMINI | 819203296a8e6181aac2e8dfae868ee7a3e6c69b | 90cea036dc9fe851032c53dd3e85fb922aac7f6f | refs/heads/master | 2020-05-16T15:58:49.945475 | 2019-03-28T01:54:38 | 2019-03-28T01:54:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 313 | rd | visit_occurrence_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visit_occurrence_data.R
\name{visit_occurrence_data}
\alias{visit_occurrence_data}
\title{visit occurrence data}
\usage{
visit_occurrence_data()
}
\description{
This function extract data from visit occurrence table
}
\keyword{gemini}
|
0a0dbfa34beba7f377f0f634cbb2e3200382ab9d | d80c94901adad9211f8cffb782d0e91e9860299b | /man/goccu.Rd | ef9f08c2111a9bc4ed69571be2d82dd69dd5d607 | [] | no_license | rbchan/unmarked | b104daaa4d05178e5be74a509cc771ae5f246e55 | ac40eede793b0997209117080912a866a2ff18ae | refs/heads/master | 2023-08-16T22:44:39.138574 | 2023-08-11T20:21:52 | 2023-08-11T20:21:52 | 564,481 | 30 | 36 | null | 2023-08-11T20:21:53 | 2010-03-16T03:57:15 | R | UTF-8 | R | false | false | 3,062 | rd | goccu.Rd | \name{goccu}
\alias{goccu}
\title{
Fit multi-scale occupancy models
}
\description{
Fit multi-scale occupancy models as described in Nichols et al. (2008) to
repeated presence-absence data collected using the robust design. This model
allows for inference about occupancy, availability, and detection probability.
}
\usage{
goccu(psiformula, phiformula, pformula, data, linkPsi = c("logit", "cloglog"),
starts, method = "BFGS", se = TRUE, ...)
}
\arguments{
\item{psiformula}{
Right-hand sided formula describing occupancy covariates
}
\item{phiformula}{
Right-hand sided formula describing availability covariates
}
\item{pformula}{
Right-hand sided formula for detection probability covariates
}
\item{data}{
An object of class unmarkedFrameGOccu or unmarkedMultFrame
}
\item{linkPsi}{Link function for the occupancy model. Options are
\code{"logit"} for the standard occupancy model or \code{"cloglog"}
for the complimentary log-log link, which relates occupancy
to site-level abundance.
}
\item{starts}{
Starting values
}
\item{method}{
Optimization method used by \code{\link{optim}}
}
\item{se}{
Logical. Should standard errors be calculated?
}
\item{\dots}{
Additional arguments to \code{\link{optim}}, such as lower and upper
bounds
}
}
\details{
Primary periods could represent spatial or temporal sampling replicates.
For example, you could have several spatial sub-units within each site, where each
sub-unit was then sampled repeatedly. This is a frequent design for eDNA studies.
Or, you could have multiple primary periods of sampling at each site
(conducted at different times within a season), each of which contains
several secondary sampling periods. In both cases the robust design structure
can be used to estimate an availability probability in addition to
detection probability. See Kery and Royle (2015) 10.10 for more details.
}
\value{
An object of class unmarkedFitGOccu
}
\references{
Kery, M., & Royle, J. A. (2015). Applied hierarchical modeling in ecology:
Volume 1: Prelude and static models. Elsevier Science.
Nichols, J. D., Bailey, L. L., O'Connell Jr, A. F., Talancy, N. W.,
Campbell Grant, E. H., Gilbert, A. T., Annand E. M., Husband, T. P., & Hines, J. E.
(2008). Multi-scale occupancy estimation and modelling using multiple detection methods.
Journal of Applied Ecology, 45(5), 1321-1329.
}
\author{
Ken Kellner \email{contact@kenkellner.com}
}
\seealso{
\code{\link{occu}}, \code{\link{colext}},
\code{\link{unmarkedMultFrame}}, \code{\link{unmarkedFrameGOccu}}
}
\examples{
set.seed(123)
M <- 100
T <- 5
J <- 4
psi <- 0.5
phi <- 0.3
p <- 0.4
z <- rbinom(M, 1, psi)
zmat <- matrix(z, nrow=M, ncol=T)
zz <- rbinom(M*T, 1, zmat*phi)
zz <- matrix(zz, nrow=M, ncol=T)
zzmat <- zz[,rep(1:T, each=J)]
y <- rbinom(M*T*J, 1, zzmat*p)
y <- matrix(y, M, J*T)
umf <- unmarkedMultFrame(y=y, numPrimary=T)
\dontrun{
mod <- goccu(psiformula = ~1, phiformula = ~1, pformula = ~1, umf)
plogis(coef(mod))
}
}
|
db5b8c08d510f1ab74a7ddac1602ddcc5b4d8011 | 3a7274f84c17fbaf0d27c791bcc217a9bfba1bef | /4.28.R | 624d2b1077674550f61225e785a0436dcc353753 | [] | no_license | kyh31876/R-studio | 75f5fbcb4fd7331668f45b6b0bba81a7a4f7a29c | 6408b5cd5df45b431049d0e20aea399b640082fb | refs/heads/main | 2023-05-09T14:09:15.314846 | 2021-06-02T04:08:45 | 2021-06-02T04:08:45 | 346,007,723 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,780 | r | 4.28.R | install.packages("readxl")
library(readxl)
install.packages("dplyr")
library(dplyr)
#4번
exam_4 <- read_excel("C:/Users/OWNER/Downloads/smoke.xlsx")
#a)
table(exam_4$Gender)
#b)
exam_4$BMI=(exam_4$Weight/((exam_4$Height)^2))
mean(exam_4$BMI)
sd(exam_4$BMI)
#c)
exam_4 %>%
filter(Age >30,Gender=="F") %>%
group_by(Gender) %>%
summarise(median = median(Height))
#d)
exam_4 %>%
filter(HowLong >10 | Cigarettes >10) %>%
select(Gender) -> exam_4_1
table(exam_4_1)
#5번
exam_5 <- read.csv("C:/Users/OWNER/Downloads/서울교통공사_관할역별_일별_시간대별_이용인원_20181231.csv")
#a)
exam_51<-exam_5 %>%
select(X08...09) %>%
mutate(sum1=sum(X08...09))
exam_52<-exam_5 %>%
select(X18...19) %>%
mutate(sum2=sum(X18...19))
which(exam_51$sum1 >exam_52$sum2) #오후 6시 부터 9시이용객이 더크다
#b)
exam_5 %>%
filter(구분.1=="승차") -> exam_5up
exam_5 %>%
filter(구분.1=="하차") -> exam_5down
sum(exam_5up$합.계) #승차인원
sum(exam_5down$합.계)#하차인원
#c)
exam_5 %>%
select(역명,X08...09) %>%
arrange(desc(X08...09)) %>%
head(3)#가장 붐비는
exam_5 %>%
select(역명,X08...09) %>%
arrange(desc(X08...09)) %>%
tail(3)#가장 한적한
#d)
exam_5 %>%
filter(역명=="고속터미널역",구분=="평일") %>%
select(역명,구분,합.계) %>%
summarise(mean=mean(합.계))
exam_5 %>%
filter(역명=="고속터미널역",구분=="휴일") %>%
select(역명,구분,합.계) %>%
summarise(mean=mean(합.계))
exam_5 %>%
filter(역명=="고속터미널역",구분=="토") %>%
select(역명,구분,합.계) %>%
summarise(mean=mean(합.계))
#e)
exam_5 %>%
select(합.계,호선) %>%
arrange(desc(합.계)) ->exam_5sub
head(exam_5sub)
|
6d43df78db9259d57fda586610c7d5abf83bdbb9 | ab61691ce9b799bb097e5f2233140bb6350ae51b | /man/get_avatar.Rd | a7df4bf45d2652627b7bde6d995de1afa17f01b0 | [] | no_license | klapaukh/tumblR | a4c929de30574484bdf907a31ccc9b81b3d675ad | 7a98ca33c18e141c0c2433e58da2435ea7a25acb | refs/heads/master | 2021-01-19T16:59:03.471819 | 2014-02-22T03:25:31 | 2014-02-22T03:25:31 | 16,942,229 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 429 | rd | get_avatar.Rd | \name{get_avatar}
\alias{get_avatar}
\title{Get an blogs avatar}
\usage{
get_avatar(blog, size = 64)
}
\arguments{
\item{blog}{The blog url}
\item{size}{Size of the image to get (16, 24, 30, 40, 48,
64, 96, 128, 512)}
}
\description{
Get the avatar of a blog in a range of sizes. The allowed
sizes are 16, 24, 30, 40, 48 ,64, 96, 128, 512.
}
\examples{
get_avatar("staff.tumblr.com")
get_avatar("staff.tumblr.com",512)
}
|
1755a3fc94058155fc56561a75841f621d01cb14 | 186908f9645ce49d0e008968a57db6a9989c79ba | /inst/doc/estimate_RACS_properties.R | 32b46876bc5c5cd84f5faba136366a48d4071edb | [] | no_license | cran/lacunaritycovariance | 93cc93364dfd128ededd21c4b6dc21000edea5d1 | 6ad7f8fca8b8608c3a0336a744ffd533c2eca2b0 | refs/heads/master | 2023-03-06T04:51:29.485917 | 2023-02-08T07:12:33 | 2023-02-08T07:12:33 | 198,593,815 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,590 | r | estimate_RACS_properties.R | ### R code from vignette source 'estimate_RACS_properties.Rnw'
###################################################
### code chunk number 1: loadbinarymap
###################################################
library(lacunaritycovariance)
load(system.file("extdata/egbinarymap.RData",
package="lacunaritycovariance"))
# the following converts egbinarymap to logically-valued pixels
egbinarymap <- as.im(egbinarymap, eps = egbinarymap$xstep*8)
egbinarymap <- eval.im(egbinarymap > 0.5)
plot(egbinarymap,
col = c("grey", "black"),
main = "The Binary Map")
###################################################
### code chunk number 2: coverageprobability
###################################################
phat <- coverageprob(egbinarymap)
phat
###################################################
### code chunk number 3: estimatecovariance
###################################################
cvchat <- racscovariance(egbinarymap, estimators = "pickaH",
drop = TRUE)
plot(cvchat, main = "Estimated RACS Covariance", axes = TRUE)
###################################################
### code chunk number 4: paircorr
###################################################
pclnest <- eval.im(cvchat / (phat^2))
plot(pclnest, main = "Pair Correlation Estimate")
###################################################
### code chunk number 5: gblestimation
###################################################
gblest <- gbl(xi = egbinarymap, seq(1, 200/4, by = 1),
estimators = c("GBLcc.pickaH", "GBLemp"))
plot(gblest[[1]], main = "GBL Estimate")
|
180849843c6f29ce6bb5db59ae15c5c68f5a5ee4 | 43f6d2a89e611f49d1bff870e6381fa182184ce2 | /man/calc_AICc_vals.Rd | 1e91813315c58e9ed4ef874cf1bb241a8bc189bb | [] | no_license | pedroreys/BioGeoBEARS | 23bab5299c44b4cfa2ab0e9dbe0de4ecf2196f69 | 9aef25ebf57b854e6f02d5a3a2ca420e31833123 | refs/heads/master | 2021-01-17T17:12:08.479890 | 2013-08-04T02:35:28 | 2013-08-04T02:35:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,018 | rd | calc_AICc_vals.Rd | \name{calc_AICc_vals}
\alias{calc_AICc_vals}
\title{Calculate AICc values for a list of models}
\usage{
calc_AICc_vals(LnL_vals, nparam_vals, samplesize)
}
\arguments{
\item{LnL_vals}{A vector of log-likelihoods (typically
negative, but may not be for continuous data).}
\item{nparam_vals}{A vector of the number of parameters
for each model.}
\item{samplesize}{A single samplesize, or a vector of the
samplesizes, one for each model. However, samplesize should
always be the same for all comparisons, since maximum
likelihood and AIC/AICc model-selection methods are
always comparing different models on the \emph{same}
data, not different data on the same model.
}
\value{
\code{AICc_vals} A vector of AICc results.
}
\description{
A list of AICc values (second order Akaike Information
Criterion) is calculated from two input lists. Lower
values of AICc indicate some combination of better fit to
the data and more parsimony in the model (fewer free
parameters). AICc contains a correction for sample size.
}
\details{
The two input lists are:
\bold{1.} A list of data likelihoods under a variety of
models.\cr \bold{2.} A list of the number of free
parameters under each model.\cr
\code{samplesize} can be a scalar or vector; but see
below.
See \cite{Burnham et al. (2002)} and
\url{http://www.brianomeara.info/tutorials/aic} for
discussion of AIC, AICc and their uses.
}
\note{
Go BEARS!
}
\examples{
LnL_vals = c(-34.5, -20.9)
nparam_vals = c(2, 3)
calc_AICc_vals(LnL_vals, nparam_vals, samplesize=20)
LnL_vals = c(-20.9, -20.9, -20.9, -20.9)
nparam_vals = c(3, 4, 5, 6)
calc_AICc_vals(LnL_vals, nparam_vals, samplesize=20)
}
\author{
Nicholas J. Matzke \email{matzke@berkeley.edu}
}
\references{
\url{http://phylo.wikidot.com/matzke-2013-international-biogeography-society-poster}
\url{http://www.brianomeara.info/tutorials/aic}
Burnham_Anderson_2002
Matzke_2012_IBS
}
\seealso{
\code{\link{calc_AIC_vals}},
\code{\link{calc_AICc_column}}
}
|
3b78993d74f07e4ff47d0180f7006f639a10c049 | ef2bbc765a62c44ca93a586fbc6b0e8232048bfe | /height_predictor.R | 34763f907a7026d8a0765b2a3e779d20b2849434 | [] | no_license | jayzuniga/R | 460e814bd193907770b037871b963b19c485b06a | 7f62122bb73f6eb75ae4a98cd3e9c9c32d93b9f3 | refs/heads/master | 2021-07-17T18:25:45.326958 | 2017-10-25T18:58:22 | 2017-10-25T18:58:22 | 106,640,679 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 527 | r | height_predictor.R | predict_height <- function(sex,
dad_height,
mom_height) {
total_height = dad_height + mom_height
if ( sex = "M") {
total_height = total_height + 5
} else {
total_height = total_height -5
}
return total_height/2
predict_height <- function(sex, dad_height, mom_height) {
tot_height <- dad_height + mom_height
if (sex == "") {
tot_heigh <- tot_height + 5
} else {
tot_height <- tot_height - 5
}
return tot_height/2
}
}
|
5df1c14aa63af6cacfb7040ad88de9f9b64bf611 | 62504fe2d38b03b23010d79a33be8e775d3ccbdf | /scripts/heatmap_subset_of_Results.R | ebe4fe551539bf66827fe1eb4f0ce5577a5f732c | [] | no_license | orshalevsk/GWAS_project | fa6531b6bdd807b623ea094e66f19ffa3619de9a | 4333bc668174d85a7b985eb90fe6455829471c95 | refs/heads/main | 2023-08-06T11:14:56.844273 | 2021-10-08T08:58:41 | 2021-10-08T08:58:41 | 371,060,239 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,862 | r | heatmap_subset_of_Results.R | '''
Presenting significant results in heatmap, clustered by the collection of strains of interest.
'''
library("devtools")
library("lme4")
library("lmtest")
library(ggplot2)
library(MASS)
library(FSA)
library(agricolae)
require(vegan)
library(multcomp)
require(nlme)
library(randomForest)
library(tidyr)
library(treeWAS)
library(ape)
library(adegenet)
#library(emma)
library(gplots)
library(plyr)
library(RColorBrewer)
library(tiger)
# read experimental results and metadata
dataset_GWAS <- read.csv2("full_dataset_updated_20190415.csv")
# drop "flexible" and unnamed treatments
dataset_GWAS <- dataset_GWAS[!dataset_GWAS$treatment%in%c("flexible",""),]
metadata <- read.csv2("../../FINAL_Pseudomonas_Phyletic_Pattern_all_geneClusters.Clusterd_by_Jaccard_cd_hit_like.0.99.rep_strains.NonOTU5.csv")
# harmonise treatment labels: every strain treatment gets a "+p" suffix
# except "pathogen_only" and "control"
metadata$GWAS_index <- as.character(metadata$GWAS_index)
metadata$GWAS_index[!metadata$GWAS_index%in%c("pathogen_only","control")] <- paste(metadata$GWAS_index[!metadata$GWAS_index%in%c("pathogen_only","control")],"+p",sep = "")
# map each treatment to its strain name via the metadata table
# (adds a new "strain" column, which becomes the last column of dataset_GWAS)
for (treat in unique(dataset_GWAS$treatment)){
  dataset_GWAS$strain[dataset_GWAS$treatment==treat] <- as.character(metadata$strain[metadata$GWAS_index==treat])
}
#re-arrange the pxls data to fit R growth models
# (wide per-dpi pixel columns -> long format: keep columns 1:7 plus the last
#  column ("strain" added above), then stack one copy per time point with a
#  single "pxls" value column and a "dpi" label)
dataset_GWAS_nopxls <- dataset_GWAS[,c(1:7,length(dataset_GWAS))]
dataset_GWAS_nopxls_7_no <- cbind(dataset_GWAS_nopxls,"pxls"=dataset_GWAS$X7dpi_nolid)
dataset_GWAS_nopxls_7_no$dpi <- "7_nolid"
dataset_GWAS_nopxls_7 <- cbind(dataset_GWAS_nopxls,"pxls"=dataset_GWAS$X7dpi)
dataset_GWAS_nopxls_7$dpi <- "7"
dataset_GWAS_nopxls_6 <- cbind(dataset_GWAS_nopxls,"pxls"=dataset_GWAS$X6dpi)
dataset_GWAS_nopxls_6$dpi <- "6"
dataset_GWAS_nopxls_5 <- cbind(dataset_GWAS_nopxls,"pxls"=dataset_GWAS$X5dpi)
dataset_GWAS_nopxls_5$dpi <- "5"
dataset_GWAS_nopxls_4 <- cbind(dataset_GWAS_nopxls,"pxls"=dataset_GWAS$X4dpi)
dataset_GWAS_nopxls_4$dpi <- "4"
dataset_GWAS_nopxls_1 <- cbind(dataset_GWAS_nopxls,"pxls"=dataset_GWAS$X1dpi)
dataset_GWAS_nopxls_1$dpi <- "1"
dataset_GWAS_nopxls_0 <- cbind(dataset_GWAS_nopxls,"pxls"=dataset_GWAS$X0dpi)
dataset_GWAS_nopxls_0$dpi <- "0"
dataset_GWAS <- rbind(dataset_GWAS_nopxls_7_no,dataset_GWAS_nopxls_7,dataset_GWAS_nopxls_6,dataset_GWAS_nopxls_5,
                      dataset_GWAS_nopxls_4,dataset_GWAS_nopxls_1,dataset_GWAS_nopxls_0)
# creating 7-0 pxls dataset, to bin by OTU
# (the dpi==7 and dpi==0 slices come from the same base frame in the same
#  row order, so the elementwise subtraction aligns plant-by-plant)
dataset_GWAS$pxls_7dpi_minus_0dpi[dataset_GWAS$dpi==7] <- dataset_GWAS$pxls[dataset_GWAS$dpi==7]-dataset_GWAS$pxls[dataset_GWAS$dpi==0]
dataset_GWAS_delta_pixels <- dataset_GWAS[dataset_GWAS$dpi==7,]
dataset_GWAS_delta_pixels <- dataset_GWAS_delta_pixels[!dataset_GWAS_delta_pixels$treatment%in% c("boiled_strain_17+p","control"),] # remove boiled protector and control
# per-strain median of the (7dpi - 0dpi) pixel difference = growth phenotype
for (strain in unique(dataset_GWAS_delta_pixels$strain)){
  dataset_GWAS_delta_pixels$median_delta_pxls[dataset_GWAS_delta_pixels$strain==strain] <- (median(dataset_GWAS_delta_pixels$pxls_7dpi_minus_0dpi[dataset_GWAS_delta_pixels$strain==strain]))
}
# read phyletic pattern (a bit long)
phy_pattern <- read.csv("/Volumes/small_projects/hashkenazy/Pseudomonas/PhyleticPatterns/Pseudomonas_Phyletic_Pattern_all_geneClusters.csv")
phy_pattern_my_strains <- phy_pattern[phy_pattern$Species%in%unique(dataset_GWAS$strain[!dataset_GWAS$treatment%in%c("boiled_strain_17+p","control")]),] # subset to my strains only
phy_pattern_my_strains$median_delta_pxls <- dataset_GWAS_delta_pixels$median_delta_pxls[match(phy_pattern_my_strains$Species, dataset_GWAS_delta_pixels$strain)] # add the phenotype (median pxls 7dpi minus 0 dpi) to phyletic pattern variable
#### chose OG of interest
subset_siginifact_OGs <- read.csv2("~/ownCloud/My papers/bacterial_GWAS_paper/figures/Final_figures/Figure3/OTU2_significant_OGs.csv", header = F) #subset to best OTU2 hits (signal from all strains)
# Add OTU for subsequent subsetting (e.g. heatmap of only OTU2 strains)
dataset_GWAS_delta_pixels$OTU <- metadata$O_T_U[match(dataset_GWAS_delta_pixels$strain,metadata$strain)]
dataset_GWAS_delta_pixels$OTU <- as.character(dataset_GWAS_delta_pixels$OTU)
dataset_GWAS_delta_pixels$OTU[dataset_GWAS_delta_pixels$treatment=="control"] <- "Bacteria free" #adding "OTU" level to control for subsequent plotting
dataset_GWAS_delta_pixels$OTU[dataset_GWAS_delta_pixels$OTU==""] <- "OTU5" # adding OTU5
#########plotting heatmap, using Haims script. a function so i can do it repeatedly
ScriptPath="~/ownCloud/My papers/bacterial_GWAS_paper/scripts/"
source (paste (ScriptPath,"plot_phyletic_and_trait.R",sep=""))
#store all possible trees path
tree_phyl_all_strains <- "/Volumes/small_projects/oshalev/GWAS_project/core_genome_my_strains_aligned_no_p4.C9.fasta.treefile"
tree_phyl_non_OTU2_strains <- "/Volumes/small_projects/oshalev/GWAS_project/core_genome_non_OTU2_strains_aligned.fasta.treefile"
tree_phyl_OTU2_strains <- "/Volumes/small_projects/oshalev/GWAS_project/core_genome_OTU2_strains_aligned.fasta.treefile"
tree_phyl_OTU4_strains <- "/Volumes/small_projects/oshalev/GWAS_project/core_genome_OTU4_strains_aligned.fasta.treefile"
tree_phyl_OTU3_strains <- "/Volumes/small_projects/oshalev/GWAS_project/core_genome_OTU3_strains_aligned.fasta.treefile"
#choose OTU to work with
if (OTU_of_interest=="all"){
tree= tree_phyl_all_strains
OTU <- paste("OTU",c(11,2,3,4,6,7,8,9,10,5,"_NEW"),sep = "")
} else if (OTU_of_interest=="OTU2"){
tree= tree_phyl_OTU2_strains
OTU <- "OTU2"
} else if (OTU_of_interest=="OTU3"){
tree= tree_phyl_OTU3_strains
OTU <- "OTU3"
} else if (OTU_of_interest=="OTU4"){
tree= tree_phyl_OTU4_strains
OTU <- "OTU4"
}
strains_by_OTU <- unique(dataset_GWAS_delta_pixels$strain[dataset_GWAS_delta_pixels$OTU %in% OTU])
strains_by_OTU <- strains_by_OTU[!is.na(strains_by_OTU)]
phy_pattern_my_strains_by_OTU <- phy_pattern_my_strains[phy_pattern_my_strains$Species %in% strains_by_OTU,]
OTU <- metadata$O_T_U[metadata$strain %in% strains_by_OTU & !(metadata$GWAS_index%in%c("control","boiled_strain_17+p"))]
strain <- metadata$strain[metadata$strain %in% strains_by_OTU & !(metadata$GWAS_index%in%c("control","boiled_strain_17+p"))]
OTU <- as.character(OTU)
OTU[OTU==""] <- "OTU5"
OTU_strain <- data.frame("strain"=strain,"OTU"=OTU)
# build a phyletic pattern of only chosen OGs
best_hits_phy_pattern <- phy_pattern_my_strains_by_OTU[,as.character(sig_hits[[1]])]
row.names(best_hits_phy_pattern) <- phy_pattern_my_strains_by_OTU$Species
#create a phenotype df with colors by the phenotype (required from this script, made by Haim)
my_pheno <- as.data.frame(phy_pattern_my_strains_by_OTU[,phenotype_continious])
colnames(my_pheno) <- "phenotype"
row.names(my_pheno) <- phy_pattern_my_strains_by_OTU$Species
my_pheno <- my_pheno[order(my_pheno$phenotype), , drop=F]
#OPTIONAL: if green pixels of cdl50 is less than 0 => make it 0
#my_pheno$phenotype[my_pheno$phenotype<0] <- 0
my_pheno$color <- color.factor("darkgreen",(my_pheno$phenotype-min(my_pheno$phenotype))^3, max(my_pheno$phenotype-min(my_pheno$phenotype))^3)
# color tree branches by OTU
#plotting (this will create a file)
plot_phyletic_stretch_and_trait(PhyP.df = as.matrix(best_hits_phy_pattern), TreeFile = tree, Traits_Colors_df = my_pheno,outFile = output_file_path,dend_height = 10,dend_width = 7)
}
# Generate the two heatmaps: all strains, then OTU2 strains only.
# NOTE(review): the output_path assignments are commented out ("#!"), so the
# calls below fail unless output_path is defined elsewhere -- confirm intent.
#! output_path="~/ownCloud/My papers/bacterial_GWAS_paper/figures/Final_figures/Figure3/heatmap_OTU2_sig_all_strains_temp.pdf"
heatmap_sig_OGs(OTU_of_interest = "all",sig_hits = subset_siginifact_OGs,phenotype_continious = "median_delta_pxls",
                phy_pattern_my_strains = phy_pattern_my_strains, output_file_path = output_path)
#! output_path="~/ownCloud/My papers/bacterial_GWAS_paper/figures/Final_figures/Figure3/heatmap_OTU2_sig_OTU2_strains.pdf"
heatmap_sig_OGs(OTU_of_interest = "OTU2",sig_hits = subset_siginifact_OGs,phenotype_continious = "median_delta_pxls", phy_pattern_my_strains = phy_pattern_my_strains, output_file_path = output_path)
######growth scatter plot by number of significant OGs
# now lets scatter plot this, using mean or median
# Five-number summary for a boxplot-style display: the minimum, the mean
# minus/plus one standard error of the mean (SEM), the mean itself, and the
# maximum, named as ggplot2's stat_summary expects
# ("ymin", "lower", "middle", "upper", "ymax").
MinMeanSEMMax <- function(x) {
  center <- mean(x)
  sem <- sd(x) / sqrt(length(x))
  c(ymin = min(x),
    lower = center - sem,
    middle = center,
    upper = center + sem,
    ymax = max(x))
}
#now lets create the dataframe that can show mean/median + error bars
# (one row per dpi x treatment, holding the median pixel value plus sd/sem)
# Seed with a single all-NA row; rows are appended inside the loop. The NA
# seed row has dpi == NA, so it is dropped by the %in% filter further below.
scatter_weight_df <- data.frame(t(rep(NA,5)))
full_dataset <- dataset_GWAS
for (dpi in unique(full_dataset$dpi)){
  for (treat in unique(full_dataset$treatment)){
    mean_per_treat_per_gen <- full_dataset$pxls[full_dataset$dpi==dpi & full_dataset$treatment==treat]
    # despite the variable name, the central value stored is the MEDIAN
    mean_pxls <- median(mean_per_treat_per_gen[!is.na(mean_per_treat_per_gen)])
    sd_pxls <- sd(mean_per_treat_per_gen[!is.na(mean_per_treat_per_gen)])
    sem_pxls <- sd_pxls/sqrt(length(mean_per_treat_per_gen[!is.na(mean_per_treat_per_gen)]))
    scatter_weight_df <- rbind(scatter_weight_df,c(mean_pxls,sd_pxls,sem_pxls,dpi,treat))
  }
}
# rbind() above coerced everything to character; restore column types
colnames(scatter_weight_df) <- c("pxls_average","sd","sem","dpi","treatment")
scatter_weight_df$pxls_average <- as.numeric(scatter_weight_df$pxls_average)
scatter_weight_df$sd <- as.numeric(scatter_weight_df$sd)
scatter_weight_df$sem <- as.numeric(scatter_weight_df$sem )
scatter_weight_df$dpi <- as.factor(scatter_weight_df$dpi)
scatter_weight_df$treatment <- as.factor(scatter_weight_df$treatment)
# keep numeric dpi only (drops "7_nolid" and the NA seed row), attach strains,
# and remove control treatments
scatter_weight_df_subset <- scatter_weight_df[scatter_weight_df$dpi%in%c("0","1","4","5","7","6") ,]
scatter_weight_df_subset$strain <- metadata$strain[match(scatter_weight_df_subset$treatment,metadata$GWAS_index)]
scatter_weight_df_subset <- scatter_weight_df_subset[!scatter_weight_df_subset$treatment %in% c("boiled_strain_17+p","pathogen_only", "control"),]
#strating with OTU2
chosen_OTU <- "OTU2"
# per strain: how many of the significant OGs it carries (row sum over the
# 0/1 phyletic pattern of the chosen OGs)
phy_pattern_best_hits <- phy_pattern_my_strains[,c(as.character(subset_siginifact_OGs[[1]]),"Species")]
rownames(phy_pattern_my_strains) <- phy_pattern_my_strains$Species
phy_best_hits_sums <- data.frame("strain"=phy_pattern_best_hits$Species,"number_of_genes"= as.numeric(rowSums(phy_pattern_best_hits[,subset_siginifact_OGs[[1]]])))
scatter_weight_df_subset$number_of_genes <- phy_best_hits_sums$number_of_genes[match(scatter_weight_df_subset$strain,phy_best_hits_sums$strain)]
scatter_weight_df_subset$number_of_genes[is.na(scatter_weight_df_subset$number_of_genes)] <- 0
scatter_weight_df_subset$OTU <- metadata$O_T_U[match(scatter_weight_df_subset$strain,metadata$strain)]
# Growth curves, one line per treatment, coloured by number of significant OGs.
# NOTE(review): each geom_line()/geom_point() layer is added twice (once bare,
# once with an explicit aes) -- the duplicates are redundant; confirm.
#! pdf("~/ownCloud/My papers/bacterial_GWAS_paper/figures/Final_figures/Figure3/OTU2_sig_OG_growth_scatter_plot.pdf")
ggplot(aes(x=dpi, y=pxls_average, group=treatment, color=number_of_genes),
       data = scatter_weight_df_subset[scatter_weight_df_subset$OTU==chosen_OTU,]) +
  geom_line() +
  geom_line(aes(x=dpi, y=pxls_average, group=treatment, color=number_of_genes)) +
  geom_point()+
  geom_point(aes(x=dpi, y=pxls_average, group=treatment, color=number_of_genes)) +
  scale_colour_gradient(low = "grey87", high = "#756bb1")+
  theme_bw() +
  ylab(label = "median pixels") +
  xlab(label = "dpi") +
  theme(legend.position="none")
#! dev.off()
# Same plot for all strains (no OTU filter).
#! pdf("~/ownCloud/My papers/bacterial_GWAS_paper/figures/Final_figures/Figure3/OTU2_sig_OG_growth_scatter_plot_all_strains.pdf")
ggplot(aes(x=dpi, y=pxls_average, group=treatment, color=number_of_genes),
       data = scatter_weight_df_subset) +
  geom_line() +
  geom_line(aes(x=dpi, y=pxls_average, group=treatment, color=number_of_genes)) +
  geom_point()+
  geom_point(aes(x=dpi, y=pxls_average, group=treatment, color=number_of_genes)) +
  scale_colour_gradient(low = "grey87", high = "#756bb1")+
  theme_bw() +
  ylab(label = "median pixels") +
  xlab(label = "dpi") +
  theme(legend.position="none")
#! dev.off()
6652be9e3e662dc01e5a56f3dc48af72fbc9c0a9 | e6f4bf5ad733a79c4743440010c771b5171ccbae | /Documents/DataScience/GandC/run_analysis.R | 090135f3da95cfb30c7dbe083b32462604df9ae4 | [] | no_license | gmlander/getting-and-cleaning-data-project | 8008f36e85d1689cfd274aff25707773be4d6194 | 4cd0baf68cc5acfa01a2e810ceca3234b6e83ba8 | refs/heads/master | 2021-01-10T10:48:28.352577 | 2018-05-27T05:35:09 | 2018-05-27T05:35:09 | 51,799,276 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,227 | r | run_analysis.R | # READ each of the 8 txt files into individual data.frames
#Training datasets
xTrain <- read.table("UCI HAR Dataset/train/X_train.txt")
yTrain <- read.table("UCI HAR Dataset/train/y_train.txt")
subjectTrain <- read.table("UCI HAR Dataset/train/subject_train.txt")
#Testing datasets
xTest <- read.table("UCI HAR Dataset/test/X_test.txt")
yTest <- read.table("UCI HAR Dataset/test/y_test.txt")
subjectTest <- read.table("UCI HAR Dataset/test/subject_test.txt")
#Labelling datasets
features <- read.table("UCI HAR Dataset/features.txt", colClasses = c("character"))
activities <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("ActivityId", "Activity"))
#--------------------------------------------------------
# 1
# MERGE training and test data.frames into one data.frame
# add subject ID and activity ID to xTrain and xTest, store results as trainData and testData
trainData <- cbind(subjectTrain, yTrain, xTrain)
testData <- cbind(subjectTest, yTest, xTest)
# merge train data and test data.
data <- rbind(trainData, testData)
# add column labels from second column of features data.frame
names(data) <- c("SubjectId", "ActivityId", features[,2])
#----------------------------------------------------------------
# 2
# EXTRACT mean and standard deviation measurements from data by subsetting
# data by columns 1 & 2 (SubjectId and ActivityId) plus any columns with std
# or mean in its name.
meanSD <- data[,c(1,2,grep("std|mean", names(data), value = FALSE))]
#----------------------------------------------------------------
# 3
# NAMES activities in the dataset with descriptive activity names. Uses
# the activities data.frame to create factor levels and labels for meanSD$ActivityId.
meanSD$ActivityId <- factor(meanSD$ActivityId, levels = activities$ActivityId, labels=activities$Activity)
#--------------------------------------------------------------------
# 4.
# LABELS the data with descriptive variable names:
# mean() --> Mean
names(meanSD) <- gsub("mean\\(\\)", "Mean", names(meanSD))
# std() --> SD
names(meanSD) <- gsub("std\\(\\)", "SD", names(meanSD))
# t --> Time
names(meanSD) <- gsub("^t", "Time", names(meanSD))
# f --> Frequency
names(meanSD) <- gsub("^f", "Frequency", names(meanSD))
# "-" --> "_"
names(meanSD) <- gsub("-", "_", names(meanSD))
#-----------------------------------------------------------------------
# 5
# CREATE tidy data set with the average of each variable for each activity and each subject.
# use ddply() to take the mean of each variable grouped by SubjectId and ActivityId.
# Use numcolwise(mean) as FUN arg of ddply so that only numeric variables are averaged.
library(plyr)
meanSD_grouped <- ddply(meanSD, .(SubjectId, ActivityId), numcolwise(mean))
names(meanSD_grouped)[-c(1,2)] <- paste0("Mean_", names(meanSD_grouped)[-c(1,2)])
# Write meanSD_grouped to tidy_grouped_means.txt.
write.table(meanSD_grouped, file = "tidy_grouped_means.txt")
# Optional -- 1: Write variable names for code book use.
# writeLines(names(meanSD_grouped), "codeNames.txt")
# Optional -- 2:
# - Inspect tidy_grouped_means.txt
# - force self to open eyes when looking
# - confirm file not jumbled dog doo
# - Success! Breathe sigh, drink beer. |
c0e56934e577a6c33be4e9d852e748933949cc17 | 2b5728585d67ad9f0210a21189459a1515faa72f | /R/scatterPlot.R | af1c50d7f027e4cb8696bc3bb7dbae95e8808872 | [] | no_license | Matherion/userfriendlyscience | 9fb8dd5992dcc86b84ab81ca98d97b9b65cc5133 | 46acf718d692a42aeebdbe9a6e559a7a5cb50c77 | refs/heads/master | 2020-12-24T16:35:32.356423 | 2018-09-25T06:41:14 | 2018-09-25T06:41:14 | 49,939,242 | 15 | 9 | null | 2018-11-17T10:34:37 | 2016-01-19T08:50:54 | R | UTF-8 | R | false | false | 4,222 | r | scatterPlot.R | #' Easy ggplot2 scatter plots
#'
#' This function is intended to provide a very easy interface to generating
#' pretty (and pretty versatile) \code{\link{ggplot}} scatter plots.
#'
#' Note that if \code{position} is set to 'jitter', unless \code{width} and/or
#' \code{height} is set to a non-zero value, there will still not be any
#' jittering.
#'
#' @param x The variable to plot on the X axis.
#' @param y The variable to plot on the Y axis.
#' @param pointsize The size of the points in the scatterplot.
#' @param theme The theme to use.
#' @param regrLine Whether to show the regression line.
#' @param regrCI Whether to display the confidence interval around the
#' regression line.
#' @param regrLineCol The color of the regression line.
#' @param regrCIcol The color of the confidence interval around the regression
#' line.
#' @param regrCIalpha The alpha value (transparency) of the confidence interval
#' around the regression line.
#' @param width If \code{position} is 'jitter', the points are 'jittered': some
#' random noise is added to change their location slightly. In that case
#' 'width' can be set to determine how much the location should be allowed to
#' vary on the X axis.
#' @param height If \code{position} is 'jitter', the points are 'jittered':
#' some random noise is added to change their location slightly. In that case
#' 'height' can be set to determine how much the location should be allowed to
#' vary on the Y axis.
#' @param position Whether to 'jitter' the points (adding some random noise to
#' change their location slightly, used to prevent overplotting). Set to
#' 'jitter' to jitter the points.
#' @param xVarName,yVarName Can be used to manually specify the names of the
#' variables on the x and y axes.
#' @param \dots And additional arguments are passed to \code{\link{geom_point}}
#' or \code{\link{geom_jitter}} (if \code{jitter} is set to 'jitter').
#' @return A \code{\link{ggplot}} plot is returned.
#' @author Gjalt-Jorn Peters
#'
#' Maintainer: Gjalt-Jorn Peters <gjalt-jorn@@userfriendlyscience.com>
#' @seealso \code{\link{geom_point}}, \code{\link{geom_jitter}},
#' \code{\link{geom_smooth}}
#' @keywords hplot
#' @examples
#'
#'
#' ### A simple scatter plot
#' scatterPlot(mtcars$mpg, mtcars$hp);
#'
#' ### The same scatter plot, now with a regression line
#' ### and its confidence interval added.
#' scatterPlot(mtcars$mpg, mtcars$hp, regrLine=TRUE, regrCI=TRUE);
#'
#'
#' @export scatterPlot
scatterPlot <- function(x, y, pointsize=3,
                        theme = theme_bw(),
                        regrLine = FALSE,
                        regrCI = FALSE,
                        regrLineCol = "blue",
                        regrCIcol = regrLineCol,
                        regrCIalpha = .25,
                        width = 0,
                        height = 0,
                        position="identity",
                        xVarName=NULL,
                        yVarName=NULL,
                        ...) {

  ### Derive axis labels from the caller's expressions unless explicit names
  ### were supplied. Plain `if` replaces the original scalar `ifelse()`:
  ### `ifelse()` is vectorized and eagerly evaluates both branches, so it
  ### always ran extractVarName(deparse(substitute(...))) even when a name
  ### was given, and it is the wrong tool for a scalar NULL check.
  if (is.null(xVarName)) {
    xVarName <- extractVarName(deparse(substitute(x)));
  }
  if (is.null(yVarName)) {
    yVarName <- extractVarName(deparse(substitute(y)));
  }

  ### Assemble the plotting data frame, with columns named after the
  ### two variables so the axes are labelled automatically.
  dat <- data.frame(x, y);
  names(dat) <- c(xVarName, yVarName);

  plot <- ggplot(dat, aes_string(xVarName, yVarName)) +
    theme;

  ### Optionally add a linear regression line, with or without its
  ### confidence band around it.
  if (regrLine && regrCI) {
    plot <- plot + geom_smooth(method='lm', color = regrLineCol,
                               fill = regrCIcol, alpha = regrCIalpha,
                               na.rm=TRUE);
  } else if (regrLine) {
    plot <- plot + geom_smooth(method='lm', color = regrLineCol,
                               se=FALSE, na.rm=TRUE);
  }

  ### Plain points unless jittering was requested: a non-'identity'
  ### position, or a nonzero jitter width/height.
  if (!is.null(position) && (tolower(position)=='identity') && (width==0) && (height==0)) {
    plot <- plot + geom_point(na.rm=TRUE,
                              size=pointsize,
                              ...);
  } else {
    plot <- plot + geom_jitter(na.rm=TRUE,
                               size=pointsize,
                               width = width,
                               height = height,
                               ...);
  }

  return(plot);
}
|
0493e19c75c7d1dd22e76a1b0715d33c044f6722 | b139c8d1d44220e6f1d609c14891462d6899e255 | /theapp/man/redisSetThis.Rd | 40ee93b9f781cb71e1d7b0f1b151a57652049e5a | [
"MIT"
] | permissive | Pivot-Sciences/positive-possum | 0f8fc8a939c4c0a75532bdf7c1860590ca6e6dab | 251158bfe759a5950da1835e7cf90f96dd7fc0ff | refs/heads/master | 2021-04-09T15:42:06.827512 | 2018-03-19T08:45:13 | 2018-03-19T08:45:13 | 125,510,962 | 0 | 2 | null | null | null | null | UTF-8 | R | false | true | 455 | rd | redisSetThis.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redis_SetThis.R
\name{redisSetThis}
\alias{redisSetThis}
\title{Wrapper for redis get}
\usage{
redisSetThis(conf, key, value)
}
\arguments{
\item{host}{the users short name}
\item{port}{the range of dates}
\item{inputQuery}{the range of dates}
}
\value{
this function returns the redis result
}
\description{
This function takes as input the host IP, the port, and the query.
}
|
7da2e9d0d45061f80033da84045f2d94f418e154 | 750f329c6f7f9b0b67fd5425b369ec7a7bfd70ef | /SouthWest.R | 4a1a033516a71cb4b697e585a593e84100abc7a3 | [] | no_license | irecasens/southwest | 27161980222411829c7a9cb271924856c887ce3f | 749188acf0a1308dea211fe4c2152ed263aafc22 | refs/heads/master | 2021-07-04T04:02:06.229938 | 2017-09-27T20:55:36 | 2017-09-27T20:55:36 | 105,067,178 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 52,237 | r | SouthWest.R |
library(xlsx)
library(ggplot2)
library(tidyr)
library(scales)
library(hexbin)
library(data.table)
library(plotly)
library(PerformanceAnalytics)
library(dplyr)
# LOAD DATA and MERGE with GMT
df <- read.csv("Airline_ontime_data (1).csv",
header = TRUE,
quote="\"",
stringsAsFactors= TRUE,
strip.white = TRUE)
as.data.frame.matrix(df)
tzo <- read.csv("TimeZonesORIGIN.csv",
header = TRUE,
quote="\"",
stringsAsFactors= TRUE,
strip.white = TRUE)
tzd <- read.csv("TimeZonesDESTINATION.csv",
header = TRUE,
quote="\"",
stringsAsFactors= TRUE,
strip.white = TRUE)
as.data.frame.matrix(tzo)
as.data.frame.matrix(tzd)
df <- merge(x = df, y = tzo, by = "ORIGIN", all.x = TRUE)
df <- merge(x = df, y = tzd, by = "DEST", all.x = TRUE)
# FILTER DATA (for temporal analysis only)
# FILTER DATA (for temporal analysis only): drop rows with an empty carrier code.
df_subset <- df[ (df$CARRIER != ''), ] # & (df$FL_DATE == '1/01/14' | df$FL_DATE == '2/01/14') , ]
#SELECT VARIABLES used downstream (times, delays, cancellation flags, distances, tz offsets)
df_subset <- subset(df_subset, select=c("CARRIER","Carrier_Type","FL_DATE","TAIL_NUM","FL_NUM","ORIGIN_AIRPORT_ID","ORIGIN_CITY_NAME","DEST_AIRPORT_ID","DEST_CITY_NAME",
"CRS_DEP_TIME","DEP_TIME","DEP_TIME_BLK","DEP_DELAY","DEP_DELAY_NEW","DEP_DEL15","DEP_DELAY_GROUP",
"CRS_ARR_TIME","ARR_TIME","ARR_TIME_BLK","ARR_DELAY","ARR_DELAY_NEW","ARR_DEL15","ARR_DELAY_GROUP",
"CANCELLED","DIVERTED",
"CRS_ELAPSED_TIME","ACTUAL_ELAPSED_TIME","AIR_TIME","DISTANCE","DISTANCE_GROUP",
"CARRIER_DELAY","WEATHER_DELAY","NAS_DELAY","SECURITY_DELAY","LATE_AIRCRAFT_DELAY",
"GMTO","GMTD"))
# REMOVE CANCELED AND DIVERTED flights (they have no usable departure/arrival times).
df_subset <- df_subset[ df_subset$CANCELLED == 0, ]
df_subset <- df_subset[ df_subset$DIVERTED == 0, ]
# Delay-cause columns are NA when a flight had no reportable delay; treat those
# as zero minutes. Direct NA-indexed assignment replaces the previous repeated
# ifelse(is.na(x), 0, x) pattern -- same result, simpler and faster.
for (delay_col in c("CARRIER_DELAY", "WEATHER_DELAY", "NAS_DELAY",
                    "SECURITY_DELAY", "LATE_AIRCRAFT_DELAY", "DEP_DELAY_NEW")) {
  df_subset[[delay_col]][is.na(df_subset[[delay_col]])] <- 0
}
# Baseline on-time performance for Southwest (WN): share of flights with no
# 15-minute-or-more departure delay (DEP_DEL15 is a 0/1 indicator).
x <- df_subset[ df_subset$CARRIER== 'WN' , ]
x$DEP_DEL15[is.na(x$DEP_DEL15)] <- 0
ontime_before <- 1-(sum(x$DEP_DEL15))/nrow(x)
ontime_before
# FORMAT FL_DATE: parse flight date strings into Date objects.
# NOTE(review): "%d/%m/%Y" expects a 4-digit year, but the filter comment above
# shows dates like '1/01/14'; if years are 2-digit this should be "%d/%m/%y".
# Relative date arithmetic below still works either way -- confirm against the raw file.
df_subset$Dates <- as.Date(df_subset$FL_DATE, "%d/%m/%Y")
# DEPARTURE TIME INTO GMT ABSOLUTE
# CRS_DEP_TIME / DEP_TIME are hhmm integers with leading zeros dropped
# (e.g. 5 -> 00:05, 45 -> 00:45, 930 -> 9:30, 1755 -> 17:55); the nested
# ifelse chains slice hour and minute text out by digit count.
df_subset$dep_hours <-
ifelse(nchar(df_subset$CRS_DEP_TIME)==4,substr(df_subset$CRS_DEP_TIME,1,2) ,
ifelse(nchar(df_subset$CRS_DEP_TIME)==3,substr(df_subset$CRS_DEP_TIME,1,1) ,0))
df_subset$dep_minutes <-
ifelse(nchar(df_subset$CRS_DEP_TIME)==4,substr(df_subset$CRS_DEP_TIME,3,4) ,
ifelse(nchar(df_subset$CRS_DEP_TIME)==3,substr(df_subset$CRS_DEP_TIME,2,3),
ifelse(nchar(df_subset$CRS_DEP_TIME)==2,substr(df_subset$CRS_DEP_TIME,1,2),
ifelse(nchar(df_subset$CRS_DEP_TIME)==1,substr(df_subset$CRS_DEP_TIME,0,1),0))))
# Same extraction for the ACTUAL departure time (DEP_TIME).
df_subset$dep_hours_temp <-
ifelse(nchar(df_subset$DEP_TIME)==4,substr(df_subset$DEP_TIME,1,2) ,
ifelse(nchar(df_subset$DEP_TIME)==3,substr(df_subset$DEP_TIME,1,1) ,0))
df_subset$dep_minutes_temp <-
ifelse(nchar(df_subset$DEP_TIME)==4,substr(df_subset$DEP_TIME,3,4) ,
ifelse(nchar(df_subset$DEP_TIME)==3,substr(df_subset$DEP_TIME,2,3),
ifelse(nchar(df_subset$DEP_TIME)==2,substr(df_subset$DEP_TIME,1,2),
ifelse(nchar(df_subset$DEP_TIME)==1,substr(df_subset$DEP_TIME,0,1),0))))
# Build local POSIXct timestamps, then shift by the origin GMT offset to get
# absolute (GMT) times that are comparable across time zones.
# NOTE(review): a raw value of 2400 would yield "24:00", which as.POSIXct
# cannot parse (NA) -- verify whether the data uses 2400 for midnight.
df_subset$CRS_DEP <- as.POSIXct(paste(df_subset$Dates, " ", df_subset$dep_hours,":", df_subset$dep_minutes,sep = "", collapse = NULL))
df_subset$DEP <- as.POSIXct(paste(df_subset$Dates, " ", df_subset$dep_hours_temp,":", df_subset$dep_minutes_temp,sep = "", collapse = NULL))
df_subset$CRS_DEP_GMT <- df_subset$CRS_DEP - df_subset$GMTO*60*60
df_subset$DEP_GMT <- df_subset$DEP - df_subset$GMTO*60*60
# ARRIVAL TIME INTO GMT ABSOLUTE (same scheme, using the destination offset GMTD).
df_subset$arr_hours <-
ifelse(nchar(df_subset$CRS_ARR_TIME)==4,substr(df_subset$CRS_ARR_TIME,1,2) ,
ifelse(nchar(df_subset$CRS_ARR_TIME)==3,substr(df_subset$CRS_ARR_TIME,1,1) ,0))
df_subset$arr_minutes <-
ifelse(nchar(df_subset$CRS_ARR_TIME)==4,substr(df_subset$CRS_ARR_TIME,3,4) ,
ifelse(nchar(df_subset$CRS_ARR_TIME)==3,substr(df_subset$CRS_ARR_TIME,2,3),
ifelse(nchar(df_subset$CRS_ARR_TIME)==2,substr(df_subset$CRS_ARR_TIME,1,2),
ifelse(nchar(df_subset$CRS_ARR_TIME)==1,substr(df_subset$CRS_ARR_TIME,0,1),0))))
df_subset$CRS_ARR <- as.POSIXct(paste(df_subset$Dates, " ", df_subset$arr_hours,":", df_subset$arr_minutes,sep = "", collapse = NULL))
df_subset$CRS_ARR_GMT <- df_subset$CRS_ARR - df_subset$GMTD*60*60
# DEPARTURE AND ARRIVAL IN MINUTES (used to calculate turn-around time in minutes)
# Each column is minutes elapsed since midnight of the earliest date in the data,
# in GMT: (days since min date)*24*60 + hours*60 + minutes - tz offset in minutes.
# The hour term is 0 for 1- and 2-digit hhmm values; the minute term is always
# the last two characters (substr with nchar-1..nchar).
df_subset$CRS_DEPARTURE_MINUTES <-
as.numeric(df_subset$Dates - min(df_subset$Dates))*24*60+
ifelse(nchar(df_subset$CRS_DEP_TIME)==1,0,
ifelse(nchar(df_subset$CRS_DEP_TIME)==2,0,
ifelse(nchar(df_subset$CRS_DEP_TIME)==3,as.numeric(substr(df_subset$CRS_DEP_TIME, 1, 1)),
ifelse(nchar(df_subset$CRS_DEP_TIME)==4,as.numeric(substr(df_subset$CRS_DEP_TIME, 1, 2)),0))))*60+
as.numeric(substr(df_subset$CRS_DEP_TIME, nchar(df_subset$CRS_DEP_TIME)-1, nchar(df_subset$CRS_DEP_TIME)))-
df_subset$GMTO*60
df_subset$CRS_ARRIVAL_MINUTES <-
as.numeric(df_subset$Dates - min(df_subset$Dates))*24*60+
ifelse(nchar(df_subset$CRS_ARR_TIME)==1,0,
ifelse(nchar(df_subset$CRS_ARR_TIME)==2,0,
ifelse(nchar(df_subset$CRS_ARR_TIME)==3,as.numeric(substr(df_subset$CRS_ARR_TIME, 1, 1)),
ifelse(nchar(df_subset$CRS_ARR_TIME)==4,as.numeric(substr(df_subset$CRS_ARR_TIME, 1, 2)),0))))*60+
as.numeric(substr(df_subset$CRS_ARR_TIME, nchar(df_subset$CRS_ARR_TIME)-1, nchar(df_subset$CRS_ARR_TIME)))-
df_subset$GMTD*60
# Actual departure/arrival in the same scale.
# NOTE(review): these reuse the SCHEDULED date (Dates); an actual departure or
# arrival after midnight would be attributed to the previous day -- confirm
# whether that case occurs in the data.
df_subset$DEPARTURE_MINUTES <-
as.numeric(df_subset$Dates - min(df_subset$Dates))*24*60+
ifelse(nchar(df_subset$DEP_TIME)==1,0,
ifelse(nchar(df_subset$DEP_TIME)==2,0,
ifelse(nchar(df_subset$DEP_TIME)==3,as.numeric(substr(df_subset$DEP_TIME, 1, 1)),
ifelse(nchar(df_subset$DEP_TIME)==4,as.numeric(substr(df_subset$DEP_TIME, 1, 2)),0))))*60+
as.numeric(substr(df_subset$DEP_TIME, nchar(df_subset$DEP_TIME)-1, nchar(df_subset$DEP_TIME)))-
df_subset$GMTO*60
df_subset$ARRIVAL_MINUTES <-
as.numeric(df_subset$Dates - min(df_subset$Dates))*24*60+
ifelse(nchar(df_subset$ARR_TIME)==1,0,
ifelse(nchar(df_subset$ARR_TIME)==2,0,
ifelse(nchar(df_subset$ARR_TIME)==3,as.numeric(substr(df_subset$ARR_TIME, 1, 1)),
ifelse(nchar(df_subset$ARR_TIME)==4,as.numeric(substr(df_subset$ARR_TIME, 1, 2)),0))))*60+
as.numeric(substr(df_subset$ARR_TIME, nchar(df_subset$ARR_TIME)-1, nchar(df_subset$ARR_TIME)))-
df_subset$GMTD*60
# Actual gate-to-gate elapsed time in minutes.
df_subset$ELAPSED_DEP_TO_ARR = df_subset$ARRIVAL_MINUTES - df_subset$DEPARTURE_MINUTES
# PREVIOUS FLIGHT ARRIVAL (by lag)
# Sort each tail number's flights chronologically, then lag() within tail number
# so every row sees the same aircraft's previous flight. Buffer = scheduled
# turnaround time (min); NA on each tail number's first recorded flight.
df_subset <- df_subset[order(df_subset$TAIL_NUM, df_subset$CRS_DEP_GMT), ]
df_subset <-
df_subset %>%
group_by(TAIL_NUM) %>%
mutate(Previous_ARR = lag(CRS_ARRIVAL_MINUTES, 1))
df_subset$Buffer = df_subset$CRS_DEPARTURE_MINUTES - df_subset$Previous_ARR
# ACTUAL BUFFER: same construction using actual (not scheduled) times.
df_subset <- df_subset[order(df_subset$TAIL_NUM, df_subset$CRS_DEP_GMT), ]
df_subset <-
df_subset %>%
group_by(TAIL_NUM) %>%
mutate(Previous_ACTUAL_ARR = lag(ARRIVAL_MINUTES, 1))
df_subset$Actual_Buffer = df_subset$DEPARTURE_MINUTES - df_subset$Previous_ACTUAL_ARR
# ADD VARIABLE FOR FIRST FLIGHT OF THE DAY: "New Day" when the previous flight
# of this tail number was on a different date (or there is no previous flight).
df_subset <- df_subset[order(df_subset$TAIL_NUM, df_subset$CRS_DEP_GMT), ]
df_subset <-
df_subset %>%
group_by(TAIL_NUM) %>%
mutate(Previous_Date = lag(FL_DATE, 1))
df_subset$Day <-
ifelse(is.na(df_subset$Previous_Date),"New Day",
ifelse(df_subset$FL_DATE==df_subset$Previous_Date,"","New Day"))
# VARIABLE TO CHECK IF ITS STILL THE SAME PLANE (or there's an error in scheduling)
# "Same Plane" when this flight's origin matches the tail number's previous
# recorded destination; "Different Plane" flags a broken rotation chain in the data.
df_subset <- df_subset[order(df_subset$TAIL_NUM, df_subset$CRS_DEP_GMT), ]
df_subset <-
df_subset %>%
group_by(TAIL_NUM) %>%
mutate(Previous_Arrival_Airport = lag(DEST_CITY_NAME, 1))
df_subset$Flight <-
ifelse(is.na(df_subset$Previous_Arrival_Airport),"Not Applicable",
ifelse(df_subset$Previous_Arrival_Airport==df_subset$ORIGIN_CITY_NAME,"Same Plane","Different Plane"))
# ADD ID FOR THE FLIGHT NUMBER OF THE DAY
# ave(..., FUN=order) gives the sort permutation of departure times within each
# tail/date group; because rows are already sorted this is the sequence 1..n.
df_subset$id <- ave(as.numeric(df_subset$CRS_DEP_GMT), df_subset$TAIL_NUM,df_subset$FL_DATE, FUN=order)
# Spot check: one aircraft's rotation, in order.
df_subset[ df_subset$TAIL_NUM == 'N476HA', c("CARRIER", "TAIL_NUM", "FL_DATE","id","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"Buffer","Day","Flight", "GMTO", "GMTD","DEP_TIME", "DEP_DELAY","DEP_DELAY_NEW", "DEP_DELAY_GROUP", "ARR_TIME") ] #, "ARR_DELAY","WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
# MAX id by TAIL and DAY
# Flights per aircraft-day: Max_Id is the highest within-day sequence number.
df_subset <- df_subset[order(df_subset$TAIL_NUM, df_subset$CRS_DEP_GMT), ]
df_subset <-
df_subset %>%
group_by(TAIL_NUM, FL_DATE) %>%
mutate(Max_Id = max(id))
df_flights <- df_subset %>% group_by(CARRIER, TAIL_NUM, FL_DATE) %>% summarise(Number_Flights = max(Max_Id))
# TO calculate Point-to-Point without Southwest without WN: df_flights <- df_flights[ df_flights$CARRIER != 'WN', c("CARRIER","Carrier_Type", "FL_DATE","TAIL_NUM","Number_Flights" ) ]
# Classify each carrier's business model. This must happen BEFORE the
# per-carrier summary below: summarise() only kept CARRIER/TAIL_NUM/FL_DATE/
# Number_Flights, so grouping by Carrier_Type first failed with an
# unknown-column error in the original ordering.
point_to_point <- c("OO", "HA", "MQ", "EV", "F9", "FL")
df_flights$Carrier_Type <- ifelse(df_flights$CARRIER == "WN", 'Southwest',
                           ifelse(df_flights$CARRIER %in% point_to_point,
                                  'Point-to-Point without Southwest', 'Hub and Spoke'))
# Average number of flights flown per aircraft per day, by carrier and by model.
df_flights_Carrier <- df_flights %>% group_by(CARRIER, Carrier_Type) %>% summarise(Number_Flights_per_day = mean(Number_Flights))
df_flights_Carrier_Type <- df_flights %>% group_by(Carrier_Type) %>% summarise(Number_Flights_per_day = mean(Number_Flights))
df_flights_Carrier[order(df_flights_Carrier$Number_Flights_per_day), ]
df_flights_Carrier_Type[order(df_flights_Carrier_Type$Number_Flights_per_day), ]
#Into Analysis
# Tag every flight in df_subset with its carrier's business model.
# %in% replaces the previous 7-level nested ifelse (identical classification).
point_to_point_carriers <- c("OO", "HA", "MQ", "EV", "F9", "FL")
df_subset$Carrier_Type <- ifelse(df_subset$CARRIER == "WN", 'Southwest',
                          ifelse(df_subset$CARRIER %in% point_to_point_carriers,
                                 'Point-to-Point without Southwest', 'Hub and Spoke'))
# Detach plyr only if it is actually attached (presumably it was detached
# because it masks dplyr verbs); the unconditional detach() errored and
# stopped the script whenever plyr was not loaded -- and this script never loads it.
if ("package:plyr" %in% search()) detach("package:plyr", unload = TRUE)
# Flight counts per business model / carrier.
x <- df_subset %>% group_by(Carrier_Type, CARRIER) %>% summarise(Number_Flights = n())
x[order(x$Number_Flights), ]
# Avg flight time per flight
# Per aircraft-day mean of actual elapsed time; the later max() over a constant
# within each (TAIL_NUM, FL_DATE) group just carries that mean into df_flights.
df_subset <- df_subset[order(df_subset$TAIL_NUM, df_subset$CRS_DEP_GMT), ]
df_subset <-
df_subset %>%
group_by(TAIL_NUM, FL_DATE) %>%
mutate(Actual_Flight_Time = mean(ACTUAL_ELAPSED_TIME))
df_flights <- df_subset %>% group_by(CARRIER, Carrier_Type, TAIL_NUM, FL_DATE) %>% summarise(Actual_Flight_Time = max(Actual_Flight_Time))
# Spot check one aircraft, then average by carrier and by business model.
df_flights[ df_flights$TAIL_NUM == 'N3GWAA', c("CARRIER","Carrier_Type", "FL_DATE","TAIL_NUM","Actual_Flight_Time" ) ]
df_flights %>% group_by(CARRIER, Carrier_Type) %>% summarise(Actual_Flight_Time = mean(Actual_Flight_Time))
df_flights %>% group_by(Carrier_Type) %>% summarise(Actual_Flight_Time = mean(Actual_Flight_Time))
# Avg aircrafts per day: distinct tail numbers active per date, by carrier/model.
df_subset <- df_subset[order(df_subset$TAIL_NUM, df_subset$CRS_DEP_GMT), ]
agg1 <- aggregate(data=df_subset, TAIL_NUM ~ FL_DATE + CARRIER, function(TAIL_NUM) length(unique(TAIL_NUM)))
agg2 <- aggregate(data=df_subset, TAIL_NUM ~ FL_DATE + Carrier_Type, function(TAIL_NUM) length(unique(TAIL_NUM)))
# NOTE(review): 'Other' is never produced by the Carrier_Type classification
# above, so this first mean() is over an empty subset (NaN) -- verify intent.
mean(agg2[agg2$Carrier_Type == 'Other',]$TAIL_NUM)
mean(agg2[agg2$Carrier_Type == 'Hub and Spoke',]$TAIL_NUM)
mean(agg2[agg2$Carrier_Type == 'Point-to-Point without Southwest',]$TAIL_NUM)
mean(agg2[agg2$Carrier_Type == 'Southwest',]$TAIL_NUM)
# DELAYS CAUSES
# Six copies of the same template, one per delay cause: sum the cause's minutes
# per aircraft-day (mutate on a group; max() then just carries the constant),
# roll up to df_flights, print a spot check and a per-carrier total, and keep
# the per-business-model total in Delay_Cause_<cause> for the share table below.
# 1) Carrier-caused delay minutes.
df_subset <- df_subset[order(df_subset$TAIL_NUM, df_subset$CRS_DEP_GMT), ]
df_subset <-
df_subset %>%
group_by(TAIL_NUM, FL_DATE) %>%
mutate(CARRIER_DELAY_TIME = sum(CARRIER_DELAY))
df_flights <- df_subset %>% group_by(CARRIER, Carrier_Type, TAIL_NUM, FL_DATE) %>% summarise(Carrier_Delay_Time = max(CARRIER_DELAY_TIME))
df_flights[ df_flights$TAIL_NUM == 'N3GWAA', c("CARRIER","Carrier_Type", "FL_DATE","TAIL_NUM","Carrier_Delay_Time" ) ]
df_flights %>% group_by(CARRIER, Carrier_Type) %>% summarise(Carrier_Delay_Time = sum(Carrier_Delay_Time))
Delay_Cause_Carrier <- df_flights %>% group_by(Carrier_Type) %>% summarise(Carrier_Delay_Time = sum(Carrier_Delay_Time))
# 2) Total departure delay minutes (all causes).
df_subset <- df_subset[order(df_subset$TAIL_NUM, df_subset$CRS_DEP_GMT), ]
df_subset <-
df_subset %>%
group_by(TAIL_NUM, FL_DATE) %>%
mutate(REAL_DEP_DELAY_NEW = sum(DEP_DELAY_NEW))
df_flights <- df_subset %>% group_by(CARRIER, Carrier_Type, TAIL_NUM, FL_DATE) %>% summarise(Real_Delay_Time = max(REAL_DEP_DELAY_NEW))
df_flights[ df_flights$TAIL_NUM == 'N3GWAA', c("CARRIER","Carrier_Type", "FL_DATE","TAIL_NUM","Real_Delay_Time" ) ]
df_flights %>% group_by(CARRIER, Carrier_Type) %>% summarise(Real_Delay_Time = sum(Real_Delay_Time))
Delay_Cause_TOTAL <- df_flights %>% group_by(Carrier_Type) %>% summarise(Real_Delay_Time = sum(Real_Delay_Time))
# 3) Late-arriving-aircraft delay minutes.
df_subset <- df_subset[order(df_subset$TAIL_NUM, df_subset$CRS_DEP_GMT), ]
df_subset <-
df_subset %>%
group_by(TAIL_NUM, FL_DATE) %>%
mutate(LATE_DELAY = sum(LATE_AIRCRAFT_DELAY))
df_flights <- df_subset %>% group_by(CARRIER, Carrier_Type, TAIL_NUM, FL_DATE) %>% summarise(Late_Delay_Time = max(LATE_DELAY))
df_flights[ df_flights$TAIL_NUM == 'N3GWAA', c("CARRIER","Carrier_Type", "FL_DATE","TAIL_NUM","Late_Delay_Time" ) ]
df_flights %>% group_by(CARRIER, Carrier_Type) %>% summarise(Late_Delay_Time = sum(Late_Delay_Time))
Delay_Cause_Late <- df_flights %>% group_by(Carrier_Type) %>% summarise(Late_Delay_Time = sum(Late_Delay_Time))
# 4) Weather delay minutes.
df_subset <- df_subset[order(df_subset$TAIL_NUM, df_subset$CRS_DEP_GMT), ]
df_subset <-
df_subset %>%
group_by(TAIL_NUM, FL_DATE) %>%
mutate(WEATHER_DELAY_DELAY = sum(WEATHER_DELAY))
df_flights <- df_subset %>% group_by(CARRIER, Carrier_Type, TAIL_NUM, FL_DATE) %>% summarise(Weather_Delay_Time = max(WEATHER_DELAY_DELAY))
df_flights[ df_flights$TAIL_NUM == 'N3GWAA', c("CARRIER","Carrier_Type", "FL_DATE","TAIL_NUM","Weather_Delay_Time" ) ]
df_flights %>% group_by(CARRIER, Carrier_Type) %>% summarise(Weather_Delay_Time = sum(Weather_Delay_Time))
Delay_Cause_Weather <- df_flights %>% group_by(Carrier_Type) %>% summarise(Weather_Delay_Time = sum(Weather_Delay_Time))
# 5) National Aviation System delay minutes.
df_subset <- df_subset[order(df_subset$TAIL_NUM, df_subset$CRS_DEP_GMT), ]
df_subset <-
df_subset %>%
group_by(TAIL_NUM, FL_DATE) %>%
mutate(NAS_DELAY_DELAY = sum(NAS_DELAY))
df_flights <- df_subset %>% group_by(CARRIER, Carrier_Type, TAIL_NUM, FL_DATE) %>% summarise(NAS_Delay_Time = max(NAS_DELAY_DELAY))
df_flights[ df_flights$TAIL_NUM == 'N3GWAA', c("CARRIER","Carrier_Type", "FL_DATE","TAIL_NUM","NAS_Delay_Time" ) ]
df_flights %>% group_by(CARRIER, Carrier_Type) %>% summarise(NAS_Delay_Time = sum(NAS_Delay_Time))
Delay_Cause_NAS <- df_flights %>% group_by(Carrier_Type) %>% summarise(NAS_Delay_Time = sum(NAS_Delay_Time))
# 6) Security delay minutes.
df_subset <- df_subset[order(df_subset$TAIL_NUM, df_subset$CRS_DEP_GMT), ]
df_subset <-
df_subset %>%
group_by(TAIL_NUM, FL_DATE) %>%
mutate(SECURITY_DELAY_DELAY = sum(SECURITY_DELAY))
df_flights <- df_subset %>% group_by(CARRIER, Carrier_Type, TAIL_NUM, FL_DATE) %>% summarise(Security_Delay_Time = max(SECURITY_DELAY_DELAY))
df_flights[ df_flights$TAIL_NUM == 'N3GWAA', c("CARRIER","Carrier_Type", "FL_DATE","TAIL_NUM","Security_Delay_Time" ) ]
df_flights %>% group_by(CARRIER, Carrier_Type) %>% summarise(Security_Delay_Time = sum(Security_Delay_Time))
Delay_Cause_SECURITY <- df_flights %>% group_by(Carrier_Type) %>% summarise(Security_Delay_Time = sum(Security_Delay_Time))
# Assemble one table of per-cause delay minutes for each carrier type, then
# print each cause's share of total delay minutes by business model.
Delay_Cause_TOTAL <- as.data.frame(Delay_Cause_TOTAL)
Delay_Cause_Late <- as.data.frame(Delay_Cause_Late)
Delay_Cause_Carrier <- as.data.frame(Delay_Cause_Carrier)
Delay_Cause_Weather <- as.data.frame(Delay_Cause_Weather)
Delay_Cause_NAS <- as.data.frame(Delay_Cause_NAS)
Delay_Cause_SECURITY <- as.data.frame(Delay_Cause_SECURITY)
x <- merge(Delay_Cause_Late, Delay_Cause_Carrier,by="Carrier_Type")
y <- merge(Delay_Cause_Weather, Delay_Cause_NAS,by="Carrier_Type")
x <- merge(x, y, by="Carrier_Type")
x <- merge(x, Delay_Cause_SECURITY,by="Carrier_Type")
# merge() sorts by Carrier_Type, so row 1 = Hub and Spoke,
# row 2 = Point-to-Point without Southwest, row 3 = Southwest.
# Columns 2:6 = Late, Carrier, Weather, NAS, Security minutes; hoist the
# per-row totals instead of re-summing the five columns on every line.
# (Also fixes the "Hub-and-Speak" typo in the printed labels.)
row_total <- rowSums(x[, 2:6])
paste("Hub-and-Spoke delays by late aircraft", x[1,2]/row_total[1])
paste("Point-to-Point without Southwest delays by late aircraft", x[2,2]/row_total[2])
paste("SouthWest delays by late aircraft", x[3,2]/row_total[3])
paste("Hub-and-Spoke delays by Carrier", x[1,3]/row_total[1])
paste("Point-to-Point without Southwest delays by Carrier", x[2,3]/row_total[2])
paste("SouthWest delays by Carrier", x[3,3]/row_total[3])
paste("Hub-and-Spoke delays by Others", (x[1,4]+x[1,5]+x[1,6])/row_total[1])
paste("Point-to-Point without Southwest delays by Others", (x[2,4]+x[2,5]+x[2,6])/row_total[2])
paste("SouthWest delays by Others", (x[3,4]+x[3,5]+x[3,6])/row_total[3])
################################################################################################################################################################################
####################################################### ANALYSIS #########################################################################################################
################################################################################################################################################################################
# CHECK AMOUNT OF ERRORS FOR BAD SCHEDULING
# How often does a tail number's previous recorded destination differ from the
# current origin (i.e. the rotation chain is broken in the data)?
# NOTE(review): df_subset is still grouped by the dplyr chains above, so
# count() tallies per group rather than returning a single total -- verify
# whether nrow() was intended here.
count(df_subset)
count(df_subset[ df_subset$Flight == 'Different Plane', ])
round((count(df_subset[ df_subset$Flight == 'Different Plane', ])/count(df_subset))*100, digits = 2)
# For example: tail numbers with at least one broken-rotation row.
df_subset[ df_subset$Flight == 'Different Plane', c("CARRIER","Carrier_Type", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"Buffer","Day","Flight", "GMTO", "GMTD","DEP_TIME", "DEP_DELAY", "ARR_TIME", "ARR_DELAY" ) ]%>%
select(TAIL_NUM) %>%
distinct()
# CHECK AMOUNT OF TURN-AROUND TIME NEGATIVE (scheduled departure before the
# previous scheduled arrival -- another data-quality signal).
count(df_subset)
count(df_subset[ df_subset$Buffer <0, ])
round((count(df_subset[ df_subset$Buffer<0, ])/count(df_subset))*100, digits = 2)
# FILTER OUT ERRORS, NEW DAYS AND DELAYS FROM NAS, SECURITY OR WEATHER over 10 minutes.
df_Analysis <- df_subset[ df_subset$Flight == 'Same Plane' & df_subset$Day != 'New Day' & df_subset$Buffer>0 & df_subset$Buffer<300 & df_subset$DEP_DELAY_NEW<300, ]
count(df_subset)
count(df_Analysis)
# Spot check one Southwest aircraft, then inspect the filtered frame.
df_subset[ df_subset$TAIL_NUM == 'N215WN', c("CARRIER","Carrier_Type", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"Buffer","Day","Flight", "GMTO", "GMTD","DEP_TIME", "DEP_DELAY","DEP_DELAY_NEW", "DEP_DELAY_GROUP", "ARR_TIME") ] #, "ARR_DELAY","WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
df_temp <- df_Analysis[ df_Analysis$TAIL_NUM != '', c("CARRIER","Carrier_Type", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"Buffer","Day","Flight", "GMTO", "GMTD","DEP_TIME", "DEP_DELAY","DEP_DELAY_NEW", "DEP_DELAY_GROUP", "ARR_TIME") ] #, "ARR_DELAY","WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
df_temp  # FIX: was `df_tem`, an undefined object (typo) that stopped the script
str(df_temp)
# Histogram: TURN-AROUND TIME DISTRIBUTION
# Three panels (Hub-and-Spoke / Point-to-Point / Southwest) of SCHEDULED
# turnaround time for same-plane, same-day rotations with 0 < Buffer < 120 min.
# Each panel overlays a normal curve scaled to the histogram counts, then
# prints mean / kurtosis / skewness / sd (kurtosis & skewness come from
# PerformanceAnalytics, loaded at the top of the file).
grid <- matrix(c(1,1,2,3), 2, 2, byrow = TRUE)
layout(grid)
par(mfrow=c(3,1))
df_Analysis <- df_subset[ df_subset$Flight == 'Same Plane'
& df_subset$Day != 'New Day'
& df_subset$Buffer>0
& df_subset$Buffer<120 , ]
# Panel 1: Hub-and-Spoke carriers.
df_temp <- df_Analysis[ df_Analysis$Carrier_Type == 'Hub and Spoke', c("CARRIER","Carrier_Type", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"Buffer","Day","Flight", "GMTO", "GMTD","DEP_TIME", "DEP_DELAY","DEP_DELAY_NEW","DEP_DEL15", "DEP_DELAY_GROUP", "ARR_TIME","dep_hours") ] #, "ARR_DELAY","WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
df_temp
summary(df_temp)
x <- as.numeric(df_temp$Buffer)
h<-hist(x, breaks=20, col="#1a3260", xlab="Turn-Around Time (min)", ylab="Number of Flights",
main="Hub-and-Spoke")
xfit<-seq(min(x),max(x),length=40)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
lines(xfit, yfit, col="black", lwd=2)
mean(df_temp$Buffer)
kurtosis(df_temp$Buffer)
skewness(df_temp$Buffer)
sd(df_temp$Buffer)
# Panel 2: Point-to-Point carriers (excluding Southwest).
df_Analysis <- df_subset[ df_subset$Flight == 'Same Plane'
& df_subset$Day != 'New Day'
& df_subset$Buffer>0
& df_subset$Buffer<120 , ]
df_temp <- df_Analysis[ df_Analysis$Carrier_Type == 'Point-to-Point without Southwest', c("CARRIER","Carrier_Type", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"Buffer","Day","Flight", "GMTO", "GMTD","DEP_TIME", "DEP_DELAY","DEP_DELAY_NEW","DEP_DEL15", "DEP_DELAY_GROUP", "ARR_TIME","dep_hours") ] #, "ARR_DELAY","WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
df_temp
summary(df_temp)
x <- as.numeric(df_temp$Buffer)
h<-hist(x, breaks=20, col="#1a3260", xlab="Turn-Around Time (min)", ylab="Number of Flights",
main="Point-to-Point without Southwest")
xfit<-seq(min(x),max(x),length=40)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
lines(xfit, yfit, col="black", lwd=2)
mean(df_temp$Buffer)
kurtosis(df_temp$Buffer)
skewness(df_temp$Buffer)
sd(df_temp$Buffer)
# Panel 3: Southwest.
df_Analysis <- df_subset[ df_subset$Flight == 'Same Plane'
& df_subset$Day != 'New Day'
& df_subset$Buffer>0
& df_subset$Buffer<120 , ]
df_temp <- df_Analysis[ df_Analysis$Carrier_Type == 'Southwest', c("CARRIER","Carrier_Type", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"Buffer","Day","Flight", "GMTO", "GMTD","DEP_TIME", "DEP_DELAY","DEP_DELAY_NEW","DEP_DEL15", "DEP_DELAY_GROUP", "ARR_TIME","dep_hours") ] #, "ARR_DELAY","WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
df_temp
summary(df_temp)
x <- as.numeric(df_temp$Buffer)
h<-hist(x, breaks=20, col="#1a3260", xlab="Turn-Around Time (min)", ylab="Number of Flights",
main="Southwest")
xfit<-seq(min(x),max(x),length=40)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
lines(xfit, yfit, col="black", lwd=2)
mean(df_temp$Buffer)
kurtosis(df_temp$Buffer)
skewness(df_temp$Buffer)
sd(df_temp$Buffer)
# TURN-AROUND TIME DISTRIBUTION BY CARRIER
# Mean scheduled turnaround, departure delay and actual turnaround per carrier
# over same-plane, same-day rotations with plausible scheduled buffers.
# Note: `[`-indexing keeps NA-condition rows (as all-NA rows), unlike filter().
df_Analysis <- df_subset[ df_subset$Flight == 'Same Plane'
& df_subset$Day != 'New Day'
& df_subset$Buffer>0
& df_subset$Buffer<120 , ]
df_Analysis %>% group_by(CARRIER) %>% summarise(Mean_TurnAroundTime = mean(Buffer))
df_Analysis %>% group_by(CARRIER) %>% summarise(Mean_DEP_DELAY = mean(DEP_DELAY_NEW))
df_Analysis %>% group_by(CARRIER) %>% summarise(Mean_Actual_TurnAround = mean(Actual_Buffer))
# Histogram: ACTUAL TURN-AROUND TIME DISTRIBUTION
# Same three-panel layout as the scheduled-buffer section, but over the ACTUAL
# turnaround (Actual_Buffer). FIX: the first two panels were mislabeled
# main="Southwest" (copy-paste); titles now match the data plotted.
grid <- matrix(c(1,1,2,3), 2, 2, byrow = TRUE)
layout(grid)
par(mfrow=c(3,1))
df_Analysis <- df_subset[ df_subset$Flight == 'Same Plane'
& df_subset$Day != 'New Day'
& df_subset$Buffer>0
& df_subset$Buffer<120
& df_subset$Actual_Buffer>0
# & df_subset$Actual_Buffer<40
, ]
# Panel 1: Hub-and-Spoke carriers.
df_temp <- df_Analysis[ df_Analysis$Carrier_Type == 'Hub and Spoke', c("CARRIER","DEPARTURE_MINUTES","Carrier_Type", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"Actual_Buffer","Day","Flight", "GMTO", "GMTD","DEP_TIME", "DEP_DELAY","DEP_DELAY_NEW","DEP_DEL15", "DEP_DELAY_GROUP", "ARR_TIME","dep_hours") ] #, "ARR_DELAY","WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
df_temp
summary(df_temp)
# NOTE(review): this spot check always filters the Southwest tail N209WN,
# even in the non-Southwest panels -- likely copy-paste; confirm intent.
df_Analysis[ df_Analysis$Carrier_Type == 'Southwest' & df_Analysis$TAIL_NUM=="N209WN" & df_Analysis$DEPARTURE_MINUTES >5000, c("CARRIER","Carrier_Type","DEPARTURE_MINUTES", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"Actual_Buffer","Day","Flight", "DEP_TIME", "DEP_DELAY", "ARR_TIME") ] #, "ARR_DELAY","WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
x <- as.numeric(df_temp$Actual_Buffer)
h<-hist(x, breaks=20, col="#1a3260", xlab="Turn-Around Time (min)", ylab="Number of Flights",
main="Hub-and-Spoke")
xfit<-seq(min(x),max(x),length=40)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
lines(xfit, yfit, col="black", lwd=2)
mean(df_temp$Actual_Buffer)
mean(df_temp$DEP_DELAY)
kurtosis(df_temp$Actual_Buffer)
skewness(df_temp$Actual_Buffer)
sd(df_temp$Actual_Buffer)
# Panel 2: Point-to-Point carriers (excluding Southwest).
df_Analysis <- df_subset[ df_subset$Flight == 'Same Plane'
& df_subset$Day != 'New Day'
& df_subset$Buffer>0
& df_subset$Buffer<120
& df_subset$Actual_Buffer>0
# & df_subset$Actual_Buffer<40
, ]
df_temp <- df_Analysis[ df_Analysis$Carrier_Type == 'Point-to-Point without Southwest', c("CARRIER","DEPARTURE_MINUTES","Carrier_Type", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"Actual_Buffer","Day","Flight", "GMTO", "GMTD","DEP_TIME", "DEP_DELAY","DEP_DELAY_NEW","DEP_DEL15", "DEP_DELAY_GROUP", "ARR_TIME","dep_hours") ] #, "ARR_DELAY","WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
df_temp
summary(df_temp)
df_Analysis[ df_Analysis$Carrier_Type == 'Southwest' & df_Analysis$TAIL_NUM=="N209WN" & df_Analysis$DEPARTURE_MINUTES >5000, c("CARRIER","Carrier_Type","DEPARTURE_MINUTES", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"Actual_Buffer","Day","Flight", "DEP_TIME", "DEP_DELAY", "ARR_TIME") ] #, "ARR_DELAY","WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
x <- as.numeric(df_temp$Actual_Buffer)
h<-hist(x, breaks=20, col="#1a3260", xlab="Turn-Around Time (min)", ylab="Number of Flights",
main="Point-to-Point without Southwest")
xfit<-seq(min(x),max(x),length=40)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
lines(xfit, yfit, col="black", lwd=2)
mean(df_temp$Actual_Buffer)
mean(df_temp$DEP_DELAY)
kurtosis(df_temp$Actual_Buffer)
skewness(df_temp$Actual_Buffer)
sd(df_temp$Actual_Buffer)
# Panel 3: Southwest.
df_Analysis <- df_subset[ df_subset$Flight == 'Same Plane'
& df_subset$Day != 'New Day'
& df_subset$Buffer>0
& df_subset$Buffer<120
& df_subset$Actual_Buffer>0
# & df_subset$Actual_Buffer<40
, ]
df_temp <- df_Analysis[ df_Analysis$Carrier_Type == 'Southwest', c("CARRIER","DEPARTURE_MINUTES","Carrier_Type", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"Actual_Buffer","Day","Flight", "GMTO", "GMTD","DEP_TIME", "DEP_DELAY","DEP_DELAY_NEW","DEP_DEL15", "DEP_DELAY_GROUP", "ARR_TIME","dep_hours") ] #, "ARR_DELAY","WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
df_temp
summary(df_temp)
df_Analysis[ df_Analysis$Carrier_Type == 'Southwest' & df_Analysis$TAIL_NUM=="N209WN" & df_Analysis$DEPARTURE_MINUTES >5000, c("CARRIER","Carrier_Type","DEPARTURE_MINUTES", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"Actual_Buffer","Day","Flight", "DEP_TIME", "DEP_DELAY", "ARR_TIME") ] #, "ARR_DELAY","WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
x <- as.numeric(df_temp$Actual_Buffer)
h<-hist(x, breaks=20, col="#1a3260", xlab="Turn-Around Time (min)", ylab="Number of Flights",
main="Southwest")
xfit<-seq(min(x),max(x),length=40)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
lines(xfit, yfit, col="black", lwd=2)
mean(df_temp$Actual_Buffer)
mean(df_temp$DEP_DELAY)
kurtosis(df_temp$Actual_Buffer)
skewness(df_temp$Actual_Buffer)
sd(df_temp$Actual_Buffer)
# Graph: TURN-AROUND TIME vs. LATE_AIRCRAFT_DELAY
# Cross-tabulate scheduled-buffer buckets against whether the inbound aircraft
# was more than 15 minutes late, then append row/column totals and percentages.
# Positional indexing below assumes row 2 = the ">15 min" level and row 3 = Total.
df_Analysis <- df_subset[ df_subset$Flight == 'Same Plane'
& df_subset$Day != 'New Day'
#& df_subset$Buffer>0
, ]
df_temp <- df_Analysis[ df_Analysis$Carrier_Type == 'Southwest' , c("CARRIER","Carrier_Type", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"Buffer","Day","Flight", "GMTO", "GMTD","DEP_TIME", "DEP_DELAY","LATE_AIRCRAFT_DELAY", "DEP_DELAY_NEW", "CARRIER_DELAY" , "DEP_DEL15", "DEP_DELAY_GROUP", "ARR_TIME","dep_hours") ] #, "ARR_DELAY","WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
df_temp
# Bucket the scheduled buffer (Buffer>0 filter is commented out above, so the
# '>1hr' bucket also absorbs anything over 60 min; NA buffers drop from table()).
df_temp$Buffer_intervals<-ifelse(df_temp$Buffer<=19,'<20',
ifelse(df_temp$Buffer<=29,'20-29',
ifelse(df_temp$Buffer<=39,'30-39',
ifelse(df_temp$Buffer<=60,'40-60','>1hr'))))
# NOTE(review): "Late Aircraf" (sic) is an existing label typo; kept byte-identical
# because it becomes a table dimname used below.
df_temp$Delay_Late<-ifelse(df_temp$LATE_AIRCRAFT_DELAY>15,"Late Aircraf Delay > 15 min","Delay <15 min")
counts1 <- as.matrix(table(df_temp$Delay_Late, df_temp$Buffer_intervals))
counts1 <- counts1[,c("<20","20-29","30-39","40-60", ">1hr")]
counts1 <- cbind(counts1, Total = rowSums(counts1))
counts1 <- rbind(counts1, Total = colSums(counts1))
counts1 <- rbind(counts1, Delay_pct = round((counts1[2,]/counts1[3,]),3)*100)
Total_pct <- c(round(counts1[3,1]/counts1[3,][6],3)*100,
round(counts1[3,2]/counts1[3,][6],3)*100,
round(counts1[3,3]/counts1[3,][6],3)*100,
round(counts1[3,4]/counts1[3,][6],3)*100,
round(counts1[3,5]/counts1[3,][6],3)*100,
round(counts1[3,6]/counts1[3,][6],3)*100)
counts1 <- rbind(counts1, Total_pct)
counts1
# SAME AS ABOVE BUT FOR Point-to-Point without Southwest
df_Analysis <- df_subset[ df_subset$Flight == 'Same Plane'
& df_subset$Day != 'New Day'
#& df_subset$Buffer>0
, ]
df_temp <- df_Analysis[ df_Analysis$Carrier_Type == 'Point-to-Point without Southwest' , c("CARRIER","Carrier_Type", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"Buffer","Day","Flight", "GMTO", "GMTD","DEP_TIME", "DEP_DELAY","LATE_AIRCRAFT_DELAY", "DEP_DELAY_NEW", "CARRIER_DELAY" , "DEP_DEL15", "DEP_DELAY_GROUP", "ARR_TIME","dep_hours") ] #, "ARR_DELAY","WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
df_temp
df_temp$Buffer_intervals<-ifelse(df_temp$Buffer<=19,'<20',
ifelse(df_temp$Buffer<=29,'20-29',
ifelse(df_temp$Buffer<=39,'30-39',
ifelse(df_temp$Buffer<=60,'40-60','>1hr'))))
df_temp$Delay_Late<-ifelse(df_temp$LATE_AIRCRAFT_DELAY>15,"Late Aircraf Delay > 15 min","Delay <15 min")
counts1 <- as.matrix(table(df_temp$Delay_Late, df_temp$Buffer_intervals))
counts1 <- counts1[,c("<20","20-29","30-39","40-60", ">1hr")]
counts1 <- cbind(counts1, Total = rowSums(counts1))
counts1 <- rbind(counts1, Total = colSums(counts1))
counts1 <- rbind(counts1, Delay_pct = round((counts1[2,]/counts1[3,]),3)*100)
Total_pct <- c(round(counts1[3,1]/counts1[3,][6],3)*100,
round(counts1[3,2]/counts1[3,][6],3)*100,
round(counts1[3,3]/counts1[3,][6],3)*100,
round(counts1[3,4]/counts1[3,][6],3)*100,
round(counts1[3,5]/counts1[3,][6],3)*100,
round(counts1[3,6]/counts1[3,][6],3)*100)
counts1 <- rbind(counts1, Total_pct)
counts1
#CORRELATION BETWEEN DEPARTURE DELAYS AND LATE AIRCRAFT ARRIVALS
df_temp <- df_temp[ df_temp$Flight == 'Same Plane'
& df_temp$Day != 'New Day'
# & df_temp$Buffer>0
# & df_temp$Buffer<=35
# & ( (df_temp$Buffer<=35 & df_temp$id<=4) | (df_temp$Buffer<=100 & df_temp$id>4) )
# & df_temp$Carrier_Type == 'Southwest'
# & df_temp$DEP_DELAY_NEW>1
& df_temp$DEP_DELAY_NEW<400
& df_temp$LATE_AIRCRAFT_DELAY<400
,]
cor(df_temp$DEP_DELAY_NEW,df_temp$LATE_AIRCRAFT_DELAY)
summary(lm(df_temp$DEP_DELAY_NEW ~ df_temp$LATE_AIRCRAFT_DELAY , data = df_temp))
intercept <- round(coef(lm(df_temp$DEP_DELAY_NEW ~ df_temp$LATE_AIRCRAFT_DELAY, data = df_temp))[1],2)
slope <- round(coef(lm(df_temp$DEP_DELAY_NEW ~ df_temp$LATE_AIRCRAFT_DELAY, data = df_temp))[2],2)
x <- ggplot(df_temp,aes(LATE_AIRCRAFT_DELAY,DEP_DELAY_NEW))
x + geom_jitter(alpha=0.5) + geom_smooth(method="lm", colour="#FFC000") +
ggtitle(paste("Intercept: " , intercept , " Slope: " , slope )) + xlab("Late Aircraft Arrival (minutes)") + ylab("Departure Delay")
# Effect of turnaround into late aircraft
# MAIN
# Same-plane, same-day legs with a 20-35 minute scheduled turnaround
df_temp <- df_subset[ df_subset$Flight == 'Same Plane'
                      & df_subset$Day != 'New Day'
                      & df_subset$Buffer>19
                      & df_subset$Buffer<=35
                      # & ( (df_subset$Buffer<=35 & df_subset$id<=4) | (df_subset$Buffer<=100 & df_subset$id>4) )
                      # & df_subset$Carrier_Type == 'Southwest'
                      # & df_subset$DEP_DELAY_NEW>15
                      # & df_subset$LATE_AIRCRAFT_DELAY<50
                      ,]
#color #ffc300
cor(df_temp$Buffer, df_temp$LATE_AIRCRAFT_DELAY)
# Fit once and reuse (the original refit the identical model three times)
fit <- lm(LATE_AIRCRAFT_DELAY ~ Buffer, data = df_temp)
summary(fit)
intercept <- round(coef(fit)[1], 2)
slope <- round(coef(fit)[2], 2)
x <- ggplot(df_temp, aes(Buffer, LATE_AIRCRAFT_DELAY))
x + geom_jitter(alpha = 0.5) + geom_smooth(method = "lm", colour = "#FFC000") +
  ggtitle(paste("Intercept: ", intercept, " Slope: ", slope)) +
  xlab("Turnaround Time (min)") + ylab("Late Aircraft Arrival (minutes)")
# SAME BUT WITH DEP_DELAY_NEW
# Same-plane, same-day legs with a 0-35 minute turnaround vs. departure delay
df_temp <- df_subset[ df_subset$Flight == 'Same Plane'
                      & df_subset$Day != 'New Day'
                      & df_subset$Buffer>0
                      & df_subset$Buffer<=35
                      # & ( (df_subset$Buffer<=35 & df_subset$id<=4) | (df_subset$Buffer<=100 & df_subset$id>4) )
                      # & df_subset$Carrier_Type == 'Southwest'
                      # & df_subset$DEP_DELAY_NEW>15
                      # & df_subset$LATE_AIRCRAFT_DELAY<50
                      ,]
cor(df_temp$Buffer, df_temp$DEP_DELAY_NEW)
# Fit once and reuse (the original refit the identical model three times)
fit <- lm(DEP_DELAY_NEW ~ Buffer, data = df_temp)
summary(fit)
intercept <- round(coef(fit)[1], 2)
slope <- round(coef(fit)[2], 2)
x <- ggplot(df_temp, aes(Buffer, DEP_DELAY_NEW))
x + geom_jitter(alpha = 0.5) + geom_smooth(method = "lm", colour = "#FFC000") +
  ggtitle(paste("Intercept: ", intercept, " Slope: ", slope)) +
  xlab("Turnaround Time (min)") + ylab("Departure Delay (minutes)")
# SAME BUT WITH ID
# Southwest only: departure delay vs. flight number of the day (first 7 legs)
df_temp <- df_subset[ df_subset$Flight == 'Same Plane'
                      #& df_subset$Day != 'New Day'
                      & df_subset$Buffer>0
                      #& df_subset$Buffer<90
                      & df_subset$id<8
                      # & df_subset$id>=2
                      # & ( (df_subset$DEP_DELAY_NEW<=3 & df_subset$id<=1) | (df_subset$id>1) )
                      & df_subset$Carrier_Type == 'Southwest'
                      # & df_subset$DEP_DELAY_NEW<300
                      # & df_subset$LATE_AIRCRAFT_DELAY<50
                      ,]
cor(df_temp$id, df_temp$DEP_DELAY_NEW)
# Fit once and reuse (the original refit the identical model three times)
fit <- lm(DEP_DELAY_NEW ~ id, data = df_temp)
summary(fit)
intercept <- round(coef(fit)[1], 2)
slope <- round(coef(fit)[2], 2)
x <- ggplot(df_temp, aes(id, DEP_DELAY_NEW))
x + geom_jitter(alpha = 0.5) + geom_smooth(method = "lm", colour = "#FFC000") +
  ggtitle(paste("Intercept: ", intercept, " Slope: ", slope)) +
  xlab("Flight number of the day") + ylab("Departure Delay (minutes)")
# SAME BUT WITH ID
# Point-to-point carriers (excluding Southwest): departure delay vs. flight
# number of the day (first 7 legs)
df_temp <- df_subset[ df_subset$Flight == 'Same Plane'
                      #& df_subset$Day != 'New Day'
                      & df_subset$Buffer>0
                      #& df_subset$Buffer<90
                      & df_subset$id<8
                      # & df_subset$id>=2
                      # & ( (df_subset$DEP_DELAY_NEW<=3 & df_subset$id<=1) | (df_subset$id>1) )
                      & df_subset$Carrier_Type == 'Point-to-Point without Southwest'
                      # & df_subset$DEP_DELAY_NEW<300
                      # & df_subset$LATE_AIRCRAFT_DELAY<50
                      ,]
cor(df_temp$id, df_temp$DEP_DELAY_NEW)
# Fit once and reuse (the original refit the identical model three times)
fit <- lm(DEP_DELAY_NEW ~ id, data = df_temp)
summary(fit)
intercept <- round(coef(fit)[1], 2)
slope <- round(coef(fit)[2], 2)
x <- ggplot(df_temp, aes(id, DEP_DELAY_NEW))
x + geom_jitter(alpha = 0.5) + geom_smooth(method = "lm", colour = "#FFC000") +
  ggtitle(paste("Intercept: ", intercept, " Slope: ", slope)) +
  xlab("Flight number of the day") + ylab("Departure Delay (minutes)")
# Departure delay vs. flight number for late-in-the-day legs (id >= 8);
# first legs are only kept when essentially on time (<= 3 min delay).
df_temp <- df_subset[ df_subset$Flight == 'Same Plane'
                      #& df_subset$Day != 'New Day'
                      & df_subset$Buffer>0
                      & df_subset$id>=8
                      #& df_subset$id>=1
                      #& df_subset$Buffer<=35
                      & ( (df_subset$DEP_DELAY_NEW<=3 & df_subset$id<=1) | (df_subset$id>1) )
                      # & df_subset$Carrier_Type == 'Southwest'
                      # & df_subset$DEP_DELAY_NEW>15
                      # & df_subset$LATE_AIRCRAFT_DELAY<50
                      ,]
cor(df_temp$id, df_temp$DEP_DELAY_NEW)
# Fit once and reuse (the original refit the identical model three times)
fit <- lm(DEP_DELAY_NEW ~ id, data = df_temp)
summary(fit)
intercept <- round(coef(fit)[1], 2)
slope <- round(coef(fit)[2], 2)
x <- ggplot(df_temp, aes(id, DEP_DELAY_NEW))
x + geom_jitter(alpha = 0.5) + geom_smooth(method = "lm", colour = "#FFC000") +
  ggtitle(paste("Intercept: ", intercept, " Slope: ", slope)) +
  xlab("Flight number of the day") + ylab("Departure Delay (minutes)")
# summary(lm(df_temp$LATE_AIRCRAFT_DELAY ~ df_temp$Buffer + df_temp$id, data = df_temp))
# SAVING FLIGHTS UNDER 30 minutes turnaround
# Simulation: impose a minimum turnaround of 30 min (v1) / 35 min (v1_2) on
# Southwest flights and estimate how many currently-delayed departures
# (DEP_DELAY_NEW >= 15) would become on time, i.e. the change in the
# on-time-performance (OTP) rate.
#How many SOuthwest flights have turnaround times under 30 minutes?
w <- df_subset[ df_subset$Carrier_Type == 'Southwest'
                     & df_subset$Buffer<35, ]
nrow(w)
# From here on df_subset is Southwest-only, so the repeated
# Carrier_Type == 'Southwest' conditions below are redundant (but harmless).
df_subset <- df_subset[ df_subset$Carrier_Type == 'Southwest' , ]
# Treat missing delay / turnaround values as 0 before arithmetic
df_subset$DEP_DELAY_NEW<-ifelse( is.na(df_subset$DEP_DELAY_NEW) ,0,df_subset$DEP_DELAY_NEW)
df_subset$Buffer <-ifelse( is.na(df_subset$Buffer) ,0,df_subset$Buffer)
df_subset$Buffer_Positive <-ifelse( df_subset$Buffer<0 ,0,df_subset$Buffer)
# New turnaround = max(Buffer, 30) for v1 and max(Buffer, 35) for v1_2
df_subset$New_Buffer <-ifelse( df_subset$Buffer<30
                     & df_subset$Carrier_Type == 'Southwest' , 30 ,df_subset$Buffer)
df_subset$New_Buffer_v1_2 <-ifelse( df_subset$Buffer<35
                     & df_subset$Carrier_Type == 'Southwest' , 35 ,df_subset$Buffer)
# Extra minutes added per flight by the 35-min floor
df_subset$New_Buffer_Diff_v1_2 <-ifelse( df_subset$Buffer<35
                     & df_subset$Carrier_Type == 'Southwest' , df_subset$New_Buffer_v1_2 - df_subset$Buffer_Positive ,0)
z<- df_subset[ df_subset$Carrier_Type == 'Southwest'
                     & df_subset$Buffer<35, ]
mean(z$New_Buffer_Diff_v1_2)
# v1: delay reduced by half the buffer shortfall, scaled by the flight's
# position in the day (id); clamped at 0 below
df_subset$New_Recommended_Delay <-ifelse( df_subset$Buffer<30
                     & df_subset$Carrier_Type == 'Southwest' ,
                     df_subset$DEP_DELAY_NEW - ((30-df_subset$Buffer)/2)*df_subset$id ,
                     df_subset$DEP_DELAY_NEW)
df_subset$New_Recommended_Delay <-ifelse( df_subset$New_Recommended_Delay<0 ,0,df_subset$New_Recommended_Delay)
# v2: flat 5-minute reduction per flight position
df_subset$New_Recommended_Delay_v2_by_id <-ifelse( df_subset$Buffer<30
                     & df_subset$Carrier_Type == 'Southwest' ,
                     df_subset$DEP_DELAY_NEW - 5*df_subset$id ,
                     df_subset$DEP_DELAY_NEW)
df_subset$New_Recommended_Delay_v2_by_id <-ifelse( df_subset$New_Recommended_Delay_v2_by_id<0 ,0,df_subset$New_Recommended_Delay_v2_by_id)
# v1_2: same as v1 but with a 35-minute floor
df_subset$New_Recommended_Delay_v1_2 <-ifelse( df_subset$Buffer<35
                     & df_subset$Carrier_Type == 'Southwest' ,
                     df_subset$DEP_DELAY_NEW - ((35-df_subset$Buffer)/2)*df_subset$id ,
                     df_subset$DEP_DELAY_NEW)
df_subset$New_Recommended_Delay_v1_2 <-ifelse( df_subset$New_Recommended_Delay_v1_2<0 ,0,df_subset$New_Recommended_Delay_v1_2)
# Flights currently delayed (>= 15 min) that each scenario turns on time
with_new_delay <- df_subset[ df_subset$Carrier_Type == 'Southwest'
                     & df_subset$DEP_DELAY_NEW>=15
                     & df_subset$Buffer<30
                     & df_subset$New_Recommended_Delay<15 ,
                     c("CARRIER","id","Carrier_Type", "TAIL_NUM", "FL_DATE",
                       "Buffer","New_Buffer", "DEP_DELAY_NEW",
                       "New_Recommended_Delay", "DEP_DEL15") ]
with_new_delay_v2 <- df_subset[ df_subset$Carrier_Type == 'Southwest'
                     & df_subset$DEP_DELAY_NEW>=15
                     & df_subset$Buffer<30
                     & df_subset$New_Recommended_Delay_v2_by_id<15 ,
                     c("CARRIER","id","Carrier_Type", "TAIL_NUM", "FL_DATE",
                       "Buffer","New_Buffer", "DEP_DELAY_NEW",
                       "New_Recommended_Delay_v2_by_id", "DEP_DEL15") ]
# NOTE(review): this v1_2 selection lists "New_Recommended_Delay_v2_by_id"
# where "New_Recommended_Delay_v1_2" looks intended (copy-paste?).  The OTP
# figures below only use nrow(), so the result is unaffected, but the
# displayed column is misleading -- confirm.
with_new_delay_v1_2 <- df_subset[ df_subset$Carrier_Type == 'Southwest'
                     & df_subset$DEP_DELAY_NEW>=15
                     & df_subset$Buffer<35
                     & df_subset$New_Recommended_Delay_v1_2<15 ,
                     c("CARRIER","id","Carrier_Type", "TAIL_NUM", "FL_DATE",
                       "Buffer","New_Buffer", "DEP_DELAY_NEW",
                       "New_Recommended_Delay_v2_by_id", "DEP_DEL15") ]
x <- df_subset[ df_subset$Carrier_Type == 'Southwest' ,
                c("CARRIER","id","Carrier_Type", "TAIL_NUM", "FL_DATE",
                  "Buffer", "DEP_DELAY_NEW", "DEP_DEL15") ]
# On-time rates after each scenario vs. before, and the improvements
ontime_after_v1 <- 1-(sum(x$DEP_DEL15)-nrow(with_new_delay))/nrow(x)
ontime_after_v2 <- 1-(sum(x$DEP_DEL15)-nrow(with_new_delay_v2))/nrow(x)
ontime_after_v1_2 <- 1-(sum(x$DEP_DEL15)-nrow(with_new_delay_v1_2))/nrow(x)
ontime_before <- 1-(sum(x$DEP_DEL15))/nrow(x)
ontime_after_v1 - ontime_before
ontime_after_v2 - ontime_before
ontime_after_v1_2 - ontime_before
# Distribution of the simulated (floored) turnaround times for Southwest:
# histogram with a normal curve overlaid, plus summary statistics.
df_Analysis <- df_subset[ df_subset$Flight == 'Same Plane'
                     & df_subset$Day != 'New Day'
                     & df_subset$New_Buffer>0
                     & df_subset$New_Buffer<120 , ]
df_temp <- df_Analysis[ df_Analysis$Carrier_Type == 'Southwest', c("CARRIER","Carrier_Type", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"New_Buffer","Day","Flight", "GMTO", "GMTD","DEP_TIME", "DEP_DELAY","DEP_DELAY_NEW","DEP_DEL15", "DEP_DELAY_GROUP", "ARR_TIME","dep_hours") ] #, "ARR_DELAY","WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
df_temp
summary(df_temp)
x <- as.numeric(df_temp$New_Buffer)
h<-hist(x, breaks=20, col="#1a3260", xlab="Turn-Around Time (min)", ylab="Number of Flights",
        main="Southwest")
# Overlay a normal density scaled to the histogram's count axis
xfit<-seq(min(x),max(x),length=40)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
lines(xfit, yfit, col="black", lwd=2)
# NOTE(review): kurtosis()/skewness() are not base R -- they require a
# package such as moments or e1071, which is not loaded in this script.
mean(df_temp$New_Buffer)
kurtosis(df_temp$New_Buffer)
skewness(df_temp$New_Buffer)
sd(df_temp$New_Buffer)
#V1_2
# Same plot and statistics for the 35-minute-floor scenario
df_Analysis <- df_subset[ df_subset$Flight == 'Same Plane'
                     & df_subset$Day != 'New Day'
                     & df_subset$New_Buffer>0
                     & df_subset$New_Buffer<120 , ]
df_temp <- df_Analysis[ df_Analysis$Carrier_Type == 'Southwest', c("CARRIER","Carrier_Type", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","CRS_DEP_TIME","CRS_ARR_TIME" ,"New_Buffer_v1_2","Day","Flight", "GMTO", "GMTD","DEP_TIME", "DEP_DELAY","DEP_DELAY_NEW","DEP_DEL15", "DEP_DELAY_GROUP", "ARR_TIME","dep_hours") ] #, "ARR_DELAY","WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
df_temp
summary(df_temp)
x <- as.numeric(df_temp$New_Buffer_v1_2)
h<-hist(x, breaks=20, col="#1a3260", xlab="Turn-Around Time (min)", ylab="Number of Flights",
        main="Southwest")
xfit<-seq(min(x),max(x),length=40)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
lines(xfit, yfit, col="black", lwd=2)
mean(df_temp$New_Buffer_v1_2)
kurtosis(df_temp$New_Buffer_v1_2)
skewness(df_temp$New_Buffer_v1_2)
sd(df_temp$New_Buffer_v1_2)
# NOTE(review): the two lines below look broken as written -- `Max_Id` is
# not among the columns selected into df_temp above, and df_flights (the
# summarised result) has no Carrier_Type column to group by.  Confirm the
# intended input data frame before running.
df_flights <- df_temp %>% group_by(CARRIER, TAIL_NUM, FL_DATE) %>% summarise(Number_Flights = max(Max_Id))
df_flights_Carrier <- df_flights %>% group_by(CARRIER, Carrier_Type) %>% summarise(Number_Flights_per_day = mean(Number_Flights))
# MAIN2
# Same-plane, same-day legs with a 0-40 minute turnaround
df_temp <- df_subset[ df_subset$Flight == 'Same Plane'
                      & df_subset$Day != 'New Day'
                      & df_subset$Buffer>0
                      # & df_subset$Buffer>120
                      & df_subset$Buffer<=40
                      # & df_subset$Carrier_Type != 'Other'
                      # & df_subset$DEP_DELAY_NEW>15
                      # & df_subset$LATE_AIRCRAFT_DELAY<10
                      ,]
# Fit once and reuse (the original refit the identical model twice)
fit <- lm(LATE_AIRCRAFT_DELAY ~ Buffer, data = df_temp)
intercept <- round(coef(fit)[1], 2)
slope <- round(coef(fit)[2], 2)
x <- ggplot(df_temp, aes(Buffer, LATE_AIRCRAFT_DELAY))
x + geom_jitter(alpha = 0.5) + geom_smooth(method = "lm", colour = "#FFC000") +
  ggtitle(paste("Intercept: ", intercept, " Slope: ", slope)) +
  xlab("Turnaround Time (min)") + ylab("Late Aircraft Departure Delay (minutes)")
# MAIN3
# Same-plane, same-day legs with a 40-60 minute turnaround
df_temp <- df_subset[ df_subset$Flight == 'Same Plane'
                      & df_subset$Day != 'New Day'
                      & df_subset$Buffer>40
                      # & df_subset$Buffer>120
                      & df_subset$Buffer<=60
                      # & df_subset$Carrier_Type != 'Other'
                      # & df_subset$DEP_DELAY_NEW>15
                      # & df_subset$LATE_AIRCRAFT_DELAY<10
                      ,]
# Fit once and reuse it everywhere below.  The original refit the same model
# repeatedly, and one ggtitle() even called coef() with NO argument, which
# errors -- fixed by reusing `fit`.  The unused `c <- 20` (which masked
# base::c) has been removed.
fit <- lm(LATE_AIRCRAFT_DELAY ~ Buffer, data = df_temp)
intercept <- round(coef(fit)[1], 2)
slope <- round(coef(fit)[2], 2)
x <- ggplot(df_temp, aes(Buffer, LATE_AIRCRAFT_DELAY))
x + geom_jitter(alpha = 0.5) + geom_smooth(method = "lm", colour = "#FFC000") +
  ggtitle(paste("Intercept: ", intercept, " Slope: ", slope)) +
  xlab("Turnaround Time (min)") + ylab("Late Aircraft Departure Delay (minutes)")
# Bucket turnaround times into coarse bins.
# NOTE(review): the <60 bucket is coded 0 where the 2/4/../8 progression
# suggests 6 was intended -- kept as-is; confirm before relying on `cut`.
df_temp$cut <- ifelse(df_temp$Buffer<20,2,
                      ifelse(df_temp$Buffer<40,4,
                             ifelse(df_temp$Buffer<60,0,
                                    ifelse(df_temp$Buffer<80,8,0))))
x <- ggplot(df_temp, aes(Buffer, LATE_AIRCRAFT_DELAY))
x + geom_jitter(alpha = 0.5) + geom_smooth(method = "lm", colour = "#FFC000") +
  ggtitle(round(coef(fit)[2], 2))
fit  # echo the overall model (replaces the original bare lm() call)
# Per-bucket regressions of late-aircraft delay on turnaround time
y <- df_temp %>% group_by(cut) %>% do(fit = lm(LATE_AIRCRAFT_DELAY ~ Buffer, data = .))
tidy(y, fit)
x + geom_jitter(alpha = 0.5) + geom_smooth(method = "lm", colour = "#FFC000") +
  ggtitle(round(coef(fit)[2], 2))
x + geom_jitter(alpha = 0.5) + geom_smooth(method = "lm", colour = "#FFC000") + facet_wrap(~cut, scales = "free_x")
# Graph Flight number of the day vs. Late Aircraft Delay
# Early-in-the-day legs (flight number < 8) with at least a 5-minute delay
df_temp <- df_subset[ df_subset$Flight == 'Same Plane'
                      #& df_subset$Day != 'New Day'
                      & df_subset$Buffer>0
                      & df_subset$id<8
                      & df_subset$DEP_DELAY_NEW>=5
                      , ]
# Fit once and reuse (the original refit the identical model four times)
fit <- lm(LATE_AIRCRAFT_DELAY ~ id, data = df_temp)
round(coef(fit)[2], 2)
intercept <- round(coef(fit)[1], 2)
slope <- round(coef(fit)[2], 2)
x <- ggplot(df_temp, aes(id, LATE_AIRCRAFT_DELAY))
x + geom_jitter(alpha = 0.5) + geom_smooth(method = "lm", colour = "#FFC000") +
  ggtitle(paste("Intercept: ", intercept, " Slope: ", slope)) +
  xlab("Flight # of the day") + ylab("Late Aircraft Arrival (minutes)")
# Late-in-the-day legs (flight number >= 8)
df_temp <- df_subset[ df_subset$Flight == 'Same Plane'
                      #& df_subset$Day != 'New Day'
                      & df_subset$Buffer>0
                      & df_subset$id>=8
                      & df_subset$DEP_DELAY_NEW>=5
                      , ]
fit <- lm(LATE_AIRCRAFT_DELAY ~ id, data = df_temp)
round(coef(fit)[2], 2)
intercept <- round(coef(fit)[1], 2)
slope <- round(coef(fit)[2], 2)
x <- ggplot(df_temp, aes(id, LATE_AIRCRAFT_DELAY))
x + geom_jitter(alpha = 0.5) + geom_smooth(method = "lm", colour = "#FFC000") +
  ggtitle(paste("Intercept: ", intercept, " Slope: ", slope)) +
  xlab("Flight # of the day") + ylab("Late Aircraft Arrival (minutes)")
# Inspect aircraft flying more than 12 legs in a day (excluding carrier HA)
df_subset[ df_subset$id>12 & df_subset$CARRIER != "HA", c("CARRIER","Carrier_Type", "TAIL_NUM", "FL_DATE","ORIGIN_CITY_NAME","DEST_CITY_NAME","DEP_DELAY","DEP_DELAY_NEW", "LATE_AIRCRAFT_DELAY","CARRIER_DELAY", "WEATHER_DELAY","NAS_DELAY", "SECURITY_DELAY" ) ]
|
00d4396a7f97f31fb0cc1787ec4e6770923e6146 | e74744117fbfbc41973acbcaf71c49e583ae175a | /man/get_statsDB.Rd | 2ab539c889e754ce39fb964c8760e1631ed795d2 | [] | no_license | pa00gz/junr | 621460d0af704dc61d7934e11ca07826953bcd6e | dd6d1173891296ae6d9537ce0814a397faba1273 | refs/heads/master | 2022-04-02T08:52:38.355962 | 2020-02-01T08:16:13 | 2020-02-01T08:16:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 372 | rd | get_statsDB.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getstats.R
\name{get_statsDB}
\alias{get_statsDB}
\title{Get hits to dashboards}
\usage{
get_statsDB(base_url, api_key)
}
\arguments{
\item{base_url}{The base URL of the Junar service}
\item{api_key}{The user's API key for the Junar service}
}
\description{
Retrieve the list of hit counts recorded for dashboards on the Junar service.
}
|
88943ea6d97d40cf27f243fd4f23fa7c6eae8764 | 93fef68695ec291350e728b928c608f6cb9e09eb | /NewTCGA_2017scripts/Thesis_final_scripts/Prognostic_lncs_more_detail_paper/final_scripts_2019/revisions_2020/figure6_de_analysis.R | 0671c9cefe57b003c125a7b3e7c607d83dabb15d | [] | no_license | HongyuanWu/lncRNAs_TCGA | ae4fa9202704545fc59a9dae19dabeeda2b7cb34 | cbfe2356f8d65b20672dcc378fe7de309eec3dba | refs/heads/master | 2023-07-28T00:11:29.152750 | 2021-09-09T13:33:01 | 2021-09-09T13:33:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,684 | r | figure6_de_analysis.R | set.seed(911)
source("/u/kisaev/lncRNAs_TCGA/NewTCGA_2017scripts/Thesis_final_scripts/Prognostic_lncs_more_detail_paper/final_scripts_2019/revisions_2020/load_data.R")
#setWD
setwd("/.mounts/labs/reimandlab/private/users/kisaev/Thesis/TCGA_FALL2017_PROCESSED_RNASEQ/lncRNAs_2019_manuscript")
#------FEATURES-----------------------------------------------------
allCands = readRDS("final_candidates_TCGA_PCAWG_results_100CVsofElasticNet_June15.rds")
allCands = subset(allCands, data == "TCGA") #173 unique lncRNA-cancer combos, #166 unique lncRNAs
allCands$combo = unique(paste(allCands$gene, allCands$cancer, sep="_"))
cands_dups = unique(allCands$gene[which(duplicated(allCands$gene))])
colnames(allCands)[7] = "Cancer"
allCands = merge(allCands, canc_conv, by="Cancer")
allCands$combo = paste(allCands$gene, allCands$type)
#------------------------------------------------------------------------------
#Take each lncRNA candidate and conduct LIMMA diff exp between lncRNA
#high and low risk groups
#------------------------------------------------------------------------------
library(edgeR)
library(limma)
library(corrplot)
#COSMIC cancer gene census
census = read.csv("/.mounts/labs/reimandlab/private/users/kisaev/Thesis/TCGA_FALL2017_PROCESSED_RNASEQ/Census_allFri_Jul_13_16_55_59_2018.csv")
#get ensg
get_census_ensg = function(genes){
  # Extract Ensembl gene ids (entries containing "ENSG") from a
  # comma-separated synonym string, e.g. "TP53,ENSG00000141510" gives
  # "ENSG00000141510".  Returns character(0) when none is present.
  # Base-R grep() replaces the original stringr::str_detect() call, removing
  # the stringr dependency; as.character() guards against factor input from
  # pre-R-4.0 read.csv().
  glist = unlist(strsplit(as.character(genes), ","))
  grep("ENSG", glist, value = TRUE, fixed = TRUE)
}
# Attach the extracted Ensembl ids to the census table (list column, since
# each synonym string can yield zero or more ids)
census$ensg = sapply(census$Synonyms, get_census_ensg)
# Raw PCG count matrix (patients x genes) with clinical annotation
pcg_counts = readRDS("/.mounts/labs/reimandlab/private/users/kisaev/Thesis/TCGA_FALL2017_PROCESSED_RNASEQ/counts_19438_lncRNAs_tcga_all_cancers_March13_wclinical_data.rds")
table(pcg_counts$type)
#add in counts from LAML and SKCM
laml=readRDS("/u/kisaev/TCGA_LAML_count_data/LAML_count_data.rds")
skcm=readRDS("/u/kisaev/TCGA_SKCM_count_data/SKCM_count_data.rds")
#clean up pcg counts
# Keep only the ENSG expression columns plus patient id and cancer type
cols = colnames(pcg_counts)[which(str_detect(colnames(pcg_counts), "ENSG"))]
z = which(colnames(pcg_counts) %in% c(cols, "patient", "type"))
pcg_counts = pcg_counts[,z]
#need to bind with new data
# Align LAML/SKCM to the same column set/order before stacking
# (assumes both contain every column of pcg_counts -- rbind would fail otherwise)
laml<-laml[names(pcg_counts)]
skcm<-skcm[names(pcg_counts)]
pcg_counts = rbind(pcg_counts, laml, skcm)
#keep only patients that are the in the main rna matrix to keep same list of patients
z = which(pcg_counts$patient %in% rna$patient)
pcg_counts = pcg_counts[z,]
table(pcg_counts$type)
#------FEATURES-----------------------------------------------------
#1. Get lncRNA - median within each tissue type
# Final combo key: "<gene>_<full cancer name>" (overwrites earlier versions)
allCands$combo = paste(allCands$gene, allCands$Cancer, sep="_")
combos = unique(allCands$combo)
#canc type to cancer conversion
canc_conv = unique(rna[,c("type", "Cancer")])
get_name_pcg = function(pcg){
  # Map an Ensembl gene id to its symbol via the global `hg38` table.
  # When several rows match, the first match wins; when none matches the
  # result is zero-length (same behaviour as the original implementation).
  idx <- head(which(hg38$ensgene == pcg), 1)
  hg38$symbol[idx]
}
#3. Want ranking separately for the high vs. low lncRNA expression groups
#---------------------------------------------------------
#Function 1
#for each lnc-cancer, label patient as lncRNA-risk or non-risk
#---------------------------------------------------------
#z = which(str_detect(combos, "Brain Lower Grade Glioma"))
#combos = combos[z]
# data.table indexing differs from data.frame; coerce so the column/row
# subsetting inside get_lnc_canc behaves as written
rna=as.data.frame(rna)
get_lnc_canc = function(comb){
  # For one "<lncRNA>_<cancer>" combination, tag every tumour of that cancer
  # type as lncRNA "high"/"low" (median dichotomy of expression in `rna`) and
  # as "RISK"/"noRISK" according to the direction of the candidate's hazard
  # ratio in `allCands`.  Returns that cancer's PCG count data with the
  # lncRNA expression, lnc_tag and risk columns appended.
  # Relies on globals: rna, pcg_counts, canc_conv, allCands, pcg.
  lnc = unlist(strsplit(comb, "_"))[1]
  canc = unlist(strsplit(comb, "_"))[2]
  canc_type = canc_conv$type[which(canc_conv$Cancer == canc)]
  canc_data = subset(pcg_counts, type == canc_type)

  # lncRNA expression for this cancer's patients
  z = which(colnames(rna) %in% c("patient", lnc, "type"))
  lnc_dat = rna[,z]
  z = which(lnc_dat$type == canc_type)
  lnc_dat = lnc_dat[z,]

  # Median dichotomy; when the median is 0, "high" means any non-zero value
  med = median(as.numeric(lnc_dat[,which(colnames(lnc_dat)==lnc)]))
  if(med == 0){
    z = which(lnc_dat[,which(colnames(lnc_dat)==lnc)] > 0)
  }
  if(!(med == 0)){
    z = which(lnc_dat[,which(colnames(lnc_dat)==lnc)] >= med)
  }
  lnc_dat$lnc_tag = ""
  lnc_dat$lnc_tag[z] = "high"
  lnc_dat$lnc_tag[-z] = "low"

  #merge back with original dataframe
  canc_data = merge(canc_data, lnc_dat, by=c("patient", "type"))
  #keep only PCGs (plus the lncRNA columns) -- drop other lncRNAs
  pcgs_id = unique(colnames(pcg))
  z = which(colnames(canc_data) %in% c(pcgs_id, colnames(lnc_dat)))
  canc_data = canc_data[,z]

  # Risk direction from the candidate's Cox hazard ratio:
  # HR < 1 -> LOW expression is the risk group; HR > 1 -> HIGH expression is.
  z = which(allCands$combo == comb)
  HR = as.numeric(allCands$HR[z])
  canc_data$risk = ""
  # BUG FIX: the original indexed canc_data$median -- a column that does not
  # exist (the grouping column is named lnc_tag) -- so `risk` was never
  # filled in and stayed "".
  if(HR < 1){
    canc_data$risk[canc_data$lnc_tag == "high"] = "noRISK"
    canc_data$risk[canc_data$lnc_tag == "low"] = "RISK"
  }
  if(HR > 1){
    canc_data$risk[canc_data$lnc_tag == "high"] = "RISK"
    canc_data$risk[canc_data$lnc_tag == "low"] = "noRISK"
  }
  canc_data$lnc = lnc
  canc_data$canc = canc
  colnames(canc_data)[which(colnames(canc_data)==lnc)] = "lncRNA"
  return(canc_data)
}#end function evaluate_each_lnc
# Build one labelled PCG-count data frame per lncRNA-cancer candidate
all_canc_lnc_data = llply(combos, get_lnc_canc, .progress="text")
#---------------------------------------------------------
#Function 2
#within each cancer,
#calculate for each lncRNA the differentially expressed PCGs
#---------------------------------------------------------
#for lgg add IDH mutation
# LGG subtype annotation (TCGAbiolinks) used to adjust for IDH status
lgg_dat = readRDS("TCGA_lgg_wsubtype_info_biolinks.rds")
diffE <- function(d){
  # limma-voom differential expression between lncRNA "high" and "low"
  # patients (d$lnc_tag) for one cancer's raw PCG counts.
  # `d` is one element of all_canc_lnc_data.  For LGG the model is also
  # adjusted for IDH mutation status (patients with unknown status dropped).
  # Returns a data.table of genes with adj.P.Val <= 0.05 beyond the
  # fold-change cutoff.  Relies on globals: lgg_dat, get_name_pcg().
  print(d$lnc[1])
  if(d$type[1] == "LGG"){
    d = merge(d, lgg_dat, by="patient")
    d$IDH.status = as.character(d$IDH.status)
    z = which(is.na(d$IDH.status))
    # BUG FIX: d[-integer(0), ] selects ZERO rows, so when no IDH status was
    # missing the original emptied the data frame; only drop when z is
    # non-empty.
    if(length(z) > 0){
      d = d[-z,]
    }
    design <- model.matrix(~ 0 + factor(d$lnc_tag) + factor(d$IDH.status))
    colnames(design) <- c("high", "low", "IDH_WT")
  } else {
    # BUG FIX: the original rebuilt this unadjusted design UNCONDITIONALLY
    # after the LGG branch, silently discarding the IDH covariate.
    design <- model.matrix(~ 0 + factor(d$lnc_tag))
    colnames(design) <- c("high", "low")
  }
  # Genes x patients raw-count matrix
  z = which(str_detect(colnames(d), "ENSG"))
  rownames(d) <- d$patient
  expression <- t(d[,z])
  # Filter weakly expressed genes: keep those with CPM > 0.5 in >= 10 patients
  myCPM <- cpm(expression)
  thresh <- myCPM > 0.5
  keep <- rowSums(thresh) >= 10
  counts.keep <- expression[keep,]
  y <- DGEList(counts.keep)
  #TMM normalization to eliminate composition biases between libraries
  y <- calcNormFactors(y)
  #apply voom normalization (models the mean-variance trend so limma's
  #linear models can be applied to count data)
  v <- voom(y,design,plot = TRUE)
  # Fit the linear model and test high vs. low
  fit <- lmFit(v)
  cont.matrix <- makeContrasts(LowvsHigh=high-low, levels=design)
  fit2 <- contrasts.fit(fit, cont.matrix)
  fit2 <- eBayes(fit2)
  # topTable's adj.P.Val column is already BH/FDR-adjusted
  t <- topTable(fit2, coef=1, n="Inf")
  t$ID = rownames(t)
  t$cancer = d$type[1]
  t$gene_name = as.character(llply(t$ID, get_name_pcg))
  t$lnc = d$lnc[1]
  # Keep significant genes beyond the fold-change cutoff.
  # NOTE(review): limma's logFC is log2, but the cutoff uses natural log(2)
  # (~0.69, i.e. ~1.6-fold) -- kept exactly as in the original; confirm the
  # intended threshold.
  t = as.data.table(filter(t, adj.P.Val <=0.05 & ((logFC <= log(0.5)) | (logFC >= log(2)))))
  return(t)
}
#pdf("LGG_HOXA10_volcano_plot_Aug21.pdf")
#diffE(all_canc_lnc_data[[2]])
#dev.off()
#pdf("volcano_plots_diffE_lncRNA_risks.pdf")
#diffEresults = llply(all_canc_lnc_data, diffE, .progress="text")
# Run the differential-expression analysis for every lncRNA-cancer candidate
diffEresults = llply(all_canc_lnc_data, diffE, .progress="text")
#dev.off()
# Flatten the per-candidate results into one table
diffEresults1 = ldply(diffEresults, data.frame)
diffEresults1 = as.data.table(diffEresults1)
# NOTE(review): the same object is saved twice under two dated filenames;
# presumably the second (lgg_nov30) supersedes the first -- confirm.
saveRDS(diffEresults1, file="diff_expressed_PCGs_lncRNA_risk_groups_Aug21.rds")
saveRDS(diffEresults1, file="diff_expressed_PCGs_lncRNA_risk_groups_lgg_nov30.rds")
##########
###DONE###
##########
686b3dc71ea6c07fcd61fe7dc687f5192b390eab | 0a14ea580df557be55f6c3e583a2c08054805cf8 | /Plot1.R | 4a92c75e4bc5844cb448a72711e9d614c1f449b5 | [] | no_license | danwilliams39/ExploringDat.Project | 198feeeeabc5d02534e9c080d6692677a89d7a3e | 61ff2ea459cfee666e7f19233c6006fe4366524e | refs/heads/master | 2020-07-03T19:34:00.343173 | 2016-08-26T15:34:08 | 2016-08-26T15:34:08 | 66,581,017 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 559 | r | Plot1.R | install.packages("gridExtra")
library(gridExtra)
unzip("Data.2.zip")
class.code=readRDS("Source_Classification_Code.rds")
summary.pm25=readRDS("summarySCC_PM25.rds")
master=merge(class.code[,c("SCC","SCC.Level.One","SCC.Level.Two","SCC.Level.Three")],summary.pm25,by.x="SCC",by.y = "SCC")
year.table=with(master,tapply(Emissions,year,sum))
#plot 1 of summary of values
plot(year.table,type='l',xlab="Year",ylab="Total PM2.5 Emissions",lwd=3,col="navy",main="Total PM2.5 Emissions in United States",xaxt='n')
axis(1,at=seq(1:4),labels=rownames(year.table))
|
281085d83f4fd4af504a1900884b2c9126a0ef20 | 4f5d384db579ef088bfe75fbbec5ccbbc9ccf00e | /ZIPModel.R | f6df1d124fd3d7482042aef53385e809194c3192 | [] | no_license | mEpiLab/Nohra_SourceAttributionCode | 3c07a71a8648f16ea2b42278a7941b55a3d68434 | d9fa4b9598649d1df76dc55bd7c88dbbbbe69e1b | refs/heads/master | 2020-05-31T03:57:28.148401 | 2019-06-03T22:37:19 | 2019-06-03T22:37:19 | 190,091,228 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,667 | r | ZIPModel.R | # read in the data
setwd("C:/Users/Antoine/Documents/Antoine- Massey/Thesis/C. jejuni/poultry_counts")
dat <- read.csv("poultry_counts.csv") ###encoding="utf8")
library(survival)
# convert sample column to an equivalent volume
# 50uL is 0.050 mL
# 1000uL is 1.000 mL
# p100uL is 4.000 mL as the pellet contains all bugs in 200mL which is then buffered into 5mL,
#then a 100uL sample taken (so equivalent to sampling 4/200 mL) (5/200=0.025, 0.1/0.025=4)
dat$Volume <- 0
levels(dat$Sample) -> samples
dat$Volume[dat$Sample == samples[1]] <- 1 # 1000 uL
dat$Volume[dat$Sample == samples[2]] <- 0.05 # 50 uL
dat$Volume[dat$Sample == samples[3]] <- 4 # 4000 uL
# generate month column
dat$Date <- as.character(dat$Date)
dat$Date <- as.Date(dat$Date, format="%d %B %Y")
dat$season <- factor(dat$Season, levels = c("Spring","Summer","Autumn", "Winter"))
levels(dat$season)
#dat$Weight <- dat$Weight.in.grams
#dat$Weight.in.grams <- NULL
# convert count to numbers, removing TNTC, UC, Dried
# TODO: Potentially TNTC is 'lots' so maybe think about how we can treat this as right censored or some such?
dat$Count <- as.numeric(as.character(dat$Count))
# convert weight to numbers (get rid of commas)
#dat$Weight <- as.numeric(gsub(",", as.character(dat$Weight), replacement="")) / 1000
# zero-inflated poisson model
library("pscl", lib.loc="~/R/win-library/3.1")
model.9 <- zeroinfl (Count ~ Intervention + offset(log(Volume)) | Intervention, data=dat)
summary(model.9)
model.11<-zeroinfl (Count ~ Intervention*Source + offset(log(Volume)) | Intervention* Source, data=dat)
summary(model.11)
|
ae2afce473e3a5fab36301db6ab02cf2c39851c8 | f1d4d986bbfe4d08026fb1c7f5e921becfb8895d | /man/updateImagesOnEdgeAdded.Rd | 502be9cc96d6548298bd20fb20834a5639a9f249 | [
"Apache-2.0"
] | permissive | mickash/Adaptive-Bayesian-Networks | 988d3f2fcfeed921055035437e1f4c52f5f89660 | 56611cf9f8562ebcbfa17d876d2a7d27c201b67a | refs/heads/master | 2020-09-09T14:51:25.727845 | 2019-11-13T14:20:59 | 2019-11-13T14:20:59 | 221,476,013 | 1 | 0 | null | 2019-11-13T14:21:01 | 2019-11-13T14:18:43 | R | UTF-8 | R | false | false | 667 | rd | updateImagesOnEdgeAdded.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/AddEdge.R
\name{updateImagesOnEdgeAdded}
\alias{updateImagesOnEdgeAdded}
\title{Update images after adding an edge}
\usage{
updateImagesOnEdgeAdded(cn, net, dropImages, from, to)
}
\arguments{
\item{cn}{An open RODBC connection.}
\item{net}{The network.}
\item{dropImages}{Whether images should be dropped or updated.}
\item{from}{The index of the parent node}
\item{to}{The index of the child node}
}
\description{
Update the images after adding an edge.
Note that to be able to update the saved network images, the child must be
a binary noisy-or node.
}
\keyword{internal}
|
a7735e33ba1127fccec58ae28713e6fb6f0b4c56 | 63538ef67364d53ae169c7501ae9a95c874eef34 | /man/F2Fun.Rd | f42f2863952df647faf5d4969a548363e1beb267 | [] | no_license | cran/SEA | b9e658c8fd61ebcc7d84fb308d57bdbd32df0fcc | ef7fc9f003eb6c097ea659d3dbc632183b0b2673 | refs/heads/master | 2022-05-03T11:22:27.582739 | 2022-03-30T06:30:12 | 2022-03-30T06:30:12 | 134,713,386 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 551 | rd | F2Fun.Rd | \name{F2Fun}
\alias{F2Fun}
\title{segregation analysis of F2 population}
\description{
Phenotypic observations in an F2 population are often used to identify the mixed major-gene plus polygene inheritance model for quantitative traits in plants.}
\usage{
F2Fun(df,model)
}
\arguments{
\item{df}{phenotype matrix.}
\item{model}{genetic model.}
}
\author{
Wang Jing-Tian, Zhang Ya-Wen, and Zhang Yuan-Ming \cr
Maintainer: Yuanming Zhang<soyzhang@mail.hzau.edu.cn>
}
\examples{
data(F2exdata)
F2Fun(F2exdata, "0MG")
}
|
adcb21970a4dfb81dd93e532bfa0ccbee5421699 | dfc537e95db02570648e1ad40487fffc9aca4b29 | /man/relabel_class.Rd | 78ad0be1d0ca057f08a410e0c32a7b5b7af5f3fb | [
"MIT"
] | permissive | AlineTalhouk/diceR | 294d9b53844e5ae569d8269d29417ecbf9363903 | a97d3295a10881532dba1d294b5f6165e79694be | refs/heads/master | 2023-03-17T15:29:08.135407 | 2023-03-13T19:22:45 | 2023-03-13T19:22:45 | 69,271,060 | 39 | 13 | NOASSERTION | 2021-07-22T18:27:34 | 2016-09-26T16:45:01 | R | UTF-8 | R | false | true | 669 | rd | relabel_class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{relabel_class}
\alias{relabel_class}
\title{Relabel classes to a standard}
\usage{
relabel_class(pred.cl, ref.cl)
}
\arguments{
\item{pred.cl}{vector of predicted cluster assignments}
\item{ref.cl}{vector of reference labels to match to}
}
\value{
A vector of relabeled cluster assignments
}
\description{
Relabel clustering categories to match to a standard by minimizing the
Frobenius norm between the two labels.
}
\examples{
set.seed(2)
pred <- sample(1:4, 100, replace = TRUE)
true <- sample(1:4, 100, replace = TRUE)
relabel_class(pred, true)
}
\author{
Aline Talhouk
}
|
aba8c34907335654d45b209781ed3351c62f86a0 | 5d9af2ec080a8b36f5fc3a1fc4b315f3d879d15a | /R/BIFIE.cdata.select.R | 19fd79b835bae1179473e7502cd7716076a6d1a7 | [] | no_license | alexanderrobitzsch/BIFIEsurvey | cd8f75239a6a73b14e8b241b69bc7c9300829063 | 227f2794ddc19b3f889b54af476dc5e14f44735f | refs/heads/master | 2022-05-02T23:23:22.688282 | 2022-04-05T10:15:01 | 2022-04-05T10:15:01 | 95,295,402 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,081 | r | BIFIE.cdata.select.R | ## File Name: BIFIE.cdata.select.R
## File Version: 1.13

#--- selection variables or datasets in BIFIEcdata objects
# Subset a compact-format BIFIEdata object ('cdata') to a set of variables
# and/or a subset of the imputed datasets, keeping the internal bookkeeping
# (indicator matrix, imputed-value list, index matrix) consistent.
#   bifieobj      - BIFIEdata object in compact data format (cdata must be TRUE)
#   varnames      - variables to retain (NULL keeps all); variable "one" is
#                   always retained if present
#   impdata.index - indices of imputed datasets to retain (NULL keeps all)
# Returns the modified bifieobj.
BIFIE.cdata.select <- function( bifieobj, varnames=NULL, impdata.index=NULL ){
    # This routine only applies to compact-data objects.
    if ( ! bifieobj$cdata ){
        stop("Use 'BIFIE.data.select' or the general function 'BIFIEdata.select'")
    }
    # retain variable "one"
    varnames0 <- bifieobj$varnames
    if ( ! is.null(varnames) ){
        varnames <- union( varnames, intersect( "one", varnames0) )
    }
    #******* do some variable checking
    if ( ! is.null(varnames) ){
        h1 <- setdiff( varnames, bifieobj$varnames )
        if ( length(h1) > 0 ){
            stop( paste0( "Following variables not in BIFIEdata object:\n  ",
                        paste0( h1, collapse=" " ) ) )
        }
    }
    #******** select some imputed datasets
    if ( ! is.null(impdata.index ) ){
        i1 <- impdata.index
        # Columns of datalistM_imputed correspond to imputed datasets.
        bifieobj$datalistM_imputed <- bifieobj$datalistM_imputed[, i1, drop=FALSE]
        bifieobj$Nimp <- length(i1)
    }
    #********* select some variables
    if ( ! is.null( varnames) ){
        # Map the retained variable names to their (1-based) column indices.
        dfr1 <- data.frame( "varnames"=bifieobj$varnames,
                    "index"=seq(1,length(bifieobj$varnames) ) )
        dfr1$selectvars <- 1 * ( dfr1$varnames %in% varnames )
        dfr1 <- dfr1[ dfr1$selectvars==1, ]
        bifieobj$datalistM_ind <- bifieobj$datalistM_ind[, dfr1$index ]
        # Column 2 of datalistM_impindex appears to hold 0-based variable
        # indices (hence the "- 1" offsets) -- keep only rows that refer to
        # retained variables.
        i1 <- bifieobj$datalistM_impindex[,2] %in% ( dfr1$index - 1 )
        bifieobj$datalistM_imputed <- bifieobj$datalistM_imputed[ i1,, drop=FALSE]
        bifieobj$datalistM_impindex <- bifieobj$datalistM_impindex[ i1,, drop=FALSE]
        # Re-number the surviving 0-based variable indices to their new
        # column positions after the selection.
        impindex2 <- match( bifieobj$datalistM_impindex[,2], dfr1$index - 1 ) - 1
        bifieobj$datalistM_impindex[,2] <- impindex2
        bifieobj$dat1 <- bifieobj$dat1[, dfr1$index, drop=FALSE]
        bifieobj$varnames <- bifieobj$varnames[ dfr1$index ]
        # process variable list
        bifieobj$variables <- bifieobj$variables[ dfr1$index,, drop=FALSE]
    }
    # Refresh the variable count after any selection.
    bifieobj$Nvars <- ncol(bifieobj$dat1)
    return(bifieobj)
}
|
f673ff42c94c35efa547cfa60018c370e5891277 | 4344aa4529953e5261e834af33fdf17d229cc844 | /input/gcamdata/R/zaglu_L100.FAO_SUA_connection.R | ad0ae3de84cd9c974510055bd0affc58ab53f76b | [
"ECL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | JGCRI/gcam-core | a20c01106fd40847ed0a803969633861795c00b7 | 912f1b00086be6c18224e2777f1b4bf1c8a1dc5d | refs/heads/master | 2023-08-07T18:28:19.251044 | 2023-06-05T20:22:04 | 2023-06-05T20:22:04 | 50,672,978 | 238 | 145 | NOASSERTION | 2023-07-31T16:39:21 | 2016-01-29T15:57:28 | R | UTF-8 | R | false | false | 17,511 | r | zaglu_L100.FAO_SUA_connection.R | # Copyright 2019 Battelle Memorial Institute; see the LICENSE file.
#' module_aglu_L100.FAO_SUA_connection
#'
#' Pull and further process SUA data needed
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs, a vector of output names, or (if
#'   \code{command} is "MAKE") all the generated outputs: \code{FAO_SUA_APE_balance}
#' @details Pull and further process SUA data needed. Calculate moving average if needed.
#' @importFrom assertthat assert_that
#' @importFrom dplyr bind_rows filter if_else inner_join left_join mutate rename select
#' @importFrom tidyr complete drop_na gather nesting spread replace_na
#' @importFrom tibble tibble
#' @author XZ 2022
module_aglu_L100.FAO_SUA_connection <- function(command, ...) {

  MODULE_INPUTS <-
    c(FILE = "common/GCAM_region_names",
      FILE = "aglu/FAO/FAO_ag_items_PRODSTAT",
      FILE = "aglu/FAO/FAO_an_items_PRODSTAT",
      "GCAM_AgLU_SUA_APE_1973_2019",
      "FAO_AgProd_Kt_All",
      "FAO_AgArea_Kha_All",
      "FAO_Food_Macronutrient_All_2010_2019",
      "FAO_Food_MacronutrientRate_2010_2019_MaxValue")

  MODULE_OUTPUTS <-
    c("L100.FAO_SUA_APE_balance",
      "L100.FAO_ag_HA_ha",
      "L100.FAO_ag_Prod_t",
      "L100.FAO_PRODSTAT_TO_DOWNSCAL",
      "L105.an_Prod_Mt_R_C_Y",
      "L105.an_Prod_Mt_ctry_C_Y",
      "L101.ag_Food_Mt_R_C_Y",
      "L105.an_Food_Mt_R_C_Y",
      "L101.CropMeat_Food_Pcal_R_C_Y",
      "L101.ag_Feed_Mt_R_C_Y",
      "L1091.GrossTrade_Mt_R_C_Y")

  if(command == driver.DECLARE_INPUTS) {
    return(MODULE_INPUTS)
  } else if(command == driver.DECLARE_OUTPUTS) {
    return(MODULE_OUTPUTS)
  } else if(command == driver.MAKE) {

    year <- value <- Year <- Value <- FAO_country <- iso <- NULL # silence package check.

    all_data <- list(...)[[1]]

    # Load required inputs ----
    get_data_list(all_data, MODULE_INPUTS, strip_attributes = TRUE)

    # Key sets and mappings ----
    # Note that fodder crops are included in COMM_CROP though SUA did not have them;
    COMM_CROP <- FAO_ag_items_PRODSTAT %>% filter(!is.na(GCAM_commodity)) %>% distinct(GCAM_commodity) %>% pull
    COMM_MEAT <- FAO_an_items_PRODSTAT %>% filter(!is.na(GCAM_commodity)) %>% distinct(GCAM_commodity) %>% pull

    # 1. Supply-utilization accounting balance ----
    # Change unit and year for later uses
    # data was balanced already and in GCAM regions
    L100.FAO_SUA_APE_balance <-
      GCAM_AgLU_SUA_APE_1973_2019 %>%
      spread(element, value) %>%
      mutate(Net_Export = Export - Import) %>%
      select(-unit) %>%
      left_join_error_no_match(GCAM_region_names, by = "region") %>%
      select(-region) %>%
      gather(element, value, -GCAM_region_ID, -GCAM_commodity, -year) %>%
      # change unit to Mt
      mutate(value = value / 1000) %>%
      # Adding 5-year moving average here (smooths interannual variability;
      # early years without a full window keep the raw value via the NA check)
      dplyr::group_by_at(dplyr::vars(-year, -value)) %>%
      mutate(moving_avg = Moving_average(value, periods = aglu.MODEL_MEAN_PERIOD_LENGTH)) %>%
      ungroup() %>%
      mutate(value = if_else(is.na(moving_avg), value, moving_avg)) %>%
      select(-moving_avg) %>%
      filter(year %in% aglu.AGLU_HISTORICAL_YEARS)

    # 2. Primary crop and meat production and harvested area ----

    ## 2.1. Primary crop production, harvested area, and a combined for downscaling ----
    ##* L100.FAO_ag_Prod_t ----
    # will be used in residual bio
    L100.FAO_ag_Prod_t <-
      FAO_AgProd_Kt_All %>%
      filter(CropMeat %in% c("Crop_Fodder", "Crop_NonFodder")) %>%
      transmute(iso, GCAM_region_ID, item, item_code, year, GCAM_commodity, GCAM_subsector,
                element = "Prod_t", value = value * 1000) %>%
      # Adding 5-year moving average here
      dplyr::group_by_at(dplyr::vars(-year, -value)) %>%
      mutate(moving_avg = Moving_average(value, periods = aglu.MODEL_MEAN_PERIOD_LENGTH)) %>%
      ungroup() %>%
      mutate(value = if_else(is.na(moving_avg), value, moving_avg)) %>%
      select(-moving_avg) %>%
      filter(year %in% aglu.AGLU_HISTORICAL_YEARS)

    ##* L100.FAO_ag_HA_ha ----
    # The file will be used for fertilization related calculation
    L100.FAO_ag_HA_ha <-
      FAO_AgArea_Kha_All %>%
      transmute(iso, GCAM_region_ID, item, item_code, year, GCAM_commodity, GCAM_subsector,
                element = "Area_harvested_ha", value = value * 1000) %>%
      # Adding 5-year moving average here
      dplyr::group_by_at(dplyr::vars(-year, -value)) %>%
      mutate(moving_avg = Moving_average(value, periods = aglu.MODEL_MEAN_PERIOD_LENGTH)) %>%
      ungroup() %>%
      mutate(value = if_else(is.na(moving_avg), value, moving_avg)) %>%
      select(-moving_avg) %>%
      filter(year %in% aglu.AGLU_HISTORICAL_YEARS)

    ##* L100.FAO_PRODSTAT_TO_DOWNSCAL ----
    # Aggregate to GCAM_commodity, GCAM_subsector to downscale to basin later
    L100.FAO_PRODSTAT_TO_DOWNSCAL <-
      L100.FAO_ag_Prod_t %>%
      group_by(iso, GCAM_commodity, GCAM_subsector, year, GCAM_region_ID) %>%
      summarise(Prod_t = sum(value), .groups = "drop") %>%
      ungroup() %>%
      left_join_error_no_match(
        L100.FAO_ag_HA_ha %>%
          group_by(iso, GCAM_commodity, GCAM_subsector, year, GCAM_region_ID) %>%
          summarise(Area_harvested_ha = sum(value), .groups = "drop") %>%
          ungroup(),
        by = c("iso", "GCAM_commodity", "GCAM_subsector", "year", "GCAM_region_ID") )

    ### clean ----
    rm(FAO_AgArea_Kha_All)

    # Sanity check: aggregated crop production (excluding fodder, which SUA
    # lacks) must match the SUA balance's Production element within tolerance.
    assertthat::assert_that(
      L100.FAO_ag_Prod_t %>%
        filter(!GCAM_commodity %in% c("FodderGrass", "FodderHerb")) %>%
        group_by(GCAM_commodity, year, GCAM_region_ID) %>%
        summarise(value = sum(value)/1000000, .groups = "drop") %>%
        ungroup() %>%
        left_join_error_no_match(
          L100.FAO_SUA_APE_balance %>% filter(element == "Production") %>%
            filter(GCAM_commodity %in% COMM_CROP) %>%
            select(-element) %>% rename(value1 = value),
          by = c("GCAM_commodity", "year", "GCAM_region_ID")
        ) %>%
        mutate(diff = abs(value1 - value)) %>%
        filter(diff > 0.0001) %>% nrow() == 0,
      msg = "Inconsistency between L100.FAO_SUA_APE_balance and L100.FAO_ag_Prod_t"
    )

    ## 2.2. Livestock Production ----
    ##* L105.an_Prod_Mt_ctry_C_Y ----
    L105.an_Prod_Mt_ctry_C_Y <-
      FAO_AgProd_Kt_All %>%
      filter(CropMeat %in% c("Meat")) %>%
      # complete year and fill zeros
      complete(nesting(area_code, area, iso, GCAM_region_ID),
               nesting(item_code, item, GCAM_commodity, GCAM_subsector, CropMeat), element,
               year, fill = list(value = 0))%>%
      # Adding 5-year moving average here
      dplyr::group_by_at(dplyr::vars(-year, -value)) %>%
      mutate(moving_avg = Moving_average(value, periods = aglu.MODEL_MEAN_PERIOD_LENGTH)) %>%
      ungroup() %>%
      mutate(value = if_else(is.na(moving_avg), value, moving_avg)) %>%
      select(-moving_avg) %>%
      filter(year %in% aglu.AGLU_HISTORICAL_YEARS) %>%
      group_by(iso, GCAM_commodity, year, GCAM_region_ID) %>%
      summarise(value = sum(value), .groups = "drop") %>%
      ungroup() %>%
      # complete year and fill zeros
      #complete(nesting(iso, GCAM_commodity, GCAM_region_ID), year, fill = list(value = 0)) %>%
      # change unit to Mt
      mutate(value = value / 1000)

    ##* L105.an_Prod_Mt_R_C_Y ----
    L105.an_Prod_Mt_R_C_Y <-
      L105.an_Prod_Mt_ctry_C_Y %>%
      group_by(GCAM_region_ID, GCAM_commodity, year) %>%
      summarise(value = sum(value), .groups = "drop") %>%
      ungroup()

    # Sanity check: regional animal production must match the SUA balance.
    assertthat::assert_that(
      L105.an_Prod_Mt_R_C_Y %>%
        left_join(
          L100.FAO_SUA_APE_balance %>%
            filter(element == "Production") %>%
            filter(GCAM_commodity %in% COMM_MEAT) %>%
            select(-element)%>%
            rename(value1 = value),
          by = c("GCAM_region_ID", "GCAM_commodity", "year")) %>%
        mutate(diff = abs(value1- value)) %>%
        filter(diff > 0.00001) %>% nrow() == 0,
      msg = "Inconsistency between L100.FAO_SUA_APE_balance and L105.an_Prod_Mt_R_C_Y"
    )

    # 3 Food consumption in SUA and Calories----
    # Aggregate macronutrient data to region/commodity and average over the
    # macronutrient reference years.
    FAO_Food_Macronutrient_All_2010_2019 %>%
      filter(year %in% aglu.MODEL_MACRONUTRIENT_YEARS) %>%
      # Aggregate to region and GCAM commodity
      dplyr::group_by_at(vars(GCAM_region_ID, GCAM_commodity, year, macronutrient)) %>%
      summarise(value = sum(value), .groups = "drop") %>%
      # Mean over aglu.MODEL_MACRONUTRIENT_YEARS
      dplyr::group_by_at(vars(GCAM_region_ID, GCAM_commodity, macronutrient)) %>%
      summarise(value = mean(value), .groups = "drop") %>%
      spread(macronutrient, value) ->
      DF_Macronutrient_FoodItem1

    # Derive per-mass conversion rates (cal/g, % fat, % protein) by dividing the
    # macronutrient totals by the food mass from the SUA balance.
    DF_Macronutrient_FoodItem1 %>%
      # NEC is removed by joining
      # though not all food items are consumed in all regions (deal with NA later)
      right_join(
        L100.FAO_SUA_APE_balance %>% # Unit is Mt
          filter(element == "Food",
                 year == dplyr::last(MODEL_BASE_YEARS)),
        by = c("GCAM_region_ID", "GCAM_commodity")
      ) %>%
      # Both data were average already
      transmute(GCAM_region_ID, GCAM_commodity,
                calperg = MKcal / value / 1000,
                fatperc = MtFat / value * 100,
                proteinperc = MtProtein / value * 100) ->
      DF_Macronutrient_FoodItem2

    # We have protein and fat data here but not used in GCAM (dropped here)
    DF_Macronutrient_FoodItem2 %>%
      tidyr::gather(macronutrient, value, calperg:proteinperc) %>%
      # Join max regional conversion for adjustments later
      left_join(
        FAO_Food_MacronutrientRate_2010_2019_MaxValue,
        by = c("GCAM_commodity", "macronutrient")
      ) %>%
      # In rare cases, primary equivalent resulted in lower food mass consumption
      # mainly due to a higher-than-one-extraction rate, e.g., beer of barley
      # or small discrepancies (or possibly representing real processing)
      # thus, the cal per g conversion is larger than the max of the conversion
      # of the corresponding SUA items
      # I.e., a few OtherGrain cases (e.g., Indonesia) and a Mexico soybean case;
      # they all have relatively small consumption/impacts
      # But we use the max of the conversion of the corresponding SUA items to limit the value here
      # mainly for avoiding too different macronutrient rates across regions
      mutate(value = pmin(value, max_macronutrient_value)) %>%
      select(-max_macronutrient_value) %>%
      # There are still NA values e.g., palm oil is not consumed in Canada
      # And fiber crop is not consumed in few regions
      # Fill in NA with world mean
      dplyr::group_by_at(vars(-GCAM_region_ID, -value)) %>%
      mutate(value = if_else(is.na(value), mean(value, na.rm = T), value)) %>%
      ungroup() %>%
      filter(macronutrient == "calperg") %>%
      spread(macronutrient, value) ->
      DF_Macronutrient_FoodItem3_calperg

    # Apply the cal/g rates to all historical food mass to get energy (MKcal).
    DF_Macronutrient_FoodItem4 <-
      L100.FAO_SUA_APE_balance %>% filter(element == "Food") %>%
      rename(Mt = value) %>%
      left_join_error_no_match(DF_Macronutrient_FoodItem3_calperg,
                               by = c("GCAM_commodity", "GCAM_region_ID")) %>%
      mutate(Kcalperg = calperg / 1000,
             MKcal = Kcalperg * Mt * 1000) %>%
      select(-calperg)

    ##* L101.ag_Food_Mt_R_C_Y ----
    L101.ag_Food_Mt_R_C_Y <-
      DF_Macronutrient_FoodItem4 %>%
      filter(GCAM_commodity %in% COMM_CROP) %>%
      transmute(GCAM_region_ID, GCAM_commodity, year, value = Mt)

    ##* L105.an_Food_Mt_R_C_Y ----
    L105.an_Food_Mt_R_C_Y <-
      DF_Macronutrient_FoodItem4 %>%
      filter(GCAM_commodity %in% COMM_MEAT) %>%
      transmute(GCAM_region_ID, GCAM_commodity, year, value = Mt)

    ##* L101.ag_Food_Pcal_R_C_Y ----
    L101.CropMeat_Food_Pcal_R_C_Y <-
      DF_Macronutrient_FoodItem4 %>%
      transmute(GCAM_region_ID, GCAM_commodity, year, value = MKcal/1000)

    rm(list = ls(pattern = "DF_Macronutrient_FoodItem*"))

    # 4. Feed and trade ----
    ##* L101.ag_Feed_Mt_R_C_Y ----
    L101.ag_Feed_Mt_R_C_Y <-
      L100.FAO_SUA_APE_balance %>% filter(element == "Feed") %>%
      filter(GCAM_commodity %in% COMM_CROP) %>%
      select(-element)

    ##* L1091.GrossTrade_Mt_R_C_Y ----
    # Including both ag and an
    L1091.GrossTrade_Mt_R_C_Y <-
      L100.FAO_SUA_APE_balance %>% filter(element %in% c("Export", "Import")) %>%
      spread(element, value) %>%
      rename(GrossExp_Mt = Export, GrossImp_Mt = Import)

    # Produce outputs ----
    #********************************* ----

    # NOTE(review): the title mentions "prices" and the units say "1000 tonnes",
    # but this table is a supply-utilization balance converted to Mt above --
    # confirm and update these attribute strings upstream.
    L100.FAO_SUA_APE_balance %>%
      add_title("Regional agricultural commodity prices for all traded primary GCAM AGLU commodities") %>%
      add_units("1000 tonnes") %>%
      add_comments("Supply utilization balance for GCAM commodities and regions in primary equivalent") %>%
      add_precursors("GCAM_AgLU_SUA_APE_1973_2019",
                     "common/GCAM_region_names") ->
      L100.FAO_SUA_APE_balance

    L100.FAO_ag_HA_ha %>%
      add_title("FAO agricultural harvested area by country, item, year") %>%
      add_comments("Keep detailed FAO area info by item and country for later uses") %>%
      add_units("Ha") %>%
      add_precursors("FAO_AgArea_Kha_All") ->
      L100.FAO_ag_HA_ha

    L100.FAO_ag_Prod_t %>%
      add_title("FAO agricultural production by country, item, year") %>%
      add_comments("Keep detailed FAO production info by item and country for later uses") %>%
      add_units("t") %>%
      add_precursors("FAO_AgProd_Kt_All") ->
      L100.FAO_ag_Prod_t

    L100.FAO_PRODSTAT_TO_DOWNSCAL %>%
      add_title("FAO agricultural production and harvested area by country, GCAM_item, year") %>%
      add_comments("Aggregated to GCAM items for both production and area for downscaling") %>%
      add_units("Ha and t") %>%
      add_precursors("FAO_AgProd_Kt_All",
                     "FAO_AgArea_Kha_All") ->
      L100.FAO_PRODSTAT_TO_DOWNSCAL

    L105.an_Prod_Mt_R_C_Y %>%
      add_title("Animal production by GCAM region / commodity / year") %>%
      add_units("Mt") %>%
      add_comments("Aggregate FAO country and item data by GCAM region commodity, and year") %>%
      add_comments("Convert data from ton to Mt") %>%
      add_legacy_name("L105.an_Prod_Mt_R_C_Y") %>%
      add_precursors("FAO_AgProd_Kt_All") ->
      L105.an_Prod_Mt_R_C_Y

    L105.an_Prod_Mt_ctry_C_Y %>%
      add_title("Animal production by country / commodity / year") %>%
      add_units("Mt") %>%
      add_comments("Aggregate FAO country and item data by GCAM commodity, and year") %>%
      add_comments("Convert data from ton to Mt") %>%
      add_legacy_name("L105.an_Prod_Mt_ctry_C_Y") %>%
      add_precursors("FAO_AgProd_Kt_All") ->
      L105.an_Prod_Mt_ctry_C_Y

    L101.ag_Food_Mt_R_C_Y %>%
      add_title("FAO food consumption by GCAM region, commodity, and year") %>%
      add_units("Mt") %>%
      add_comments("Aggregates FAO data by GCAM region, commodity, and year") %>%
      add_comments("Data is also converted from tons to Mt") %>%
      add_legacy_name("L101.ag_Food_Mt_R_C_Y") %>%
      add_precursors("common/GCAM_region_names",
                     "aglu/FAO/FAO_ag_items_PRODSTAT",
                     "FAO_Food_Macronutrient_All_2010_2019",
                     "FAO_Food_MacronutrientRate_2010_2019_MaxValue") ->
      L101.ag_Food_Mt_R_C_Y

    L101.CropMeat_Food_Pcal_R_C_Y %>%
      add_title("FAO food calories consumption by GCAM region, commodity, and year") %>%
      add_units("Pcal") %>%
      add_comments("Aggregates FAO data by GCAM region, commodity, and year") %>%
      add_comments("Data is also converted from tons to Pcal") %>%
      add_legacy_name("L101.CropMeat_Food_Pcal_R_C_Y") %>%
      same_precursors_as(L101.ag_Food_Mt_R_C_Y) ->
      L101.CropMeat_Food_Pcal_R_C_Y

    L105.an_Food_Mt_R_C_Y %>%
      add_title("Animal consumption by GCAM region / commodity / year") %>%
      add_units("Mt") %>%
      add_comments("Aggregate FAO country and item data by GCAM region, commodity, and year") %>%
      add_comments("Convert data from ton to Mt") %>%
      add_legacy_name("L105.an_Food_Mt_R_C_Y") %>%
      add_precursors("common/GCAM_region_names",
                     "GCAM_AgLU_SUA_APE_1973_2019",
                     "aglu/FAO/FAO_an_items_PRODSTAT",
                     "FAO_Food_Macronutrient_All_2010_2019",
                     "FAO_Food_MacronutrientRate_2010_2019_MaxValue") ->
      L105.an_Food_Mt_R_C_Y

    L101.ag_Feed_Mt_R_C_Y %>%
      add_title("Feed use by GCAM region, commodity, and year aggregated from FAO") %>%
      add_comments("Feed consumption of GCAM Ag commodities; they will be adjusted in L108") %>%
      add_units("Mt") %>%
      add_legacy_name("L101.ag_Feed_Mt_R_C_Y") %>%
      add_precursors("GCAM_AgLU_SUA_APE_1973_2019",
                     "aglu/FAO/FAO_ag_items_PRODSTAT") ->
      L101.ag_Feed_Mt_R_C_Y

    L1091.GrossTrade_Mt_R_C_Y %>%
      add_title("Gross trade by GCAM region, commodity, and year aggregated from FAO") %>%
      add_comments("Balanced gross trade of GCAM Ag commodities") %>%
      add_units("Mt") %>%
      add_legacy_name("L1091.GrossTrade_Mt_R_C_Y") %>%
      add_precursors("GCAM_AgLU_SUA_APE_1973_2019") ->
      L1091.GrossTrade_Mt_R_C_Y

    # Done & return data----
    return_data(MODULE_OUTPUTS)
  } else {
    stop("Unknown command")
  }
}
|
43da32f0198642f9c2fecd8e667123e3c982f345 | 4d15bef2e5df72e1877cddd95d6a7dcc3e8f9460 | /R/API.R | 5dc5db66e163b8d91769f366a32c5d7a4671be02 | [] | no_license | MitraisEga/r-bootcamp-working | 34ff56bc928339835ecec612999aa7b4f34631a7 | 43e6a484685e35cfe97c5ab59e535faee702eb61 | refs/heads/master | 2020-04-29T14:37:07.507000 | 2019-03-19T08:13:13 | 2019-03-19T08:13:13 | 176,202,182 | 0 | 0 | null | 2019-03-18T04:05:05 | 2019-03-18T04:05:05 | null | UTF-8 | R | false | false | 1,374 | r | API.R | library("plumber")
library("Rook")
library("dplyr")
#' Show Hello World message
#' @get /hello
hello <- function() {
  "hello world"
}
#' Read an uploaded votes.csv (StackExchange data) and count each vote type per post
#' @param req uploaded file votes.csv based on stackexchange data
#' @post /uploadVotes
fileUpload <- function(req){
  parsed <- Rook::Multipart$parse(req)
  csv_path <- parsed$req$tempfile
  votes <- tbl_df(read.csv(file = csv_path, sep = ',', header = TRUE))
  # One row per (post, vote type) with its occurrence count.
  votes %>%
    select("post_id", "vote_type_id") %>%
    group_by(post_id, vote_type_id) %>%
    summarise(total_vote = n())
}
#' Render per-post scores from an uploaded votes.csv as a bar chart
#' @param req the posted votes.csv from stackexchange data
#' @png return the data as png
#' @post /readPostScore
getResult <- function(req){
  parsed <- Rook::Multipart$parse(req)
  votes <- tbl_df(read.csv(file = parsed$req$tempfile, sep = ',', header = TRUE))
  # Count votes per (post, vote type).
  vote_counts <- votes %>%
    select("post_id", "vote_type_id") %>%
    group_by(post_id, vote_type_id) %>%
    summarise(total_vote = n())
  # Score each post as the sum of vote_type_id * count over its vote types.
  post_score <- vote_counts %>%
    mutate(score = vote_type_id * total_vote) %>%
    select(post_id, score) %>%
    group_by(post_id) %>%
    summarise(score = sum(score))
  barplot(post_score$score, names.arg = post_score$post_id)
}
|
266c8cb35f9df3b126cf64131f8bcd1465d35da1 | 6bb3dc3d43ee66307c1ed6f5940141be0e481936 | /R/tests/testthat.R | d31c47b169117131f95689e6be06de36e09f7aee | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | CCS-Lab/easyml | 7f41db72122f5de23f75381bcc03e13d65536c1b | c334d1174ee5795734f900d2cfa2836b76834bc9 | refs/heads/master | 2023-06-12T07:09:22.847009 | 2023-06-02T05:27:51 | 2023-06-02T05:27:51 | 71,721,801 | 40 | 19 | NOASSERTION | 2022-06-21T21:11:33 | 2016-10-23T18:38:28 | R | UTF-8 | R | false | false | 56 | r | testthat.R | library(testthat)
# Standard testthat bootstrap: load the package under test and run its suite.
library(easyml)
test_check("easyml")
|
3273c894ae5cfca0e7d8d1dc9265b03b172547d8 | b9be641f25f5fe4611126fed9bb913174a0d05c2 | /tests/testthat.R | c4e171243ae3f2a01d8164e82e5ef9e48b9ea91f | [] | no_license | SondergardM/FARSpkg | bed76c776c08abca6bf75956f08c533ebb3fc909 | c51103db9bd40ee7d4fb79bac88474b2515987a2 | refs/heads/main | 2023-05-28T06:04:53.743032 | 2021-06-02T14:52:19 | 2021-06-02T14:52:19 | 371,123,611 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 58 | r | testthat.R | library(testthat)
# Standard testthat bootstrap: load the package under test and run its suite.
library(FARSpkg)
test_check("FARSpkg")
|
af1610b000d52d192eafd73e899b4b8dcf5192c1 | 4f7836d03ed64a4c0fe3cae7c7ca03dc01272b3a | /tests/testthat/test-dummy.R | 102306a223d110ee1d0b0f9d731685250639b2e4 | [
"MIT"
] | permissive | Haoen-Cui/mischelperfuns | e5c07873b2f5e0e867b6eb1e7b4d000ea85f746a | ba1b5265a3b7fe1a9728e84e4b759ae43136e652 | refs/heads/master | 2020-04-29T10:16:30.685449 | 2019-11-11T06:08:24 | 2019-11-11T06:08:24 | 176,056,080 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 95 | r | test-dummy.R | context("dummy test")
# Placeholder test: always passes. Keeps the testthat harness wired up until
# real tests are added.
test_that("dummy test needs to pass", {
  expect_true( TRUE )
})
|
c4ac7fe666a180866521e5f6b1df93408d1add1e | 4f3b5b07828f2206c7d99007a045f55f5a5b108e | /R/network.summary.R | d5905767853fdd282fc4ab726f3e266bfb0747a0 | [] | no_license | MikeJSeo/bnma | a83fb33137e1af73c55eef87aacf17ea87c0380e | a7ab150eacd836044956d6864fe5e4d1a15e0c16 | refs/heads/master | 2023-08-19T03:03:01.305074 | 2023-08-16T19:16:56 | 2023-08-16T19:16:56 | 175,659,629 | 5 | 2 | null | null | null | null | UTF-8 | R | false | false | 47,461 | r | network.summary.R | pick.summary.variables <- function(result, extra.pars = NULL, only.pars = NULL){
samples <- result[["samples"]]
varnames <- dimnames(samples[[1]])[[2]]
varnames.split <- sapply(strsplit(varnames, "\\["), '[[', 1)
varnames.split <- gsub("[[:digit:]]","",varnames.split)
if(!is.null(only.pars)){
if(!all(only.pars %in% varnames.split)){
stop(paste0(only.pars, "was not sampled"))
}
}
if(is.null(only.pars)){
pars <- c("d", "sd", "sigma", "b_bl", "beta", "C", "sdC", "sigmaC","B", "sdB", "sigmaB", "E", "sdE", "sigmaE")
} else{
pars <- only.pars
}
if(!is.null(extra.pars)){
pars <- c(pars, extra.pars)
}
summary.samples <- lapply(samples, function(x){x[,varnames.split %in% pars, drop = F]})
summary.samples <- coda::mcmc.list(summary.samples)
summary.samples
}
#' Summarize result run by \code{\link{network.run}}
#'
#' This function uses summary function in coda package to summarize mcmc.list object. Monte carlo error (Time-series SE) is also obtained using the coda package and is printed in the summary as a default.
#'
#' @param object Result object created by \code{\link{network.run}} function
#' @param ... Additional arguments affecting the summary produced
#' @return Returns summary of the network model result
#' @examples
#' network <- with(statins, {
#' network.data(Outcomes, Study, Treat, N = N, response = "binomial",
#' Treat.order = c("Placebo", "Statin"), covariate = covariate, covariate.type = "discrete")
#' })
#' \donttest{
#' result <- network.run(network)
#' summary(result)
#' }
#' @export
summary.network.result <- function(object, ...){

  if(!inherits(object, "network.result")) {
    stop('This is not the output from network.run. Need to run network.run function first')
  }
  selected <- pick.summary.variables(object, ...)
  out <- list(summary.samples = summary(selected),
              Treat.order = object$network$Treat.order,
              deviance = unlist(object$deviance[1:3]),
              total_n = sum(object$network$na))
  class(out) <- 'summary.network.result'
  out
}
#' Plot traceplot and posterior density of the result
#'
#' This function uses plotting function in coda package to plot mcmc.list object
#'
#' @param x Result object created by \code{\link{network.run}} function
#' @param ... Additional arguments affecting the plot produced
#' @return None
#' @examples
#' network <- with(statins, {
#' network.data(Outcomes, Study, Treat, N = N, response = "binomial",
#' Treat.order = c("Placebo", "Statin"), covariate = covariate, covariate.type = "discrete")
#' })
#' \donttest{
#' result <- network.run(network)
#' plot(result, only.pars = "sd")
#' }
#' @export
plot.network.result <- function(x, ...) {
  # Delegate column selection to pick.summary.variables, then use coda's
  # plot method for mcmc.list (traceplot + density).
  plot(pick.summary.variables(x, ...))
}
#' Use coda package to plot Gelman-Rubin diagnostic plot
#'
#' This function plots Gelman-Rubin diagnostic using coda package.
#'
#' @param result Object created by \code{\link{network.run}} function
#' @param extra.pars Extra parameters that the user wants to plot other than the default parameters.
#' @param only.pars Parameters that user wants to display. This gets rids of other default parameters user doesn't want to show.
#' @return None
#' @examples
#' network <- with(statins, {
#' network.data(Outcomes, Study, Treat, N = N, response = "binomial",
#' Treat.order = c("Placebo", "Statin"), covariate = covariate, covariate.type = "discrete")
#' })
#' \donttest{
#' result <- network.run(network)
#' network.gelman.plot(result)
#' }
#' @export
network.gelman.plot <- function(result, extra.pars = NULL, only.pars = NULL){

  summary.samples <- pick.summary.variables(result, extra.pars, only.pars)
  # Drop columns that are identically zero in every chain (never-updated nodes).
  summary.samples <- mcmc.list(lapply(summary.samples, function(x) { x[,colSums(abs(x)) != 0] }))

  # Fix: seq_len() instead of 1:nvar(...) -- if no variables remain after the
  # zero-column filter, 1:nvar would yield c(1, 0) and error; seq_len(0) is empty.
  for(v in seq_len(nvar(summary.samples))){
    gelman.plot(summary.samples[,v,drop=FALSE])
  }
}
#' Use coda package to find Gelman-Rubin diagnostics
#'
#' This function uses coda package to find Gelman-Rubin diagnostics.
#'
#' @param result Object created by \code{\link{network.run}} function
#' @param extra.pars Extra parameters that the user wants to display other than the default parameters.
#' @param only.pars Parameters that user wants to display. This gets rids of other default parameters user doesn't want to show.
#' @return Returns gelman-rubin diagnostics
#' @examples
#' network <- with(statins, {
#' network.data(Outcomes, Study, Treat, N = N, response = "binomial",
#' Treat.order = c("Placebo", "Statin"), covariate = covariate, covariate.type = "discrete")
#' })
#' \donttest{
#' result <- network.run(network)
#' network.gelman.diag(result, extra.pars = c("Eta"))
#' }
#' @export
network.gelman.diag <- function(result, extra.pars = NULL, only.pars = NULL){

  chains <- pick.summary.variables(result, extra.pars, only.pars)
  # Keep only columns that are not identically zero across iterations.
  keep.nonzero <- function(chain) chain[, colSums(abs(chain)) != 0]
  chains <- mcmc.list(lapply(chains, keep.nonzero))
  diagnostics <- gelman.diag(chains, multivariate = FALSE)
  diagnostics$psrf
}
#' Generate autocorrelation diagnostics using coda package
#'
#' This function generates autocorrelation diagnostics using coda package. User can specify lags and parameters to display.
#' Note that to display extra parameters that are not saved, user needs to first specify parameters in \code{extra.pars.save} parameter in \code{\link{network.run}} function.
#'
#' @param result Object created by \code{\link{network.run}} function
#' @param lags A vector of lags at which to calculate the autocorrelation
#' @param extra.pars Extra parameters that the user wants to display other than the default parameters.
#' @param only.pars Parameters that user wants to display. This gets rids of other default parameters user doesn't want to show.
#' @return Returns autocorrelation diagnostics
#' @examples
#' network <- with(blocker, {
#' network.data(Outcomes, Study, Treat, N = N, response = "binomial")
#' })
#' \donttest{
#' result <- network.run(network)
#' network.autocorr.diag(result, only.pars = "d")
#' }
#' @export
network.autocorr.diag <- function(result, lags = c(0,1,5,10,50), extra.pars = NULL, only.pars = NULL){

  chains <- pick.summary.variables(result, extra.pars, only.pars)
  # Keep only columns that are not identically zero across iterations.
  nonzero.columns <- function(chain) chain[, colSums(abs(chain)) != 0]
  chains <- mcmc.list(lapply(chains, nonzero.columns))
  autocorr.diag(chains, lags = lags)
}
#' Generate autocorrelation plot using coda package
#'
#' This function plots autocorrelation using coda package.
#'
#' @param result Object created by \code{\link{network.run}} function
#' @param extra.pars Extra parameters that the user wants to plot other than the default parameters.
#' @param only.pars Parameters that user wants to display. This gets rid of other default parameters user doesn't want to show
#' @return None
#' @examples
#' network <- with(blocker, {
#'  network.data(Outcomes, Study, Treat, N = N, response = "binomial")
#' })
#' \donttest{
#' result <- network.run(network)
#' network.autocorr.plot(result)
#' }
#' @export
network.autocorr.plot <- function(result, extra.pars = NULL, only.pars = NULL){
  # Same column filtering as the other coda diagnostics: keep requested
  # parameters and drop columns that are identically zero before plotting.
  chains <- pick.summary.variables(result, extra.pars, only.pars)
  nonzero <- lapply(chains, function(chain) chain[, colSums(abs(chain)) != 0])
  autocorr.plot(mcmc.list(nonzero))
}
#' Find relative effects for base treatment and comparison treatments
#'
#' This function calculates relative effects for base treatment and comparison treatments.
#'
#' @param result Object created by \code{\link{network.run}} function
#' @param base.treatment Base treatment user wants for the relative effects. Base treatment is initially set by \code{Treat.order} parameter in \code{\link{network.data}} (first one in the list). If set to null, default is to use base treatment.
#' @param comparison.treatments Treatments that user wants to compare against base treatment. If set to null, all the treatments besides base treatment is considered as comparison treatments.
#' @param base.category Base category user wants for the relative effects. Only used for multinomial data.
#' @param comparison.categories Category that user wants to compare against base.category. Only used for multinomial data.
#' @param covariate Covariate value at which to compute relative effects. Only used if covariate value is specified in the model.
#' @return
#' This returns a mcmc.list sample of relative effects for the base treatment specified. This allows user to obtain relative effects of different base.treatment after the sampling has been done.
#' For a simple summary, use \code{\link{relative.effects.table}}.
#' @examples
#' network <- with(parkinsons, {
#'  network.data(Outcomes, Study, Treat, SE = SE, response = "normal")
#' })
#' \donttest{
#' result <- network.run(network)
#' summary(relative.effects(result, base.treatment = "Placebo"))
#' }
#' @seealso \code{\link{relative.effects.table}}
#' @export
relative.effects <- function(result, base.treatment = NULL, comparison.treatments = NULL, base.category = NULL, comparison.categories = NULL, covariate = NULL){
  network <- result$network
  if(!is.null(covariate)){
    # One covariate value is required per covariate column used in the model
    stopifnot(length(covariate) == dim(network$covariate)[2])
  }
  Treat.order <- network$Treat.order
  # Default base treatment is the first treatment in Treat.order; validate
  # user-supplied choices against the known treatments.
  if(!is.null(base.treatment)){
    stopifnot(base.treatment %in% Treat.order)
  } else{
    base.treatment <- Treat.order[1]
  }
  if(!is.null(comparison.treatments)){
    stopifnot(comparison.treatments %in% Treat.order)
    stopifnot(!comparison.treatments %in% base.treatment)
  } else{
    comparison.treatments <- Treat.order[-which(Treat.order == base.treatment)]
  }
  # With a covariate model the regression coefficients ("beta") contribute to
  # the relative effect; otherwise only the basic parameters "d" are needed.
  if(!is.null(covariate)){
    summary.samples <- pick.summary.variables(result, only.pars = c("d", "beta"))
  } else{
    summary.samples <- pick.summary.variables(result, only.pars = c("d"))
  }
  vars <- dimnames(summary.samples[[1]])[[2]]
  if(network$response != "multinomial"){
    # Build a contrast matrix: each column encodes one comparison as
    # (+1 on the comparison treatment, -1 on the base treatment), so that
    # chain %*% effects yields d[comparison] - d[base] per iteration.
    effects <- matrix(0, nrow = network$ntreat, ncol = length(comparison.treatments))
    effects[which(Treat.order == base.treatment),] = -1
    col_name = NULL
    for(i in 1:ncol(effects)){
      effects[which(comparison.treatments[i] == Treat.order),i] = 1
      col_name <- c(col_name, paste0("d_treatment", base.treatment, comparison.treatments[i]))
    }
    if(!is.null(covariate)){
      # Prepend rows for the beta coefficients, scaled by the (centered)
      # covariate value, so the matrix product adds the covariate adjustment.
      # NOTE(review): "covariate_centerered" appears to be a typo; only
      # "covariate_centered" (assigned inside the loop) is actually used.
      cov_matrix <- covariate_centerered <- NULL
      for(i in 1:length(covariate)){
        cov <- effects
        # Covariates are centered at their mean (mx_i) in the model
        covariate_centered <- covariate[i] - network[[paste0("mx",i)]]
        cov <- cov * covariate_centered
        cov_matrix <- rbind(cov_matrix, cov)
      }
      effects <- rbind(cov_matrix, effects)
    }
    colnames(effects) <- col_name
    rownames(effects) <- vars
    # Apply the contrast matrix chain-by-chain, preserving mcmc attributes
    samples <- as.mcmc.list(lapply(summary.samples, function(chain){
      samples <- chain %*% effects
      colnames(samples) <- colnames(effects)
      mcmc(samples, start = start(chain), end = end(chain), thin = thin(chain))
    }))
  } else{
    # Multinomial: parameters are indexed d[treatment,category]; recover the
    # category index from the variable name.
    # NOTE(review): this substr() reads a single character at a fixed position,
    # so it assumes single-digit category indices — confirm for ncat > 10.
    vars_d <- vars[grep("d\\[", vars)]
    categories_row <- as.numeric(substr(vars_d, nchar(vars_d[1])-1, nchar(vars_d[1])-1))
    categories_row <- categories_row+1
    ncat <- network$ncat
    if(!is.null(base.category)){
      stopifnot(base.category %in% 1:ncat)
    } else{
      base.category <- 1
    }
    if(!is.null(comparison.categories)){
      stopifnot(comparison.categories %in% 1:ncat)
      stopifnot(!comparison.categories %in% base.category)
    } else{
      comparison.categories <- (1:ncat)[-base.category]
    }
    # NOTE(review): the unnamed `length(vars)` argument here does NOT set a
    # dimension — with nrow/ncol given by name it positionally matches `byrow`
    # (harmless for a scalar fill, but looks like a leftover). Confirm the
    # intended dimensions; rownames(effects) <- vars below requires
    # nrow == length(vars).
    effects <- matrix(0, nrow = network$ntreat*(network$ncat-1), length(vars), ncol = length(comparison.treatments) * length(comparison.categories))
    categories_column <- rep(comparison.categories, each = length(comparison.treatments))
    effects[which(rep(Treat.order, ncat-1) == base.treatment),] <- -1
    col_name <- NULL
    for(i in 1:ncol(effects)){
      effects[which(rep(Treat.order, ncat-1) == rep(comparison.treatments, length(comparison.categories))[i]),i] <- 1
      col_name <- c(col_name, paste0("d_treatment", base.treatment, rep(comparison.treatments, length(comparison.categories))[i]))
    }
    colnames(effects) <- col_name
    # Zero out rows for categories not involved in each comparison, and flip
    # the sign on the base-category rows so each column contrasts
    # (comparison category) vs (base category).
    for(i in 1:ncol(effects)){
      effects[which(categories_row == base.category),i] <- -effects[which(categories_row == base.category),i]
      effects[which(categories_row != base.category & categories_row != rep(comparison.categories, each = length(comparison.treatments))[i]),i] <- 0
      colnames(effects)[i] <- paste0(colnames(effects)[i], "_category", base.category, rep(comparison.categories, each = length(comparison.treatments))[i])
    }
    if(!is.null(covariate)){
      # Same covariate adjustment as the non-multinomial branch (and the same
      # "covariate_centerered" typo variable, unused under that name).
      cov_matrix <- covariate_centerered <- NULL
      for(i in 1:length(covariate)){
        cov <- effects
        covariate_centered <- covariate[i] - network[[paste0("mx",i)]]
        cov <- cov * covariate_centered
        cov_matrix <- rbind(cov_matrix, cov)
      }
      effects <- rbind(cov_matrix, effects)
    }
    rownames(effects) <- vars
    samples <- as.mcmc.list(lapply(summary.samples, function(chain){
      samples <- chain %*% effects
      colnames(samples) <- colnames(effects)
      mcmc(samples, start = start(chain), end = end(chain), thin = thin(chain))
    }))
  }
  samples
}
#' Make a summary table for relative effects
#'
#' This function creates a summary table of relative effects. Relative effects are in units of log odds ratio for binomial and multinomial data and real number scale for normal data.
#'
#' @param result Object created by \code{\link{network.run}} function
#' @param summary_stat Specifies what type of statistics user wants. Options are: "mean", "ci", "quantile", "sd", "p-value".
#' "ci" gives 95\% confidence interval (0.025, 0.5, 0.975) and "quantile" gives specific quantile specified in probs parameter.
#' "p-value" is the probability relative effect (in binomial, log odds ratio) is less than 0.
#' @param probs Used only for the quantile summary. Specifies which quantile user wants the summary of (should be one numeric value between 0 to 1)
#' @param base.category Specifies for which base category user wants for the summary. Used only for multinomial.
#' @return Returns relative effects table
#' @examples
#' #cardiovascular
#' network <- with(cardiovascular,{
#'  network.data(Outcomes, Study, Treat, N, response = "multinomial")
#' })
#' \donttest{
#' result <- network.run(network)
#' exp(relative.effects.table(result)) #look at odds ratio instead of log odds ratio
#' }
#' @seealso \code{\link{relative.effects}}
#' @export
relative.effects.table <- function(result, summary_stat = "mean", probs = NULL, base.category = NULL){
  stopifnot(summary_stat %in% c("mean", "quantile", "sd", "p-value", "ci"))
  if(!is.null(probs)){
    if(length(probs) != 1){
      stop("length of probs should be 1")
    }
  }
  Treat.order <- result$network$Treat.order
  ts <- 1:length(Treat.order)
  # All unordered treatment pairs; each pair fills two mirrored cells of the
  # table (effect of col vs row, and its negation in the transposed cell).
  comps <- combn(ts, 2)
  if(result$network$response != "multinomial"){
    tbl <- matrix(NA, nrow = length(ts), ncol = length(ts), dimnames = list(Treat.order, Treat.order))
    for (i in 1:ncol(comps)) {
      comp <- comps[, i]
      # Pool all chains into one sample of the single requested contrast
      samples <- as.matrix(relative.effects(result, base.treatment = Treat.order[comp[1]], comparison.treatments = Treat.order[comp[2]]))
      if(summary_stat == "mean"){
        tbl[comp[1], comp[2]] <- mean(samples)
        tbl[comp[2], comp[1]] <- -tbl[comp[1], comp[2]]
      } else if(summary_stat == "ci"){
        # "ci" cells are character strings "[lower,median,upper]"; the mirror
        # cell negates and reverses the interval endpoints.
        q <- round(quantile(samples, probs = c(0.025, 0.5, 0.975)), 6)
        tbl[comp[1], comp[2]] <- paste0("[", q[1], ",", q[2], ",", q[3], "]")
        tbl[comp[2], comp[1]] <- paste0("[", -q[3], ",", -q[2], ",", -q[1], "]")
      } else if(summary_stat == "quantile"){
        # NOTE(review): negating a quantile gives the (1 - probs) quantile of
        # the reversed contrast, not the probs quantile — intentional mirror.
        tbl[comp[1], comp[2]] <- round(quantile(samples, probs = probs), 6)
        tbl[comp[2], comp[1]] <- -tbl[comp[1], comp[2]]
      } else if(summary_stat == "sd"){
        tbl[comp[1], comp[2]] <- tbl[comp[2], comp[1]] <- sd(samples)
      } else if(summary_stat == "p-value"){
        # Posterior probability that the relative effect is below zero
        tbl[comp[1], comp[2]] <- sum(samples < 0)/ dim(samples)[1]
        tbl[comp[2], comp[1]] <- 1 - tbl[comp[1], comp[2]]
      }
    }
  } else if(result$network$response == "multinomial"){
    # Multinomial: one table layer per non-base category (3-d array)
    ncat <- result$network$ncat
    tbl <- array(NA, dim = c(length(ts), length(ts), ncat -1), dimnames = list(Treat.order, Treat.order, NULL))
    for (i in 1:ncol(comps)) {
      comp <- comps[, i]
      samples <- as.matrix(relative.effects(result, base.treatment = Treat.order[comp[1]], comparison.treatments = Treat.order[comp[2]], base.category = base.category))
      if(summary_stat == "mean"){
        tbl[comp[1], comp[2],] <- apply(samples, 2, mean)
        tbl[comp[2], comp[1],] <- -tbl[comp[1], comp[2],]
      } else if(summary_stat == "ci"){
        q <- round(apply(samples, 2, quantile, probs = c(0.025, 0.5, 0.975)), 6)
        q1 <- apply(q, 2, function(x){ paste0("[", x[1], ",", x[2], ",", x[3], "]")})
        q2 <- apply(q, 2, function(x){ paste0("[", -x[3], ",", -x[2], ",", -x[1], "]")})
        tbl[comp[1], comp[2],] <- q1
        tbl[comp[2], comp[1],] <- q2
      } else if(summary_stat == "quantile"){
        tbl[comp[1], comp[2],] <- apply(samples, 2, quantile, probs = probs)
        tbl[comp[2], comp[1],] <- -tbl[comp[1], comp[2],]
      } else if(summary_stat == "sd"){
        tbl[comp[1], comp[2],] <- tbl[comp[2], comp[1],] <- apply(samples, 2, sd)
      } else if(summary_stat == "p-value"){
        tbl[comp[1], comp[2],] <- apply(samples, 2, function(x){ sum(x <0) / length(x)})
        tbl[comp[2], comp[1],] <- 1 - tbl[comp[1], comp[2],]
      }
    }
  }
  tbl
}
#' Create a treatment rank table
#'
#' This function makes a table of ranking for each treatment. Each number in the cell represents a probability certain treatment was in such rank.
#' This table is also stored as an output from \code{\link{network.run}}.
#'
#' @param result Object created by \code{\link{network.run}} function
#' @return Returns a table of ranking
#' @examples
#' network <- with(blocker, {
#'  network.data(Outcomes, Study, Treat, N = N, response = "binomial")
#' })
#' \donttest{
#' result <- network.run(network)
#' rank.tx(result)
#' }
#' @seealso \code{\link{network.rank.tx.plot}}
#' @export
rank.tx <- function(result){
  samples <- result[["samples"]]
  varnames <- dimnames(samples[[1]])[[2]]
  # Strip "[i,j]" indexing and any digits to recover the bare parameter name,
  # then keep only the "prob" ranking-probability columns.
  varnames.split <- sapply(strsplit(varnames, "\\["), '[[', 1)
  varnames.split <- gsub("[[:digit:]]","",varnames.split)
  rank.samples <- lapply(samples, function(x){x[,varnames.split %in% "prob"]})
  Treat.order <- result$network$Treat.order
  response <- result$network$response
  if(response != "multinomial"){
    # prob.matrix[i, j] = posterior probability that treatment j holds rank i,
    # averaged over all chains and iterations.
    prob.matrix <- matrix(NA, nrow = length(Treat.order), ncol = length(Treat.order), dimnames = list(paste0("rank ", 1:length(Treat.order)), paste0("treatment ", Treat.order)))
    for(i in 1:nrow(prob.matrix)){
      for(j in 1:ncol(prob.matrix)){
        prob.matrix[i,j] <- mean(unlist(lapply(rank.samples, function(x){ x[,paste0("prob[", i, ",", j, "]")]})))
      }
    }
  } else if(response == "multinomial"){
    # Multinomial: one rank-probability layer per non-base outcome category
    ncat <- result$network$ncat
    prob.matrix <- array(NA, dim = c(length(Treat.order), length(Treat.order), ncat-1), dimnames = list(paste0("rank ", 1:length(Treat.order)), paste0("treatment ", Treat.order), paste0("Category ", 1:(ncat-1))))
    for(i in 1:nrow(prob.matrix)){
      for(j in 1:ncol(prob.matrix)){
        for(k in 1:(ncat-1)){
          prob.matrix[i,j,k] <- mean(unlist(lapply(rank.samples, function(x){ x[,paste0("prob[", i, ",", j, ",", k, "]")]})))
        }
      }
    }
  }
  return(prob.matrix)
}
#' Create a treatment rank plot
#'
#' This plot displays how each treatment is ranked. For each rank, we show how likely each treatment will be at that rank.
#'
#' @param result Object created by \code{\link{network.run}} function
#' @param txnames Treatment names used in creating legend
#' @param catnames Category names. Only used in multinomial.
#' @param legend.position x,y position of the legend
#' @return None
#' @examples
#' network <-with(blocker, {
#'  network.data(Outcomes, Study, Treat, N = N, response = "binomial")
#' })
#' \donttest{
#' result <- network.run(network)
#' network.rank.tx.plot(result, txnames = c("a", "b"))
#' }
#' @seealso \code{\link{rank.tx}}
#' @export
network.rank.tx.plot <- function(result, txnames = NULL, catnames = NULL, legend.position = c(1,1)){
  rank.table <- rank.tx(result)
  ntreat = dim(rank.table)[1]
  if (is.null(txnames)) txnames <- paste("Treatment", result$network$Treat.order)
  if(result$network$response != "multinomial"){
    # Empty plot frame, then one line per treatment (column of rank.table);
    # line type, color, and point symbol all index the treatment for the legend.
    plot(seq(ntreat),seq(ntreat),type="n",xaxt="n",ylim=c(0,1),pty="s",yaxt="n",ylab="Probability",xlab="Rank")
    axis(side=1,at=seq(ntreat))
    axis(side=2,at=seq(0,1,by=0.2))
    for (i in seq(ntreat)) {
      points(seq(ntreat), rank.table[,i],type="b",lty=i,col=i,pch=i)
    }
    legend(legend.position[1], legend.position[2],txnames,lty=1:ntreat,bty="n",cex=.75,col=1:ntreat)
  } else if(result$network$response == "multinomial"){
    # One full plot per outcome-category layer of the rank table
    ncat <- dim(rank.table)[3]
    if (is.null(catnames)) catnames <- paste("Outcome Category with base 1 and comparison", 1+seq(ncat))
    for (j in seq(ncat)) {
      plot(seq(ntreat),seq(ntreat),type="n",xaxt="n",ylim=c(0,1),pty="s",yaxt="n",ylab="Probability",xlab="Rank")
      axis(side=1,at=seq(ntreat))
      axis(side=2,at=seq(0,1,by=0.2))
      title(catnames[j])
      for (i in seq(ntreat)) {
        points(seq(ntreat), rank.table[,i,j],type="b",lty=i,col=i,pch=i)
      }
      legend(legend.position[1], legend.position[2],txnames,lty=1:ntreat,bty="n",cex=.75,col=1:ntreat)
    }
  }
}
#' Create a treatment cumulative rank plot
#'
#' This function creates a treatment cumulative rank plot. Rank preference can be specified by the \code{rank.preference} parameter in \code{\link{network.data}}
#'
#' @param result Object created by \code{\link{network.run}} function
#' @param txnames Treatment names used in creating legend
#' @param catnames Category names. Only used in multinomial.
#' @param legend.position x, y position of the legend
#' @return None
#' @examples
#' network <- with(blocker, {
#'  network.data(Outcomes, Study, Treat, N = N, response = "binomial")
#' })
#' \donttest{
#' result <- network.run(network)
#' network.cumrank.tx.plot(result, txnames = c("control", "beta blocker"))
#' }
#' @seealso \code{\link{rank.tx}}
#' @export
network.cumrank.tx.plot <- function(result, txnames = NULL, catnames = NULL, legend.position = c(1,1)){
  rank.table <- rank.tx(result)
  ntreat = dim(rank.table)[1]
  if (is.null(txnames)) txnames <- paste("Treatment", result$network$Treat.order)
  if(result$network$response != "multinomial"){
    # Column-wise cumulative sums turn rank probabilities into cumulative
    # ranking curves: x[r, t] = P(treatment t ranked r-th or better).
    x <- apply(rank.table,2,cumsum)
    plot(seq(ntreat),seq(ntreat),type="n",xaxt="n",ylim=c(0,1),yaxt="n",ylab="Cumulative Probability",xlab="Rank")
    axis(side=1,at=seq(ntreat))
    axis(side=2,at=seq(0,1,by=0.2))
    for (j in seq(ntreat))
      points(seq(ntreat), x[,j],type="l",lty=j,col=j)
    legend(legend.position[1], legend.position[2], txnames,lty=1:(ntreat),bty="n",cex=.75,col=1:(ntreat))
  } else if(result$network$response == "multinomial"){
    # One cumulative-rank plot per outcome-category layer
    ncat <- dim(rank.table)[3]
    if (is.null(catnames)) catnames <- paste("Outcome Category with base 1 and comparison", 1+seq(ncat))
    for (i in seq(ncat)) {
      x = apply(rank.table[,,i],2,cumsum)
      plot(seq(ntreat),seq(ntreat),type="n",xaxt="n",ylim=c(0,1),yaxt="n",ylab="Cumulative Probability",xlab="Rank")
      axis(side=1,at=seq(ntreat))
      axis(side=2,at=seq(0,1,by=0.2))
      title(catnames[i])
      for (j in seq(ntreat))
        points(seq(ntreat), x[,j],type="l",lty=j,col=j)
      legend(legend.position[1], legend.position[2],txnames,lty=1:ntreat,bty="n",cex=.75,col=1:ntreat)
    }
  }
}
#' Calculate SUCRA
#'
#' SUCRA is the surface under the cumulative ranking distribution defined in Salanti et al. (2011)
#'
#' @param result Object created by \code{\link{network.run}} function
#' @param txnames Treatment names used in creating legend
#' @param catnames Category names. Only used in multinomial.
#' @return Returns SUCRA for each treatment
#' @examples
#' ########### certolizumab (with baseline risk)
#' network <- with(certolizumab, {
#'  network.data(Outcomes, Study, Treat, N=N, response = "binomial", Treat.order,
#'  baseline = "common", hy.prior = list("dhnorm", 0, 9.77))
#' })
#' \donttest{
#' result <- network.run(network)
#' sucra(result)
#' }
#' @seealso \code{\link{rank.tx}}
#' @references G. Salanti, A.E. Ades, J.P.A. Ioannidisa (2011), \emph{Graphical methods and numerical summaries for presenting results from multiple-treatment meta-analysis: an overview and tutorial}, Journal of Clinical Epidemiology 64(2):163-71. \doi{10.1016/j.jclinepi.2010.03.016}
#' @export
sucra = function(result, txnames = NULL, catnames = NULL)
{
  rank.table <- rank.tx(result)
  ntreat = dim(rank.table)[1]
  if (is.null(txnames)) txnames <- paste("Treatment", result$network$Treat.order)
  if(result$network$response != "multinomial"){
    # SUCRA_t = sum over ranks r < ntreat of the cumulative probability of
    # being ranked r-th or better, divided by (ntreat - 1).
    # With exactly two treatments the cumsum/colSums machinery would collapse
    # dimensions, so SUCRA reduces to the probability of being ranked first.
    if(ntreat ==2){
      x <- rank.table[-ntreat,]
    } else{
      x <- apply(apply(rank.table[-ntreat,],2,cumsum),2,sum)/(ntreat-1)
    }
    names(x) <- txnames
  } else if(result$network$response == "multinomial"){
    # Multinomial: compute SUCRA separately for each outcome-category layer
    ncat <- dim(rank.table)[3]
    if (is.null(catnames)) catnames <- paste("Outcome Category with base 1 and comparison", 1+seq(ncat))
    x <- array(NA,dim(rank.table)[2:3])
    for (i in seq(ncat)){
      if(ntreat ==2){
        x[,i] <- rank.table[-ntreat,,i]
      } else{
        x[,i] <- apply(apply(rank.table[-ntreat,,i],2,cumsum),2,sum)/(ntreat-1)
      }
      dimnames(x) <- list(txnames,catnames)
    }
  }
  return(x)
}
#################### Deviance calculation and plots
#' Find deviance statistics such as DIC and pD.
#'
#' Calculates deviance statistics. This function automatically called in \code{\link{network.run}} and the deviance statistics are stored after sampling is finished.
#'
#' @param result Object created by \code{\link{network.run}} function
#' @return
#' \item{Dbar}{Overall residual deviance}
#' \item{pD}{Sum of leverage_arm (i.e. total leverage)}
#' \item{DIC}{Deviance information criteria (sum of Dbar and pD)}
#' \item{data.points}{Total number of arms in the meta analysis}
#' \item{dev_arm}{Posterior mean of the residual deviance in each trial arm}
#' \item{devtilda_arm}{Deviance at the posterior mean of the fitted values}
#' \item{leverage_arm}{Difference between dev_arm and devtilda_arm for each trial}
#' \item{rtilda_arm}{Posterior mean of the fitted value for binomial and multinomial}
#' \item{ybar_arm}{Posterior mean of the fitted value for normal}
#' @examples
#' #parkinsons
#' network <- with(parkinsons, {
#'  network.data(Outcomes, Study, Treat, SE = SE, response = "normal")
#' })
#' \donttest{
#' result <- network.run(network)
#' calculate.deviance(result)
#' }
#' @references S. Dias, A.J. Sutton, A.E. Ades, and N.J. Welton (2013a), \emph{A Generalized Linear Modeling Framework for Pairwise and Network Meta-analysis of Randomized Controlled Trials}, Medical Decision Making 33(5):607-617. \doi{10.1177/0272989X12458724}
#' @export
calculate.deviance <- function(result){
  network <- result$network
  samples <- result$samples
  # Dbar: posterior mean of the total residual deviance monitored as
  # "totresdev" in the model, pooled over all chains.
  totresdev <- lapply(samples, function(x){ x[,"totresdev"]})
  Dbar <- mean(unlist(totresdev))
  ###### find residual deviance by arm
  # Posterior mean of each "dev[i,j]" node, laid out as a study-by-arm matrix
  # (NA where a study has fewer than max(na) arms).
  dev <- lapply(samples, function(x) { x[,grep("dev\\[", dimnames(samples[[1]])[[2]])]})
  dev <- do.call(rbind, dev)
  dev <- apply(dev, 2, mean)
  dev_arm <- matrix(NA, nrow = network$nstudy, ncol = max(network$na))
  for(i in 1:dim(dev_arm)[1]){
    for(j in 1:dim(dev_arm)[2]){
      ind <- which(paste("dev[", i, ",", j, "]", sep = "") == names(dev))
      if(length(ind) != 0){
        dev_arm[i,j] <- dev[ind]
      }
    }
  }
  ############find leverage
  # devtilda_arm is the deviance evaluated at the posterior mean fitted value
  # (rtilda for counts, ybar for normal outcomes); leverage = dev - devtilda.
  if(network$response == "binomial"){
    rtilda <- lapply(samples, function(x){ x[,grep("rhat\\[", dimnames(samples[[1]])[[2]])] })
    rtilda <- do.call(rbind, rtilda)
    rtilda <- apply(rtilda, 2, mean)
    rtilda_arm <- devtilda_arm <- matrix(NA, nrow = network$nstudy, ncol = max(network$na))
    for(i in 1:network$nstudy){
      for(j in 1:network$na[i]){
        r_value <- network$r[i,j]
        n_value <- network$n[i,j]
        rtilda_arm[i,j] <- rtilda[which(paste("rhat[", i, ",", j, "]", sep = "") == names(rtilda))]
        # Binomial deviance 2*[r*log(r/rhat) + (n-r)*log((n-r)/(n-rhat))];
        # the ifelse guards drop terms whose count is zero (0*log(0) := 0).
        devtilda_arm[i,j] <- ifelse(r_value != 0, 2 * r_value * (log(r_value)-log(rtilda_arm[i,j])), 0)
        devtilda_arm[i,j] <- devtilda_arm[i,j] + ifelse((n_value - r_value) != 0, 2 * (n_value-r_value) *(log(n_value-r_value) - log(n_value- rtilda_arm[i,j])), 0)
      }
    }
  } else if(network$response == "normal"){
    ybar <- lapply(samples, function(x){ x[,grep("theta\\[", dimnames(samples[[1]])[[2]])] })
    ybar <- do.call(rbind, ybar)
    ybar <- apply(ybar, 2, mean)
    ybar_arm <- devtilda_arm <- matrix(NA, nrow = network$nstudy, ncol = max(network$na))
    for(i in 1:network$nstudy){
      for(j in 1:network$na[i]){
        r_value <- network$r[i,j]
        se_value <- network$se[i,j]
        # Node-split models index theta by treatment rather than arm position
        if(inherits(network, "nodesplit.network.data")){
          ybar_arm[i,j] <- ybar[which(paste("theta[", i, ",", network$t[i,j], "]", sep = "") == names(ybar))]
        } else{
          ybar_arm[i,j] <- ybar[which(paste("theta[", i, ",", j, "]", sep = "") == names(ybar))]
        }
        # Normal deviance: squared standardized residual
        devtilda_arm[i,j] <- ifelse(se_value != 0, (r_value - ybar_arm[i,j])^2 / se_value^2, 0)
      }
    }
  } else if(network$response == "multinomial"){
    rtilda <- lapply(samples, function(x){ x[,grep("rhat\\[", dimnames(samples[[1]])[[2]])]})
    rtilda <- do.call(rbind, rtilda)
    rtilda <- apply(rtilda, 2, mean)
    rtilda_arm <- devtilda_category <- array(NA, dim = c(network$nstudy, max(network$na), network$ncat))
    for(i in 1:network$nstudy){
      for(j in 1:network$na[i]){
        for(k in 1:network$ncat){
          r_value <- network$r[i,j,k]
          rtilda_arm[i,j,k] <- rtilda[which(paste("rhat[", i, ",", j, ",", k, "]", sep = "") == names(rtilda))]
          devtilda_category[i,j,k] <- ifelse(r_value != 0, 2 * r_value * log(r_value/rtilda_arm[i,j,k]), 0)
        }
      }
    }
    # Per-arm deviance is the sum of category contributions
    devtilda_arm <- apply(devtilda_category, 1:2, sum)
  }
  # pD = sum of per-arm leverages; DIC = Dbar + pD (Dias et al. 2013)
  leverage_arm <- dev_arm - devtilda_arm
  pD <- sum(leverage_arm, na.rm = TRUE)
  DIC <- Dbar + pD
  out <- list(Dbar = Dbar, pD = pD, DIC = DIC, data.points = sum(network$na), dev_arm = dev_arm, devtilda_arm = devtilda_arm, leverage_arm = leverage_arm)
  if(network$response == "binomial" || network$response == "multinomial"){
    out$rtilda_arm = rtilda_arm
  } else if(network$response == "normal"){
    out$ybar_arm = ybar_arm
  }
  return(out)
}
#' Make a deviance plot
#'
#' This makes a deviance plot which plots residual deviance (dev_arm) vs. all the arms for each study.
#' @param result Object created by \code{\link{network.run}} function
#' @return None
#' @examples
#' network <- with(blocker, {
#'  network.data(Outcomes, Study, Treat, N = N, response = "binomial")
#' })
#' \donttest{
#' result <- network.run(network)
#' network.deviance.plot(result)
#' }
#' @export
network.deviance.plot <- function(result){
  # Flatten the study-by-arm residual deviance matrix row by row and drop the
  # NA padding for studies with fewer than the maximum number of arms.
  per_arm <- c(t(result$deviance$dev_arm))
  per_arm <- per_arm[!is.na(per_arm)]
  n_arms <- sum(result$network$na)
  plot(seq(n_arms), per_arm, xlab = "Arm", ylab = "Residual Deviance", main = "Per-arm residual deviance")
}
#' Make a leverage plot
#'
#' This function makes a leverage vs. square root of residual deviance plot (mean for each study)
#'
#' @param result Object created by \code{\link{network.run}} function
#' @param per.study Indicator for using an average square root of residual deviance for each study instead of for each arm. Default is FALSE.
#' @return None
#' @examples
#' network <- with(blocker, {
#'  network.data(Outcomes, Study, Treat, N = N, response = "binomial")
#' })
#' \donttest{
#' result <- network.run(network)
#' network.leverage.plot(result)
#' }
#' @export
network.leverage.plot <- function(result, per.study = FALSE){
  deviance <- result$deviance
  if(per.study == TRUE){
    # One point per study: row means of sqrt(deviance) and leverage
    dev <- apply(sqrt(deviance$dev_arm), 1, mean, na.rm = TRUE)
    leverage <- apply(deviance$leverage_arm, 1, mean, na.rm = TRUE)
    plot(dev, leverage, xlim = c(0, max(c(dev, 2.5))), ylim = c(0, max(c(leverage,4))),
         xlab = "Square root of residual deviance", ylab = "Leverage", main = "Leverage versus residual deviance")
    mtext("Per-study mean contribution")
  } else{
    # One point per arm: flatten row-wise and drop the NA padding
    dev <- c(t(sqrt(deviance$dev_arm)))
    dev <- dev[!is.na(dev)]
    leverage <- c(t(deviance$leverage_arm))
    leverage <- leverage[!is.na(leverage)]
    plot(dev, leverage, xlim = c(0, max(c(dev, 2.5))), ylim = c(0, max(c(leverage,4))),
         xlab = "Square root of residual deviance", ylab = "Leverage", main = "Leverage versus residual deviance")
    mtext("Per-arm contribution")
  }
  # Overlay the reference curves leverage = i - dev^2 (points outside the
  # outermost curve are the conventional poor-fit flags)
  x <- NULL
  for(i in 1: floor(max(c(leverage,4)))){
    curve(i-x^2, from=0, to = max(c(dev, 2.5)), add = TRUE)
  }
}
#' Make a covariate plot
#'
#' This function makes a covariate plot of how the relative effect changes as the covariate value changes.
#' User needs to specify one base treatment and one comparison treatment to make this plot (base category and comparison category is also needed for multinomial).
#' The function uses the \code{\link{relative.effects}} to calculate the correct relative effect. 2.5\%, median, and 97.5\% C.I. are drawn.
#'
#' @param result Object created by \code{\link{network.run}} function
#' @param base.treatment Base treatment for relative effect
#' @param comparison.treatment Treatment comparing against base treatment
#' @param base.category Base category for multinomial data. Note that category in multinomial denotes which column it is in the Outcomes matrix. Thus, this should be a numeric value.
#' @param comparison.category Comparison category for multinomial data
#' @param covariate.name A vector of covariate names of the covariate that goes into x-axis label
#' @return None
#' @examples
#' ########### certolizumab (with covariate)
#' network <- with(certolizumab, {
#'  network.data(Outcomes, Study, Treat, N=N, response="binomial", Treat.order,
#'  covariate = covariate, hy.prior = list("dhnorm", 0, 9.77))
#' })
#' \donttest{
#' result <- network.run(network)
#' network.covariate.plot(result, base.treatment = "Placebo", comparison.treatment = "CZP",
#' covariate.name = "Disease Duration")
#' }
#' @export
network.covariate.plot <- function(result, base.treatment = NULL, comparison.treatment= NULL, base.category = NULL, comparison.category = NULL, covariate.name = NULL){
  # BUG FIX: `network` must be assigned before the covariate check below.
  # Previously `network$covariate` was read before `network <- result$network`,
  # so the check silently resolved any object named `network` in the calling
  # or global environment (or errored if none existed).
  network <- result$network
  if(is.null(network$covariate)){
    stop("need to provide covariate information to make this plot")
  }
  if(result$network$response != "multinomial"){
    if(is.null(base.treatment) || is.null(comparison.treatment)){
      stop("need to specify both base.treatment and comparison.treatment")
    }
  } else{
    if(is.null(base.treatment) || is.null(comparison.treatment) || is.null(base.category) || is.null(comparison.category)){
      stop("need to specify all base.treatment, comparison.treatment, base.category, and comparison.category")
    }
  }
  observed <- network$covariate
  # For each covariate column: the observed range, 7 evenly spaced evaluation
  # points across it, and the model's centering value mx_i.
  xvals <- matrix(NA, nrow = dim(network$covariate)[2], ncol = 7)
  xlim <- matrix(NA, nrow = dim(network$covariate)[2], ncol = 2)
  covariate_mx <- NULL
  for(i in 1:dim(network$covariate)[2]){
    xlim[i,] <- c(min(observed[,i], na.rm = TRUE), max(observed[,i], na.rm = TRUE))
    xvals[i,] <- seq(xlim[i,1], xlim[i,2], length.out = 7)
    covariate_mx <- c(covariate_mx, network[[paste0("mx",i)]])
  }
  for(i in 1:dim(network$covariate)[2]){
    # Vary covariate i over its grid while holding the others at their means,
    # and summarize the relative effect at each grid point.
    res <- lapply(xvals[i,], function(xval) {
      covariate <- covariate_mx
      covariate[i] <- xval
      if(network$response != "multinomial"){
        samples <- relative.effects(result, base.treatment, comparison.treatment, covariate = covariate)
      } else{
        samples <- relative.effects(result, base.treatment, comparison.treatment, base.category, comparison.category, covariate = covariate)
      }
      samples <- as.matrix(samples)
      stats <- t(apply(samples, 2, quantile, probs = c(0.025, 0.5, 0.975)))
      data.frame(median = stats[,"50%"], lower = stats[,"2.5%"], upper = stats[,"97.5%"])
    })
    res <- do.call(rbind,res)
    # Column label of the contrast (used as the y-axis label)
    dim_names <- if(network$response != "multinomial"){
      dimnames(as.matrix(relative.effects(result, base.treatment, comparison.treatment)))[[2]]
    } else{
      dimnames(as.matrix(relative.effects(result, base.treatment, comparison.treatment, base.category, comparison.category)))[[2]]
    }
    ylim <- c(min(res), max(res))
    xlab_name <- ifelse(is.null(covariate.name), paste0("covariate ", i), covariate.name[i])
    # Median line with dashed 95% credible bounds
    plot(xvals[i,], res$median, type = "l", xlim = xlim[i,], ylim = ylim, main = "Treatment effect vs. covariate", xlab = xlab_name, ylab = dim_names)
    lines(xvals[i,], res$lower, lty = 2)
    lines(xvals[i,], res$upper, lty = 2)
  }
}
#' Calculate correlation matrix for multinomial heterogeneity parameter.
#'
#' This function calculates correlation matrix from the variance matrix for heterogeneity parameter. Only used for multinomial.
#' @param result Object created by \code{\link{network.run}} function
#' @return Returns correlation matrix
#' @examples
#' #cardiovascular
#' network <- with(cardiovascular, {
#'  network.data(Outcomes, Study, Treat, N, response = "multinomial")
#' })
#' \donttest{
#' result <- network.run(network)
#' variance.tx.effects(result)
#' }
#' @export
variance.tx.effects <- function(result)
{
  if(result$network$response != "multinomial"){
    stop("this function is used only for multinomial response")
  }
  # Posterior mean of each element of the heterogeneity variance matrix,
  # pooled across chains, reshaped to (ncat-1) x (ncat-1).
  sigma_draws <- do.call(rbind, pick.summary.variables(result, only.pars = c("sigma")))
  sigma_matrix <- matrix(apply(sigma_draws, 2, mean), nrow = result$network$ncat - 1)
  # Standard correlation: Sigma_ij / (sd_i * sd_j)
  sds <- sqrt(diag(sigma_matrix))
  cor_matrix <- sigma_matrix / outer(sds, sds)
  return(list(sigma_matrix = sigma_matrix, cor_matrix = cor_matrix))
}
#' Draws forest plot
#'
#' Draws forest plot of pooled treatment effect. Reports odds ratio for binomial and multinomial outcomes and continuous scale for normal outcomes.
#'
#' @param result Object created by \code{\link{network.run}} function
#' @param level Confidence level. Default is 0.95 denoting 95 percent C.I.
#' @param ticks.position Position of the x-axis tick marks. If left unspecified, the function tries to set it at sensible values
#' @param label.multiplier This is a multiplying factor to move the position of the text associated with median[lower, upper] values. This number is multiplied by the range of x-axis and added to the x-axis limit. Default multiplier is set to 0.2.
#' @param label.margin This is how much margin space you specify to assign space for the median[lower, upper] values. Default margin is set to 10.
#' @param title Header name which you can modify
#' @param only.reference.treatment Indicator for plotting only the comparison to the reference treatment
#' @return None
#' @examples
#' network <- with(certolizumab, {
#' network.data(Outcomes, Study, Treat, N=N, response="binomial", Treat.order,
#' covariate = covariate, hy.prior = list("dhnorm", 0, 9.77))
#' })
#' \donttest{
#' result <- network.run(network)
#' network.forest.plot(result)
#' }
#' @references W. Viechtbauer (2010), \emph{Conducting meta-analyses in R with the metafor package}, Journal of Statistical Software, 36(3):1-48. \doi{10.18637/jss.v036.i03}
#' @export
network.forest.plot <- function(result, level = 0.95, ticks.position = NULL, label.multiplier = 0.2, label.margin = 10, title = "Network Meta-analysis Forest plot", only.reference.treatment = FALSE){
ncat <- ifelse(result$network$response == "multinomial", result$network$ncat, 2)
for(i in 1:(ncat-1)){
if(i != 1) grid::grid.newpage()
if(result$network$response == "multinomial"){
lower <- relative.effects.table(result, summary_stat = "quantile", probs = (1- level)/2)[,,i]
OR <- relative.effects.table(result, summary_stat = "quantile", probs = 0.5)[,,i]
upper <- relative.effects.table(result, summary_stat = "quantile", probs = level + (1- level)/2)[,,i]
} else{
lower <- relative.effects.table(result, summary_stat = "quantile", probs = (1- level)/2)
OR <- relative.effects.table(result, summary_stat = "quantile", probs = 0.5)
upper <- relative.effects.table(result, summary_stat = "quantile", probs = level + (1- level)/2)
}
if(only.reference.treatment == TRUE){
lower <- lower[1,-1]
OR <- OR[1,-1]
upper <- upper[1,-1]
} else{
lower <- -lower[lower.tri(lower)]
OR <- -OR[lower.tri(OR)]
upper <- -upper[lower.tri(upper)]
}
odds <- data.frame(lower = lower, OR = OR, upper = upper)
if(result$network$response %in% c("binomial", "multinomial")){
odds <- exp(odds) #report odds ratio instead of log odds ratio
}
Treat.order <- result$network$Treat.order
ts <- 1:length(Treat.order)
comps <- combn(ts, 2)
name <- rep(NA, ncol(comps))
for(j in 1:ncol(comps)){
name[j] <- paste0(Treat.order[comps[2,j]]," vs ", Treat.order[comps[1,j]])
}
if(only.reference.treatment == TRUE){
name <- name[1:(length(Treat.order)-1)]
}
odds$name <- name
if(is.null(ticks.position)){
if(result$network$response %in% c("binomial", "multinomial")){
ticks <- c(0.1, 0.2, 0.5, 1, 2, 5, 10)
} else if(result$network$response == "normal"){
ticks <- pretty(c(min(odds$lower, na.rm =TRUE), max(odds$upper, na.rm = TRUE)))
}
} else{
ticks <- ticks.position
}
if(result$network$response %in% c("binomial", "multinomial")){
yintercept <- 1
} else if(result$network$response == "normal"){
yintercept <- 0
}
p <- ggplot(odds, aes(y = OR, x = name)) +
geom_point() +
geom_errorbar(aes(ymin = lower, ymax = upper), width = .2) +
scale_x_discrete(limits = name) +
geom_hline(yintercept = yintercept, linetype = 2) +
coord_flip() +
theme_bw() +
theme(plot.margin = unit(c(1,label.margin,1,1), "lines"))
if(result$network$response %in% c("binomial")){
p <- p + labs(x = "Treatment comparison", y = "Odds Ratio", title = title) +
scale_y_log10(breaks = ticks, labels = ticks)
} else if(result$network$response %in% c("multinomial")){
p <- p + labs(x = "Treatment comparison", y = "Odds Ratio", title = paste0(title, ": Multinomial Category ", (i+1), " vs 1")) +
scale_y_log10(breaks = ticks, labels = ticks)
} else if(result$network$response %in% c("normal")){
p <- p + labs(x = "Treatment comparison", y = "Continuous Scale", title = title) +
scale_y_continuous(breaks = ticks, labels = ticks)
}
#find actual xlim range; this part of code keeps changing with ggplot update..
xlim.range <- ggplot_build(p)$layout$panel_params[[1]]$x.range
p <- p + geom_text(aes(label = paste0(sprintf("%0.2f", round(OR, digits = 2)), " [", sprintf("%0.2f", round(lower, digits = 2)) , ", ", sprintf("%0.2f", round(upper, digits = 2)), "]")), y = xlim.range[2] + diff(xlim.range)*label.multiplier, x = 1:length(name)) # hjust = -1, vjust = 2)
median_name_location <- ifelse(length(odds[,1]) <= 3, length(name) + 0.5, length(name) + 1)
p <- p + geom_text(aes(label = "Median [95% Crl]"), y = xlim.range[2] + diff(xlim.range)*label.multiplier, x = median_name_location)
gt <- ggplot_gtable(ggplot_build(p))
gt$layout$clip[gt$layout$name == "panel"] <- "off"
grid::grid.draw(gt)
}
}
#' Draws network graph using igraph package
#'
#' This function draws network graph using igraph package
#' @param network Object created by \code{\link{network.data}} function
#' @param label.dist distance of the label from the node. Default is 2.
#' @return None
#' @examples
#' #cardiovascular
#' network <- with(thrombolytic, {
#' network.data(Outcomes, Study, Treat, N=N, response = "binomial")
#' })
#' draw.network.graph(network)
#' @export
draw.network.graph <- function(network, label.dist = 2){
  # Build parallel treatment / study-id vectors.  Contrast-level data stores
  # treatments in a study-by-arm matrix padded with NA for unequal arm
  # counts; arm-level data already stores them as parallel vectors.
  if (inherits(network, "contrast.network.data")) {
    flat_treat <- c(t(network$Treat))     # compute once instead of twice
    Treat <- flat_treat[!is.na(flat_treat)]
    Study <- rep(seq_along(network$na), times = network$na)  # safe for length 0
  } else {
    Treat <- network$Treat.order[network$Treat]
    Study <- network$Study
  }
  # One row per within-study pairwise treatment comparison.
  # (renamed from `pairs`, which shadowed graphics::pairs)
  edge_df <- do.call(rbind, lapply(split(Treat, Study),
                                   function(x) t(combn(x, 2))))
  # Count how many studies make each direct comparison; data.frame() names
  # the unnamed matrix columns X1/X2, which the formula below relies on.
  edge_df <- aggregate(rep(1, length(X1)) ~ X1 + X2, data = data.frame(edge_df), sum)
  colnames(edge_df)[3] <- "freq"
  # Edge width encodes the number of direct comparisons.
  # NOTE(review): graph.edgelist() is the legacy igraph name
  # (graph_from_edgelist in current releases); kept for compatibility.
  g <- igraph::graph.edgelist(as.matrix(edge_df[, 1:2]), directed = FALSE)
  plot(g, edge.curved = FALSE, edge.width = edge_df$freq,
       vertex.label.dist = label.dist)
}
#' Plotting comparison of posterior mean deviance in the consistency model and inconsistency model
#'
#' This function compares posterior mean deviance of inconsistency model and consistency model.
#' Such comparison provides information that can help identify the loops in which inconsistency is present.
#'
#' This function draws network graph using igraph package
#' @param result1 consistency model result from running \code{\link{network.run}} function
#' @param result2 inconsistency model result from running \code{\link{ume.network.data}} function
#' @param with.label indicator to show the study number; default is true.
#' @return None
#' @references S. Dias, N.J. Welton, A.J. Sutton, D.M. Caldwell, G. Lu, and A.E. Ades (2013), \emph{Evidence synthesis for decision making 4: inconsistency in networks of evidence based on randomized controlled trials}, Medical Decision Making 33(5):641-656. \doi{10.1177/0272989X12455847}
#' @examples
#' network1 <- with(smoking, {
#' network.data(Outcomes, Study, Treat, N = N, response = "binomial", type = "random")
#' })
#'
#' network2 <- with(smoking, {
#' ume.network.data(Outcomes, Study, Treat, N = N, response = "binomial", type = "random")
#' })
#' \donttest{
#' result1 <- network.run(network1)
#' result2 <- ume.network.run(network2)
#' network.inconsistency.plot(result1, result2)
#' }
#' @export
network.inconsistency.plot <- function(result1, result2, with.label = TRUE){
  # Deviance contributions are only comparable when the first fit comes from
  # the consistency model and the second from the UME (inconsistency) model.
  if(!inherits(result1, "network.result")){
    stop("result1 has to be a consistency model result")
  }
  if(!inherits(result2, "ume.network.result")){
    stop("result2 has to be an inconsistency model result")
  }
  # Label each arm-level deviance with its study (row) number, flatten the
  # study x arm matrices row-wise, and drop the NA padding used for studies
  # with fewer arms.
  rownumber <- rep(1:nrow(result1$deviance$dev_arm), each = ncol(result1$deviance$dev_arm))
  dev <- c(t(result1$deviance$dev_arm))
  dev2 <- c(t(result2$deviance$dev_arm))
  names(dev) <- names(dev2) <- rownumber
  dev <- dev[!is.na(dev)]
  dev2 <- dev2[!is.na(dev2)]
  max_point <- ceiling(max(c(dev, dev2))) #for same scale on both axes
  # Points far from the y = x line mark data points fit very differently by
  # the two models, i.e. potential sources of inconsistency.
  # (TRUE/FALSE instead of T, and isTRUE() so a non-logical argument cannot
  # crash the if() with a missing-value error.)
  if(isTRUE(with.label)){
    plot(dev2 ~ dev, col="lightblue", pch=19, cex=2, xlim = c(0, max_point), ylim = c(0, max_point), xlab = "consistency model", ylab = "inconsistency model", cex.lab = 0.75, cex.axis = 0.75)
    abline(0, 1, lty = "dotted")
    text(dev2 ~ dev, labels = names(dev), cex = 0.8)
  } else{
    plot(dev2 ~ dev, xlim = c(0, max_point), ylim = c(0, max_point), xlab = "consistency model", ylab = "inconsistency model", cex.lab = 0.75, cex.axis = 0.75)
    abline(0, 1, lty = "dotted")
  }
}
|
c8fbe1166edf0693984a894ac425b840a4ccc7e2 | 04ebb0b8a9b9b02073289094878b84c2f3329c31 | /man/createCoefficientsTable.Rd | 65f2fe265ecf0634e69c084da3e53bee4860c517 | [
"MIT"
] | permissive | piguy314159265/flightdeck | 1fccebc041243422db5245dca85c1ccbaa45e9f0 | 906362d29a5199ecc2dca6b295f116914855db39 | refs/heads/master | 2021-01-12T05:07:55.557227 | 2018-04-26T17:41:51 | 2018-04-26T17:41:51 | 77,863,137 | 0 | 0 | null | 2017-01-02T21:32:09 | 2017-01-02T21:32:09 | null | UTF-8 | R | false | true | 563 | rd | createCoefficientsTable.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tables.R
\name{createCoefficientsTable}
\alias{createCoefficientsTable}
\title{Create a table of coefficients}
\usage{
createCoefficientsTable(mod, digits, ...)
}
\arguments{
\item{mod}{model object}
\item{digits}{number of digits to display}
\item{...}{additional arguments passed to \code{\link{createCoefficientsTable}}}
}
\description{
This is an S3 method that creates a table of coefficients that can be passed
to \code{\link{fdPanelCoefficients}} to display in a dashboard.
}
|
70a1e025c13a7bc7266c14744c8b45360573dd55 | 2a9bd26a5aae4bc3c3b02a0d966259671911db89 | /Exploratory Data Analysis/Assignment 1/plot4.R | 2411cd330a5dc2ceb917f15e678e8bd3d93628c9 | [] | no_license | kernelCruncher/Data-Science-Specialisation | 69acf21b5705743abd80555069541bbddd3f6460 | 1c54223023db0e5bb4a1759d3395dacc01c4d3e7 | refs/heads/main | 2023-06-10T14:05:04.146832 | 2021-06-24T20:25:25 | 2021-06-24T20:25:25 | 379,712,417 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,335 | r | plot4.R | library(dplyr)
# Read the raw power-consumption file: semicolon separated, "?" marks
# missing values, first two columns are Date and Time strings.
power <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                    colClasses = c('character', 'character', 'numeric', 'numeric',
                                   'numeric', 'numeric', 'numeric', 'numeric', 'numeric'),
                    na.strings = "?")
# Combine Date and Time into one timestamp column and drop the originals.
power <- power %>%
  mutate(DateTime = paste(Date, Time)) %>%
  select(-(1:2))
power$DateTime <- as.POSIXct(strptime(power$DateTime, "%d/%m/%Y %H:%M:%S"))
# Keep only the two days of interest: 2007-02-01 and 2007-02-02.
power <- filter(power, DateTime >= as.POSIXct("2007-2-1") & DateTime < as.POSIXct("2007-2-3"))
# 2 x 2 panel of base-graphics plots.
par(mfrow = c(2, 2), mar = c(4, 4, 2, 1))
plot(Global_active_power ~ DateTime, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "", data = power)
plot(Voltage ~ DateTime, type = "l",
     ylab = "Voltage (volt)", xlab = "", data = power)
# Third panel: the three sub-metering series overlaid in black/red/blue.
plot(Sub_metering_1 ~ DateTime, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "", data = power)
lines(Sub_metering_2 ~ DateTime, col = 'Red', data = power)
lines(Sub_metering_3 ~ DateTime, col = 'Blue', data = power)
legend("topright", col = c("black", "red", "blue"), lty = "solid", bty = "n",
       y.intersp = 0.2, cex = 0.5, ncol = 1,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(Global_reactive_power ~ DateTime, type = "l",
     ylab = "Global Rective Power (kilowatts)", xlab = "", data = power)
# Copy the on-screen figure to a 480x480 PNG and close the PNG device.
dev.copy(png, "plot4.png", width = 480, height = 480)
dev.off()
def3732a51cca88988c7910d6f9d71f8ff109130 | 5bb2c8ca2457acd0c22775175a2722c3857a8a16 | /man/MatchIt.url.Rd | e49205875ad24b6947dcb2e02a53bf7e0df50bbd | [] | no_license | IQSS/Zelig | d65dc2a72329e472df3ca255c503b2e1df737d79 | 4774793b54b61b30cc6cfc94a7548879a78700b2 | refs/heads/master | 2023-02-07T10:39:43.638288 | 2023-01-25T20:41:12 | 2023-01-25T20:41:12 | 14,958,190 | 115 | 52 | null | 2023-01-25T20:41:13 | 2013-12-05T15:57:10 | R | UTF-8 | R | false | false | 191 | rd | MatchIt.url.Rd | \name{MatchIt.url}
\alias{MatchIt.url}
\title{Table of links for Zelig}
\description{
Table of links used by \code{help.zelig} for the companion MatchIt package.
}
\keyword{datasets}
|
4f5dde6c4f9d6429b65bb14e12175021bce3eb78 | f3d0f307f48b01d05f445b9ee53fb05e577604dd | /6.Signatures/tutorial/step4_SparseSignatures_inference/main_part1.R | 02823ea65f03f2eb710e68aecb430d2bdc689bb7 | [] | no_license | caravagn/GDA | 0dbee432aff216afb17b1b2b6dde950267095d77 | e1f784e5eeef145186820d01756b588ace344339 | refs/heads/main | 2023-05-31T02:07:59.758433 | 2021-06-05T22:34:17 | 2021-06-05T22:34:17 | 342,669,562 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 490 | r | main_part1.R | # load required libraries and sources
# Load required libraries and sources
library(SparseSignatures)

# Load the background signature and the observed trinucleotide counts
load("background.RData")
load("trinucleotides_counts.RData")

# Settings: candidate numbers of signatures, NMF restarts, and the RNG seed.
# (`<-` used for assignment instead of `=`, per R convention.)
K <- 2:7
nmf_runs <- 10
my_seed_starting_beta <- 789

# Fit the initial beta (signature) matrices for each candidate K and save
# the result for the next pipeline step.
initial_betas <- startingBetaEstimation(x = trinucleotides_counts, K = K,
                                        background_signature = background,
                                        normalize_counts = FALSE,
                                        nmf_runs = nmf_runs,
                                        seed = my_seed_starting_beta,
                                        verbose = TRUE)
save(initial_betas, file = "results/initial_betas.RData")
|
5002d419779a8ace590e5a681f6036de06a0967a | 05c855aedb2f08484d6b76ebbd1b50b5212ef8c6 | /lib/reshape_ms(del).R | 19ca023c8667927bc15bc01049ae448990935d7d | [] | no_license | Wanting-Cui/Collaborative-Filtering | 80e12b3ca5a9bcb59b52c99f263cca13a5e2496b | 01eebd86fed06fcc9c537e1348f3b0ce0f9f76d2 | refs/heads/master | 2020-04-14T15:30:18.980070 | 2019-01-03T05:53:56 | 2019-01-03T05:53:56 | 163,928,996 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,260 | r | reshape_ms(del).R | library(reshape)
# Read the raw user/movie/score triples for the EachMovie sample.
movies_train<-read.csv("eachmovie_sample/data_train.csv")
movies_test<-read.csv("eachmovie_sample/data_test.csv")
# Pivot to wide format: one row per User, one "Score.<Movie>" column per
# movie; cells are NA where the user did not rate the movie.
movies_train_reshape <- reshape(movies_train,
                                v.names = "Score",
                                direction = "wide",
                                idvar = "User",
                                timevar = "Movie")
movies_test_reshape <- reshape(movies_test,
                               v.names = "Score",
                               direction = "wide",
                               idvar = "User",
                               timevar = "Movie")
# Cache the wide matrices so later runs can skip the reshape.
# NOTE(review): saving and immediately re-loading the same objects is
# redundant in a single run -- presumably kept so the load() lines can be
# run on their own; confirm the hard-coded ~/Documents paths are intended.
save(movies_train_reshape, file = "~/Documents/data_sample/movies_train_reshape.Rdata")
save(movies_test_reshape, file = "~/Documents/data_sample/movies_test_reshape.Rdata")
load("~/Documents/data_sample/movies_train_reshape.Rdata")
load("~/Documents/data_sample/movies_test_reshape.Rdata")
```
# returns the corresponding row or column for a user or movie.
# Return the 0/1 "liked" indicator row for `user` across all movies.
# Reads the globals `users` (user ids, in row order) and `graph` (the
# user-by-movie adjacency data frame whose first column is the user id).
get_movies_num <- function(user){
  row_idx <- match(user, users)
  graph[row_idx, -1]
}
# Return the 0/1 "liked" indicator column for `movie` across all users.
# Reads the globals `movies` (movie ids, in column order) and `graph`;
# the +1 offset skips the leading user-id column of `graph`.
get_users_num <- function(movie){
  col_idx <- match(movie, movies) + 1
  graph[, col_idx]
}
# return the users or movies with a non zero
# Return the ids of the movies `user` liked, i.e. the non-zero entries of
# the user's row of the global `graph`.  (`<-` instead of `=` for local
# assignment, explicit return() dropped in favour of the last expression.)
get_movies <- function(user){
  series <- get_movies_num(user)
  movies[which(series != 0)]
}
# Return the ids of the users who liked `movie`, i.e. the non-zero entries
# of the movie's column of the global `graph`.
get_users <- function(movie){
  series <- get_users_num(movie)
  users[which(series != 0)]
}
# SimRank similarity between two users:
#   sim(u1, u2) = C / (|M(u1)| * |M(u2)|) * sum of movie_sim over all
#                 (movie of u1, movie of u2) pairs,
# where M(u) is the set of movies liked by u; identical users score 1.
# Reads the globals `movies` and `movie_sim`.  As in the original
# formulation, a user with no liked movies yields a 0 denominator and a
# NaN/Inf result -- callers are expected to avoid that case.
user_simrank <- function(u1, u2, C) {
  if (u1 == u2) {
    return(1)
  }
  pre <- C / (sum(get_movies_num(u1)) * sum(get_movies_num(u2)))
  # Vectorised replacement for the original nested loops: summing the
  # sub-matrix of movie similarities over both index sets gives exactly
  # the same total in a single subset operation (and matches each movie
  # id only once instead of once per inner-loop pass).
  i <- match(get_movies(u1), movies)
  j <- match(get_movies(u2), movies)
  post <- sum(movie_sim[i, j])
  pre * post
}
# SimRank similarity between two movies: the mirror image of
# user_simrank(), averaging `user_sim` over all pairs of users who liked
# each movie.  Reads the globals `users` and `user_sim`; a movie liked by
# nobody yields a 0 denominator (NaN/Inf), as in the original.
movie_simrank <- function(m1, m2, C) {
  if (m1 == m2) {
    return(1)
  }
  pre <- C / (sum(get_users_num(m1)) * sum(get_users_num(m2)))
  # Single vectorised sub-matrix sum instead of the original double loop
  # with a match() call per pair -- identical result.
  i <- match(get_users(m1), users)
  j <- match(get_users(m2), users)
  post <- sum(user_sim[i, j])
  pre * post
}
# Run `times` SimRank sweeps, updating the global similarity matrices
# `user_sim` and/or `movie_sim` in place via `<<-`.
#
# C          SimRank decay constant (0 < C < 1).
# times      number of sweeps to run (seq_len() makes times = 0 a no-op;
#            the original `1:times` would have run twice).
# calc_user  update the user-user similarity matrix?  (TRUE/FALSE instead
#            of the reassignable T/F)
# calc_movie update the movie-movie similarity matrix?
# data       unused; kept only for backward compatibility with callers.
#
# NOTE(review): entries are overwritten as the sweep progresses, so later
# scores within one sweep already see updated values (Gauss-Seidel style)
# rather than a snapshot of the previous sweep.  That matches the original
# implementation and is deliberately preserved here.
simrank <- function(C = 0.8, times = 1, calc_user = TRUE, calc_movie = FALSE, data){
  for (run in seq_len(times)){
    if (calc_user){
      for (ui in users){
        for (uj in users){
          i <- match(ui, users)
          j <- match(uj, users)
          user_sim[i, j] <<- user_simrank(ui, uj, C)
        }
      }
    }
    if (calc_movie){
      for (mi in movies){
        for (mj in movies){
          i <- match(mi, movies)
          j <- match(mj, movies)
          movie_sim[i, j] <<- movie_simrank(mi, mj, C)
        }
      }
    }
  }
}
# Build the bipartite "liked" graph from the wide ratings matrix: ratings
# of 5+ become 1 (liked), everything else -- including missing ratings --
# becomes 0.  Column 1 (User) is left untouched.
graph <- movies_train_reshape
graph[is.na(graph)] <- 0
graph[,-1][graph[,-1] < 5] <- 0
graph[,-1][graph[,-1] >= 5] <- 1
# set similarity matrices to be calculated
# NOTE(review): these two flags are never passed to simrank() below, which
# falls back on its own defaults -- confirm this is intentional.
calc_user = T
calc_movie = F
# initialize the similarity matrices
# NOTE(review): dim(graph)[2] counts the leading User column too, so
# movie_sim has one more row/column than there are movies; the extra
# entries appear never to be read, but verify this is intended.
user_sim <- diag(dim(graph)[1])
movie_sim <- diag(dim(graph)[2])
# create list of users and movies
users <- graph[,1]
movies <- colnames(graph[,-1])
# Run (and time) one SimRank sweep over users only.
system.time(simrank(0.8, 1))
# Attach user ids as both column names and a leading column before export.
colnames(user_sim) <- users
user_sim <- cbind(users, user_sim)
write.csv(user_sim, file='~/Documents/data_sample/simrankusers_100.csv', row.names = FALSE) |
927146c8ad043dd1aff91559062c3b06a5524ddc | 4ba230d58bb611d9a854c25966c60ee2900129ad | /dc-hierarchical-models.R | 07f3481b39b60c8c6c1d6b4d70758674abea3388 | [] | no_license | jilmun/notes | 014c20d300e26f1fe92c2c596a6d206816c5a9c6 | caa58293176e88d780427df87827bc7ee462cb35 | refs/heads/master | 2021-09-26T14:52:34.370858 | 2018-10-31T02:12:17 | 2018-10-31T02:12:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,624 | r | dc-hierarchical-models.R | # Hierarchical and Mixed Effect Models
# October 2018, Richard Erickson
library(lme4)

# studentData -------------------------------------------------------------

# Mixed effect model
# Predict mathgain based on sex, mathprep, mathknow
# add classid and schoolid as random effects
# (1|classid) and (1|schoolid) give each classroom and each school its own
# random intercept; REML = TRUE fits by restricted maximum likelihood.
# NOTE(review): studentData is assumed to come from the exercise
# environment -- it is not defined in this file.
lmerModel <- lmer(mathgain ~ sex +
                    mathprep + mathknow + (1|classid) +
                    (1|schoolid), data = studentData, na.action = "na.omit",
                  REML = TRUE)
summary(lmerModel)

# Extract and plot
# NOTE(review): extractAndPlot() is not defined in this file -- presumably
# a course-provided helper; confirm it is available before running.
extractAndPlot(lmerModel)
# countyBirthsData --------------------------------------------------------

# First, build a lmer with state as a random effect. Then look at the model's summary and the plot of residuals.
birthRateStateModel <- lmer(BirthRate ~ (1 | State), data =countyBirthsData)
summary(birthRateStateModel)
plot(birthRateStateModel)

# Next, plot the predicted values from the model on top of the plot shown during the video.
# Predictions include each state's random intercept, so blue points show the
# state-level shrinkage of the raw county birth rates.
countyBirthsData$birthPredictState <- predict(birthRateStateModel, countyBirthsData)
ggplot() + theme_minimal() +
  geom_point(data =countyBirthsData,
             aes(x = TotalPopulation, y = BirthRate)) +
  geom_point(data = countyBirthsData,
             aes(x = TotalPopulation, y = birthPredictState),
             color = 'blue', alpha = 0.5)

# Include the AverageAgeofMother as a fixed effect within the lmer and state as a random effect
ageMotherModel <- lmer( BirthRate ~ AverageAgeofMother + (1|State),
                        countyBirthsData)
summary(ageMotherModel)

# Compare the random-effect model to the linear effect model
summary(lm(BirthRate ~ AverageAgeofMother, data = countyBirthsData))

# Include the AverageAgeofMother as a correlated random-effect slope parameter
# (1 + x | g) estimates a correlation between random intercept and slope ...
ageMotherModelRandomCorrelated <- lmer( BirthRate ~ AverageAgeofMother + (AverageAgeofMother | State),
                                        countyBirthsData)
summary(ageMotherModelRandomCorrelated)

# Include the AverageAgeofMother as an uncorrelated random-effect slope parameter
# ... while the double-bar || form forces that correlation to zero.
ageMotherModelRandomUncorrelated <- lmer( BirthRate ~ AverageAgeofMother + (AverageAgeofMother || State),
                                          countyBirthsData)
summary(ageMotherModelRandomUncorrelated)

# Checking results --------------------------------------------------------
# NOTE(review): `out` below is assumed to be a fitted lmer object from the
# exercise environment; it is not assigned anywhere in this file.

# Extract the fixed-effect coefficients
fixef(out)

# Extract the random-effect coefficients
ranef(out)

# Estimate the confidence intervals
confint(out)

# Extract out the parameter estimates and confidence intervals and manipulate the data
# Rows 3:4 of confint() skip the random-effect / sigma rows so only the
# fixed-effect intervals line up with fixef(out).
dataPlot <- data.frame(cbind( fixef(out), confint(out)[ 3:4, ]))
rownames(dataPlot)[1] <- "Intercept"
colnames(dataPlot) <- c("mean", "l95", "u95")
dataPlot$parameter <- rownames(dataPlot)

# Print the new dataframe
print(dataPlot)

# Plot the results using ggplot2
ggplot(dataPlot, aes(x = parameter, y = mean,
                     ymin = l95, ymax = u95)) +
  geom_hline( yintercept = 0, color = 'red' ) +
  geom_linerange() + geom_point() + coord_flip() + theme_minimal()
# Maryland crime dataset --------------------------------------------------
# NOTE(review): this section mixes two spellings -- `MDcrime` in the plot
# and `MDCrime` in the model fits.  Only one dataset name can be right;
# confirm against the course environment.

# Plot the change in crime through time by County
plot1 <- ggplot(data = MDcrime, aes(x = Year, y = Crime, group = County)) +
  geom_line() +
  theme_minimal() +
  ylab("Major crimes reported per county")
print(plot1)

# Add the trend line for each county
plot1 + geom_smooth(method="lm", se=FALSE)

# Use lmerTest to extract p-values
## Load lmerTest
library(lmerTest)
## Fit the model with Year as both a fixed and random-effect
lmer(Crime ~ Year + (1 + Year | County) , data = MDCrime) # doesn't converge
## Fit the model with Year2 rather than Year
out <- lmer(Crime ~ Year2 + (1 + Year2 | County) , data = MDCrime) # rescale year
## Examine the model's output
summary(out)

# Use anova to test models
## Build the Null model with only County as a random-effect
null_model <- lmer(Crime ~ (1 | County) , data = MDCrime)
## Build the Year2 model with Year2 as a fixed and random slope and County as the random-effect
year_model <- lmer(Crime ~ Year2 + (1 + Year2 | County) , data = MDCrime)
## Compare the two models using an anova (likelihood-ratio style comparison)
anova(null_model, year_model) # if Year is significant, p-val will be < 0.05
# GLM crash course --------------------------------------------------------
# Three equivalent ways of fitting the same logistic regression; all give
# the same coefficients but different residual degrees of freedom.

# Fit a glm using data in a long format
## Each row is one observation
fitLong <- glm( mortality ~ dose, data = dfLong, family = "binomial")
summary(fitLong)

# Fit a glm using data in a short format with two columns
## Each row is a treatment (6 successes, 4 failures)
fitShort <- glm( cbind(mortality , survival ) ~ dose , data = dfShort, family = "binomial")
summary(fitShort)

# Fit a glm using data in a short format with weights
# Predict probability of mortality (proportion response weighted by the
# number of replicates)
fitShortP <- glm( mortalityP ~ dose , data = dfShort, weights = nReps , family = "binomial")
summary(fitShortP)
# All three methods have same coef but different degrees of freedom

# Fitting a poisson regression on a different data set
summary(glm(y~x, family = "poisson"))

# Plot the data using jittered points and the default stat_smooth
ggplot(data = dfLong, aes(x = dose, y = mortality)) +
  geom_jitter(height = 0.05, width = 0.1) +
  stat_smooth(fill = 'pink', color = 'red')

# Plot the data using jittered points and the glm stat_smooth
# (a logistic curve rather than the default loess fit)
ggplot(data = dfLong, aes(x = dose, y = mortality)) +
  geom_jitter(height = 0.05, width = 0.1) +
  stat_smooth(method = 'glm',
              method.args = list(family = "binomial"))
# More fun with glmer -----------------------------------------------------
# Generalized linear mixed models: binomial and Poisson responses with
# random effects.  The datasets (df, allData, userGroups, ILdata, ILdata2)
# are assumed to come from the exercise environment.

# Load lmerTest
library(lmerTest)

# Fit glmerOut and look at its summary
glmerOut <- glmer(mortality ~ dose + (1|replicate), family = 'binomial', data = df)
summary(glmerOut) # if estimated effect for dose is different than zero, then dose has an effect on mortality

# Fit the model and look at its summary
## cbind because this is aggregate data
modelOut <- glmer( cbind(Purchases, Pass) ~ friend + ranking + (1 | city), data = allData, family = 'binomial')
summary( modelOut)

# Compare outputs to a lmer model, first create ratio
summary(lmer( Purchases/( Purchases + Pass) ~ friend + ranking + (1|city), data = allData))

# Run the code to see how to calculate odds ratios
# (exponentiating a logistic-regression coefficient gives an odds ratio)
summary( modelOut)
exp(fixef(modelOut)[2]) # extract coef for friends
exp(confint(modelOut)[3,]) # extract confidence interval for friends

# Fit a Poisson glmer
summary( glmer(clicks ~ webpage + (1|group), family = 'poisson', data = userGroups))

# Another Poisson example (note: modelOut is reassigned here)
modelOut <- glmer(count ~ age + year + (year|county), family = 'poisson',
                  data = ILdata) # include year as random-effect of county
summary(modelOut)

# Extract out fixed effects
fixef(modelOut)

# Extract out random effects
ranef(modelOut)

# Run code to see one method for plotting the data
ggplot(data = ILdata2, aes(x = year, y = count, group = county)) +
  geom_line() +
  facet_grid(age ~ . ) +
  stat_smooth( method = 'glm', # glm results won't exactly be the same as glmer, but helps display results
               method.args = list( family = "poisson"), se = FALSE,
               alpha = 0.5) +
  theme_minimal()
# Repeated measures -------------------------------------------------------
# `y` and `treat`/`x` are assumed to come from the exercise environment.

# Run a standard, non-paired t-test
t.test(y[treat == "before"], y[treat == "after"], paired = FALSE)

# Run a standard, paired t-test (does not assume constant variance of groups)
t.test(y[treat == "before"], y[treat == "after"], paired = TRUE)

# Run a repeated-measures ANOVA
## Paired t-test is a special case of repeated-measures ANOVA
## (here x identifies the subject, modeled as a random intercept)
anova(lmer(y ~ treat + (1|x)))
# Sleep study data example ------------------------------------------------
# Uses lme4's built-in `sleepstudy` dataset.

# Modeling approach
# * Visualize data
# * Build a simple model
# * Build model of interest
# * Extract information of interest
# * Visualize results

# Plot the data: one reaction-time trajectory per subject, with a single
# pooled linear trend on top.
ggplot(data = sleepstudy) +
  geom_line(aes(x = Days, y = Reaction, group = Subject)) +
  stat_smooth(aes(x = Days, y = Reaction),
              method = 'lm', size = 3, se = FALSE)

# Build a lm (pooled model, ignoring Subject; printed for comparison)
lm( Reaction ~ Days, data = sleepstudy)

# Build a lmer with a random intercept per subject.  The fit is captured
# in `lmerOut` so the anova()/summary() calls below have an object to work
# with -- the original code referenced `lmerOut` without ever assigning it,
# which errors when run as a script.
lmerOut <- lmer( Reaction ~ Days + (1| Subject), data = sleepstudy)

# Run an anova
anova(lmerOut)

# Look at the regression coefficients
summary(lmerOut)
# Hate in NY state --------------------------------------------------------
# `hate` (yearly hate-crime counts per NY county) is assumed to come from
# the exercise environment.

# Plot hate crimes in NY by Year, grouped by County
## Different trend lines for county suggests using random intercept
ggplot( data = hate, aes(x = Year, y = TotalIncidents, group = County)) +
  geom_line() + geom_smooth(method = 'lm', se = FALSE)

# Load lmerTest
library(lmerTest)

# glmer with raw Year, fails to converge
glmer( TotalIncidents ~ Year + (Year|County),
       data = hate, family = "poisson")

# glmer with scaled Year, Year2 (0,1,2,etc.)
# (rescaling the year to start at 0 fixes the convergence problem)
glmerOut <- glmer( TotalIncidents ~ Year2 + (Year2|County),
                   data = hate, family = "poisson")
summary(glmerOut)

# Extract and manipulate data
# Order the county factor by its random slope so the plot below is sorted
# from the largest decrease to the largest increase.
countyTrend <- ranef(glmerOut)$County
countyTrend$county <- factor(row.names(countyTrend), levels = row.names(countyTrend)[order(countyTrend$Year2)])

# Plot results
ggplot(data = countyTrend, aes(x = county, y = Year2)) + geom_point() +
  coord_flip() + ylab("Change in hate crimes per year") +
  xlab("County")
690598cc3a86ae3347e952aaa625d245d97d822c | a593d96a7f0912d8dca587d7fd54ad96764ca058 | /R/ml_recommendation_als.R | 8d834da8d4126724462b0e3b462b8d2bd5414a42 | [
"Apache-2.0"
] | permissive | sparklyr/sparklyr | 98f3da2c0dae2a82768e321c9af4224355af8a15 | 501d5cac9c067c22ad7a9857e7411707f7ea64ba | refs/heads/main | 2023-08-30T23:22:38.912488 | 2023-08-30T15:59:51 | 2023-08-30T15:59:51 | 59,305,491 | 257 | 68 | Apache-2.0 | 2023-09-11T15:02:52 | 2016-05-20T15:28:53 | R | UTF-8 | R | false | false | 14,654 | r | ml_recommendation_als.R | #' Spark ML -- ALS
#'
#' Perform recommendation using Alternating Least Squares (ALS) matrix factorization.
#'
#' @template roxlate-ml-x
#' @param formula Used when \code{x} is a \code{tbl_spark}. R formula as a character string or a formula.
#' This is used to transform the input dataframe before fitting, see \link{ft_r_formula} for details.
#' The ALS model requires a specific formula format, please use \code{rating_col ~ user_col + item_col}.
#' @param rating_col Column name for ratings. Default: "rating"
#' @param user_col Column name for user ids. Ids must be integers. Other numeric types are supported for this column, but will be cast to integers as long as they fall within the integer value range. Default: "user"
#' @param item_col Column name for item ids. Ids must be integers. Other numeric types are supported for this column, but will be cast to integers as long as they fall within the integer value range. Default: "item"
#' @param rank Rank of the matrix factorization (positive). Default: 10
#' @param reg_param Regularization parameter.
#' @param implicit_prefs Whether to use implicit preference. Default: FALSE.
#' @param alpha Alpha parameter in the implicit preference formulation (nonnegative).
#' @param nonnegative Whether to apply nonnegativity constraints. Default: FALSE.
#' @param max_iter Maximum number of iterations.
#' @param num_user_blocks Number of user blocks (positive). Default: 10
#' @param num_item_blocks Number of item blocks (positive). Default: 10
#' @template roxlate-ml-checkpoint-interval
#' @param cold_start_strategy (Spark 2.2.0+) Strategy for dealing with unknown or new users/items at prediction time. This may be useful in cross-validation or production scenarios, for handling user/item ids the model has not seen in the training data. Supported values: - "nan": predicted value for unknown ids will be NaN. - "drop": rows in the input DataFrame containing unknown ids will be dropped from the output DataFrame containing predictions. Default: "nan".
#' @param intermediate_storage_level (Spark 2.0.0+) StorageLevel for intermediate datasets. Pass in a string representation of \code{StorageLevel}. Cannot be "NONE". Default: "MEMORY_AND_DISK".
#' @param final_storage_level (Spark 2.0.0+) StorageLevel for ALS model factors. Pass in a string representation of \code{StorageLevel}. Default: "MEMORY_AND_DISK".
#' @template roxlate-ml-uid
#' @template roxlate-ml-dots
#' @return ALS attempts to estimate the ratings matrix R as the product of two lower-rank matrices, X and Y, i.e. X * Yt = R. Typically these approximations are called 'factor' matrices. The general approach is iterative. During each iteration, one of the factor matrices is held constant, while the other is solved for using least squares. The newly-solved factor matrix is then held constant while solving for the other factor matrix.
#'
#' This is a blocked implementation of the ALS factorization algorithm that groups the two sets of factors (referred to as "users" and "products") into blocks and reduces communication by only sending one copy of each user vector to each product block on each iteration, and only for the product blocks that need that user's feature vector. This is achieved by pre-computing some information about the ratings matrix to determine the "out-links" of each user (which blocks of products it will contribute to) and "in-link" information for each product (which of the feature vectors it receives from each user block it will depend on). This allows us to send only an array of feature vectors between each user block and product block, and have the product block find the users' ratings and update the products based on these messages.
#'
#' For implicit preference data, the algorithm used is based on "Collaborative Filtering for Implicit Feedback Datasets", available at \doi{10.1109/ICDM.2008.22}, adapted for the blocked approach used here.
#'
#' Essentially instead of finding the low-rank approximations to the rating matrix R, this finds the approximations for a preference matrix P where the elements of P are 1 if r is greater than 0 and 0 if r is less than or equal to 0. The ratings then act as 'confidence' values related to strength of indicated user preferences rather than explicit ratings given to items.
#'
#' The object returned depends on the class of \code{x}.
#'
#' \itemize{
#' \item \code{spark_connection}: When \code{x} is a \code{spark_connection}, the function returns an instance of a \code{ml_als} recommender object, which is an Estimator.
#'
#' \item \code{ml_pipeline}: When \code{x} is a \code{ml_pipeline}, the function returns a \code{ml_pipeline} with
#' the recommender appended to the pipeline.
#'
#' \item \code{tbl_spark}: When \code{x} is a \code{tbl_spark}, a recommender
#' estimator is constructed then immediately fit with the input
#' \code{tbl_spark}, returning a recommendation model, i.e. \code{ml_als_model}.
#' }
#'
#' @examples
#' \dontrun{
#'
#' library(sparklyr)
#' sc <- spark_connect(master = "local")
#'
#' movies <- data.frame(
#' user = c(1, 2, 0, 1, 2, 0),
#' item = c(1, 1, 1, 2, 2, 0),
#' rating = c(3, 1, 2, 4, 5, 4)
#' )
#' movies_tbl <- sdf_copy_to(sc, movies)
#'
#' model <- ml_als(movies_tbl, rating ~ user + item)
#'
#' ml_predict(model, movies_tbl)
#'
#' ml_recommend(model, type = "item", 1)
#' }
#'
#' @export
# S3 generic for ALS: dispatches on the class of `x` (spark_connection,
# ml_pipeline, or tbl_spark).  The full shared signature is declared here
# so all three methods stay consistent and argument defaults live in one
# place.
ml_als <- function(x, formula = NULL, rating_col = "rating", user_col = "user", item_col = "item",
                   rank = 10, reg_param = 0.1, implicit_prefs = FALSE, alpha = 1,
                   nonnegative = FALSE, max_iter = 10, num_user_blocks = 10,
                   num_item_blocks = 10, checkpoint_interval = 10,
                   cold_start_strategy = "nan", intermediate_storage_level = "MEMORY_AND_DISK",
                   final_storage_level = "MEMORY_AND_DISK", uid = random_string("als_"), ...) {
  check_dots_used()  # warn if arguments passed via ... are silently ignored
  UseMethod("ml_als")
}
#' @export
# spark_connection method: constructs the Spark-side ALS *estimator*
# (org.apache.spark.ml.recommendation.ALS) without fitting it.
ml_als.spark_connection <- function(x, formula = NULL, rating_col = "rating", user_col = "user", item_col = "item",
                                    rank = 10, reg_param = 0.1, implicit_prefs = FALSE, alpha = 1,
                                    nonnegative = FALSE, max_iter = 10, num_user_blocks = 10,
                                    num_item_blocks = 10, checkpoint_interval = 10,
                                    cold_start_strategy = "nan", intermediate_storage_level = "MEMORY_AND_DISK",
                                    final_storage_level = "MEMORY_AND_DISK", uid = random_string("als_"), ...) {
  # Coerce/validate all parameters up front so bad input errors on the R
  # side before any JVM call is made.
  .args <- list(
    rating_col = rating_col,
    user_col = user_col,
    item_col = item_col,
    rank = rank,
    reg_param = reg_param,
    implicit_prefs = implicit_prefs,
    alpha = alpha,
    nonnegative = nonnegative,
    max_iter = max_iter,
    num_user_blocks = num_user_blocks,
    num_item_blocks = num_item_blocks,
    checkpoint_interval = checkpoint_interval,
    cold_start_strategy = cold_start_strategy,
    intermediate_storage_level = intermediate_storage_level,
    final_storage_level = final_storage_level
  ) %>%
    validator_ml_als()
  # Instantiate the JVM estimator, then apply every setter in a single
  # chained invoke.  Filter() drops NULL entries: jobj_set_param_helper()
  # returns NULL when the connected Spark version predates the parameter
  # and the requested value equals the documented default.
  jobj <- invoke_new(x, "org.apache.spark.ml.recommendation.ALS", uid) %>%
    (
      function(obj) {
        do.call(
          invoke,
          c(obj, "%>%", Filter(
            function(x) !is.null(x),
            list(
              list("setRatingCol", .args[["rating_col"]]),
              list("setUserCol", .args[["user_col"]]),
              list("setItemCol", .args[["item_col"]]),
              list("setRank", .args[["rank"]]),
              list("setRegParam", .args[["reg_param"]]),
              list("setImplicitPrefs", .args[["implicit_prefs"]]),
              list("setAlpha", .args[["alpha"]]),
              list("setNonnegative", .args[["nonnegative"]]),
              list("setMaxIter", .args[["max_iter"]]),
              list("setNumUserBlocks", .args[["num_user_blocks"]]),
              list("setNumItemBlocks", .args[["num_item_blocks"]]),
              list("setCheckpointInterval", .args[["checkpoint_interval"]]),
              # Version-gated setters: Spark 2.0.0+ / 2.2.0+ only.
              jobj_set_param_helper(
                obj, "setIntermediateStorageLevel", .args[["intermediate_storage_level"]],
                "2.0.0", "MEMORY_AND_DISK"
              ),
              jobj_set_param_helper(
                obj, "setFinalStorageLevel", .args[["final_storage_level"]],
                "2.0.0", "MEMORY_AND_DISK"
              ),
              jobj_set_param_helper(
                obj, "setColdStartStrategy", .args[["cold_start_strategy"]],
                "2.2.0", "nan"
              )
            )
          ))
        )
      })
  # Wrap the configured JVM object in the R-side estimator class.
  new_ml_als(jobj)
}
#' @export
# ml_pipeline method: builds the estimator against the pipeline's
# connection, then appends it as a new pipeline stage.
ml_als.ml_pipeline <- function(x, formula = NULL, rating_col = "rating", user_col = "user", item_col = "item",
                               rank = 10, reg_param = 0.1, implicit_prefs = FALSE, alpha = 1,
                               nonnegative = FALSE, max_iter = 10, num_user_blocks = 10,
                               num_item_blocks = 10, checkpoint_interval = 10,
                               cold_start_strategy = "nan", intermediate_storage_level = "MEMORY_AND_DISK",
                               final_storage_level = "MEMORY_AND_DISK", uid = random_string("als_"), ...) {
  # Delegate construction to the spark_connection method with the
  # arguments forwarded unchanged.
  stage <- ml_als.spark_connection(
    x = spark_connection(x),
    formula = formula,
    rating_col = rating_col,
    user_col = user_col,
    item_col = item_col,
    rank = rank,
    reg_param = reg_param,
    implicit_prefs = implicit_prefs,
    alpha = alpha,
    nonnegative = nonnegative,
    max_iter = max_iter,
    num_user_blocks = num_user_blocks,
    num_item_blocks = num_item_blocks,
    checkpoint_interval = checkpoint_interval,
    cold_start_strategy = cold_start_strategy,
    intermediate_storage_level = intermediate_storage_level,
    final_storage_level = final_storage_level,
    uid = uid,
    ...
  )
  # Returns a new pipeline with the ALS stage appended.
  ml_add_stage(x, stage)
}
#' @export
# tbl_spark method: builds the estimator and immediately fits it to the
# data.  With a formula, the result is wrapped in an ml_model_als; without
# one, the raw fitted ml_als_model is returned.
ml_als.tbl_spark <- function(x, formula = NULL, rating_col = "rating", user_col = "user", item_col = "item",
                             rank = 10, reg_param = 0.1, implicit_prefs = FALSE, alpha = 1,
                             nonnegative = FALSE, max_iter = 10, num_user_blocks = 10,
                             num_item_blocks = 10, checkpoint_interval = 10,
                             cold_start_strategy = "nan", intermediate_storage_level = "MEMORY_AND_DISK",
                             final_storage_level = "MEMORY_AND_DISK", uid = random_string("als_"), ...) {
  # Normalize formula input (character or formula -> canonical string).
  formula <- ml_standardize_formula(formula)
  stage <- ml_als.spark_connection(
    x = spark_connection(x),
    formula = formula,
    rating_col = rating_col,
    user_col = user_col,
    item_col = item_col,
    rank = rank,
    reg_param = reg_param,
    implicit_prefs = implicit_prefs,
    alpha = alpha,
    nonnegative = nonnegative,
    max_iter = max_iter,
    num_user_blocks = num_user_blocks,
    num_item_blocks = num_item_blocks,
    checkpoint_interval = checkpoint_interval,
    cold_start_strategy = cold_start_strategy,
    intermediate_storage_level = intermediate_storage_level,
    final_storage_level = final_storage_level,
    uid = uid,
    ...
  )
  if (is.null(formula)) {
    # NOTE(review): assigning to `model_als` makes this branch return the
    # fitted model *invisibly* (the other branch returns visibly); the
    # variable itself is never used -- confirm the asymmetry is intended.
    model_als <- stage %>%
      ml_fit(x)
  } else {
    # Formula path: fit via the shared recommendation-model constructor,
    # which applies the formula transformation before fitting.
    ml_construct_model_recommendation(
      new_ml_model_als,
      predictor = stage,
      formula = formula,
      dataset = x
    )
  }
}
# Validator
# Coerce every ALS parameter to its expected scalar type, erroring with an
# informative message on invalid input.  The cast order is stable so error
# messages are deterministic; the mutated .args list is returned.
validator_ml_als <- function(.args) {
  .args[["rating_col"]] <- cast_string(.args[["rating_col"]])
  .args[["user_col"]] <- cast_string(.args[["user_col"]])
  .args[["item_col"]] <- cast_string(.args[["item_col"]])
  .args[["rank"]] <- cast_scalar_integer(.args[["rank"]])
  .args[["reg_param"]] <- cast_scalar_double(.args[["reg_param"]])
  .args[["implicit_prefs"]] <- cast_scalar_logical(.args[["implicit_prefs"]])
  .args[["alpha"]] <- cast_scalar_double(.args[["alpha"]])
  .args[["nonnegative"]] <- cast_scalar_logical(.args[["nonnegative"]])
  .args[["max_iter"]] <- cast_scalar_integer(.args[["max_iter"]])
  .args[["num_user_blocks"]] <- cast_scalar_integer(.args[["num_user_blocks"]])
  .args[["num_item_blocks"]] <- cast_scalar_integer(.args[["num_item_blocks"]])
  .args[["checkpoint_interval"]] <- cast_scalar_integer(.args[["checkpoint_interval"]])
  # cold_start_strategy must be one of the two values Spark accepts.
  .args[["cold_start_strategy"]] <- cast_choice(.args[["cold_start_strategy"]], c("nan", "drop"))
  .args[["intermediate_storage_level"]] <- cast_string(.args[["intermediate_storage_level"]])
  .args[["final_storage_level"]] <- cast_string(.args[["final_storage_level"]])
  .args
}
# Constructors
# Wrap a JVM ALS estimator reference in the R-side "ml_als" estimator class.
new_ml_als <- function(jobj) {
  new_ml_estimator(jobj, class = "ml_als")
}
# Wrap a fitted JVM ALSModel in the R-side "ml_als_model" transformer,
# exposing the learned rank, the factor matrices (with the `features`
# array split into one column per latent dimension), and closures around
# Spark's recommendForAllItems/recommendForAllUsers.
new_ml_als_model <- function(jobj) {
  new_ml_transformer(
    jobj,
    rank = invoke(jobj, "rank"),
    # Top-`num_users` users recommended for every item.
    recommend_for_all_items = function(num_users) {
      num_users <- cast_scalar_integer(num_users)
      invoke(jobj, "recommendForAllItems", num_users) %>%
        sdf_register()
    },
    # Top-`num_items` items recommended for every user.
    recommend_for_all_users = function(num_items) {
      num_items <- cast_scalar_integer(num_items)
      invoke(jobj, "recommendForAllUsers", num_items) %>%
        sdf_register()
    },
    item_factors = invoke(jobj, "itemFactors") %>%
      sdf_register() %>%
      sdf_separate_column("features"),
    user_factors = invoke(jobj, "userFactors") %>%
      sdf_register() %>%
      sdf_separate_column("features"),
    user_col = invoke(jobj, "getUserCol"),
    item_col = invoke(jobj, "getItemCol"),
    prediction_col = invoke(jobj, "getPredictionCol"),
    class = "ml_als_model"
  )
}
# Hideous hack
utils::globalVariables("explode")
#' @rdname ml_als
#' @param model An ALS model object
#' @param type What to recommend, one of \code{items} or \code{users}
#' @param n Maximum number of recommendations to return
#'
#' @details \code{ml_recommend()} returns the top \code{n} users/items recommended for each item/user, for all items/users. The output has been transformed (exploded and separated) from the default Spark outputs to be more user friendly.
#'
#' @export
# Return the exploded/separated top-n recommendations for all users or items.
#
# `model` may be either the `ml_model_als` wrapper or the bare
# `ml_als_model`; the wrapper is unwrapped before use. Requires Spark 2.2+
# (recommendForAll* was added in that release).
#
# Fix: the error message previously contained a stray extra backtick
# ("`ml_recommend()`` is ...").
ml_recommend <- function(model, type = c("items", "users"), n = 1) {
  version <- spark_jobj(model) %>%
    spark_connection() %>%
    spark_version()
  if (version < "2.2.0") {
    stop("`ml_recommend()` is only supported for Spark 2.2+.", call. = FALSE)
  }
  model <- if (inherits(model, "ml_model_als")) model$model else model
  type <- match.arg(type)
  n <- cast_scalar_integer(n)
  # "items": recommend items to every user -> recommendForAllUsers;
  # "users": recommend users for every item -> recommendForAllItems.
  recommend_fn <- switch(type,
    items = model$recommend_for_all_users,
    users = model$recommend_for_all_items
  )
  # Spark returns one array-of-structs column; explode it to one row per
  # recommendation and split the struct into plain columns.
  recommend_fn(n) %>%
    dplyr::mutate(recommendations = explode(!!as.name("recommendations"))) %>%
    sdf_separate_column("recommendations")
}
|
6259cdaf284f5a905dad9ecf2902bf092ed05801 | ecc2925a7f127eea277f6c06908ef1d659e55319 | /R/utils.R | ca7caafac7c99e6dbb14b3c1bc1ac2a59465344a | [] | no_license | cran/rextendr | 3a7ab79776771d934930bfa6b2d01dfa7626249f | 50c2f83ff4e62a151b4415af8960285074d037c6 | refs/heads/master | 2023-06-21T20:05:29.130751 | 2023-06-20T17:20:02 | 2023-06-20T17:20:02 | 377,362,878 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,890 | r | utils.R | #' Inform the user that a development version of `extendr` is being used.
#'
#' This function returns a string that should be used inside of a `cli` function.
#' See `validate_extendr_features()` for an example.
#'
#' @keywords internal
inf_dev_extendr_used <- function() "Are you using a development version of {.code extendr}?"
#' Silence `{cli}` output
#'
#' Use for functions that use cli output that should optionally be suppressed.
#'
#' @examples
#'
#' if (interactive()) {
#' hello_rust <- function(..., quiet = FALSE) {
#' local_quiet_cli(quiet)
#' cli::cli_alert_info("This should be silenced when {.code quiet = TRUE}")
#' }
#'
#' hello_rust()
#' hello_rust(quiet = TRUE)
#' }
#' @keywords internal
local_quiet_cli <- function(quiet, env = rlang::caller_env()) {
  # Nothing to do when output is not being silenced.
  if (!quiet) {
    return(invisible(NULL))
  }
  # A no-op cli handler; installing it suppresses all cli output.
  silent_handler <- function(...) {
  }
  # withr scopes the option to `env`'s frame and restores it on exit.
  withr::local_options(
    list("cli.default_handler" = silent_handler),
    .local_envir = env
  )
}
#' Helper function for checking cargo sub-commands.
#' @param args Character vector, arguments to the `cargo` command. Passed to [processx::run()]'s args param.
#' @return Logical scalar indicating if the command was available.
#' @noRd
# Check whether a cargo sub-command can be executed.
#
# `try_exec_cmd()` yields NA_character_ when the command fails, so cargo is
# considered available iff no element of the captured output is NA.
cargo_command_available <- function(args = "--help") {
  cmd_output <- try_exec_cmd("cargo", args)
  all(!is.na(cmd_output))
}
#' Helper function for executing commands.
#' @param cmd Character scalar, command to execute.
#' @param args Character vector, arguments passed to the command.
#' @return Character vector containing the stdout of the command or `NA_character_` if the command failed.
#' @noRd
# Execute a command, returning its stdout split into lines, or
# NA_character_ if the command could not be run or exited non-zero.
try_exec_cmd <- function(cmd, args = character()) {
  # A launch failure (e.g. command not found) is mapped onto a sentinel
  # non-zero status so both failure modes take the same branch below.
  run_result <- tryCatch(
    processx::run(cmd, args, error_on_status = FALSE),
    error = function(...) list(status = -1)
  )
  if (run_result[["status"]] == 0) {
    stringi::stri_split_lines1(run_result$stdout)
  } else {
    NA_character_
  }
}
|
09fac637ec111f00f6dfbb5f3220203170417815 | 1251645c1987abcf8d0c0908a4688ded1ea4617c | /configure.R | 7ca2794cd7f8884c2d318487a595f31145adc4c3 | [
"Apache-2.0"
] | permissive | NanaAkwasiAbayieBoateng/mleap | 40279d2ac24b780ca31d7e0123157cb7dc22248f | 65e2d9e83e364b289d9c7c0217df07fcd5624787 | refs/heads/master | 2020-03-15T12:52:54.706188 | 2018-05-04T00:35:49 | 2018-05-04T00:35:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 384 | r | configure.R | #!/usr/bin/env Rscript
library(purrr)

# Build compilation specs for the package jars: take sparklyr's default
# specs, attach the MLeap jars as compile-time dependencies, and keep only
# Spark >= 2.0.0 targets.
spec <- sparklyr::spark_default_compilation_spec()
spec <- map(spec, function(entry) {
  jar_paths <- list.files(mleap:::resolve_mleap_path(), full.names = TRUE)
  jar_paths <- grep("(mleap|scala|bundle).+jar$", jar_paths, value = TRUE)
  entry$jar_dep <- map_chr(jar_paths, normalizePath)
  entry
})
spec <- keep(spec, function(entry) entry$spark_version >= "2.0.0")
sparklyr::compile_package_jars(spec = spec)
|
aa73a06e3a5434891bb00a1a7736c72864194be9 | f1971a5cbf1829ce6fab9f5144db008d8d9a23e1 | /packrat/lib/x86_64-pc-linux-gnu/3.2.5/rbokeh/tests/testthat/test-css-svg.R | ccfd12a61d7f7f9ab71871b48a066fd4cf79ccd5 | [] | no_license | harryprince/seamonster | cc334c87fda44d1c87a0436139d34dab310acec6 | ddfd738999cd302c71a11aad20b3af2f4538624f | refs/heads/master | 2021-01-12T03:44:33.452985 | 2016-12-22T19:17:01 | 2016-12-22T19:17:01 | 78,260,652 | 1 | 0 | null | 2017-01-07T05:30:42 | 2017-01-07T05:30:42 | null | UTF-8 | R | false | false | 1,740 | r | test-css-svg.R |
# see here: https://github.com/bokeh/rbokeh/issues/40
test_that("svgs in css are base64 encoded", {
  css_path <- file.path(
    system.file(package = "rbokeh"),
    "htmlwidgets/lib/bokehjs/bokeh.min.css"
  )
  # readLines() warnings are suppressed as in the original (presumably an
  # incomplete final line in the minified css — not essential to the check).
  css_lines <- suppressWarnings(readLines(css_path))
  expect_false(any(grepl("<svg", css_lines)))
})
# to fix svg problem: find something like this in bokeh.min.css:
# .bk-logo.grey{filter:url("data:image/svg+xml;utf8,<svgxmlns=\'http://www.w3.org/2000/svg\'><filterid=\'grayscale\'><feColorMatrixtype=\'matrix\'values=\'0.33330.33330.3333000.33330.33330.3333000.33330.33330.33330000010\'/></filter></svg>#grayscale");filter:gray;-webkit-filter:grayscale(100%)}
# .bk-logo.grey{filter:url("data:image/svg+xml;utf8,<svg xmlns=\'http://www.w3.org/2000/svg\'><filter id=\'grayscale\'><feColorMatrix type=\'matrix\' values=\'0.3333 0.3333 0.3333 0 0 0.3333 0.3333 0.3333 0 0 0.3333 0.3333 0.3333 0 0 0 0 0 1 0\'/></filter></svg>#grayscale");filter:gray;-webkit-filter:grayscale(100%)}
# then
# base64enc::base64encode(charToRaw("<svg xmlns=\'http://www.w3.org/2000/svg\'><filter id=\'grayscale\'><feColorMatrix type=\'matrix\' values=\'0.3333 0.3333 0.3333 0 0 0.3333 0.3333 0.3333 0 0 0.3333 0.3333 0.3333 0 0 0 0 0 1 0\'/></filter></svg>"))
# then replace it with:
# .bk-logo.grey{filter:url("data:image/svg+xml;base64,PHN2ZyB4bWxucz0naHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmcnPjxmaWx0ZXIgaWQ9J2dyYXlzY2FsZSc+PGZlQ29sb3JNYXRyaXggdHlwZT0nbWF0cml4JyB2YWx1ZXM9JzAuMzMzMyAwLjMzMzMgMC4zMzMzIDAgMCAwLjMzMzMgMC4zMzMzIDAuMzMzMyAwIDAgMC4zMzMzIDAuMzMzMyAwLjMzMzMgMCAwIDAgMCAwIDEgMCcvPjwvZmlsdGVyPjwvc3ZnPg==");filter:gray;-webkit-filter:grayscale(100%)}
# also update css tooltip colors
# #1e4b6c to #aaa for background and to #888 for border
# #9ab9b1 to #fff
# #e2ddbd to #fff
|
deee0b9bf7fcfa951252b0f42600097aec26ae29 | 5803671bfa8812aeb8c7a60419bf24ad55373248 | /R/psi_criterion_mixed_kw_pmf.R | 629997edca5888e83fc5e76e91c5c7cbef43a702 | [] | no_license | gragedaa/SkeweDF | 3887c924cc19a217b3e41b94f81227c8912b0a11 | 36acdc9a76938eeb62c140bb1a8cd8c003c972d5 | refs/heads/master | 2023-06-18T02:15:06.144078 | 2023-05-27T23:21:45 | 2023-05-27T23:21:45 | 248,872,914 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,513 | r | psi_criterion_mixed_kw_pmf.R | #' Psi Criterion Mixed KW PMF
#'
#' This function calculates the Psi criterion goodness of fit metric given a set of parameters for the probability mass distribution function of the a mixed Generalized Kolmogorov Waring function.
#' @param x Vector of parameters
#' @param d Integer vector of length 2 giving the number of 'a' terms for each of the two component models; within \code{x}, each block of 'a' terms is followed by the matching block of 'b' terms and a theta value.
#' @param data Vector of observed values
#' @param left_trunc Int used to determine starting index of model to use for optimization
#' @param right_trunc Int used to determine ending index of model to use for optimization
#' @param weighted_rt Boolean used to determine if the weighted right-tail cumulative distribution function should be used or not. (NOTE: \code{weighted_rt} is not an argument of this function; this entry appears copied from a related \code{psi_criterion} variant — verify.)
#' @export
# Negated Psi criterion for a two-component mixed Kolmogorov-Waring pmf.
#
# The flat parameter vector x is laid out as:
#   [a1 (d[1] values), b1 (d[1] values), theta1,
#    a2 (d[2] values), b2 (d[2] values), theta2, model1_weight]
psi_criterion_mixed_kw_pmf <- function(x, d, data, left_trunc, right_trunc) {
  n1 <- d[1]
  n2 <- d[2]
  # Component 1 parameters.
  a1 <- x[1:n1]
  b1 <- x[(n1 + 1):(2 * n1)]
  theta1 <- x[2 * n1 + 1]
  # Component 2 parameters start right after theta1.
  base2 <- 2 * n1 + 1
  a2 <- x[(base2 + 1):(base2 + n2)]
  b2 <- x[(base2 + n2 + 1):(base2 + 2 * n2)]
  theta2 <- x[base2 + 2 * n2 + 1]
  model1_weight <- unlist(x[base2 + 2 * n2 + 2])

  # Component pmfs and their weighted additive mixture.
  model1 <- Kolmogorov_Waring(right_trunc, unlist(a1), unlist(b1), unlist(theta1))
  model2 <- Kolmogorov_Waring(right_trunc, unlist(a2), unlist(b2), unlist(theta2))
  mixture <- additive_mixed_model(model1, model2, model1_weight)

  # Drop the leading element, truncate to the fitting window, renormalize,
  # and scale to the observed total count.
  mixture <- mixture[-1]
  mixture <- mixture[left_trunc:right_trunc]
  mixture <- mixture / sum(mixture)
  mixture <- mixture * sum(data)

  # Negated so that a minimizer maximizes the Psi criterion.
  psi_criterion(data, mixture, length(x)) * -1
}
|
90dfcf18089fe7752d44afceb2bd1e9b990cfd4e | ee7c5282535e192fa619ca3b7a47aff09983ebbe | /R/dataPurification_NFIPSP.R | ea37578da416805bb4b6e336c66a97287a39bc6b | [] | no_license | ianmseddy/PSP_Clean | df409f5ecdbbcff2d6f7aa5230a9fdf546ed78a2 | 9d29a83722db14c3d0666111a78d1b61f611eb41 | refs/heads/master | 2021-11-18T14:21:48.417089 | 2021-08-26T15:51:25 | 2021-08-26T15:51:25 | 155,439,399 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,834 | r | dataPurification_NFIPSP.R | dataPurification_NFIPSP <- function(lgptreeRaw,
lgpHeaderRaw,
approxLocation) {
# start from tree data to obtain plot infor
lgptreeRaw[, year := as.numeric(substr(lgptreeRaw$meas_date, 1, 4))]
lgpHeaderRaw[, year := as.numeric(substr(lgpHeaderRaw$meas_date, 1, 4))]
lgpHeader <- lgpHeaderRaw[nfi_plot %in% unique(lgptreeRaw$nfi_plot), ][, .(nfi_plot, year, meas_plot_size, site_age)]
approxLocation <- approxLocation[, .(nfi_plot, utm_n, utm_e, utm_zone, elevation)] %>%
unique(., by = "nfi_plot")
lgpHeader <- setkey(lgpHeader, nfi_plot)[setkey(approxLocation, nfi_plot), nomatch = 0]
# remove the plots without SA and location infor
lgpHeader <- lgpHeader[!is.na(site_age), ][!is.na(utm_n), ][!is.na(utm_e), ]
treeData <- lgptreeRaw[, .(nfi_plot, year, tree_num, lgtree_genus, lgtree_species,
lgtree_status, dbh, height)][nfi_plot %in% unique(lgpHeader$nfi_plot), ]
treeData <- treeData[lgtree_status != "DS" & lgtree_status != "M", ][, lgtree_status := NULL]
setnames(treeData, c("nfi_plot", "year", "tree_num","lgtree_genus", "lgtree_species", "dbh", "height"),
c("OrigPlotID1", "MeasureYear", "TreeNumber", "Genus", "Species", "DBH", "Height"))
# names(lgpHeader) <- c("OrigPlotID1", "baseYear", "PlotSize", "baseSA", "Northing", "Easting", "Zone", "Elevation")
setnames(lgpHeader, old = c("nfi_plot", "year", "meas_plot_size", "site_age", "utm_n", "utm_e", "utm_zone", "elevation"),
new = c("OrigPlotID1", "baseYear", "PlotSize", "baseSA", "Northing", "Easting", "Zone", "Elevation"))
lgpHeader <- unique(lgpHeader, by = "OrigPlotID1")
newheader <- unique(treeData[, .(OrigPlotID1, MeasureYear)], by = c("OrigPlotID1", "MeasureYear"))
newheader[, MeasureID := paste("NFIPSP_", row.names(newheader), sep = "")]
treeData <- setkey(treeData, OrigPlotID1)
treeData <- treeData[newheader, on = c("OrigPlotID1", "MeasureYear")]
lgpHeader <- setkey(lgpHeader, OrigPlotID1)[setkey(newheader, OrigPlotID1), nomatch = 0]
#above line changed as now there are repeat measures in NFI, so join must be on MeasureID as well as OrigPlotID1
lgpHeader <- setkey(lgpHeader, OrigPlotID1)
lgpHeader <- lgpHeader[newheader, on = c("OrigPlotID1", "MeasureID")]
treeData <- treeData[, .(MeasureID, OrigPlotID1, OrigPlotID2 = NA, MeasureYear,
TreeNumber, Genus, Species, DBH, Height)]
lgpHeader <- lgpHeader[, .(MeasureID, OrigPlotID1, MeasureYear, Longitude = NA, Latitude = NA, Zone,
Easting, Northing, Elevation, PlotSize, baseYear, baseSA)]
treeData <- standardizeSpeciesNames(treeData, forestInventorySource = "NFIPSP") #Need to add to pemisc
return(list(plotHeaderData = lgpHeader, treeData = treeData))
}
|
8d8ec354292fcb2a0d62087f591143ce833d15e2 | a8a6fda91dac3c3c0b2403331b2244cee603c579 | /man/get_incr.Rd | 434ff45896b1f1647d4500203a7735d87696870c | [] | no_license | cran/ezplot | afd9bf17c123f902c1f83b331fd5cffd3395ebf2 | d6a33360c6f412caa4b87bd717d30a07bedd513f | refs/heads/master | 2023-06-26T13:11:06.662729 | 2023-06-17T06:30:02 | 2023-06-17T06:30:02 | 168,390,386 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 314 | rd | get_incr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_incr.R
\name{get_incr}
\alias{get_incr}
\title{get_incr}
\usage{
get_incr(x)
}
\arguments{
\item{x}{A numeric or date vector}
}
\description{
returns the minimum increment between sorted unique values of a
vector
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.