blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f5dad228cdf21afce0cd54288e5fd286eadd31b4
|
fac73e7dbe1136a56863945e585eb3b5c5e386e8
|
/seq_features/proteinko.R
|
997deca9d78f89683c0ced39c281bc5e8023c05f
|
[] |
no_license
|
thaddad91/Thesis-N-glycosylation
|
c6dbff765c4d047d9ebe64b4d97e267295380775
|
d79a75f90885bad27cb75b6419dc029fce1df372
|
refs/heads/master
| 2021-06-21T20:26:21.783570
| 2021-03-01T15:25:19
| 2021-03-01T15:25:19
| 181,561,783
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 490
|
r
|
proteinko.R
|
library(ggfortify)
library(randomForest)

## Tab-separated feature table; column V1 holds the class label (pos/neg).
data <- read.csv("table_pI", header = FALSE, sep = "\t", dec = ".")

# Balanced pos/neg set: keep every positive, downsample the negatives.
pos <- data[which(data$V1 == 'pos'), ]
neg <- data[which(data$V1 == 'neg'), ]
# BUG FIX: sample from the negative subset (the original resampled the FULL
# data frame, so the "negative" set could contain positive rows). min() guards
# against fewer than 2200 negatives being available.
neg <- neg[sample(nrow(neg), min(2200, nrow(neg))), ]
d <- rbind(pos, neg)

# PCA on the feature columns (drop the label in column 1).
pca <- prcomp(d[, -1], center = TRUE, scale. = FALSE)
autoplot(pca, data = d, colour = 'V1', x = 5, y = 6) # Other PCA's
#loadings = TRUE, loadings.label = TRUE)

# Random forest classifier on the balanced set. The response must be a factor
# for classification (read.csv leaves strings as character in R >= 4.0).
d$V1 <- as.factor(d$V1)
rf <- randomForest(V1 ~ ., data = d)
|
ff465301cabe33b73a6ccafd0f001fb92e96bfa9
|
1f0a2fbe2ecbc70761250efd896fb60884301fd0
|
/code/R/report_quality_assurance.R
|
8e5b76ca3341b4cfd4d476d49c95f98ee99f924b
|
[] |
no_license
|
ruijiang81/crowdsourcing
|
63e923e884e894d11845ff6ba9c985adc37b0efe
|
3d6e91a16a495130026ebcf366bdcaa1539b3ac1
|
refs/heads/master
| 2020-04-05T06:05:05.637119
| 2018-11-08T00:02:33
| 2018-11-08T00:02:33
| 156,624,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 568
|
r
|
report_quality_assurance.R
|
report_quality_assurance <- function(report) {
  # Sanity checks on a crowdsourcing quality-assurance report:
  # expected columns are present, key fields are non-missing, and the
  # batch / instance_num columns are non-decreasing.
  expected_cols <- c(
    "instance_num", "pay", "assert_all_are_greater_than_or_equal_to", "change", "cost_so_far",
    "AUC_holdout", "full_AUC", "subset_AUC"
  )
  assertive::assert_are_intersecting_sets(colnames(report), expected_cols)
  assertive::assert_all_are_not_na(select(report, instance_num, batch))
  assertive::assert_all_are_greater_than_or_equal_to(diff(report$batch), 0)
  assertive::assert_all_are_greater_than_or_equal_to(diff(report$instance_num), 0)
}
|
c9b34cc896f8a2186b99a8913b345a07651ebb91
|
9f674f754bdc1a0f92a650933e52e504d7c0e727
|
/AnalysiswithR/CentralLimitTheorem.R
|
a3c2e48dd83567edb38f5b202a144d1bb3503657
|
[] |
no_license
|
AdarshKandwal/BasicOfPythonForML
|
3552fb35b1bd717ddbb5067c97041ceb82099472
|
fa18130125bda9079d8c45608651fbf38de96301
|
refs/heads/master
| 2023-04-13T00:28:39.517081
| 2021-04-25T13:14:16
| 2021-04-25T13:14:16
| 261,206,373
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 778
|
r
|
CentralLimitTheorem.R
|
# Illustrates the Central Limit Theorem with the mice_pheno body-weight data:
# the distribution of a difference of sample means approaches normality as N grows.
dat <- read.csv("mice_pheno.csv")
head(dat)
library(dplyr)
# Female mice only, split by diet; unlist() flattens the one-column result to a vector.
controlPopulation <- filter(dat,Sex == "F" & Diet == "chow") %>% select(Bodyweight) %>% unlist
hfPopulation <- filter(dat,Sex == "F" & Diet == "hf") %>% select(Bodyweight) %>% unlist
# Population difference in mean body weight (high-fat minus control).
mu_hf <- mean(hfPopulation)
mu_control <- mean(controlPopulation)
print(mu_hf - mu_control)
# Sample sizes to compare, and number of simulated experiments per size.
Ns <- c(3,12,25,50)
B <- 10000 #number of simulations
# res: B x length(Ns) matrix; each column holds B simulated differences of sample means.
res <- sapply(Ns,function(n) {
replicate(B,mean(sample(hfPopulation,n))-mean(sample(controlPopulation,n)))
})
library(rafalib)
# 2x2 grid of QQ-plots, one per sample size; near-straight lines indicate normality.
mypar(2,2)
for (i in seq(along=Ns)) {
titleavg <- signif(mean(res[,i]),3)
titlesd <- signif(popsd(res[,i]),3)
title <- paste0("N=",Ns[i]," Avg=",titleavg," SD=",titlesd)
qqnorm(res[,i],main=title)
qqline(res[,i],col=2)
}
|
c93008032049a9a30f9b6cf9f92b1ec0e6c34428
|
11487d4bbf3e905b409e844dfd03fad97970f1e4
|
/steps.r
|
b17d8d8aec1984567a74a5c040c3badd86f39860
|
[] |
no_license
|
WRaat/zp_feedback
|
8740db0e7d57fd7fa58af539a978bcf94f2258ed
|
ee9fbfc722aabf5e522cffe7c24b59a86c20bffa
|
refs/heads/main
| 2023-04-20T02:55:35.519826
| 2021-04-28T14:44:19
| 2021-04-28T14:44:19
| 361,686,168
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 428
|
r
|
steps.r
|
# Template text (Dutch) for a practice feedback report; the X placeholders are
# filled in elsewhere. English translation of the lines below:
#   "Your practice performed X monthly tests. X physicians in your practice
#    requested at least 1 test. This is a participation rate of X% versus an
#    overall participation rate of X% in the care programme."
#   "The median number of tests in your practice was X versus an overall
#    median of X tests."
#   "The chart below gives an overview of your number of tests over time and
#    relative to the total number of samples taken."
# Uw praktijk heeft X maandelijkse testen afgelegd. X artsen in uw praktijk hebben ten minste 1 test aangevraagd. Dit is een participatiegraad van X% tegenover een algemene participatiegraad van X% in het zorgprogramma.
# Het mediane aantal testen in uw praktijk was X tegenover een mediaan van X testen algemeen.
# Onderstaande grafiek geeft een overzicht van uw aantal testen in de tijd en tegenover het totale aantal afnames.
|
057b7f571177edbbb7e57d263e1797373fadf012
|
42629d99c178a551bc4fb94dad14d235b4c34f62
|
/twitter/ui.R
|
e013fb50c128cdf8e3364af6a551c6a001f4bde5
|
[
"MIT"
] |
permissive
|
covix/shiny-pancake
|
f08e8991d89e891b90069138de684e5f4af94e00
|
0e6e133be55d6d8e2ee25ab94ebb5633424794e2
|
refs/heads/master
| 2021-01-20T11:47:22.347676
| 2018-08-01T11:59:27
| 2018-08-01T11:59:27
| 79,322,281
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,128
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(leaflet)
library(shinythemes)
library(networkD3)
library(streamgraph)
# library(shinyCustom)
# library(visNetwork)
# Choice lists for the Network tab (display label = internal algorithm id).
clusterAlg <- c('No cluster' = 'default',
'WalkTrap' = 'walktrap',
'InfoMap' = 'infomap')
weightAlg <- c('Number of tweets' = 'default',
'PageRank' = 'page_rank')
# Top-20 most frequent hashtag texts, sorted alphabetically, with a blank
# first entry for "no selection". NOTE(review): `hs` (and `sources` below)
# must be defined elsewhere, e.g. in global.R — confirm their schema there.
hsChoices <- c('', sort(
unique(
hs %>%
group_by(text) %>%
count() %>%
arrange(desc(n)) %>%
filter(row_number() <= 20) %>%
arrange(text) %>%
.$text
)
))
# Three-tab UI: hashtag streamgraph, tweet map, and retweet network.
# NOTE(review): user-facing strings contain typos kept as-is
# ("retweetd", "expressend", "Mininum") — fixing them changes runtime output.
shinyUI(tagList(
# useShinyCustom(triggered_event = 'mouseup'),
# Global CSS: enlarge SVG text plus project stylesheet.
tags$head(tags$style(HTML(
"
svg text {
font-size: 15px;
}
"
)), includeCSS("styles.css")),
navbarPage(
"Shiny Tweets",
tabPanel(
"HashtagsFlow",
streamgraphOutput("hsStreamGraphPlot"),
fluidRow(
column(
width = 3,
offset = 3,
class="col-md-push-0",
sliderInput(
inputId = "hashtags",
label = h3("Min hashtag count / hour"),
min = 1,
max = 50,
value = 20
),
helpText(
"If set to ",
em("x"),
", hashtags that were not",
"retweetd at least ",
em("x"),
" times in all the considered hour,",
"will be filtered out",
"in the selected time interval"
)
),
column(
width = 3,
offset = 0,
sliderInput(
inputId = "hsTime",
label = h3("Time Interval"),
min = 0,
max = 200,
value = c(0, 50)
),
helpText(
"Time is expressend in hours spent from the first tweet collected."
)
)
)
),
# Map tab: full-screen leaflet map with a draggable control panel.
tabPanel("Map",
div(
class = "outer",
leafletOutput("map", width = "100%", height = "100%"),
# Shiny versions prior to 0.11 should use class="modal" instead.
absolutePanel(
class = "controls panel panel-default",
fixed = TRUE,
draggable = TRUE,
top = 60,
left = "auto",
right = 20,
bottom = "auto",
width = 330,
height = "auto",
h2("Source selector"),
selectInput("source", "Source", sources)
)
)),
# Network tab: force-directed graph with filtering/weighting controls.
tabPanel(
"Network",
div(
class = "outer",
forceNetworkOutput("force", width = "100%", height = "100%"),
# visNetworkOutput("force", width = "100%", height = "100%"),
absolutePanel(
class = "controls panel panel-default",
fixed = TRUE,
draggable = TRUE,
top = 60,
left = "auto",
right = 20,
bottom = "auto",
width = 330,
height = "auto",
h2("Source selector"),
numericInput(
"degreeInput",
"Mininum inbound degree",
min = 0,
max = 1000,
value = 200
),
selectInput("weightAlg", "Weighting Algorithm", weightAlg),
selectInput("clusterAlg", "Clustering Algorithm", clusterAlg),
# show at most 5 options in the list
selectizeInput(
"netHashtag",
'Show network for hashtag',
choices = hsChoices,
options = list(maxItems = 1)
)
)
)
),
theme = shinytheme("united")
)
))
|
847683a344c5f8e1d93a78eb0fa3f6126db908e2
|
e5bdefbe3349890d72175e89b87e98f9c22c234e
|
/run_analysis.R
|
25cb9ca95b2a3239a58e52b684499bdf3589d54e
|
[] |
no_license
|
jschwertz/TidyDataProject
|
4cf42654d464112b61b20a499c78d9de6d94fa60
|
2c2e38c7ff1a02f53bd238e25f545aa7b2c9ea60
|
refs/heads/master
| 2016-09-06T18:54:43.154801
| 2014-09-21T17:00:14
| 2014-09-21T17:00:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,637
|
r
|
run_analysis.R
|
## Script cleans sensor data from accelerometers in Galaxy S device for analysis.
## 1. Merges the training and test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive variable names.
## 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.

## Load required libraries.
library(plyr)

## If data directory exists, change to that directory.
## Else, create data directory, download and unzip raw data files.
if (file.exists("./data")) {
  setwd("./data")
} else {
  dir.create("./data")
  fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileUrl, destfile = "./data/zipfile.zip", method = "auto")
  setwd("./data")
  unzip("zipfile.zip", files = NULL, list = FALSE, overwrite = TRUE,
        junkpaths = FALSE, exdir = ".", unzip = "internal",
        setTimes = FALSE)
}

## Read raw data files into memory for processing.
## (Inertial Signals data is intentionally left out.)
trainSubj <- read.table("./UCI HAR Dataset/train/subject_train.txt", header = FALSE)
trainY <- read.table("./UCI HAR Dataset/train/y_train.txt", header = FALSE)
trainX <- read.table("./UCI HAR Dataset/train/X_train.txt", header = FALSE)
testSubj <- read.table("./UCI HAR Dataset/test/subject_test.txt", header = FALSE)
testY <- read.table("./UCI HAR Dataset/test/y_test.txt", header = FALSE)
testX <- read.table("./UCI HAR Dataset/test/X_test.txt", header = FALSE)
features <- read.delim("features.txt", header = FALSE, sep = "")
activityLabels <- read.table("./UCI HAR Dataset/activity_labels.txt", header = FALSE)

## Join train and test files by column: 561 features, then activity, then subject.
training <- cbind(trainX, trainY, trainSubj)
testing <- cbind(testX, testY, testSubj)

## Make the feature names better suited for R with some substitutions.
features[, 2] <- gsub('-mean', 'Mean', features[, 2])
features[, 2] <- gsub('-std', 'Std', features[, 2])
features[, 2] <- gsub('[-()]', '', features[, 2])

## Merge training and test sets together.
mrgData <- rbind(training, testing)
## Remove working files.
#remove(trainSubj,testSubj,trainY,testY,trainX,testX,training, testing)

## Select only the data on mean and std. dev.
selCols <- grep(".*Mean.*|.*Std.*", features[, 2])
## Reduce the features table to what we want.
features <- features[selCols, ]
## Now add the last two columns (activity code = 562, subject = 563).
selCols <- c(selCols, 562, 563)
## And remove the unwanted columns from the merged data.
mrgData <- mrgData[, selCols]

## Add the column names (features) to the merged data, then lowercase them.
colnames(mrgData) <- c(features$V2, "Activity", "Subject")
colnames(mrgData) <- tolower(colnames(mrgData))

## Map numeric activity codes to their descriptive labels via a direct lookup
## (replaces the previous iterative gsub loop, which relied on labels never
## containing digits). as.factor() keeps the original alphabetical level order.
mrgData$activity <- activityLabels$V2[match(mrgData$activity, activityLabels$V1)]
mrgData$activity <- as.factor(mrgData$activity)
mrgData$subject <- as.factor(mrgData$subject)

## Average every measurement column per (activity, subject) pair.
numCols <- length(colnames(mrgData)) - 2
tidy <- aggregate(mrgData[, 1:numCols],
                  by = list(activity = mrgData$activity, subject = mrgData$subject),
                  FUN = mean)
## Remove the subject and activity column, since a mean of those has no use
## tidy[,90] = NULL
## tidy[,89] = NULL

## BUG FIX: spell out row.names (the original used the partially-matched
## `row.name=`, which only works via partial argument matching).
write.table(tidy, "tidy.txt", sep = "\t", row.names = FALSE)

## Remove working files.
remove(mrgData, tidy, numCols, activityLabels, features)
setwd("../")
|
b95ef2f98d42ee45deb9717d0c4391eb26c4d8c0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/igraph/examples/make_star.Rd.R
|
b5874cff994ffb67abb8701e5a265a8774b50c8a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 232
|
r
|
make_star.Rd.R
|
# Auto-extracted example code from the igraph make_star.Rd help page.
library(igraph)
### Name: make_star
### Title: Create a star graph, a tree with n vertices and n - 1 leaves
### Aliases: make_star graph.star star
### ** Examples
# Star with 10 vertices; edges directed outward from the centre vertex.
make_star(10, mode = "out")
# Undirected star with 5 vertices.
make_star(5, mode = "undirected")
|
13e8729e8085c47c884b6a30afd6eafea094ce06
|
6964d8eb7cf8f9ed5abd612f6c2f0756877bca04
|
/tests/testthat/test_most_frequent.R
|
a6c000019ba750cb65c1ddadd4c4719b12e4dc2e
|
[
"Unlicense"
] |
permissive
|
s-fleck/hammr
|
7a6805acc2f897c380b3f40d4e9112900646006d
|
b8fd5fa9d67698bc4c46ef48d079b0948a036387
|
refs/heads/master
| 2023-07-20T11:56:32.005037
| 2023-07-10T07:32:59
| 2023-07-10T07:32:59
| 119,056,265
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 326
|
r
|
test_most_frequent.R
|
context("most_frequent")

test_that("most_frequent works as expected", {
  # NA appears three times and "c" appears three times (tied counts).
  values <- c("a", "a", NA, NA, NA, "b", "c", "c", "c")

  # With NAs counted, NA is reported as the most frequent entry.
  expect_identical(most_frequent(values), NA_character_)
  # Dropping NAs leaves "c" as the clear winner.
  expect_identical(most_frequent(values, na.rm = TRUE), "c")
  # Requesting the top two returns both tied values.
  expect_identical(most_frequent(values, n = 2), c(NA_character_, "c"))
})
|
4ced7e16a93717275a2db699a5ecb6aff0fbcb93
|
a026f85dbdd045ea2dc5b74df474afd02c3eb9af
|
/man/next_quarter.Rd
|
8bd5f85097ae043d923574abfea181b0fad8a11b
|
[] |
no_license
|
selesnow/timeperiodsR
|
93df215538e9091fd9a9f0f0cb8e95db7735dc9d
|
3612001767f0dce942cea54f17de22b1d97863af
|
refs/heads/master
| 2023-04-27T15:52:19.511667
| 2023-04-20T10:15:49
| 2023-04-20T10:15:49
| 208,013,525
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,509
|
rd
|
next_quarter.Rd
|
\name{next_quarter}
\alias{next_quarter}
\title{
Start and end of next quarter
}
\description{
Defines first and last date in n next quarter
}
\usage{
next_quarter(x = Sys.Date(),
n = 1,
part = getOption("timeperiodsR.parts"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{Date object}
\item{n}{Number of periods for offset}
\item{part}{Part of period you need to receive, one of "all", "start", "end","sequence", "length". See details.}
}
\details{
You can get object of tpr class with all components or specify which component you need, use \code{part} for manage this option:
\itemize{
\item all - get all components
\item start - get only first date of period
\item end - get only last date of period
\item sequence - get vector of all dates in period
\item length - get number of dates in period
}
}
\value{Object of tpr class}
\author{
Alexey Seleznev
}
\seealso{
To get other upcoming periods, see \code{\link[timeperiodsR:next_week]{next_week()}}, \code{\link[timeperiodsR:next_month]{next_month()}}, \code{\link[timeperiodsR:next_year]{next_year()}}
}
\examples{
## To get start, end and sequence of next quarter
nextquarter <- next_quarter()
## To get vector of date sequences
next_quarter(part = "sequence")
next_quarter()$sequence
seq(nextquarter)
## Get number of days of next quarter
day_nums <- next_quarter(part = "length")
next_quarter()$length
length(nextquarter)
}
|
6e83b092e728e9313fddc6fbc177b07e8dd420d3
|
2451a929f21a636690dafe9eb56c54972bf0cef5
|
/R/0_projtest-package.R
|
5b455df9ef6429004c3683a9470797853fdce5ea
|
[] |
no_license
|
AkselA/R-projtest
|
66b66fc5db1d423f6be8f8ef157c3422b6e2de36
|
7ffed50278a7aa1ec6f9b97246a11a17e484d1e2
|
refs/heads/master
| 2021-04-02T01:56:58.259839
| 2020-03-18T13:41:07
| 2020-03-18T13:41:07
| 248,231,799
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 284
|
r
|
0_projtest-package.R
|
#' A short description of the package
#'
#' A more detailed description of the package
#'
#' @section Details:
#' Highlight central functions, quick-start guide, etc.
#'
#' @section projtest contributors:
#'
#' @docType package
#' @name projtest-package
#' @rdname projtest
NULL
|
53791b6e112a9862419bf672be13d4632f658f07
|
8ed441ee034ab9f22ed248645f8f6ba2606b6e5b
|
/poids /poids1000grainesCOL.R
|
23a49d83adedab6041ac92e552dd8ea82772e160
|
[] |
no_license
|
CathyCat88/thesis
|
51b33ddf4f86255f1c143f68a8e57ad4dc98726c
|
a1f311f4b95d4ef40006dd5773d54c97cb295ea7
|
refs/heads/master
| 2020-09-28T12:39:58.624508
| 2016-11-13T16:17:16
| 2016-11-13T16:17:16
| 66,710,427
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,847
|
r
|
poids1000grainesCOL.R
|
# Analysis of 1000-seed weight ("masse de 1000 graines") for Col genotypes:
# barplot with standard-error bars, then normality / variance / t-tests
# comparing each mutant genotype against WT.
remove(list = ls())
library("ggplot2")
library("cowplot")
data <- data.frame(read.csv("Col1000.csv", sep = ",", header = TRUE))
##############
#CALCULATIONS#
##############
# Mean seed mass per genotype.
mean <- aggregate(data$masse, list(data$genotype), mean)
StandErr <- function(x) {
se <- sd(x)/sqrt(length(x)) # no need to detail x, aggregate fills everything in
}
standard.error <- aggregate(data$masse, list(data$genotype), StandErr)
summary <- cbind(mean, standard.error[,2]) # combine the 2 tables
colnames(summary) <- c("genotype", "weight", "standard_error")
# Fix the plotting order of the genotypes (WT first).
summary$genotype <- factor(summary$genotype, levels = c("WT", "nqr", "fqr1","nqrfqr1"))
#########
#BARPLOT#
#########
g <- ggplot(data = summary,
aes(x = genotype,
y = weight,
fill = genotype))
g <- g + theme(legend.position = "none")
g <- g + geom_bar(stat = "identity", color = "black", width = 0.6)
g <- g + xlab("")
g <- g + scale_y_continuous(name = "masse \nde 1000 graines (g)\n",
expand = c(0,0),
limits = c(0,22))
# NOTE(review): summary$ inside aes() works here but bypasses ggplot's data
# masking; prefer bare column names if this plot is ever faceted/subset.
g <- g + geom_errorbar(aes(ymin = summary$weight, ymax = summary$weight + summary$standard_error), width = 0.05)
g
save_plot('masse1000grainesCol.png', g, base_aspect_ratio = 1.3)
#######
#TESTS#
#######
# Shapiro-Wilk normality test per genotype.
shapiroTest <- aggregate(masse ~ genotype, data = data,
function (x) shapiro.test(x)$p.value)
# all distributions are normal except nqr
result <- data.frame(reference=character(0),
genotype=character(0),
bartlett=numeric(0),
bartlett.pass=logical(0),
student=numeric(0),
student.pass=logical(0))
# Compare each mutant against WT: Bartlett test for homogeneity of variances,
# then a (Welch) t-test on the seed masses; results collected row by row.
for (params in list(c("WT", "nqr"),
c("WT", "fqr1"),
c("WT", "nqrfqr1"))) {
reference <- params[1]
genotype <- params[2]
bartlettResult <- bartlett.test(list(data$masse[data$genotype == reference],
data$masse[data$genotype == genotype]))
# equality of variances
studentResult<- t.test(data$masse[data$genotype == reference],
data$masse[data$genotype == genotype])
result <- rbind(result, data.frame(reference=reference,
genotype=genotype,
bartlett=bartlettResult$p.value,
bartlett.pass=(bartlettResult$p.value < 0.05),
student=studentResult$p.value,
student.pass=(studentResult$p.value > 0.05)))
}
# Non-parametric fallback for nqr, whose distribution was not normal above.
wilcox.test(data$masse[data$genotype == "WT"],
data$masse[data$genotype == "nqr"])
|
ff610d2c9ce8b7b9e6b7fb18c3f08bfaad54e130
|
94d2f365e4eb96b6acc4289dd3ceae61f984c34d
|
/Rmd/01_03Leaset squares.R
|
86d3dabb6069c5d122832dcd97b8f057257bcb95
|
[] |
no_license
|
TrentLin/Regression-Model
|
c76520d684c7384d828092aa8c25f912fc0f284b
|
dba011206476a55667859c5c74f88d92da29254c
|
refs/heads/master
| 2021-01-01T19:38:49.342945
| 2015-01-06T07:49:00
| 2015-01-06T07:49:00
| 28,852,420
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 756
|
r
|
01_03Leaset squares.R
|
### Double check our calculations with R
library(UsingR)
library(ggplot2)
data(galton)

# Outcome (child height) and predictor (parent height).
# NOTE: x/y/xc/yc/xn/yn names are kept because they appear in the printed
# coefficient names of the lm() fits below.
y <- galton$child
x <- galton$parent

# Least-squares slope/intercept for child ~ parent, by hand vs lm().
slope <- cor(y, x) * sd(y) / sd(x)
intercept <- mean(y) - slope * mean(x)
rbind(c(intercept, slope), coef(lm(y ~ x)))

### Reversing the outcome/predictor relationship
slope <- cor(y, x) * sd(x) / sd(y)
intercept <- mean(x) - slope * mean(y)
rbind(c(intercept, slope), coef(lm(x ~ y)))

### Regression through the origin yields an equivalent slope if you center the data first
yc <- y - mean(y)
xc <- x - mean(x)
slope <- sum(yc * xc) / sum(xc ^ 2)
c(slope, coef(lm(y ~ x))[2])

### Normalizing variables results in the slope being the correlation
yn <- (y - mean(y)) / sd(y)
xn <- (x - mean(x)) / sd(x)
c(cor(y, x), cor(yn, xn), coef(lm(yn ~ xn))[2])
|
50c727870f372740672eb8f9c79f082f96a41cd7
|
382af42dc83d91a5cafa65657e020418baf9c168
|
/docs/R/render_site.R
|
7ff21b5f1ba51947d9999fdec89bcca4e3c23bb2
|
[] |
no_license
|
PLAY-behaviorome/PLAY-project.org
|
dce64e6c05e5e04af65595fc71683f7ed2664028
|
262415efabd89f27e6a34b53f953c7469e9a6b5c
|
refs/heads/master
| 2023-08-04T09:11:41.303136
| 2023-07-24T14:45:16
| 2023-07-24T14:45:16
| 183,499,477
| 1
| 5
| null | 2023-07-24T14:45:18
| 2019-04-25T19:44:06
|
R
|
UTF-8
|
R
| false
| false
| 224
|
r
|
render_site.R
|
# render_site
# source helper scripts and functions
#source(file = list.files("R", pattern = "\\.R$", full.names = TRUE))
# Load helpers that generate the video-clip HTML embedded in site pages.
source("R/write_video_clip_html.R")
source("R/write_video_clip_html_SJ.R")
# Build the full rmarkdown website from the project root.
rmarkdown::render_site()
|
82fed81e1cdabbc46ba4da16383d3c9414e6b907
|
430fdac27572c12f84620ab9eb9fecca772366ac
|
/wdesign-co-primary-code.R
|
46184c8c3ca4390d6e488a92ee149393677654c1
|
[] |
no_license
|
adaptive-designs/inf-theory
|
448d4c2489ee40506206885210d10fe3b6a802c5
|
a905018aaa0df2f7f7e040960c7eb82330c50002
|
refs/heads/master
| 2020-08-20T05:54:29.748824
| 2019-10-18T11:56:29
| 2019-10-18T11:56:29
| 215,988,814
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,570
|
r
|
wdesign-co-primary-code.R
|
library("pwr")
library("mvtnorm")
# Elementwise success proportion: x successes out of n observations.
# (The name is historical — this is a plain ratio estimate, not a median.)
median.beta <- function(x, n) {
  x / n
}
# Information-type loss for a two-endpoint arm.
#
# p1, p2         estimated response probabilities for the two endpoints
#                (vectorised over arms)
# target1/2      target response probabilities
# n              per-arm sample size used in the weighting term
# kappa          weight exponent; kappa = 0.5 makes the n-term vanish
#
# Returns a chi-square-style divergence between the target and estimated
# joint cell probabilities (endpoints treated as independent), scaled
# by n^(2 * kappa - 1).
loss.uni <- function(p1, p2, target1, target2, n = 1, kappa = 0.5) {
  # Estimated joint cell probabilities.
  a1 <- p1 * p2
  a2 <- p1 * (1 - p2)
  a3 <- (1 - p1) * p2
  a4 <- 1 - a1 - a2 - a3
  # Target joint cell probabilities.
  t1 <- target1 * target2
  t2 <- target1 * (1 - target2)
  t3 <- (1 - target1) * target2
  t4 <- 1 - t1 - t2 - t3
  divergence <- t1^2 / a1 + t2^2 / a2 + t3^2 / a3 + t4^2 / a4 - 1
  divergence * n^(2 * kappa - 1)
}
# Simulate a multi-arm phase II design with two co-primary binary endpoints.
#
# true1, true2      true response probabilities per arm for each endpoint
# target1, target2  target probabilities driving the loss.uni allocation
# correlation       latent Gaussian correlation between the two endpoints
# n, cohort         total sample size and cohort size (N = round(n/cohort) cohorts)
# assignment        "best" (min-loss arm) or "randomization" (loss-weighted sampling)
# prior1/2, beta1/2 Beta-style pseudo-observations seeding the estimates
# kappa             exponent in the loss weighting
# nsims             number of simulated trials
# hypothesis/test   if TRUE, run per-arm Fisher exact tests vs the control arm
# cut.off.typeI     family-wise type I level (Bonferroni-split across arms)
#
# Returns a list of operating characteristics (experimentation proportions,
# selection frequencies, expected number of successes, power/type I, bias).
# NOTE(review): RNG-driven; statement order is load-bearing, so the code
# below is unchanged — comments only. `hypothesis=T` uses T rather than TRUE.
wdesign.co.primary.ph2<-function(true1,true2,target1,target2,correlation=0,n,cohort=1,assignment="best",prior1=NULL,prior2=NULL,beta1=1,beta2=1,kappa=0.5,nsims,
hypothesis=T,alternative="greater",cut.off.typeI=0.05,control=1,test="Fisher"){
typeI<-cut.off.typeI
fish.mat1<-mat.or.vec(2,2)
fish.mat2<-mat.or.vec(2,2)
# N cohorts; M arms; Bonferroni-style two-sided split of the type I level.
N<-round(n/cohort)
M<-length(true1)
M2<-M-1
cutoff<-typeI/(M2)/2
z.norm<-qnorm(1-cutoff, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE)
# Accumulators across simulations (rows = sims, cols = arms).
rec.all<-result.exp1<-result.exp2<-mat.or.vec(nsims,M)
power.final<-result.suc1<-bias.final1<-result.suc2<-bias.final2<-mat.or.vec(nsims,1)
suc.cum1<-suc1<-exp1<-exp.cum1<-suc.cum2<-suc2<-exp2<-exp.cum2<-mat.or.vec(N+1,M)
experiment1<-array(0,dim=c(N+1,M,nsims))
experiment2<-array(0,dim=c(N+1,M,nsims))
# Simulation loop: one complete trial per iteration.
for (z in 1:nsims){
p.values<-mat.or.vec(2,M)
losses<-mat.or.vec(N+1,M)
# Row 1 holds the prior pseudo-observations for both endpoints.
exp1[1,]<- exp.cum1[1,]<-beta1
exp2[1,]<- exp.cum2[1,]<-beta2
suc1[1,]<-suc.cum1[1,]<-prior1*beta1
suc2[1,]<-suc.cum2[1,]<-prior2*beta2
p.est1<-median.beta(suc.cum1[1,],exp.cum1[1,])
p.est2<-median.beta(suc.cum2[1,],exp.cum2[1,])
suc.cum.new<-mat.or.vec(4,M)
# First cohort: allocate by minimal loss (or loss-weighted randomisation).
losses[1,]<-loss.uni(p1=p.est1,p2=p.est2,target1=target1,target2=target2,n=1,kappa)
if(assignment=="randomization"){
nextdose<-sample(1:M, cohort, prob = (1/(losses[1,]))/sum(1/(losses[1,])),replace = TRUE)}
else{nextdose<-which.min(losses[1,])}
suc1[2,]<-exp1[2,]<-0
suc2[2,]<-exp2[2,]<-0
exp1[2,nextdose]<-cohort
exp2[2,nextdose]<-cohort
exp.cum1[2,]<-exp.cum1[1,]
exp.cum2[2,]<-exp.cum2[1,]
exp.cum1[2,nextdose]<-exp.cum1[1,nextdose]+exp1[2,nextdose]
exp.cum2[2,nextdose]<-exp.cum2[1,nextdose]+exp2[2,nextdose]
# Correlated binary outcomes via a latent bivariate normal (Gaussian copula).
y<-pnorm(rmvnorm(cohort,mean=c(0,0),sigma=rbind(c(1,correlation),c(correlation,1))))
if(y[1]<true1[nextdose] & y[2]<true2[nextdose]){
response<-1
}else{
if(y[1]<true1[nextdose] & y[2]>true2[nextdose]){
response<-2
}else{
if(y[1]>true1[nextdose] & y[2]<true2[nextdose]){
response<-3
}else{
response<-4
}
}
}
suc.cum.new[response,nextdose]<-suc.cum.new[response,nextdose]+1
suc1[2,nextdose]<-sum(y[1]<true1[nextdose])
suc2[2,nextdose]<-sum(y[2]<true2[nextdose])
suc.cum1[2,]<-suc.cum1[1,]
suc.cum2[2,]<-suc.cum2[1,]
suc.cum1[2,nextdose]<-suc.cum1[1,nextdose]+suc1[2,nextdose]
suc.cum2[2,nextdose]<-suc.cum2[1,nextdose]+suc2[2,nextdose]
# Sequential allocation of the remaining cohorts.
j<-2
while (j<N+1){
p.est1<-median.beta(suc.cum1[j,],exp.cum1[j,])
p.est2<-median.beta(suc.cum2[j,],exp.cum2[j,])
# cat("probability 1=",p.est1,"\n")
# cat("probability 2=",p.est2,"\n")
losses[j,]<-loss.uni(p1=p.est1,p2=p.est2,target1=target1,target2=target2,n=exp.cum1[j,],kappa)
# cat(losses[j,],"\n")
if(assignment=="randomization"){
nextdose<-sample(1:M, cohort, prob = (1/(losses[j,]))/sum(1/(losses[j,])),replace = TRUE)}
else{nextdose<-which.min(losses[j,])}
exp1[j+1,]<-suc1[j+1,]<-0
exp2[j+1,]<-suc2[j+1,]<-0
exp1[j+1,nextdose]<-cohort
exp2[j+1,nextdose]<-cohort
exp.cum1[j+1,]<-exp.cum1[j,]
exp.cum2[j+1,]<-exp.cum2[j,]
exp.cum1[j+1,nextdose]<-exp.cum1[j,nextdose]+exp1[j+1,nextdose]
exp.cum2[j+1,nextdose]<-exp.cum2[j,nextdose]+exp2[j+1,nextdose]
y<-pnorm(rmvnorm(cohort,mean=c(0,0),sigma=rbind(c(1,correlation),c(correlation,1))))
# if(y[1]<true1[nextdose] & y[2]<true2[nextdose]){
# response<-1
# }else{
# if(y[1]<true1[nextdose] & y[2]>true2[nextdose]){
# response<-2
# }else{
# if(y[1]>true1[nextdose] & y[2]<true2[nextdose]){
# response<-3
# }else{
# response<-4
# }
# }
# }
# suc.cum.new[response,nextdose]<-suc.cum.new[response,nextdose]+1
suc1[j+1,nextdose]<-sum(y[1]<true1[nextdose])
suc2[j+1,nextdose]<-sum(y[2]<true2[nextdose])
suc.cum1[j+1,]<-suc.cum1[j,]
suc.cum2[j+1,]<-suc.cum2[j,]
suc.cum1[j+1,nextdose]<-suc.cum1[j,nextdose]+suc1[j+1,nextdose]
suc.cum2[j+1,nextdose]<-suc.cum2[j,nextdose]+suc2[j+1,nextdose]
j<-j+1}
# Trial-level summaries: experimentation and successes net of the prior rows.
result.exp1[z,]<-(exp.cum1[N+1,]-beta1)
result.exp2[z,]<-(exp.cum2[N+1,]-beta2)
result.suc1[z]<-sum(suc.cum1[N+1,]-suc.cum1[1,])
result.suc2[z]<-sum(suc.cum2[N+1,]-suc.cum2[1,])
# Final selection: recompute the loss from observed data only.
j<-N+1
p.est1<-median.beta(suc.cum1[j,]-suc.cum1[1,],exp.cum1[j,]-exp.cum1[1,])
p.est2<-median.beta(suc.cum2[j,]-suc.cum2[1,],exp.cum2[j,]-exp.cum2[1,])
# cat("probability 1=",p.est1,"\n")
# cat("probability 2=",p.est2,"\n")
losses[j,]<-loss.uni(p1=p.est1,p2=p.est2,target1=target1,target2=target2,n=1,kappa=0.5)
# cat(losses[j,],"\n")
nextdose<-which.min(losses[j,])
rec.all[z,nextdose]<-1
# Bias of the estimated (control - best arm) treatment effect, per endpoint.
bias.final1[z]<-(median.beta(suc.cum1[j,control]-suc.cum1[1,control],exp.cum1[j,control]-exp.cum1[1,control])-median.beta(suc.cum1[j,which.max(true1)]-suc.cum1[1,which.max(true1)],exp.cum1[j,which.max(true1)]-exp.cum1[1,which.max(true1)]))-(true1[control]-true1[which.max(true1)])
bias.final2[z]<-(median.beta(suc.cum2[j,control]-suc.cum2[1,control],exp.cum2[j,control]-exp.cum2[1,control])-median.beta(suc.cum2[j,which.max(true2)]-suc.cum2[1,which.max(true2)],exp.cum2[j,which.max(true2)]-exp.cum2[1,which.max(true2)]))-(true2[control]-true2[which.max(true2)])
# Hypothesis testing: Fisher exact test of each (better-than-control) arm
# vs the control arm, on both endpoints; under the null every arm is tested.
if (hypothesis==T){
if(all(true1==true1[control])){
if(test=="Fisher"){
for (q in 1:M){if(q!=control){
fish.mat1[1,1]<-as.integer(sum(suc.cum1[N+1,q]-suc.cum1[1,q]))
fish.mat1[1,2]<-as.integer((exp.cum1[N+1,q]-beta1[q])-sum(suc.cum1[N+1,q]-suc.cum1[1,q]))
fish.mat1[2,1]<-as.integer(sum(suc.cum1[N+1,control]-suc.cum1[1,control]))
fish.mat1[2,2]<-as.integer((exp.cum1[N+1,control]-beta1[control])-sum(suc.cum1[N+1,control]-suc.cum1[1,control]))
my<-fisher.test(fish.mat1, alternative = alternative)
p.values[1,q]<-my$p.value
fish.mat2[1,1]<-as.integer(sum(suc.cum2[N+1,q]-suc.cum2[1,q]))
fish.mat2[1,2]<-as.integer((exp.cum2[N+1,q]-beta2[q])-sum(suc.cum2[N+1,q]-suc.cum2[1,q]))
fish.mat2[2,1]<-as.integer(sum(suc.cum2[N+1,control]-suc.cum2[1,control]))
fish.mat2[2,2]<-as.integer((exp.cum2[N+1,control]-beta2[control])-sum(suc.cum2[N+1,control]-suc.cum2[1,control]))
my<-fisher.test(fish.mat2, alternative = alternative)
p.values[2,q]<-my$p.value
}else{
p.values[1:2,q]<-0
}}}
}else{better<-as.vector(which(true1>true1[control]))
if(test=="Fisher"){for (q in 1:length(better)){
fish.mat1[1,1]<-as.integer(sum(suc.cum1[N+1,better[q]]-suc.cum1[1,better[q]]))
fish.mat1[1,2]<-as.integer((exp.cum1[N+1,better[q]]-beta1[better[q]])-sum(suc.cum1[N+1,better[q]]-suc.cum1[1,better[q]]))
fish.mat1[2,1]<-as.integer(sum(suc.cum1[N+1,control]-suc.cum1[1,control]))
fish.mat1[2,2]<-as.integer((exp.cum1[N+1,control]-beta1[control])-sum(suc.cum1[N+1,control]-suc.cum1[1,control]))
my<-fisher.test(fish.mat1, alternative = alternative)
p.values[1,q]<-my$p.value
fish.mat2[1,1]<-as.integer(sum(suc.cum2[N+1,better[q]]-suc.cum2[1,better[q]]))
fish.mat2[1,2]<-as.integer((exp.cum2[N+1,better[q]]-beta2[better[q]])-sum(suc.cum2[N+1,better[q]]-suc.cum2[1,better[q]]))
fish.mat2[2,1]<-as.integer(sum(suc.cum2[N+1,control]-suc.cum2[1,control]))
fish.mat2[2,2]<-as.integer((exp.cum2[N+1,control]-beta2[control])-sum(suc.cum2[N+1,control]-suc.cum2[1,control]))
my<-fisher.test(fish.mat2, alternative = alternative)
p.values[2,q]<-my$p.value
}}
}
# Reject if any non-trivial p-value clears the Bonferroni cutoff.
if(test=="Fisher"){
if(any(p.values[p.values!=0]<cutoff)){
power.final[z]<-1
}else{
power.final[z]<-0
}
}
}
else{
power.final[z]<-0
}
}
# Aggregate operating characteristics across simulations.
y<-colSums(rec.all)/nsims
var.pavel<-mat.or.vec(M,1)
for (u in 1:M){var.pavel[u]<-var(result.exp1[,u]/n)}
# Under the global null the "power" entry is reported as type I error.
if(all(true1==true1[control])){output<-list(True.Probabilities=true1,number.of.simulation=nsims,Sample.Size=mean(rowSums(result.exp1)),
Experimentation=colSums(result.exp1)/(n*nsims),
Experimentation.SE=sqrt(var.pavel),Selections=y,ENS=mean(result.suc1),SE.ENS=sqrt(var(result.suc1)),ENS2=mean(result.suc2),SE.ENS2=sqrt(var(result.suc2)),
TypeI.error=mean(power.final),Bias=mean(bias.final1))}else{
output<-list(True.Probabilities=true1,number.of.simulation=nsims,Sample.Size=mean(rowSums(result.exp1)),
Experimentation=colSums(result.exp1)/(n*nsims),
Experimentation.SE=sqrt(var.pavel),Selections=y,ENS=mean(result.suc1),SE.ENS=sqrt(var(result.suc1)),ENS2=mean(result.suc2),SE.ENS2=sqrt(var(result.suc2)),
Power=mean(power.final),Bias=mean(bias.final1))}
return(output)}
|
122130653c9fc21092fbe329608b43e40c8b45e5
|
425d8acc79d9b149333f61a3b9c86532b0fe1754
|
/run_analysis.R
|
da4fda716045b255fb9f07597f0205eecd6a7244
|
[] |
no_license
|
jrnardin/Getting-and-Cleaning-Data
|
db94c5a9552e3d7875447a5803af649e3046291a
|
2cec461bc87e0b0f92ad249daec8f85a93201041
|
refs/heads/master
| 2021-07-12T00:24:53.796690
| 2017-10-09T00:24:33
| 2017-10-09T00:24:33
| 106,218,302
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,367
|
r
|
run_analysis.R
|
# Week 4 Assignment
#read in all of the tables
#used this in live assessment of data dimensions & content
library(plyr)
#read in all of the tables required for assignment
#left out the Inertial Signals data because it seemed unnecessary for this assignment
xtrain <- read.table("X_train.txt")
xtest <- read.table("X_test.txt")
ytrain <- read.table("y_train.txt")
ytest <- read.table("y_test.txt")
stest <- read.table("subject_test.txt")
strain <- read.table("subject_train.txt")
features <- read.delim("features.txt", header=FALSE, sep="")
#used this in live assessment of data dimensions & content
#label the columns of the TEST and TRAIN sets; all get same labels
stest <- setNames(object=stest, "Subject")
ytest <- setNames(object = ytest, "Labels")
xtest <- setNames(object=xtest, features$V2)
strain <- setNames(object=strain, "Subject")
ytrain <- setNames(object = ytrain, "Labels")
xtrain <- setNames(object=xtrain, features$V2)
##STEP 1: Merge the data sets
#I start by emrging the test sets together, then do the same for the train sets
#this maintains the order & subsequent data integrity
#merge the TEST sets together
temptest <- cbind(ytest, xtest)
newtest <- cbind(stest, temptest)
#merge the TRAIN sets together
temptrain <- cbind(ytrain, xtrain)
newtrain <- cbind(strain, temptrain)
#put the TEST and TRAIN datasets together
CompleteDataSet <- rbind(newtrain, newtest)
## STEP 2: Extract only the mean and standard-deviation measurements.
# Keep Subject (col 1) and Labels (col 2), every "*std*" column, and every
# "-mean"/"-Mean" column.  Columns such as "angle(...Mean...)" do not match
# "-[mM]ean" and are therefore excluded, dropping 7 "angle" fields --
# exactly the same selection as before.
std_cols  <- grep("[sS]td", names(CompleteDataSet))
mean_cols <- grep("-[mM]ean", names(CompleteDataSet))
MeanStdSubset <- CompleteDataSet[, c(1, 2, std_cols, mean_cols)]

## STEP 3: Use descriptive activity names.
# Map the numeric activity codes 1..6 onto the names taken from
# "activity_labels.txt" (same spellings/casing as the original script).
activity_names <- c("walking", "Walking_Upstairs", "Walking_Downstairs",
                    "sitting", "Standing", "Laying")
MeanStdSubset$Labels <- factor(MeanStdSubset$Labels,
                               levels = 1:6,
                               labels = activity_names)
## STEP 4: Appropriately label the data set with descriptive variable names.
# Most variables have 6 parts:
# 1. Whether the calculation is a Mean or Standard Deviation (StdDev)
# 2. Whether it is on the X, Y, Z axis or on the magnitude
#    (magnitude derived from the three-dimensional signals, Euclidean norm)
# 3. Whether it is motion from the Body or from Gravity
# 4. Whether it was recorded from the accelerometer or the gyroscope
# 5. Whether it is a jerk motion (derived in time)
# 6. Whether the values are from the time or frequency domain signals
#
# A single lookup table (old name -> new name) replaces the long run of
# one-off `colnames(x)[colnames(x) == old] <- new` assignments.  The
# resulting names are identical to the original script's output.
rename_map <- c(
  "Subject" = "ID_Num_of_Subject",
  "Labels"  = "Activity_Measured",
  # --- time domain ---
  "tBodyAcc-mean()-X" = "Mean_of_X_from_Body_Accelerometer_time",
  "tBodyAcc-mean()-Y" = "Mean_of_Y_from_Body_Accelerometer_time",
  "tBodyAcc-mean()-Z" = "Mean_of_Z_from_Body_Accelerometer_time",
  "tBodyAcc-std()-X" = "StdDev_of_X_from_Body_Accelerometer_time",
  "tBodyAcc-std()-Y" = "StdDev_of_Y_from_Body_Accelerometer_time",
  "tBodyAcc-std()-Z" = "StdDev_of_Z_from_Body_Accelerometer_time",
  "tGravityAcc-mean()-X" = "Mean_of_X_from_Gravity_Accelerometer_time",
  "tGravityAcc-mean()-Y" = "Mean_of_Y_from_Gravity_Accelerometer_time",
  "tGravityAcc-mean()-Z" = "Mean_of_Z_from_Gravity_Accelerometer_time",
  "tGravityAcc-std()-X" = "StdDev_of_X_from_Gravity_Accelerometer_time",
  "tGravityAcc-std()-Y" = "StdDev_of_Y_from_Gravity_Accelerometer_time",
  "tGravityAcc-std()-Z" = "StdDev_of_Z_from_Gravity_Accelerometer_time",
  "tBodyAccJerk-mean()-X" = "Mean_of_X_from_Body_Accelerometer_Jerk_time",
  "tBodyAccJerk-mean()-Y" = "Mean_of_Y_from_Body_Accelerometer_Jerk_time",
  "tBodyAccJerk-mean()-Z" = "Mean_of_Z_from_Body_Accelerometer_Jerk_time",
  "tBodyAccJerk-std()-X" = "StdDev_of_X_from_Body_Accelerometer_Jerk_time",
  "tBodyAccJerk-std()-Y" = "StdDev_of_Y_from_Body_Accelerometer_Jerk_time",
  "tBodyAccJerk-std()-Z" = "StdDev_of_Z_from_Body_Accelerometer_Jerk_time",
  "tBodyGyro-mean()-X" = "Mean_of_X_from_Body_Gyroscope_time",
  "tBodyGyro-mean()-Y" = "Mean_of_Y_from_Body_Gyroscope_time",
  "tBodyGyro-mean()-Z" = "Mean_of_Z_from_Body_Gyroscope_time",
  "tBodyGyro-std()-X" = "StdDev_of_X_from_Body_Gyroscope_time",
  "tBodyGyro-std()-Y" = "StdDev_of_Y_from_Body_Gyroscope_time",
  "tBodyGyro-std()-Z" = "StdDev_of_Z_from_Body_Gyroscope_time",
  "tBodyGyroJerk-mean()-X" = "Mean_of_X_from_Body_Gyroscope_Jerk_time",
  "tBodyGyroJerk-mean()-Y" = "Mean_of_Y_from_Body_Gyroscope_Jerk_time",
  "tBodyGyroJerk-mean()-Z" = "Mean_of_Z_from_Body_Gyroscope_Jerk_time",
  "tBodyGyroJerk-std()-X" = "StdDev_of_X_from_Body_Gyroscope_Jerk_time",
  "tBodyGyroJerk-std()-Y" = "StdDev_of_Y_from_Body_Gyroscope_Jerk_time",
  "tBodyGyroJerk-std()-Z" = "StdDev_of_Z_from_Body_Gyroscope_Jerk_time",
  "tBodyAccMag-mean()" = "Mean_of_Mag_from_Body_Accelerometer_time",
  "tBodyAccMag-std()" = "StdDev_of_Mag_from_Body_Accelerometer_time",
  "tGravityAccMag-mean()" = "Mean_of_Magnitude_from_Gravity_Accelerometer_time",
  "tGravityAccMag-std()" = "StdDev_of_Magnitude_from_Gravity_Accelerometer_time",
  "tBodyAccJerkMag-mean()" = "Mean_of_Magnitude_from_Body_Accelerometer_Jerk_time",
  "tBodyAccJerkMag-std()" = "StdDev_of_Magnitude_from_Body_Accelerometer_Jerk_time",
  "tBodyGyroMag-mean()" = "Mean_of_Mag_from_Body_Gyroscope_time",
  "tBodyGyroMag-std()" = "StdDev_of_Mag_from_Body_Gyroscope_time",
  "tBodyGyroJerkMag-mean()" = "Mean_of_Magnitude_from_Body_Gyroscope_Jerk_time",
  "tBodyGyroJerkMag-std()" = "StdDev_of_Magnitude_from_Body_Gyroscope_Jerk_time",
  # --- frequency domain ---
  "fBodyAcc-mean()-X" = "Mean_of_X_from_Body_Accelerometer_freq",
  "fBodyAcc-mean()-Y" = "Mean_of_Y_from_Body_Accelerometer_freq",
  "fBodyAcc-mean()-Z" = "Mean_of_Z_from_Body_Accelerometer_freq",
  "fBodyAcc-meanFreq()-X" = "MeanFreq_of_X_from_Body_Accelerometer_freq",
  "fBodyAcc-meanFreq()-Y" = "MeanFreq_of_Y_from_Body_Accelerometer_freq",
  "fBodyAcc-meanFreq()-Z" = "MeanFreq_of_Z_from_Body_Accelerometer_freq",
  "fBodyAcc-std()-X" = "StdDev_of_X_from_Body_Accelerometer_freq",
  "fBodyAcc-std()-Y" = "StdDev_of_Y_from_Body_Accelerometer_freq",
  "fBodyAcc-std()-Z" = "StdDev_of_Z_from_Body_Accelerometer_freq",
  "fBodyAccJerk-mean()-X" = "Mean_of_X_from_Body_Accelerometer_Jerk_freq",
  "fBodyAccJerk-mean()-Y" = "Mean_of_Y_from_Body_Accelerometer_Jerk_freq",
  "fBodyAccJerk-mean()-Z" = "Mean_of_Z_from_Body_Accelerometer_Jerk_freq",
  "fBodyAccJerk-meanFreq()-X" = "MeanFreq_of_X_from_Body_Accelerometer_Jerk_freq",
  "fBodyAccJerk-meanFreq()-Y" = "MeanFreq_of_Y_from_Body_Accelerometer_Jerk_freq",
  "fBodyAccJerk-meanFreq()-Z" = "MeanFreq_of_Z_from_Body_Accelerometer_Jerk_freq",
  "fBodyAccJerk-std()-X" = "StdDev_of_X_from_Body_Accelerometer_Jerk_freq",
  "fBodyAccJerk-std()-Y" = "StdDev_of_Y_from_Body_Accelerometer_Jerk_freq",
  "fBodyAccJerk-std()-Z" = "StdDev_of_Z_from_Body_Accelerometer_Jerk_freq",
  "fBodyGyro-mean()-X" = "Mean_of_X_from_Body_Gyroscope_freq",
  "fBodyGyro-mean()-Y" = "Mean_of_Y_from_Body_Gyroscope_freq",
  "fBodyGyro-mean()-Z" = "Mean_of_Z_from_Body_Gyroscope_freq",
  "fBodyGyro-meanFreq()-X" = "MeanFreq_of_X_from_Body_Gyroscope_freq",
  "fBodyGyro-meanFreq()-Y" = "MeanFreq_of_Y_from_Body_Gyroscope_freq",
  "fBodyGyro-meanFreq()-Z" = "MeanFreq_of_Z_from_Body_Gyroscope_freq",
  "fBodyGyro-std()-X" = "StdDev_of_X_from_Body_Gyroscope_freq",
  "fBodyGyro-std()-Y" = "StdDev_of_Y_from_Body_Gyroscope_freq",
  "fBodyGyro-std()-Z" = "StdDev_of_Z_from_Body_Gyroscope_freq",
  "fBodyAccMag-mean()" = "Mean_of_Mag_from_Body_Accelerometer_freq",
  "fBodyAccMag-meanFreq()" = "MeanFreq_of_Mag_from_Body_Accelerometer_freq",
  "fBodyAccMag-std()" = "StdDev_of_Mag_from_Body_Accelerometer_freq",
  "fBodyBodyAccJerkMag-mean()" = "Mean_of_Magnitude_from_Body_Accelerometer_Jerk_freq",
  "fBodyBodyAccJerkMag-meanFreq()" = "MeanFreq_of_Magnitude_from_Body_Accelerometer_Jerk_freq",
  "fBodyBodyAccJerkMag-std()" = "StdDev_of_Magnitude_from_Body_Accelerometer_Jerk_freq",
  "fBodyBodyGyroMag-mean()" = "Mean_of_Mag_from_Body_Gyroscope_freq",
  "fBodyBodyGyroMag-meanFreq()" = "MeanFreq_of_Mag_from_Body_Gyroscope_freq",
  "fBodyBodyGyroMag-std()" = "StdDev_of_Mag_from_Body_Gyroscope_freq",
  "fBodyBodyGyroJerkMag-mean()" = "Mean_of_Magnitude_from_Body_Gyroscope_Jerk_freq",
  "fBodyBodyGyroJerkMag-meanFreq()" = "MeanFreq_of_Magnitude_from_Body_Gyroscope_Jerk_freq",
  "fBodyBodyGyroJerkMag-std()" = "StdDev_of_Magnitude_from_Body_Gyroscope_Jerk_freq"
)
# Rename in place; columns not present in the map keep their current name.
hits <- names(MeanStdSubset) %in% names(rename_map)
names(MeanStdSubset)[hits] <- unname(rename_map[names(MeanStdSubset)[hits]])
# STEP 5: Create a second, independent tidy data set with the average of each
# variable for each activity and each subject.
#
# The original spelled out mean(<column>) once per measurement and then
# relied on `names(Mean_of_All) <- names(MeanStdSubset)` -- which is only
# correct if the hand-written order exactly matches the data frame's column
# order.  Averaging every non-grouping column programmatically keeps the
# columns in MeanStdSubset's own order, so the names line up by construction.
group_cols <- c("ID_Num_of_Subject", "Activity_Measured")
Mean_of_All <- ddply(MeanStdSubset, .(ID_Num_of_Subject, Activity_Measured),
                     function(piece) {
                       # colMeans over all measurement columns of this
                       # subject/activity slice (all numeric).
                       colMeans(piece[, setdiff(names(piece), group_cols)])
                     })
# Kept for parity with the original script; with the programmatic means the
# output columns already carry these names, so this is a safe no-op.
names(Mean_of_All) <- names(MeanStdSubset)
write.csv(Mean_of_All, "Mean_of_All_Output.csv")
write.table(Mean_of_All, file = "Mean_of_All_Output.txt", row.names = FALSE)
|
03bb784e283542e6f694602e6a1d493bd4f03d39
|
845ff6a964548045e8d9bd8100f2315f13990d0a
|
/run_analysis.R
|
4d22ad8c0fc5a5c05981e6376b005442a4582e7e
|
[] |
no_license
|
rrbaker/coursera_gettingcleaningdata
|
c39e59dd59df20a6166d8199f92a53f9a1244322
|
79bbf8dfb2d0207125017e5828407bc1a5770852
|
refs/heads/master
| 2020-12-25T14:38:58.158705
| 2016-06-06T19:02:47
| 2016-06-06T19:02:47
| 60,539,126
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,139
|
r
|
run_analysis.R
|
## Getting and Cleaning Data Course
## Final Project (Week 4 Assignment), June 2016
## Coursera

## 0. Setup
setwd("/Users/rrbaker/Sites/_edu/coursera_r/coursera_gettingcleaningdata")
# Raw inputs: feature names, per-split measurements (X), activity codes (y),
# subject ids, and the activity-code -> description lookup table.
train_features <- read.table("features.txt", skip = FALSE)[, 2]
train_subject  <- read.table("train/subject_train.txt", skip = FALSE)
train_x        <- read.table("train/X_train.txt", skip = FALSE)
train_y        <- read.table("train/y_train.txt", skip = FALSE)
test_x         <- read.table("test/X_test.txt", skip = FALSE)
test_y         <- read.table("test/y_test.txt", skip = FALSE)
test_subject   <- read.table("test/subject_test.txt", skip = FALSE)
test_labels    <- read.table("activity_labels.txt", skip = FALSE)
labels <- test_labels
names(labels) <- c("Code", "Description")

## 1. Merge the test and training sets.
train_x <- cbind(train_subject, train_y, train_x)
test_x  <- cbind(test_subject, test_y, test_x)
data_merged <- rbind(train_x, test_x)
names(data_merged) <- c("subject", "activity", as.character(train_features))

## 2. Extract only the mean()/std() measurements (plus subject/activity).
mean_cols <- grepl("mean()", names(data_merged), fixed = TRUE)
std_cols  <- grepl("std()", names(data_merged), fixed = TRUE)
cols <- mean_cols | std_cols
cols[1:2] <- TRUE                     # always keep subject and activity
data_merged <- data_merged[, cols]

## 3. Use descriptive activity names.
# BUG FIX: the original indexed `actions$Description`, but no object named
# `actions` is ever created in this script -- the lookup table is `labels`.
data_merged$activity <- labels$Description[as.numeric(data_merged$activity)]
data_merged$activity <- gsub("_", "", data_merged$activity)  # cleanup (line was duplicated)

## 4. Appropriately labels the data set with descriptive variable names.
# gsub("BodyBody","",names(data_merged)) -> names(data_merged) # cleanup

## 5. Summarise (mean per subject/activity) and export.
require(plyr)
data_summarized <- ddply(data_merged, .(subject, activity),
                         function(x) colMeans(x[, 3:length(names(x))]))
# Clean the names: lower-case, strip parentheses and hyphens.
names(data_summarized) <- tolower(names(data_summarized))
names(data_summarized) <- gsub("[\\(\\)|-]", "", names(data_summarized))
# Final check (prints when run interactively).
data_summarized
# Export to file (was `row.name=`, a partially-matched argument).
write.table(data_summarized, file = "tidyData.txt", row.names = FALSE)
|
1bba5ca180fd51011ccc72f776945e9d7ba02359
|
221c9aa934db54586c552e452fff832a7bd8142b
|
/code/rangeshiftR_install.R
|
85ad3e19378bbf0afb2f3f7606ffa057b2e1a1cd
|
[] |
no_license
|
VeeBurton/CRAFTY-OPM
|
59e6f0ab41fd4ff4b54e7e8b81247f9736cbbc74
|
f39704add7b5b017cf7747a385d81b98f613e5cc
|
refs/heads/master
| 2023-03-19T23:13:37.629370
| 2021-03-05T13:35:30
| 2021-03-05T13:35:30
| 339,418,772
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 661
|
r
|
rangeshiftR_install.R
|
# RangeShiftR installation script, following the instructions at:
# https://rangeshifter.github.io/RangeshiftR-tutorials/installing.html

# CRAN dependencies.
for (pkg in c("Rcpp", "devtools", "Rdpack")) {
  install.packages(pkg)
}

# Troubleshooting note kept from an earlier run:
# Error in inDL(x, as.logical(local), as.logical(now), ...) : unable to load shared object 'C:/Users/vanessa.burton.sb/Documents/R/win-library/4.0/xml2/libs/x64/xml2.dll':
# LoadLibrary failure: This program is blocked by group policy. For more information, contact your system administrator.

library(Rcpp)
library(devtools)
library(Rdpack)

# Quick sanity check that Rcpp can compile C++ code.
Rcpp::evalCpp("2+2")

# Install RangeShiftR itself from GitHub, then load it.
devtools::install_github("https://github.com/RangeShifter/RangeShiftR-package", ref = "main")
library(RangeShiftR)
|
c8de8b71b494d8407ed82a831df70e5bbcf37d6a
|
e90a363627ad08bbeb2c5d91c08d9db0e662837f
|
/man/camel_underscore.Rd
|
a70d47cdd2e1f64bef8ea10f8072e29ad12cd6b9
|
[
"MIT"
] |
permissive
|
tarakc02/preprocessr
|
77451fb88304338a4d53b037f6904381889bacbc
|
673594f9bb158df8c4068410a50176c6fc90c5b4
|
refs/heads/master
| 2020-06-01T13:36:19.578429
| 2015-09-01T23:47:41
| 2015-09-01T23:47:41
| 41,769,270
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 268
|
rd
|
camel_underscore.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/etc.R
\name{camel_underscore}
\alias{camel_underscore}
\title{Convert camelCase to under_score}
\usage{
camel_underscore(strings)
}
\description{
Convert camelCase to under_score
}
|
5b5b62a369ffe32c928380f68cccdafbd08f2d5a
|
b3f3ba484c247fed8d9e872846d269273575772c
|
/man/open.Rd
|
14621084db1b6aa487c91f8cf28901356496aa32
|
[
"MIT"
] |
permissive
|
ip2location/ip2proxy-r
|
b5a79f24749a06cadc30aeab10e85e78593d2f27
|
1c655641cd0ec91cf4cf27c9bc8619e18cd98fbe
|
refs/heads/main
| 2023-02-05T15:11:07.920491
| 2023-02-02T09:01:03
| 2023-02-02T09:01:03
| 310,230,190
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 432
|
rd
|
open.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IP2Proxy.r
\name{open}
\alias{open}
\title{Load IP2Proxy BIN data}
\usage{
open(bin_location)
}
\arguments{
\item{bin_location}{Absolute path of IP2Proxy BIN data}
}
\description{
Load the IP2Proxy BIN data for lookup. Free IP2Proxy LITE data available for download at <https://lite.ip2location.com/>
}
\examples{
\dontrun{
open("~/IP-COUNTRY.BIN")
}
}
|
4a4b2540438f7b5d538a9382877903ebf61977f8
|
f30c509a803fdb653df321dc4bf661e729446b72
|
/Task_02/task02.r
|
cb4047642dbbc282ba51f1b62c272a557589e110
|
[] |
no_license
|
akp0006/Tasks
|
dca719d1285eb44a3dbc0b0f266d6653259c7cfb
|
10a2937b105a38bcbe2217201a0b997b54027964
|
refs/heads/master
| 2020-12-10T20:33:21.714890
| 2020-04-04T01:43:24
| 2020-04-04T01:43:24
| 233,703,394
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,970
|
r
|
task02.r
|
# Task 02: load the raw "beren" observation log and build an age column.
setwd("C:\\Users\\Abbey\\Desktop\\Evolution\\Tasks\\Task_02")
Data <- read.csv("http://jonsmitchell.com/data/beren.csv", stringsAsFactors=F)
# Keep a local copy of the raw download.
write.csv(Data, "rawdata.csv", quote=F)
# Interactive exploration of dimensions and content.
Data
length(Data)
nrow(Data)
ncol(Data)
colnames(Data)
head(Data)
Data[1,]
Data[2,]
Data[1:3,]
Data[1:3, 4]
Data[1:5, 1:3]
Data[257, 1:3]
# Rows recording a bottle feed; column 9 is the event type.
Feeds <- which(Data[,9] == "bottle")
berenMilk <- Data[Feeds,]
head(berenMilk)
# Three equivalent ways of selecting the same rows.
Feeds <- which(Data[,"event"] == "bottle")
Feeds <- which(Data$event == "bottle")
# Columns 1:3 are year/month/day; paste them into a date string, convert,
# and express each observation as days since the "birth" event.
dayID <- apply(Data, 1, function(x) paste(x[1:3], collapse="-"))
dateID <- sapply(dayID, as.Date, format = "%Y-%m-%d", origin = "2019-04-18")
Data$age <- dateID - dateID[which(Data$event == "birth")]
head(Data)
# Sort by age and save/reload the cleaned table.
beren2 <- Data
beren3 <- beren2[order(beren2$age),]
head(beren2)
head(beren3)
write.csv(beren3, "beren_new.csv", quote=F, row.names=FALSE)
setwd("C:\\Users\\Abbey\\Desktop\\Evolution\\Tasks\\Task_02")
beren3 <- read.csv("C:\\Users\\Abbey\\Desktop\\Evolution\\Tasks\\Task_02\\beren_new.csv", stringsAsFactors=F)
beren3
#Question1: The first hypothesis is inappropriate for the given data for many reasons. First of all, Beren's weight is not regularly recorded, especially after August 19th. There is also the fact that there is no recroded quantitative data for how much he eats (only how many oz he drinks from the bottle). Therefore, a comparison of how much Beren eats each day and how much he weighs is not testable given the data. The second hypothesis is not testable because it is too vague--the presence of a "relationship" is impossible to test.
# Per-day bottle-feed summaries, now computed on the age-sorted table.
Feeds <- which(beren3[,9] == "bottle")
avgMilk <- mean(beren3$value[Feeds])
avgMilk
# Mean ounces per feed, grouped by age in days.
avgFeed <- tapply(beren3$value[Feeds], beren3$age[Feeds], mean)
avgFeed
length(avgFeed)
class(avgFeed)
dim(avgFeed)
# Variance, daily total, and number of feeds per day.
varFeed <- tapply(beren3$value[Feeds], beren3$age[Feeds], var)
totalFeed <- tapply(beren3$value[Feeds], beren3$age[Feeds], sum)
numFeeds <- tapply(beren3$value[Feeds], beren3$age[Feeds], length)
totalFeed
numFeeds
varFeed
?cor
# Does feed size correlate with age?
cor(beren3$value[Feeds], beren3$age[Feeds])
cor.test(beren3$value[Feeds], beren3$age[Feeds])
berenCor <- cor.test(beren3$value[Feeds], beren3$age[Feeds])
summary(berenCor)
# One-way ANOVA: feed amount by caregiver.
berenANOVA <- aov(beren3$value[Feeds] ~ beren3$caregiver[Feeds])
berenANOVA
boxplot(beren3$value[Feeds] ~ beren3$caregiver[Feeds], xlab = "who gave the bottle", ylab = "amount of milk consumed (oz)")
?par
options("device")
# Plot daily totals on screen, then repeat the same plot into a PDF.
par(las=1, mar=c(5,5,1,1), mgp=c(2,0.5,0), tck=-0.01)
plot(as.numeric(names(totalFeed)), totalFeed, type="b", pch=16, xlab="age in days", ylab="ounces of milk")
abline(h=mean(totalFeed), lty=2, col='red')
pdf("r02b-totalMilkByDay.pdf", height=4, width=4)
par(las=1, mar=c(5,5,1,1), mgp=c(2,0.5,0), tck=-0.01)
plot(as.numeric(names(totalFeed)), totalFeed, type="b", pch=16, xlab="age in days", ylab="ounces of milk")
abline(h=mean(totalFeed), lty=2, col='red')
dev.off()
#Question2: Because the milk data was recorded at various times of day, multiple times per day while he was in daycare. The graph only shows the total milk per day and it doesn't account for the time of day, how much he had at each feeding, or how long he was actually in daycare to be fed.
source("http://jonsmitchell.com/code/plotFxn02b.R")
# Nap-duration analysis.  The original contained three near-identical
# copy/paste attempts at the hour:minute -> decimal-hours conversion (plus
# dead intermediates beren5, napstart/napstart2, etc. that nothing later
# uses); they are collapsed into one helper.  The final objects (beren4,
# beren6, NS_dec, NE_dec, timeeachnap, beren7, totalNap, beren8, berenCorr)
# are identical to the original third attempt.
Naps <- which(beren3[,9] == "nap")
beren4 <- beren3[Naps,]

# Convert an hour column and a minute column into decimal hours
# (e.g. 13 h 30 min -> 13.5).
hm_to_decimal <- function(hours, minutes) {
	stamps <- paste(hours, ":", minutes, sep="")
	sapply(strsplit(stamps, ":"),
		function(x) {
		x <- as.numeric(x)
		x[1] + x[2]/60
		}
		)
}

# Naps need both a recorded start and end; drop rows with NA in the
# end-hour/end-minute columns (cols 7:8), as in the original.
beren6 <- beren4[complete.cases(beren4[ , 7:8]),]

# Nap start and end times, in decimal hours (cols 5:6 start, 7:8 end).
NS_dec <- hm_to_decimal(beren6[,5], beren6[,6])
NE_dec <- hm_to_decimal(beren6[,7], beren6[,8])

# Duration of each nap (hours), total sleep per day, and plot.
timeeachnap <- NE_dec - NS_dec
beren7 <- cbind(beren6, timeeachnap)
totalNap <- tapply(beren7$timeeachnap, beren7$age, sum)
plot(as.numeric(names(totalNap)), totalNap, type="b", pch=16, xlab="age in days", ylab="total time slept (hours)")

# Does a later start predict a shorter nap?
beren8 <- cbind(beren7, NS_dec)
berenCorr <- cor.test(beren8$timeeachnap, beren8$NS_dec)
#Step8: Given the p-value of 0.002, the test suggests that the time the nap starts and the nap's duration are significantly correlated. The correlation coefficient of -0.28 suggests that the correlation isn't particularly strong and that these two variables are negatively correlated--as one variable increases, the other decreases.
#Hypothesis: Between the time 10:00am and 2:00pm, the amount of milk Beren drinks each day is positively correlated with the time he spends napping.
# NOTE(review): everything from here down re-runs the earlier pipeline from a
# fresh download rather than reusing the objects built above.
setwd("C:\\Users\\Abbey\\Desktop\\Evolution\\Tasks\\Task_02")
Data <- read.csv("http://jonsmitchell.com/data/beren.csv", stringsAsFactors=F)
write.csv(Data, "rawdata.csv", quote=F)
Data
length(Data)
nrow(Data)
ncol(Data)
colnames(Data)
head(Data)
Data[1,]
Data[2,]
Data[1:3,]
Data[1:3, 4]
Data[1:5, 1:3]
Data[257, 1:3]
Feeds <- which(Data[,9] == "bottle")
berenMilk <- Data[Feeds,]
head(berenMilk)
Feeds <- which(Data[,"event"] == "bottle")
Feeds <- which(Data$event == "bottle")
# Rebuild the age-in-days column, as before.
dayID <- apply(Data, 1, function(x) paste(x[1:3], collapse="-"))
dateID <- sapply(dayID, as.Date, format = "%Y-%m-%d", origin = "2019-04-18")
Data$age <- dateID - dateID[which(Data$event == "birth")]
head(Data)
beren2 <- Data
beren3 <- beren2[order(beren2$age),]
# NOTE(review): `Feeds` holds row positions computed on the UNSORTED Data,
# but is applied here to the age-sorted beren3, so rows are misindexed.
# This totalFeed is never used again (totalFeed2 below is), so the defect
# is in dead code -- flagged rather than changed.
totalFeed <- tapply(beren3$value[Feeds], beren3$age[Feeds], sum)
# Nap rows with complete end times; hour/minute columns converted to
# decimal hours (same conversion as in the earlier section).
Naps <- which(beren3[,9] == "nap")
beren4 <- beren3[Naps,]
beren6 <- beren4[complete.cases(beren4[ , 7:8]),]
napstart3 <- beren6[,5]
napstartmin3 <- beren6[,6]
NStimestamp3 <- paste(napstart3, ":", napstartmin3, sep="")
NStimestamp3
NS_dec <- sapply(strsplit(NStimestamp3,":"),
	function(x) {
	x <- as.numeric(x)
	x[1]+x[2]/60
	}
	)
napend3 <- beren6[,7]
napendmin3 <- beren6[,8]
NEtimestamp3 <- paste(napend3,":", napendmin3, sep="")
NE_dec <- sapply(strsplit(NEtimestamp3,":"),
	function(x) {
	x <- as.numeric(x)
	x[1]+x[2]/60
	}
	)
NE_dec - NS_dec
timeeachnap <- NE_dec - NS_dec
beren7 <- cbind(beren6, timeeachnap)
totalNap <- tapply(beren7$timeeachnap, beren7$age, sum)
beren_totalNap <- totalNap
# Restrict naps and bottle feeds to the 10:00-14:00 window, then total per day.
Beren_napsubsettime <- subset(beren4, start_hour >= 10 & start_hour < 14)
nrow(Beren_napsubsettime)
berenMilk2 <- which(beren3$event == "bottle")
beren_Milk <- beren3[berenMilk2,]
Beren_milksubsettime2 <- subset(beren_Milk, start_hour >= 10 & start_hour < 14)
totalFeed2 <- tapply(Beren_milksubsettime2$value, Beren_milksubsettime2$age, sum)
beren7_subsettime <- subset(beren7, start_hour >= 10 & start_hour < 14)
totalNap2 <- tapply(beren7_subsettime$timeeachnap, beren7_subsettime$age, sum)
TF2_matrix <- matrix(totalFeed2, ncol=1)
TN2_matrix <- matrix(totalNap2, ncol=1)
# NOTE(review): these hand-picked row indices presumably align the two series
# on days present in both; they are specific to this exact data download and
# will silently break if the data change -- verify before reuse.
tN2_matrix <- TN2_matrix[c(1:41,44:61),]
tF2_matrix <- TF2_matrix[c(1:18,21,24:25,28:29,31:41,45:48,50:70),]
scatter.smooth(x=tN2_matrix, y=tF2_matrix, main="Milk consumption as a function of time napped", xlab="Time spent napping per day (hrs)", ylab="Milk consumed per day (oz)")
|
75024cbe7e9b837e0ed7d5026a68c29f94c8ecdd
|
c4bc4a8ebc3e6b85201f794a92d228143839f468
|
/src/02_data-processing/old/country_province.R
|
52c0defd6db10aaee40597e2501294023e3bdbc0
|
[] |
no_license
|
papabloblo/coronavirus
|
e718a32086eb1d95dcb2d20782996593cb83d898
|
95b981231f5e9eea5404feed5280b030c688574e
|
refs/heads/master
| 2021-03-12T15:15:55.658036
| 2020-12-17T08:25:25
| 2020-12-17T08:25:25
| 246,631,642
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 761
|
r
|
country_province.R
|
library(tidyverse)

# Upstream-prepared daily COVID reports and country populations.
daily_reports <- readRDS("data/01_tratamiento/daily_reports.RDS")
population <- readRDS("data/01_tratamiento/population.RDS")

population <- population %>%
  filter(country != "US")

# Attach population to each report (joined on the shared columns).
daily_reports <- daily_reports %>%
  left_join(population)

# Rolling 14-day case/death windows per country+province, plus per-100k rates.
country_province <- daily_reports %>%
  group_by(country, province) %>%
  arrange(date) %>%
  mutate(
    confirmed_acum_14d = confirmed_acum - lag(confirmed_acum, n = 14L, default = 0),
    # BUG FIX: was `confirmed_acum_15d`, a column that is never created
    # (typo for the 14-day window computed on the line above).
    incidence_100k_14d_acum = confirmed_acum_14d/population*100000,
    deaths_acum_14d = deaths_acum - lag(deaths_acum, n = 14L, default = 0),
    # BUG FIX: was `deaths_acum` (cumulative), inconsistent with both the
    # "_14d_" name and the parallel incidence calculation above.
    deaths_100k_14d_acum = deaths_acum_14d/population*100000
  ) %>%
  ungroup()

saveRDS(country_province, "data/01_tratamiento/country_province.RDS")
|
dd0eeb73bb4534f475a3e3b23546d7ec864b59a0
|
0c7c77bb715d5098fbbc5e05bf21d77548ec2a81
|
/part2.R
|
d6f75ca19363e157e23bd70f55ac0b469710cb4a
|
[] |
no_license
|
san5696/STAT501
|
86f7da3e4ee0987f061912da41cb34355c92874f
|
0c4a31023434551323cd560e0e03567cc8bdbece
|
refs/heads/main
| 2023-03-18T23:07:09.111856
| 2021-03-19T02:29:56
| 2021-03-19T02:29:56
| 326,817,274
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 537
|
r
|
part2.R
|
# Read the assembled data set (comma-separated, with a header row).
bf <- read.table("FinalData.csv", sep = ",", header = TRUE)

# Logistic regression of sex on height, weight, and class.
llogit <- glm(SexCode ~ Height + Weight + Class, family = binomial, data = bf)
summary(llogit)
anova(llogit)
anova(llogit, test = "Chisq")

# Height-only vs. weight+class logistic models, for comparison.
full_model    <- glm(SexCode ~ Height, family = binomial, data = bf)
reduced_model <- glm(SexCode ~ Weight + Class, family = binomial, data = bf)
anova(reduced_model)
anova(full_model)

# Linear model of weight, full and reduced.
linear_model <- lm(Weight ~ Height + Sex + Class, data = bf)
summary(linear_model)
anova(linear_model)
reduced_model1 <- lm(Weight ~ Sex + Class, data = bf)
anova(reduced_model1)
|
92d00bc27b26e2f2ec750f221430b299912a92df
|
faced0c1cc44934c02ae35903d2d6031a8bda92a
|
/doc/2 lasso.R
|
f653a3e68c175ba1eac48cd4b3b521b5f83c48bd
|
[] |
no_license
|
hz2657/Spring2020-Project3-group12
|
e9b8f07b23ffeff7ee8cdac4949c0c31f529da0c
|
66e745e2007e6ce1e79f4a58aca08ca89b3f9d33
|
refs/heads/master
| 2021-05-24T13:34:58.944927
| 2020-04-02T00:16:38
| 2020-04-02T00:16:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 831
|
r
|
2 lasso.R
|
# ---
# title: "Lasso R"
# author: Huizhe ZHU
# output: html_notebook
# ---
# BUG FIX: the four lines above are an R Markdown YAML header that was pasted
# into a plain .R file; the bare `---` made the script unparseable.  They are
# kept as comments so the file runs.

# Design matrix for the held-out set.
# NOTE(review): `dat_test` and `dat_train` are assumed to exist in the calling
# environment -- they are not created anywhere in this file.
newx <- model.matrix(~.-emotion_idx, data = dat_test)

# lasso
library(glmnet)
x <- model.matrix(emotion_idx ~ ., data = dat_train)
y <- factor(dat_train$emotion_idx)
lassoModel <- glmnet(x, y, alpha = 1, family = "multinomial")

# cross validation (slow: ~49 min on the original machine)
set.seed(1031)
cv.lasso <- cv.glmnet(x, y, alpha = 1, family = "multinomial")
system.time(cv.glmnet(x, y, alpha = 1, family = "multinomial")) # 2926.84 secs = 48.78 min
coef(cv.lasso) ## Print out coefficients at optimal lambda
# BUG FIX: was `system.time(lassoModel = glmnet(...))`, which passes
# `lassoModel` as an (invalid) named argument to system.time and errors;
# `<-` assigns inside the timed expression as intended.
time <- system.time(lassoModel <- glmnet(x, y, alpha = 1))

# fit test set
fit_test <- predict(cv.lasso, newx = newx)
accu1 <- mean(dat_test$emotion_idx == fit_test) # 52%
accu1
# NOTE(review): `test()` and `model_best` are also defined elsewhere.
tm_test1 <- system.time(pred <- test(model_best, dat_test)) # 12.70 secs
8895bb1d0dbd47b7c0ea5de1ef79a6c2cb8722ab
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/IIS/examples/average_HDL_levels.Rd.R
|
c979f03cf484d8b063fb18a01df538d9dd86af98
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 203
|
r
|
average_HDL_levels.Rd.R
|
# Auto-extracted example code for the IIS package's average_HDL_levels
# dataset: loads the data and prints a summary.
library(IIS)
### Name: average_HDL_levels
### Title: Average HDL Levels
### Aliases: average_HDL_levels
### Keywords: datasets
### ** Examples
data(average_HDL_levels)
summary(average_HDL_levels)
|
cce8c16667b9020a45589fdcb8ef4b2247a11bb7
|
a2ac457f30f0690fc4328c3e5ca047617d72cc96
|
/HyperparameterFinalOld.R
|
c06d8a5472714b0ef90a366ae18be2a9a418508c
|
[] |
no_license
|
AtrayeeNeog/Cardio-Classifier-HIWi
|
ce0d8f6acf1383d9aa7867cc1467b94218bd92b9
|
e6e85eefc93588522fffa92ac4274c24eb91a9c2
|
refs/heads/master
| 2022-11-19T23:37:29.853748
| 2020-07-14T14:29:57
| 2020-07-14T14:29:57
| 259,164,483
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,554
|
r
|
HyperparameterFinalOld.R
|
library(mlr)
library(caret)
library(ggplot2)
library(rJava)
library(RWeka)
library("FSelector")
library(FSelectorRcpp)
library(Biocomb)
library(mlr)
library(rpart)
library(rpart.plot)
library(kernlab)
library(glmnet)
library(ROCR)
library(tidyverse)
# Loading and Preprocessing the Data:
set.seed(123)
dt <- readr::read_csv(here::here("Data", "Data.csv"))
head(dt)
ncol(dt)
nrow(dt)
which(dt$pathology == "3")
dt <- dt[-(which(dt$pathology == "3")),]
nrow(dt)
unique(dt$pathology)
which(dt$pathology == "3")
colnames(dt)
summary(dt)
#dt$gender <- ifelse(dt$gender == 'f', 1, 0)
str(dt$pathology)
length(which(dt$age > 47 & dt$pathology == "1")) #30
length(which(dt$pathology == "2")) #22
dt_old<- dt[(which(dt$age > 47 & dt$pathology == "1")),]
dt_bav <- dt[(which(dt$pathology == "2")),]
dt <- rbind(dt_old, dt_bav)
nrow(dt) #52
age <- dt$age
# getting rid of columns with 0 variance i.e columns with identical values
zv <- apply(dt, 2, function(x) length(unique(x)) == 1)
dtr <- dt[, !zv]
str(dtr)
length(colnames(dtr))
#getting rid of all the columns with near zero variance:
to_remove <- caret::nearZeroVar(round(dtr,2))
colnames(dtr[,c(6,150,157)])
dtr <- dtr[, -c(6,150,157)]
length(colnames(dtr))
# always keep the following feature(s)
cols_fixed <- c("pathology")
# apply the correlation feature selection on the following features
cols <- setdiff(names(dtr), cols_fixed)
correlationMatrix <- cor(dtr[cols], use = "pairwise.complete.obs")
# checking for NaNs:
sapply(correlationMatrix, function(x)all(any(is.na(x))))
sum(is.na(correlationMatrix))
print(correlationMatrix)
# Find highly correlated attributes (ideally>0.75):
highlyCorrelated <- caret::findCorrelation(correlationMatrix, cutoff = 0.90)
print(highlyCorrelated)
length(highlyCorrelated)
# Delete the highly correlated feature from dataset:
cols_to_remove <- cols[highlyCorrelated]
# works only with dplyr >= 1.0
dtr <- dtr %>% select(-all_of(cols_to_remove))
# works also with dplyr < 1.0
# dtr <- dtr %>% select(-cols_to_remove)
length(dtr)
colnames(dtr)
dtr$pathology <- factor(dtr$pathology)
dtr$age
dtr <- dtr %>% select(-age)
intrain <- createDataPartition(y = dtr$pathology, p= 0.7, list = FALSE)
training <- dtr[intrain,]
testing <- dtr[-intrain,]
# Hyperparameters for various Classifier Models:
train_task <- makeClassifTask(data = training, target = "pathology", positive = "2")
test_task <- makeClassifTask(data = testing, target = "pathology", positive = "2")
|
ee126cf553dffab0b21ca02c7883273e38e81c4b
|
ac2aadc49a14f95cbf92d8d3c4ddcdea5272f350
|
/R/ggvenn.R
|
1081b4b896fb7a70df676fd3158159149798ea40
|
[] |
no_license
|
AndyZHGai/ggvennEx
|
12f3c5b62cf30e09908bd81b83802d5ff03de75e
|
d5b4940d2e4ec2d573dffeb4fd59b2dc0f97aa66
|
refs/heads/main
| 2023-06-13T08:10:33.932742
| 2021-07-14T01:58:53
| 2021-07-14T01:58:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,363
|
r
|
ggvenn.R
|
#' Venn plot based on ggplot2, forked from yanlinlin82/ggvenn
#'
#' @param data a list with elements `shapes` (polygon coordinates with a
#'   numeric `group` column), `labels` (set-name positions/text) and
#'   `texts` (per-region counts `n`) -- assumed from usage below; confirm
#'   against the output of `venn.data()`.
#' @param text.size the size of label and text size, default value is 4
#' @param label.color colour of the set-name labels; `"white"` keeps them
#'   white, any other value switches them to black
#'
#' @return a ggplot object (not yet printed)
#' @export
#'
#' @author ZHonghui Gai
#' @examples
#' data <- read.csv(file = "genus.Syn.csv", row.names = 1)
#' data <- ggvennEx:::vennlist(data)
#' v.d <- venn.data(data)
#' ggvenn(v.d)
ggvenn <- function(data, text.size = 4,
                   label.color = "white") {
  # Binary colour choice for the set labels: "white" stays white,
  # anything else becomes black.
  if (label.color == "white") {
    color <- "white"
  }else{
    color <- "black"
  }
  data$shapes |>
    # Map numeric group ids to letters so the fill legend reads A/B/C...
    transform(group = LETTERS[group]) |>
    ggplot() +
    # Semi-transparent filled set shapes.
    geom_polygon(aes(x = x, y = y, group = group, fill = group),
                 alpha = 0.5) +
    # White outlines drawn on top of the fills.
    # NOTE(review): `fill = NA` inside aes() maps a constant as an
    # aesthetic; the usual idiom is `fill = NA` outside aes() -- confirm
    # the intended rendering before changing.
    geom_polygon(aes(x = x, y = y, group = group, fill = NA),
                 color = "white", size = 0.5, alpha = 0.1, linetype = 1) +
    # Set-name labels, slightly larger than the counts.
    geom_text(data = data$labels,
              aes(x = x, y = y, label = text, hjust = hjust, vjust = vjust),
              color = color, fontface = "bold.italic", size = text.size*1.2) +
    # Per-region element counts.
    geom_text(data = data$texts,
              aes(x = x, y = y, label = n, hjust = hjust, vjust = vjust),
              color = "black", fontface = "bold", size = text.size) +
    scale_x_continuous(expand = c(0.01, 0.01)) +
    scale_y_continuous(expand = c(0.01, 0.01)) +
    coord_fixed() + theme_void() + theme(legend.position = 0)
}
|
6a27c9d490374dc6ebd779a0251773e7e430b7fc
|
e09e243a46d02339cb9c012684a19465700169df
|
/01/try1.R
|
62ff86610b932579ba068217fa79cf2a70001491
|
[] |
no_license
|
kotaaaa/RBasic
|
e7adb7a950671bbe11e8ce857f85326a6b32c1ce
|
27b0c2a3323cc1b615713a45a28d33b7680e2fea
|
refs/heads/master
| 2020-03-22T23:11:12.515032
| 2018-07-13T04:08:25
| 2018-07-13T04:08:25
| 140,794,191
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 818
|
r
|
try1.R
|
# Introductory R practice script: vector creation, recycling, NA/Inf
# handling, replace(), a simple loop/function, and base graphics devices.
x <- c(1,2,3,4,5)
x
y <- c(1:5,3:1)
y
z <- c(rep(3,4),rep(c(1,5,10),c(2,3,4)))
z
a <- c("A","B","C")
a
mean(1:5)
a <- c(1,2,3)
a
1:5
seq(0,10,by=2)
seq(0, 10, length=5)
c(5,5,5,5) -c(1,2,3,4)
# Recycling: the shorter vector c(1,2) is repeated to length 4.
c(5,5,5,5)- c(1,2)
c(5,5,5,5)- 1
y <- c(2,3,4,5,6)
y - x
com <- c(1,2,Inf, 4,5)
# Inf propagates: this mean is Inf.
mean(com)
curve(sin(x*x), from=0, to=5)
dev.copy2eps(file="filename2.eps", width=6)
pdf()
curve(sin(x*x), from=0, to=5)
# NOTE(review): missing parentheses -- this prints the function object and
# does NOT close the pdf device; should be dev.off().
dev.off
a[3] <- 10
a
a <- 1:5
b <-replace(a, c(2,4), c(-2,-4))
b
d <- replace(a,c(2,4),NA)
d
replace(d, which(is.na(d)), 0)
# NA propagates: this mean is NA (would need na.rm = TRUE).
mean(d)
d
# Drop the NAs by logical subsetting, then the mean is defined.
e <- d[!is.na(d)]
e
mean(e)
n <- 10
sum <- 0
# NOTE(review): shadows base::sum for the rest of the session.
for(i in 1:n)
  sum <- sum +i
print(sum)
# Scalar absolute value (base abs() is the vectorized equivalent).
myabs <- function(x){
  if(x >= 0)
    return(x)
  else
    return(-x)
}
myabs(-5)
png()
curve(sin(x**x), from=0, to=5)
dev.off()
x<-1:10
plot(x)
x <- rnorm(3000); y <-rnorm(3000)
plot(x,y)
|
d3a59e8e811b53d620ae48a497e9a96c859cdaee
|
33956256668c50faa0708f425d8ea4d83377b2b4
|
/to_report.R
|
3e16d62d10344a2e3a466e341edf198b492d0761
|
[] |
no_license
|
hkorevaar/US_NPI_Re
|
8b3247681cf4d5ae6e80b3e247e7aa95bfb1b21c
|
2485a791474d46f28423c973736d677d21b53434
|
refs/heads/master
| 2022-12-02T04:34:59.225302
| 2020-08-06T20:00:34
| 2020-08-06T20:00:34
| 276,170,144
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,312
|
r
|
to_report.R
|
## This is the code to analyze Re and R0 estiamtes, you will need esitmates
## before running this code.
## Please see Re_rev.R to calculate R0 from growth rates.
## Please see Re_lag.R to calculate Re from stochastic back-forecasted mortality data.
## Please Re_cases.R to calculate Re from cases or mortality data without back-forecasting. We do not report
## these estimates in the main text of the paper, but we used them as a robustness check and they
## were qualitatively consistent.
## NOTE if you load data which used EpiEstim to estimate Re directly from death data
## (from Re_cases.R) we recommend using lag_days = 21 in the 'data_aggregator' function
## in order match dates with appropriate Re values.
## For cases (from Re_cases.R), the lag will be closer to 7-10 days.
## If you load data which used back-forecasted mortality to reconstruct incidence,
## (as in Re_lag.R), then the data has already been lagged appropriately so you should
## set lag_days = 0.
## write function in order to facilitate faster data aggregation with multiple
## estimation data frames
# Dependencies.  NOTE(review): require() returns FALSE instead of erroring
# when a package is missing; library() would fail fast here.
require(tidyr)
require(dplyr)
require(ggplot2)
# State NPI ("activity") levels by date, and county population density.
acts_iso <- read.csv('acts_iso.csv')
county_popden <- read.csv('county_popden.csv')
# Primary metro county of each large metropolitan area; used to subset
# several analyses below.
metro_areas <- c('New York New York City County','Massachusetts Suffolk County',
                 'Illinois Cook County', 'Pennsylvania Philadelphia County',
                 'Colorado Denver County','California Orange County','Wisconsin Milwaukee County',
                 'Texas Dallas County','Virginia Fairfax County','Texas Harris County',
                 'California Los Angeles County','Ohio Franklin County','Texas Tarrant County',
                 'Minnesota Hennepin County','North Carolina Mecklenburg County','California Sacramento County',
                 'Utah Salt Lake County','Rhode Island Providence County','Georgia Fulton County',
                 'Missouri St. Louis County','Florida Miami-Dade County')
# Join county-level Re estimates with state NPI levels and county density.
#
# covid_data: data frame with columns state, county, Date, R_e_median, ...
# lag_days:   days subtracted from Date before joining with NPI levels
#             (21 aligns mortality-based Re with the NPIs in force at
#             infection; use 0 when the input is already lagged).
#
# Reads the globals `acts_iso` and `county_popden` defined above; the
# edit to `county_popden` inside this function affects only the local
# copy, not the global object.
# Returns the joined frame with per-county day counts by NPI level,
# Re_percent (% change from the no-NPI mean), and level indicator columns.
# NOTE(review): the result is still grouped by state_county (group_by with
# no ungroup()) -- downstream code should be aware.
data_aggregator <- function(covid_data, lag_days = 21){
  # Build the three county-name keys used by the two joins.
  covid_data$county_new <- paste(covid_data$county, 'County')
  covid_data$county_state <- paste(covid_data$state, covid_data$county_new, sep = ' ')
  covid_data$state_county <- paste(covid_data$state, covid_data$county, sep = ' ')
  # Louisiana has parishes, not counties.
  covid_data$county_state[covid_data$state == 'Louisiana'] <- paste(covid_data$state_county[covid_data$state == 'Louisiana'], 'Parish')
  nlag <- lag_days
  # Shift dates back so Re is matched to the NPI level at infection time.
  covid_data$date <- covid_data$Date - nlag
  Re_acts <- inner_join(covid_data, acts_iso, by = c('date', 'state'))
  # Before the first US death (Feb 26) a missing level means "no NPI".
  Re_acts[is.na(Re_acts$level) & Re_acts$Date < '2020-02-26','level'] <- 0
  # Collapse the NYC boroughs onto the single NYT "New York City" unit.
  county_popden$county_state[county_popden$county_state %in% c(' New York Kings County','New York New York County',
                                                               'New York Bronx County','New York Queens County')] <- 'New York New York City County'
  Re_acts <- left_join(Re_acts, county_popden, by='county_state')
  Re_acts$value[is.na(Re_acts$value)] <- 0
  Re_acts$state[Re_acts$county == 'New York City'] <- 'New York City'
  Re_acts <- Re_acts %>% filter(!is.na(R_e_median))
  # Calendar time measured from Feb 15, 2020.
  begin <- as.Date("2020-02-15", format = '%Y-%m-%d')
  Re_acts$cal_time <- Re_acts$date - begin
  # Per-county summaries: days since first estimate, days spent at each
  # NPI level, and Re as a % change from the county's no-NPI mean.
  Re_acts <- Re_acts %>%
    group_by(state_county) %>%
    mutate(days = date - min(date),
           days1 = sum(level==1, na.rm = T),
           days2 = sum(level==2, na.rm = T),
           days3 = sum(level==3, na.rm = T),
           Re_percent = ((R_e_median - mean(R_e_median[level == 0]))/mean(R_e_median[level == 0]))*100)
  # Cumulative "at least level k" indicators.
  Re_acts$level1 <- ifelse(Re_acts$level == 1 | Re_acts$level == 2 | Re_acts$level == 3, 1, 0)
  Re_acts$level2 <- ifelse(Re_acts$level == 2 | Re_acts$level == 3, 1, 0)
  Re_acts$level3 <- ifelse(Re_acts$level == 3, 1, 0)
  return(Re_acts)
}
# Per-county summary of early transmission: mean Re (and its quantile
# bounds) over the first `ndays` estimates, growth rate, Re under the
# high-NPI level, population-density measures, and start time.
#
# covid_acts: output of data_aggregator()
# metros:     if TRUE, restrict to the primary metro counties listed below
# ndays:      number of initial estimates averaged for begRe/rmax/rmin/...
early_summary <- function(covid_acts, metros = TRUE, ndays = 5){
  metro_areas <- c('New York New York City County','Massachusetts Suffolk County',
                   'Illinois Cook County', 'Pennsylvania Philadelphia County',
                   'Colorado Denver County','California Orange County','Wisconsin Milwaukee County',
                   'Texas Dallas County','Virginia Fairfax County','Texas Harris County',
                   'California Los Angeles County','Ohio Franklin County','Texas Tarrant County',
                   'Minnesota Hennepin County','North Carolina Mecklenburg County','California Sacramento County',
                   'Utah Salt Lake County','Rhode Island Providence County','Georgia Fulton County',
                   'Missouri St. Louis County','Florida Miami-Dade County')
  if(metros == TRUE){sub = covid_acts %>% filter(county_state %in% metro_areas)
  }else{sub = covid_acts}
  sub_sum <- sub %>% group_by(state, county_state) %>%
    summarize(begRe = mean(R_e_median[1:ndays]),
              rate = growth_rate[1],
              rmax = mean(r_q0975[1:ndays]),
              rmin = mean(r_q0025[1:ndays]),
              #bmax = mean(R_e_median[1:ndays]) + 1.96*sd(R_e_median[1:ndays])/ndays,
              bmax = mean(R_e_q0975[1:ndays]),
              #bmin = mean(R_e_median[1:ndays]) - 1.96*sd(R_e_median[1:ndays])/ndays,
              bmin = mean(R_e_q0025[1:ndays]),
              meanRe = mean(R_e_median),
              noRe = mean(R_e_median[level ==0], na.rm = T),
              # NOTE(review): length(level == 3) is the TOTAL number of
              # rows, not the number of level-3 days -- sum(level == 3)
              # was probably intended.  hmax/hmin below divide by this
              # value, so the high-level CIs may be too narrow.  Confirm
              # before changing, as it alters reported intervals.
              time_high = length(level == 3),
              Re_high = mean(R_e_median[level==3], na.rm = T),
              hmax = mean(R_e_median[level==3], na.rm = T) + 1.96*(sd(R_e_median[level==3], na.rm = T))/time_high,
              hmin = mean(R_e_median[level==3], na.rm = T) - 1.96*(sd(R_e_median[level==3], na.rm = T))/time_high,
              pd = popden[1],
              # Robust density: population over land area in km^2.
              pd_rob = pop[1]/(landarea[1]/1e6),
              rural = rural[1],
              urban = urban[1],
              pop=pop[1],
              start = min(cal_time))
  return(sub_sum)
}
## load R0 estimates from Re_rev.R
load('county_rates.RData')
deaths_rates <- data_aggregator(Re.dat)
deaths_sub <- early_summary(deaths_rates, metros = FALSE)
deaths_sub$pd_rob[deaths_sub$state == 'District of Columbia'] <- 5047
## create summary table for supplement
summary_table <- data.frame(county = deaths_sub$county_state, R0 = deaths_sub$begRe, density = deaths_sub$pd_rob)
summary_table <- summary_table[order(summary_table$density, decreasing = F),]
## log density
deaths_sub$log_den <- log10(deaths_sub$pd_rob)
deaths_sub$logpop <- log10(deaths_sub$pop)
## linear fit for density
fit <- lm(begRe ~ log_den, data = deaths_sub)
## update with segmented fit
fit_seg <- segmented(fit, seg.Z = ~log_den, psi = 3)
## linear fit for density, metro counties only
fit_sub <- lm(begRe ~ log10(pd_rob), data = deaths_sub %>% filter(county_state %in% metro_areas))
## linear fit for pop
deaths_sub$pop[deaths_sub$state == 'District of Columbia'] <- 705749
fit_pop <- lm(begRe ~ log10(pop), data = deaths_sub)
## run this to make Figure One
## density plot with predicted values and ribbon CI
pdat <- with(deaths_sub, data.frame(log_den = log10(seq(min(pd_rob),
max(pd_rob), length = 100))))
tmp2 <- predict(fit_seg, newdata = pdat, se.fit = TRUE)
tmp2$pop_den <- 10^(pdat$log_den)
denplot <- ggplot() + geom_point(aes(x=pd_rob, y=begRe), data = deaths_sub) +
geom_linerange(aes(x=pd_rob, ymin = bmin, ymax = bmax), alpha = .25, data = deaths_sub)+
geom_line(aes(x=tmp2$pop_den, y=tmp2$fit)) +
geom_ribbon(aes(x=tmp2$pop_den, ymax=tmp2$fit + 1.96*tmp2$se.fit, ymin = tmp2$fit - 1.96*tmp2$se.fit), alpha = .5) +
scale_x_log10() + theme_classic() + ylab(expression(Inferred ~R[0])) + xlab('log population density') +
ggtitle('Early Transmission and Population Density') + ylim(0,9.5)
## do the same for metro counties
pdat_sub <- with(deaths_sub %>% filter(county_state %in% metro_areas), data.frame(pd_rob = (seq(min(pd_rob),
max(pd_rob), length = 100))))
tmp2_sub <- predict(fit_sub, newdata = pdat_sub, se.fit = TRUE)
tmp2_sub$pd_rob <- pdat_sub$pd_rob
inset <- ggplot() +
geom_point(aes(x=pd_rob, y=begRe), data = deaths_sub %>% filter(county_state %in% metro_areas), col = 'tomato2') +
scale_x_log10() + theme_classic() +
ylab(NULL) + xlab(NULL) +
geom_line(aes(x=tmp2_sub$pd_rob, y=tmp2_sub$fit), col = 'tomato2') +
geom_ribbon(aes(x=tmp2_sub$pd_rob, ymax=tmp2_sub$fit + 1.96*tmp2_sub$se.fit,
ymin = tmp2_sub$fit - 1.96*tmp2_sub$se.fit), alpha = .5, fill = 'tomato2') +
geom_linerange(aes(x=pd_rob, ymin = bmin, ymax = bmax), alpha = .25,
data = deaths_sub %>% filter(county_state %in% metro_areas), col = 'tomato2') +
ggtitle('Primary Metro Counties')
## population plot with fit and CI ribbon
pdat_pop <- with(deaths_sub, data.frame(pop = (seq(min(pop),
max(pop), length = 100))))
tmp_pop <- predict(fit_pop, newdata = pdat_pop, se.fit = TRUE)
tmp_pop$pop <- pdat_pop$pop
popplot <- ggplot() + geom_point(aes(x=pop, y=begRe), data = deaths_sub) +
geom_linerange(aes(x=pop, ymin = bmin, ymax = bmax), alpha = .25, data = deaths_sub)+
geom_line(aes(x=tmp_pop$pop, y=tmp_pop$fit)) +
geom_ribbon(aes(x=tmp_pop$pop, ymax=tmp_pop$fit + 1.96*tmp_pop$se.fit, ymin = tmp_pop$fit - 1.96*tmp_pop$se.fit), alpha = .5) +
scale_x_log10() + theme_classic() + ylab(expression(Inferred ~R[0])) + xlab('log population size') +
ggtitle('Early Transmission and Population Size') + ylim(0,9.5)
## population inset for metro counties
pdat_sub_pop <- with(deaths_sub %>% filter(county_state %in% metro_areas), data.frame(pop = (seq(min(pop),
max(pop), length = 100))))
fit_sub_pop <- fit_sub <- lm(begRe ~ log10(pop), data = deaths_sub %>% filter(county_state %in% metro_areas))
tmp_pop_sub <- predict(fit_sub_pop, newdata = pdat_sub_pop, se.fit = TRUE)
tmp_pop_sub$pop <- pdat_sub_pop$pop
inset_pop <- ggplot() +
geom_point(aes(x=pop, y=begRe), data = deaths_sub %>% filter(county_state %in% metro_areas), col = 'tomato2') +
scale_x_log10() + theme_classic() +
ylab(NULL) + xlab(NULL) +
geom_line(aes(x=tmp_pop_sub$pop, y=tmp_pop_sub$fit), col = 'tomato2') +
geom_ribbon(aes(x=tmp_pop_sub$pop, ymax=tmp_pop_sub$fit + 1.96*tmp_pop_sub$se.fit, ymin = tmp_pop_sub$fit - 1.96*tmp_pop_sub$se.fit),
fill = 'tomato2', alpha = .5) +
geom_linerange(aes(x=pop, ymin = bmin, ymax = bmax), alpha = .25,
data = deaths_sub %>% filter(county_state %in% metro_areas), col = 'tomato2') +
ggtitle('Primary Metro Counties')
## make Figure One
den_inset <- ggdraw(denplot ) +
draw_plot(inset, .5, .55, .45, .4)
pop_inset <- ggdraw(popplot ) +
draw_plot(inset_pop, .5, .55, .45, .4)
plot_grid(den_inset, pop_inset, labels = c('A','B'))
#### continuous estimates of Re from Re_lag.R
load('county_Re.RData')
Re_acts <- data_aggregator(Re.dat, lag_days = 0)
Re_acts$state[Re_acts$county == 'New York City'] <- 'New York City'
## drop outliers (generally single day spikes resulting from instability in estimates)
Re_acts <- Re_acts %>% filter(R_e_median < 20)
## we will drop observations from post opening
Re_acts$post_open <- ifelse(Re_acts$date > as.Date('2020-05-17', format = '%Y-%m-%d') & Re_acts$level != 3, 1, 0)
## run this to make Figure 2
## want county level means, will plot these with lines connecting counties
Re_grouped <- Re_acts %>%
filter(post_open == 0) %>%
group_by(state_county,level, state) %>%
summarize(meanRe = mean(R_e_median, na.rm = T),
pd = popden[1],
pop = pop[1])
ggplot(Re_grouped) + geom_point(aes(x=level,meanRe,group=level,col=level)) +
geom_line(aes(x=level,meanRe,group=state_county), alpha = .2) +
geom_hline(yintercept = 3, lty = 3)+
geom_hline(yintercept = 1, lty = 3)+
facet_wrap(vars(state), ncol = 7, nrow = 7) +
scale_y_log10() +
scale_color_manual(values = c('grey','chartreuse3','dodgerblue','tomato2'),
labels = c('none','low','med','high'), name = 'response') +
ggtitle(expression(paste('Lagged', ~R[e], ' estimates by state response level',sep = ' '))) +
xlab('response level') + ylab(expression(Inferred ~R[e])) + theme_classic() +
scale_x_discrete(breaks = c(0,1,2,3), labels = c('none','low','med','high')) +
theme(axis.text.x = element_text(angle = 45))
### examine outliers, find they are mostly rural and start late
Re_delta <- Re_acts %>%
filter(post_open == 0) %>%
group_by(state_county, county_state, state) %>%
summarize(pd = popden[1],
Re0 = if(length(R_e_median[level==0])>5){mean(R_e_median[level == 0][(length(R_e_median[level==0])-5):length(R_e_median[level==0])])}
else{NA},
Re1 = if(length(R_e_median[level==1])>5){mean(R_e_median[level == 1][(length(R_e_median[level==1])-5):length(R_e_median[level==1])])}
else{NA},
Re2 =if(length(R_e_median[level==2])>5){mean(R_e_median[level == 2][(length(R_e_median[level==2])-5):length(R_e_median[level==2])])}
else{NA},
Re3 = if(length(R_e_median[level == 3])>5){mean(R_e_median[level == 3][(length(R_e_median[level==3])-5):length(R_e_median[level==3])])}
else{NA})
Re_outlier <- Re_grouped %>%
group_by(state_county) %>%
dplyr::summarize(diff = if(length(meanRe[level == 2] - meanRe[level == 3])>0){meanRe[level == 2] - meanRe[level == 3]}else{NA})
Re_outlier$out <- ifelse(Re_outlier$diff < -1, 1, 0)
Re_outlier$out[is.na(Re_outlier$out)] <- 0
outliers <- Re_outlier$state_county[Re_outlier$out == 1]
## look at the distribution of density in places with increase Re between level 2 and 3
Re_acts %>% filter(state_county %in% outliers) %>%
plyr::summarize(dist = quantile(popden, probs = seq(from = .1, to = 1, by = .1)))
#### Now we look at change in Re by state and map these
Re_grouped_wide <- Re_grouped %>% pivot_wider(names_from = 'level',values_from = 'meanRe')
Re_grouped_wide %>% ggplot() + geom_point(aes(x=pd,y=`2`-`3`))
Re_state_delta <- Re_grouped_wide %>%
group_by(state) %>%
summarize(delta_one = mean(`1`-`0`, na.rm = T),
delta_two = mean(`2`-`1`, na.rm = T),
delta_three = mean(`3`-`2`, na.rm = T))
States <- map_data('state')
Re_state_delta$region <- tolower(Re_state_delta$state)
States_data <- left_join(States, Re_state_delta, by = 'region')
map1 <- ggplot() +
geom_polygon( data=States_data, aes(x=long, y=lat, group=group, fill = delta_one),
color="white" ) + theme_void() +
scale_fill_gradient2(high= 'tomato2',low= 'chartreuse3', limits = c(-4.1,2), na.value = 'grey70',
name = expression(paste('change in',~R[e]))) +
ggtitle('No NPI to Low') +
theme(legend.position=c(0.89,0.25),
legend.text = element_text(size = 20),title = element_text(size = 23))
map2 <- ggplot() +
geom_polygon( data=States_data, aes(x=long, y=lat, group=group, fill = delta_two),
color="white" ) +
scale_fill_gradient2(high= 'tomato2',low= 'chartreuse3', limits = c(-4.1,2), na.value = 'grey70') +
theme_void() +guides(fill = FALSE) +
theme(title = element_text(size = 23))+
ggtitle('Low to Medium')
## we take places that have increased more that 50% from
## https://www.npr.org/sections/health-shots/2020/03/16/816707182/map-tracking-the-spread-of-the-coronavirus-in-the-u-s
## accessed june, 25
spikes <- c('oklahoma','florida','arizona','texas','idaho','kansas','oregon','georgia','tennessee',
'washington','arkansas','california','ohio','alabama')
States_data$spike <- ifelse(States$region %in% tolower(spikes), 1, 0)
map3 <- ggplot() +
geom_polygon( data=States_data, aes(x=long, y=lat, group=group, fill = delta_three,
color=as.factor(spike))) + theme_void() +
scale_color_manual(values = c('white','tomato2'), na.value = 'grey70',name = 'June cases + > 50%', labels = c('no','yes')) +
scale_fill_gradient2(high= 'tomato2',low= 'chartreuse3', limits = c(-4.1,2), na.value = 'grey70',
name = expression(paste('change in',~R[e]))) +
geom_polygon( data=States_data %>% filter(spike == 1), aes(x=long, y=lat, group=group, fill = delta_three),
color='tomato2') +
theme(legend.position=c(0.15,0.2),
legend.text = element_text(size = 20), title = element_text(size = 23)) + ggtitle('Medium to High') + guides(fill = FALSE)
plot_grid(map1,map2,map3, nrow = 1)
#### We also look at county by county differences
#### First create a data frame to store results from t tests
#### Then calculate t tests for counties with 5 obs in the two levels being compared
## Per-county Welch t-tests of R_e between adjacent NPI levels (0 vs 1,
## 1 vs 2, 2 vs 3), restricted to the pre-reopening period.  A comparison
## is run only when both levels have more than 5 observations.
## Columns: t.val* = t statistic, mean* = mean R_e at the later level
## minus the earlier level, sig* = 1 when the 95% CI of the difference
## excludes zero, pd = the county's population density.
stat_sig <- data.frame(state_county = unique(Re_acts$state_county))
stat_sig$t.val1 <- stat_sig$t.val2 <- stat_sig$t.val3 <- NA
stat_sig$mean1 <- stat_sig$mean2 <- stat_sig$mean3 <- NA
stat_sig$sig1 <- stat_sig$sig2 <- stat_sig$sig3 <- NA
stat_sig$pd <- NA
Re_pre <- Re_acts %>% filter(post_open == 0)
for (county in stat_sig$state_county) {
  sub <- Re_pre[Re_pre$state_county == county, ]
  # Level 0 vs level 1.
  if (length(sub$R_e_median[sub$level == 0]) > 5 && length(sub$R_e_median[sub$level == 1]) > 5) {
    t <- t.test(sub$R_e_median[sub$level == 0], sub$R_e_median[sub$level == 1])
    # Fixed: was `stat_sig$state_count` (typo), which resolved to
    # state_county only through data.frame `$` partial matching and would
    # silently break if a column with that prefix were ever added.
    stat_sig[stat_sig$state_county == county, 't.val1'] <- t$statistic
    stat_sig[stat_sig$state_county == county, 'sig1'] <- ifelse(sign(t$conf.int[1]) == sign(t$conf.int[2]),
                                                                1, 0)
    stat_sig[stat_sig$state_county == county, 'mean1'] <- t$estimate[2] - t$estimate[1]
  }
  # Level 1 vs level 2.
  if (length(sub$R_e_median[sub$level == 1]) > 5 && length(sub$R_e_median[sub$level == 2]) > 5) {
    t <- t.test(sub$R_e_median[sub$level == 1], sub$R_e_median[sub$level == 2])
    stat_sig[stat_sig$state_county == county, 't.val2'] <- t$statistic
    stat_sig[stat_sig$state_county == county, 'sig2'] <- ifelse(sign(t$conf.int[1]) == sign(t$conf.int[2]),
                                                                1, 0)
    stat_sig[stat_sig$state_county == county, 'mean2'] <- t$estimate[2] - t$estimate[1]
  }
  # Level 2 vs level 3.
  if (length(sub$R_e_median[sub$level == 2]) > 5 && length(sub$R_e_median[sub$level == 3]) > 5) {
    t <- t.test(sub$R_e_median[sub$level == 2], sub$R_e_median[sub$level == 3])
    stat_sig[stat_sig$state_county == county, 't.val3'] <- t$statistic
    stat_sig[stat_sig$state_county == county, 'sig3'] <- ifelse(sign(t$conf.int[1]) == sign(t$conf.int[2]),
                                                                1, 0)
    stat_sig[stat_sig$state_county == county, 'mean3'] <- t$estimate[2] - t$estimate[1]
  }
  stat_sig[stat_sig$state_county == county, 'pd'] <- sub$popden[1]
}
## Now we look at how change in Re is associated with starting values
Re_state_delta <- Re_grouped_wide %>%
group_by(state_county, state) %>%
summarize(delta_one = mean(`1`-`0`, na.rm = T),
r0 = `0`,
r1 = `1`,
r2 = `2`,
r3 = `3`,
delta_two = mean(`2`-`1`, na.rm = T),
delta_three = mean(`3`-`2`, na.rm = T),
pd = pd[1])
d1 <- (lm(delta_one ~ r0 , data = Re_state_delta))
d2 <- (lm(delta_two ~ r1, data = Re_state_delta))
d3 <- (lm(delta_three ~ r2, data = Re_state_delta))
## robustness checks for regression
metro_areas <- c('New York New York City','Massachusetts Suffolk',
'Illinois Cook', 'Pennsylvania Philadelphia',
'Colorado Denver','California Orange','Wisconsin Milwaukee',
'Texas Dallas','Virginia Fairfax','Texas Harris',
'California Los Angeles','Ohio Franklin','Texas Tarrant',
'Minnesota Hennepin','North Carolina Mecklenburg','California Sacramento',
'Utah Salt Lake','Rhode Island Providence','Georgia Fulton',
'Missouri St. Louis','Florida Miami-Dade')
d1_check <- (lm(delta_one ~ log10(pd), Re_state_delta %>% filter(state_county %in% metro_areas)))
Re_state_delta %>% filter(state_county %in% metro_areas) %>%
ggplot() + geom_point(aes(x=log10(pd),y=delta_one))
Re_state_delta %>% filter(state_county %in% metro_areas) %>%
ggplot() + geom_point(aes(x=log10(pd),y=r3))
d1_check <- lm(r1 ~ r0, Re_state_delta)
confint(d1_check)
d2_check <- lm(r2 ~ r1, Re_state_delta)
confint(d2_check)
d3_check <- lm(r3 ~ r2, Re_state_delta)
confint(d3_check)
## You can also examine these, but the r-squared is very low
f1 <- (lmer(R_e_median ~ level + (1|state), data = Re_acts %>% filter(level %in% c(0,1))))
f2 <- (lmer(R_e_median ~ level + (1|state), data = Re_acts %>% filter(level %in% c(1,2))))
f3 <- (lmer(R_e_median ~ level + (1|state), data = Re_acts %>% filter(level %in% c(2,3))))
## make up some data where Re2<Re1, and Re2a is smaller than Re1- but crucially also smaller for big ones
Re1 <- rnorm(1000,3,2)
Re2 <- Re1-rnorm(length(Re1),1,1)
Re2a <- Re1-Re1*0.5+rnorm(length(Re1),0,0.5)
plot(Re1,Re2)
points(Re1,Re2a,col=4)
abline(0,1) ## ~ all points are below the 0,1
## for the first slope is not different from 1.
fit <- lm(Re2~Re1)
summary(fit)
abline(fit, col=2)
## for the second it is
fit1 <- lm(Re2a~Re1)
summary(fit1)
abline(fit1, col=2)
## and confidence interval is below 1
fit1$coeff[2]+c(-1,1)*1.96*summary(fit)$coeff[2,2]
|
7c642e64585ba143a1911dad02f1ae3be62ed35a
|
3b2b5636282ae842def1c16265cccac19f9d125a
|
/R/timeTicks.R
|
7b2cc4e412f687fade21939c5829ddb55055bebf
|
[
"BSD-2-Clause"
] |
permissive
|
ilkkavir/LPI.gdf
|
06cf2ccb0ed19b7a04df417fe93cef2f7e530115
|
088a53c3624c68406b87ccaf8d1451ef678c3b62
|
refs/heads/master
| 2023-05-28T09:56:00.948551
| 2023-05-15T13:23:37
| 2023-05-15T13:23:37
| 205,375,323
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,971
|
r
|
timeTicks.R
|
## file:timeTicks.R
## (c) 2010- University of Oulu, Finland
## Written by Ilkka Virtanen <ilkka.i.virtanen@oulu.fi>
## Licensed under FreeBSD license.
##
##
## Time axis tick marks
##
## Arguments:
## times A vector of times [s], only the smallest and largest
## value are used
## tickres Tick mark resolution in seconds
##
## Returns:
## A list with elements "tick" (tick mark positions)
## and "string" (tick mark labels)
##
timeTicks <- function(times,tickres=NULL)
{
  ## Tick mark positions and "HH:MM" labels for a time axis.
  ##
  ## times:   numeric vector of times in seconds (only min and max are
  ##          used; assumed non-negative -- TODO confirm for this data)
  ## tickres: tick resolution in minutes; when NULL a resolution is
  ##          chosen from the total span (coarser ticks, longer spans)
  ##
  ## Returns a list with elements "tick" (tick positions, seconds) and
  ## "string" (tick labels, "HH:MM" time of day).
  if (is.null(tickres)){
    ttot <- max(times)-min(times)
    tickres <- 240
    if (ttot<(60*60*24)){
      tickres <- 120
    }
    if (ttot<(60*60*12)){
      tickres <- 60
    }
    if (ttot<(60*60*6)){
      tickres <- 30
    }
    if (ttot<(60*60*3)){
      tickres <- 15
    }
    if (ttot<(60*60*2)){
      tickres <- 10
    }
    if (ttot<(60*60)){
      tickres <- 5
    }
    if (ttot<(60*5)){
      tickres <- 1
    }
  }
  ## One tick at every multiple of the resolution inside the span; the
  ## first tick is the multiple at or below min(times).  This replaces
  ## the original per-second scan with a grow-in-loop dedup, which was
  ## O(span) in seconds and errored when max(times) == min(times)
  ## (seq(2, 1) counts downwards and indexed past the vector).
  step <- 60 * tickres
  ttick <- seq(floor(min(times) / step) * step, max(times), by = step)
  ## Time-of-day labels; %% 24 wraps multi-day spans onto a 24 h clock.
  hh <- as.integer((ttick %/% 3600) %% 24)
  mm <- as.integer((ttick %% 3600) %/% 60)
  tstr <- sprintf("%02d:%02d", hh, mm)
  result <- list()
  result[['tick']] <- ttick
  result[['string']] <- tstr
  return(result)
}
|
5fd504d05b72db953e2ebb49870f38eea04deef9
|
84d4b0f90866b8ef5ab3bd325a295d46b195d20f
|
/man/raman_hdpe.Rd
|
e2e3b319f9f27b491ef7408e429b8cdfea21b228
|
[
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
rainyl/OpenSpecy
|
310d8a42bdd6abd39f5c8b1bcd0046bf3338a158
|
92c72594abaaf91925d7c0550e791de5a149192d
|
refs/heads/main
| 2023-05-11T06:28:30.482481
| 2021-06-01T18:04:47
| 2021-06-01T18:04:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 850
|
rd
|
raman_hdpe.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/raman_hdpe.R
\docType{data}
\name{raman_hdpe}
\alias{raman_hdpe}
\title{Sample Raman spectrum}
\format{
A data table containing 964 rows and 2 columns:
\tabular{ll}{
\code{wavenumber}: \tab spectral wavenumber [1/cm] \cr
\code{intensity}: \tab absorbance values [-] \cr
}
}
\description{
Raman spectrum of high-density polyethylene (HDPE).
}
\examples{
data("raman_hdpe")
}
\references{
Cowger W, Gray A, Christiansen SH, Christiansen SH, Christiansen SH,
De Frond H, Deshpande AD, Hemabessiere L, Lee E, Mill L, et al. (2020).
“Critical Review of Processing and Classification Techniques for Images and
Spectra in Microplastic Research.” \emph{Applied Spectroscopy},
\strong{74}(9), 989–1010. \doi{10.1177/0003702820929064}.
}
\author{
Win Cowger
}
\keyword{data}
|
3f825c97f61d1b96e930387f1a409485deccf72f
|
0db9b9ad4b00a908d9ddba1f157d2d3bba0331c4
|
/man/dist_unit_options.Rd
|
84540c3726a5cd3a453772ecc0b238543bab3876
|
[
"MIT"
] |
permissive
|
elipousson/sfext
|
c4a19222cc2022579187fe164c27c78470a685bb
|
bbb274f8b7fe7cc19121796abd93cd939279e30a
|
refs/heads/main
| 2023-08-18T15:29:28.943329
| 2023-07-19T20:16:09
| 2023-07-19T20:16:09
| 507,698,197
| 16
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 414
|
rd
|
dist_unit_options.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{dist_unit_options}
\alias{dist_unit_options}
\title{Distance units (vector)}
\format{
A character vector with 86 names, plural names, aliases, and symbols
for distance units.
}
\usage{
dist_unit_options
}
\description{
A vector of supported distance units pulled from \code{dist_units}.
}
\keyword{datasets}
|
b90953e7dc3a272e42d2fb7d16941e6a23e40d34
|
0ad74abaed93e23fe196c7556b2ba74090234697
|
/cachematrix.R
|
d38173861e862f28fb35076966846019b5bb72fd
|
[] |
no_license
|
plancksconstant/ProgrammingAssignment2
|
da584e7f70283c32e6a8dabc1006543297e79fd0
|
ddaa36f7ebc367716a1ad39344e2f7a7eb6fc43c
|
refs/heads/master
| 2021-09-01T04:07:37.202978
| 2017-12-24T17:06:28
| 2017-12-24T17:06:28
| 115,274,357
| 0
| 0
| null | 2017-12-24T16:15:32
| 2017-12-24T16:15:31
| null |
UTF-8
|
R
| false
| false
| 1,042
|
r
|
cachematrix.R
|
## These functions are for calculating the inverse
## of a matrix, if the inverse does not already exist
## in the cache.
## This function returns a list of functions. These
## will set a matrix, get the matrix, set the matrix
## inverse, and get the matrix inverse
## Build a cache-aware wrapper around a matrix.
##
## Returns a list of four accessor closures sharing one environment:
##   set(y)        -- store a new matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setinvm(...)  -- record a computed inverse in the cache
##   getinvm()     -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates the cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinvm = function(invmat) cached_inverse <<- invmat,
    getinvm = function() cached_inverse
  )
}
## Return the inverse of the matrix held in a makeCacheMatrix object.
##
## x   -- list of accessor closures as produced by makeCacheMatrix
## ... -- forwarded to solve()
##
## When an inverse is already cached it is returned immediately (with a
## status message); otherwise it is computed with solve(), stored in the
## cache for next time, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinvm()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setinvm(inv)
  inv
}
|
4912d2748eede745a68cbb5a4ee77b251341b4b1
|
e02b906d4d3c548085954f3832afac30c7137228
|
/R/data-pinna.R
|
b2ef64a942fdf2b95b2eb2fbda4b36708c896750
|
[] |
no_license
|
poissonconsulting/bauw
|
151948ab0dc55649baff13b2d79a551b6fc5a49d
|
47b12dc140ba965ae8c89693c0d8d8fefa0fd7db
|
refs/heads/main
| 2023-06-15T10:56:20.506561
| 2022-12-16T20:00:03
| 2022-12-16T20:00:03
| 78,153,890
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 636
|
r
|
data-pinna.R
|
#' Pen shell detection data
#'
#' The pen shell (\emph{Pinna nobilis}) detection data from the Balearic Islands
#' in 2010.
#'
#' The variables are as follows:
#' \itemize{
#' \item \code{d1} indicator for shell detected by first team.
#' \item \code{d2} indicator for shell detected by second team.
#' \item \code{width} shell width (cm).
#' }
#'
#' @format A data frame with 143 rows and 3 columns
#' @source Kery & Schaub (2011 p.166) courtesy of Iris Hendriks and colleagues
#' @references
#' Kery M & Schaub M (2011) Bayesian Population Analysis
#' using WinBUGS. Academic Press. (\url{http://www.vogelwarte.ch/bpa})
"pinna"
|
e52e4b2979c01232206743d404bc8b520054213a
|
83ce3b39e88c03e2c98ef2f05174195708ac3dbe
|
/R/groupLocation.R
|
eef40581a5e175c27afdaad121c4ac99e2c119db
|
[] |
no_license
|
cran/shotGroups
|
e02467ffb36b8e528fa1c230b2a718512159fc19
|
ae04a8371aa1cc18af598413d1bc41d389762acb
|
refs/heads/master
| 2022-10-01T18:19:20.943958
| 2022-09-17T18:06:04
| 2022-09-17T18:06:04
| 17,699,651
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,522
|
r
|
groupLocation.R
|
## Generic: location measures for a group of (x,y) shot coordinates.
## Dispatches on the class of `xy`; see the data.frame and default
## methods below.
groupLocation <-
function(xy, level=0.95, plots=TRUE, bootCI="none",
         dstTarget, conversion) {
    UseMethod("groupLocation")
}
## Data-frame method: fills in dstTarget/conversion from the data when
## the caller gave no override, reduces the data frame to an (x,y)
## matrix, and delegates to the matrix (default) method.
groupLocation.data.frame <-
function(xy, level=0.95, plots=TRUE, bootCI="none",
         dstTarget, conversion) {
    ## target distance: caller override wins; otherwise use the
    ## 'distance' column if present, else NA
    if(missing(dstTarget)) {
        dstTarget <- NA_real_
        if(hasName(xy, "distance")) {
            dstTarget <- xy[["distance"]]
        }
    }
    ## measurement-unit conversion: infer from the data unless given
    if(missing(conversion)) {
        conversion <- determineConversion(xy)
    }
    ## strip down to the (x,y) coordinate matrix and dispatch again
    coordMat <- getXYmat(xy)
    groupLocation(coordMat,
                  level=level, plots=plots, bootCI=bootCI,
                  dstTarget=dstTarget, conversion=conversion)
}
## Workhorse method for groupLocation(): takes an (x,y)-coordinate
## matrix of points of impact (point of aim assumed at the origin) and
## returns a list 'res' with
##   ctr, ctrRob          - (robust MCD) group center
##   distPOA, distPOArob  - (robust) distance from center to point of aim
##   Hotelling            - Hotelling's T^2 test of center == (0,0)
##   ctrXci, ctrYci       - t-based (and optionally bootstrap) CIs for
##                          the center's x- and y-coordinates
## 'level' is the CI level, 'bootCI' selects bootstrap CI types,
## 'dstTarget'/'conversion' feed unit conversion via makeMOA().
## With plots=TRUE a scatter plot of the group is drawn as a side effect.
groupLocation.default <-
function(xy, level=0.95, plots=TRUE, bootCI="none",
         dstTarget, conversion) {
    ## fail fast on malformed input
    if(!is.matrix(xy)) { stop("xy must be a matrix") }
    if(!is.numeric(xy)) { stop("xy must be numeric") }
    if(ncol(xy) != 2L) { stop("xy must have two columns") }
    if(!is.numeric(level)) { stop("level must be numeric") }
    if(level <= 0) { stop("level must be > 0") }
    ## several bootstrap CI types may be requested at once
    bootCI <- match.arg(bootCI, choices=c("none", "norm", "basic", "perc", "bca"), several.ok=TRUE)
    ## check if CI level is given in percent
    if(level >= 1) {
        while(level >= 1) { level <- level / 100 }
        warning(c("level must be in (0,1) and was set to ", level))
    }
    ## collapse per-shot target distances to a single value; fall back
    ## to NA when missing, all-NA, or heterogeneous
    dstTarget <- if(missing(dstTarget) ||
                    all(is.na(dstTarget)) ||
                    (length(unique(dstTarget)) > 1L)) {
        NA_real_
    } else {
        mean(dstTarget)
    }
    ## same collapsing logic for the unit-conversion string
    conversion <- if(missing(conversion) ||
                     all(is.na(conversion)) ||
                     (length(unique(conversion)) > 1L)) {
        NA_character_
    } else {
        unique(conversion)
    }
    #####-----------------------------------------------------------------------
    ## prepare data
    X <- xy[ , 1] # x-coords
    Y <- xy[ , 2] # y-coords
    Npts <- nrow(xy) # number of observations
    res <- vector("list", 0) # empty list to later collect the results
    ## NOTE(review): the inner 'haveRob <- FALSE' is redundant - the
    ## value of the if() expression is what gets assigned
    haveRob <- if(Npts < 4L) { # can we do robust estimation?
        warning("We need >= 4 points for robust estimations")
        haveRob <- FALSE
    } else {
        TRUE
    } # if(nrow(xy) < 4L)
    #####-----------------------------------------------------------------------
    ## location measures
    res$ctr <- colMeans(xy) # center of joint (x,y)-distribution
    ## robust estimation of center
    res$ctrRob <- if(haveRob) {
        robustbase::covMcd(xy)$center
    } else {
        NULL
    } # if(haveRob)
    distPOA <- sqrt(sum(res$ctr^2)) # distance to point of aim
    res$distPOA <- makeMOA(distPOA, dst=dstTarget, conversion=conversion)
    res$distPOArob <- if(haveRob) { # rob distance to point of aim
        distPOArob <- sqrt(sum(res$ctrRob^2))
        makeMOA(distPOArob, dst=dstTarget, conversion=conversion)
    } else {
        NULL
    } # if(haveRob)
    ## Hotelling's T^2 test for equality of (x,y)-center with point of aim (0,0)
    res$Hotelling <- if(Npts > 2L) {
        anova(lm(cbind(X, Y) ~ 1), test="Hotelling-Lawley")
    } else {
        warning("We need >= 3 points for Hotelling's T^2 test")
        NULL
    } # if(Npts > 2L)
    #####-----------------------------------------------------------------------
    ## confidence intervals for x- and y-coords
    ## parametric: t-CI
    alpha <- 1-level # alpha-level
    tCrit <- qt(c(alpha/2, 1-alpha/2), Npts-1) # critical t-values left and right
    Mx <- mean(X) # mean x-coords
    My <- mean(Y) # mean y-coords
    sMx <- sd(X) / sqrt(Npts) # standard error of the mean x
    sMy <- sd(Y) / sqrt(Npts) # standard error of the mean y
    ## rev() orders the bounds (lower, upper) since tCrit is (neg, pos)
    ctrXci <- rbind(t=rev(Mx-tCrit*sMx)) # t-CI x-coords
    ctrYci <- rbind(t=rev(My-tCrit*sMy)) # t-CI y-coords
    ## non-parametric: bootstrap-CIs for center (basic and BCa)
    if(!("none" %in% bootCI)) { # do bootstrap CIs
        NrplMin <- 1499L # minimum number of replications
        Nrpl <- if("bca" %in% bootCI) { # number of replications
            max(NrplMin, Npts+1) # BCa needs at least this number of points
        } else {
            NrplMin
        }
        ## group center for one replication
        getCtr <- function(x, idx) { colMeans(x[idx, , drop=FALSE]) }
        bs <- boot::boot(xy, statistic=getCtr, R=Nrpl) # bootstrap centers
        xCIboot <- boot::boot.ci(bs, conf=level, type=bootCI, index=1) # x
        yCIboot <- boot::boot.ci(bs, conf=level, type=bootCI, index=2) # y
        ## CI type names in output structure of boot.ci()
        CInames <- c(basic="basic", norm="normal", perc="percent", bca="bca")
        CItype <- CInames[bootCI]
        ## the last two entries of each boot.ci component are the CI bounds
        xCImat <- vapply(CItype, function(x) {
            len <- length(xCIboot[[x]])
            xCIboot[[x]][(len-1):len] }, numeric(2))
        yCImat <- vapply(CItype, function(x) {
            len <- length(yCIboot[[x]])
            yCIboot[[x]][(len-1):len] }, numeric(2))
        ## add bootstrap CIs to parametric CI
        ctrXci <- rbind(ctrXci, t(xCImat))
        ctrYci <- rbind(ctrYci, t(yCImat))
    }
    res$ctrXci <- ctrXci
    res$ctrYci <- ctrYci
    colnames(res$ctrXci) <- c("x (", "x )")
    colnames(res$ctrYci) <- c("y (", "y )")
    if(plots) {
        ## infer (x,y)-coord units from conversion
        unitXY <- na.omit(getUnits(conversion, first=FALSE))
        unitDst <- na.omit(getUnits(conversion, first=TRUE))
        devNew <- getDevice() # platform-dependent window open
        ## distance to target may be heterogeneous
        dstTargetPlot <- paste(unique(round(na.omit(dstTarget))), collapse=", ")
        #####-------------------------------------------------------------------
        ## diagram: 2D-scatter plot for the (x,y)-distribution
        devNew() # open new diagram
        plot(Y ~ X, asp=1, main="Group (x,y)-coordinates", pch=16,
             sub=paste("distance:", dstTargetPlot, unitDst),
             xlab=paste0("X [", unitXY, "]"), ylab=paste0("Y [", unitXY, "]"))
        abline(v=0, h=0, col="gray") # add point of aim
        ## add (robust) group center
        points(res$ctr[1], res$ctr[2], col="blue", pch=4, lwd=2, cex=1.5)
        if(haveRob) {
            points(res$ctrRob[1], res$ctrRob[2], col="red",
                   pch=4, lwd=2, cex=1.5)
        } # if(haveRob)
        ## add legend
        legend(x="bottomleft", legend=c("group center", "robust group center"),
               col=c("blue", "red"), pch=4, lty=NA, lwd=2, bg=rgb(1, 1, 1, 0.7))
    } # if(plots)
    #####-----------------------------------------------------------------------
    ## return all the collected numerical results and tests
    return(res)
}
|
a1b978510b7d67008efad62f49b3f228ec6d2ddc
|
16eaf576186c56624c4ecde31a92e4cdfa2c3106
|
/Q5/q5.r
|
ef8e2cdbc6bdf6db981245ee90ca45e46dabb4d6
|
[] |
no_license
|
rithvik-vasishta/DA_Lab
|
26d97a7497c0c9b16a7304335dac1c5c6c3ccbf2
|
9e28a5c60b6ddcf7c6e7eb0ae365817f4af1df2e
|
refs/heads/master
| 2023-03-12T05:17:35.099752
| 2021-02-15T14:21:18
| 2021-02-15T14:21:18
| 338,852,632
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 544
|
r
|
q5.r
|
# Toy advertising data set: sales as a function of TV and radio budgets.
df <- data.frame(
  sales        = c(2, 5, 7, 10, 12, 15, 20),
  budget_tv    = c(5, 15, 25, 30, 35, 50, 100),
  budget_radio = c(7, 12, 17, 25, 30, 35, 70)
)
df
# a. Fit with lm(). Using a formula plus 'data =' (instead of df$...
#    terms) keeps the model usable for prediction on new data.
model <- lm(sales ~ budget_tv + budget_radio, data = df)
# BUG FIX: predict.lm() takes 'newdata', not 'data'. The original call
# predict(model, data = df) silently ignored the argument and only
# worked by accident (fitted values on the training data).
df$pred_builtin <- predict(model, newdata = df)
# b. Same fit via the normal equation: beta = (X'X)^-1 X'y
x <- df[, 2:3]
x$intercept <- rep(1, nrow(df))  # explicit intercept column
x <- as.matrix(x)
x_transpose <- t(x)
# 'beta' rather than 'coef' avoids masking stats::coef()
beta <- solve(x_transpose %*% x) %*% x_transpose %*% df$sales
# Column order of x is (budget_tv, budget_radio, intercept).
df$pred <- beta[1] * df$budget_tv + beta[2] * df$budget_radio + beta[3]
df
|
62eb86900042f700f8780e60e3adf3f236a82ea6
|
1114eaf591e56bd3fefc9b3827ebf76454970445
|
/CriticaAutomaticaCompras/Scripts/PreparacaoDosDadosResumo.R
|
5e99df163bc5b0fc6a59575a0dfef490d50bb6e8
|
[] |
no_license
|
NeuKnowledge/EAC
|
4e8c1e569459755ff4fa3921636ce8777704c6f7
|
c6715b3944f34dd19e87046dfb3502331c2c1a76
|
refs/heads/master
| 2021-01-19T10:53:35.676588
| 2016-08-03T15:27:38
| 2016-08-03T15:27:38
| 61,637,904
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,084
|
r
|
PreparacaoDosDadosResumo.R
|
rm(list=ls())
library(data.table)
library(dplyr)
library(reshape)
library(reshape2)
library(ggplot2)
library(gridExtra)
library(plotly)
#
# Carga Resumo produtos mais vendidos
#
load(file = "../Dados/resumoProdutosMaisVendidos.RData")
load(file = "../Dados/produtosMaisVendidosR.RData")
#
# Produtos
#
{
load(file="../Dados/varejao5anos/produtos.RData")
codProduto <- "1T 1095"
subset(produtos,COD_PRODUTO == codProduto)
produtos <- produtos[order(COD_PRODUTO)]
ptn <- "1O95"
ndx <- grep(ptn, produtos$COD_PRODUTO)
p <- produtos[ndx,][order(COD_PRODUTO)]
}
#############
# INICIO
#############
# Visualização Mensal do produto mais vendido
{
  ## Build a line chart of monthly quantities, one line per series in the
  ## 'Dados' column of pplotDados. Three shaded rectangles (year bands
  ## taken pairwise from pDatas) are drawn behind the lines; pCoProduto,
  ## pTitulo and pCoLoja only feed the title text. When pFlagPontos is
  ## TRUE, points plus numeric labels are overlaid on each observation.
  ## Returns the ggplot object (the caller print()s it).
  geraGraficoLinha <- function (pplotDados, pDatas, pCoProduto, pTitulo, pCoLoja, pFlagPontos = TRUE) {
    g <- ggplot(pplotDados,aes(x=AnoMes,y=Qtde, group=Dados,label=Qtde)) +
      geom_rect(mapping=aes(xmin = pDatas[1], xmax = pDatas[2], ymin = -Inf, ymax = Inf),
                fill = "gray90",
                alpha = 0.05,
                colour="gray90") +
      geom_rect(mapping=aes(xmin = pDatas[3], xmax = pDatas[4], ymin = -Inf, ymax = Inf),
                fill = "gray90",
                alpha = 0.05,
                colour="gray90") +
      geom_rect(mapping=aes(xmin = pDatas[5], xmax = pDatas[6], ymin = -Inf, ymax = Inf),
                fill = "gray90",
                alpha = 0.05,
                colour="gray90") +
      geom_line(aes(linetype = Dados, color = Dados)) +
      ggtitle(paste0(pTitulo," produto : ",pCoProduto," - Loja : ",pCoLoja)) +
      theme(axis.text.x = element_text(angle=70,hjust=1,size=10),
            plot.title = element_text(lineheight=1.5, face="bold",colour = "black")) +
      theme(plot.margin=unit(c(0.5,0,0.5,0.5),"cm")) +
      scale_size_manual(values=c(1, 1.5)) +
      geom_hline(yintercept = 0)
    ## optionally overlay points and value labels on every observation
    if (pFlagPontos) {
      g <- g +
        geom_point(aes(color = Dados)) +
        geom_text(hjust = 0, vjust = -0.1, size=2)
    }
    return(g)
  }
  ## Horizontal box plot of Qtde per year (year extracted from the first
  ## four characters of AnoMes). Blue points mark the per-year standard
  ## deviation, red points the per-year mean, both added via
  ## stat_summary(). Returns the ggplot object.
  ## NOTE(review): stat_summary(fun.y=...) is deprecated in ggplot2 >= 3.3
  ## in favor of 'fun =' - confirm the ggplot2 version in use.
  geraBoxPlot <- function (pplotDados, pCoProduto, pTitulo, pCoLoja) {
    ## keep only year / series / quantity for plotting
    pplotDados <- as.data.table(pplotDados)[,.(Ano = substr(AnoMes,1,4), Dados, Qtde)]
    g <- ggplot(pplotDados,aes(x=Ano,y=Qtde)) +
      geom_boxplot(show.legend = TRUE) +
      stat_summary(fun.y=sd,col='blue',geom='point', aes(shape = "desvio padrão")) +
      stat_summary(fun.y=mean,col='red',geom='point', aes(shape="média")) +
      ggtitle(paste0(pTitulo," produto : ",pCoProduto," - Loja : ",pCoLoja)) +
      theme(axis.text.x = element_text(angle=70,hjust=1,size=10),
            plot.title = element_text(lineheight=1.5, face="bold",colour = "black")) +
      theme(plot.margin=unit(c(0.5,0,0.5,0.5),"cm")) +
      scale_size_manual(values=c(1, 1.5)) +
      coord_flip()
    return(g)
  }
gplotDados <- NULL
printGraficos <- function(pProdutosMaisVendidos, pNoListaProdutos, pFlagPontos, pTipoGrafico) {
dtAux <- data.frame(Estoque = rep(0,60),
Vendas = rep(0,60),
Compras = rep(0,60),
AnoMes = c(201101:201112,
201201:201212,
201301:201312,
201401:201412,
201501:201512))
dtAux$AnoMes <- as.character(dtAux$AnoMes)
Datas <- dtAux$AnoMes
Datas <- Datas[c(1,12,24,36,48,60)]
coLoja <- 1
listaVendas <- list()
listaCompras <- list()
listaEstoqueCalculado <- list()
listaDifComprasVendas <- list()
for ( i in 1:length(pProdutosMaisVendidos)) {
coProduto <- pProdutosMaisVendidos[i]
resumoProduto <- subset(resumoProdutosMaisVendidos, Produto == coProduto & Loja == coLoja)[,.(Compras,Vendas,Estoque,AnoMes)]
resumoProduto <- rbind(resumoProduto,dtAux)
resumoProduto <- as.data.frame(resumoProduto[,.(Compras = sum(Compras),
Vendas = sum(Vendas),
Estoque = sum(Estoque)), by=.(AnoMes)])
resumoProduto <- as.data.table(arrange(resumoProduto,AnoMes))
resumoProduto <- resumoProduto[, .(Compras, Vendas, Estoque, AnoMes, "Estoque Calculado" = Compras - Vendas, "Dif Compras X Vendas" = Compras - Vendas)]
for (j in 2:nrow(resumoProduto)) {
if (resumoProduto[j,]$"Estoque Calculado" != 0){
resumoProduto[j,]$"Estoque Calculado" <- resumoProduto[j-1,]$"Estoque Calculado" + resumoProduto[j,]$Compras - resumoProduto[j,]$Vendas
} else {
resumoProduto[j,]$"Estoque Calculado" <- resumoProduto[j-1,]$"Estoque Calculado"
}
}
plotDadosEstoque <- as.data.table(melt(resumoProduto, id.vars = c("AnoMes"), measure.vars = c("Estoque")))
plotDadosVendas <- as.data.table(melt(resumoProduto, id.vars = c("AnoMes"), measure.vars = c("Vendas")))
plotDadosCompras <- as.data.table(melt(resumoProduto, id.vars = c("AnoMes"), measure.vars = c("Compras")))
plotDadosEstoqueCalculado <- as.data.table(melt(resumoProduto, id.vars = c("AnoMes"), measure.vars = c("Estoque Calculado")))
plotDadosDifComprasVendas <- as.data.table(melt(resumoProduto, id.vars = c("AnoMes"), measure.vars = c("Dif Compras X Vendas")))
plotDados <- rbind(plotDadosEstoque, plotDadosVendas, plotDadosCompras, plotDadosEstoqueCalculado, plotDadosDifComprasVendas)
plotDados <- as.data.frame(plotDados)
names(plotDados) <- c("AnoMes","Dados","Qtde")
gplotDados <<- plotDados
# noArq <- paste0("ResumoProduto_",coProduto,"_Loja_",coLoja,"Dif")
# noFile <- paste0("../Relatorio/",noArq,".pdf")
# pdf(width=12,height=12,file=noFile)
if (pTipoGrafico == "Linha") {
dadosMantidos <- c("Estoque","Vendas","Compras","Dif Compras X Vendas")
g <- geraGraficoLinha(pplotDados = subset(plotDados, Dados %in% dadosMantidos),
pDatas = Datas,
pTitulo = "Dif Compras X Vendas",
pCoProduto = coProduto,
pCoLoja = coLoja,
pFlagPontos)
g <- g +
scale_color_manual(values=c("Estoque" = "darkgreen",
"Vendas" = "blue",
"Compras" = "red",
"Dif Compras X Vendas" = "black")) +
scale_linetype_manual(values=c("Estoque" = "solid",
"Vendas" = "solid",
"Compras" = "solid",
"Dif Compras X Vendas" = "blank"))
listaDifComprasVendas[[i]] <- g
# print (g)
# dev.off()
# noArq <- paste0("ResumoProduto_",coProduto,"_Loja_",coLoja,"EstCalc")
# noFile <- paste0("../Relatorio/",noArq,".pdf")
# pdf(width=12,height=12,file=noFile)
dadosMantidos <- c("Estoque","Vendas","Compras","Estoque Calculado")
g <- geraGraficoLinha(pplotDados = subset(plotDados, Dados %in% dadosMantidos),
pDatas = Datas,
pTitulo = "Estoque Calculado",
pCoProduto = coProduto,
pCoLoja = coLoja,
pFlagPontos)
g <- g +
scale_color_manual(values=c("Estoque" = "darkgreen",
"Vendas" = "blue",
"Compras" = "red",
"Estoque Calculado" = "black")) +
scale_linetype_manual(values=c("Estoque" = "solid",
"Vendas" = "solid",
"Compras" = "solid",
"Estoque Calculado" = "dashed"))
listaEstoqueCalculado[[i]] <- g
# print (g)
# dev.off()
dadosMantidos <- c("Vendas")
# noArq <- paste0("ResumoProduto_",coProduto,"_Loja_",coLoja,"_",dadosMantidos[1],dadosMantidos[2])
# noFile <- paste0("../Relatorio/",noArq,".pdf")
# pdf(width=12,height=12,file=noFile)
g <- geraGraficoLinha(pplotDados = subset(plotDados, Dados %in% dadosMantidos),
pDatas = Datas,
pTitulo = "Vendas",
pCoProduto = coProduto,
pCoLoja = coLoja,
pFlagPontos)
g <- g +
scale_color_manual(values=c("Vendas" = "blue")) +
scale_linetype_manual(values=c("Vendas" = "solid"))
listaVendas[[i]] <- g
# print (g)
dadosMantidos <- c("Compras")
g <- geraGraficoLinha(pplotDados = subset(plotDados, Dados %in% dadosMantidos),
pDatas = Datas,
pTitulo = "Compras",
pCoProduto = coProduto,
pCoLoja = coLoja,
pFlagPontos)
g <- g +
scale_color_manual(values=c("Compras" = "red")) +
scale_linetype_manual(values=c("Compras" = "solid"))
listaCompras[[i]] <- g
# print (g)
# dev.off()
} else if (pTipoGrafico == "Boxplot") {
tipoDado <- "Vendas"
g <- geraBoxPlot(pplotDados = subset(plotDados, Dados == tipoDado),
pTitulo = paste0("Box Plot ",tipoDado),
pCoProduto = coProduto,
pCoLoja = coLoja)
listaVendas[[i]] <- g
tipoDado <- "Compras"
g <- geraBoxPlot(pplotDados = subset(plotDados, Dados == tipoDado),
pTitulo = paste0("Box Plot ",tipoDado),
pCoProduto = coProduto,
pCoLoja = coLoja)
listaCompras[[i]] <- g
}
}
if (pTipoGrafico == "Linha") {
noArq <- paste0("ResumoProdutos_",pNoListaProdutos,"_Loja_",coLoja,"EstoqueCalculado")
noFile <- paste0("../Relatorio/",noArq,".pdf")
pdf(width=12,height=12,file=noFile)
do.call(what = grid.arrange, args = listaEstoqueCalculado)
dev.off()
noArq <- paste0("ResumoProdutos_",pNoListaProdutos,"_Loja_",coLoja,"DifComprasVendas")
noFile <- paste0("../Relatorio/",noArq,".pdf")
pdf(width=12,height=12,file=noFile)
do.call(what = grid.arrange, args = listaDifComprasVendas)
dev.off()
noArq <- paste0("ResumoProdutos_",pNoListaProdutos,"_Loja_",coLoja,"Compras")
noFile <- paste0("../Relatorio/",noArq,".pdf")
pdf(width=12,height=12,file=noFile)
do.call(what = grid.arrange, args = listaCompras)
dev.off()
noArq <- paste0("ResumoProdutos_",pNoListaProdutos,"_Loja_",coLoja,"Vendas")
noFile <- paste0("../Relatorio/",noArq,".pdf")
pdf(width=12,height=12,file=noFile)
do.call(what = grid.arrange, args = listaVendas)
dev.off()
} else if (pTipoGrafico == "Boxplot") {
noArq <- paste0("BoxplotProdutos_",pNoListaProdutos,"_Loja_",coLoja,"Compras")
noFile <- paste0("../Relatorio/",noArq,".pdf")
pdf(width=12,height=12,file=noFile)
do.call(what = grid.arrange, args = listaCompras)
dev.off()
noArq <- paste0("BoxplotProdutos_",pNoListaProdutos,"_Loja_",coLoja,"Vendas")
noFile <- paste0("../Relatorio/",noArq,".pdf")
pdf(width=12,height=12,file=noFile)
do.call(what = grid.arrange, args = listaVendas)
dev.off()
}
}
tipoGrafico <- "Linha"
{
produtosMaisVendidos <- produtosMaisVendidosR[1:6]
noListaProdutos <- "1:6"
flagPontos <- FALSE
printGraficos(produtosMaisVendidos, noListaProdutos, flagPontos, tipoGrafico)
produtosMaisVendidos <- produtosMaisVendidosR[7:12]
noListaProdutos <- "7:12"
flagPontos <- FALSE
printGraficos(produtosMaisVendidos, noListaProdutos, flagPontos, tipoGrafico)
produtosMaisVendidos <- produtosMaisVendidosR[13:18]
noListaProdutos <- "13:18"
flagPontos <- FALSE
printGraficos(produtosMaisVendidos, noListaProdutos, flagPontos, tipoGrafico)
produtosMaisVendidos <- produtosMaisVendidosR[19:21]
noListaProdutos <- "19:21"
flagPontos <- FALSE
printGraficos(produtosMaisVendidos, noListaProdutos, flagPontos, tipoGrafico)
for (i in 1:length(produtosMaisVendidosR)) {
produtosMaisVendidos <- produtosMaisVendidosR[i]
noListaProdutos <- produtosMaisVendidosR[i]
flagPontos <- TRUE
printGraficos(produtosMaisVendidos, noListaProdutos, flagPontos, tipoGrafico)
}
}
tipoGrafico <- "Boxplot"
{
i <- 1
for (i in 1:length(produtosMaisVendidosR)) {
produtosMaisVendidos <- produtosMaisVendidosR[i]
noListaProdutos <- produtosMaisVendidosR[i]
flagPontos <- NULL
printGraficos(produtosMaisVendidos, noListaProdutos, flagPontos, tipoGrafico)
}
}
}
|
1983db831fad64369a67b0a72f160c2e31fd206b
|
d59e56c7658f5177551b308b483ab352236da8a2
|
/cran/paws.compute/man/ec2_modify_availability_zone_group.Rd
|
83bc827297dbf18b015b417c5abf059ba7851d78
|
[
"Apache-2.0"
] |
permissive
|
jcheng5/paws
|
a09b03b93c6bafdab26c3217c33926b86907276b
|
9bb49f9a3ba415c3276955fa676bc881bc22fa3e
|
refs/heads/main
| 2023-02-01T15:25:58.124905
| 2020-11-10T22:35:42
| 2020-11-10T22:35:42
| 317,394,924
| 0
| 0
|
NOASSERTION
| 2020-12-01T01:48:12
| 2020-12-01T01:48:12
| null |
UTF-8
|
R
| false
| true
| 1,500
|
rd
|
ec2_modify_availability_zone_group.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_modify_availability_zone_group}
\alias{ec2_modify_availability_zone_group}
\title{Enables or disables an Availability Zone group for your account}
\usage{
ec2_modify_availability_zone_group(GroupName, OptInStatus, DryRun)
}
\arguments{
\item{GroupName}{[required] The name of the Availability Zone Group.}
\item{OptInStatus}{[required] Indicates whether to enable or disable membership. The valid values are
\verb{opted-in}. You must contact \href{https://console.aws.amazon.com/support/home#/case/create?issueType=customer-service&serviceCode=general-info&getting-started&categoryCode=using-aws&services}{AWS Support}
to disable an Availability Zone group.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Enables or disables an Availability Zone group for your account.
}
\details{
Use
\href{https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html}{DescribeAvailabilityZones}
to view the value for \code{GroupName}.
}
\section{Request syntax}{
\preformatted{svc$modify_availability_zone_group(
GroupName = "string",
OptInStatus = "opted-in"|"not-opted-in",
DryRun = TRUE|FALSE
)
}
}
\keyword{internal}
|
914200066e9adeda64477dc7a05d29e0f0e770a7
|
8dbe523b5cd123fb95bdcb97dac806d482af566f
|
/tests/regression_tests/hojsgaard_model_tests/random.graph.R
|
aaa5ff4136d24502d2596bcfb0ae4df15150c629
|
[] |
no_license
|
npetraco/CRFutil
|
b5ca67b73afdab9dc64712fc709fe08a8fbce849
|
50ef4ca06b7ab11ac1d54472a87e7854beb07cec
|
refs/heads/master
| 2023-01-22T10:53:55.149603
| 2023-01-06T02:03:47
| 2023-01-06T02:03:47
| 135,449,204
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,537
|
r
|
random.graph.R
|
library(igraph)
library(gRbase)
library(CRFutil)
#library(rstanarm)
library(rstan)
library(MASS)
# Make up a random graph
g <- erdos.renyi.game(10, 0.6, typ="gnp")
dev.off()
plot(g)
# Get its adjacency matrix and genrate an MRF sample
adj <- as.matrix(as_adj(g))
f0 <- function(y){ as.numeric(c((y==1),(y==2)))}
rmod <- make.empty.field(adj.mat = adj, parameterization.typ = "standard", plotQ = F)
# "true" theta
rmod$par <- runif(rmod$n.par,-1.5,1.5)
rmod$par
# Make true pots from true theta
out.pot <- make.pots(parms = rmod$par, crf = rmod, rescaleQ = T, replaceQ = T)
rmod$edges
rmod$node.pot
rmod$edge.pot
# So now sample from the model as if we obtained an experimental sample:
num.samps <- 500
samps <- sample.exact(rmod, num.samps)
colnames(samps) <- 1:ncol(samps)
mrf.sample.plot(samps)
# Fit params in multiple ways:
# First get formula for graph
gf <- adj2formula(adj)
dev.off()
plot(ug(gf))
# Empirical
emp.dist <- fit_empirical(samps)
head(emp.dist)
dev.off()
plot(emp.dist[,ncol(emp.dist)], typ="h", xlab="configuration state#", ylab="Emp. Freq.")
# True model from true params
tru.dist <- fit_true(rmod)
head(tru.dist)
reordr.idxs <- reorder_configs(emp.dist[,1:(ncol(emp.dist)-1)], tru.dist[,1:(ncol(emp.dist)-1)])
tru.dist <- tru.dist[reordr.idxs,]
plot(tru.dist[,ncol(emp.dist)], typ="h", xlab="configuration state#", ylab="True Freq.")
# CRF MLE
mle.dist <- fit_mle(gf,samps,infer.exact, mag.grad.tol = 1e-2)
reordr.idxs <- reorder_configs(emp.dist[,1:(ncol(emp.dist)-1)], mle.dist[,1:(ncol(emp.dist)-1)])
mle.dist <- mle.dist[reordr.idxs,]
plot(mle.dist[,ncol(emp.dist)], typ="h", xlab="configuration state#", ylab="MLE. Freq.")
# logistic regression (glm) CAUTION SLOW!!!!!!!!!!!!!!
logis.dist <- fit_logistic(gf, samps)
reordr.idxs <- reorder_configs(emp.dist[,1:(ncol(emp.dist)-1)], logis.dist[,1:(ncol(emp.dist)-1)])
logis.dist <- logis.dist[reordr.idxs,]
plot(logis.dist[,ncol(emp.dist)], typ="h", xlab="configuration state#", ylab="Logistic Freq.")
# Bayes logistic regression (Stan, loo, WAIC)
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
blogis.dist <- fit_bayes_logistic(gf, samps)
reordr.idxs <- reorder_configs(emp.dist[,1:10], blogis.dist[,1:10])
blogis.dist <- blogis.dist[reordr.idxs,]
plot(blogis.dist[,11], typ="h", xlab="configuration state#", ylab="Logistic Freq.")
# log linear (loglin, glm)
loglin.dist <- fit_loglinear(gf, samps)
reordr.idxs <- reorder_configs(emp.dist[,1:(ncol(emp.dist)-1)], loglin.dist[,1:(ncol(emp.dist)-1)])
loglin.dist <- loglin.dist[reordr.idxs,]
plot(loglin.dist[,ncol(emp.dist)], typ="h", xlab="configuration state#", ylab="Log-Linear Freq.")
# Bayes log linear (Poisson, Stan, loo, WAIC) Doesn't work with this model matrix !!!!!!! FIX******************
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
bloglin.dist <- fit_bayes_loglinear(gf, samps)
reordr.idxs <- reorder_configs(emp.dist[,1:10], bloglin.dist[,1:10])
bloglin.dist <- bloglin.dist[reordr.idxs,]
plot(bloglin.dist[,11], typ="h", xlab="configuration state#", ylab="Bayes Loglin Freq.")
# Bayes log linear2 (Poisson, Stan, loo, WAIC)
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
model.c <- stanc(file = "inst/poisson_model.stan", model_name = 'model')
sm <- stan_model(stanc_ret = model.c, verbose = T)
bloglin2.dist <- fit_bayes_loglinear2(gf, samps, stan.model = sm)
reordr.idxs <- reorder_configs(emp.dist[,1:10], bloglin2.dist[,1:10])
bloglin2.dist <- bloglin2.dist[reordr.idxs,]
plot(bloglin2.dist[,11], typ="h", xlab="configuration state#", ylab="Bayes Loglin2 Freq.")
# Bayes zero-inflated (Stan, MODEL MATRIX?????)
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
model.c <- stanc(file = "inst/zero_inflated_poisson_take3.stan", model_name = 'model')
sm <- stan_model(stanc_ret = model.c, verbose = T)
bzipp.dist <- fit_bayes_zip(gf, samps, stan.model = sm, iter = 2000, chains = 4)
library(shinystan)
launch_shinystan(bzipp.dist)
si <- summary(bzipp.dist)
head(si$summary)
jneff <- si$summary[,9]
jR <- si$summary[,10]
min(jneff)
jneff[1]
min(jneff[-which(is.nan(jneff) == T)])
max(jR[-which(is.nan(jR) == T)])
sparams <- extract(bzipp.dist, permuted = TRUE)
colnames(sparams)
sparams$y_new[1:10]
length(sparams$y_new)
dim(sparams$beta)
length(rmod$par)
jtheta <- apply(sparams$beta, 2, median)
jtheta
jbeta <- extract(bzipp.dist, "beta")[[1]]
dim(jbeta)
jbzipp.fit <- make.empty.field(graph.eq = gf, parameterization.typ = "standard")
jbzipp.fit$par <- apply(extract(bzipp.dist,"beta_theta")[[1]], 2, median)
jout.potsx <- make.pots(parms = bzipp.fit$par, crf = bzipp.fit, rescaleQ = T, replaceQ = T)
jpotentials.info <- make.gRbase.potentials(bzipp.fit, node.names = colnames(samples), state.nmes = c("1","2"))
jdistribution.info <- distribution.from.potentials(potentials.info$node.potentials, potentials.info$edge.potentials)
jjoint.distribution <- as.data.frame(as.table(distribution.info$state.probs))
# # Re-order columns to increasing order
# freq.idx <- ncol(joint.distribution)
# node.nums <- colnames(joint.distribution)[-freq.idx]
# node.nums <- unlist(strsplit(node.nums, split = "X"))
# node.nums <- node.nums[-which(node.nums == "")]
# node.nums <- as.numeric(node.nums)
# col.reorder <- order(node.nums)
# joint.distribution <- joint.distribution[,c(col.reorder, freq.idx)]
jconfigs.and.counts <- as.data.frame(ftable(data.frame(samps)))
head(jconfigs.and.counts)
jbzipp.fit <- make.empty.field(graph.eq = gf, parameterization.typ = "standard")
jM <- compute.model.matrix(
configs = jconfigs.and.counts[,-ncol(jconfigs.and.counts)],
edges.mat = jbzipp.fit$edges,
node.par = jbzipp.fit$node.par,
edge.par = jbzipp.fit$edge.par,
ff = f0)
jfreq <- jconfigs.and.counts[,ncol(jconfigs.and.counts)]
length(which(jconfigs.and.counts[,11] == 0))/nrow(jconfigs.and.counts)
ppi <- extract(bzipp.dist,"theta")[[1]]
hist(as.numeric(ppi))
bzipp.dist2 <- fit_bayes_zip(gf, samps, stan.model = sm, iter = 2000, chains = 4)
# Bayes neg-binomial
# MLE zero-inflated, neg-binomial??
# Assess difference from the true distribution
hist(mle.dist[,11] - tru.dist[,11])
hist(emp.dist[,11] - tru.dist[,11])
hist(logis.dist[,11] - tru.dist[,11])
hist(blogis.dist[,11] - tru.dist[,11])
hist(loglin.dist[,11] - tru.dist[,11])
hist(bloglin.dist[,11] - tru.dist[,11])
hist(bloglin2.dist[,11] - tru.dist[,11])
|
a6663dc4b35cae4f815c39493888cbebee4a21f0
|
aeaa9ac30428b8df7e88d980da1d727925938d3e
|
/man/influenza.Rd
|
75b0bd203b8e8590735fc738b4bd4575ae85de40
|
[] |
no_license
|
cran/tscount
|
826b5c77cf3940cf075968146f8e064a3ac3bf2d
|
e804ef82017773f515570a19de424919d3e44797
|
refs/heads/master
| 2021-01-18T22:05:17.334697
| 2020-09-08T06:00:03
| 2020-09-08T06:00:03
| 30,640,940
| 6
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,064
|
rd
|
influenza.Rd
|
\name{influenza}
\alias{influenza}
\title{
Influenza Infections Time Series
}
\description{
Weekly number of reported influenza cases in the state of North Rhine-Westphalia (Germany) from January 2001 to May 2013.
}
\usage{
influenza
}
\format{
A data frame with variables \code{year} and \code{week} giving the year and calendar week of observation, and with a variable \code{cases} giving the number of reported cases in the respective week.
}
\source{
Robert Koch Institute: SurvStat@RKI, \url{https://survstat.rki.de}, accessed on 10th June 2013.
The data are provided with kind permission of the Robert Koch Institute. Further details and terms of usage are given at \url{https://survstat.rki.de}. More data reported under the German Infectious Diseases Protection Act is available via the SurvStat@RKI web application linked above.
}
\seealso{
\code{\link{campy}}, \code{\link{ecoli}}, \code{\link{ehec}}, \code{\link{measles}} in this package, \code{\link[gamlss.data]{polio}} in package \code{gamlss.data}
}
\keyword{Data}
|
38ec5342eb980bc65426666bb275cd39701caf62
|
860c59446ab714b979ba478c478470191e04e6aa
|
/ID3/CustomerID3Function.r
|
6cbfbf70b04a8682ec052b53c94fea7b89112470
|
[] |
no_license
|
HelloMrChen/AlgorithmPractise-R
|
8c0781c8d874c5806ef76265de98c81ec141adbf
|
6c9c762b493cc60bec08a81c8abd49a81923211a
|
refs/heads/master
| 2021-05-06T02:45:17.635428
| 2018-04-04T16:12:57
| 2018-04-04T16:12:57
| 114,624,465
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,843
|
r
|
CustomerID3Function.r
|
# ID3 decision tree in R (demonstrated on the iris data set).
#
# Shannon entropy (in bits) of the decision column, which is assumed
# to be the LAST column of dataSet. Class frequencies are tallied with
# table() and folded into -sum(p * log2(p)).
info <- function(dataSet) {
  outcome <- dataSet[, ncol(dataSet)]           # decision (label) column
  counts <- table(factor(outcome))              # frequency of each class
  p <- as.numeric(counts) / nrow(dataSet)       # class probabilities
  -sum(p * log2(p))
}
# Return the subset of rows of originDataSet whose value in column
# 'axis' equals 'value', with row names reset; NULL when no row matches
# (matching the original's behavior for an empty result).
#
# FIXES vs. the original: (1) the loop grew the result with rbind()
# (O(n^2)) and, by prepending, returned rows in REVERSE order - callers
# only use the subset for entropy counts, so natural order is safe and
# clearer; (2) `if (x == value)` errored on NA cells - which() drops
# NAs safely.
splitDataSet <- function(originDataSet, axis, value) {
  keep <- which(originDataSet[, axis] == value)   # which() ignores NAs
  if (length(keep) == 0) {
    return(NULL)
  }
  retDataSet <- originDataSet[keep, , drop = FALSE]
  rownames(retDataSet) <- NULL
  retDataSet
}
# Choose the best column to split on: for every feature column (all but
# the last, which holds the decision label), compute the weighted
# entropy of the partition it induces and pick the column with the
# largest information gain. Returns the column index, or -1 when no
# split improves on the base entropy.
#
# BUG FIXES vs. the original:
#  - `for (j in 1:classCount)` iterated 1:<character vector> (the
#    factor levels), which raises an error; iterate over the levels
#    themselves instead.
#  - `splitDataSet(dataSet, i, Feature[j])` passed the value found in
#    ROW j rather than the j-th level, so the wrong subsets were built.
chooseBestFeatureToSplita <- function(dataSet) {
  bestGain <- 0.0
  bestFeature <- -1
  baseInfo <- info(dataSet)            # entropy of the whole node
  numFeature <- ncol(dataSet) - 1      # last column is the label
  for (i in seq_len(numFeature)) {
    Feature <- dataSet[, i]
    featureLevels <- levels(factor(Feature))  # distinct values of column i
    featureInfo <- 0.0
    # weighted entropy of the partition induced by column i
    for (lev in featureLevels) {
      subDataSet <- splitDataSet(dataSet, i, lev)
      prob <- nrow(subDataSet) / nrow(dataSet)
      featureInfo <- featureInfo + prob * info(subDataSet)
    }
    infoGain <- baseInfo - featureInfo
    if (infoGain > bestGain) {
      bestGain <- infoGain
      bestFeature <- i
    }
  }
  bestFeature
}
# Return the most frequent value in classList (majority vote for a
# leaf). Ties are broken by factor-level (i.e. sorted) order, which
# matches the ordering table() uses.
majorityCnt <- function(classList) {
  tallies <- table(classList)
  names(tallies)[which.max(tallies)]
}
# TRUE when classList contains exactly one distinct (non-NA) value,
# i.e. the node is already pure. table() is used (as in the original)
# so NA entries do not count as a separate class.
trick <- function(classList) {
  length(table(classList)) == 1L
}
# Recursively build the ID3 decision tree. A pure node returns its
# class label; a node with only the label column left returns the
# majority class; otherwise the best feature is chosen, recorded, and
# one subtree is built per distinct value of that feature. The result
# is accumulated with rbind(), as in the original.
#
# BUG FIXES vs. the original:
#  - 'dataSet' was reassigned inside the branch loop, so from the
#    second branch onward the split was taken from an already-reduced
#    subset instead of the node's full data; each branch now gets its
#    own 'branchData'.
#  - dropping the chosen column with `[ , -bestFeature]` collapsed a
#    two-column frame to a vector; drop = FALSE keeps it a data frame.
createTree <- function(dataSet) {
  decision_tree <- list()
  classList <- dataSet[, ncol(dataSet)]
  # pure node: every remaining sample carries the same label
  if (trick(classList)) {
    return(rbind(decision_tree, classList[1]))
  }
  # only the label column remains: fall back to majority vote
  if (ncol(dataSet) == 1) {
    return(rbind(decision_tree, majorityCnt(classList)))
  }
  # record the best split feature for this node
  bestFeature <- chooseBestFeatureToSplita(dataSet)
  labelFeature <- colnames(dataSet)[bestFeature]
  decision_tree <- rbind(decision_tree, labelFeature)
  splitValues <- levels(as.factor(dataSet[, bestFeature]))
  for (value in splitValues) {
    # split the NODE's data (not a previously reduced subset) and
    # remove the used feature before recursing
    branchData <- splitDataSet(dataSet, bestFeature, value)
    branchData <- branchData[, -bestFeature, drop = FALSE]
    decision_tree <- rbind(decision_tree, createTree(branchData))
  }
  decision_tree
}
# Demo run: build a tree on the built-in iris data (last column, Species, is
# the class label). NOTE(review): iris features are continuous, so every
# distinct numeric value becomes its own branch -- confirm this is intended.
t<-createTree(iris)
|
88bf265d7566c9b2568bed782cb179d6dfa7f5da
|
2b5b885a283ac7853b6c46fae908f3e66abffcff
|
/R/write_input.R
|
9105bb3d8dec5f3f3f656726db18f53bc868c64f
|
[] |
no_license
|
quanted/VarroaPopWrapper
|
8b89322b966f1a442f8830c8151d337f9d7ec5c8
|
ade75f2c3810b7ed7c557868351d6e3c36caf03f
|
refs/heads/dev
| 2020-03-07T23:48:21.891865
| 2018-12-12T16:23:03
| 2018-12-12T16:23:03
| 127,790,278
| 0
| 0
| null | 2018-10-23T17:51:44
| 2018-04-02T17:38:14
|
R
|
UTF-8
|
R
| false
| false
| 3,774
|
r
|
write_input.R
|
##
# Write VarroaPop Inputs
# code by Jeff Minucci
#
##
#' Write a VarroaPop input file from a named list or vector
#'
#' Writes one "name=value" line per parameter, then a final
#' "WeatherFileName=..." line, to \code{in_path/in_filename}.
#'
#' @param params Named vector of VarroaPop inputs to be written to .txt file.
#' @param in_path Directory to write the input file to (optional).
#' @param in_filename Filename of the written input file. Defaults to 'vp_input.txt'.
#' @param weather_file Full path to the weather file e.g. C:/VarroaPop/weather.wea (must be .wea/.dvf/.wth) OR
#'  one of either 'Columbus' (default), 'Sacramento', 'Phoenix', 'Yakima', 'Eau Claire', 'Jackson', or 'Durham'
#' @param verbose T/F print extra details?
#'
#' @return None... writes inputs to a .txt file in in_path for VarroaPop
#'
#' @author Jeffrey M Minucci, \email{jminucci2@@gmail.com}
#'
#' @examples
#' parameters <- c("foo"=15,"bar"=4)
#' write_vp_input(parameters, in_path = "d:/path/to/inputdir")
#'
#' @export
write_vp_input <- function(params, in_path = system.file("varroapop_files","input",package="VarroaPopWrapper"),
                           in_filename = 'vp_input.txt',
                           weather_file = 'Columbus',
                           verbose = FALSE){
  # Named weather stations shipped with the package; keys are matched
  # case-insensitively against `weather_file`. Anything else is treated as a
  # literal file path.
  weather_locs <- c("columbus" = system.file("varroapop_files","weather","18815_grid_39.875_lat.wea",package="VarroaPopWrapper"),
                    "sacramento" = system.file("varroapop_files","weather","17482_grid_38.375_lat.wea",package="VarroaPopWrapper"),
                    "phoenix" = system.file("varroapop_files","weather","12564_grid_33.375_lat.wea",package="VarroaPopWrapper"),
                    "yakima" = system.file("varroapop_files","weather","25038_grid_46.375_lat.wea",package="VarroaPopWrapper"),
                    "eau claire" = system.file("varroapop_files","weather","23503_grid_44.875_lat.wea",package="VarroaPopWrapper"),
                    "jackson" = system.file("varroapop_files","weather","11708_grid_32.375_lat.wea",package="VarroaPopWrapper"),
                    "durham" = system.file("varroapop_files","weather","15057_grid_35.875_lat.wea",package="VarroaPopWrapper"))
  if(tolower(weather_file) %in% names(weather_locs)) weather_file <- weather_locs[tolower(weather_file)]
  if(verbose){
    print(paste("Printing input file to:",in_path))
    print(paste("Weather file location:", weather_file))
  }
  inputs <- paste(names(params),as.character(params),sep="=")
  inputs <- c(inputs, paste("WeatherFileName",weather_file,sep="="))
  # BUG FIX: the original used paste(in_path, in_filename, sep = ""), which
  # silently drops the path separator whenever in_path has no trailing slash
  # (the system.file() default does not). file.path() always inserts it.
  write(inputs, file = file.path(in_path, in_filename), sep="")
}
#' Write a VarroaPop input file where each named parameter is to be written (except in_path or verbose)
#'
#' Creates a single input file from parameters passed directly as named
#' arguments; each becomes one "name=value" line.
#'
#' @param in_path Directory to write vp_input.txt file to
#' @param ... Initial parameters to be passed to VarroaPop (must be named)
#' @param in_filename Filename of the written input file. Defaults to 'vp_input.txt'.
#' @param verbose T/F print extra details?
#'
#' @return None... writes inputs to a .txt file in in_path for VarroaPop
#'
#' @examples
#' write_vp_input_long(in_path = "d:/path/to/inputdir", foo = 15, bar = 4)
#'
#' @export
write_vp_input_long <- function(in_path = paste(system.file(package="VarroaPopWrapper"),
                                                "/varroapop_files/input",sep=""), ... ,
                                in_filename = 'vp_input.txt', verbose = FALSE){
  dots = list(...)
  if(verbose){
    print(paste("Printing input file to:",in_path))
  }
  # One "name=value" line per parameter supplied through ...
  inputs <- paste(names(dots),as.character(dots),sep="=")
  # BUG FIX: file.path() inserts the directory separator that the original
  # paste(in_path, in_filename, sep = "") omitted when in_path lacked a
  # trailing slash.
  write(inputs, file = file.path(in_path, in_filename), sep="")
}
|
43fcdedbb88512828883d4155c50e18bdd848ac1
|
591771c6a3972cab8c680696771fd4b4aa0c3f20
|
/R/0.0.0-Level2URI.R
|
e136ce065b185271a31a5018edf54172afebf6d1
|
[] |
no_license
|
Sumpfohreule/S4Level2
|
a36dfc014dde47763009dcc4420a198ce11a9a5d
|
9034cddbd04efed8cea8c5b90cb2e4fbf16209e7
|
refs/heads/main
| 2023-08-19T08:58:05.616624
| 2021-09-29T14:47:03
| 2021-09-29T14:47:03
| 304,371,990
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,635
|
r
|
0.0.0-Level2URI.R
|
########################################################################################################################
# S4 class for a hierarchical "Level2" path of up to three components
# (Plot / SubPlot / DataStructure-or-logger -- see accessor methods below).
#   URI_Split: character vector holding the individual path components
#   Depth:     number of components currently stored (0-3)
setClass(Class = "Level2URI", slots = c(
URI_Split = "character",
Depth = "numeric"
))
#' Constructor for Level2URI
#'
#' Builds a Level2URI from 0-3 path components. Arguments may be character
#' strings, factors or existing Level2URI objects; any string containing "/"
#' is split into its components first.
#'
#' @param ... URI like path consisting of 0-3 strings (Plot, SubPlot, Logger)
#' @return A Level2URI object with URI_Split and Depth slots populated.
Level2URI <- function(...) {
all_elements <- list(...)
# No arguments: build the empty (depth-0) URI.
if (length(all_elements) == 0) {
all_elements <- ""
}
# Validate element types (walk is used purely for its side effect of
# stopping on bad input), coerce everything to character, and split on "/".
uri_elements <- all_elements %>%
unlist() %>%
purrr::walk(~ if(!(is.character(.x) || is.Level2URI(.x) || is.factor(.x))) {
stop("Some element is not of type character or Level2URI")
}) %>%
purrr::map(~ as.character(.x)) %>%
purrr::map(~ stringr::str_split(.x, pattern = "/")) %>%
unlist()
if (length(uri_elements) > 3) {
elements <- unlist(uri_elements)
stop("Can't convert the following elements to a Level2URI as its length would be > 3\n", paste(elements, collapse = "/"))
} else if (length(uri_elements) == 3) {
# A third component (DataStructure) is only valid when the second
# component (SubPlot) is non-empty.
assertthat::assert_that(uri_elements[2] != "" || uri_elements[3] == "")
}
# Drop empty components (e.g. produced by leading/trailing slashes),
# but keep a single empty string so the depth-0 URI stays representable.
if (length(uri_elements) > 1) {
uri_elements <- uri_elements %>%
purrr::discard(~ .x == "")
}
.Object <- new("Level2URI")
.Object@URI_Split <- uri_elements
.Object@Depth <- length(uri_elements)
.Object
}
#' @include getURI_Depth.R
setMethod("getURI_Depth", signature = "Level2URI", definition = function(.Object) {
  # Number of path components stored in this URI (0-3).
  .Object@Depth
})
#' @include getPlotName.R
setMethod("getPlotName", signature = "Level2URI", definition = function(.Object) {
  # First path component; a depth-0 URI has no plot name and is an error.
  if (getURI_Depth(.Object) >= 1) {
    return(.Object@URI_Split[1])
  }
  stop("PlotName seems to be missing from this Level2URI")
})
#' @include getSubPlotName.R
setMethod("getSubPlotName", signature = "Level2URI", definition = function(.Object) {
  # Second path component, or "" when the URI is too shallow.
  if (getURI_Depth(.Object) >= 2) {
    return(.Object@URI_Split[2])
  }
  ""
})
#' @include getDataStructureName.R
setMethod("getDataStructureName", signature = "Level2URI", definition = function(.Object) {
  # Third path component, or "" when the URI is too shallow.
  if (getURI_Depth(.Object) >= 3) {
    return(.Object@URI_Split[3])
  }
  ""
})
#' @include getPlotURI.R
setMethod("getPlotURI", signature = "Level2URI", definition = function(.Object) {
  # URI truncated to just the plot component.
  Level2URI(getPlotName(.Object))
})
#' @include getSubPlotURI.R
setMethod("getSubPlotURI", signature = "Level2URI", definition = function(.Object) {
  # URI truncated to plot/subplot (subplot may be "" for shallow URIs).
  Level2URI(getPlotName(.Object), getSubPlotName(.Object))
})
#' @include getDataStructureURI.R
setMethod("getDataStructureURI", signature = "Level2URI", definition = function(.Object) {
  # Full plot/subplot/datastructure URI (components may be "" if shallow).
  Level2URI(getSubPlotURI(.Object), getDataStructureName(.Object))
})
#' S3 method turning a Level2URI back into its "a/b/c" string form
#'
#' @param .Object An Object of type Level2URI
as.character.Level2URI <- function(.Object) {
  paste(.Object@URI_Split, collapse = "/")
}
#' Converts multiple strings to multiple URIs
#'
#' Unlike Level2URI(...), each input element becomes its own URI instead of
#' all elements being combined into one.
#' @param uri_strings Vector of strings which represent Level2 Objects (e.g. c("Altensteig/Fichte", "Conventwald/Freiland/ADLM"))
#' @return A list with one Level2URI per input element.
as.Level2URI <- function(uri_strings) {
  lapply(uri_strings, Level2URI)
}
|
6e15bfad8c5c4ed41d12215ff8b1d4a6576e6d82
|
f1ae55b68fa8f895ecdfe34060f0f3a99bec5352
|
/czestosc_w_grupach.R
|
4fb0bd369b227af0111774e1019668ca4edeccf1
|
[] |
no_license
|
psobczyk/signal-peptide
|
755875bea49061cfc85d004a4646c5c8b6cdbd8e
|
58ee07638d40538093224b7af23bd67647e47fe9
|
refs/heads/master
| 2021-04-12T05:17:50.944784
| 2014-07-10T14:09:28
| 2014-07-10T14:09:28
| 13,646,865
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,255
|
r
|
czestosc_w_grupach.R
|
#setwd("~/Dropbox/Doktorat/sekwencje_sygnalowe/")
#source("wczytywanie_danych.R")
con <- function(x, n){
  # Empirical frequency of each group label 1..n in x.
  # Works for numeric or character-coded labels ("1", "2", ...) because
  # `x == i` coerces i to the type of x.
  # Uses vapply instead of growing a NULL vector element-by-element in a loop
  # (also behaves sanely for n == 0, returning numeric(0)).
  vapply(seq_len(n), function(i) sum(x == i) / length(x), numeric(1))
} #computing probs for n groups (it must be exclusive)
distance <- function(con1, con2){
  # Chi-square-like divergence between two frequency vectors; con1 acts as
  # the reference distribution in the denominator.
  diffs <- con1 - con2
  sum(diffs * diffs / con1)
} #distance we want to maximise while choping amino acid sequence into two
# Estimate amino-acid-group frequency distributions for putative signal
# peptides. For every raw sequence: degenerate residues into the given
# groups, scan positions 15..100 for the cut maximising the divergence
# between the composition before and after the cut (candidate signal-peptide
# boundary), keep sequences whose cut lies in (31, 150), and return a list of
# two frequency vectors: groups in the putative signal parts (positive set)
# and in the remainders (negative set).
distribution <- function(rawSeqs, aagroups){
N <- length(rawSeqs)
seqs <- vector(mode="list", length=N)
# Flatten each raw sequence element into a plain character vector.
for(j in 1:N){
a = rawSeqs[j][[1]]
aas = NULL
for (i in 1:length(a)){
aas = c(aas, a[i])
}
seqs[[j]] <- aas
}
#now we have list of vectors representing sequences of aminoacids
seqs <- lapply(seqs, FUN=function(x) degenerate(toupper(x), aagroups))
#now our sequences are degenerated to the given aminoacid groups
#print(seqs[50:100])
cutoff <- NULL
for(seq in seqs){
# Candidate cut positions range n1..min(n2, length-2).
# NOTE(review): for sequences shorter than n1+2 this range runs BACKWARDS
# (descending), scanning unintended positions -- confirm inputs are long enough.
n1 <- 15
n2 <- 100
probs <- NULL
for (i in n1:(min(n2, length(seq)-2))){
# +0.1 is a pseudocount keeping the chi-square denominator non-zero.
con1 = con(seq[1:i], length(aagroups)) + 0.1
con2 = con(seq[(i+1):length(seq)], length(aagroups)) + 0.1
probs <- c(probs, distance(con1, con2))
} #we compute the most likely place for end of signal sequence
#print(n1+which.max(unlist(probs)))
#print(probs)
cutoff <- c(cutoff, n1-1+which.max(probs))
#print(paste((n1+which.max(na.omit(probs))), " ", length(seq)))
}
#print(cutoff)
# Keep only sequences whose inferred boundary is biologically plausible.
good <- (cutoff>31 & cutoff<150)
#for(i in 1:length(good)) print(paste(i, " ", length(seqs[[i]]), " ", cutoff[i]))
goodProt <- seqs[good]
goodCut <- cutoff[good]
# NOTE(review): lists are allocated length(good) but only sum(good) slots are
# filled; the trailing NULLs are harmless here because unlist() drops them.
learningPositive <- vector(mode="list", length=length(good))
learningNegative <- vector(mode="list", length=length(good))
for (i in 1:sum(good)){
learningPositive[i] = list(goodProt[[i]][1:goodCut[i]])
learningNegative[i] = list(goodProt[[i]][(1+goodCut[i]):length(goodProt[[i]])] )
#if(sum(is.na(learningPositive[[i]]))>0){
# print(goodProt[[i]])
# print(goodCut[i])
#}
}
#print(unlist(learningPositive))
# Element 1: group frequencies of signal parts; element 2: of the remainders.
list(con(unlist(learningPositive),length(aagroups)), con(unlist(learningNegative),length(aagroups)))
}
degenerate <- function(seq, aa_group) {
  # Replace each residue by the index of the first group containing it;
  # residues in no group are left untouched. Indices become characters
  # ("1", "2", ...) because seq is a character vector.
  for (group_idx in seq_along(aa_group)) {
    members <- aa_group[[group_idx]]
    seq[seq %in% members] <- group_idx
  }
  seq
}
|
6a5e2e1828ba11e1a24281f2c2613b16cf0baba5
|
dca44395dbf60e1743c65bced7b26838bd676781
|
/HGU/SNU/last__GGOk.R
|
81f93e6a9594e1e2b2b7ff9a59923e1dafe5b664
|
[] |
no_license
|
ksmpooh/SungminCode
|
1b550c375125ea7869917de337aa093160aa03eb
|
33b266b80389664282a2d4d6eb9c2db593442a5f
|
refs/heads/master
| 2023-08-03T16:22:35.085299
| 2023-07-31T16:12:33
| 2023-07-31T16:12:33
| 106,177,934
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,254
|
r
|
last__GGOk.R
|
#install.packages("h2o")
#install.packages("ROCR")
#install.packages("caret")
#install.packages("e1071")
#install.packages("Hmisc")
#install.packages("Dplyr")
library(Hmisc)
library(h2o)
library(ROCR)
library(caret)
library(e1071)
library(dplyr)
# ---------------------------------------------------------------------------
# 5-fold cross-validated H2O deep-learning model predicting platinum
# sensitivity, trained on pooled data from two institutions (SNUH and Asan).
# Depends on local CSV files and a running H2O cluster; paths are
# machine-specific.
# ---------------------------------------------------------------------------
data <-read.csv("Total_data.csv",header = T, sep = ",")
ob <- data
#summary(ob)
#str(ob)
#str(data)
#colnames(ob)
# NOTE(review): setwd() AFTER the first read.csv means Total_data.csv is read
# from the previous working directory; `ob` is also immediately overwritten
# by FinalData.csv below, so the Total_data.csv read appears to be dead code.
setwd("C:/Users/bibs-student/Desktop/last_project")
set.seed(123)
ob <- read.csv("FinalData.csv",header = T, sep = ",")
# Drop the first column (presumably a row-id written by write.csv -- confirm).
ob<-ob[,2:ncol(ob)]
###############################
# Exclude the NAC predictor from this run.
ob_without_NAC <- subset(ob,select = -NAC)
ob<-ob_without_NAC
###############################
# Split by institution code (1 and 3; SNUH/Asan inferred from variable names
# -- confirm against the data dictionary), then drop the code itself.
SNUH <-subset(ob,ob$Institution==1)
Asan <-subset(ob,ob$Institution==3)
SNUH <-subset(SNUH,select = -Institution)
Asan <-subset(Asan,select = -Institution)
##################################
# Per-fold AUCs collected across the 5 CV iterations.
output_test <-array(5)
output_train <- array(5)
# Assign fold indices 1..5 to SNUH rows; leftover rows get folds 1..remainder.
count = nrow(SNUH)/5
count = as.integer(count)
remainder = nrow(SNUH)%%5
if(remainder!=0){
Index <- c(rep(1:5,count),1:remainder)
}else{
Index <-rep(1:5,count)}
SNUH$index <- Index
##########################
# Same fold assignment for Asan.
# NOTE(review): `c(rep(1:5,count),5,4)` hard-codes a remainder of exactly 2
# rows; the Index length is wrong for any other non-zero remainder.
count = nrow(Asan)/5
count = as.integer(count)
remainder = nrow(Asan)%%5
if(remainder!=0){
Index <- c(rep(1:5,count),5,4)
}else{
Index <-rep(1:5,count)}
Asan$index <- Index
# Start a local H2O cluster on all cores and clear any leftover state.
h2o.init(nthreads = -1)
h2o.removeAll()
# 5-fold CV: fold i is held out for validation, stratified by institution.
for(i in 1:5){
SNUH_test <- SNUH[SNUH$index == i,]
SNUH_train <- SNUH[SNUH$index != i,]
Asan_test <- Asan[Asan$index == i,]
Asan_train <- Asan[Asan$index != i,]
training <- rbind(SNUH_train,Asan_train)
testing <- rbind(SNUH_test,Asan_test)
training <- subset(training,select = -index)
testing <-subset(testing,select = -index)
# Shuffle rows before handing the frames to H2O.
training <-training[sample(1:nrow(training)),]
testing <-testing[sample(1:nrow(testing)),]
#colnames(training)
# Column 1 is the response; encode it as a factor for classification.
training[,1] <- h2o::as.factor(training[,1])
trData <- h2o::as.h2o(training)
tsData <- h2o::as.h2o(testing)
number_of_obs <- ncol(training)
# Six hidden layers with L2 regularisation; epochs is an upper bound --
# training actually stops via misclassification-based early stopping.
train_result<- h2o.deeplearning(x = 2:number_of_obs,
y = "Platinum_sensitivity_in_platinum_users1",
training_frame = trData,
validation_frame = tsData,
hidden = c(45,40,35,30,25,20),
#hidden = c(30, 30, 30),
#input_dropout_ratio = 0.3,
#l1 = 1e-5,
l2 = 1e-5,
rate = 0.005,
activation = "Rectifier",
max_w2 = 10,
epochs = 100000,
stopping_rounds = 3,
stopping_metric = "misclassification",
#stopping_metric = "AUC",
stopping_tolerance = 0.01,
export_weights_and_biases = T)
# Record validation and training AUC for this fold.
output_test[i] <- h2o.auc(train_result, valid = T)
output_train[i] <- h2o.auc(train_result, train = T)
}
# Weights of hidden layer 6 -- from the model of the LAST fold only.
h2o.weights(train_result,matrix_id = 6)
h2o.shutdown(prompt=F)
# Distribution of per-fold AUCs.
summary(output_test)
summary(output_train)
sd(output_test)
sd(output_train)
output_test
output_train
|
4d008e748021d1709064f3593b83cc2a3d20668c
|
da646a1815d8daa4f0b333d9b4529aaeb634afc1
|
/backend/TFMir.R
|
f4634f2cc90cbe23ab91bed2d79fbe0b8d1e8562
|
[] |
no_license
|
crs/tfmir
|
940a5b09f0353d28c5f4d800675292f9db87a3fc
|
04be78516ed8a25854d6e7afa86f82e02dfbf10c
|
refs/heads/master
| 2021-03-22T04:43:29.076469
| 2016-01-20T13:50:11
| 2016-01-20T13:50:11
| 139,831,239
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,637
|
r
|
TFMir.R
|
###########################################
# - TFMir Project #
# - TFMir main function #
# - Main function to be called #
# - 2014-10-1 #
# - Copyright: Mohamed Hamed #
###########################################
#R programming environments:
#- R studio IDE
#- R version 2.12.0 (2010-10-15)
#-Platform: x86_64-apple-darwin9.8.0/x86_64 (64-bit)
#-locale: [1] C/en_US.UTF-8/C/C/C/C
## =============================================================================
## Initialize calling required packages and install them if they are not and log
## =============================================================================
source("loadpackages.R")
## ==================================================
## read the configuration file and get the parameters
## ==================================================
source("readconfig.R")
config
## ===================================================
## Load the statistics ,color venn, and grapth scripts
## ===================================================
source("statistics.R")
source("ColorVenn.R")
source("graph.R")
## =============================================================================================================================
## Intialize and Read all files in config file (tranmir db , mirna disease association file and mirna function association file)
## =============================================================================================================================
source("initialize.R")
#getMIRnaCategory("hsa-mir-212","function")
#getMIRnaCategory("hsa-mir-212","disease")
## ===============================
## call the main function of TFMir
## ===============================
#' Read a headerless delimited file and return the unique values of its first
#' column as a character vector (exact duplicate rows are removed first).
#'
#' @param path path to a tab-delimited file with no header row
#' @return character vector of unique first-column entries
readInput=function(path)
{
  molecule.df = read.delim(path, header=FALSE)
  # BUG FIX: drop = FALSE keeps the data.frame shape. For a ONE-column file,
  # the original `molecule.df[!duplicated(molecule.df), ]` collapsed to a
  # plain vector, after which `molecule.df[1]` returned only the FIRST value
  # instead of the whole first column.
  molecule.df = molecule.df[!duplicated(molecule.df), , drop = FALSE]
  molecule.input = unique(as.character(unlist(molecule.df[1])))
  return(molecule.input)
}
#' Main TFMir pipeline entry point.
#'
#' Builds a TF/miRNA co-regulatory network from user-supplied deregulated TFs
#' and/or miRNAs. Either input file may be ""/NA, in which case the missing
#' side is inferred from the other via target/regulator lookups. Four
#' interaction categories (tf-mirna, tf-gene, mirna-gene, mirna-mirna) are
#' combined, a disease-specific sub-network is extracted when `disease` is
#' given, and a "finished.txt" marker is written at the end. Relies on
#' helpers (writeToLog, getInteractions, exportNetworkProperties, config, ...)
#' defined in the scripts source()d at the top of this file.
#'
#' @param tf.path path to a 2-column file (gene, regulation) or ""/NA
#' @param mirna.path path to a 2-column file (miRNA, regulation) or ""/NA
#' @param pval.cutoff significance cutoff passed to getInteractions
#' @param evidence "Experimental", "Predicted" or "both"
#' @param disease optional disease name for the disease-specific network
#' @param output.path directory where all result files are written
TFMir =function(tf.path, mirna.path,pval.cutoff=0.05,evidence,disease="",output.path)
{
# tf.path="tf.sample2.txt"
# mirna.path="mirna.sample.txt"
# mirna.path=""
# pval.cutoff=0.05
# disease="Melanoma"
# # disease="Neoplasm"
# # disease="Alzheimer Disease"
# # disease=""
# output.folder="output"
# evidence="Experimental"
# evidence="Predicted"
# evidence="both"
#
#
## ========================
## log the input parameters
## ========================
writeToLog(tf.path)
writeToLog(mirna.path)
writeToLog(pval.cutoff)
writeToLog(disease)
writeToLog(evidence)
writeToLog(output.path)
writeToLog("=====================================")
# "both" expands to the two evidence levels used by the lookup helpers.
if(tolower(evidence)=="both") { evidence=c("Experimental","Predicted") }
## ==================================================================
## read the input files and test automatically which scenario will be
## ==================================================================
# Scenario A: a TF file was given -> read it.
# Scenario B: no TF file -> derive TFs as regulators/targets of the input
# miRNAs. NOTE(review): `! is.na(tf.path) & tf.path !=""` uses the
# vectorized `&` inside a scalar if -- works for length-1 input only.
tfs.df=data.frame("gene"=character(),"gene.reg"=numeric())
if(! is.na(tf.path) & tf.path !="")
{
tfs.df = read.delim(tf.path, header=FALSE)
}else
{
mirnas.input=unique(tolower(readInput(mirna.path)))
tf.pval=as.double(config$pval.cutoffgene.targets_regulators.fromMiRNA.inputlist)
tfs.regulatorsofMiRNA= getTFsRegulatorsofMiRNAs(mirnas.input,tf.pval,evidence)
tfs.targetsofMiRNA= getTFsTargetsofMiRNAs(mirnas.input,tf.pval,evidence)
tfs.list=unique(c(tfs.targetsofMiRNA,tfs.regulatorsofMiRNA))
if(length(tfs.list) > 0){
tfs.df=data.frame("gene"=tfs.list,"gene.reg"=0) }
}
# Normalise TF names to upper case and de-duplicate rows.
names(tfs.df)=c("gene","gene.reg")
tfs.df=tfs.df[!duplicated (tfs.df),]
tfs.input=toupper(unique(as.character(unlist(tfs.df$gene))))
printGeneEntrezIDsMap(tfs.input,output.path)
# Same two scenarios for the miRNA side, derived from the TFs when missing.
mirnas.df=data.frame("mirna"=character(),"mirna.reg"=numeric())
if(! is.na(mirna.path) & mirna.path !="")
{
mirnas.df = read.delim(mirna.path, header=FALSE)
}else
{
mirna.pval=as.double(config$pval.cutoffmirna.regulators_targets.fromTFS.inputlist)
tfstargets.mirna= getMiRNAsTargetsofTFs(tfs.input,mirna.pval,evidence)
tfsregulators.mirna= getMiRNAsRegulatorsofTFs(tfs.input,mirna.pval,evidence)
mirnas.list=unique(c(tfstargets.mirna,tfsregulators.mirna))
if(length(mirnas.list) > 0){
mirnas.df=data.frame("mirna"=mirnas.list,"mirna.reg"=0) }
}
# miRNA names are conventionally lower case.
names(mirnas.df)=c("mirna","mirna.reg")
mirnas.df$mirna=tolower(mirnas.df$mirna)
mirnas.df=mirnas.df[!duplicated (mirnas.df),]
mirnas.input=unique(as.character(unlist(mirnas.df$mirna)))
## ==================================
## get the four kinds of interactions
## ==================================
tf.mirna.res=getInteractions(category="tf-mirna",reg.input=tfs.input,target.input=mirnas.input,disease=disease,evidence=evidence,output.path=output.path,pval.cutoff=pval.cutoff)
tf.gene.res=getInteractions(category="tf-gene",reg.input=tfs.input,target.input=tfs.input,disease=disease,evidence=evidence,output.path=output.path,pval.cutoff=pval.cutoff)
#mirna.gene.res=tf.mirna.res[tf.mirna.res$category=="mirna-mirna",] # return initial empty structure
mirna.gene.res=getInteractions(category="mirna-gene",reg.input=mirnas.input,target.input=tfs.input,disease=disease,evidence=evidence,output.path=output.path,pval.cutoff=pval.cutoff)
# Start with an empty frame of the right shape; only filled for "Predicted".
mirna.mirna.res=tf.mirna.res[tf.mirna.res$category=="mirna-mirna",] # return initial empty structure
if("Predicted" %in% evidence) ## cause mirna-mirna interacctions are only predictions
mirna.mirna.res=getInteractions(category="mirna-mirna",reg.input=mirnas.input,target.input=mirnas.input,disease=disease,evidence=evidence,output.path=output.path,pval.cutoff=pval.cutoff)
## ======================================================================================================================
## Combine these interactions and get those related to disease only (disease speccific network) (if disease is specified)
## ======================================================================================================================
# Keep only the columns common to all four result frames before rbind-ing.
input=list( tf.genes=names(tf.gene.res), mirna.genes=names(mirna.gene.res),tf.mirna=names(tf.mirna.res),mirna.mirna=names(mirna.mirna.res))
columns=Reduce(intersect,input)
all.res=rbind(tf.mirna.res[,columns],mirna.mirna.res[,columns],mirna.gene.res[,columns],tf.gene.res[,columns])
# Attach the user-supplied regulation direction to both interaction ends.
names(mirnas.df)=c("node","regulation")
names(tfs.df)=c("node","regulation")
nodes.input=rbind(mirnas.df,tfs.df)
names(nodes.input)=c("target","target.reg")
all.res=merge(all.res,nodes.input,by="target")
names(nodes.input)=c("regulator","regulator.reg")
all.res=merge(all.res,nodes.input,by="regulator")
# Disease-specific network: edges with at least one disease-associated node.
all.res.disease=all.res[all.res$is_regulator_in_disease==TRUE | all.res$is_target_in_disease==TRUE,]
if(dim(all.res)[1] > 0)
exportNetworkProperties (all.res,file.path(output.path,"all"), disease,pval.cutoff)
if(dim(all.res.disease)[1] > 0)
exportNetworkProperties (all.res.disease,file.path(output.path,"disease"),disease,pval.cutoff)
# Marker file signalling the web front-end that the run completed.
write("finished", file=file.path(output.path,"finished.txt"),append=F,sep="\n")
}
#' Detect and export 3-node feed-forward-loop (FFL) motifs from a previously
#' exported TFMir network file.
#'
#' Reads the network written by TFMir, finds TF-miRNA pairs sharing targets,
#' classifies them into the four FFL types (composite, TF-FFL, miRNA-FFL,
#' co-regulation) and writes them to <output.path>/motifs.txt. A
#' "finishedmotifs.txt" marker is always written, even when no motif exists.
#'
#' @param net.path path to the exported network table (res.txt)
#' @param output.path directory receiving motifs.txt and the marker file
#' @param evidence "Experimental", "Predicted" or "both"
ExportMotifs =function(net.path,output.path,evidence)
{
######### take care christian : for test only #####
#net.path="output/disease/res.txt"
#output.path="output/disease"
##################################################
net=read.delim(net.path, header=TRUE)
if(tolower(evidence)=="both") { evidence=c("Experimental","Predicted") }
#### extract all putative TF-miRNA pairs who share target genes
tfmir.pairs=getPutativeTFmiRPairs(net)
#### extract all significant TF - miRNA pairs
#tfmir.pairs=getSignificantTFmiRpairs(tfmir.pairs,evidence)
if(dim(tfmir.pairs)[1] >0)
{
#### expand the TF-miRNA pairs that have more than one shared target
tfmir.pairs=relaxMultipleTargetsForTFmiRPairs(tfmir.pairs)
#### get motif type 1 : composite-FFL
motifs.composite= getMotifs.composite(tfmir.pairs,net)
#### get motif type 2 : TF-FFL
motifs.TF.FFL= getMotifs.TF.FFL(tfmir.pairs,net)
#### get motif type 3 : miRNA-FFL
motifs.miRNA.FFL= getMotifs.miRNA.FFL(tfmir.pairs,net)
#### get motif type 4 : Coregulation-FFL
motifs.coregulation= getMotifs.coregulation(tfmir.pairs,net)
motifs=rbind(motifs.composite,motifs.TF.FFL,motifs.miRNA.FFL,motifs.coregulation)
if( dim(motifs)[1] > 0 )
{
# Prepend stable ids motif1..motifN for the front-end.
motifs.ids=paste("motif",seq(1:dim(motifs)[1]), sep="" )
motifs=cbind(motifs.ids,motifs)
write.table(motifs,file=file.path(output.path,"motifs.txt"),quote=F,row.names=F,col.names=T,sep="\t")
}
}
# Marker file signalling that motif detection finished (even with no motifs).
write("finished motifs", file=file.path(output.path,"finishedmotifs.txt"),append=F,sep="\n")
}
#' Plot the GO functional similarity of a gene set against random background.
#'
#' Takes a comma-separated gene-symbol string, maps symbols to Entrez ids,
#' computes pairwise Wang GO semantic similarities (mgeneSim -- presumably
#' from GOSemSim, via the sourced scripts; confirm), builds a random-gene
#' background of the same size over several permutations, and writes a PNG
#' comparing the two empirical CDFs with a one-sided KS-test p-value.
#'
#' @param genes comma-separated gene symbols, e.g. "ESR1, TP53, AKT1"
#' @param output.path full path of the PNG file to write
PlotFunctionalSimilarity=function(genes,output.path)
{
###### @christian : these commented lines for testing only. u can try them urself
# genes="ESR1, TP53, GRIN2D, AGER, AKT1, TERT, NCOA2, BBC3"
# genes="CREB1, LTC4S, TLR9, IL5RA, MCAM, RPL10, RPS3A, ME2, CXCR4, SLC6A4, ERF, ID1, FLII, TGFB1, FLI1, UBE2I, PPRC1, CDC37, LRRFIP1, TGIF1, JAG1, TP53BP2, MSH6, MSH2"
# genes="CREB1, RPL10, CXCR4, ID1, TGFB1, UBE2I, LRRFIP1, TGIF1, JAG1, TP53BP2, MSH6, MSH2"
# genes="ESR1, TP53, GRIN2D, AGER, AKT1, TERT, NCOA2, BBC3"
# genes="SPI1, BACH1, GNA13, SACM1L, FLI1, RAB23, POLE4, MSH2, SERTAD2, SKI, PHC2, ATP6V1C1, MSH6, DHX40, DPP7, RCN2, CHAF1A, PKN2, MECP2, ARL5B, MYO1E, B2M, TYROBP, FLII, MSR1, P2RY10, WAS"
# genes="SPI1, BACH1, GNA13, SACM1L, FLI1, RAB23, POLE4, MSH2, SERTAD2, SKI, PHC2, ATP6V1C1, MSH6, DHX40, DPP7, RCN2, CHAF1A, PKN2, MECP2, ARL5B, MYO1E, B2M, TYROBP, FLII, MSR1, P2RY10, WAS"
# output.path="output/disease/funsim.png"
# ############################################
print(output.path)
# Split the comma-separated list; NOTE(review): symbols keep surrounding
# whitespace here -- presumably the alias lookup tolerates it; verify.
genes=as.vector(unlist(strsplit(genes,",")))
dput(genes)
# Map symbols/aliases to Entrez ids via org.Hs.eg.db.
genes.entrez=unique(as.vector(unlist(mget(as.character(genes), envir=org.Hs.egALIAS2EG, ifnotfound=NA))))
# Pairwise Wang semantic similarity; keep the upper triangle (unique pairs).
gosem=mgeneSim(genes.entrez,organism="human",measure="Wang")#,ont="BP"
gosem=gosem[upper.tri(gosem)]
all.entrez.genes <- mappedkeys(org.Hs.egACCNUM)
#pvals.ks=c()
#pvals.t=c()
#pvals.wc=c()
# Build the random background: repeatedly sample equally sized random gene
# sets and pool their pairwise similarities.
gosem.random.vector=c()
for(i in 1: as.integer(config$NO_OF_random_permutations_for_functional_similarity))
{
genes.random=sample(all.entrez.genes,length(genes.entrez),replace = FALSE)
gosem.random=mgeneSim(genes.random,organism="human",measure="Wang")#,ont="BP"
gosem.random=gosem.random[upper.tri(gosem.random)]
gosem.random.vector=c(gosem.random.vector,gosem.random)
# if(length(gosem.random)>1)
# {
# pvals.ks=c(pvals.ks,ks.test(gosem,gosem.random,alternative="l")$p.value)
# pvals.wc=c(pvals.wc,wilcox.test(gosem,gosem.random,alternative="g")$p.value)
# pvals.t=c(pvals.t,t.test(gosem,gosem.random,alternative="g")$p.value)
# }
}
# pval.t.final= (length(pvals.t[pvals.t > 0.05]) / length(pvals.t))
# pval.ks.final= (length(pvals.ks[pvals.ks > 0.05]) / length(pvals.ks))
# pval.wc.final= (length(pvals.wc[pvals.wc > 0.05]) / length(pvals.wc))
# pval=min(median(pvals.t),median(pvals.wc),median(pvals.ks))
# Down-sample the pooled background to the same size as the gene-set scores,
# then test whether the gene-set CDF lies below (i.e. scores are higher).
gosem.random.forplot=sample(gosem.random.vector,length(gosem))
pval=ks.test(gosem,gosem.random.forplot,alternative="l")$p.value
# Render both empirical CDFs plus the p-value annotation to a PNG.
CairoPNG(bg="transparent",output.path,width=as.integer(config$funsimilarity.diagram.width),height=as.integer(config$funsimilarity.diagram.height))
plot(ecdf(gosem),col="red", xlim=range(c(gosem, gosem.random.forplot)) , main="",xlab="Pair-wise similarity score", ylab="Cumulative distribution")
#lines(ecdf(gosem),col="red",type="l")
grid()
lines(ecdf(gosem.random.forplot))
#text(0.9,0.05, col="blue", paste("P-value < ",round(pval,4) ,sep=""),cex=1, adj = c(0.5, 0.5))
text(0.9,0.05, col="blue", paste("P-value < ",format(pval, scientific = TRUE,digits=2) ,sep=""),cex=0.8, adj = c(0.5, 0.5))
#mtext(paste("P-value < ",round(pval,3) ,sep=""), adj = 1,col="blue")
legend(bty="n","topleft",c("Motif genes CDF","Random genes CDF") ,pch=c(19,19), col=c("red","black") ,cex=1)
dev.off()
}
## Legacy TFmiR pipeline entry point (older version kept for reference).
## Reads a TF list and a miRNA list (each a 2-column tab-separated file:
## name <TAB> regulation direction), retrieves the four interaction
## categories (tf-mirna, mirna-gene, tf-gene, mirna-mirna) passing the
## p-value cutoff, annotates edges with the user-supplied regulation
## directions, and writes full and disease-specific network property
## files into output.folder.
##
## Args:
##   tf.path       path to TF input file (gene \t regulation)
##   mirna.path    path to miRNA input file (mirna \t regulation)
##   pval.cutoff   significance cutoff forwarded to getInteractions()
##   evidence      "Experimental", "Predicted", or "both"
##   disease       optional disease name used to flag disease edges
##   output.folder directory receiving the result files
## Side effects: writes result files plus "finished.txt"; no useful return.
## NOTE(review): getInteractions / exportNetworkProperties / writeToLog are
## defined elsewhere in this project -- behavior inferred from call sites only.
TFMir_old =function(tf.path, mirna.path,pval.cutoff=0.05,evidence,disease="",output.folder)
{
# tf.path="tf.sample2.txt"
# mirna.path="mirna.sample.txt"
# pval.cutoff=0.05
# disease="Melanoma"
# disease="Neoplasm"
# disease="Alzheimer Disease"
# disease=""
# output.folder="user8"
# evidence="Experimental"
# evidence="Predicted"
# evidence="both"
#
## ========================
## log the input parameters
## ========================
writeToLog(tf.path)
writeToLog(mirna.path)
writeToLog(pval.cutoff)
writeToLog(disease)
writeToLog(evidence)
writeToLog(output.folder)
writeToLog("=====================================")
## ===========================
## Create the output directory
## ===========================
# output.path= file.path(config$output.directory,output.folder)
# if (! file.exists(output.path)){
# dir.create(file.path(output.path))
# }
## output.folder is expected to already exist (directory creation disabled above)
output.path= output.folder
## ================================================
## read the input files and intersect with transmir
## ================================================
tfs.df = read.delim(tf.path, header=FALSE)
names(tfs.df)=c("gene","gene.reg")
tfs.df=tfs.df[!duplicated (tfs.df),]
## TF symbols are normalized to upper case for matching
tfs.input=toupper(unique(as.character(unlist(tfs.df$gene))))
mirnas.df = read.delim(mirna.path, header=FALSE)
names(mirnas.df)=c("mirna","mirna.reg")
## miRNA identifiers are normalized to lower case for matching
mirnas.df$mirna=tolower(mirnas.df$mirna)
mirnas.df=mirnas.df[!duplicated (mirnas.df),]
mirnas.input=unique(as.character(unlist(mirnas.df$mirna)))
## "both" expands to the two evidence levels used by getInteractions()
if(tolower(evidence)=="both") { evidence=c("Experimental","Predicted") }
## ==================================
## get the four kinds of interactions
## ==================================
tf.mirna.res=getInteractions(category="tf-mirna",reg.input=tfs.input,target.input=mirnas.input,disease=disease,evidence=evidence,output.path=output.path,pval.cutoff=pval.cutoff)
mirna.gene.res=getInteractions(category="mirna-gene",reg.input=mirnas.input,target.input=tfs.input,disease=disease,evidence=evidence,output.path=output.path,pval.cutoff=pval.cutoff)
tf.gene.res=getInteractions(category="tf-gene",reg.input=tfs.input,target.input=tfs.input,disease=disease,evidence=evidence,output.path=output.path,pval.cutoff=pval.cutoff)
mirna.mirna.res=tf.mirna.res[tf.mirna.res$category=="mirna-mirna",] # return initial empty structure
if("Predicted" %in% evidence) ## because mirna-mirna interactions are predictions only
mirna.mirna.res=getInteractions(category="mirna-mirna",reg.input=mirnas.input,target.input=mirnas.input,disease=disease,evidence=evidence,output.path=output.path,pval.cutoff=pval.cutoff)
## ======================================================================================================================
## Combine these interactions and get those related to disease only (disease speccific network) (if disease is specified)
## ======================================================================================================================
## Only keep columns common to all four result frames before rbind-ing them
input=list( tf.genes=names(tf.gene.res), mirna.genes=names(mirna.gene.res),tf.mirna=names(tf.mirna.res),mirna.mirna=names(mirna.mirna.res))
columns=Reduce(intersect,input)
all.res=rbind(tf.mirna.res[,columns],mirna.mirna.res[,columns],mirna.gene.res[,columns],tf.gene.res[,columns])
## Stack TF + miRNA inputs into one node table, then merge it in twice:
## once keyed on "target", once keyed on "regulator". The renames below are
## order-sensitive -- nodes.input is reused with different column names.
names(mirnas.df)=c("node","regulation")
names(tfs.df)=c("node","regulation")
nodes.input=rbind(mirnas.df,tfs.df)
names(nodes.input)=c("target","target.reg")
all.res=merge(all.res,nodes.input,by="target")
names(nodes.input)=c("regulator","regulator.reg")
all.res=merge(all.res,nodes.input,by="regulator")
## Disease-specific subnetwork: either endpoint annotated to the disease
all.res.disease=all.res[all.res$is_regulator_in_disease==TRUE | all.res$is_target_in_disease==TRUE,]
if(dim(all.res)[1] > 0)
exportNetworkProperties (all.res,file.path(output.path,"all"), disease,pval.cutoff)
if(dim(all.res.disease)[1] > 0)
exportNetworkProperties (all.res.disease,file.path(output.path,"disease"),disease,pval.cutoff)
## Sentinel file signalling completion to the (web) front end -- presumably;
## TODO confirm against the caller.
write("finished", file=file.path(output.path,"finished.txt"),append=F,sep="\n")
}
# Let Graph G(V,E) be a connected graph, n=|V|, adj is the adjacency matrix of the graph G, and adj(i,i)=0, X is a binary array of size n, such that X(i)=1 if node I was marked as a key node, and 0 otherwise.
# Objective Function: Min Σi X(i) Subjected to: ∀i Σi adj(i,j) . X(j)>= 1
#
# Let \ Graph \ G(V,E) \ be \ a \ connected\ graph, n=|V|,\\\ adj\ is \ the \ adjacency\ matrix \ of\ G, and \ adj(i,i)=0,\\ X is \ a\ binary\ array\ ofsize\ n, such\ that \ X(i)=1\ if \\ node \ i \ was \ marked \ as \ a \ key\ node, and \ 0 \ otherwise. \\ Then,\ The \
# Objective \ Function: Min\sum _{i=1}^{ n} {X(i)} \ \ \ \ \ \ \\ Subjected \ to:\forall i \sum_{i}^{n} adj(i,j)\ast X(j) >=1
# # \textrm{Let Graph } G(V,E) \textrm{ be a connected graph, } n=|V|, \\ \textrm{adj is the adjacency matrix of } G, \textrm{and adj}(i,i)=0,\\ X \textrm{ is a binary array of size } n, \textrm{such that }X(i)=1 \textrm{ if node}\\ i \textrm{ was marked as a key node, and 0 otherwise. Then, the}\\
# \textrm{Objective Function:} \\
# \min\sum _{i=1}^{ n} {X(i)}, \textrm{subjected to: }\forall i \sum_{i}^{n} \textrm{adj}(i,j)\cdot X(j) >=1
# \\
# CR = \frac{N_d}{N_t}
# P = 1 - \sum^x_{i=0} \frac{\binom{k}{i}\binom{M-k}{N-i}}{\binom{M}{N}}\\
# Z = \frac{N_o - N_m}{\sigma}
|
b3ecffcf8a2a85b7c997bdf77e92ecf9c9736c19
|
5d1dca92964fb981ca109ecb6af365d1d4077f57
|
/man/intron.Rd
|
19a5a440899a93c947eea8c5f398532a087742e8
|
[] |
no_license
|
xyang2uchicago/BioTIP
|
6a862e490aaecbf33278eaa99c11c46ff67b9919
|
037c82e06d0e78f10d0611427edfa90b86013e12
|
refs/heads/master
| 2023-09-01T13:14:23.069682
| 2023-08-28T20:53:17
| 2023-08-28T20:53:17
| 184,810,257
| 8
| 5
| null | null | null | null |
UTF-8
|
R
| false
| true
| 726
|
rd
|
intron.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{intron}
\alias{intron}
\title{Coding transcriptome in chr21 dataset}
\format{
A data frame with 659327 rows and 5 variables:
\describe{
\item{seqnames}{chromosome names (chr1,chrM,chr21)}
\item{ranges}{chromosome ranges on the genome(167684657--167729508)}
\item{strand}{specific strand of the genomic location (+,-,*)}
\item{name}{internal assigned names(uc001aaa.3_intron_0_0_chr1_12228_f)}
\item{score}{score not used in this data set(0)}
}
}
\usage{
intron
}
\description{
A dataset of intron annotations (genomic intervals) covering the chromosome
regions of interest (chr1, chr21, chrM). The variables are as follows:
}
\keyword{datasets}
|
c9a978140f111a51838e863f6811891e800ed441
|
5f6369b039c01b619656d531d2eea98f4f0ab389
|
/Plots_of_ROIs.R
|
069e9dedbf9885a8effdfad56c12126292495a40
|
[] |
no_license
|
SandraTamm/TSPO_PET_in_allergy
|
274a56d1dcb7732dd5118efad2b544dcc89c05bd
|
65eeea332dedaca7cdf73084bf103c4575beb80a
|
refs/heads/master
| 2021-05-23T05:42:20.086623
| 2021-03-05T10:05:22
| 2021-03-05T10:05:25
| 94,869,411
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,597
|
r
|
Plots_of_ROIs.R
|
# Setup for the ROI plots: per-region TSPO PET V_T (2TCM, 3-exp input)
# before vs. during pollen season, allergy vs. control subjects.
# NOTE(review): require() returns FALSE instead of erroring on a missing
# package; library() would fail fast. Left as-is.
require(gdata)
require(ggplot2)
require(nlme)
require(plyr)
require(gridExtra)
require(cowplot)
# Project helpers: summarySEwithin() and multiplot()
source('Utils/SummarisingFunctions.R', chdir = T)
source('Utils/Multiplot.R', chdir = T)
# NOTE(review): hard-coded absolute path -- breaks on other machines.
setwd("~/Desktop/RAALLPET")
# Loads Data_63 (subject-level V_T per ROI) -- column names per plot below.
load("PET_VT_63_2TCM_3exp.RData")
# ----------------------------------------------------------------------
# One panel per region of interest (ROI): each subject drawn as a dotted
# line across pollen-season status, group means (via summarySEwithin) as
# thick lines, allergy in black vs. control in red.
#
# The original script repeated an identical ~20-line ggplot block for each
# of the 25 ROIs; plot_roi() factors that out while preserving every
# per-ROI difference (na.rm, legend placement, legend labels).
#
#   var          name of the Data_63 column holding this ROI's V_T values
#   ylab         y-axis label (may contain "\n" line breaks)
#   na.rm        forwarded to summarySEwithin (FALSE for GM/LFC/MFC,
#                as in the original script)
#   legend       legend.position ("top" for the first panel, "none" else)
#   group_labels legend labels for the "All" / "Ctrl-all" groups
#
# Uses the ggplot2 .data pronoun to reference a column by its name string
# (requires ggplot2 >= 3.0).
# ----------------------------------------------------------------------
dodge <- position_dodge(width = 0.2)

plot_roi <- function(var, ylab, na.rm = TRUE, legend = "none",
                     group_labels = c("Allergy", "Control")) {
  summary_df <- summarySEwithin(data = Data_63, measurevar = var,
                                betweenvars = "Group",
                                withinvars = "Pollen_status",
                                idvar = "Subject", na.rm = na.rm)
  ggplot(data = Data_63,
         aes(Pollen_status, .data[[var]], group = Subject,
             colour = factor(Group), ymin = 0, ymax = 7)) +
    geom_jitter(position = dodge, size = 0) +
    geom_line(position = dodge, size = 0.6, linetype = "dotted") +
    geom_line(data = summary_df,
              aes(Pollen_status, .data[[var]], group = Group), size = 1.2) +
    scale_color_manual(name = "Group", values = c("black", "red"),
                       breaks = c("All", "Ctrl-all"),
                       labels = group_labels) +
    theme(legend.position = legend) +
    xlab("Pollen season") +
    ylab(ylab)
}

# Only the grey-matter panel carries the legend (shared across the grid).
GM_plot   <- plot_roi("GM",     "Grey matter", na.rm = FALSE, legend = "top",
                      group_labels = c("Allergy", "Healthy subjects"))
LFC_plot  <- plot_roi("X.LFC",  "Lateral Frontal \nCortex", na.rm = FALSE)
MFC_plot  <- plot_roi("X.MFC",  "Medial Frontal \nCortex",  na.rm = FALSE)
OFC_plot  <- plot_roi("X.OFC",  "Orbital Frontal \nCortex")
ACC_plot  <- plot_roi("X.ACC",  "Anterior Cingulate \nCortex")
INS_plot  <- plot_roi("X.INS",  "Insula")
LTC_plot  <- plot_roi("X.LTC",  "Lateral Temporal \nCortex")
MTC_plot  <- plot_roi("X.MTC",  "Medial Temporal \nCortex")
SMC_plot  <- plot_roi("X.SMC",  "Sensory Motor Cortex")
TP_plot   <- plot_roi("X.TP",   "Temporal Pole")
LPC_plot  <- plot_roi("X.LPC",  "Lateral Parietal \nCortex")
PCC_plot  <- plot_roi("X.PCC",  "Posterior Cingulate \nCortex")
MIOC_plot <- plot_roi("X.MIOC", "Medial Inferior \nOccipital Cortex")
LOC_plot  <- plot_roi("X.LOC",  "Lateral Occipital \nCortex")
HIP_plot  <- plot_roi("X.HIP",  "Hippocampus")
PHIP_plot <- plot_roi("X.PHIP", "Parahippocampal \ngyrus")  # typo "Parahipppocampal" fixed
MCC_plot  <- plot_roi("X.MCC",  "Medial Cingulate \nCortex")
MPC_plot  <- plot_roi("X.MPC",  "Medial Parietal \nCortex")
AMG_plot  <- plot_roi("AMG",    "Amygdala")
CAU_plot  <- plot_roi("CAU",    "Caudate")
PUT_plot  <- plot_roi("PUT",    "Putamen")
PAL_plot  <- plot_roi("PAL",    "Pallidum")
THA_plot  <- plot_roi("THA",    "Thalamus")
WM_plot   <- plot_roi("WM",     "White Matter")
CER_plot  <- plot_roi("X.CER",  "Cerebellum")

# 5-column grid, same panel order as the original script.
multiplot(GM_plot, LFC_plot, MFC_plot, OFC_plot, ACC_plot, INS_plot, LTC_plot, MTC_plot,
          SMC_plot, TP_plot, LPC_plot, PCC_plot, MIOC_plot, LOC_plot, HIP_plot, PHIP_plot,
          MCC_plot, MPC_plot, AMG_plot, CAU_plot, PUT_plot, PAL_plot, THA_plot,
          WM_plot, CER_plot, cols = 5)
|
413255d99d93e9a4f3322996768dc605b1a7838f
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/SEMID/R/SEMID.R
|
a55d3095cf57a4de5e68fc4c91a2bfa017ae6dc4
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 360
|
r
|
SEMID.R
|
#' SEMID package documentation.
#'
#' SEMID provides a number of methods for testing the global/generic
#' identifiability of mixed graphs.
#'
#' The only function you're likely to need from \pkg{SEMID} is
#' \code{\link{graphID}}. Otherwise refer to the individual function
#' documentation.
#'
#' @import igraph
#' @import utils
"_PACKAGE"
#> [1] "_PACKAGE"
|
9ca040cd3365b74c0d8a54185fec694b1ee91044
|
7b7151c25cb3f2bd6492c6a3ee991d7e83c58665
|
/code/data_fig2.r
|
374fb686c0e56d12e25a60c01364561bccb47d1f
|
[] |
no_license
|
tpoisot/EvoGeoModules
|
f72b88eb6ef17904ba832cf6c6c1dfb1d72c04d0
|
f25cc1af8667827af7efec2ae97eeb468a1e3b5f
|
refs/heads/master
| 2020-04-12T06:34:08.007909
| 2017-06-06T16:23:17
| 2017-06-06T16:23:17
| 23,810,996
| 0
| 0
| null | 2017-05-23T18:45:14
| 2014-09-08T22:35:56
|
TeX
|
UTF-8
|
R
| false
| false
| 1,952
|
r
|
data_fig2.r
|
# Build data for figure 2: per-interaction PACo contributions, comparing
# each local host-parasite network against the regional metaweb.
library(paco)
library(stringr)
library(igraph)
library(betalink)
library(doMC)
library(ape)
source("commons.r")   # project helpers -- contents not shown here
load("D.Rdata")       # D: PACo result object from an earlier run
load("webs.Rdata")    # raw: list of local networks -- presumably igraph objects; TODO confirm
load("paco_fig1.Rdata")
# Metaweb = union of the local networks; its incidence matrix fixes the
# row (parasite) / column (host) ordering used for the distance matrices.
mw <- metaweb(raw)
host_tree <- read.tree("../data/host.tre")
# Align tip labels with the web naming ("Genus_species" -> "Genus species");
# note str_replace() only replaces the first "_" per label.
host_tree$tip.label <- str_replace(host_tree$tip.label, "_", " ")
# Parasite tree ships without branch lengths; compute.brlen() assigns them.
para_tree <- compute.brlen(read.tree("../data/para.tre"))
para_tree$tip.label <- str_replace(para_tree$tip.label, "_", " ")
A <- incidence(mw)
# Patristic distance matrices, subset and ordered to the metaweb species sets
h_mat <- cophenetic(host_tree)[colnames(A), colnames(A)]
p_mat <- cophenetic(para_tree)[rownames(A), rownames(A)]
# Jackknifed per-link contributions for the global fit, cached back to disk
D <- paco_links(D)
save(D, file="D.Rdata")
# Per-link PACo contributions for one local network `n`, computed twice:
#   "reg" -- against the regional (metaweb) incidence restricted to the
#            species present locally (reduced_A)
#   "loc" -- against the network's own local incidence
# Relies on the globals A (metaweb incidence), h_mat and p_mat (patristic
# distance matrices) defined above. Returns list(loc=..., reg=...) of
# jackknifed mean contributions, named by interaction.
# NOTE(review): "blues" (out-degree > 0) look like parasites / matrix rows
# and "reds" (in-degree > 0) like hosts / columns, matching A[blues, reds]
# -- confirm the graphs' edge orientation.
l_contrib <- function(n)
{
blues <- V(n)$name[degree(n, mode="out")>0]
reds <- V(n)$name[degree(n, mode="in")>0]
reduced_A <- A[blues,reds]
local_A <- incidence(n)
# 1000 permutations each; these two calls dominate the runtime.
regD <- paco_links(PACo(add_pcoord(prepare_paco_data(H=h_mat[reds,reds], P=p_mat[blues,blues], reduced_A)), nperm=1000))
locD <- paco_links(PACo(add_pcoord(prepare_paco_data(H=h_mat[reds,reds], P=p_mat[blues,blues], local_A)), nperm=1000))
output <- list(loc=locD$jackknife$mean, reg=regD$jackknife$mean)
return(output)
}
#coevolved_locales <- subset(fig1dat, loc <= .05 & reg <= .05)$`.id`
#co_raw <- raw[coevolved_locales]
# Significance filter above is disabled: all sites are processed.
co_raw <- raw
## Parallel version: 6 workers through the doMC backend used by llply()
registerDoMC(6)
fig2dat <- llply(co_raw, l_contrib, .parallel=TRUE)
f2d <- data.frame()
for(S in names(fig2dat))
{
sites <- fig2dat[[S]]
df_loc <- data.frame(cbind(int=names(sites$loc), score=sites$loc))
df_loc$type <- rep("loc", nrow(df_loc))
sr <- sites$reg[names(sites$loc)]
df_reg <- data.frame(cbind(int=names(sr), score=sr))
df_reg$type <- rep("reg", nrow(df_reg))
df_all <- data.frame(rbind(df_loc, df_reg))
df_all$site <- rep(S, nrow(df_all))
f2d <- rbind(f2d, df_all)
}
f2d$rvalue <- D$jackknife$mean[as.character(f2d$int)]
rownames(f2d) <- c(1:nrow(f2d))
f2d$score <- as.numeric(as.vector(f2d$score))
fig2dat <- f2d
save(fig2dat, file="paco_fig2.Rdata")
|
60fd6efed101f9a60a47b282679f09ed747a13cc
|
0d0cb4f86925ee4b2c8f91fdd388d59c39ccd2c3
|
/scripts/R-Lunches4.R
|
f344b5aaa882d43bdc1e933d898ccd67df2a6a87
|
[] |
no_license
|
brusko/learningR
|
1b5ada25d78c6ee5ff38433f6dcc0b85c13c6e33
|
a672d97eefe23b70c8047fdef9d1dfe70b718aca
|
refs/heads/master
| 2021-05-09T23:36:52.447506
| 2018-01-24T18:52:21
| 2018-01-24T18:52:21
| 118,797,751
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,425
|
r
|
R-Lunches4.R
|
## R-Lunches4.R
getwd() ## make sure you are in the learningR project directory
list.files() ## to see if you have the three "sub-directories", data, scripts, and output
## load packages
## use install.packages('packagename') if package has never been installed
library(tidyverse)
library(EDAWR)
install.packages('nycflights13')
library(nycflights13)
?flights
# Filter rows with filter()
filter(flights, month == 1, day == 1)
flights %>% filter(month==1, day ==1)
jan1 <- filter(flights, month == 1, day == 1)
# wrap assignment in parentheses to print results
(dec25 <- filter(flights, month == 12, day == 25))
# Comparisons
filter(flights, month = 1) ## wrong
filter(flights, month == 1) ## correct
filter(flights, month == 11 | month == 12)
# Exercises
## 1. Find all flights that
## 1. Had an arrival delay of two or more hours.
## 2. Flew to Houston (IAH or HOU)
## 3. Were operated by United, American, or Delta
## 4. Departed in summer (July, August, and September)
## 5. Arrived more than two hours late, but didn’t leave late
## 6. Were delayed by at least an hour, but made up over 30 minutes in flight
## 7. Departed between midnight and 6am (inclusive)
## Arrange rows with arrange()
arrange(flights, year, month, day)
arrange(flights, desc(arr_delay)) ## descending
## Exercises
##@ 1. How could you use arrange() to sort all missing values to the start? (Hint: use is.na()).
##@ 2. Sort flights to find the most delayed flights. Find the flights that left earliest.
##@ 3. Sort flights to find the fastest flights.
##@ 4. Which flights travelled the longest? Which travelled the shortest?
## Select columns with select()
# Select columns by name
select(flights, year, month, day)
# Select all columns between year and day (inclusive)
select(flights, year:day)
# Select all columns except those from year to day (inclusive)
select(flights, -(year:day))
## everything helper function
select(flights, time_hour, air_time, everything())
## Add new variables with mutate()
## start with a smaller dataset
flights_sml <- select(flights,
year:day,
ends_with("delay"),
distance,
air_time
)
mutate(flights_sml,
gain = arr_delay - dep_delay,
speed = distance / air_time * 60
)
# Note that you can refer to columns that you’ve just created:
mutate(flights_sml,
gain = arr_delay - dep_delay,
hours = air_time / 60,
gain_per_hour = gain / hours
)
## Grouped summaries with summarise()
summarise(flights, delay = mean(dep_delay))
summarise(flights, delay = mean(dep_delay, na.rm = TRUE))
## summarize not really useful unless paired with group_by()
by_day <- group_by(flights, year, month, day)
summarise(by_day, delay = mean(dep_delay, na.rm = TRUE))
# Combining multiple operations with the pipe
## old school
by_dest <- group_by(flights, dest)
delay <- summarise(by_dest,
count = n(),
dist = mean(distance, na.rm = TRUE),
delay = mean(arr_delay, na.rm = TRUE)
)
delay <- filter(delay, count > 20, dest != "HNL")
## using the "pipe"
## this approach focuses on the transformatoins, not on what's being transformed.
delays <- flights %>%
group_by(dest) %>%
summarise(
count = n(),
dist = mean(distance, na.rm = TRUE),
delay = mean(arr_delay, na.rm = TRUE)
) %>%
filter(count > 20, dest != "HNL")
#############################################
## ggplot2
library(ggplot2)
# there is a built-in dataset in ggplot2, called mpg
?mpg
## let's use ggplot2 to answer the question: do cars with big engines
## use more fuel than cars with smaller engines...
mpg
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy))
## follows a template:
## ggplot(data = <DATA>) + <GEOM_FUNCTION>(mapping = aes(<MAPPINGS>))
# Exercises
##@ 1. Run ggplot(data = mpg) what do you see?
##@ 2. What does the drv variable describe? Read the help for ?mpg to find out.
##@ 3. Make a scatterplot of hwy vs cyl.
##@ 4. What happens if you make a scatterplot of class vs drv. Why is the plot not useful?
################################
data(mtcars)
?mtcars
head(mtcars)
## base R
plot(mtcars$wt, mtcars$mpg)
## ggplot2
ggplot(data = mtcars, aes(x = wt, y = mpg)) + geom_point()
## line graph
str(pressure)
ggplot(data = pressure, aes(x = temperature, y = pressure)) + geom_line()
# Add points
ggplot(data = pressure, aes(x = temperature, y = pressure)) + geom_line() +
geom_point()
## Histograms
# base R
hist(mtcars$mpg)
# ggplot2
ggplot(data = mtcars, aes(x = mpg)) + geom_histogram()
ggplot(data = mtcars, aes(x = mpg)) + geom_histogram(binwidth = 4)
## Boxplots
str(ToothGrowth)
head(ToothGrowth)
summary(ToothGrowth$dose)
table(ToothGrowth$dose)
plot(ToothGrowth$supp, ToothGrowth$len)
boxplot(len ~ supp, data = ToothGrowth) ## formula syntax
## ggplot2
ggplot(data = ToothGrowth, aes(x = supp, y = len)) + geom_boxplot()
# First principles of creating a plot
# 1. A dataframe containing what you are plotting should be specified.
# 2. An aesthetic must be given declaring x, y or both as some variable in the data.frame. For some plots (histograms, density, e.g.), one of these will be calculated as a stat from the data.
# 3. A geom must be specified, stating how the aesthetics will appear as geomtrical objects.
|
6a88bfbf059d5390eb0d2fc9adb55dda3adb2298
|
c26548e53ef2c8809a622d86582d7150b9955a4f
|
/R/ccle_barplot.R
|
d218f558f58561d6960773a95eacb0e82475a83e
|
[] |
no_license
|
kevinblighe/AunerLab_CCLE
|
2a75f25345afa468b432b1867683b8361d7ecb2f
|
4c8d9d6491034f6f62785df136c16490f860640d
|
refs/heads/master
| 2020-09-16T04:09:15.801103
| 2020-01-28T02:35:39
| 2020-01-28T02:35:39
| 223,648,553
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,616
|
r
|
ccle_barplot.R
|
#' Bar plot of a single gene's RPKM expression across CCLE cell lines.
#'
#' Cell lines are selected from the clinical annotation by matching
#' \code{keyword} against the \code{Hist_Subtype1} column (falling back to
#' \code{Histology} when fewer than two lines match), then the expression of
#' \code{gene} is plotted as one bar per line, ordered by decreasing
#' expression.
#'
#' @param ccledata expression matrix / data.frame (genes x cell lines, RPKM);
#'   rownames are gene symbols, colnames are CCLE cell-line identifiers.
#' @param clinicaldata clinical annotation; column 1 holds the cell-line IDs
#'   and it must contain \code{Hist_Subtype1} and \code{Histology} columns.
#' @param keyword regular expression used to select cell lines by histology.
#' @param gene gene symbol (a rowname of \code{ccledata}) to plot.
#' @param title plot title.
#' @param xlab x-axis label.
#' @param greyscale if TRUE (default), fill bars with a grey palette.
#' @param colour optional single colour applied to all bars (takes precedence
#'   over \code{greyscale} because it is applied last).
#' @param titlesize,axissize font sizes for the title and axis text.
#' @return a ggplot object (invisible side effects: none).
ccle_barplot <- function(
  ccledata,
  clinicaldata,
  keyword,
  gene,
  title,
  xlab,
  greyscale = TRUE,
  colour = NULL,
  titlesize = 24,
  axissize = 16) {

  # Extract the names of matching cell lines (e.g. plasma cell myeloma /
  # multiple myeloma lines).
  # Fix: subset the `clinicaldata` argument rather than the global
  # `sampleinfo` object the original relied on, so the function is
  # self-contained and actually uses its parameter.
  lines <- clinicaldata[grep(keyword, clinicaldata$Hist_Subtype1), 1]
  if (length(lines) < 2) {
    # Fall back to the coarser histology annotation.
    lines <- clinicaldata[grep(keyword, clinicaldata$Histology), 1]
    if (length(lines) < 2) {
      stop('Error - too few cell-lines or nothing found')
    }
  }

  # Filter the expression data to the selected lines only.
  exprdata <- ccledata[, which(colnames(ccledata) %in% lines)]

  # Extract the gene's expression levels and convert to 'long' format.
  ggdata <- reshape2::melt(exprdata[which(rownames(exprdata) %in% gene), ])

  # Rename columns and order the bars by expression level (descending).
  colnames(ggdata) <- c('Line', 'Expression')
  ggdata <- ggdata[order(ggdata$Expression, decreasing = TRUE), ]

  # Tidy up cell-line names: strip the tissue suffix ('$' matches end of
  # field); the factor levels fix the bar order in the plot.
  ggdata$Line <- gsub('_[A-Za-z0-9_]*$', '', ggdata$Line)
  ggdata$Line <- factor(ggdata$Line, levels = ggdata$Line)

  # Basic barplot, one bar per cell line.
  p <- ggplot(data = ggdata, aes(x = Line, y = Expression, fill = Line)) +
    geom_bar(stat = 'identity') +

    # Axis and main title(s); the gene symbol is italicised in the y label.
    xlab(xlab) +
    ylab(bquote(italic(.(gene))~RPKM~expression)) +
    ggtitle(title) +

    # Base theme / text size.
    theme_bw(base_size = 24) +

    # Modify various aspects of the plot text and legend.
    theme(
      plot.title = element_text(angle = 0, size = titlesize, face = "bold", vjust = 1),
      axis.text.x = element_text(angle = 90, size = axissize, face = "bold", hjust = 1.0, vjust = 0.5),
      axis.text.y = element_text(angle = 0, size = axissize, face = "bold", vjust = 0.5),
      axis.title = element_text(size = 24, face = "bold"),

      # Legend: 'none' turns it off; the remaining settings are kept so it
      # can be re-enabled without re-tuning.
      legend.position = "none",
      legend.background = element_rect(),
      legend.key = element_blank(),       # removes the border
      legend.key.size = unit(1, "cm"),    # overall area/size of the legend
      legend.text = element_text(size = 16),
      title = element_text(size = 16)) +

    # Size of the icons/symbols in the legend (when shown).
    guides(colour = guide_legend(override.aes = list(size = 2.5)))

  # Apply the requested fill scale and return the plot.
  if (greyscale == TRUE) {
    p <- p + scale_fill_grey()
  }
  if (!is.null(colour)) {
    p <- p + scale_fill_manual(values = rep(colour, nrow(ggdata)))
  }
  return(p)
}
|
50cfcbc531e170c4b2a00058a6a296ae2b047a26
|
78dca0d0127674ced44152a0646b2ab123b7c0aa
|
/gbs_functions/gbs_rrblup_valid.R
|
f203b5eadda216deadabca9969efb1a1fbaaa2a0
|
[] |
no_license
|
aho25/GS_BDE
|
70bccd668bf8e75651aa336dc1ebaa7b926939f2
|
1035f5886c6efed12c523857d00781a0edd7f17b
|
refs/heads/master
| 2021-03-25T20:02:14.882435
| 2020-06-03T10:46:06
| 2020-06-03T10:46:06
| 247,642,608
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 489
|
r
|
gbs_rrblup_valid.R
|
library(rrBLUP)
### Define function for final_features validation
# Validate an rrBLUP genomic prediction model on a hold-out set.
#
# PHENO_TRAIN / PHENO_TEST: data frames whose first column is the phenotype.
# MARKERS_TRAIN / MARKERS_TEST: marker matrices for the same individuals.
# VAL.ARGS: accepted for interface compatibility (unused here, as in the
# original).
#
# Returns the Pearson correlation (cor.test estimate) between predicted and
# observed test phenotypes.
gbs_rrblup_valid <- function(PHENO_TRAIN, MARKERS_TRAIN, PHENO_TEST, MARKERS_TEST, VAL.ARGS) {
  # Fit ridge-regression BLUP on the training set (rrBLUP::mixed.solve).
  fit <- mixed.solve(PHENO_TRAIN[, 1], Z = MARKERS_TRAIN, K = NULL,
                     SE = FALSE, return.Hinv = FALSE)
  # Predicted phenotype = intercept (beta) + marker effects (u) applied to
  # the test genotypes.
  predicted <- fit$beta[1] + as.matrix(MARKERS_TEST) %*% fit$u
  # Prediction accuracy = correlation with the observed test phenotypes.
  cor.test(predicted, PHENO_TEST[, 1])$estimate
}
|
983bab37300de1298e898737323c2c93a41918f8
|
4f217be84965dcdf28299a7ffea4724d2ef662e4
|
/R/gta rbind.R
|
95e680df5b6a9cba8785ecb35834d8299eb2b659
|
[] |
no_license
|
global-trade-alert/gtalibrary
|
694cbc2718954ca8737ab2d2e72c787da649df68
|
a8ad12b2792f5558dacde494adbd7c13faffff49
|
refs/heads/master
| 2023-08-17T09:21:23.631486
| 2023-08-08T09:45:05
| 2023-08-08T09:45:05
| 145,339,633
| 7
| 1
| null | 2023-07-17T17:01:39
| 2018-08-19T21:43:20
|
R
|
UTF-8
|
R
| false
| false
| 782
|
r
|
gta rbind.R
|
# Roxygen documentation
#' Rbind dataframes with different columns.
#'
#' Row-binds a list of data frames whose column sets may differ; columns
#' absent from a given data frame are filled with NA. The result's columns
#' follow the first data frame's order, then any remaining columns in order
#' of first appearance across the list.
#'
#' @param list Supply a list of dataframes. E.g. list = list(df1, df2, df3).
#' @return A single data.frame containing all rows and the union of all
#'   input columns.
#'
#' @references www.globaltradealert.org
#' @author Global Trade Alert
gta_rbind <- function(list) {
  # Union of every column name, in order of first appearance.
  all.cols <- unique(unlist(lapply(list, colnames)))
  # Pad each data frame with its missing columns (filled with NA).
  padded <- lapply(list, function(df) {
    df <- as.data.frame(df)
    missing.cols <- setdiff(all.cols, colnames(df))
    if (length(missing.cols) > 0) {
      df[, missing.cols] <- NA
    }
    df
  })
  # rbind.data.frame matches columns by name, so differing column orders in
  # the padded frames are handled correctly. Unlike the original's explicit
  # 2:length(list) loop, this also works when `list` holds a single data
  # frame (2:1 used to index out of bounds) and avoids O(n^2) growth.
  do.call(rbind, padded)
}
|
5c39141e12ec21fb6ef0094e5f4b954bf09b1b9b
|
bc5aa2493a04fab4ab76a54c135b6c91bb50a921
|
/hw7/random_forest_tune.R
|
ab421e8fd8e4399034014a0a0831ef5f50806bde
|
[] |
no_license
|
reking/stat852
|
b3caba3d5ecfbb1e61874e955cb3e43b2fbdcd1d
|
e704cfe9f4f49b43fda64211a78188bf9cfabc97
|
refs/heads/master
| 2021-01-10T05:04:34.782068
| 2016-01-08T06:08:18
| 2016-01-08T06:08:18
| 44,140,619
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,731
|
r
|
random_forest_tune.R
|
# Gradient Boosting using gbm
# NOTE(review): despite the header comment, this script tunes randomForest
# (a grid over mtry x nodesize) on the abalone data, not gbm.
library(randomForest)
abelone <- read.table("~/stat852/data/abalone.data", header=TRUE, sep=",", na.strings=" ")
colnames(abelone) <- c("Sex","Length","Diameter","Height","Whole","Shucked","Viscera","Shell","Rings")
abelone$Sex <- as.factor(abelone$Sex)
set.seed(41891019)
# Tuning grid: mtry candidates (`nodes`) and nodesize candidates (`mino`).
nodes <- cbind(1,2,3,4,5,6,7,8)
mino <- cbind(5, 10 ,20)
iter=20
# One row per (mtry, nodesize) pair: columns 1-2 hold the pair, columns
# 3..iter+2 hold the per-replicate error estimates.
abelone.oob <- matrix(NA,nrow = length(nodes)*length(mino),ncol=iter+2)
for(i in 1:iter)
{
# Fresh random 75/25 train/test split for each replicate.
rice <- runif(1,0,1)
set.seed(rice * 10000000)
abelone$set <- ifelse(runif(n=nrow(abelone))>0.75, yes=2, no=1)
y.1 <- abelone[which(abelone$set==1),9]
x.1 <- abelone[which(abelone$set==1),-c(9,10)]
y.2 <- abelone[which(abelone$set==2),9]
x.2 <- abelone[which(abelone$set==2),-c(9,10)]
# NOTE(review): this 66% resample (x.r/y.r train, x.p/y.p hold-out) is
# computed but never used below -- the forest is fit on the full x.1/y.1.
# Confirm whether randomForest() was meant to use x.r/y.r.
resamp <- sample.int(n=nrow(x.1), size=0.66 * nrow(x.1), replace=FALSE)
x.r <- x.1[resamp,]
y.r <- y.1[resamp]
x.p <- x.1[-unique(resamp),]
y.p <- y.1[-unique(resamp)]
ii = 1
for(noo in nodes)
for(moo in mino)
{
abelone.oob[ii,1:2] <- c(noo,moo)
# Fit on the training split; error is the MSE of $predicted, which for
# randomForest holds the out-of-bag predictions (per the package docs).
abalone.rf <- randomForest(x.1, y.1, importance=TRUE, ntree=600, mtry=noo,nodesize=moo, keep.forest=TRUE)
abelone.oob[ii,i+2] <- mean((y.1-abalone.rf$predicted)^2)
ii <- ii + 1
}
# Mean error per parameter pair. NOTE(review): recomputed inside the
# replicate loop over a still partly-NA matrix (no na.rm), so Mean_val /
# best_para are only meaningful after the final iteration.
Mean_val <- rowMeans(abelone.oob[,-c(1,2)])
best_index <- which.min(Mean_val)
best_para <- abelone.oob[best_index,1:2]
}
# NOTE(review): the labels paste columns 1-4, i.e. both tuning parameters
# plus the first two replicate errors -- likely meant columns 1-2 only; and
# the boxplot below drops columns 1-4 rather than 1-2. Verify intent.
siz.dec <- paste(abelone.oob[,1],abelone.oob[,2],abelone.oob[,3],abelone.oob[,4])
# quartz() opens a macOS-only graphics device.
quartz(h=7,w=12,pointsize=12)
boxplot.matrix(x=sqrt(abelone.oob[,-c(1,2,3,4)]), use.cols=FALSE, names=siz.dec)
|
ba8ef6d8421a8def2215ed7b646a1382ca3909d5
|
3cc68045fd140e7def6648f6e561cc07d78abf44
|
/R/SingleR.R
|
b0eaae096ff6ae2e4f25ebc892e036eee3fcd294
|
[] |
no_license
|
nyuhuyang/scRNAseq-MouseSkinEpithelia
|
e09b00c4d608e8a2236e98c21ae22206ed4f76c6
|
c2ba9b42d8db660c13b0075c89740fedd1220ea8
|
refs/heads/master
| 2020-03-26T12:30:36.633436
| 2020-03-01T03:58:30
| 2020-03-01T03:58:30
| 144,896,399
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,785
|
r
|
SingleR.R
|
library(SingleR)
library(Seurat)
library(reshape2)
library(pheatmap)
library(kableExtra)
source("../R/Seurat_functions.R")
source("../R/SingleR_functions.R")
#====== 2.1 Create Singler Object ==========================================
lname1 = load(file = "data/MouseSkin_alignment.Rda");lname1
lname2 = load(file='../SingleR/data/ref_Mouse.RData');lname2
ref_immgen_mouse.rnaseq$name
length(ref_immgen_mouse.rnaseq$types)
length(unique(ref_immgen_mouse.rnaseq$types))
length(unique(ref_immgen_mouse.rnaseq$main_types))
#pca
DimPlot(object = MouseSkin, reduction.use = "tsne", no.legend = TRUE,
do.return = TRUE,vector.friendly = F, pt.size = 1,
do.label = TRUE,label.size = 8, group.by = "ident") +
ggtitle("Cluster ID") +
theme(plot.title = element_text(hjust = 0.5))
singler = CreateSinglerObject(as.matrix(MouseSkin@data), annot = NULL,
project.name=MouseSkin@project.name,
min.genes = 500,technology = "10X", species = "Mouse",
ref.list = list(ref_immgen_mouse.rnaseq), normalize.gene.length = F,
variable.genes = "de",
fine.tune = F, do.signatures = F, clusters = NULL)
GC()
singler$meta.data$orig.ident = MouseSkin@meta.data$orig.ident # the original identities, if not supplied in 'annot'
singler$meta.data$xy = MouseSkin@dr$tsne@cell.embeddings # the tSNE coordinates
singler$meta.data$clusters = MouseSkin@ident # the Seurat clusters (if 'clusters' not provided)
save(singler,file="./output/singler_MouseSkin.RData")
#====== 3.2 SingleR specifications ==========================================
# Step 1: Spearman coefficient
lnames = load(file = "./output/singler_MouseSkin.RData")
lnames
singler$seurat = MouseSkin # (optional)
SingleR.DrawScatter(sc_data = singler$seurat@data,cell_id = 10,
ref = immgen, sample_id = 232)
# Step 2: Multiple correlation coefficients per cell types are aggregated
# to provide a single value per cell type per single-cell.
# In the examples below we use the 80% percentile of correlation values.
# for visualization purposes we only present a subset of cell types (defined in labels.use)
out = SingleR.DrawBoxPlot(sc_data = singler$seurat@data,cell_id = 10,
ref = immgen,main_types = T,
labels.use=c('B cells','T cells','DC','Macrophages','Monocytes','NK cells',
'Mast cells','Neutrophils','Fibroblasts','Endothelial cells'))
print(out$plot)
SingleR.DrawHeatmap(singler$singler[[1]]$SingleR.single.main, top.n = Inf,
clusters = singler$meta.data$orig.ident)
#Or by all cell types (showing the top 50 cell types):
SingleR.DrawHeatmap(singler$singler[[1]]$SingleR.single, top.n = 50,
clusters = singler$meta.data$orig.ident)
SingleR.DrawHeatmap(singler$singler[[1]]$SingleR.single.main,top.n = 50,
normalize = F,clusters = singler$meta.data$orig.ident)
#Next, we can use the fine-tuned labels to color the t-SNE plot:
out = SingleR.PlotTsne.1(singler$singler[[1]]$SingleR.single,
singler$meta.data$xy,do.label=T,
do.letters = F,labels = singler$singler[[1]]$SingleR.single$labels,
label.size = 2, dot.size = 2 ,do.legend = F,alpha = 1,
label.repel = T,force=2)
out+ ggtitle("Supervised sub-cell type labeling by immgen")+
theme(text = element_text(size=20),
plot.title = element_text(hjust = 0.5,size = 18, face = "bold"))
# main types-------
out = SingleR.PlotTsne.1(singler$singler[[1]]$SingleR.single.main,
singler$meta.data$xy,do.label=T,
do.letters = F,labels = singler$singler[[1]]$SingleR.single.main$labels,
label.size = 5, dot.size = 2 ,do.legend = F,alpha = 1,
label.repel = T,force=2)
out + ggtitle("Supervised cell type labeling by immgen and RNA-seq")+
theme(text = element_text(size=20),
plot.title = element_text(hjust = 0.5,size = 18, face = "bold"))
g <- ggplot_build(out$p)
# split singleR plot
output <- SplitSingleR.PlotTsne(singler = singler, split.by = "conditions",main=T,
select.plots =c(2,1),
return.plots= T,do.label=T,do.legend = F,alpha = 1,
label.repel = T, force=2)
plot_grid(output[[1]], output[[2]])
#Finally, we can also view the labeling as a table compared to the original identities:
# cell number
kable(table(singler$singler[[1]]$SingleR.single.main$labels,
singler$meta.data$orig.ident)) %>%
kable_styling()
# cell percentage
prop.table(x = table(singler$singler[[1]]$SingleR.single.main$labels,
MouseSkin@meta.data$orig.ident),margin = 2) %>%
kable() %>% kable_styling()
# total cell number
table(singler$meta.data$orig.ident) %>% t() %>% kable() %>% kable_styling()
# Rename ident
table(names(MouseSkin@ident) == rownames(singler$singler[[1]]$SingleR.single.main$labels))
ident.use <- as.factor(as.character(singler$singler[[1]]$SingleR.single.main$labels))
names(ident.use) = rownames(singler$singler[[1]]$SingleR.single.main$labels)
MouseSkin@ident <- ident.use
TSNEPlot(object = MouseSkin,do.label = F, group.by = "ident",
do.return = TRUE, no.legend = F,
pt.size = 1,label.size = 8 )+
ggtitle("Supervised cell type labeling by GSE43717")+
theme(text = element_text(size=20),
plot.title = element_text(hjust = 0.5))
save(MouseSkin, file = "data/MouseSkin_suplabel_GSE43717.Rda")
|
d9c0e671ad673c09e8e9a8a513fea37034ffa4b1
|
0275ac8727a01f6a61e5b0ab3544288870ac76c3
|
/maxpixels.R
|
077f8541c765bfc9c227f7af3011015c3bd66e79
|
[] |
no_license
|
l-radtke/lradtke-coding-portfolio
|
cd5925d3cce720b3ae6c45e5218ce3b838f848df
|
12004ba13083e430ff6d60c501d29f3160d3e600
|
refs/heads/master
| 2020-07-08T07:52:47.729591
| 2019-08-22T14:42:17
| 2019-08-22T14:42:17
| 203,609,344
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 255
|
r
|
maxpixels.R
|
# Return the area of the smallest polygon in a SpatialPolygons-like object.
#
# Fixes two defects in the original:
#  * it opened with a Python-style triple-quoted docstring, which is a
#    syntax error in R (replaced by this comment block);
#  * inside the loop it did `list <- c(polygon@polygons[[a]]@area)`,
#    overwriting the accumulator each iteration, so min() only ever saw the
#    LAST polygon's area rather than the minimum over all of them.
#
# (The function is named "maxpixels" but, per the original description,
# intentionally computes the MINIMUM area; the name is kept so existing
# callers keep working.)
#
# @param polygon an S4 object with a `polygons` slot (e.g. a
#   sp::SpatialPolygons object), each element of which carries an `area`
#   slot.
# @return the smallest `area` value as a numeric scalar (Inf with a warning
#   if there are no polygons, per min() semantics).
maxpixels <- function(polygon){
  areas <- vapply(polygon@polygons, function(p) p@area, numeric(1))
  min(areas)
}
|
3010bb175a84f4d1a75a67cfda25b832cc6e3e83
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/VNM/examples/PAR-class.Rd.R
|
9a4f9fb864c1f148e3ba1ee8ea3792821a5e06ca
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 231
|
r
|
PAR-class.Rd.R
|
library(VNM)
### Name: PAR
### Title: Class to contain the variables for the verification plots from
### function MOPT, ceff1, ceff2, and Deff.
### Aliases: PAR-class
### Keywords: classes
### ** Examples
showClass("PAR")
|
62cf97ada2b88c27378d6b00997f470e2c730d0e
|
2fc19f59ed2a5dbab1c50ec1250446608ec0c233
|
/users/XiaodanLyu/data_split.r
|
d3b8de8d5cbf82402abcd65137170bc92609a16e
|
[] |
no_license
|
ISU-DMC/dmc2018
|
ca05855efd3101696111d25a8e6f4a70b4521cf4
|
c68079f19149316c4954dbfe7da51707df1fe64b
|
refs/heads/master
| 2020-03-08T02:27:14.549852
| 2018-05-23T16:30:55
| 2018-05-23T16:30:55
| 127,860,160
| 5
| 6
| null | 2018-04-08T01:11:23
| 2018-04-03T06:15:21
|
HTML
|
UTF-8
|
R
| false
| false
| 5,411
|
r
|
data_split.r
|
## ---- splitting
train <- read.csv("../../data/raw_data/train.csv", sep = "|", stringsAsFactors = F)
prices <- read.csv("../../data/raw_data/prices.csv", sep = "|", stringsAsFactors = F)
items <- read.csv("../../data/raw_data/items.csv", sep = "|", stringsAsFactors = F)
## format date
library(lubridate)
library(tidyverse)
train <- train %>% mutate(date = ymd(date))
items <- items %>% mutate(releaseDate = ymd(releaseDate))
prices_long <- prices %>% gather(date, price, -pid, -size) %>%
mutate(date = gsub("X", "", date) %>% ymd())
## join three datasets
alldata <- prices_long %>%
full_join(train, by = c("pid", "size", "date")) %>%
full_join(items, by = c("pid", "size")) %>%
filter(date>=releaseDate-1) %>% ## only keep price info since one day before releasedate
mutate(units = replace(units, is.na(units) & date < ymd("2018-02-01"), 0))
## key variable for identifying item
alldata <- alldata %>% mutate(key = paste(pid, size, sep = " - "))
## check sales before and in January
sale.beforeJan <- alldata %>% filter(date < ymd("2018-01-01")) %>%
group_by(key) %>%
summarise(nsale.beforeJan = sum(units))
sale.Jan <- alldata %>% filter(date >= ymd("2018-01-01")) %>%
group_by(key) %>%
summarise(nsale.Jan = sum(units, na.rm = T))
sale.beforeJan %>% full_join(sale.Jan, by = "key") %>%
mutate(nsale.beforeJan = replace(nsale.beforeJan, is.na(nsale.beforeJan), 0)) %>%
filter(nsale.beforeJan==0 | nsale.Jan == 0) -> items.aside
items.aside %>% glimpse
## put aside those items not both sold before and in January
## put aside obs for February
subdata <- alldata %>% filter(!(key %in% items.aside$key),
date < ymd("2018-02-01")) %>% select(-stock)
## randomly assign stock for Jan, and save true sold-out dates under the assigned stock
set.seed(180201)
stock_Jan <- subdata %>%
left_join(sale.Jan, by = "key") %>%
group_by(key) %>%
summarise(stock = sample.int(n = nsale.Jan, size = 1))
stock_Jan %>% glimpse
stock_Jan %>% summary
test_Jan <- subdata %>%
left_join(stock_Jan, by = "key") %>%
filter(date >= ymd("2018-01-01")) %>%
group_by(pid, size) %>%
mutate(cumunits = cumsum(units)) %>%
filter(cumunits >= stock) %>%
summarise(soldOutDate = min(date))
test_Jan %>% glimpse()
test_Jan %>% summary()
## training data, joint info about items, prices and sales
## sale units in January have been set as missing
train_Jan <- subdata %>%
full_join(stock_Jan, by = "key") %>%
select(-key) %>%
mutate(units = replace(units, date >= ymd("2018-01-01"), NA))
train_Jan %>% glimpse
train_Jan %>% select(units, releaseDate, stock) %>% summary
## properties of the 7409 items with the "faked" stock on 2018-01-01
items_Jan <- train_Jan %>% select(-date, -price, -units) %>% unique
items_Jan %>% glimpse
## ---- save
## save datasets as txt file, separated by "|", missing value as empty
write.table(train_Jan, file = "data_clean/train_Jan.txt", sep = "|",
row.names = FALSE, quote = FALSE, na = "")
write.table(items_Jan, file = "data_clean/items_Jan.txt", sep = "|",
row.names = FALSE, quote = FALSE, na = "")
write.table(test_Jan, file = "data_clean/test_Jan.txt", sep = "|",
row.names = FALSE, quote = FALSE)
## ---- subpopular
test_Jan <- read.table("data_clean/test_Jan.txt", sep = "|", header = T)
train_Jan <- read.table("data_clean/train_Jan.txt", sep = "|", header = T)
## only keep items sold at least somedays
## average: 12 days
## median: 6 days
## 3rd quantile: 14 days
cutvalue <- 6
train_Jan %>% filter(!is.na(units), units>0) %>%
group_by(pid, size) %>% summarise(daysale = sum(units>0, na.rm = T)) %>%
filter(daysale >= cutvalue) -> key_popular
test_Jan %>% inner_join(key_popular %>% select(-daysale), by = c("pid", "size")) -> test_Jan_popular
train_Jan_popular <- train_Jan %>% inner_join(key_popular %>% select(-daysale), by = c("pid", "size"))
## save datasets as txt file, separated by "|", missing value as empty
write.table(train_Jan_popular, file = paste0("data_clean/popular/train_Jan_pop", cutvalue, ".txt"), sep = "|",
row.names = FALSE, quote = FALSE, na = "")
write.table(test_Jan_popular, file = paste0("data_clean/popular/test_Jan_pop", cutvalue, ".txt"), sep = "|",
row.names = FALSE, quote = FALSE)
## rare sale products
## only ever sold for one day before Jan
train_Jan %>% filter(!is.na(units), units>0) %>%
group_by(pid, size) %>% summarise(daysale = sum(units>0, na.rm = T)) %>%
filter(daysale == 1) -> key_cold
test_Jan %>% inner_join(key_cold %>% select(-daysale), by = c("pid", "size")) -> test_Jan_cold
train_Jan_cold <- train_Jan %>% inner_join(key_cold %>% select(-daysale), by = c("pid", "size"))
items_cold <- train_Jan_cold %>% select(-date, -price, -units) %>% unique
write.table(test_Jan_cold, file = "C:/Users/lyux/Dropbox/DMC 2018/ForYuchen-冷门产品/test_Jan_cold.txt", sep = "|",
row.names = FALSE, quote = FALSE, na = "")
write.table(train_Jan_cold, file = "C:/Users/lyux/Dropbox/DMC 2018/ForYuchen-冷门产品/train_Jan_cold.txt", sep = "|",
row.names = FALSE, quote = FALSE, na = "")
write.table(items_cold, file = "C:/Users/lyux/Dropbox/DMC 2018/ForYuchen-冷门产品/items_cold.txt", sep = "|",
row.names = FALSE, quote = FALSE, na = "")
|
abeee69059ed7499e5e75b200931f6b6c12eb1d5
|
1dc44e8b9874ea88796c9dd343e50681c244a543
|
/Aguirregabiria lab/code/header.R
|
1de221308dedb9eedb3bcbf99bcaf1567602d065
|
[] |
no_license
|
orsdemir/Structural-Estimation-of-Choice-Models
|
8f7907d6b8b3b4401ac85798c3bf6c1667979290
|
92a46b872a8ee30865864905a8275ba32a1581fb
|
refs/heads/master
| 2022-10-26T16:55:19.789760
| 2020-06-14T05:23:35
| 2020-06-14T05:23:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 189
|
r
|
header.R
|
# header.R -- shared setup sourced by the analysis scripts: attach packages
# and source every module file.
library('tidyverse')
# Attach the remaining packages by name; character.only=TRUE lets library()
# accept a string instead of a bare symbol.
c('reshape2', 'stringr', 'magrittr','chebpol', 'np') %>%
walk(~library(., character.only=TRUE))
# Source every file found in ./modules (relative to the working directory).
# NOTE(review): dir('modules') returns ALL entries, not just *.R files --
# confirm the modules directory contains only sourceable R scripts.
dir('modules') %>%
walk(~source(paste('./modules/', ., sep="")))
|
601c65fbe0d0bddd69b071f9f1041901961fc993
|
236cdc1ba4d23f14cbdcbd4a53e427506c4caf3f
|
/Scripts/DataAnalysis/data_visualization.R
|
9222acf68115b4fadbabc6756b3164f7aa004cb1
|
[
"MIT"
] |
permissive
|
Guliba/FRASER-analysis
|
775b30e079bcb8a50b91f6a8785a631182f076fd
|
3c125dc561de977b89a674e19b720cc72762b392
|
refs/heads/master
| 2023-03-23T03:12:38.876112
| 2020-08-27T05:28:59
| 2020-08-27T05:28:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,328
|
r
|
data_visualization.R
|
#'---
#' title: Dataset-wise splicing correlation
#' author: Christian Mertes
#' wb:
#' input:
#' - fds_raw: '`sm config["DATADIR"] + "/datasets/savedObjects/raw-{dataset}/fds-object.RDS"`'
#' - fds_fil: '`sm config["DATADIR"] + "/datasets/savedObjects/{dataset}/pajdBinomial_psiSite.h5"`'
#' output:
#' - wBhtml: "Output/html/DataAnalysis/data_viz/{dataset}_data_viz.html"
#' type: noindex
#'---
if(FALSE){
snakemake <- readRDS("./tmp/snakemake.RDS")
source(".wBuild/wBuildParser.R")
parseWBHeader("./Scripts/DataAnalysis/splicing_correlation.R", dataset="example")
fds_gtex_skin_ns <- loadFraseRDataSet("/s/project/gtex-processed/splicing_map", "gtex-skin-notsunexposedsuprapubic", TRUE)
fds_kremer <- loadFraseRDataSet("/s/project/fraser/analysis/datasets", "kremer-bader-et-al", TRUE)
fds_prokisch_all <- loadFraseRDataSet("/s/project/fraser/analysis/datasets", "prokisch_batch5", TRUE)
}
#+ source main config
source("./src/r/config.R")
#+ input
fdsFile <- snakemake@input$fds_fil
dataset <- snakemake@wildcards$dataset
workingDir <- dirname(dirname(dirname(fdsFile)))
#' # Load dataset
dataset
workingDir
fds_raw <- loadFraseRDataSet(workingDir, paste0("raw-", dataset))
fds <- loadFraseRDataSet(workingDir, dataset)
#'
#' ## Number of samples
#'
ncol(fds)
#'
#' ## Number of junctions
#'
junction_numbers <- c(
raw_junc = nrow(fds_raw),
fil_junc = nrow(fds),
raw_ss = nrow(nonSplicedReads(fds_raw)),
fil_ss = nrow(nonSplicedReads(fds)))
data <- data.table(type=names(junction_numbers), count=junction_numbers)
ggplot(data, aes(type, count, fill=type)) +
geom_bar(stat="identity") +
scale_y_log10()
#'
#' ## PSI distribution
#'
print_freq <- 0.01
data <- rbindlist(lapply(psiTypes, function(i){
n <- prod(dim(K(fds, type=i)))
probs <- c(print_freq, 1-print_freq)
selection <- sample(c(TRUE, FALSE), n, replace=TRUE, prob=probs)
data.table(type=i, logitPsi=qlogis(assay(fds, i)[selection])) } ))
range <- range(data[is.finite(logitPsi),logitPsi]) + c(-1,1)
data[is.infinite(logitPsi),logitPsi:=ifelse(logitPsi < 0, range[1], range[2])]
ggplot(data, aes(type, logitPsi, fill=type)) + geom_violin()
#'
#' ## Variance across junctions
#'
data <- rbindlist(lapply(psiTypes, function(i){
data.table(type=i, logitPsiSD=colSds(x(fds, type=i, all=TRUE))) } ))
ggplot(data, aes(type, logitPsiSD, fill=type)) + geom_violin()
#'
#' ## Coverage across junctions
#'
data <- rbindlist(lapply(psiTypes, function(i){
data.table(type=i, meanK=rowMeans(K(fds, type=i))) } ))
ggplot(data, aes(type, meanK + 1, fill=type)) + geom_violin() +
scale_y_log10()
data <- rbindlist(lapply(psiTypes, function(i){
data.table(type=i, meanTotal=rowMeans(N(fds, type=i))) } ))
ggplot(data, aes(type, meanTotal + 1, fill=type)) + geom_violin() +
scale_y_log10()
#'
#' Negative binomial fit of junction coverage
#'
# Mean negative log-likelihood of a negative binomial fit.
#
# par: length-2 vector c(mu, size) in the (mu, size) parameterisation used
#   by stats::dnbinom.
# counts: non-negative integer counts being fitted.
# Returns the mean negative log-density; minimised via optim() below.
fitNegBinom <- function(par, counts){
  log.density <- dnbinom(counts, mu = par[1], size = par[2], log = TRUE)
  -mean(log.density)
}
negBinomFit <- rbindlist(lapply(psiTypes, function(i){
fit <- optim(c(500, 0.5), fitNegBinom, counts=round(data[type == i, meanTotal]))
return( data.table(type=i, mu=fit$par[1], size=fit$par[2]) ) } ))
DT::datatable(negBinomFit)
#'
#' ## Correlation
#'
known_factors_row <- c("condition")
known_factors_col <- c("FIBROBLAST_ID", "SEX", "BATCH", "TISSUE",
"GROWTH_MEDIUM", "RNA_HOX_GROUP", "RNA_BATCH_GROUP",
"SMCENTER", "SMRIN", "AGE", "DTHHRDY", "GENDER", "SMATSSCR")
known_factors_col <- known_factors_col[known_factors_col %in% colnames(colData(fds))]
known_factors_row <- known_factors_row[known_factors_row %in% colnames(colData(fds))]
if(length(known_factors_col) == 0){
known_factors_col <- NA
}
if(length(known_factors_row) == 0){
known_factors_row <- NA
}
plist <- lapply(psiTypes, plotCountCorHeatmap, fds=fds, logit=TRUE, topN=100000,
annotation_col=known_factors_col, annotation_row=known_factors_row)
#'
#' ## Junctions grouped by shared donors/acceptors
#'
dt <- data.table(
chr = as.factor(seqnames(fds)),
start = start(fds),
end = end(fds),
strand = as.factor(strand(fds)) )
groups <- rbind(data.table(groupsize = dt[,length(end), by=c("chr", "start", "strand")]$V1, groupedBy="donor"),
data.table(groupsize = dt[,length(start), by=c("chr", "end", "strand")]$V1, groupedBy="acceptor") )
summary(groups[groupedBy=="donor",groupsize])
summary(groups[groupedBy=="acceptor",groupsize])
ggplot(groups, aes(groupsize, fill=groupedBy)) + geom_histogram(alpha=0.7, position="identity", binwidth=1) #+ scale_y_log10()
#'
#' ## Distribution of H, D, rho after autoencoder fit
#'
rho <- rbindlist(lapply(psiTypes, function(i){
data.table(rho=rho(fds, i), type=i) } ))
ggplot(rho, aes(x=rho, fill=type)) + geom_density(alpha = 0.7) + scale_x_log10()
E <- rbindlist(lapply(psiTypes, function(i){
data.table(E=as.vector(E(fds, i)), type=i) } ))
ggplot(E, aes(x=E, fill=type)) + geom_density(alpha = 0.7)
D <- rbindlist(lapply(psiTypes, function(i){
data.table(D=as.vector(D(fds, i)), type=i) } ))
ggplot(D, aes(x=D, fill=type)) + geom_density(alpha = 0.7)
H <- rbindlist(lapply(psiTypes, function(i){
data.table(H=as.vector(H(fds, i)), type=i) } ))
ggplot(H, aes(x=H, fill=type)) + geom_density(alpha = 0.7)
|
b1e2e25ec806e4f0e0278923a66ccd4c7f1f78b7
|
17d582790e37f4a1fa3cfcfc531fdf5c4f4086d4
|
/packrat/lib/x86_64-redhat-linux-gnu/3.5.1/vctrs/tests/testthat/test-ptype-abbr-full.R
|
20204f42bb7aef8102f88036ec96691738c7319d
|
[] |
no_license
|
teyden/asthma-research
|
bcd02733aeb893074bb71fd58c5c99de03888640
|
09c1fb98d09e897e652620dcab1482a19743110f
|
refs/heads/master
| 2021-01-26T08:20:58.263136
| 2020-02-27T04:12:56
| 2020-02-27T04:12:56
| 243,374,255
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,803
|
r
|
test-ptype-abbr-full.R
|
context("test-type-string")
test_that("input must be a vector", {
expect_error(vec_ptype_abbr(sum), "Not a vector")
expect_error(vec_ptype_full(sum), "Not a vector")
})
test_that("NULL has method", {
expect_equal(vec_ptype_abbr(NULL), "NULL")
expect_equal(vec_ptype_full(NULL), "NULL")
})
test_that("non objects default to type + shape", {
expect_equal(vec_ptype_abbr(ones(10)), "dbl")
expect_equal(vec_ptype_abbr(ones(0, 10)), "dbl[,10]")
expect_equal(vec_ptype_abbr(ones(10, 0)), "dbl[,0]")
expect_equal(vec_ptype_full(ones(10)), "double")
expect_equal(vec_ptype_full(ones(0, 10)), "double[,10]")
expect_equal(vec_ptype_full(ones(10, 0)), "double[,0]")
})
test_that("objects default to first class", {
x <- structure(1, class = "foofy")
expect_equal(vec_ptype_full(x), "foofy")
expect_equal(vec_ptype_abbr(x), "foofy")
})
test_that("atomic vectors and arrays as expected", {
expect_equal(vec_ptype_full(1:5), "integer")
dbl_mat <- array(double(), c(0, 3))
expect_equal(vec_ptype_full(dbl_mat), "double[,3]")
})
test_that("complex and factor as expected (#323)", {
expect_equal(vec_ptype_abbr(0i), "cpl")
expect_equal(vec_ptype_abbr(factor()), "fct")
})
test_that("I() wraps contents", {
f <- factor()
expect_equal(vec_ptype_abbr(I(f)), "I<fct>")
expect_equal(vec_ptype_full(I(f)), "I<factor<>>")
})
test_that("AsIs class stripped from I()", {
df <- data.frame(x = 1, y = 1:2)
class(df) <- c("myclass", "data.frame")
expect_equal(vec_ptype_full(I(df)), "I<myclass<\n x: double\n y: integer\n>>")
expect_equal(vec_ptype_full(I(df[1])), "I<myclass<x:double>>")
expect_equal(vec_ptype_full(I(df[0])), "I<myclass<>>")
})
test_that("named lists are tagged (#322)", {
expect_identical(vec_ptype_abbr(list(x = 1, y = 2)), "named list")
})
|
7ffee4b12a32d39637b903e627de8d69f6e3ad05
|
d2723d7ac31084fd5a9dfe64fb78ef1d0c254154
|
/Data_Wrangling.R
|
96ad3e0c8ec12316a999af5a88501fdfc933be3a
|
[] |
no_license
|
royal-free-london/RunCharter_Shiny
|
52421b28b581fdada4de8158c37e7beedbbb2d34
|
76408b3cc6125a07902c027de320b052ec2fdc68
|
refs/heads/master
| 2020-08-10T04:46:52.820083
| 2020-01-17T13:52:35
| 2020-01-17T13:52:35
| 214,260,394
| 1
| 1
| null | 2019-10-15T11:00:52
| 2019-10-10T18:47:40
|
R
|
UTF-8
|
R
| false
| false
| 3,664
|
r
|
Data_Wrangling.R
|
# Data wrangling script: pull monthly indicator performance figures from the
# "RFH-INFORMATION" ODBC source, derive a Performance rate, and export the
# result for the Shiny app (CSV, plus an optional Excel variant).
#install.packages("RODBC") or install.packages('RODBC', dependencies=TRUE, repos='http://cran.rstudio.com/')
#install.packages("odbc")
#install.packages("ggplot2", lib = "C:/Users/ju0d/Documents/R/win-library/3.6")
library(odbc)
#library(DBI)
library(lubridate)
library(tidyverse) # tidyverse contains library(readr),library(readxl),library(stringr),#library(dplyr),library(tidyr)
# and library("ggplot2", lib.loc= "C:/Users/ju0d/Documents/R/win-library/3.6") #library(gglot2) stopped working after upgrade to version 3.6.1

# Connect via the named DSN and pull monthly aggregates per indicator/site.
myConn <- dbConnect(odbc::odbc(), "RFH-INFORMATION")
myData <- dbGetQuery(myConn,"
Select Financial_Year,Financial_Month
,Report_Date,Left(Datename(m,Report_Date),3) As [Month]
,p.Indicator_Code,p.Indicator_Name
,SUM(Numerator) As Numerator
,Sum(Denominator) As Denominator
,Business_Unit
From(
Select Financial_Year,Financial_Month,Cast(DATEADD(d,-day(Report_Date)+1,Report_Date) As Date) As Report_Date
,Indicator_Code,Indicator_Name
,Numerator
,Denominator
,Case when Site_Code in ('RALC7','RVLC7') then 'RVLC7'
when Site_Code in ('RVL01','RAL26') then 'RVL01'
when Site_Code in ('RVL01') then 'RAL01'
ELSE 'Others' end As Site_Code
,Case When Business_Unit IS NULL OR Business_Unit = '' THEN
Case when Site_Code in ('RALC7','RVLC7') then 'Chase Farm Hospital'
when Site_Code in ('RVL01','RAL26') then 'Barnet Hospital'
when Site_Code in ('RAL01') then 'Royal Free Hospital'
ELSE 'Others' End
ELSE
Case when Business_Unit like 'Royal Free%' Then 'Royal Free Hospital'
when Business_Unit like 'Barnet%' Then 'Barnet Hospital'
when Business_Unit like 'Chase Farm%' Then 'Chase Farm Hospital'
Else 'Others' End
END As Business_Unit
From RF_Indicators.dbo.All_Indicators
Where Financial_Year >= '2015/2016'
) a
INNER JOIN (Select Indicator_Code,Indicator_Name From RF_Indicators.dbo.Indicators_Metadata) p
ON a.Indicator_Code= p.Indicator_Code
Group by Financial_Year,Financial_Month,Report_Date,p.Indicator_Code,p.Indicator_Name,Business_Unit")
dbDisconnect(myConn) # Disconnect to free up resources

# Derive Performance: raw Numerator when there is no usable Denominator,
# otherwise the Numerator/Denominator rate.
# NOTE(review): arrange() on as.factor(Financial_Month) sorts month labels
# alphabetically, not chronologically — confirm that is intended.
shinyData <- myData %>%
  arrange(Indicator_Code, as.factor(Financial_Month)) %>%
  select(Financial_Year, Report_Date, Indicator_Code, Indicator_Name, Month, Numerator, Denominator#,Site_Code
         , Business_Unit) %>%
  mutate(Performance = ifelse(Denominator <= 0 | Denominator == '' | is.null(Denominator) | is.na(Denominator)
                              , Numerator, Numerator / Denominator)) %>%
  select(Financial_Year, Month, Report_Date, Indicator_Code, Indicator_Name, Numerator, Denominator, Performance
         , Business_Unit)

# Export the cleaned data to the Shiny_App folder as CSV.
sFldr <- "//netshare-ds3/Performance/Team/Jonathan/Shiny_App/"
sFile <- "shinyData_New_Table.csv"
csvPath <- file.path(sFldr, sFile)
write_csv(shinyData, csvPath) #write.csv(shinyData,[path]) will also work
# End of Data wrangling

# To export the file as Excel instead of CSV:
# FIX: write.xlsx2()/write.xlsx() live in the 'xlsx' package, not 'readxl'.
# readxl is still needed below for read_xlsx().
library(readxl)
library(xlsx)
sFldr <- "//netshare-ds3/Performance/Team/Jonathan/Shiny_App/"
sFile <- "shinyData.xlsx"
write.xlsx2(shinyData, file.path(sFldr, sFile))
# or use the one below, which is much slower
write.xlsx(shinyData, file.path(sFldr, sFile))

# Importing the exported data back.
# FIX: the original called read_csv(path) with an undefined 'path' variable;
# reuse the CSV path built above.
new_ShinyData <- read_csv(csvPath)                 # for the csv file
new_ShinyData2 <- read_xlsx(file.path(sFldr, sFile)) # for excel files
|
4f50d0aa7d31dbade97abf2cff3d87af88090813
|
a807e1bda86a71521deece0637f45e41d82c2d99
|
/RF_Sampat.r
|
9d6893c337a0561239947b84850b1aef89f5ec3b
|
[] |
no_license
|
sampatm28/My_Analytics
|
738c6a258c7d7e0fc5b15de9a07bacfedbdffcb0
|
29016562cec69d2fdeeef3cbdac1d25f5f10bf5d
|
refs/heads/master
| 2020-12-01T23:36:50.696057
| 2016-11-02T01:33:47
| 2016-11-02T01:33:47
| 67,351,845
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,664
|
r
|
RF_Sampat.r
|
# Setup: load modeling/plotting packages and import the HR attrition data.
#install.packages("popbio")
#install.packages("mice")
#install.packages("ineq")
#install.packages("caret")
#install.packages('NCStats')
#install.packages('ROCR')
#install.packages('ROSE')
#library('mlr')
#detach("package:mlr",unload=TRUE)
library(rpart)
library(rpart.plot)
library(rattle)
library(RColorBrewer)
library(mice)
library(ROSE)
library(popbio)
library(partykit)
library(ROCR)
library(data.table)
library(ineq)
# NOTE(review): detach() errors if InformationValue is not currently attached;
# wrap in tryCatch or guard with "package:InformationValue" %in% search().
detach("package:InformationValue",unload=TRUE)
# NOTE(review): hard-coded setwd() makes the script non-portable.
setwd("G:\\DataMining\\Resi5\\Assignment")
getwd()
## Let us import the data that we intend to use for modeling
Emp_attr = read.csv("1452762979_586__HR_Employee_Attrition_Data.csv")
# Drop constant columns that carry no predictive information.
Emp_attrition <-
  subset(Emp_attr, select = -c(StandardHours, EmployeeCount, Over18))
str(Emp_attrition)
# NOTE(review): attach() is discouraged — it masks columns into the search
# path and causes stale-data bugs after Emp_attrition is modified.
attach(Emp_attrition)
# NOTE(review): summarizeColumns() comes from the mlr package, which is
# commented out above — this call will fail unless mlr is loaded.
summarizeColumns(Emp_attrition)
#' Assign each value of a numeric vector to its sample decile (1..10).
#'
#' @param x Numeric vector (NAs are ignored when computing the cut points
#'   and propagate to the output).
#' @return Numeric vector the same length as `x` with values in 1..10.
decile <- function(x){
  # Decile cut points: the 10%, 20%, ..., 100% sample quantiles (default
  # type-7 quantiles, same as the original per-probability calls).
  deciles <- unname(quantile(x, probs = seq_len(10) / 10, na.rm = TRUE))
  # findInterval() against the first nine cut points reproduces the original
  # nested ifelse() chain: values below the 10% quantile land in bin 0 (+1 ->
  # decile 1), values at/above the 90% quantile land in bin 9 (+1 -> decile 10).
  # This replaces the fragile fractional indexing deciles[i*10] with
  # i from seq(0.1, 1, 0.1), where floating-point error in i*10 could
  # truncate to the wrong slot.
  findInterval(x, deciles[1:9]) + 1
}
# Split data into Train and Test
library(caret)
# 70/30 stratified split on the outcome.
# NOTE(review): no set.seed() before createDataPartition, so the split is
# not reproducible between runs.
train = createDataPartition(Attrition, list=FALSE, times=1, p=0.7)
train.data = Emp_attrition[train,]
test.data = Emp_attrition[-train,]
str(train.data)
nrow(test.data)
attach(train.data)
#Making sure the partition is right: class proportions should match.
prop.table((table( Emp_attrition$Attrition)))
prop.table((table(train.data$Attrition)))
prop.table((table(test.data$Attrition)))
##install.packages("randomForest")
library(randomForest)
## Calling syntax to build the Random Forest
# NOTE(review): mtrystart, stepFactor, improve, trace, plot and doBest are
# tuneRF() arguments, not randomForest() arguments — randomForest() silently
# swallows them via `...`, so only ntree, nodesize and importance take effect.
RF <- randomForest(Attrition~., data = train.data,
                   ntree=500, mtrystart = 1,
                   stepFactor = 1.5,
                   nodesize = 10,
                   improve = 0.001,
                   trace=TRUE,
                   plot = TRUE,
                   doBest = TRUE,
                   importance=TRUE)
?randomForest
print(RF)
# Variable importance, two views (caret's varImp and randomForest's own).
varImp(RF)
varImpPlot(RF)
plot(RF)
importance(RF)
varImpPlot(RF,type=2)
# Error-rate curves: OOB plus one line per class.
plot(RF, main="")
legend("topright", c("OOB", "0", "1"), text.col=1:6, lty=1:3, col=1:3)
title(main="Error Rates Random Forest Training Sample")
RF$err.rate
#Predict on Training (dev) Sample
# NOTE(review): scoring the model on its own training data overstates
# performance; predict(RF) with no newdata would give OOB predictions instead.
train.data$predict.class <- predict(RF, train.data, type="class")
train.data$predict.score <- predict(RF, train.data,type="prob")
head(train.data)
r <- table(train.data$predict.class,train.data$Attrition)
#install.packages('e1071')
require('e1071')
library('e1071')
confusionMatrix(r)
# ROC-based metrics on the positive-class score (column 2 of the prob matrix).
pred <- prediction(train.data$predict.score[,2], train.data$Attrition)
summary(pred)
perf <- performance(pred, "tpr", "fpr")
plot(perf)
# KS statistic = max vertical gap between TPR and FPR curves.
KS <- max(attr(perf, 'y.values')[[1]]-attr(perf, 'x.values')[[1]])
auc <- performance(pred,"auc");
auc <- as.numeric(auc@y.values)
# Gini concentration of the scores (ineq package).
gini = ineq(train.data$predict.score[,2], type="Gini")
with(train.data, table(Attrition, predict.class))
auc
KS
gini
#confusion matrix (training data)
# predict(RF, type="class") with no newdata returns OOB class predictions.
conf.matrix <- table(train.data$Attrition, predict(RF,type="class"))
rownames(conf.matrix) <- paste("Actual", rownames(conf.matrix), sep = ":")
colnames(conf.matrix) <- paste("Pred", colnames(conf.matrix), sep = ":")
print(conf.matrix)
#Pred:No Pred:Yes
#Actual:No 1721 6
#Actual:Yes 190 142
#Prediction on Test sample
#Model Validation on test data
detach(train.data)
attach(test.data)
test.data$predict.class <- NULL
test.data$predict.score <- NULL
# NOTE(review): RF_Test is trained ON the test set — this leaks the holdout
# into model fitting; the later metrics should come from RF (trained on
# train.data) scored on test.data, as done further below.
# Same caveat as before: mtrystart/stepFactor/improve/doBest are tuneRF()
# arguments and are silently ignored here.
RF_Test <- randomForest(Attrition~., data = test.data,
                        ntree=200, mtrystart = 3, stepFactor = 1.5, nodesize = 10,improve = 0.001,
                        trace=TRUE,
                        plot = TRUE,
                        doBest = TRUE,
                        importance=TRUE)
nrow(test.data)
#Plot Error rates for Validation or test sample
plot(RF_Test, main="")
legend("topright", c("OOB", "0", "1"), text.col=1:6, lty=1:3, col=1:3)
title(main="Error Rates Random Forest Validation Sample")
print(RF_Test)
# Score the holdout with the model trained on train.data (RF, not RF_Test).
test.data$predict.class <- predict(RF, test.data, type="class")
test.data$predict.score <- predict(RF, test.data, type="prob")
View(test.data)
#KS,Gini,AUC for test.data
pred <- prediction(test.data$predict.score[,2], test.data$Attrition)
summary(pred)
perf <- performance(pred, "tpr", "fpr")
plot(perf)
KS <- max(attr(perf, 'y.values')[[1]]-attr(perf, 'x.values')[[1]])
auc <- performance(pred,"auc");
auc <- as.numeric(auc@y.values)
gini = ineq(test.data$predict.score[,2], type="Gini")
with(test.data, table(Attrition, predict.class))
auc
KS
gini
# Recorded results from a previous run:
#predict.class
##Attrition No Yes
#No 737 2
#Yes 83 59
#> auc
#[1] 0.9116192
#> KS
#[1] 0.7067697
#> gini
#[1] 0.5421213
#>
#confusion matrix (testing data)
conf.matrix.test <- table(test.data$Attrition, predict(RF_Test,type="class"))
# NOTE(review): these two lines reuse the dimnames of conf.matrix (already
# prefixed with "Actual:"/"Pred:" above), producing the doubled labels
# "Actual:Actual:No" seen in the recorded output — they should use
# rownames(conf.matrix.test)/colnames(conf.matrix.test).
rownames(conf.matrix.test) <- paste("Actual", rownames(conf.matrix), sep = ":")
colnames(conf.matrix.test) <- paste("Pred", colnames(conf.matrix), sep = ":")
print(conf.matrix.test)
r <- table(test.data$predict.class,test.data$Attrition)
#install.packages('e1071')
require('e1071')
library('e1071')
confusionMatrix(r)
# Recorded results from a previous run:
#Pred:Pred:No Pred:Pred:Yes
#Actual:Actual:No 734 5
#Actual:Actual:Yes 106 36
#Missclassification 12.60%
#Accuracy 88.40%
##class(randomForest::importance(RF))
## List the importance of the variables.
##impVar <- round(randomForest::importance(RF), 2)
##impVar[order(impVar[,3], decreasing=TRUE),]
## Tuning Random Forest
# NOTE(review): x = test.data still contains the Attrition outcome and the
# predict.class/predict.score columns added above — tuneRF will tune on
# leaked information. x should be the predictor columns only.
tRF <- tuneRF(x = test.data,
              y = Attrition,
              mtryStart = 3,
              ntreeTry=50,
              stepFactor = 1.5,
              improve = 0.001,
              trace=TRUE,
              plot = TRUE,
              doBest = TRUE,
              nodesize = 10,
              importance=TRUE
)
print(tRF)
# Score both samples with the tuned forest (doBest=TRUE makes tRF a
# randomForest object rather than the tuning matrix).
train.data$predict.class <- predict(tRF, train.data, type="class")
train.data$predict.score <- predict(tRF, train.data,type="prob")
head(train.data)
test.data$predict.class <- predict(tRF, test.data, type="class")
test.data$predict.score <- predict(tRF, test.data, type="prob")
View(test.data)
table(test.data$Attrition,test.data$predict.class)
table(train.data$Attrition,train.data$predict.class)
## deciling: rank the training sample into score deciles and build a
## gains/KS table per decile.
train.data$deciles <- decile(train.data$predict.score[,2])
View(train.data)
library(data.table)
tmp_DT = data.table(train.data)
# Per-decile counts of retained vs attrited employees, highest decile first.
rank <- tmp_DT[, list(
  cnt = length(Attrition),
  cnt_Retention = sum(Attrition == 'No'),
  cnt_Attrtion = sum(Attrition == 'Yes')) ,
  by=deciles][order(-deciles)]
rank$rrate <- round(rank$cnt_Retention * 100 / rank$cnt,2);
# Convert per-decile counts to cumulative counts.
rank$cnt_Retention <- cumsum(rank$cnt_Retention)
rank$cnt_Attrtion <- cumsum(rank$cnt_Attrtion)
# FIX: the cumulative percentages must be taken over the grand total of each
# class. After the cumsum above, sum(rank$cnt_Retention) is the sum of
# *cumulative* counts (far larger than the total); the correct denominator is
# the last/maximum cumulative value, i.e. max(...). The original divided by
# sum(...), deflating every percentage and the KS column.
rank$cum_perct_Retention <- round(rank$cnt_Retention * 100 / max(rank$cnt_Retention),2);
rank$cum_perct_Attrtion <- round(rank$cnt_Attrtion * 100 / max(rank$cnt_Attrtion),2);
# Per-decile KS: gap between the two cumulative-percentage curves.
rank$ks <- abs(rank$cum_perct_Retention - rank$cum_perct_Attrtion);
View(rank)
|
79dc3ba252e0f64cd7dab97926dd3248aad539ee
|
d6bdf7cc3f76f96ab10428781fe7fd3f7f2af174
|
/House Prices - Advanced Regression Techniques/House Prices - Advanced Regression Techniques.R
|
7521dd2aa5435889225e5959187e91be167e65db
|
[] |
no_license
|
mike630/Kaggle-Challenges-Portfolio
|
7123cb1aa347280035bea4d876ffe3cf0930ce66
|
44cbe65585fe6da14bdd5ba3f4c1762447201e1f
|
refs/heads/master
| 2020-06-19T17:44:13.143503
| 2019-09-16T15:58:10
| 2019-09-16T15:58:10
| 196,807,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,255
|
r
|
House Prices - Advanced Regression Techniques.R
|
# Select the working directory
# NOTE(review): hard-coded setwd() makes the script non-portable.
setwd('C:/Projetos/Kaggle/House Prices - Advanced Regression Techniques')
# install.packages('corrplot') # Install packages if missing
# install.packages('knitr')
# install.packages('randomForest')
# install.packages('caret')
# install.packages('dplyr')
# install.packages('ggplot2')
# install.packages('ggrepel')
# install.packages('psych')
# install.packages('xgboost')
# install.packages('ggrepel')
# install.packages('psych')
library(corrplot)
library(knitr)
library(randomForest)
library(caret)
library(dplyr)
library(ggplot2)
library(ggrepel)
library(psych)
library(xgboost)
# Load the Kaggle train/test CSVs; empty strings are treated as NA.
# NOTE(review): file.choose() is interactive — prefer explicit file paths
# for reproducible runs.
train <- read.csv(file.choose(), na.strings = c('NA', ''))
View(train)
test <- read.csv(file.choose(), na.strings = c('NA', ''))
View(test)
# Columns present in train but not in test (expected: SalePrice).
colnames(train[!colnames(train) %in% colnames(test)])
# Add a placeholder target so the two frames can be stacked for joint cleaning.
test$SalePrice <- NA
all <- rbind(train,test)
dim(all)
str(all)
# Per-column NA counts.
kable(sapply(all, function(x) sum(is.na(x))))
# NOTE(review): str() returns NULL invisibly, so kable(str(all)) prints the
# structure as a side effect but kable itself receives NULL.
kable(str(all))
#---------------------------------------------------------------------------------
# Correlation
# Inspect correlations among the numeric variables.
numericVars <- which(sapply(all, is.numeric))#index vector numeric variables
numericVars <- names(numericVars)
View(numericVars)
class(numericVars)
length(numericVars)
# Pairwise-complete correlations so the NA target rows (test set) don't
# wipe out the SalePrice column.
correlacao <- cor(all[,numericVars], use = 'pairwise.complete.obs')
# Strongest correlations with the target variable
best_cor <- sort(correlacao[,'SalePrice'], decreasing = TRUE)
View(best_cor)
# PS: strong correlations are those close to 1 and -1.
# Correlation plot over all numeric variables
corrplot(correlacao, tl.col="black", tl.pos = "lt")
# Keep only one of any pair of highly-correlated independent variables when
# they are logically measuring the same thing; otherwise decide case by case
# whether both should enter the model.
# From the plot: YearBuilt x GarageYrBlt, GrLivArea x TotRmsAbvGrd,
# HalfBath x TotRmsAbvGrd, YearRemodAdd x GarageYrBlt, GarageArea x GarageCars,
# TotalBsmtSF x X1stFlrSF.
# High correlations between independent variables and the target (SalePrice)
# indicate good predictor candidates for the model.
# Sort on decreasing correlations with SalePrice - Target Variable
cor_decresc <- as.matrix(sort(correlacao[,'SalePrice'], decreasing = TRUE))
# Viewing the correlations
#--------------------------------------------------------------------------------------
# Dealing with missing values
#
# For these categorical columns an NA is informative: it means the house
# simply lacks the feature (no pool, no garage, no basement, ...). Recode
# the NAs as an explicit factor level. A single helper replaces the
# repeated as.character / ifelse / as.factor boilerplate, with identical
# results (as.factor sorts levels alphabetically either way).

# Replace NA entries of a factor with an explicit level and re-factor.
fill_na_level <- function(f, label) {
  v <- as.character(f)
  v[is.na(v)] <- label
  as.factor(v)
}

# Column -> replacement level meaning "feature absent".
na_levels <- c(
  MiscFeature  = 'None',
  Fence        = 'No Fence',
  PoolQC       = 'No Pool',
  GarageCond   = 'No Garage',
  GarageQual   = 'No Garage',
  GarageFinish = 'No Garage',
  GarageType   = 'No Garage',
  FireplaceQu  = 'No Fireplace',
  BsmtFinType2 = 'No Basement',
  BsmtFinType1 = 'No Basement',
  BsmtExposure = 'No Basement',
  BsmtCond     = 'No Basement',
  BsmtQual     = 'No Basement',
  Alley        = 'No alley access'
)
for (col in names(na_levels)) {
  all[[col]] <- fill_na_level(all[[col]], na_levels[[col]])
}

# GarageYrBlt mixes years with the 'No Garage' marker, so it stays character
# (as in the original, which applied as.character twice).
all$GarageYrBlt <- as.character(all$GarageYrBlt)
all$GarageYrBlt[is.na(all$GarageYrBlt)] <- 'No Garage'

# Numeric garage columns: no garage means 0 cars / 0 square feet.
for (col in c('GarageCars', 'GarageArea')) {
  v <- all[[col]]
  v[is.na(v)] <- 0
  all[[col]] <- as.integer(v)
}
#----------------------------------------------------------------------------------
# Check Power BI Data Visualizations
# Impute the few missing MSZoning values using the Street/Neighborhood
# combinations observed in the external (Power BI) exploration.
all$MSZoning <- as.character(all$MSZoning)
all$MSZoning <- ifelse(is.na(all$MSZoning) &
                         all$Street == 'Grvl' &
                         all$Neighborhood == 'IDOTRR', "C (all)", all$MSZoning)
all$MSZoning <- ifelse(is.na(all$MSZoning) &
                         all$Street == 'Pave' &
                         all$Neighborhood == 'IDOTRR', 'RM', all$MSZoning)
all$MSZoning <- ifelse(is.na(all$MSZoning) &
                         all$Street == 'Pave' &
                         all$Neighborhood == 'Mitchel', 'RL', all$MSZoning)
all$MSZoning <- as.factor(all$MSZoning)
#----------------------------------------------------------------------------------------
# Convert quantitative codes that are really categories into factors
all$MSSubClass <- as.factor(all$MSSubClass)
all$OverallQual <- as.factor(all$OverallQual)
all$OverallCond <- as.factor(all$OverallCond)
#----------------------------------------------------------------------------------------
# Modeling + prediction to impute the missing MasVnrType values
kable(sapply(all, function(x) sum(is.na(x))))
# Holdout sample: rows with MasVnrType known (trainClean) vs missing (testClean).
# NOTE(review): no set.seed() before sample(), so this split is not reproducible.
trainClean <- all[!is.na(all$MasVnrType) & !is.na(all$Exterior1st) & !is.na(all$Exterior2nd),]
testClean <- all[is.na(all$MasVnrType) & !is.na(all$Exterior1st) & !is.na(all$Exterior2nd),]
amostra = sample(2,dim(trainClean)[1], replace = T, prob = c(0.7,0.3))
trainClean1 <- trainClean[amostra==1,]
trainClean2 <- trainClean[amostra==2,]
# Model 1 (exploratory, kept for reference)
# set.seed(2017)
# modelo = randomForest(MasVnrType~MSSubClass+LotArea+LotShape+LotConfig+Neighborhood+
#                        OverallQual+OverallCond+YearBuilt+
#                        YearRemodAdd+Exterior1st+Exterior2nd+ExterQual+BsmtQual +BsmtExposure+
#                        BsmtFinType1+X1stFlrSF+X2ndFlrSF,
#                      data = trainClean1, ntree= 350)
# plot(modelo)
# summary(modelo$importance)
# varImpPlot(modelo)
# View(importance(modelo))
# Model 2 (exploratory, kept for reference)
# set.seed(12345)
# modelo2 = train(MasVnrType~MSSubClass+LotArea+LotShape+LotConfig+Neighborhood+
#                  OverallQual+OverallCond+YearBuilt+
#                  YearRemodAdd+Exterior1st+Exterior2nd+ExterQual+BsmtQual +BsmtExposure+
#                  BsmtFinType1+X1stFlrSF+X2ndFlrSF,
#                data = trainClean1, method= 'rf',
#                trControl=trainControl(method="cv", number=7))
# plot(modelo2)
# modelo2$results
# class(modelo2)
# a <- varImp(modelo2,scale = T)
# View(a$importance)
# previsao = predict(modelo, trainClean2)
# previsao2 = predict(modelo2, trainClean2)
# previsao <- as.factor(previsao)
# previsao2 = as.factor(previsao2)
# trainClean2$MasVnrType <- as.factor(trainClean2$MasVnrType)
# confusao = confusionMatrix(reference = trainClean2$MasVnrType, data = previsao)
# confusao
# confusao2 = confusionMatrix(reference = trainClean2$MasVnrType, data = previsao2)
# confusao2
# Final prediction uses the model-1 specification, refit on all clean rows.
modelo3 = randomForest(MasVnrType~MSSubClass+LotArea+LotShape+LotConfig+Neighborhood+
                         OverallQual+OverallCond+YearBuilt+
                         YearRemodAdd+Exterior1st+Exterior2nd+ExterQual+BsmtQual +BsmtExposure+
                         BsmtFinType1+X1stFlrSF+X2ndFlrSF,
                       data = trainClean, ntree= 350)
# Impute the predictions into the missing MasVnrType slots.
imput = predict(modelo3, testClean)
table(imput)
imput <- as.character(imput)
View(all[is.na(all$MasVnrType) | is.na(all$MasVnrArea), c('MasVnrType','MasVnrArea')])
all$MasVnrType <- as.character(all$MasVnrType)
# Relies on testClean rows appearing in the same order as the NA positions here.
all$MasVnrType[is.na(all$MasVnrType)] <- imput
all$MasVnrType <- as.factor(all$MasVnrType)
# Impute MasVnrArea: mean area when veneer exists, 0 when type is 'None'.
all$MasVnrArea <- ifelse(all$MasVnrType != 'None' & is.na(all$MasVnrArea),
                         mean(all$MasVnrArea[!is.na(all$MasVnrArea)]),
                         ifelse(all$MasVnrType == 'None' & is.na(all$MasVnrArea),
                                0,all$MasVnrArea))
#----------------------------------------------------------------------------------------
# Modeling + prediction attempt to impute LotFrontage
# ggplot(all[!is.na(all$SalePrice),], aes(x = LotFrontage, y= LotArea)) +
#  geom_point(col = 'green') + geom_smooth(method = 'lm', se = F, col = 'blue' ) + ylim(0,75000)
# From the Power BI exploration and the correlation above: the larger the
# LotArea, the larger LotFrontage tends to be as well.
# The idea was to predict each missing LotFrontage with a regression model.
# Holdout sample (exploratory, kept for reference)
# trainClean <- all[!is.na(all$LotFrontage) & !is.na(all$TotalBsmtSF),]
# testClean <- all[is.na(all$LotFrontage) & !is.na(all$TotalBsmtSF),]
# amostra = sample(2,dim(trainClean)[1], replace = T, prob = c(0.7,0.3))
# trainClean1 <- trainClean[amostra==1,]
# trainClean2 <- trainClean[amostra==2,]
# Model 1
# set.seed(2017)
# modelo4 = lm(LotFrontage~LotArea+Neighborhood+X1stFlrSF+TotRmsAbvGrd+GarageArea,
#             data = trainClean1)
# modelo4
# summary(modelo4)
# Model 2
# set.seed(12345)
# modelo5 = train(LotFrontage~LotArea+Neighborhood+X1stFlrSF+TotRmsAbvGrd+
#                  GarageArea,data = trainClean1,
#                method= 'lm',trControl=trainControl(method="cv", number=7))
# modelo5
# modelo5$results
# summary(modelo5)
# sort(summary(modelo5)$coefficients[,4], decreasing = F)
# class(modelo5)
# Given the R-squared / Adj R-squared, RMSE and MAE, the model was not good
# enough to predict the missing LotFrontage values, so the variable is
# dropped entirely.
all$LotFrontage <- NULL
#----------------------------------------------------------------------------------------
# Remaining missing values: basement measures, exteriors, and a handful of
# single-NA categorical columns imputed with their modal level.
kable(all[(is.na(all$BsmtFullBath)|is.na(all$BsmtHalfBath)|is.na(all$BsmtFinSF1)|is.na(all$BsmtFinSF2)|
             is.na(all$BsmtUnfSF)|is.na(all$TotalBsmtSF)), c('BsmtQual', 'BsmtFullBath', 'BsmtHalfBath',
                                                             'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF')])
str(all[,c('BsmtQual', 'BsmtFullBath', 'BsmtHalfBath',
           'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF')])
# No basement -> zero for every basement measurement.
all$BsmtFullBath <- ifelse(is.na(all$BsmtFullBath),0,all$BsmtFullBath)
all$BsmtHalfBath <- ifelse(is.na(all$BsmtHalfBath),0,all$BsmtHalfBath)
all$BsmtFinSF1 <- ifelse(is.na(all$BsmtFinSF1),0,all$BsmtFinSF1)
all$BsmtFinSF2 <- ifelse(is.na(all$BsmtFinSF2),0,all$BsmtFinSF2)
all$BsmtUnfSF <- ifelse(is.na(all$BsmtUnfSF),0,all$BsmtUnfSF)
all$TotalBsmtSF <- ifelse(is.na(all$TotalBsmtSF),0,all$TotalBsmtSF)
# Exterior1st/Exterior2nd each have a single NA (same row): impute the mode.
all$Exterior1st[is.na(all$Exterior1st)] <- names(sort(table(all$Exterior1st),decreasing=T))[1]
table(all$Exterior1st)
all$Exterior2nd[is.na(all$Exterior2nd)] <- names(sort(table(all$Exterior2nd),decreasing=T))[1]
table(all$Exterior2nd)
# Utilities has only one observation outside the dominant class — no
# predictive value, so the variable is dropped.
table(all$Utilities)
View(all[is.na(all$Utilities),])
all$Utilities <- NULL
# Electrical has a single NA: impute the modal class.
table(all$Electrical)
summary(all$Electrical[is.na(all$Electrical)])
# Pipe form of the same lookup, shown for comparison:
table(all$Electrical) %>% sort(decreasing = T) %>% names()
# Nested-call form used for the actual imputation:
names(sort(decreasing = T, table(all$Electrical)))[1] -> all$Electrical[is.na(all$Electrical)]
# KitchenQual has a single NA: impute the modal class.
summary(all$KitchenQual)
table(all$KitchenQual)
names(sort(decreasing = T, table(all$KitchenQual)))[1] -> all$KitchenQual[is.na(all$KitchenQual)]
View(table(all[all$KitchenQual=='Gd' | all$KitchenQual=='TA',c('KitchenAbvGr', 'KitchenQual', 'SalePrice')]))
# SaleType has a single NA: impute the modal class.
summary(all$SaleType)
table(all$SaleType)
names(sort(decreasing = T, table(all$SaleType)))[1] -> all$SaleType[is.na(all$SaleType)]
# Functional has two NAs: impute the modal class.
summary(all$Functional)
table(all$Functional)
names(sort(decreasing = T, table(all$Functional)))[1] -> all$Functional[is.na(all$Functional)]
# All missing values handled
kable(sapply(all, function(x) sum(is.na(x))))
#-------------------------------------------------------------------------------------------
# Outlier inspection: box plots per quality grade and scatter plots of
# SalePrice against the main size variables, labelling extreme points.
# NOTE(review): the geom_text_repel calls compute labels from a vector
# subset by !is.na(SalePrice) while the plot data is the same subset —
# lengths match here, but referencing all$... inside aes() instead of the
# plot's own data is fragile; verify label/row alignment.
ggplot(data=all[!is.na(all$SalePrice),], aes(x=factor(OverallQual), y = SalePrice))+
  geom_boxplot(col='blue')
ggplot(data=all[!is.na(all$SalePrice),], aes(x=factor(OverallCond), y = SalePrice))+
  geom_boxplot(col='blue')
ggplot(data=all[!is.na(all$SalePrice),], aes(x=factor(ExterCond), y = SalePrice))+
  geom_boxplot(col='blue')
ggplot(data=all[!is.na(all$SalePrice),], aes(x=SalePrice, y= GrLivArea))+
  geom_point (col='purple') + labs(x='Sale Price', y = 'Living area square feet')+
  geom_smooth(method = "lm", se=FALSE, color="black")+
  scale_x_continuous(breaks = seq(0,755000, 100000)) +
  scale_y_continuous(breaks = seq(0, 6000, 500)) +
  geom_text_repel(aes(label = ifelse(all$GrLivArea[!is.na(all$SalePrice)]>4500,
                                     rownames(all), '')))
ggplot(data=all[!is.na(all$SalePrice),], aes(x=SalePrice, y= GarageArea))+
  geom_point (col='purple') + labs(x='Sale Price', y = 'Garage Area')+
  geom_smooth(method = "lm", se=FALSE, color="black")+
  scale_x_continuous(breaks = seq(0,755000, 100000)) +
  scale_y_continuous(breaks = seq(0, 1500, 250))+
  geom_text_repel(aes(label = ifelse(all$GarageArea[!is.na(all$SalePrice)]>1250,
                                     rownames(all), '')))
ggplot(data=all[!is.na(all$SalePrice),], aes(x=SalePrice, y= TotalBsmtSF))+
  geom_point (col='purple') + labs(x='Sale Price', y = 'Total Basement Square Feet')+
  geom_smooth(method = "lm", se=FALSE, color="black")+
  scale_x_continuous(breaks = seq(0,755000, 100000)) +
  scale_y_continuous(breaks = seq(0, 6200, 500)) +
  geom_text_repel(aes(label = ifelse(all$TotalBsmtSF[!is.na(all$SalePrice)]>6000,
                                     rownames(all), '')))
ggplot(data=all[!is.na(all$SalePrice),], aes(x=SalePrice, y= X1stFlrSF))+
  geom_point (col='purple') + labs(x='Sale Price', y = '1st Floor Square Feet')+
  geom_smooth(method = "lm", se=FALSE, color="black")+
  scale_x_continuous(breaks = seq(0,755000, 100000)) +
  scale_y_continuous(breaks = seq(0, 5100, 400)) +
  geom_text_repel(aes(label = ifelse(all$X1stFlrSF[!is.na(all$SalePrice)]>4400,
                                     rownames(all), '')))
ggplot(data=all[!is.na(all$SalePrice),], aes(x=SalePrice, y= MasVnrArea))+
  geom_point (col='purple') + labs(x='Sale Price', y = 'Masonry veneer area')+
  geom_smooth(method = "lm", se=FALSE, color="black")+
  scale_x_continuous(breaks = seq(0,755000, 100000))+
  geom_text_repel(aes(label = ifelse(all$MasVnrArea[!is.na(all$SalePrice)]>1500,
                                     rownames(all), '')))
ggplot(data=all[!is.na(all$SalePrice),], aes(x=SalePrice, y= BsmtFinSF1))+
  geom_point (col='purple') + labs(x='Sale Price', y = '2nd Floor Square Feet')+
  geom_smooth(method = "lm", se=FALSE, color="black")+
  scale_x_continuous(breaks = seq(0,755000, 100000))+
  geom_text_repel(aes(label = ifelse(all$BsmtFinSF1[!is.na(all$SalePrice)]>4000,
                                     rownames(all), '')))
# From the plots above, observation 1299 is the one that most biases the
# analysis and is removed before modeling, along with a few other outliers.
# NOTE(review): row removal by position assumes row indices still match the
# original row names after the earlier rbind.
all <- all[-c(1299,582,1191,524,298),]
#-----------------------------------------------------------------------------------
# Rank the categorical variables by importance using a quick random forest.
classVars <- which(sapply(all, is.factor))#index vector of factor columns
names(classVars)
# select() here uses the integer positions from which().
all %>% select(classVars, SalePrice) -> col1
# Holdout sample (only trainClean, the labelled rows, is actually used below).
trainClean <- col1[!is.na(col1$SalePrice),]
testClean <- col1[is.na(col1$SalePrice),]
amostra = sample(2,dim(trainClean)[1], replace = T, prob = c(0.7,0.3))
trainClean1 <- trainClean[amostra==1,]
trainClean2 <- trainClean[amostra==2,]
# Model 1
set.seed(2017)
# NOTE(review): trControl is a caret::train() argument; randomForest()
# silently ignores it via `...`.
modelo6 = randomForest(SalePrice ~ ., data = trainClean1, trControl=trainControl(method="cv", number=5))
plot(modelo6)
summary(modelo6)
varImpPlot(modelo6)
importanceVar <- as.data.frame(importance(modelo6))
#---------------------------------------------------------------------------------
# Correlation, second pass (after cleaning and outlier removal)
numericVars2 <- which(sapply(all, is.numeric))#index vector numeric variables
numericVars2 <- names(numericVars2)
View(numericVars2)
class(numericVars2)
length(numericVars2)
correlacao2 <- cor(all[,numericVars2], use = 'pairwise.complete.obs')
# Strongest correlations with the target variable
best_cor2 <- sort(correlacao2[,'SalePrice'], decreasing = TRUE)
View(best_cor2)
# PS: strong correlations are those close to 1 and -1.
# Correlation plot over all numeric variables
corrplot(correlacao2, tl.col="black", tl.pos = "lt")
# Keep only one of any pair of highly-correlated independent variables when
# they logically measure the same thing; otherwise decide case by case.
# From the plot: BsmtFinSF1 x BsmtFullBath, TotalBsmtSF x X1stFlrSF,
# X2ndFlrSF x GrLivArea, X2ndFlrSF x HalfBath, X2ndFlrSF x TotRmsAbvGrd,
# GrLivArea x FullBath, TotRmsAbvGrd x GrLivArea, BsmtFullBath x BsmtFinSF1,
# TotRmsAbvGrd x BedroomAbvGr, GarageArea x GarageCars.
# High correlations with the target (SalePrice) mark good predictors.
# Sort on decreasing correlations with SalePrice - Target Variable
cor_decresc2 <- as.matrix(sort(correlacao2[,'SalePrice'], decreasing = TRUE))
# ---------------------------------------------------------------------------------
# Drop variables that are strongly correlated with another independent
# variable when both plausibly carry the same information:
all$GarageArea <- NULL
all$TotRmsAbvGrd <- NULL
all$YearRemodAdd <- NULL
all$TotalBsmtSF <- NULL
# Among the categorical variables, drop Exterior2nd (overlaps Exterior1st).
all$Exterior2nd <- NULL
# Also check whether pairs of related categorical variables are correlated.
# NOTE(review): as.numeric(factor) yields arbitrary alphabetical level codes,
# not an ordered quality scale (e.g. 'Ex' < 'Fa' < 'Gd' alphabetically), so
# these correlations are only a rough signal.
# OverallQual x OverallCond
ggplot(all, aes(x = as.factor(OverallCond))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(OverallQual))) + geom_bar(stat = 'count')
a <- as.numeric(all$OverallQual)
b <- as.numeric(all$OverallCond)
cor(a,b)
# No meaningful correlation
# GarageQual x GarageCond
ggplot(all, aes(x = as.factor(GarageQual))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(GarageCond))) + geom_bar(stat = 'count')
a <- as.numeric(all$GarageQual)
b <- as.numeric(all$GarageCond)
cor(a,b)
# Correlation of 54% — keep both for now
# BsmtQual x BsmtCond
ggplot(all, aes(x = as.factor(BsmtQual))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(BsmtCond))) + geom_bar(stat = 'count')
a <- as.numeric(all$BsmtQual)
b <- as.numeric(all$BsmtCond)
cor(a,b)
# No meaningful correlation
# ExterQual x ExterCond
ggplot(all, aes(x = as.factor(ExterQual))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(ExterCond))) + geom_bar(stat = 'count')
a <- as.numeric(all$ExterQual)
b <- as.numeric(all$ExterCond)
cor(a,b)
# No meaningful correlation
# Drop variables with low variability (one dominant class): they add noise
# without predictive power. Each is inspected (bar plot + table) first.
ggplot(all, aes(x = as.factor(BsmtCond))) + geom_bar(stat = 'count')
table(all$BsmtCond)
all$BsmtCond <- NULL
ggplot(all, aes(x = as.factor(GarageCond))) + geom_bar(stat = 'count')
table(all$GarageCond)
all$GarageCond <- NULL
ggplot(all, aes(x = as.factor(GarageQual))) + geom_bar(stat = 'count')
table(all$GarageQual)
all$GarageQual <- NULL
ggplot(all, aes(x = as.factor(ExterCond))) + geom_bar(stat = 'count')
table(all$ExterCond)
all$ExterCond <- NULL
ggplot(all, aes(x = as.factor(Street))) + geom_bar(stat = 'count')
table(all$Street)
all$Street <- NULL
ggplot(all, aes(x = as.factor(Alley))) + geom_bar(stat = 'count')
table(all$Alley)
all$Alley <- NULL
ggplot(all, aes(x = as.factor(LandContour))) + geom_bar(stat = 'count')
table(all$LandContour)
all$LandContour <- NULL
ggplot(all, aes(x = as.factor(LandSlope))) + geom_bar(stat = 'count')
table(all$LandSlope)
all$LandSlope <- NULL
ggplot(all, aes(x = as.factor(Condition1))) + geom_bar(stat = 'count')
table(all$Condition1)
all$Condition1 <- NULL
ggplot(all, aes(x = as.factor(Condition2))) + geom_bar(stat = 'count')
table(all$Condition2)
all$Condition2 <- NULL
ggplot(all, aes(x = as.factor(BldgType))) + geom_bar(stat = 'count')
table(all$BldgType)
all$BldgType <- NULL
ggplot(all, aes(x = as.factor(RoofMatl))) + geom_bar(stat = 'count')
table(all$RoofMatl)
all$RoofMatl <- NULL
ggplot(all, aes(x = as.factor(Heating))) + geom_bar(stat = 'count')
table(all$Heating)
all$Heating <- NULL
ggplot(all, aes(x = as.factor(CentralAir))) + geom_bar(stat = 'count')
table(all$CentralAir)
all$CentralAir <- NULL
ggplot(all, aes(x = as.factor(Electrical))) + geom_bar(stat = 'count')
table(all$Electrical)
all$Electrical <- NULL
ggplot(all, aes(x = as.factor(Functional))) + geom_bar(stat = 'count')
table(all$Functional)
all$Functional <- NULL
ggplot(all, aes(x = as.factor(PavedDrive))) + geom_bar(stat = 'count')
table(all$PavedDrive)
all$PavedDrive <- NULL
ggplot(all, aes(x = as.factor(PoolQC))) + geom_bar(stat = 'count')
table(all$PoolQC)
all$PoolQC <- NULL
ggplot(all, aes(x = as.factor(MiscFeature))) + geom_bar(stat = 'count')
table(all$MiscFeature)
all$MiscFeature <- NULL
ggplot(all, aes(x = as.factor(SaleType))) + geom_bar(stat = 'count')
table(all$SaleType)
all$SaleType <- NULL
# Remaining categorical variables, inspected and kept:
ggplot(all, aes(x = as.factor(MSSubClass))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(MSZoning))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(LotShape))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(LotConfig))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(Neighborhood))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(HouseStyle))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(RoofStyle))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(Exterior1st))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(MasVnrType))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(ExterQual))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(Foundation))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(BsmtExposure))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(BsmtFinType1))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(BsmtFinType2))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(HeatingQC))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(SaleCondition ))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(KitchenQual))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(FireplaceQu))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(GarageType))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(GarageFinish))) + geom_bar(stat = 'count')
ggplot(all, aes(x = as.factor(Fence))) + geom_bar(stat = 'count')
#---------------------------------------------------------------------------------
# Frequency plot of SalePrice (comment translated from Portuguese)
ggplot(data=all[!is.na(all$SalePrice),], aes(x=SalePrice)) +
geom_histogram(fill="blue", binwidth = 10000) +
scale_x_continuous(breaks= seq(0, 800000, by=100000))
summary(all$SalePrice)
# SalePrice is not normally distributed
qqnorm(all$SalePrice)
qqline(all$SalePrice)
shapiro.test(all$SalePrice)
skew(all$SalePrice)
# The skew of 1.87 indicates a right skew that is too high, and the Q-Q plot shows that
# sale prices are also not normally distributed. To fix this I am taking the log of SalePrice.
skew(log(all$SalePrice))
#As you can see, the skew is now quite low and the Q-Q plot is also looking much better.
# NOTE(review): these Q-Q plots are drawn BEFORE the log transform is applied on
# the next line, so they still show the untransformed prices.
qqnorm(all$SalePrice)
qqline(all$SalePrice)
all$SalePrice <- log(all$SalePrice)  # model the log of the target from here on
# Normalise numeric variables whose |skew| exceeds the threshold, then
# pre-process all of them. NOTE(review): the original comment said 0.8 but the
# code actually uses 0.7 as the cutoff.
all$Id <- as.character(all$Id)  # Id is an identifier, not a numeric feature
numericVars2 <- which(sapply(all, is.numeric))
numericVars2 <- numericVars2[-which(names(numericVars2)=='SalePrice')]
# Drop SalePrice, which is already on the log scale and must not be preprocessed
# NOTE(review): ifelse() is applied over two data.frames keyed by a per-column
# skew test; this relies on data.frames being lists and is fragile — TODO
# confirm every skewed column really ends up log(x + 1)-transformed.
skewed <- as.data.frame(ifelse(skew(all[,numericVars2]) > 0.7 | skew(all[,numericVars2]) < -0.7,
log(all[,numericVars2]+1),all[,numericVars2]))
colnames(skewed) <- names(numericVars2)
#-----------------------------------------------------------------------------------------
# Preprocessing and predicting it
# PreProcessing dataset Train and predicting it
# Center and scale every numeric predictor with caret::preProcess.
preNumVars <- preProcess(skewed, method = c('center','scale'),na.remove = T)
preNumVars
trainClean_NumVars <- predict(preNumVars,skewed)
dim(trainClean_NumVars)
# One-hot encode the categorical variables (model.matrix without intercept) so
# the linear/regularised models below can consume them. (translated)
categoricalVars2 <- which(sapply(all,is.factor))
dummy <- as.data.frame(model.matrix(~.-1,all[,categoricalVars2]))
dim(dummy)
# Drop dummy levels observed in too few rows (translated)
ZerocolTrain <- which(colSums(dummy)<10) # drop levels with fewer than 10 observations
colnames(dummy[ZerocolTrain])
dummy <- dummy[,-ZerocolTrain] # remove them
dim(dummy)
# Join numeric variables, dummies and the (logged) target into one modelling frame
trainClean <- cbind(trainClean_NumVars,dummy, all$SalePrice)
colnames(trainClean)[which(colnames(trainClean)== 'all$SalePrice')] <- 'SalePrice'
dim(trainClean)
#------------------------------------------------------------------------------------------
# Modelling and final prediction
#
# FIX(review): the original left the 70/30 train/test split commented out, so
# the validation code below referenced undefined objects `treino`/`teste` and
# failed at runtime. The split is restored here (seeded for reproducibility).
set.seed(2017)
amostra <- sample(2, dim(trainClean)[1], replace = TRUE, prob = c(0.7, 0.3))
treino <- trainClean[!is.na(trainClean$SalePrice) & amostra == 1, ]
teste <- trainClean[!is.na(trainClean$SalePrice) & amostra == 2, ]
# Model 7: full linear model, trained offline and stored as an .rda artefact.
# lm_mod <- lm(SalePrice ~ ., data = trainClean[!is.na(trainClean$SalePrice), ])
# saveRDS(lm_mod, file = 'lm_mod.rda')
lm_mod <- readRDS('lm_mod.rda') #BAIXAR ARQUIVO .RDA DO MODELO NO GITHUB
summary(lm_mod)
# Hold-out validation: refit on the training split and score RMSE on the test split
treino_mod <- lm(SalePrice ~ ., data = treino)
summary(treino_mod)
modelo7_prev <- predict(treino_mod, teste[, -which(colnames(treino) == 'SalePrice')])
RMSE(pred = modelo7_prev, obs = teste[, 'SalePrice'])
# Predict (log) sale prices for the scoring rows, i.e. where SalePrice is NA
lm_prev <- predict(lm_mod, trainClean[is.na(trainClean$SalePrice),
                                      -which(colnames(trainClean)=='SalePrice')])
# Lasso model (glmnet via caret; alpha = 1). The cross-validated training run
# is kept commented out; the fitted model is reloaded from disk instead.
# set.seed(50000)
# lassoGrid <- expand.grid(alpha = 1, lambda = seq(0.00001,0.1,by = 0.0005))
# alpha = 1 (lasso), alpha = 0 (ridge) and a value between 0 and 1 (say 0.3) is elastic net regression.
# lasso_mod <- train(SalePrice~., data = trainClean[!is.na(trainClean$SalePrice),],
#method='glmnet', trControl= my_control,tuneGrid=lassoGrid)
# Save the model and reload it. (translated)
# saveRDS(lasso_mod, file = 'lasso_mod.rda')
lasso_mod <- readRDS('lasso_mod.rda') #BAIXAR ARQUIVO .RDA DO MODELO NO GITHUB
lasso_mod
lasso_mod$bestTune
min(lasso_mod$results$RMSE)
min(lasso_mod$results$MAE)
max(lasso_mod$results$Rsquared)
# Variable importance (translated)
varImp(lasso_mod)
# Predict sale prices for the scoring rows (translated)
lasso_prev = predict(lasso_mod, trainClean[is.na(trainClean$SalePrice),
-which(colnames(trainClean)=='SalePrice')])
# Ridge model (alpha = 0) — explored but left commented out, not used in the ensemble.
# set.seed(50000)
# ridgeGrid <- expand.grid(alpha = 0, lambda = seq(0.00001,0.1,by = 0.0005))
# alpha = 1 (lasso), alpha = 0 (ridge) and a value between 0 and 1 (say 0.3) is elastic net regression.
# ridge_mod <- train(SalePrice~., data = trainClean[!is.na(trainClean$SalePrice),],
# method='glmnet',trControl= my_control,tuneGrid=ridgeGrid)
# ridge_mod
# ridge_mod$bestTune
# min(ridge_mod$results$RMSE)
# min(ridge_mod$results$MAE)
# max(ridge_mod$results$Rsquared)
# Elastic-net model — likewise explored but not used.
# set.seed(50000)
# elasticnet_mod <- train(SalePrice~., data = trainClean[!is.na(trainClean$SalePrice),],
# method='glmnet', trControl= my_control, tuneLength = 25)
# elasticnet_mod
# elasticnet_mod$bestTune
# min(elasticnet_mod$results$RMSE)
# min(elasticnet_mod$results$MAE)
# max(elasticnet_mod$results$Rsquared)
# eXtreme Gradient Boosting - XGBoost (tuned hyper-parameters kept for reference)
# xgb_params <- list(
# booster = 'gbtree',
# objective = 'reg:linear',
# colsample_bytree=0.9,
# eta=0.071,
# max_depth=2,
# min_child_weight=5,
# alpha=0.41,
# lambda=0.35,
# gamma=0.0001, # less overfit
# subsample=0.8)
# Build DMatrix inputs: labelled rows for training, unlabelled (NA SalePrice) for scoring
dtrain <- xgb.DMatrix(as.matrix(trainClean[!is.na(trainClean$SalePrice),
-which(colnames(trainClean)=='SalePrice')]),
label = as.matrix(trainClean$SalePrice[!is.na(trainClean$SalePrice)]))
dtest <- xgb.DMatrix(as.matrix(trainClean[is.na(trainClean$SalePrice),
-which(colnames(trainClean)=='SalePrice')]))
# set.seed(50000)
# xgboost_mod <- xgb.cv(xgb_params, data = dtrain,nrounds = 1000, metrics = 'rmse',
# print_every_n = 50, nfold = 10)
# set.seed(50000)
# xgboost_mod2 <- xgb.train(data = dtrain, params=xgb_params, nrounds = 1000)
# saveRDS(xgboost_mod2, file = 'xgboost_mod2.rda')
xgboost_mod2 <- readRDS('xgboost_mod2.rda') #BAIXAR ARQUIVO .RDA DO MODELO NO GITHUB
# Feature importance of the fitted booster (top 20 plotted)
mat <- xgb.importance (feature_names = colnames(trainClean[!is.na(trainClean$SalePrice),
-which(colnames(trainClean)=='SalePrice')]),
model = xgboost_mod2)
xgb.ggplot.importance(importance_matrix = mat[1:20], rel_to_first = TRUE)
# Predict sale prices for the scoring rows (translated)
xgboost_prev = predict(xgboost_mod2, dtest)
# Gradient Boosting Model - GBM. The grid search over depth/shrinkage/minobs is
# kept commented; the final fit is reloaded from disk.
# gbmGrid <- expand.grid(n.trees = 150,
#interaction.depth = c(2,3,4,5,6),
#shrinkage = c(0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7),
#n.minobsinnode = c(4,5,6,7,8,9,10))
#nrow(gbmGrid)
#set.seed(5000000)
#gbm_mod <- train(SalePrice~., data = trainClean[!is.na(trainClean$SalePrice),],
#method = "gbm",
#metric = "RMSE", trControl = my_control,
#tuneGrid = gbmGrid)
#gbm_mod$results
#gbm_mod$bestTune
#gbm_mod$results$RMSE[gbm_mod$results$shrinkage == 0.1 &
#gbm_mod$results$interaction.depth == 6 &
#gbm_mod$results$n.minobsinnode == 4]
# Save the model (translated)
# saveRDS(gbm_mod, file = 'gbm_mod.rda')
# Refined grid around the best tune from the first search:
# gbmGrid2 <- expand.grid(n.trees = 300,
#interaction.depth = c(6),
#shrinkage = c(0.13),
#n.minobsinnode = c(4))
# gbm_mod2 <- train(SalePrice~., data = trainClean[!is.na(trainClean$SalePrice),],
#method = "gbm",
#metric = "RMSE", trControl = my_control,
#tuneGrid = gbmGrid2)
# saveRDS(gbm_mod2, file = 'gbm_mod2.rda')
gbm_mod2 <- readRDS('gbm_mod2.rda') #BAIXAR ARQUIVO .RDA DO MODELO NO GITHUB
gbm_mod2
# Predict sale prices for the scoring rows (translated)
gbm_prev = predict(gbm_mod2, trainClean[is.na(trainClean$SalePrice),
-which(colnames(trainClean)=='SalePrice')])
# Correlation between the four model predictions (all on the log scale)
correlacao <- cbind(lm_prev,lasso_prev,xgboost_prev,gbm_prev)
cor(correlacao)
# Final prediction
previsao <- (((3*lasso_prev)+(4*lm_prev)+(2*xgboost_prev)+gbm_prev)/10)
# Weighted average over the models, with ordinal weights favouring the most
# effective ones (lm 4, lasso 3, xgboost 2, gbm 1). (translated)
previsao <- as.data.frame(exp(previsao))#need to reverse the log to the real values
previsao$id <- rownames(trainClean[is.na(trainClean$SalePrice),])  # recover row Ids
colnames(previsao) <- c('SalePrice','Id')
previsao$SalePrice <- round(previsao$SalePrice)
dim(previsao)
kable(sapply(previsao, function(x) sum(is.na(x))))  # sanity check: expect zero NAs
write.csv(previsao, file = 'previsao.csv', row.names = F )
|
85ff69b19a676e5ac287a75e60cc117d678bad81
|
c88b0cbeda0edf9e745e324ef942a504e27d4f87
|
/MTMM_ESCS/prestigeFromIPIP.R
|
0650f718fe9dc8ea00bf271d5db113d3b33a3f15
|
[] |
no_license
|
Diapadion/R
|
5535b2373bcb5dd9a8bbc0b517f0f9fcda498f27
|
1485c43c0e565a947fdc058a1019a74bdd97f265
|
refs/heads/master
| 2023-05-12T04:21:15.761115
| 2023-04-27T16:26:35
| 2023-04-27T16:26:35
| 28,046,921
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,931
|
r
|
prestigeFromIPIP.R
|
### Testing out a prestige factor from IPIP data
library(psych)
library(lavaan)
# Candidate IPIP item codes for the prestige construct (df.ipip is assumed to
# be loaded already — defined elsewhere in the project)
items = c('x110','e60','p361','p434','p421','h974','x247','h2043','h1086','h1193','p436','h1203','h204','p410','p401','h743','h746')
table(complete.cases(df.ipip[,items]))  # how many rows have all candidate items
nfactors(df.ipip[,items])
fa.parallel(df.ipip[,items])  # parallel analysis to suggest a factor count
temp = fa(df.ipip[,items], nfactors=3, rotate = 'oblimin')  # exploratory 3-factor solution
print(temp)
# Iterative CFA pruning: starting from a single-factor model with all candidate
# items, drop one poorly fitting item at a time and re-check fit indices
# (chi-square, CFI, RMSEA, SRMR) after each removal.
# NOTE(review): a few of the "remove" comments below do not match the item
# actually dropped from the following model string — verify against the fit
# output before trusting the comment text.
m.prstg = '
prestige =~ h1193+x110+e60+p361+p434+p421+h974+x247+h2043+h1086+p436+h1203+h204+p410+p401+h743+h746
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
## remove p361 - "Don't let others take credit for my work."
m.prstg = '
prestige =~ h1193+x110+e60+p421+p434+h974+x247+h2043+h1086+p436+h1203+h204+p410+p401+h743+h746
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
## remove p421 - "Let other people take the credit for my work."
m.prstg = '
prestige =~ h1193+x110+e60+p434+h974+x247+h2043+h1086+p436+h1203+h204+p410+p401+h743+h746
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
## remove h204 ### remove h743 - "Overestimate my achievements."
m.prstg = '
prestige =~ h1193+x110+e60+p434+h974+x247+h2043+h1086+p436+h1203+h743+p410+p401+h746
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
## remove e60 - "Don't care what people think of me."
m.prstg = '
prestige =~ h1193+x110+p434+h974+x247+h2043+h1086+p436+h1203+h743+p410+p401+h746
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
## remove p410 ### remove h204 - "Want to mean something to others."
m.prstg = '
prestige =~ h1193+x110+p434+h974+x247+h2043+h1086+p436+h1203+p401+h746
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
## remove h974
m.prstg = '
prestige =~ h1193+x110+p434+x247+h2043+h1086+p436+h1203+p401+h746
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
## remove x247 - "Seldom toot my own horn."
m.prstg = '
prestige =~ h1193+x110+p434+h2043+h1086+p436+h1203+p401+h746
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
## remove p436
m.prstg = '
prestige =~ h1193+x110+p434+h2043+h1086+h1203+p401+h746
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
## remove x110
m.prstg = '
prestige =~ h1193+p434+h2043+h1086+h1203+p401+h746
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
## remove h2043
m.prstg = '
prestige =~ h1193+p434+h1086+h1203+p401+h746
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
# Reliability (omega) of the surviving 6-item scale
omega(df.ipip[,c('h1193','p434','h1086','h1203','h746','p401')])
#######
# Alternative pruning branch: a different removal order from an earlier model,
# plus sub-scale and two-factor checks on the remaining items.
## remove h974 - "Need the approval of others."
m.prstg = '
prestige =~ h1193+x110+p434+h2043+h1086+p436+h1203
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
## remove p436 - "Want to amount to something special in others' eyes."
m.prstg = '
prestige =~ h1193+x110+p434+h2043+h1086+h1203
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
## remove h2043 - "Think highly of myself."
m.prstg = '
prestige =~ h1193+x110+p434+h1086+h1203
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
## remove x110 - "Am not highly motivated to succeed."
m.prstg = '
prestige =~ h1193+h1086+h1203+h746+p434
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
### 6-item single-factor variant
m.prstg = '
prestige =~ h1193+p434+h1086+h1203+h746+p401
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
### 4-item p-only variant
m.prstg = '
prestige =~ p434+p401+p410+p436
'
f.prstg = cfa(m.prstg, df.ipip)
fitMeasures(f.prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.prstg)
# Two-factor model: p-items vs h-items loading on separate correlated factors
m.2prstg = '
p1 =~ p401+p436+p434+p410
p2 =~ h1086+h1203+h746+h1193
'
f.2prstg = cfa(m.2prstg, df.ipip)
fitMeasures(f.2prstg, c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))
summary(f.2prstg)
# Reliability (omega) for the 8-item and 6-item versions
omega(df.ipip[,c('h1193','p434','h1086','h1203','h746','p401','p410','p436')])
omega(df.ipip[,c('h1193','p434','h1086','h1203','h746','p401')])
|
84184c4101017019197d43c426f2f9c065cfc6b2
|
ffe269345445ec40279d6748ff9b5221f294bb15
|
/run_analysis.R
|
73a1cbb4c6bad81ca933989eed0cc127fb1f7237
|
[] |
no_license
|
DominiekL/Getting-and-Cleaning-data---Assignment
|
23d7be4a9db593c47a4385873148275df24d6ac5
|
160d13d0f8ad2074b384d9e8595fd37a95aeb7ca
|
refs/heads/master
| 2020-12-24T15:41:03.363480
| 2015-02-21T14:34:34
| 2015-02-21T14:34:34
| 31,126,682
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,572
|
r
|
run_analysis.R
|
run_analysis <- function(testdir="test",traindir="train"){
  # Tidy the UCI HAR dataset: merge the test/train sets, keep only the
  # mean()/std() measurements, attach descriptive activity names and subject
  # ids, relabel the feature columns, and write the average of each variable
  # per subject and activity to "RunAnalysis.txt".
  #
  # Args:
  #   testdir  : directory containing X_test.txt, Y_test.txt, subject_test.txt
  #   traindir : directory containing the corresponding *_train.txt files
  # Also expects features.txt and activity_labels.txt in the working directory.
  # Side effect: writes RunAnalysis.txt. Returns the averages invisibly.

  # read all the files that are needed for this script
  Y_Test <- read.table(file.path(testdir,"Y_test.txt"))
  Y_Train <- read.table(file.path(traindir,"Y_train.txt"))
  X_Test <- read.table(file.path(testdir,"X_test.txt"))
  X_Train <- read.table(file.path(traindir,"X_train.txt"))
  Subj_Test <- read.table(file.path(testdir,"subject_test.txt"))
  Subj_Train <- read.table(file.path(traindir,"subject_train.txt"))
  Features <- read.table(file.path("features.txt"))
  Activities <- read.table(file.path("activity_labels.txt"))

  # combine test and training data into one data frame, for each element
  X_Full <- rbind(X_Test,X_Train)
  Y_Full <- rbind(Y_Test,Y_Train)
  Subj_Full <- rbind(Subj_Test,Subj_Train)

  # replace activity IDs with activity names
  # FIX(review): the original used merge(..., all=TRUE), which sorts by the key
  # and does NOT preserve row order, misaligning the activity labels with the
  # rows of X_Full/Subj_Full. match() keeps the original row order.
  Y_Full <- data.frame(Activities$V2[match(Y_Full$V1, Activities$V1)])

  # add a column title to each data frame (Features already holds the feature
  # names, so the original's redundant second read of features.txt was dropped)
  colnames(Subj_Full) <- c("Subject")
  colnames(Y_Full) <- c("Activity")
  colnames(X_Full) <- Features$V2

  # FIX(review): `MeanStandardColumns` was used below without ever being
  # defined, which made the function error at runtime; it is the vector of
  # feature names searched for "mean()"/"std()".
  MeanStandardColumns <- colnames(X_Full)

  # only retain the columns in X_Full containing "mean()" and "std()"
  meanColumns <- grep("mean()",MeanStandardColumns)
  stdColumns <- grep("std()",MeanStandardColumns)
  relevantColumns <- c(meanColumns,stdColumns)
  relevantXColumns <- MeanStandardColumns[relevantColumns]
  X_Full <- X_Full[,which(names(X_Full) %in% relevantXColumns)]

  # replace labels of X_Full with descriptive labels
  relevantXColumns <- gsub("^t", "Time: ",relevantXColumns)
  relevantXColumns <- gsub("^f", "Frequency: ",relevantXColumns)
  relevantXColumns <- gsub("Acc", " Accelerometer ",relevantXColumns)
  relevantXColumns <- gsub("Gyro", "Gyroscope ",relevantXColumns)
  relevantXColumns <- gsub("Mag", " Magnitude ",relevantXColumns)
  relevantXColumns <- gsub("mean\\(\\)", " Mean ",relevantXColumns)
  relevantXColumns <- gsub("meanFreq\\(\\)", " Mean Frequency ",relevantXColumns)
  relevantXColumns <- gsub("std\\(\\)", " Standard Deviation ",relevantXColumns)
  relevantXColumns <- gsub("  ", " ",relevantXColumns)
  relevantXColumns <- gsub("Jerk-", "Jerk -",relevantXColumns)
  names(X_Full) <- relevantXColumns

  # merge columns into one data frame
  FullData <- cbind(Subj_Full,Y_Full)
  FullData <- cbind(FullData,X_Full)

  # Create a new data set with the average of each variable per subject/activity
  # (aggregate is base R; the original's unused library(plyr)/library(knitr)
  # loads were removed)
  means <- aggregate(. ~Subject + Activity, FullData, mean)
  means <- means[order(means$Subject,means$Activity),]
  # FIX(review): partial argument name `row.name` spelled out as `row.names`
  write.table(means,file="RunAnalysis.txt",row.names=FALSE)
  invisible(means)
}
|
079d462b69b52f67b80ee216de70bd5849e444ee
|
a1950c24afad9fc53478f97d3803be10ee12388e
|
/Check_Conditions.R
|
f83fde819f3ecdbc5a43629266ccf6db5d4e5e76
|
[] |
no_license
|
NaSed/MONET
|
05e4cadb885e017212aa7e6dc9a30f23518fc416
|
f407d654e30e2c88024581c1977d67d6aedf41bc
|
refs/heads/master
| 2020-06-13T17:56:03.446693
| 2017-01-20T23:20:37
| 2017-01-20T23:20:37
| 75,571,202
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,832
|
r
|
Check_Conditions.R
|
# Check whether a CNF/DNF pair satisfies three structural conditions:
#   1. every clause c in CNF intersects every monomial m in DNF
#   2. CNF and DNF are built from exactly the same set of variables
#   3. max{|m| : m in DNF} <= |CNF|  and  max{|c| : c in CNF} <= |DNF|
# Inputs: cnf and dnf as lists of variable vectors; `verbose` toggles logging
# to the file-scope connection `file`. Returns NULL when all conditions hold;
# otherwise the assignment produced by Easy_case()/MinimumConflictAssignment().
Check_Conditions <- function(cnf, dnf, verbose)
{
  if (verbose) cat('\n', 'Checking conditions:', '\n', file=file, append=TRUE)
  Assign <- NULL
  n.d <- length(dnf)  # number of monomials in the DNF
  n.c <- length(cnf)  # number of clauses in the CNF

  # Degenerate case: an empty CNF or DNF cannot satisfy the conditions.
  if (n.c == 0 || n.d == 0)
  {
    if (verbose) cat('CNF or DNF is NULL, then they do not satisfy none of the conditions.', '\n', file=file, append=TRUE)
    return(MinimumConflictAssignment(CNF=cnf, DNF=dnf))
  }

  # Condition 1: every clause must share at least one variable with every monomial.
  for (i in seq_len(n.c))
  {
    for (j in seq_len(n.d))
    {
      if (length(intersect(cnf[[i]], dnf[[j]])) == 0)
      {
        if (verbose) cat('The first condtion is not satisfied because clause ', i, 'and monomial ', j, ' do not have common terms.', '\n' , file=file, append=TRUE)
        return(Easy_case(cnf=cnf, dnf=dnf))
      }
    }
  }

  # Condition 2: both formulas must use exactly the same variables.
  if (!setequal(unlist(cnf), unlist(dnf)))
  {
    if (verbose) cat('The second condtion is not satisfied because CNF and DNF have not the same set of variables.', '\n' , file=file, append=TRUE)
    return(Easy_case(cnf=cnf, dnf=dnf))
  }

  # Condition 3: clause/monomial sizes bounded by the opposite formula's length.
  t1 <- max(sapply(cnf, length)) <= n.d # max{|c|: c \in CNF} <= |D|
  t2 <- max(sapply(dnf, length)) <= n.c # max{|m|: m \in DNF} <= |C|
  if (!(t1 & t2))
  {
    if (verbose) cat('The third condtion is not satisfied because:', '\n', 'max{|c|: c in CNF} <= |D| is ', t1, '\n',
        ' and ', '\n',
        'max{|m|: m in DNF} <= |C| is ', t2, '\n' , file=file, append=TRUE)
    return(Easy_case(cnf=cnf, dnf=dnf))
  }

  # All conditions are satisfied: no assignment is needed.
  if (verbose) cat('All of conditions are satisfied by CNF and DNF.', '\n', file=file, append=TRUE)
  return(Assign)
}
|
7712d22dd5ecc5e0286c3c9a018a318d39a5d013
|
227632938c9bf3bd69511645c34179ebd3df6a95
|
/analysis/surge_pricing/exploratory_analysis.R
|
c9bea25e16f805fad803786077efb4cdbcbc6d40
|
[
"MIT"
] |
permissive
|
toddwschneider/chicago-taxi-data
|
542b8b29f4662e6d0383c2f83ac4724e05d1f586
|
2a3b664b45312a497470e41b820083ab774b46fe
|
refs/heads/master
| 2021-01-11T20:42:52.858673
| 2020-03-25T14:01:34
| 2020-03-25T14:01:34
| 79,171,199
| 82
| 25
|
MIT
| 2020-03-25T14:01:36
| 2017-01-17T00:12:36
|
R
|
UTF-8
|
R
| false
| false
| 23,907
|
r
|
exploratory_analysis.R
|
# assumes estimate_historical_surge_pricing.R has been run in its entirety
# calculate some aggregate stats
# Overall averages over clean, private (non-shared) trips with a fare ratio:
# mean surge multiplier and the share of trips at >=1.2x and >=1.5x
tnp_trips %>%
filter(
has_clean_fare_info,
!is.na(fare_ratio),
shared_status == "solo"
) %>%
summarize(
avg_fare_ratio = mean(fare_ratio),
frac12 = mean(fare_ratio >= 1.2),
frac15 = mean(fare_ratio >= 1.5)
)
# Same statistics broken out by pricing regime
tnp_trips %>%
filter(
has_clean_fare_info,
!is.na(fare_ratio),
shared_status == "solo"
) %>%
group_by(pricing_regime) %>%
summarize(
avg_fare_ratio = mean(fare_ratio),
frac12 = mean(fare_ratio >= 1.2),
frac15 = mean(fare_ratio >= 1.5)
) %>%
ungroup()
# Average surge by hour of week, split by pickup side and whether the trip
# falls in the anomalous Q2 2019 pricing regime.
avg_surge_by_time_of_week = tnp_trips %>%
filter(
has_clean_fare_info,
!is.na(fare_ratio),
shared_status == "solo"
) %>%
mutate(wday = wday(trip_start), hour = hour(trip_start)) %>%
group_by(
is_q2_2019 = (pricing_regime == "q2_2019"),
pickup_side,
hour,
# "weekend" = Friday 8 PM through Sunday 7 PM (assumes lubridate's default
# week start where wday 1 = Sunday — TODO confirm no week_start option is set)
weekday_type = case_when(
wday == 6 & hour >= 20 ~ "weekend",
wday == 7 ~ "weekend",
wday == 1 & hour < 20 ~ "weekend",
TRUE ~ "weekday"
)
) %>%
summarize(
avg_fare_ratio = mean(fare_ratio),
frac12 = mean(fare_ratio >= 1.2),
frac15 = mean(fare_ratio >= 1.5),
n = n()
) %>%
ungroup() %>%
mutate(q2_2019_factor = factor(is_q2_2019, levels = c(TRUE, FALSE), labels = c("Q2 2019", "Excl. Q2 2019")))
# Daily average surge statistics across the whole city
avg_surge_by_date = tnp_trips %>%
filter(
has_clean_fare_info,
!is.na(fare_ratio),
shared_status == "solo"
) %>%
group_by(date = as.Date(trip_start)) %>%
summarize(
avg_fare_ratio = mean(fare_ratio),
frac12 = mean(fare_ratio >= 1.2),
frac15 = mean(fare_ratio >= 1.5),
n = n()
) %>%
ungroup()
# surge prices in q2 2019
# Daily average surge multiplier, with Q2 2019 shaded red
png("graphs/average_surge_by_date.png", width = 800, height = 800)
avg_surge_by_date %>%
ggplot(aes(x = date, y = avg_fare_ratio)) +
annotate(
"rect",
xmin = as.Date("2019-03-29"), xmax = as.Date("2019-06-30"),
ymin = 0.98, ymax = 1.32,
fill = "#ff0000",
alpha = 0.15
) +
geom_line(size = 0.75) +
scale_y_continuous(labels = function(value) paste0(format(value, nsmall = 2), "x")) +
ggtitle("Surge Prices Were Highest in Q2 2019", "Average estimated surge multiplier") +
labs(caption = "Data via City of Chicago\ntoddwschneider.com") +
theme_tws(base_size = 24) +
no_axis_titles()
dev.off()
# Average fare for a "typical" trip: private, ~4 miles, ~16 minutes
avg_fare_for_typical_trip = tnp_trips %>%
filter(
has_clean_fare_info,
!is.na(fare_ratio),
shared_status == "solo",
trip_miles >= 4 & trip_miles <= 4.2,
trip_minutes >= 15 & trip_minutes <= 17
) %>%
group_by(date = as.Date(trip_start)) %>%
summarize(avg_fare = mean(fare), n = n()) %>%
ungroup()
# Daily average typical-trip fare, with Q2 2019 shaded red
png("graphs/average_typical_fare_by_date.png", width = 800, height = 800)
avg_fare_for_typical_trip %>%
ggplot(aes(x = date, y = avg_fare)) +
annotate(
"rect",
xmin = as.Date("2019-03-29"), xmax = as.Date("2019-06-30"),
ymin = 9, ymax = 13,
fill = "#ff0000",
alpha = 0.15
) +
geom_line(size = 0.75) +
scale_y_continuous(labels = scales::dollar) +
ggtitle("Ride-Hail Fares Were Highest in Q2 2019", "Average fare for a typical 4-mile, 15-minute trip") +
labs(caption = "Includes private (not shared) trips 4–4.2 miles, 15–17 minutes. Excludes tips and additional charges\nData via City of Chicago\ntoddwschneider.com") +
theme_tws(base_size = 24) +
no_axis_titles()
dev.off()
# Daily citywide pickup counts (all trips, no fare filters)
daily_trip_counts = tnp_trips %>%
group_by(date = as.Date(trip_start)) %>%
count(date)
# no obvious change in total demand during q2
png("graphs/total_trips_by_date.png", width = 800, height = 800)
daily_trip_counts %>%
ggplot(aes(x = date, y = n)) +
geom_line(size = 0.75) +
scale_y_continuous(labels = scales::comma) +
expand_limits(y = 0) +
ggtitle("Chicago Ride-Hail Activity", "Daily pickups") +
labs(caption = "Data via City of Chicago\ntoddwschneider.com") +
theme_tws(base_size = 24) +
no_axis_titles()
dev.off()
# trends by geography
# Four faceted charts of average surge by hour, one per combination of
# weekday/weekend and Q2-2019/excluding-Q2-2019, faceted by pickup side.
# Weekday, excluding Q2 2019
png("graphs/chicago_weekday_average_surge_by_side_ex_q2_2019.png", width = 1200, height = 1200)
avg_surge_by_time_of_week %>%
filter(weekday_type == "weekday", !is_q2_2019) %>%
ggplot(aes(x = hour, y = avg_fare_ratio)) +
geom_line() +
geom_point() +
scale_x_continuous(breaks = c(0, 6, 12, 18, 24), labels = c("12:00 AM", " 6:00 AM", "12:00 PM", " 6:00 PM", "")) +
scale_y_continuous(breaks = c(1, 1.05, 1.1), labels = function(value) paste0(format(value, nsmall = 2), "x")) +
expand_limits(y = 1, x = c(0, 25)) +
facet_wrap(~pickup_side, ncol = 3) +
ggtitle("Chicago Weekday Ride-Hail Surge Pricing by Pickup Side", "Average estimated surge pricing multiplier, 11/1/18–3/28/19 + 7/1/19–12/31/19") +
labs(caption = "Data via City of Chicago\ntoddwschneider.com") +
theme_tws(base_size = 20) +
no_axis_titles()
dev.off()
# Weekday, Q2 2019 only
png("graphs/chicago_weekday_average_surge_by_side_q2_2019.png", width = 1200, height = 1200)
avg_surge_by_time_of_week %>%
filter(weekday_type == "weekday", is_q2_2019) %>%
ggplot(aes(x = hour, y = avg_fare_ratio)) +
geom_line() +
geom_point() +
scale_x_continuous(breaks = c(0, 6, 12, 18, 24), labels = c("12:00 AM", " 6:00 AM", "12:00 PM", " 6:00 PM", "")) +
scale_y_continuous(labels = function(value) paste0(format(value, nsmall = 1), "x")) +
expand_limits(y = c(1, 1.4), x = c(0, 25)) +
facet_wrap(~pickup_side, ncol = 3) +
ggtitle("Chicago Weekday Ride-Hail Surge Pricing by Pickup Side", "Average estimated surge pricing multiplier, 3/29/19–6/30/19") +
labs(caption = "Data via City of Chicago\ntoddwschneider.com") +
theme_tws(base_size = 20) +
no_axis_titles()
dev.off()
# Weekend, excluding Q2 2019
png("graphs/chicago_weekend_average_surge_by_side_ex_q2_2019.png", width = 1200, height = 1200)
avg_surge_by_time_of_week %>%
filter(weekday_type == "weekend", !is_q2_2019) %>%
ggplot(aes(x = hour, y = avg_fare_ratio)) +
geom_line() +
geom_point() +
scale_x_continuous(breaks = c(0, 6, 12, 18, 24), labels = c("12:00 AM", " 6:00 AM", "12:00 PM", " 6:00 PM", "")) +
scale_y_continuous(breaks = c(1, 1.05, 1.1), labels = function(value) paste0(format(value, nsmall = 2), "x")) +
expand_limits(y = 1, x = c(0, 25)) +
facet_wrap(~pickup_side, ncol = 3) +
ggtitle("Chicago Weekend Ride-Hail Surge Pricing by Pickup Side", "Average estimated surge pricing multiplier, 11/1/18–3/28/19 + 7/1/19–12/31/19") +
labs(caption = "Data via City of Chicago\ntoddwschneider.com") +
theme_tws(base_size = 20) +
no_axis_titles()
dev.off()
# Weekend, Q2 2019 only
png("graphs/chicago_weekend_average_surge_by_side_q2_2019.png", width = 1200, height = 1200)
avg_surge_by_time_of_week %>%
filter(weekday_type == "weekend", is_q2_2019) %>%
ggplot(aes(x = hour, y = avg_fare_ratio)) +
geom_line() +
geom_point() +
scale_x_continuous(breaks = c(0, 6, 12, 18, 24), labels = c("12:00 AM", " 6:00 AM", "12:00 PM", " 6:00 PM", "")) +
scale_y_continuous(labels = function(value) paste0(format(value, nsmall = 1), "x")) +
expand_limits(y = 1, x = c(0, 25)) +
facet_wrap(~pickup_side, ncol = 3) +
ggtitle("Chicago Weekend Ride-Hail Surge Pricing by Pickup Side", "Average estimated surge pricing multiplier, 3/29/19–6/30/19") +
labs(caption = "Data via City of Chicago\ntoddwschneider.com") +
theme_tws(base_size = 20) +
no_axis_titles()
dev.off()
# Hourly pickup share per side, split weekday vs weekend.
# NB(review): variable name is misspelled ("distrbutions") but kept as-is for
# compatibility with any downstream code.
pickup_distrbutions = avg_surge_by_time_of_week %>%
group_by(weekday_type, pickup_side, hour) %>%
summarize(pickups = sum(n)) %>%
ungroup() %>%
group_by(weekday_type, pickup_side) %>%
mutate(frac = pickups / sum(pickups)) %>%  # share within each side/day-type
ungroup()
# Weekday pickup share by hour, faceted by side
png("graphs/chicago_weekday_average_pickups_by_side.png", width = 1200, height = 1200)
pickup_distrbutions %>%
filter(weekday_type == "weekday") %>%
ggplot(aes(x = hour, y = frac)) +
geom_line() +
geom_point() +
scale_x_continuous(breaks = c(0, 6, 12, 18, 24), labels = c("12:00 AM", " 6:00 AM", "12:00 PM", " 6:00 PM", "")) +
scale_y_continuous(labels = scales::percent) +
expand_limits(y = c(0, 0.1), x = c(0, 25)) +
facet_wrap(~pickup_side, ncol = 3) +
ggtitle("Chicago Weekday Ride-Hail Pickups Distribution by Side", "% of weekday pickups, 11/1/18–12/31/19") +
labs(caption = "Data via City of Chicago\ntoddwschneider.com") +
theme_tws(base_size = 20) +
no_axis_titles()
dev.off()
# Weekend pickup share by hour, faceted by side
png("graphs/chicago_weekend_average_pickups_by_side.png", width = 1200, height = 1200)
pickup_distrbutions %>%
filter(weekday_type == "weekend") %>%
ggplot(aes(x = hour, y = frac)) +
geom_line() +
geom_point() +
scale_x_continuous(breaks = c(0, 6, 12, 18, 24), labels = c("12:00 AM", " 6:00 AM", "12:00 PM", " 6:00 PM", "")) +
scale_y_continuous(labels = scales::percent) +
expand_limits(y = c(0, 0.1), x = c(0, 25)) +
facet_wrap(~pickup_side, ncol = 3) +
ggtitle("Chicago Weekend Ride-Hail Pickups Distribution by Side", "% of weekend pickups, 11/1/18–12/31/19") +
labs(caption = "Data via City of Chicago\ntoddwschneider.com") +
theme_tws(base_size = 20) +
no_axis_titles()
dev.off()
# Area-specific hourly surge charts (central, north side, west side). Each
# duplicates the first two hours at hour 24 via top_n(., 2, -hour) so the line
# wraps around to the right edge of the chart.
png("graphs/central_chicago_avg_weekday_surge.png", width = 800, height = 800)
avg_surge_by_time_of_week %>%
filter(pickup_side == "central", weekday_type == "weekday") %>%
bind_rows(mutate(top_n(., 2, -hour), hour = 24)) %>% {
ggplot(., aes(x = hour, y = avg_fare_ratio, color = q2_2019_factor)) +
geom_line(size = 0.75) +
geom_point(size = 3) +
scale_x_continuous(breaks = c(0, 6, 12, 18, 24), labels = c("12:00 AM", " 6:00 AM", "12:00 PM", " 6:00 PM", "12:00 AM")) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 3), labels = function(value) paste0(format(value, nsmall = 1), "x")) +
scale_color_manual(values = c(red, black), guide = FALSE) +
expand_limits(y = c(1, 1.1)) +
facet_wrap(~q2_2019_factor, ncol = 1, scales = "free_y") +
ggtitle("Weekday Ride-Hail Surge Pricing in Central Chicago", "Average estimated surge pricing multiplier, weekdays") +
labs(caption = "“Central Chicago” includes the Loop, Near North Side, and Near South Side community areas\nBased on ride-hail trips 11/1/18–12/31/19, “Q2 2019” defined as 3/29/19–6/30/19\nData via City of Chicago\ntoddwschneider.com") +
theme_tws(base_size = 24) +
no_axis_titles()
}
dev.off()
# Same chart for the North Side, weekdays
png("graphs/north_side_chicago_avg_weekday_surge.png", width = 800, height = 800)
avg_surge_by_time_of_week %>%
filter(pickup_side == "north", weekday_type == "weekday") %>%
bind_rows(mutate(top_n(., 2, -hour), hour = 24)) %>% {
ggplot(., aes(x = hour, y = avg_fare_ratio, color = q2_2019_factor)) +
geom_line(size = 0.75) +
geom_point(size = 3) +
scale_x_continuous(breaks = c(0, 6, 12, 18, 24), labels = c("12:00 AM", " 6:00 AM", "12:00 PM", " 6:00 PM", "12:00 AM")) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 3), labels = function(value) paste0(format(value, nsmall = 1), "x")) +
scale_color_manual(values = c(red, black), guide = FALSE) +
expand_limits(y = c(1, 1.1)) +
facet_wrap(~q2_2019_factor, ncol = 1, scales = "free_y") +
ggtitle("Weekday Ride-Hail Surge Pricing on Chicago’s North Side", "Average estimated surge pricing multiplier, weekdays") +
labs(caption = "“North Side” includes Avondale, North Center, Lake View, Lincoln Park, and Logan Square community areas\nBased on ride-hail trips 11/1/18–12/31/19, “Q2 2019” defined as 3/29/19–6/30/19\nData via City of Chicago\ntoddwschneider.com") +
theme_tws(base_size = 24) +
no_axis_titles()
}
dev.off()
# Same chart for the West Side, weekends
png("graphs/west_side_chicago_avg_weekend_surge.png", width = 800, height = 800)
avg_surge_by_time_of_week %>%
filter(pickup_side == "west", weekday_type == "weekend") %>%
bind_rows(mutate(top_n(., 2, -hour), hour = 24)) %>% {
ggplot(., aes(x = hour, y = avg_fare_ratio, color = q2_2019_factor)) +
geom_line(size = 0.75) +
geom_point(size = 3) +
scale_x_continuous(breaks = c(0, 6, 12, 18, 24), labels = c("12:00 AM", " 6:00 AM", "12:00 PM", " 6:00 PM", "12:00 AM")) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 3), labels = function(value) paste0(format(value, nsmall = 1), "x")) +
scale_color_manual(values = c(red, black), guide = FALSE) +
expand_limits(y = c(1, 1.1)) +
facet_wrap(~q2_2019_factor, ncol = 1, scales = "free_y") +
ggtitle("Weekend Ride-Hail Surge Pricing on Chicago’s West Side", "Average estimated surge pricing multiplier, weekends") +
labs(caption = "“West Side” includes Austin, East Garfield Park, Humboldt Park, Lower West Side, Near West Side,\nNorth Lawndale, South Lawndale, West Garfield Park, and West Town community areas\nBased on ride-hail trips 11/1/18–12/31/19, “Q2 2019” defined as 3/29/19–6/30/19\nData via City of Chicago\ntoddwschneider.com") +
theme_tws(base_size = 24) +
no_axis_titles()
}
dev.off()
# notable events at Soldier Field
soldier_field_trip_counts = trip_counts_padded %>%
filter(
pickup_region_id == major_venues$soldier_field,
region_type == "census_tract"
) %>%
mutate(
date_for_event = case_when(
hour(trip_start) <= 3 ~ as.Date(trip_start) - 1,
TRUE ~ as.Date(trip_start)
)
) %>%
left_join(all_surge_mults, by = c("pickup_region_id", "trip_start", "region_type"))
# biggest surge multipliers at Soldier Field
# (one row per event date: the hour with the max surge, among hours with >= 100 pickups)
soldier_field_trip_counts %>%
filter(trips >= 100) %>%
group_by(date_for_event) %>%
top_n(1, estimated_surge_ratio) %>%
ungroup() %>%
arrange(desc(estimated_surge_ratio)) %>%
select(trip_start, trips, modified_z_score, estimated_surge_ratio) %>%
print(n = 25)
# biggest pickup spikes at Soldier Field
# (same shape as above, but ranked by raw pickup volume instead of surge)
soldier_field_trip_counts %>%
filter(trips >= 100) %>%
group_by(date_for_event) %>%
top_n(1, trips) %>%
ungroup() %>%
arrange(desc(trips)) %>%
select(trip_start, trips, modified_z_score, estimated_surge_ratio) %>%
print(n = 25)
# Two stacked panels over the window [t1, t2]: the estimated surge pricing
# multiplier (top) and the raw number of pickups (bottom), combined with
# patchwork's `/` operator and a shared title/subtitle/caption.
#
# Args:
#   trip_counts: data frame with trip_start, estimated_surge_ratio, trips
#   t1, t2: window bounds, anything as.POSIXct() accepts (interpreted as UTC
#           to match the rest of the analysis)
#   surge_lim, trips_lim: y-axis limits forced into the respective panels
#   title, subtitle: overall chart annotation (NULL = omitted)
plot_surge_chart = function(trip_counts, t1, t2, surge_lim = c(0, 3.5), trips_lim = c(0, 350), title = NULL, subtitle = NULL) {
  t1 = as.POSIXct(t1, tz = "UTC")
  t2 = as.POSIXct(t2, tz = "UTC")

  t_data = trip_counts %>%
    filter(trip_start >= t1, trip_start <= t2)

  # The two panels previously duplicated ~15 lines of ggplot code; they differ
  # only in the y mapping, y scale, y limits, panel title, and bottom margin,
  # so build both with one local helper.
  build_panel = function(mapping, y_scale, y_lim, panel_title, bottom_margin_pt) {
    ggplot(t_data, mapping) +
      geom_line(size = 1) +
      scale_x_datetime(labels = scales::date_format("%l:%M %p")) +
      y_scale +
      expand_limits(y = y_lim) +
      ggtitle(panel_title) +
      theme_tws(base_size = 24) +
      no_axis_titles() +
      theme(
        panel.grid.minor = element_blank(),
        plot.margin = margin(24, 12, bottom_margin_pt, 12, "pt"),
        plot.title = element_text(size = rel(0.7), family = font_family)
      )
  }

  p1 = build_panel(
    aes(x = trip_start, y = estimated_surge_ratio),
    scale_y_continuous(labels = function(num) format(num, nsmall = 1)),
    surge_lim,
    "Estimated surge pricing multiplier",
    24
  )

  p2 = build_panel(
    aes(x = trip_start, y = trips),
    scale_y_continuous(labels = scales::comma),
    trips_lim,
    "Number of pickups",
    12
  )

  p1 / p2 +
    plot_annotation(
      title = title,
      subtitle = subtitle,
      caption = "Data via City of Chicago\ntoddwschneider.com",
      theme = theme_tws(base_size = 24)
    )
}
# Individual event charts at Soldier Field: concerts and Bears games.
# Each block renders one surge/pickup chart for the hours around the event.
# Rolling Stones No Filter Tour, second Chicago date
png("graphs/rolling_stones_soldier_field_20190625.png", width = 800, height = 1000)
plot_surge_chart(
trip_counts = soldier_field_trip_counts,
t1 = "2019-06-25 21:00:00",
t2 = "2019-06-26 02:00:00",
title = "Ride-Hailing at Soldier Field: The Rolling Stones No Filter Tour",
subtitle = "Tue Jun 25, 2019"
)
dev.off()
# Rolling Stones No Filter Tour, first Chicago date
png("graphs/rolling_stones_soldier_field_20190621.png", width = 800, height = 1000)
plot_surge_chart(
trip_counts = soldier_field_trip_counts,
t1 = "2019-06-21 21:00:00",
t2 = "2019-06-22 02:00:00",
title = "Ride-Hailing at Soldier Field: The Rolling Stones No Filter Tour",
subtitle = "Fri Jun 21, 2019"
)
dev.off()
# BTS World Tour, night 1
png("graphs/bts_soldier_field_20190511.png", width = 800, height = 1000)
plot_surge_chart(
trip_counts = soldier_field_trip_counts,
t1 = "2019-05-11 21:00:00",
t2 = "2019-05-12 02:00:00",
title = "Ride-Hailing at Soldier Field: BTS World Tour",
subtitle = "Sat May 11, 2019"
)
dev.off()
# BTS World Tour, night 2
png("graphs/bts_soldier_field_20190512.png", width = 800, height = 1000)
plot_surge_chart(
trip_counts = soldier_field_trip_counts,
t1 = "2019-05-12 21:00:00",
t2 = "2019-05-13 02:00:00",
title = "Ride-Hailing at Soldier Field: BTS World Tour",
subtitle = "Sun May 12, 2019"
)
dev.off()
# NFL season opener, Thursday night game
png("graphs/bears_packers_soldier_field_20190905.png", width = 800, height = 1000)
plot_surge_chart(
trip_counts = soldier_field_trip_counts,
t1 = "2019-09-05 18:00:00",
t2 = "2019-09-06 02:00:00",
title = "Ride-Hailing at Soldier Field: Bears vs. Packers",
subtitle = "Thu Sep 5, 2019. 7:20 PM kickoff"
)
dev.off()
# Thursday night game vs. Dallas
png("graphs/bears_cowboys_soldier_field_20191205.png", width = 800, height = 1000)
plot_surge_chart(
trip_counts = soldier_field_trip_counts,
t1 = "2019-12-05 18:00:00",
t2 = "2019-12-06 02:00:00",
title = "Ride-Hailing at Soldier Field: Bears vs. Cowboys",
subtitle = "Thu Dec 5, 2019. 7:20 PM kickoff"
)
dev.off()
# Bears vs. Eagles NFC wild-card game, famous for the "Double Doink" missed
# field goal.
# FIX: January 6, 2019 was a Sunday; the subtitle previously said "Sat".
png("graphs/bears_eagles_double_doink_soldier_field_20190106.png", width = 800, height = 1000)
plot_surge_chart(
  trip_counts = soldier_field_trip_counts,
  t1 = "2019-01-06 12:00:00",
  t2 = "2019-01-07 00:00:00",
  title = "Ride-Hailing at Soldier Field: Bears vs. Eagles “Double Doink”",
  subtitle = "Sun Jan 6, 2019. 3:40 PM kickoff"
)
dev.off()
# Sunday night game vs. Kansas City
png("graphs/bears_chiefs_soldier_field_20191222.png", width = 800, height = 1000)
plot_surge_chart(
trip_counts = soldier_field_trip_counts,
t1 = "2019-12-22 18:00:00",
t2 = "2019-12-23 02:00:00",
title = "Ride-Hailing at Soldier Field: Bears vs. Chiefs",
subtitle = "Sun Dec 22, 2019. 7:20 PM kickoff"
)
dev.off()
# notable events at the United Center
# Event calendar scraped/collected separately; one row per event listing.
united_center_events = read_csv("data/united_center_events_calendar.csv") %>%
mutate(timestamp = fastPOSIXct(timestamp, "UTC"))
# Collapse to one row per date, with every event's time + title concatenated
# so it can be joined onto the hourly trip counts below.
united_center_events_daily = united_center_events %>%
group_by(date) %>%
summarize(
num_events = n(),
events = paste(
paste(time, title),
collapse = ", "
)
) %>%
ungroup() %>%
arrange(date)
# Census-tract trip counts around the United Center, with pre-4 AM pickups
# attributed to the previous day's event (same convention as Soldier Field),
# joined to the daily event list and estimated surge multipliers.
united_center_trip_counts = trip_counts_padded %>%
filter(
pickup_region_id == major_venues$united_center,
region_type == "census_tract"
) %>%
mutate(
date_for_event = case_when(
hour(trip_start) <= 3 ~ as.Date(trip_start) - 1,
TRUE ~ as.Date(trip_start)
)
) %>%
left_join(united_center_events_daily, by = c("date_for_event" = "date")) %>%
left_join(all_surge_mults, by = c("pickup_region_id", "trip_start", "region_type"))
# biggest surge multipliers
# (one row per event date: the hour with the max surge, among hours with >= 100 pickups)
united_center_trip_counts %>%
filter(trips >= 100) %>%
group_by(date_for_event) %>%
top_n(1, estimated_surge_ratio) %>%
ungroup() %>%
arrange(desc(estimated_surge_ratio)) %>%
select(trip_start, trips, modified_z_score, estimated_surge_ratio, events) %>%
print(n = 25)
# biggest pickup spikes
# (lower trips threshold than the surge query above: 50 instead of 100)
united_center_trip_counts %>%
filter(trips >= 50) %>%
group_by(date_for_event) %>%
top_n(1, trips) %>%
ungroup() %>%
arrange(desc(trips)) %>%
select(trip_start, trips, modified_z_score, estimated_surge_ratio, events) %>%
print(n = 25)
# Individual event charts at the United Center (NBA games and concerts);
# trips_lim is lowered/raised per event to fit the pickup volume.
png("graphs/bulls_knicks_united_center_20190409.png", width = 800, height = 1000)
plot_surge_chart(
trip_counts = united_center_trip_counts,
t1 = "2019-04-09 18:00:00",
t2 = "2019-04-10 00:00:00",
title = "Ride-Hailing at the United Center: Bulls vs. Knicks",
subtitle = "Tue Apr 9, 2019. 7:00 PM tipoff",
trips_lim = c(0, 200)
)
dev.off()
png("graphs/bulls_knicks_united_center_20191112.png", width = 800, height = 1000)
plot_surge_chart(
trip_counts = united_center_trip_counts,
t1 = "2019-11-12 18:00:00",
t2 = "2019-11-13 00:00:00",
title = "Ride-Hailing at the United Center: Bulls vs. Knicks",
subtitle = "Tue Nov 12, 2019. 7:00 PM tipoff",
trips_lim = c(0, 200)
)
dev.off()
png("graphs/mumford_and_sons_united_center_20190329.png", width = 800, height = 1000)
plot_surge_chart(
trip_counts = united_center_trip_counts,
t1 = "2019-03-29 18:00:00",
t2 = "2019-03-30 02:00:00",
title = "Ride-Hailing at the United Center: Mumford & Sons Delta Tour",
subtitle = "Fri Mar 29, 2019",
trips_lim = c(0, 300)
)
dev.off()
# Travis Scott Astroworld Tour (Thu Dec 6, 2018).
# FIX: the output filename previously said 20190329 — copy-pasted from the
# Mumford & Sons chart above — even though the data window is Dec 6-7, 2018,
# which also caused it to overwrite that chart's PNG.
png("graphs/travis_scott_united_center_20181206.png", width = 800, height = 1000)
plot_surge_chart(
  trip_counts = united_center_trip_counts,
  t1 = "2018-12-06 18:00:00",
  t2 = "2018-12-07 02:00:00",
  title = "Ride-Hailing at the United Center: Travis Scott Astroworld Tour",
  subtitle = "Thu Dec 6, 2018",
  trips_lim = c(0, 300)
)
dev.off()
# Daily total of evening (9 PM to midnight) pickups near the United Center,
# a rough proxy for event-night activity across the whole study period.
united_center_evening_pickups_by_date = trip_counts_padded %>%
filter(
pickup_region_id == major_venues$united_center,
region_type == "census_tract",
hour(trip_start) %in% 21:23
) %>%
group_by(date = as.Date(trip_start)) %>%
summarize(evening_trips = sum(trips)) %>%
ungroup()
png("graphs/united_center_evening_pickups_by_date.png", width = 800, height = 800)
united_center_evening_pickups_by_date %>%
ggplot(aes(x = date, y = evening_trips)) +
geom_line() +
geom_point() +
scale_y_continuous(labels = scales::comma) +
ggtitle("Evening Ride-Hail Pickups Near United Center", "Daily pickups 9:00 PM–12:00 AM") +
labs(caption = "Data via City of Chicago\ntoddwschneider.com") +
theme_tws(base_size = 24) +
no_axis_titles()
dev.off()
# Every non-cancelled United Center event date within the study window;
# used below to render one chart per event date.
all_event_dates = united_center_events %>%
filter(date >= "2018-11-01", date <= "2019-12-31") %>%
filter(!grepl("cancel", tolower(title))) %>%
pull(date) %>%
unique() %>%
sort()
# One chart per United Center event date: render the surge/pickup chart for
# noon through 1:00 AM the next day and write it to a per-event PNG.
for (i in seq_along(all_event_dates)) {
  d = all_event_dates[i]
  t1 = paste(d, "12:00:00")
  t2 = paste(d + 1, "01:00:00")
  title = glue::glue("{strftime(d, '%a %b')} {day(d)}, {year(d)} Ride-Hailing at the United Center")
  # All event titles on that date, comma-separated. Note: pull(title) reads
  # the `title` column of united_center_events (data masking), not the local
  # variable assigned above.
  subtitle = united_center_events %>%
    filter(date == d) %>%
    pull(title) %>%
    paste(collapse = ", ")
  gg_obj = plot_surge_chart(united_center_trip_counts, t1, t2, title = title, subtitle = subtitle)
  # Filename slug: lowercase alphanumerics joined by underscores
  filename = paste(d, subtitle) %>%
    tolower() %>%
    str_replace_all("[^a-z0-9 ]", "") %>%
    str_trim() %>%
    str_replace_all(" ", "_")
  # FIX: the glue template previously contained a literal placeholder instead
  # of {filename}, so every iteration overwrote the same PNG.
  png(glue::glue("graphs/united_center_events/{filename}.png"), width = 800, height = 800)
  print(gg_obj)
  dev.off()
}
# Join surge estimates to tract-level demand z-scores, then compute the
# pickup-weighted average surge ratio within integer z-score buckets,
# split into Q2 2019 vs. the rest of the study period.
surge_mults_and_z_scores = all_surge_mults %>%
filter(region_type == "census_tract") %>%
inner_join(
filter(trip_counts_padded, region_type == "census_tract"),
by = c("pickup_region_id", "trip_start")
)
aggregate_surge_by_z = surge_mults_and_z_scores %>%
group_by(
q2 = as.Date(trip_start) >= "2019-03-29" & as.Date(trip_start) <= "2019-06-30",
# clamp z-scores into [-2, 8] so the tails collect into edge buckets
z_bucket = pmax(pmin(floor(modified_z_score), 8), -2)
) %>%
summarize(
n = sum(based_on_n),
# weighted by based_on_n, the number of fares behind each surge estimate
avg_ratio = sum(based_on_n * estimated_surge_ratio) / sum(based_on_n)
) %>%
ungroup()
# Average surge multiplier as a function of demand (modified z-score),
# faceted into Q2 2019 vs. the rest of the period.
png("graphs/average_surge_vs_modified_z_score.png", width = 800, height = 800)
aggregate_surge_by_z %>%
mutate(label = case_when(q2 ~ "3/29/19–6/30/19", TRUE ~ "11/1/18–3/28/19 + 7/1/19–12/31/19")) %>%
ggplot(aes(x = z_bucket, y = avg_ratio)) +
geom_line(size = 1) +
scale_x_continuous("Modified z-score", breaks = c(-2, 0, 2, 4, 6, 8), labels = c("< -2", 0, 2, 4, 6, "8+")) +
expand_limits(y = 1) +
facet_wrap(~label, ncol = 1, scales = "free_y") +
# invisible point to widen the Q2 facet's y-range for visual comparability
geom_blank(data = tibble(label = "3/29/19–6/30/19", z_bucket = 0, avg_ratio = 1.3)) +
ggtitle(
"Ride-Hail Surge Pricing vs. Demand",
"Average estimated surge pricing multiplier"
) +
labs(caption = "Modified z-score represents tract-level demand compared to “average” for time of day based on median and median absolute deviation\nData via City of Chicago\ntoddwschneider.com") +
theme_tws(base_size = 24) +
theme(axis.title.y = element_blank())
dev.off()
|
ae8a3a4144c537668b153fc37eba89d0fa73510f
|
8709ac855ca420a513b003559e8f410f65b65b70
|
/source/R/install_packages.R
|
c3ef0740b90717b82fbd94abcf5ee8090bc61320
|
[] |
no_license
|
jorainer/rnw-based-affy-analysis
|
5c5c8dbb8f7963ff8eb9236f1ebf8e62f5af7b98
|
7adc0e7ab339553920902c7f66a63572d0d36f60
|
refs/heads/master
| 2021-05-28T00:57:40.546544
| 2014-10-28T09:19:44
| 2014-10-28T09:19:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 469
|
r
|
install_packages.R
|
# Install every package the analysis depends on. Intended to be run alongside
# install_packages.sh, which fetches and installs packages hosted on GitHub
# or other repositories.

# Pull in Bioconductor's (legacy) installer function, biocLite().
source( "http://www.bioconductor.org/biocLite.R" )

cat( "\n\nInstalling basic Bioconductor:\n" )
biocLite()

cat( "\n\nInstalling base packages:\n" )
required_packages <- c(
  "Biobase", "RColorBrewer", "xtable", "gplots", "affy",
  "gcrma", "affyPLM", "multtest", "limma"
)
biocLite(required_packages)
|
778adc3cacd776ce77cc657291f2596d0c5a17b3
|
772f625f70ed8c79add852820d5aeb674f0c254a
|
/code/time_since_infection_discharge.R
|
58f479ad231a9da5c08d83e83bc84912f9d7f2df
|
[
"MIT"
] |
permissive
|
gwenknight/hai_first_wave
|
68699f1eb0a1a7de8b219472e5527d1a5823d3de
|
1b9bbba8a312889f2e274897d53483a35cc84991
|
refs/heads/main
| 2023-06-06T23:51:24.641900
| 2021-07-01T15:31:33
| 2021-07-01T15:31:33
| 355,233,961
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,911
|
r
|
time_since_infection_discharge.R
|
# =========================================================================== #
# Probability distribution for time since infection until hospital discharge
# =========================================================================== #
# INPUT
# los_distr = probability distribution for LOS
# OUTPUT
# res = Probability distribution for time since infection until discharge
# Note:
# We assume that given a LOS, the time of infection is equally likely on
# each day of LOS
# Probability distribution for the time from infection until hospital discharge.
#
# Args:
#   los_distr: numeric probability vector; los_distr[l] = P(length of stay = l days)
# Returns:
#   Numeric vector `res` of the same length, where res[t] is the probability
#   of being discharged t days after infection, under the assumption that,
#   given a LOS, infection is equally likely on each day of the stay:
#     res[t] = sum_{l >= t} los_distr[l] / mean_los
#
# Vectorized: the reverse cumulative sum replaces the original O(n^2) double
# loop and gives identical results.
time.infection.discharge <- function(los_distr){
  mean_los <- sum(seq_along(los_distr) * los_distr)
  rev(cumsum(rev(los_distr))) / mean_los
}
# Variant of time.infection.discharge() using a half-day convention for the
# first day: a stay recorded as "1 day" contributes 0.5 days to mean LOS
# (weights 0.5, 1, 2, ..., length - 1 instead of 1, 2, ..., length).
#
# Vectorized like time.infection.discharge(). Also fixes the length-1 edge
# case: the original `seq(1:(length(los_distr)-1))` expanded to c(1, 2) when
# the distribution had a single day; seq_len(0) correctly yields nothing.
time.infection.discharge.05 <- function(los_distr){
  weights <- c(0.5, seq_len(length(los_distr) - 1))
  mean_los <- sum(weights * los_distr)
  rev(cumsum(rev(los_distr))) / mean_los
}
# # EXAMPLE
# # Exponential probability distribution for LOS
# meanlos <- 1
# maxday <- 30
# cum_prob_los <- pexp(1:maxday,1/meanlos)
# prob_los <- cum_prob_los-c(0,cum_prob_los[1:(maxday-1)])
# discrete.meanlos<-sum(prob_los*(1:maxday))
#
# # Probability distribution for time since infection until hospital discharge
# time.infection.discharge(prob_los)
#
# s <- sample(x=seq(1,maxday,1),size = 1000, prob = time.infection.discharge(prob_los), replace = TRUE)
#
# # Normal probability distribution for LOS
# meanlos <- 1
# maxday <- 30
# cum_prob_los <- pnorm(1:maxday,mean = meanlos, sd = 0.1)
# prob_los <- cum_prob_los-c(0,cum_prob_los[1:(maxday-1)])
# discrete.meanlos<-sum(prob_los*(1:maxday))
#
# # Probability distribution for time since infection until hospital discharge
# r <- time.infection.discharge(prob_los)
# sum(r - prob_los)
# r
# prob_los
|
7b60bdcbe08214d3a2ea0c30c49874835b84688e
|
8dc7c48e822815eb71af789e4a97c229c0ab8ecd
|
/man/expand_idf_dots_name.Rd
|
ba1a00557731975f4594f40463443026dc3cb104
|
[
"MIT"
] |
permissive
|
hongyuanjia/eplusr
|
02dc2fb7eaa8dc9158fe42d060759e16c62c6b47
|
4f127bb2cfdb5eb73ef9abb545782f1841dba53a
|
refs/heads/master
| 2023-08-31T02:49:26.032757
| 2023-08-25T15:21:56
| 2023-08-25T15:21:56
| 89,495,865
| 65
| 13
|
NOASSERTION
| 2023-08-24T02:05:22
| 2017-04-26T15:16:34
|
R
|
UTF-8
|
R
| false
| true
| 1,168
|
rd
|
expand_idf_dots_name.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/impl-idf.R
\name{expand_idf_dots_name}
\alias{expand_idf_dots_name}
\title{Parse object ID or name specifications given in list format}
\usage{
expand_idf_dots_name(
idd_env,
idf_env,
...,
.keep_name = TRUE,
.property = NULL
)
}
\arguments{
\item{idd_env}{An environment or list contains IDD tables including class,
field, and reference.}
\item{idf_env}{An environment or list contains IDF tables including object,
value, and reference.}
\item{...}{Lists of object ID or name pair, e.g. \code{c(Name1, Name2)}, \code{c(ID1, ID2)}, \code{NewName = OldName} and \code{NewName = ID}. \code{NewName} is optional.}
\item{.keep_name}{If \code{TRUE}, input new names will be kept in a column named
\code{new_object_name}, otherwise they will be dropped. Default: \code{TRUE}.}
\item{.property}{A character vector of column names in class table to return.
Default: \code{NULL}.}
}
\value{
A \code{\link[data.table:data.table]{data.table::data.table()}} containing extracted object data.
}
\description{
Parse object ID or name specifications given in list format
}
\keyword{internal}
|
ef223f94204d60e84a819e9e4fdba661307f9796
|
9730e665d03a919cede89b2b14ea86ba00bea475
|
/code/W0_analysis.R
|
241c57ce0f31f6f6bd95e17ae42024f85dfba18a
|
[] |
no_license
|
foxeswithdata/StoringForDrought
|
4f49a1ae4243140ede71465d1e3bad65ce31254f
|
8a3c0ea57642395785ada4a6503e424f990f47f0
|
refs/heads/main
| 2023-01-28T17:38:39.168755
| 2020-12-07T04:37:16
| 2020-12-07T04:37:16
| 319,204,470
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,552
|
r
|
W0_analysis.R
|
# Coarse-to-fine search for the maximum initial water store W0 with no
# storage allocation (kf = 0): refine the bracket at deltaW0 resolutions of
# 100, then 10, then 1. W0max()/W0min() are defined in code/W0_funcs.R.
source("code/parameters.R")
source("code/models/water_model.R")
source("code/W0_funcs.R")
param <- param_list_drake_W0_1000()
W0min(param)
# W0_maxS
W0_maxS <- W0max(param, simple_water_model_sim_time_breaks, c(4000,7000), deltaW0 = 100, deltat=0.1, kf=0)
print(W0_maxS)
W0_maxS_100 = W0_maxS
W0_maxS_100
# refine: search within 100 units below the coarse estimate, step 10
W0_maxS <- W0max(param, simple_water_model_sim_time_breaks, c(W0_maxS-100,W0_maxS), deltaW0 = 10, deltat=0.1, kf=0)
print(W0_maxS)
W0_maxS_10 = W0_maxS
# refine again: step 1
W0_maxS <- W0max(param, simple_water_model_sim_time_breaks, c(W0_maxS-10,W0_maxS), deltaW0 = 1, deltat=0.1, kf=0)
print(W0_maxS)
# W0_maxM+S: same coarse-to-fine search, with half the flux allocated to
# storage (kf = 0.5). NOTE(review): unlike the kf = 0 section above, the
# coarse pass relies on W0max()'s default deltaW0 — confirm it is 100.
W0_maxMS <- W0max(param, simple_water_model_sim_time_breaks, c(4000,7000), deltat=0.1, kf=0.5)
# FIX: was kprint(), an undefined function that stopped the script here.
print(W0_maxMS)
W0_maxMS_100 = W0_maxMS
W0_maxMS_100
W0_maxMS <- W0max(param, simple_water_model_sim_time_breaks, c(W0_maxMS-100,W0_maxMS), deltaW0 = 10, deltat=0.1, kf=0.5)
print(W0_maxMS)
W0_maxMS_10 = W0_maxMS
W0_maxMS <- W0max(param, simple_water_model_sim_time_breaks, c(W0_maxMS-10,W0_maxMS), deltaW0 = 1, deltat=0.1, kf=0.5)
print(W0_maxMS)
# Sweep kf from 0 to 1 in steps of 0.1 and record the maximum W0 at each
# value, refining from 100-unit down to 1-unit resolution.
kf_list = seq(from = 0, to = 1, by = 0.1)
# FIX: was `seqlength = length(kf)`, but `kf` is undefined at this point so
# the script errored here; also preallocate the result vector instead of
# growing it inside the loop.
W0_max_res <- numeric(length(kf_list))
for (j in seq_along(kf_list)) {
  kf <- kf_list[j]
  print(kf)
  W0_max <- W0max(param, simple_water_model_sim_time_breaks, c(W0min(param), 7000), deltaW0 = 100, deltat = 0.1, kf = kf)
  W0_max <- W0max(param, simple_water_model_sim_time_breaks, c(W0_max - 100, W0_max), deltaW0 = 10, deltat = 0.1, kf = kf)
  W0_max <- W0max(param, simple_water_model_sim_time_breaks, c(W0_max - 10, W0_max), deltaW0 = 1, deltat = 0.1, kf = kf)
  W0_max_res[j] <- W0_max
}
|
043541774a437328242aa2fc67313a137c36bc09
|
6d0999bafd5986933e13719034254d9bfab4d47c
|
/Code/R/predict_lundgren.R
|
ab20720eebb6a2b644ab0a3ea105a6612444c01b
|
[] |
no_license
|
emilio-berti/rewiring-rewilding
|
63199f62d1b403c5f59b2e38d4942f448da3416d
|
864ac86669162238154da89a974b0ef66c95a9a7
|
refs/heads/master
| 2022-12-22T00:18:43.366985
| 2020-09-24T06:43:23
| 2020-09-24T06:43:23
| 290,488,904
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,406
|
r
|
predict_lundgren.R
|
library(tidyverse)
library(sf)
library(raster)
library(fasterize)
library(foreach)
library(doParallel)
# For one species, overlay its introduced range (Lundgren et al. polygons,
# read from the global `lundgren` sf object) on the candidate re-introduction
# area (protected-area raster + 500 km buffer raster, hard-coded paths).
# Returns NA when the introduced range is fully masked out by the protected-
# area layer (nothing to predict); otherwise a one-row data frame with the
# overlapping cell count and the total introduced-range cell count.
# NOTE(review): assumes pn and r are 0/1 presence rasters on matching grids —
# confirm against the upstream raster-building scripts.
lund_predict <- function(taxon){
pn <- raster(paste0("/home/emilio/PN_resampled/5km/", taxon, ".tif"))
r <- raster(paste0("/home/emilio/Buffer_500km/", taxon, ".tif"))
# rasterize this species' introduced-range polygons onto r's grid
lund <- lundgren %>%
filter(Species == taxon) %>%
st_transform(st_crs(r)) %>%
fasterize(r)
if(identical(lund, mask(lund, pn, maskvalue = 0))){
return(NA)
} else{
# union of protected areas and buffer, clipped to a 0/1 mask
re_intro <- pn + r
re_intro[re_intro > 1] <- 1
lund_area <- sum(values(lund), na.rm = T)
lund_predicted <- sum(values(lund * re_intro), na.rm = T)
return(data.frame(taxon, lund_predicted, lund_area))
}
}
# Same computation as lund_predict(), but against IUCN range polygons.
# NOTE(review): the global `IUCN` sf object is never defined in this file,
# and this function is never called here — presumably it is supplied by a
# sibling script; confirm before use.
IUCN_predict <- function(taxon){
pn <- raster(paste0("/home/emilio/PN_resampled/5km/", taxon, ".tif"))
r <- raster(paste0("/home/emilio/Buffer_500km/", taxon, ".tif"))
IUCN_raster <- IUCN %>%
filter(Species == taxon) %>%
st_transform(st_crs(r)) %>%
fasterize(r)
if(identical(IUCN_raster, mask(IUCN_raster, pn, maskvalue = 0))){
return(NA)
} else{
re_intro <- pn + r
re_intro[re_intro > 1] <- 1
IUCN_area <- sum(values(IUCN_raster), na.rm = T)
IUCN_predicted <- sum(values(IUCN_raster * re_intro), na.rm = T)
return(data.frame(taxon, IUCN_predicted, IUCN_area))
}
}
# --- Data preparation and parallel prediction ---
# NOTE(review): setwd() with an absolute path makes this script machine-specific.
setwd("/home/GIT/trophic_restoration/Code/R")
source("clean_taxonomy.R")
# Introduced-megafauna ranges; species names normalized to underscore form
lundgren <- read_sf("../../Data/ECOG-03430/IntroducedMegafaunaRangesFinal_%282%29/IntroducedMegafaunaRangesFinal.shp") %>%
mutate(Species = gsub("[.]", "_", Species)) %>%
mutate(Species = gsub(" ", "_", Species))
species <- lundgren %>%
pull(Species) %>%
unique() %>%
sort()
taxonomy <- cbind(species, clean_taxonomy(species)[[1]]) #no missing species for Lundgren dataset
# replace each raw name with its cleaned taxonomy (column 2 of the lookup)
lundgren <- lundgren %>%
mutate(Species = map(Species, function(x){
taxonomy[which(taxonomy[ , 1] == x), 2]
}) %>% unlist())
# Predictions -------------------------------------------------------------
lundgren_species <- lundgren %>%
pull(Species) %>%
unique()
# only species that have a modelled 500 km buffer raster can be predicted
modelled <- list.files("/home/emilio/Buffer_500km", pattern = ".tif") %>%
gsub(".tif", "", .)
species <- intersect(lundgren_species, modelled)
# run lund_predict() over species on 6 workers, row-binding the results
registerDoParallel(6)
foreach(taxon = species,
.combine = "rbind") %dopar% {
lund_predict(taxon)
} -> Lundgren_proportions
stopImplicitCluster()
write_csv(Lundgren_proportions, "../../Results/Lundgren_500km.csv")
# Predicted vs. observed introduced range on log-log axes with a dashed 1:1
# line; zero predictions are bumped to 1 so they survive the log10 transform.
Lundgren_proportions %>%
  # FIX: removed a stray "00" literal that had been left at the top of the
  # anonymous function body (it parsed as a discarded numeric constant).
  mutate(lund_predicted = sapply(lund_predicted, function(x) {
    if (x == 0) {
      return(1)
    } else {
      return(x)
    }
  })) %>%
  ggplot() +
  geom_point(aes(lund_area, lund_predicted)) +
  scale_x_log10(limits = c(1, max(Lundgren_proportions$lund_area))) +
  scale_y_log10(limits = c(1, max(Lundgren_proportions$lund_area))) +
  geom_abline(aes(slope = 1, intercept = 0), linetype = "dashed") +
  theme(
    panel.background = element_blank(),
    axis.line = element_line(),
    panel.grid = element_line(colour = "gainsboro")
  ) +
  xlab("Introduced megafauna range")
# Figure ------------------------------------------------------------------
# library(tmap)
# data("World")
#
# lundgren %>%
# ggplot() +
# geom_sf(data = World %>% st_transform(st_crs(lundgren)), fill = NA) +
# geom_sf(aes(fill = Species), show.legend = F) +
# facet_wrap(Species ~ ., ncol = 5) +
# theme(
# panel.background = element_blank()
# )
|
efc054b2a1c6a4f8c640cc799423073fe8631b39
|
62ce083cc73b245787c535c44dcf736f66be6c16
|
/data_fetch.R
|
49e7189fbd18c437fc1b4f66bf1f68156d9a5aa1
|
[] |
no_license
|
WangLiuying/dota2
|
071abb4ae355a60966a4d6e2696579e195b997ca
|
6df9e3fdf5b69bf9448cd811d36d21e7b2c99406
|
refs/heads/master
| 2021-09-08T04:54:03.860090
| 2017-12-08T11:42:37
| 2017-12-08T11:42:37
| 110,673,341
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,339
|
r
|
data_fetch.R
|
# Scrape hero attributes from the Chinese Dota 2 site.
library(rvest)
library(stringr)
# collect the link to every hero's detail page from the index page
url = 'http://www.dota2.com.cn/heroes/index.htm'
page = read_html(url)
linkpage = html_nodes(page,xpath="//ul[@class='hero_list']/li/a") %>% html_attr('href')
# Visit each hero's page and append one row of attributes per hero.
# NOTE(review): growing a data frame with rbind() in a loop is O(n^2); fine
# for ~120 heroes, but a list + bind at the end would scale better.
herosAttributes = data.frame()
for (url_link in linkpage)
{
## fetch data for one hero
page = read_html(url_link)
# hero name: English part and Chinese part extracted from the same text node
name = html_nodes(page,xpath = "//div[@class='top_hero_card']//p") %>% html_text() %>%
str_extract(pattern = "[a-z|A-Z|]+.+") %>% str_replace_all(pattern = '[\\\t]',replacement = "")
name_ch = html_nodes(page,xpath = "//div[@class='top_hero_card']//p") %>% html_text() %>%
str_extract(pattern = ".+[a-z|A-Z|]") %>% str_replace_all(pattern = '[a-z|A-Z]',replacement = "")
jpg = html_nodes(page,xpath = "//div[@class='top_hero_card']//img") %>% html_attr('src')
# attack type
AttackType = html_node(page,xpath = "//p[@class='info_p']") %>% html_text() %>%
str_replace_all(pattern = '[\\\t|\\\n]',replacement = "")
# role tags and their star ratings as shown on the page
type = html_nodes(page,xpath = "//p[@id='lastEm']//span[@class='_btn_type']") %>%
html_attr("type")
typeStars = html_nodes(page,xpath = "//p[@id='lastEm']//span[@class='_btn_type']") %>%
html_attr("star") %>% as.numeric()
names(typeStars) = type
# fixed vector of the nine possible roles (core, control, initiator, jungler,
# support, durable, burst, pusher, escape); keys stay in Chinese to match the
# site, and only the roles present on this page get non-zero stars
type = rep(0,9)
names(type) = c('核心','控制','先手','打野','辅助','耐久','高爆发','推进','逃生')
type[names(typeStars)] = typeStars
# faction
camp = html_nodes(page,xpath = "//p[@class='info_p zhengying_p']") %>% html_text() %>%
str_replace_all(pattern = '[:space:]',replacement = "")
# attribute panel: each <li> holds "base+growth" or labeled numeric fields
pro6 = html_node(page,xpath = "//ul[@class='pro6_box']")
## strength: base value and per-level growth
protext = html_node(pro6,xpath = "li[1]") %>% html_text() %>%
str_replace_all(pattern = '[:space:]',replacement = "")
strength = str_extract(protext, pattern = "[0-9|.]+\\+") %>% str_replace(pattern = '\\+',replacement = "") %>% as.numeric()
strength_growth = str_extract(protext, pattern = "\\+[0-9|.]+") %>% str_replace(pattern = '\\+',replacement = "") %>% as.numeric()
## agility
protext = html_node(pro6,xpath = "li[2]") %>% html_text() %>%
str_replace_all(pattern = '[:space:]',replacement = "")
agility = str_extract(protext, pattern = "[0-9|.]+\\+") %>% str_replace(pattern = '\\+',replacement = "") %>% as.numeric()
agility_growth = str_extract(protext, pattern = "\\+[0-9|.]+") %>% str_replace(pattern = '\\+',replacement = "") %>% as.numeric()
## intelligence
protext = html_node(pro6,xpath = "li[3]") %>% html_text() %>%
str_replace_all(pattern = '[:space:]',replacement = "")
wisdom = str_extract(protext, pattern = "[0-9|.]+\\+") %>% str_replace(pattern = '\\+',replacement = "") %>% as.numeric()
wisdom_growth = str_extract(protext, pattern = "\\+[0-9|.]+") %>% str_replace(pattern = '\\+',replacement = "") %>% as.numeric()
## attack: speed, base damage, range (labels in the patterns are Chinese site text)
protext = html_node(pro6,xpath = "li[4]") %>% html_text() %>%
str_replace_all(pattern = '[:space:]',replacement = "")
attackSpeed = str_extract(protext, pattern = "攻击速度:\\d+") %>% str_replace_all(pattern="\\D",replacement = "") %>% as.numeric()
attackValue = str_extract(protext,pattern = "^\\d+") %>% as.numeric()
attackDistance = str_extract(protext,pattern = "攻击距离:\\d+") %>% str_replace_all(pattern = "\\D",replacement = "") %>% as.numeric()
## armor and physical/magical damage resistance
protext = html_node(pro6,xpath = "li[5]") %>% html_text() %>%
str_replace_all(pattern = '[:space:]',replacement = "")
defence = str_extract(protext, pattern = "^\\d+") %>% as.numeric()
pDef = str_extract(protext,pattern = "物理伤害抗性:\\d+") %>% str_replace_all(pattern = "\\D",replacement = "")%>% as.numeric()
mDef = str_extract(protext,pattern = "魔法伤害抗性:\\d+") %>% str_replace_all(pattern = "\\D",replacement = "") %>% as.numeric()
## movement speed
movement = html_node(pro6,xpath = "li[6]") %>% html_text() %>%
str_replace_all(pattern = '[:space:]',replacement = "") %>% as.numeric()
## vision / attack range / projectile speed from the range table
# NOTE(review): sight_night is extracted from the digits right before the
# "attack range" label — verify this really is night vision and not range.
areatext = html_node(page,xpath = "//div[@class='area_box']/table")%>% html_text()
sight_day = str_extract(areatext, pattern = "视野范围:[:space:]+\\d+") %>%
str_replace_all(pattern = "\\D",replacement = "") %>% as.numeric()
sight_night = str_extract(areatext, pattern = "\\d+[:space:]+攻击范围") %>%
str_replace_all(pattern = "\\D",replacement = "") %>% as.numeric()
ballisticVelocity = str_extract(areatext, pattern = "弹道速度:\\D+\\d+\\D") %>%
str_replace_all(pattern = "\\D",replacement = "") %>% as.numeric()
# NOTE(review): the output column name "atttackValue" (triple t) is a typo;
# kept as-is because downstream consumers of the CSV may rely on it.
df = data.frame(
name = name, name_ch = name_ch, camp = camp,
AttackType = AttackType,
core = type[1], control = type[2], initiating = type[3],
jungle = type[4], support = type[5], durable = type[6],
explosive = type[7], push = type[8], survive = type[9],
strength = strength, strength_growth = strength_growth,
agility = agility, agility_growth = agility_growth,
wisdom = wisdom, wisdom_growth = wisdom_growth,
atttackValue = attackValue, attackSpeed = attackSpeed, attackDistance = attackDistance,
defence = defence, pDef = pDef, mDef = mDef,
movement = movement, ballisticVelocity =ballisticVelocity,
sight_day = sight_day, sight_night = sight_night,
picture = jpg
)
herosAttributes = rbind(herosAttributes,df)
# polite randomized delay between requests (1-7 seconds)
Sys.sleep(runif(n = 1,min = 1,max=7))
}
# index rows by Chinese name and persist the scraped table
rownames(herosAttributes)=herosAttributes$name_ch
write.csv(herosAttributes,file= 'heroAttributes.csv',fileEncoding = "UTF-8")
|
0ca5f41c9ef6cea7a644737f5cf32d8dcdcd26f6
|
e9a5a9e952a9ccac535efe64b96cc730b844677b
|
/inst/unitTests/runit.workbook.extraction.R
|
26011c4f194763eed867f8cb88eb9993987dfa94
|
[] |
no_license
|
miraisolutions/xlconnect
|
323c22258439616a4d4e0d66ddc62204094196c9
|
ae73bfd5a368484abc36638e302b167bce79049e
|
refs/heads/master
| 2023-09-04T05:27:42.744196
| 2023-08-30T07:10:44
| 2023-08-30T07:10:44
| 8,108,907
| 114
| 35
| null | 2023-08-30T07:10:46
| 2013-02-09T11:17:42
|
R
|
UTF-8
|
R
| false
| false
| 3,646
|
r
|
runit.workbook.extraction.R
|
#############################################################################
#
# XLConnect
# Copyright (C) 2010-2021 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# Test workbook extraction & replacement operators
#
# Author: Martin Studer, Mirai Solutions GmbH
#
#############################################################################
test.workbook.extraction <- function() {
  # Exercise the workbook extraction/replacement operators:
  #   wb["sheet"] <- df      /  wb["sheet"]       (worksheets)
  #   wb[["name", ...]] <- df /  wb[["name"]]     (named regions)
  # Every check runs against both the binary (*.xls) and OOXML (*.xlsx) format.

  # Create one workbook per file format
  workbook_xls <- loadWorkbook(rsrc("resources/testWorkbookExtractionOperators.xls"), create = TRUE)
  workbook_xlsx <- loadWorkbook(rsrc("resources/testWorkbookExtractionOperators.xlsx"), create = TRUE)

  # Writing a data set to a worksheet works (*.xls)
  workbook_xls["mtcars1"] <- mtcars
  checkTrue("mtcars1" %in% getSheets(workbook_xls))
  checkEquals(as.vector(getLastRow(workbook_xls, "mtcars1")), 33)
  workbook_xls["mtcars2", startRow = 6, startCol = 11, header = FALSE] <- mtcars
  checkTrue("mtcars2" %in% getSheets(workbook_xls))
  checkEquals(as.vector(getLastRow(workbook_xls, "mtcars2")), 37)

  # Writing a data set to a worksheet works (*.xlsx)
  workbook_xlsx["mtcars1"] <- mtcars
  checkTrue("mtcars1" %in% getSheets(workbook_xlsx))
  checkEquals(as.vector(getLastRow(workbook_xlsx, "mtcars1")), 33)
  workbook_xlsx["mtcars2", startRow = 6, startCol = 11, header = FALSE] <- mtcars
  checkTrue("mtcars2" %in% getSheets(workbook_xlsx))
  checkEquals(as.vector(getLastRow(workbook_xlsx, "mtcars2")), 37)

  # Reading data back from a worksheet works; mtcars is 32 x 11, and the
  # headerless write loses one row when read back with a header
  checkEquals(dim(workbook_xls["mtcars1"]), c(32, 11))
  checkEquals(dim(workbook_xls["mtcars2"]), c(31, 11))
  checkEquals(dim(workbook_xlsx["mtcars1"]), c(32, 11))
  checkEquals(dim(workbook_xlsx["mtcars2"]), c(31, 11))

  # Writing data to a named region works (*.xls)
  workbook_xls[["mtcars3", "mtcars3!$B$7"]] <- mtcars
  checkTrue("mtcars3" %in% getDefinedNames(workbook_xls))
  checkEquals(as.vector(getLastRow(workbook_xls, "mtcars3")), 39)
  workbook_xls[["mtcars4", "mtcars4!$D$8", rownames = "Car"]] <- mtcars
  checkTrue("mtcars4" %in% getDefinedNames(workbook_xls))
  checkEquals(as.vector(getLastRow(workbook_xls, "mtcars4")), 40)

  # Writing data to a named region works (*.xlsx)
  workbook_xlsx[["mtcars3", "mtcars3!$B$7"]] <- mtcars
  checkTrue("mtcars3" %in% getDefinedNames(workbook_xlsx))
  checkEquals(as.vector(getLastRow(workbook_xlsx, "mtcars3")), 39)
  workbook_xlsx[["mtcars4", "mtcars4!$D$8", rownames = "Car"]] <- mtcars
  checkTrue("mtcars4" %in% getDefinedNames(workbook_xlsx))
  checkEquals(as.vector(getLastRow(workbook_xlsx, "mtcars4")), 40)

  # Reading data back from a named region works (row names add one column)
  checkEquals(dim(workbook_xls[["mtcars3"]]), c(32, 11))
  checkEquals(dim(workbook_xls[["mtcars4"]]), c(32, 12))
  checkEquals(dim(workbook_xlsx[["mtcars3"]]), c(32, 11))
  checkEquals(dim(workbook_xlsx[["mtcars4"]]), c(32, 12))
}
|
6b8751f7fe4a2916c108248c463ba1ffa1fd4188
|
0af0d7f3f28d516d41eedaad41cf5744b414500a
|
/Deliverable/R-Script/8086-002-RScript.R
|
c72f205a53b952014a37a7f534893785b14e9014
|
[] |
no_license
|
srishtynayak/Medicare-Claim-Hospital-Analysis-R
|
8b10fe65e56b444cd74d7e26ff8c6caccdaa9f64
|
20023e171ac9384a8b2902f03d1b2314377f1754
|
refs/heads/master
| 2020-03-31T11:07:09.882457
| 2018-10-07T00:58:36
| 2018-10-07T00:58:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,811
|
r
|
8086-002-RScript.R
|
## R Script - 8086-002 - Data to Decisions
We have addressed all our research questions.
1) Finding out on which claim type did all the hospital across USA has spent more and also for which period?
2) Finding the amount spent in each state and grouping it under highest and lowest claim states?
3) Finding the leading hospital in terms of spending based on the US zones and also finding the differences in spending by hospital on zone level?
---------------------------------------------------------------------------------
1) Finding out on which claim type did all the hospital across USA has spent more and also for which period?
##1.1 Finding out on which claim type did all the hospital across USA has spent more
##setting the path to the folder of the date set, loading the data and checking the data
setwd("C:\\Users\\leirhyh\\Documents\\Leipersonallife\\UNOclass\\ISQA8086-002\\groupproject\\dataset")
list.files()
HospitalSpending <- read.csv ("Medicare_Hospital_Spending_by_Claim_Cleaned.csv", stringsAsFactors = FALSE)
head(HospitalSpending) # check the first six rows of the data
View(HospitalSpending) # view the data
nrow(HospitalSpending) # check how many rows
ncol(HospitalSpending) # check how many columns
HospitalSpending[!complete.cases(HospitalSpending),] #check wheather there is missing data
#Since there is no missing data for current dataset, we won't consider missing data in our following analyses.
## Load the libraries used throughout this analysis.
## (The original wrapped these in an R Markdown chunk fence, ```{r packages},
## which is not valid syntax in a plain R script and was removed.)
library(doBy)      # summaryBy-style aggregation helpers
library(DT)        # interactive data tables
library(xtable)    # LaTeX/HTML table export
library(ggplot2)   # plotting
library(reshape2)  # dcast() reshaping
library(GGally)    # ggplot2 extensions
## subset the data
### select three columns of Claim_type, ASPE_Hospital, POS_Hospital for the folowing analyses
claimtype <- HospitalSpending[, c("Claim_type" , "ASPE_Hospital", "POS_Hospital")]
head(claimtype)
### Select only two column of "Claim_type" , "ASPE_Hospital" and omitting the Total type
###because we compare the relationships between each claim type
claimtype1 <- HospitalSpending[HospitalSpending$Claim_type != "Total", c("Claim_type" , "ASPE_Hospital")]
head(claimtype1)
# split the data into different columns based on Claim_type
claimtype2 <- dcast(claimtype1, ASPE_Hospital ~ Claim_type)
head(claimtype2)
### subset the proper pair for the following t test
### Subset the proper pairs for the following t tests.
## Helper: rows of HospitalSpending for exactly two claim types, keeping only
## the columns needed by t.test(). Replaces 21 copy-pasted subset expressions
## (the original also re-assigned claimHHAHospice twice with identical code).
claim_pair <- function(type_a, type_b) {
  HospitalSpending[HospitalSpending$Claim_type %in% c(type_a, type_b),
                   c("Claim_type", "ASPE_Hospital")]
}
claimCarrierDME          <- claim_pair("Carrier", "DME")
claimCarrierHHA          <- claim_pair("Carrier", "HHA")
claimCarrierHospice      <- claim_pair("Carrier", "Hospice")
claimCarrierInpatient    <- claim_pair("Carrier", "Inpatient")
claimCarrierOutpatient   <- claim_pair("Carrier", "Outpatient")
claimCarrierSNF          <- claim_pair("Carrier", "SNF")
claimDMEHHA              <- claim_pair("DME", "HHA")
claimDMEHospice          <- claim_pair("DME", "Hospice")
claimDMEInpatient        <- claim_pair("DME", "Inpatient")
claimDMEOutpatient       <- claim_pair("DME", "Outpatient")
claimDMESNF              <- claim_pair("DME", "SNF")
claimHHAHospice          <- claim_pair("HHA", "Hospice")
claimHHAInpatient        <- claim_pair("HHA", "Inpatient")
claimHHAOutpatient       <- claim_pair("HHA", "Outpatient")
claimHHASNF              <- claim_pair("HHA", "SNF")
claimHospiceInpatient    <- claim_pair("Hospice", "Inpatient")
claimHospiceOutpatient   <- claim_pair("Hospice", "Outpatient")
claimHospiceSNF          <- claim_pair("Hospice", "SNF")
claimInpatientOutpatient <- claim_pair("Inpatient", "Outpatient")
claimInpatientSNF        <- claim_pair("Inpatient", "SNF")
claimOutpatientSNF       <- claim_pair("Outpatient", "SNF")
head(claimHHAHospice)
#change claim_type to full name for analyses
claimtype$Claim_type <- factor(claimtype$Claim_type, levels=c("Carrier","DME", "HHA", "Hospice", "Inpatient", "Outpatient", "SNF","Total"), labels=c("Carrier", "Durable Medical Equipment","Home Health Agency","Hospice","Inpatient","Outpatient", "Skilled Nursing Facility", "Total"))
head(claimtype)
# produce the sumary table for the subsets
# sumary table for the hospital spencid nationally by claim types ($).
claimTable <- do.call(cbind.data.frame, aggregate(ASPE_Hospital ~ Claim_type, data=claimtype, FUN = function(x) {
c("Number"=format(round(length(x), 0), nsmall = 0), M=format(round(mean(x), 2), nsmall = 2), min=min(x),max=max(x),
SD=format(round(sd(x), 2), nsmall = 2),format(round(quantile(x,c(0.05, 0.25, 0.50, 0.75, 0.95)), 2), nsmall = 2), iqr=IQR(x)) })); names(claimTable) <- c("Claim Type", "Number", "Mean", "Minimum", "Maximum", "Stanard deviation", "5% Quantile","25% Quantile","Median","75% Quantile", "95% Quantile", "IQR")
View(claimTable)
# sumary table for the hospital spending perentage nationally by claim types (%).
claimTableP <- do.call(cbind.data.frame, aggregate(POS_Hospital ~ Claim_type, data=claimtype, FUN = function(x) {
c("Number"=format(round(length(x), 0), nsmall = 0), M=format(round(mean(x), 5), nsmall = 5), min=min(x),max=max(x),
SD=format(round(sd(x), 2), nsmall = 2),quantile(x,c(0.05, 0.25, 0.50, 0.75, 0.95)), iqr=IQR(x)) })); names(claimTableP) <- c("Claim Type", "Number", "Mean", "Minimum", "Maximum", "Stanard deviation", "5% Quantile","25% Quantile","Median","75% Quantile", "95% Quantile", "IQR")
View(claimTableP)
#Histogram of the spending based on different claim types
ggplot(data = claimtype, aes(x=ASPE_Hospital)) + geom_histogram(aes(fill=Claim_type)) + facet_wrap(~Claim_type, scales="free")
ggplot(data = claimtype, aes(x=POS_Hospital)) + geom_histogram(aes(fill=Claim_type)) + facet_wrap(~Claim_type, scales="free")
#boxplot of the spending based on different claim types
ggplot(data = claimtype, aes(y=ASPE_Hospital, x=Claim_type)) + geom_boxplot(aes(fill=Claim_type)) + facet_wrap(~Claim_type, scales="free")
ggplot(data = claimtype, aes(y=POS_Hospital, x=Claim_type)) + geom_boxplot(aes(fill=Claim_type)) + facet_wrap(~Claim_type, scales="free")
## ANOVA analysis of the spending based on different claim types
fit = lm(formula = claimtype$ASPE_Hospital ~ claimtype$Claim_type)
anova (fit)
## t test to check which pair are significantly from each other
with(claimCarrierDME, t.test(ASPE_Hospital[Claim_type=="Carrier"],ASPE_Hospital[Claim_type=="DME"]))
t.test(ASPE_Hospital~ Claim_type, data = claimCarrierDME)
with(claimCarrierHHA, t.test(ASPE_Hospital[Claim_type=="Carrier"],ASPE_Hospital[Claim_type=="HHA"]))
t.test(ASPE_Hospital~ Claim_type, data = claimCarrierHHA)
with(claimCarrierHospice , t.test(ASPE_Hospital[Claim_type=="Carrier"],ASPE_Hospital[Claim_type=="Hospice"]))
t.test(ASPE_Hospital~ Claim_type, data = claimCarrierHospice )
with(claimCarrierInpatient, t.test(ASPE_Hospital[Claim_type=="Carrier"],ASPE_Hospital[Claim_type=="Inpatient"]))
t.test(ASPE_Hospital~ Claim_type, data = claimCarrierInpatient)
with(claimCarrierOutpatient, t.test(ASPE_Hospital[Claim_type=="Carrier"],ASPE_Hospital[Claim_type=="Outpatient"]))
t.test(ASPE_Hospital~ Claim_type, data = claimCarrierOutpatient)
with(claimCarrierSNF, t.test(ASPE_Hospital[Claim_type=="Carrier"],ASPE_Hospital[Claim_type=="SNF"]))
t.test(ASPE_Hospital~ Claim_type, data = claimCarrierSNF)
with(claimDMEHHA, t.test(ASPE_Hospital[Claim_type=="DME"],ASPE_Hospital[Claim_type=="HHA"]))
t.test(ASPE_Hospital~ Claim_type, data = claimDMEHHA)
with(claimDMEHospice, t.test(ASPE_Hospital[Claim_type=="DME"],ASPE_Hospital[Claim_type=="Hospice"]))
t.test(ASPE_Hospital~ Claim_type, data = claimDMEHospice)
with(claimDMEInpatient, t.test(ASPE_Hospital[Claim_type=="DME"],ASPE_Hospital[Claim_type=="Inpatient"]))
t.test(ASPE_Hospital~ Claim_type, data = claimDMEInpatient)
with(claimDMEOutpatient, t.test(ASPE_Hospital[Claim_type=="DME"],ASPE_Hospital[Claim_type=="Outpatient"]))
t.test(ASPE_Hospital~ Claim_type, data = claimDMEOutpatient)
with(claimDMESNF, t.test(ASPE_Hospital[Claim_type=="DME"],ASPE_Hospital[Claim_type=="SNF"]))
t.test(ASPE_Hospital~ Claim_type, data = claimDMESNF)
with(claimHHAHospice, t.test(ASPE_Hospital[Claim_type=="HHA"],ASPE_Hospital[Claim_type=="Hospice"]))
t.test(ASPE_Hospital~ Claim_type, data = claimHHAHospice)
with(claimHHAInpatient, t.test(ASPE_Hospital[Claim_type=="HHA"],ASPE_Hospital[Claim_type=="Inpatient"]))
t.test(ASPE_Hospital~ Claim_type, data = claimHHAInpatient)
with(claimHHASNF, t.test(ASPE_Hospital[Claim_type=="HHA"],ASPE_Hospital[Claim_type=="SNF"]))
t.test(ASPE_Hospital~ Claim_type, data = claimHHASNF)
with(claimHospiceInpatient, t.test(ASPE_Hospital[Claim_type=="Hospice"],ASPE_Hospital[Claim_type=="Inpatient"]))
t.test(ASPE_Hospital~ Claim_type, data = claimHospiceInpatient)
with(claimHospiceOutpatient, t.test(ASPE_Hospital[Claim_type=="Hospice"],ASPE_Hospital[Claim_type=="Outpatient"]))
t.test(ASPE_Hospital~ Claim_type, data = claimHospiceOutpatient)
with(claimHospiceSNF, t.test(ASPE_Hospital[Claim_type=="Hospice"],ASPE_Hospital[Claim_type=="SNF"]))
t.test(ASPE_Hospital~ Claim_type, data = claimHospiceSNF)
with(claimInpatientOutpatient, t.test(ASPE_Hospital[Claim_type=="Inpatient"],ASPE_Hospital[Claim_type=="Outpatient"]))
t.test(ASPE_Hospital~ Claim_type, data = claimInpatientOutpatient)
with(claimInpatientSNF, t.test(ASPE_Hospital[Claim_type=="Inpatient"],ASPE_Hospital[Claim_type=="SNF"]))
t.test(ASPE_Hospital~ Claim_type, data = claimInpatientSNF)
with(claimOutpatientSNF, t.test(ASPE_Hospital[Claim_type=="Outpatient"],ASPE_Hospital[Claim_type=="SNF"]))
t.test(ASPE_Hospital~ Claim_type, data = claimOutpatientSNF)
##1.2 Finding out on which period did all the hospital across USA has spent more
## subset the data
### select three columns of Period, ASPE_Hospital, POS_Hospital for the folowing analyses
head(HospitalSpending)
periodIHA <- HospitalSpending[, c("Period" , "ASPE_Hospital", "POS_Hospital")]
head(periodIHA,20)
### Select only two column of Peirod , ASPE_Hospital and omitting the Total type
###because we compare the relationships between each claim type
periodIHA1 <- HospitalSpending[HospitalSpending$Period !="ComEpisode", c("Period" , "ASPE_Hospital")]
head(periodIHA1)
### subset the proper pair for the following t test
periodAB <- HospitalSpending[HospitalSpending$Period == "AfterIHA" | HospitalSpending$Period == "BeforeIHA", c("Period" , "ASPE_Hospital")]
head(periodAB)
periodAD <- HospitalSpending[HospitalSpending$Period == "AfterIHA" | HospitalSpending$Period == "DuringIHA", c("Period" , "ASPE_Hospital")]
head(periodAD)
periodBD <- HospitalSpending[HospitalSpending$Period == "BeforeIHA" | HospitalSpending$Period == "DuringIHA", c("Period" , "ASPE_Hospital")]
head(periodBD)
# split the data into different columns based on Claim_type
periodIHA2 <- dcast(periodIHA1, ASPE_Hospital ~ Period)
head(periodIHA2)
str(periodIHA2) # check the type of the variable
periodIHA2$AfterIHA <- as.numeric(periodIHA2$AfterIHA)
periodIHA2$BeforeIHA <- as.numeric(periodIHA2$BeforeIHA)
periodIHA2$DuringIHA <- as.numeric(periodIHA2$DuringIHA)
str(periodIHA2)
# produce the sumary table for the subsets
# sumary table for the hospital spending nationally by periods ($).
periodTable <- do.call(cbind.data.frame, aggregate(ASPE_Hospital ~ Period, data=periodIHA, FUN = function(x) {
c("Number"=format(round(length(x), 0), nsmall = 0), M=format(round(mean(x), 2), nsmall = 2), min=min(x),max=max(x),
SD=format(round(sd(x), 2), nsmall = 2),format(round(quantile(x,c(0.05, 0.25, 0.50, 0.75, 0.95)), 2), nsmall = 2), iqr=IQR(x)) })); names(periodTable) <- c("Periods", "Number", "Mean", "Minimum", "Maximum", "Stanard deviation", "5% Quantile","25% Quantile","Median","75% Quantile", "95% Quantile", "IQR")
View(periodTable)
# sumary table for the hospital spending perentage nationally by periods (%).
periodTableP <- do.call(cbind.data.frame, aggregate(POS_Hospital ~ Period, data=periodIHA, FUN = function(x) {
c("Number"=format(round(length(x), 0), nsmall = 0), M=format(round(mean(x), 5), nsmall = 5), min=min(x),max=max(x),
SD=format(round(sd(x), 2), nsmall = 2),quantile(x,c(0.05, 0.25, 0.50, 0.75, 0.95)), iqr=IQR(x)) })); names(periodTableP) <- c("Claim Type", "Number", "Mean", "Minimum", "Maximum", "Stanard deviation", "5% Quantile","25% Quantile","Median","75% Quantile", "95% Quantile", "IQR")
View(periodTableP)
#Histogram of the spending based on different periods ($)
ggplot(data = periodIHA, aes(x=ASPE_Hospital)) + geom_histogram(aes(fill=Period)) + facet_wrap(~Period, scales="free")
ggplot(data = periodIHA, aes(x=POS_Hospital)) + geom_histogram(aes(fill=Period)) + facet_wrap(~Period, scales="free")
#boxplot of the spending based on different periods
ggplot(data = periodIHA, aes(y=ASPE_Hospital, x=Period)) + geom_boxplot(aes(fill=Period)) + facet_wrap(~Period, scales="free")
ggplot(data = periodIHA, aes(y=POS_Hospital, x=Period)) + geom_boxplot(aes(fill=Period)) + facet_wrap(~Period, scales="free")
## ANOVA analysis of the spending based on different periods
fit1 = lm(formula = periodIHA1$ASPE_Hospital ~ periodIHA1$Period)
anova (fit1)
## t test to check which pair are significantly from each other
head(periodAB)
with(periodAB, t.test(ASPE_Hospital[Period=="AfterIHA"],ASPE_Hospital[Period=="BeforeIHA"]))
t.test(ASPE_Hospital~ Period, data = periodAB)
head(periodAD)
with(periodAD, t.test(ASPE_Hospital[Period=="AfterIHA"],ASPE_Hospital[Period=="DuringIHA"]))
t.test(ASPE_Hospital~ Period, data = periodAD)
head(periodBD)
with(periodBD, t.test(ASPE_Hospital[Period=="BeforeIHA"],ASPE_Hospital[Period=="DuringIHA"]))
t.test(ASPE_Hospital~ Period, data = periodBD)
-----------------------------------------------------------------------------------------------------
2) Finding the amount spent in each state and grouping it under highest and lowest claim states?
# Finding the amount spent in each state and grouping it under highest and lowest claim states
## Setting the path to the folder of the date set and loading the data
setwd("C:/Users/hp/Desktop")
StateSpending <- read.csv("Medicare_Hospital_Spending_by_Claim_Cleaned.csv", stringsAsFactors = FALSE)
head(StateSpending) # check the first six rows of the data
View(StateSpending) # view the data
nrow(StateSpending) # check how many rows
ncol(StateSpending) # check how many columns
## Removing "NA" values
StateSpending[is.na(StateSpending)] <- 0
## Creating the subset by filtering the values based on the column "Period"
StateSpending <- subset(StateSpending, Period == "ComEpisode")
## Grouping the state into Region
## North East Region
StateSpending_northeast <- subset(StateSpending, State %in% c("CT", "ME", "MA", "NH", "RI", "VT", "NJ", "PA", "NY"))
## Midwest Region
StateSpending_midwest <- subset(StateSpending, State %in% c("IN", "IL", "MI", "OH", "WI", "IA", "KS", "MN", "MO", "NE", "ND", "SD"))
## South Region
StateSpending_south <- subset(StateSpending, State %in% c("DE", "DC", "FL", "GA", "MD", "NC", "SC", "VA", "WV", "AL", "KY", "MS", "TN", "AR", "LA", "OK", "TX"))
## West Region
StateSpending_west <- subset(StateSpending, State %in% c("AZ", "CO", "ID", "NM", "MT", "UT", "NV", "WY", "AK", "CA", "HI", "OR", "WA"))
## Creating a subset with only Hospital_Name, State, ASPE_State
(StateSpending <- StateSpending[, c("Hospital_Name", "State", "ASPE_State")])
## Creating a subset with only Hospital_Name, State, ASPE_State Regionwise
(StateSpending_northeast <- StateSpending_northeast[, c("Hospital_Name", "State", "ASPE_State")])
(StateSpending_midwest <- StateSpending_midwest[, c("Hospital_Name", "State", "ASPE_State")])
(StateSpending_south <- StateSpending_south[, c("Hospital_Name", "State", "ASPE_State")])
(StateSpending_south <- StateSpending_south[, c("Hospital_Name", "State", "ASPE_State")])
## Aggregating the total expenditure statewise
AggStateSpending<- aggregate(StateSpending$ASPE_State~StateSpending$State, data = StateSpending, sum)
## Changing the Column names
colnames(AggStateSpending) <- c("State", "ASPE_State")
# Summary table for the Average hospital spending, Statewise
## Summary statistics of hospital-level spending grouped by State.
## Fixes from the original: the aggregate() formula was reversed
## (State ~ ASPE_State) and `data=State` named a column, not a data frame.
## Grouping is done on StateSpending (hospital-level rows), since
## AggStateSpending holds only one total per state.
AggStateSpendingTable <- do.call(cbind.data.frame, aggregate(
  ASPE_State ~ State, data = StateSpending,
  FUN = function(x) {
    c("Number" = format(round(length(x), 0), nsmall = 0),
      M = format(round(mean(x), 5), nsmall = 5),
      min = min(x), max = max(x),
      SD = format(round(sd(x), 2), nsmall = 2),
      quantile(x, c(0.05, 0.25, 0.50, 0.75, 0.95)),
      iqr = IQR(x))
  }))
names(AggStateSpendingTable) <- c("State", "Number", "Mean", "Minimum", "Maximum",
                                  "Standard deviation", "5% Quantile", "25% Quantile",
                                  "Median", "75% Quantile", "95% Quantile", "IQR")
View(AggStateSpendingTable)
## Rows with the highest and lowest aggregate state spending.
## Original used max(State ~ ASPE_State) / min(...), i.e. max() of a formula,
## which is a runtime error; which.max()/which.min() give the intended rows.
MaxAggStateSpending <- AggStateSpending[which.max(AggStateSpending$ASPE_State), ]
MinAggStateSpending <- AggStateSpending[which.min(AggStateSpending$ASPE_State), ]
## Statewise Plotting of the Spending
## Statewise plot of aggregate spending. The original lines began with "+"
## console-continuation prompts pasted from the R console, which are not
## valid in a source file and were removed.
ggplot(AggStateSpending, aes(x = State, y = ASPE_State, color = State)) +
  geom_histogram(stat = "identity") +
  xlab("State") +
  ylab("Average Spending Per Episode State") +
  ggtitle("Graph for spending per state")
## Function for converting exponential to numeric
## Convert numeric axis values into plotmath scientific-notation labels
## (e.g. 1e+06 renders as 1 x 10^6). Intended for ggplot2 `labels =`.
## The original body lines started with "+" console-continuation prompts,
## which make the script unparsable; they were removed.
##
## @param l numeric vector of axis break values.
## @return an expression vector, one plotmath expression per element of l.
fancy_scientific <- function(l) {
  # character string in scientific notation
  l <- format(l, scientific = TRUE)
  # quote the mantissa so parse() keeps all of its digits
  l <- gsub("^(.*)e", "'\\1'e", l)
  # turn the 'e' exponent marker into plotmath multiplication by 10^k
  l <- gsub("e", "%*%10^", l)
  # return this as an expression for plotmath rendering
  parse(text = l)
}
## Reploting the graph after changing the expontential values to numbers
## Replot with scientific-notation y-axis labels. Stray "+" console prompts
## at the start of each continuation line were removed (invalid R source).
ggplot(AggStateSpending, aes(x = State, y = ASPE_State, color = State)) +
  geom_histogram(stat = "identity") +
  xlab("State") +
  ylab("Average Spending Per Episode State") +
  scale_y_continuous(labels = fancy_scientific) +
  ggtitle("Graph for highest spending per State")
## ANOVA analysis of the spending Statewise
fit = lm(formula = AggStateSpending$ASPE_State ~ AggStateSpending$State)
anova (fit)
-----------------------------------------------------------------------------------------------------
3) Finding the leading hospital in terms of spending based on the US zones and also finding the differences in spending by hospital on zone level?
# Creating the data frame.
hosp <- read.csv("Medicare_Hospital_Spending_by_Claim_Cleaned.csv")
#Removing "NA" values
hosp[is.na(hosp)] <- 0
# Creating the subset by filtering the values based on the column "Period"
hosp <- subset(hosp, Period == "ComEpisode")
# Grouping the state into Region
# North East Region
hosp_northeast <- subset(hosp, State %in% c("CT", "ME", "MA", "NH", "RI", "VT", "NJ", "PA", "NY"))
View(hosp_northeast)
#Midwest Region
hosp_midwest <- subset(hosp, State %in% c("IN", "IL", "MI", "OH", "WI", "IA", "KS", "MN", "MO", "NE", "ND", "SD"))
#South Region
hosp_south <- subset(hosp, State %in% c("DE", "DC", "FL", "GA", "MD", "NC", "SC", "VA", "WV", "AL", "KY", "MS", "TN", "AR", "LA", "OK", "TX"))
#West Region
hosp_west <- subset(hosp, State %in% c("AZ", "CO", "ID", "NM", "MT", "UT", "NV", "WY", "AK", "CA", "HI", "OR", "WA"))
# NOTE(review): this exploratory plot was "disabled" in the original with
# Python-style ''' quotes, which are not valid R syntax. It is commented out
# properly here. (It also calls gplot(), a typo for ggplot(), and its
# continuation lines carried pasted "+" console prompts.)
# ggplot(hosp_northeast, aes(x = State, y = ASPE_Hospital, color = State)) +
#   geom_histogram(stat = "identity") +
#   xlab("Hospital Name") +
#   ylab("Average Spending Per Episode Hospital") +
#   ggtitle("Graph for highest spending per hospital in North East Region")
#Function for converting exponential to numeric
library(scales)
fancy_scientific <- function(l) {
  # Render the numbers in scientific notation, then rewrite the "e" form as
  # a plotmath expression ("mantissa %*% 10^exponent") for axis labels.
  sci <- format(l, scientific = TRUE)
  # Quote the mantissa so parse() preserves every digit verbatim.
  quoted <- gsub("^(.*)e", "'\\1'e", sci)
  # Swap the exponent marker for plotmath multiplication by a power of ten.
  plotmath_text <- gsub("e", "%*%10^", quoted)
  parse(text = plotmath_text)
}
# Replotting the graph after converting the exponential axis labels to plotmath notation
## North East plot with scientific-notation labels. Stray "+" console
## prompts at the start of each continuation line were removed.
ggplot(hosp_northeast, aes(x = State, y = ASPE_Hospital, color = State)) +
  geom_histogram(stat = "identity") +
  xlab("Hospital Name") +
  ylab("Average Spending Per Episode Hospital") +
  scale_y_continuous(labels = fancy_scientific) +
  ggtitle("Graph for highest spending per hospital in North East Region")
## Midwest / South / West regional plots. Two fixes versus the original:
## stat names are lower-case ("identity", not "Identity"), and a "+" was
## added before each ggtitle() -- without it the title was a detached
## statement and never attached to the plot.
ggplot(hosp_midwest, aes(x = State, y = ASPE_Hospital, color = State)) +
  geom_histogram(stat = "identity") +
  xlab("Hospital Name") +
  ylab("Average Spending Per Episode Hospital") +
  scale_y_continuous(labels = fancy_scientific) +
  ggtitle("Graph for highest spending per hospital in Midwest Region")
ggplot(hosp_south, aes(x = State, y = ASPE_Hospital, color = State)) +
  geom_histogram(stat = "identity") +
  xlab("Hospital Name") +
  ylab("Average Spending Per Episode Hospital") +
  scale_y_continuous(labels = fancy_scientific) +
  ggtitle("Graph for highest spending per hospital in South Region")
ggplot(hosp_west, aes(x = State, y = ASPE_Hospital, color = State)) +
  geom_histogram(stat = "identity") +
  xlab("Hospital Name") +
  ylab("Average Spending Per Episode Hospital") +
  scale_y_continuous(labels = fancy_scientific) +
  ggtitle("Graph for highest spending per hospital in West Region")
#Assigning the dataframes to new dataframes.
NE <- hosp_northeast
MW <- hosp_midwest
S <- hosp_south
W <- hosp_west
#Add an additonal col called Region for all the subset data frame
NE$Region <- "North East"
MW$Region <- "Mid West"
S$Region <- "South"
W$Region <- "West"
# Sorting the column ASPE_Hospital in the asending order
NE_sort <- NE[order(NE$ASPE_Hospital, decreasing = TRUE),]
head(NE_sort)
MW_sort <- MW[order(MW$ASPE_Hospital, decreasing = TRUE),]
head(MW_sort)
S_sort <- S[order(S$ASPE_Hospital, decreasing = TRUE),]
head(S_sort)
W_sort <- W[order(W$ASPE_Hospital, decreasing = TRUE),]
head(W_sort)
# Assigning the values for comparision
NE_Firstrow <- head(NE_sort,1)
MW_Firstrow <- head(MW_sort,1)
S_Firstrow <- head(S_sort,1)
W_Firstrow <- head(W_sort,1)
#Bind the rows from multiple dataframes into single data frame
Region <- rbind(NE_Firstrow,MW_Firstrow,S_Firstrow,W_Firstrow)
View(Region)
#Select only the required columns
Region <- Region[,c(1,3,6,13)]
#Combining the Column State and Region to a new Column.
Region$Zone <- paste(Region$State, "\n", Region$Region)
#Combinging Hospital Name and Zone.
Region$Hospital <- paste(Region$Hospital_Name, "\n", Region$Zone)
#Plotting the Graph
## Highest-spending hospital per zone. The stat name must be the lower-case
## "identity" (the original used "Identity").
p <- ggplot(Region, aes(x = Hospital, y = ASPE_Hospital, fill = Hospital)) +
  geom_histogram(stat = "identity")
p + theme(axis.text.x = element_blank()) +
  xlab("Hospital Name and Zones") +
  ylab("Average Spending Per Episode Hospital") +
  ggtitle("Highest spending hospital in USA")
----------------------------------------------------------------------------------------------------
|
f09d2b8d34bbf08cf63adf198d66d52d4f4f948e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mlbstats/examples/ops.Rd.R
|
a0f067a05ba938d83f34791a76f6ef288af7627c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 155
|
r
|
ops.Rd.R
|
# Extracted example script from the mlbstats package Rd documentation for ops().
library(mlbstats)
### Name: ops
### Title: Calculates on-base plus slugging
### Aliases: ops
### ** Examples
# Compute OPS (on-base plus slugging) from raw counting stats.
# NOTE(review): argument meanings (hits, walks, at-bats, ...) are not visible
# here -- confirm the order against ?mlbstats::ops before reusing.
ops(200, 18, 4, 401, 4, 50, 20, 3, 13)
|
60179478e6439e76f1e710a3963c4b1e75699680
|
c8712e9013f625a3acd882dfebf70d0d3eee1a77
|
/scripts/csvToJSON.R
|
3a46c91de8a9acd594ef28f183684037ddc8ca83
|
[] |
no_license
|
seattleflu/simulated-data
|
db95ada6e191d94f05eb9888a9fdf3d6d082e37f
|
318b5309b574b65db12b20c809344b94452644bd
|
refs/heads/master
| 2020-04-23T18:47:45.504700
| 2019-05-13T20:58:39
| 2019-05-13T20:58:39
| 171,380,506
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 527
|
r
|
csvToJSON.R
|
# csvToJSON: convert every *.csv found under file_dir to a JSON file
# written alongside it with the same basename.
library(jsonlite)
library(R.utils)
file_dir <- '../models/'
# forAuspice
for (dirPath in list.dirs(file_dir)) {
  # Anchor the extension: the original pattern "+\\.csv" is an invalid regex
  # (a leading "+" has no operand) and was not anchored to the name's end.
  for (filePath in list.files(path = dirPath, pattern = "\\.csv$", full.names = TRUE)) {
    dat <- read.table(filePath, quote = '', sep = ',', header = TRUE)
    # Replace only the ".csv" extension; a bare sub('csv', 'json', ...) would
    # rewrite the first "csv" occurring anywhere in the path (e.g. a
    # directory named "csv_data").
    jsonPath <- sub("\\.csv$", ".json", filePath)
    if (file.size(filePath) <= 2^21) {
      # Small files (<= 2 MiB): pretty-print for readability.
      write_json(dat, path = jsonPath, pretty = TRUE)
    } else {
      # Large files: compact output to save space.
      write_json(dat, path = jsonPath, pretty = FALSE)
      # gzip(jsonPath)
      # unlink(jsonPath)
    }
  }
}
|
acb9e34996a263a6da57138cefbf6a2258899967
|
b66ff8f265f9af43e8dc7ddb7558c12d01a5a226
|
/scripts/day_of_week.R
|
281b1454bfd0fdcd1c026785f2cf820f1ddb18d8
|
[
"MIT"
] |
permissive
|
TimTaylor/trend_analysis_public
|
0d88b96876bb5b4c05fe8fab101864dd5b135c21
|
75155f91054d7f3f0caa26d2b693c1882b873475
|
refs/heads/main
| 2023-06-23T03:21:14.074644
| 2021-07-14T22:54:43
| 2021-07-14T22:54:43
| 386,217,122
| 0
| 0
|
NOASSERTION
| 2021-07-15T08:21:52
| 2021-07-15T08:21:51
| null |
UTF-8
|
R
| false
| false
| 548
|
r
|
day_of_week.R
|
#' Convert dates to day-of-week factors
#'
#' Maps a `Date` vector to a factor with three levels, distinguishing
#' weekends, Mondays, and the rest of the week.
#'
#' @param date A `Date` vector.
#'
#' @return A factor the same length as `date` with levels
#'   `"rest_of_week"`, `"monday"` and `"weekend"`. Missing dates map to `NA`
#'   (the previous scalar implementation raised an error on `NA` input).
#'
#' @note `weekdays()` returns locale-dependent names; this function assumes
#'   an English locale ("Saturday", "Sunday", "Monday") -- TODO confirm the
#'   deployment locale.
#'
#' @author Thibaut
#'
day_of_week <- function(date) {
  wd <- weekdays(date)
  # Vectorised classification replaces the per-element vapply loop.
  # %in% never yields NA (unlike ==), so missing dates fall through
  # cleanly instead of raising "missing value where TRUE/FALSE needed".
  out <- rep("rest_of_week", length(wd))
  out[wd %in% c("Saturday", "Sunday")] <- "weekend"
  out[wd %in% "Monday"] <- "monday"
  out[is.na(wd)] <- NA
  factor(out, levels = c("rest_of_week", "monday", "weekend"))
}
|
95c91d13db9f4012b4551b862da9e953bbc1fb62
|
93efadbd61fb615358b84864d2641eb11ae3a96a
|
/data.R
|
a150d5cb9e4407311d72e77c49b94f145d6e6044
|
[] |
no_license
|
przemo/komunikatyKM
|
89aee3a40076052fe11644ec618f496f98a05145
|
5384b46c422220179ff58c03210773d791602f7c
|
refs/heads/master
| 2020-06-04T16:17:20.775643
| 2015-03-02T21:05:46
| 2015-03-02T21:05:46
| 26,396,584
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 149,485
|
r
|
data.R
|
structure(list(dates = structure(c(1414749480, 1414749420, 1414757040,
1414764300, 1414780320, 1414776060, 1414837380, 1414958760, 1415030340,
1415045580, 1415086080, 1415105700, 1415123640, 1415163600, 1415251740,
1415349780, 1415354340, 1415364960, 1415374380, 1415427060, 1415456040,
1415598960, 1415692020, 1415774220, 1415804340, 1415815680, 1415872680,
1415874420, 1415889060, 1415933160, 1415980500, 1415987580, 1416068640,
1416079080, 1416091260, 1416137760, 1416235020, 1416287640, 1416333360,
1416382020, 1416411960, 1416427380, 1416436260, 1416462840, 1416463020,
1416463260, 1416481740, 1416544920, 1416550980, 1416558240, 1416577140,
1416582900, 1416641340, 1416679620, 1416679620, 1416733860, 1416767760,
1416891000, 1416892500, 1416982800, 1416995820, 1416996900, 1417017540,
1417084560, 1417093080, 1417163700, 1417165260, 1417167420, 1417170960,
1417188420, 1417234680, 1417257060, 1417293420, 1417299540, 1417313340,
1417343340, 1417376400, 1417385460, 1417410300, 1417444320, 1417455480,
1417509960, 1417516440, 1417547280, 1417629960, 1417638300, 1417673460,
1417703940, 1417711920, 1417759200, 1417786080, 1417862040, 1417903020,
1417905000, 1417906920, 1417926000, 1417969740, 1418039940, 1418054220,
1418058660, 1418122560, 1418140080, 1418176560, 1418187540, 1418188620,
1418281260, 1418293260, 1418313360, 1418514720, 1418578680, 1418620740,
1418642880, 1418653860, 1418685300, 1418761980, 1418772900, 1418812980,
1418818500, 1418831760, 1418833200, 1418840700, 1418852520, 1418862480,
1418881440, 1418883480, 1418904960, 1418919720, 1418943540, 1418980500,
1418984040, 1418993280, 1418998860, 1419012840, 1419040740, 1419161640,
1419179940, 1419193800, 1419222780, 1419225900, 1419258060, 1419275940,
1419283980, 1419315120, 1419358980, 1419427440, 1419427440, 1419430500,
1419492960, 1419524040, 1419550080, 1419587760, 1419591000, 1419675360,
1419689040, 1419691320, 1419697380, 1419745020, 1419832860, 1419860280,
1419862980, 1419869340, 1419889020, 1419901020, 1419921300, 1419956400,
1419958980, 1419963660, 1420012860, 1420049940, 1420099260, 1420102800,
1420142820, 1420171380, 1420212360, 1420437720, 1420448160, 1420456860,
1420480020, 1420485060, 1420538760, 1420560660, 1420564080, 1420566240,
1420575780, 1420617240, 1420618440, 1420721700, 1420784160, 1420827060,
1420836480, 1420842540, 1420843380, 1420907820, 1420928580, 1420969320,
1420973580, 1420991520, 1420996980, 1421017320, 1421068800, 1421106420,
1421138220, 1421141400, 1421164500, 1421229960, 1421230740, 1421232900,
1421241420, 1421317980, 1421337960, 1421445480, 1421487600, 1421488800,
1421649120, 1421657340, 1421662800, 1421666220, 1421701500, 1421730660,
1421745600, 1421745840, 1421746740, 1421748480, 1421790360, 1421825340,
1421851080, 1421871660, 1421938380, 1421956380, 1422000180, 1422005280,
1422022860, 1422106200, 1422121620, 1422133440, 1422134100, 1422162720,
1422176640, 1422252000, 1422284820, 1422287760, 1422418980, 1422512160,
1422522000, 1422546600, 1422553440, 1422557280, 1422562440, 1422613620,
1422650700, 1422651120, 1422719220, 1422774720, 1422817680, 1422823920,
1422827700, 1422876240, 1422908880, 1422958380, 1422967620, 1422976080,
1422981300, 1422984540, 1422997800, 1423025160, 1423066020, 1423068300,
1423209600, 1423228800, 1423232100, 1423297740, 1423297080, 1423471200,
1423471980, 1423473180, 1423490580, 1423675080, 1423747320, 1423824240,
1424025960, 1424032140, 1424036340, 1424088300, 1424092320, 1424101200,
1424112120, 1424115600, 1424146200, 1424154420, 1424154360, 1424168820,
1424169000, 1424176500, 1424179080, 1424184000, 1424194680, 1424230560,
1424247600, 1424247840, 1424282580, 1424294040, 1424527800, 1424670720,
1424838720, 1424839380, 1424939160, 1424969100, 1424973540, 1424982360,
1424985720, 1424992620, 1424992620, 1425013380, 1425272940, 1425281160,
1425313860, 1414800000, 1414713600, 1414713600, 1414713600, 1414713600,
1414713600, 1414713600, 1414713600, 1414713600, 1414627200, 1414627200,
1414627200, 1414540800, 1414540800, 1414540800, 1414540800, 1414454400,
1414454400, 1414368000, 1414195200, 1414195200, 1414195200, 1414108800,
1414108800, 1414108800, 1414022400, 1414022400, 1413936000, 1413936000,
1413936000, 1413849600, 1413849600, 1413763200, 1413763200, 1413763200,
1413763200, 1413763200, 1413763200, 1413676800, 1413590400, 1413590400,
1413590400, 1413504000, 1413504000, 1413504000, 1413504000, 1413504000,
1413504000, 1413417600, 1413417600, 1413417600, 1413331200, 1413331200,
1413331200, 1413331200, 1413244800, 1413244800, 1413158400, 1413158400,
1413158400, 1413072000, 1413072000, 1412985600, 1412899200, 1412899200,
1412812800, 1412726400, 1412726400, 1412726400, 1412726400, 1412726400,
1412640000, 1412640000, 1412640000, 1412553600, 1412553600, 1412553600,
1412553600, 1412467200, 1412380800, 1412380800, 1412380800, 1412294400,
1412294400, 1412294400, 1412294400, 1412294400, 1412294400, 1412121600,
1412035200, 1412035200, 1412035200, 1411948800, 1411948800, 1411948800,
1411948800, 1411862400, 1411862400, 1411862400, 1411776000, 1411776000,
1411776000, 1411776000, 1411776000, 1411776000, 1411776000, 1411689600,
1411689600, 1411689600, 1411603200, 1411603200, 1411603200, 1411603200,
1411603200, 1411603200, 1411516800, 1411516800, 1411516800, 1411516800,
1411430400, 1411430400, 1411344000, 1411257600, 1411257600, 1411171200,
1411084800, 1411084800, 1411084800, 1410998400, 1410825600, 1410739200,
1410739200, 1410739200, 1410480000, 1410480000, 1410393600, 1410393600,
1410307200, 1410307200, 1410307200, 1410307200, 1410220800, 1410134400,
1410134400, 1410134400, 1410048000, 1410048000, 1410048000, 1410048000,
1409961600, 1409961600, 1409961600, 1409875200, 1409788800, 1409788800,
1409702400, 1409616000, 1409616000, 1409529600, 1409529600, 1409529600,
1409529600, 1409443200, 1409443200, 1409270400, 1409270400, 1409270400,
1409270400, 1409270400, 1409184000, 1409184000, 1409184000, 1409097600,
1409097600, 1409097600, 1409011200, 1409011200, 1409011200, 1408924800,
1408924800, 1408924800, 1408924800, 1408924800, 1408752000, 1408752000,
1408665600, 1408665600, 1408665600, 1408665600, 1408665600, 1408579200,
1408579200, 1408579200, 1408492800, 1408406400, 1408406400, 1408406400,
1408406400, 1408320000, 1408320000, 1408233600, 1408147200, 1408060800,
1407974400, 1407974400, 1407888000, 1407888000, 1407888000, 1407888000,
1407715200, 1407715200, 1407715200, 1407628800, 1407628800, 1407628800,
1407542400, 1407542400, 1407456000, 1407456000, 1407456000, 1407369600,
1407369600, 1407369600, 1407283200, 1407283200, 1407283200, 1407283200,
1407196800, 1407196800, 1407196800, 1407110400, 1407110400, 1407110400,
1407110400, 1407024000, 1406937600, 1406937600, 1406937600, 1406851200,
1406851200, 1406764800, 1406764800, 1406764800, 1406764800, 1406678400,
1406678400, 1406678400, 1406678400, 1406592000, 1406592000, 1406505600,
1406505600, 1406505600, 1406505600, 1406505600, 1406419200, 1406419200,
1406332800, 1406332800, 1406332800, 1406246400, 1406246400, 1406246400,
1406160000, 1406160000, 1406073600, 1405987200, 1405987200, 1405900800,
1405814400, 1405728000, 1405728000, 1405728000, 1405728000, 1405728000,
1405641600, 1405641600, 1405641600, 1405555200, 1405555200, 1405555200,
1405555200, 1405555200, 1405468800, 1405468800, 1405468800, 1405382400,
1405296000, 1405296000, 1405296000, 1405209600, 1405209600, 1405209600,
1405209600, 1405209600, 1405123200, 1405123200, 1405036800, 1405036800,
1405036800, 1404950400, 1404950400, 1404864000, 1404864000, 1404777600,
1404777600, 1404691200, 1404691200, 1404604800, 1404604800, 1404518400,
1404518400, 1404518400, 1404432000, 1404432000, 1404259200, 1404172800,
1404172800, 1404086400, 1.404e+09, 1403913600, 1403913600, 1403913600,
1403827200, 1403740800, 1403654400, 1403654400, 1403568000, 1403568000,
1403568000, 1403481600, 1403481600, 1403481600, 1403481600, 1403395200,
1403395200, 1403395200, 1403395200, 1403222400, 1403049600, 1403049600,
1403049600, 1402963200, 1402963200, 1402963200, 1402876800, 1402876800,
1402704000, 1402704000, 1402617600, 1402617600, 1402617600, 1402617600,
1402617600, 1402617600, 1402617600, 1402531200, 1402531200, 1402531200,
1402444800, 1402444800, 1402444800, 1402358400, 1402358400, 1402358400,
1402358400, 1402358400, 1402358400, 1402272000, 1402272000, 1402272000,
1402099200, 1402099200, 1402012800, 1402012800, 1402012800, 1402012800,
1401926400, 1401926400, 1401926400, 1401926400, 1401840000, 1401840000,
1401840000, 1401753600, 1401753600, 1401753600, 1401667200, 1401667200,
1401667200, 1401667200, 1401494400, 1401408000, 1401408000, 1401321600,
1401148800, 1401148800, 1401148800, 1401148800, 1401148800, 1401148800,
1401148800, 1401148800, 1401062400, 1401062400, 1401062400, 1401062400,
1401062400, 1400976000, 1400976000, 1400976000, 1400976000, 1400889600,
1400889600, 1400803200, 1400803200, 1400803200, 1400716800, 1400716800,
1400630400, 1400544000, 1400544000, 1400457600, 1400371200, 1400284800,
1400198400, 1400198400, 1400198400, 1400198400, 1400112000, 1400112000,
1400025600, 1400025600, 1400025600, 1399939200, 1399939200, 1399852800,
1399852800, 1399852800, 1399680000, 1399593600, 1399593600, 1399420800,
1399334400, 1399248000, 1399248000, 1399161600, 1399161600, 1398988800,
1398902400, 1398816000, 1398816000, 1398729600, 1398729600, 1398643200,
1398556800, 1398470400, 1398384000, 1398384000, 1398384000, 1398384000,
1398297600, 1398297600, 1398211200, 1398211200, 1398211200, 1398124800,
1398124800, 1397952000, 1397779200, 1397779200, 1397779200, 1397779200,
1397692800, 1397692800, 1397606400, 1397520000, 1397520000, 1397433600,
1397433600, 1397433600, 1397347200, 1397347200, 1397347200, 1397088000,
1397088000, 1397001600, 1397001600, 1397001600, 1396915200, 1396915200,
1396915200, 1396915200, 1396915200, 1396742400, 1396569600, 1396569600,
1396569600, 1396569600, 1396569600, 1396483200, 1396483200, 1396396800,
1396396800, 1396310400, 1396224000, 1396224000, 1396224000, 1396051200,
1395964800, 1395964800, 1395964800, 1395964800, 1395792000, 1395792000,
1395792000, 1395705600, 1395619200, 1395619200, 1395619200, 1395619200,
1395619200, 1395532800, 1395532800, 1395532800, 1395360000, 1395360000,
1395273600, 1395273600, 1395273600, 1395187200, 1395100800, 1395014400,
1395014400, 1395014400, 1395014400, 1394755200, 1394668800, 1394668800,
1394582400, 1394582400, 1394496000, 1394496000, 1394496000, 1394409600,
1394409600, 1394409600, 1394150400, 1393977600, 1393977600, 1393977600,
1393977600, 1393977600, 1393891200, 1393804800, 1393804800, 1393632000,
1393545600, 1393545600, 1393372800, 1393200000, 1393113600, 1393113600,
1393027200, 1392854400, 1392854400, 1392854400, 1392854400, 1392768000,
1392768000, 1392768000, 1392681600, 1392681600, 1392508800, 1392336000,
1392249600, 1392249600, 1392163200, 1392163200, 1392163200, 1392076800,
1392076800, 1391990400, 1391990400, 1391731200, 1391731200, 1391731200,
1391644800, 1391644800, 1391558400, 1391558400, 1391558400, 1391558400,
1391558400, 1391472000, 1391472000, 1391472000, 1391385600, 1391385600,
1391385600, 1391385600, 1391385600, 1391385600, 1391299200, 1391299200,
1391299200, 1391212800, 1391212800, 1391212800, 1391212800, 1391126400,
1391126400, 1391126400, 1391126400, 1391126400, 1391126400, 1391126400,
1391126400, 1391126400, 1391040000, 1391040000, 1391040000, 1391040000,
1391040000, 1391040000, 1391040000, 1391040000, 1391040000, 1391040000,
1390953600, 1390953600, 1390953600, 1390953600, 1390953600, 1390953600,
1390867200, 1390867200, 1390867200, 1390867200, 1390867200, 1390867200,
1390867200, 1390867200, 1390867200, 1390867200, 1390867200, 1390867200,
1390867200, 1390780800, 1390780800, 1390780800, 1390780800, 1390780800,
1390780800, 1390780800, 1390780800, 1390780800, 1390694400, 1390694400,
1390608000, 1390608000, 1390608000, 1390608000, 1390608000, 1390521600,
1390521600, 1390521600, 1390521600, 1390521600, 1390521600, 1390435200,
1390435200, 1390435200, 1390435200, 1390435200, 1390435200, 1390435200,
1390435200, 1390348800, 1390262400, 1390176000, 1390176000, 1390176000,
1390089600, 1390089600, 1390089600, 1389916800, 1389916800, 1389916800,
1389916800, 1389830400, 1389830400, 1389744000, 1389744000, 1389744000,
1389744000, 1389657600, 1389571200, 1389484800, 1389225600, 1389225600,
1389139200, 1389139200, 1388966400, 1388793600, 1388707200, 1388707200,
1388534400, 1388448000, 1388448000, 1388448000, 1388361600, 1388016000,
1387929600, 1387929600, 1387756800, 1387584000, 1387411200, 1387411200,
1387411200, 1387411200, 1387411200, 1387324800, 1387238400, 1387238400,
1387152000, 1386892800, 1386720000, 1386720000, 1386633600, 1386547200,
1386547200, 1386547200, 1386547200, 1386460800, 1386460800, 1386374400,
1386288000, 1386288000, 1386288000, 1385769600, 1385683200, 1385683200,
1385683200, 1385683200, 1385596800, 1385510400, 1385510400, 1385510400,
1385424000, 1385424000, 1385337600, 1385337600, 1385164800, 1385164800,
1385078400, 1385078400, 1385078400, 1385078400, 1384905600, 1384905600,
1384819200, 1384819200, 1384732800, 1384646400, 1384560000, 1384560000,
1384473600, 1384473600, 1384473600, 1384387200, 1384300800, 1384300800,
1384214400, 1384214400, 1384214400, 1384128000, 1384128000, 1384128000,
1384041600, 1384041600, 1384041600, 1384041600, 1383955200, 1383955200,
1383955200, 1383868800, 1383868800, 1383782400, 1383696000, 1383696000,
1383696000, 1383609600, 1383609600, 1383523200, 1383523200, 1383350400,
1383264000, 1383264000, 1383264000, 1383264000, 1383177600, 1383177600,
1383091200, 1383091200, 1383091200, 1383004800, 1383004800, 1383004800,
1383004800, 1382918400, 1382918400, 1382918400, 1382918400, 1382918400,
1382918400, 1382832000, 1382745600, 1382572800, 1382572800, 1382572800,
1382572800, 1382572800, 1382572800, 1382486400, 1382486400, 1382486400,
1382486400, 1382486400, 1382400000, 1382400000, 1382400000, 1382400000,
1382313600, 1382313600, 1382313600, 1382313600, 1382313600, 1382227200,
1382140800, 1382140800, 1382054400, 1382054400, 1381968000, 1381968000,
1381968000, 1381881600, 1381881600, 1381881600, 1381881600, 1381622400,
1381536000, 1381536000, 1381363200, 1381363200, 1381190400, 1381104000,
1381104000, 1380844800, 1380672000, 1380585600, 1380585600, 1380585600,
1380499200, 1380499200, 1380499200, 1380326400, 1380240000, 1380240000,
1380240000, 1380153600, 1379980800, 1379808000, 1379635200, 1379635200,
1379635200, 1379462400, 1379462400, 1379462400, 1379376000, 1379376000,
1379289600, 1378857600, 1378857600, 1378771200, 1378598400, 1378425600,
1378425600, 1378425600, 1378339200, 1378339200, 1378252800, 1378252800,
1378252800, 1378252800, 1378166400, 1378166400, 1378166400, 1378166400,
1378166400), tzone = "UTC", class = c("POSIXct", "POSIXt")),
awaria = c(FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, TRUE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE,
TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE,
FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE,
TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, TRUE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE,
TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, TRUE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE,
TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE,
TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE,
TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE,
TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE,
TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE,
TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE,
FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE,
FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE, TRUE,
TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE,
TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE,
TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE,
TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE,
FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE,
TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE,
FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, TRUE,
FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE,
FALSE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE,
TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE,
TRUE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, TRUE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE,
TRUE, FALSE, TRUE, FALSE, FALSE, TRUE, TRUE, FALSE, TRUE,
FALSE, FALSE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE,
TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, FALSE, FALSE,
TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE,
FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE,
TRUE, FALSE, TRUE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE,
TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, TRUE, TRUE,
TRUE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE,
FALSE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, TRUE, TRUE,
FALSE, FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE,
TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, TRUE, FALSE), utrudnienia = c(TRUE,
TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, FALSE, TRUE,
TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE,
TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE,
TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE,
FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE,
TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, FALSE,
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE,
FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE,
TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE,
TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, TRUE, FALSE,
TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE,
TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE,
FALSE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE,
TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, TRUE, FALSE,
TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE,
TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE,
TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE,
FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE,
TRUE, FALSE, TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, TRUE,
FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, TRUE,
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE,
TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE,
TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE,
FALSE, TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, FALSE, TRUE,
FALSE, FALSE, TRUE, FALSE, TRUE, TRUE, FALSE, TRUE, TRUE,
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE,
TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE,
TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE,
TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE,
FALSE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE,
TRUE, FALSE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE,
TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE,
TRUE, FALSE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE,
FALSE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE,
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE,
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE,
FALSE, TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE,
FALSE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE,
TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE, TRUE, TRUE,
TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE,
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, TRUE,
TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, TRUE,
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE,
FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE,
FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE,
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE,
TRUE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE,
TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, FALSE,
TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE,
TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE,
FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE, TRUE, FALSE,
TRUE, TRUE, FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, TRUE,
TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE,
TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE,
FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE,
TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE,
TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE,
TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, TRUE,
TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE,
FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE,
FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE), odwolane = c(FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE,
TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE,
FALSE, TRUE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE,
FALSE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, TRUE,
TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE), relacja = c("Warszawa - Działdowo",
"Warszawa - Skierniewice", "Warszawa - Działdowo", "Warszawa - Skierniewice",
"Warszawa - Działdowo", NA, "Warszawa - Skierniewice", "Warszawa Lotnisko Chopina - Modlin",
"Warszawa - Łuków", "Warszawa - Skierniewice", "Warszawa - Deblin",
"Warszawa - Skierniewice", "Warszawa - Łowcz Główny",
"Warszawa - Skierniewice", "Warszawa - Małkinia", "Warszawa - Skierniewice",
NA, NA, "Warszawa - Skierniewice", "Warszawa - Dęblin",
"Dęblin - Radom", NA, "Skierniewice - Warszawa", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", NA, NA, "Warszawa - Śiedlce",
"Warszawa - Małkinia", "Warszawa - Skierniewice", "Warszawa - Łuków",
"Warszawa - Działdowo", NA, "Warszawa Zachodnia - Warszawa Lotnisko Chopina",
"Warszawa - Skierniewice", "Skarżysko Kamienna - Warszawa",
"Warszawa - Małkinia", "Warszawa - Skierniewice", "Warszawa - Skierniewice",
"Warszawa - Łowicz", "Warszawa - Skierniewice", NA, "Warszawa Wschodnia - Warszawa Stadion",
"Łuków - Warszawa", NA, "Warszawa - Skierniewice", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa - Dęblin", NA, "Warszawa - Siedlce",
"Pruszków - Otwock", "Warszawa - Skierniewice", "Warszawa - Skarżysko Kamienna",
"Warszawa - Skarżysko Kamienna", "Warszawa - Skarżysko Kamienna",
"Warszawa - Dęblin", "Warszawa - Skierniewice", "Warszawa - Dęblin",
"Warszawa - Skierniewice", NA, NA, "Warszawa - Łowicz",
NA, "Warszawa - Skierniewice", "Warszawa - Małkinia", "Warszawa - Skierniewice",
NA, "Warszawa - Łowiz Główny", "Warszawa - Działdowo",
"Warszawa - Skierniewice", "Warszawa - Kutno", "Warszawa - Skierniewice",
"Warszawa - Łowicz Główny", "Warszawa - Skierniewice",
"Warszawa - Łowicz", "Warszawa - Skierniewice", "Sierpc - Kutno",
NA, "Łuków - Warszawa", NA, "Warszawa - Działdowo", "Warszawa Wschodnia - Płock",
"Warszawa Zachodnia - Warszawa Wschodnia", "Sochaczew - Warszawa Rembertów",
"Warszawa - Dęblin", "Warszawa - Łuków", "Warszawa - Małkinia",
"Warszawa - Skierniewice", NA, "Warszawa - Dęblin", "Warszawa - Łowicz Główny",
"Warszawa - Otwock", "Warszawa - Skierniewice", "Warszawa - Działdowo",
"Warszawa - Skierniewice", NA, "Warszawa - Dęblin", NA,
"Warszawa - Działdowo", NA, "Warszawa - Małkinia", "Warszawa - Małkinia",
"Warszawa - Dęblin", "Warszawa - Łuków", "Warszawa - Łuków",
"Warszawa - Małkinia", "Warszawa - Skierniewice", "Warszawa - Skarżysko",
NA, "Warszawa - Łuków", "Warszawa - Łowicz", "Warszawa - Małkinia",
"Warszawa - Skierniewice", "Warszawa - Skierniewice", NA,
"Warszawa - Łowicz Główny", "Warszawa - Skierniewice",
"Warszawa Zachodnia - Warszawa Rembertów", "Warszawa - Łowicz Główny",
NA, "Warszawa - Skierniewice", "Warszawa - Łowicz", "Warszawa - Łowicz",
"Warszawa - Działdowo", "Warszawa - Małkina", "Warszawa - Dęblin",
"Warszawa Zachodnia - Góra Kalwaria", "Warszawa - Dęblin",
"Warszawa - Skierniewice", "Warszawa - Skierniewice", "Warszawa - Dęblin",
NA, "Warszawa - Dęblin", "Warszawa - Łowicz Główny",
"Warszawa - Łowicz Główny", "Warszawa - Łuków", "Warszawa - Łuków",
"Warszawa - Dęblin", NA, "Warszawa - Działdowo", "Warszawa -Radom",
NA, "Warszawa - Dęblin", NA, NA, "Warszawa - Zachodnia",
"Tłuszcz - Ostrołęka", "Warszawa - Skierniewice", "Warszawa - Siedlce",
NA, NA, "Warszawa - Skierniewice", NA, "Warszawa - Łowicz",
"Warszawa - Dęblin", "Warszawa - Kutno", "Warszawa - Łuków",
"Warszawa Zachodnia - Mińsk Mazowiecki", "Warszawa Zachodnia - Siedlce",
"Warszawa Gdańska - Nasielsk", "Sierpc - Kutno", "Kutno - Sierpc",
"Warszawa - Łowicz Główny", "Warszawa - Dęblin", NA,
"Warszawa - Dęblin", NA, "Warszawa - Dęblin", "Warszawa - Skieniewice",
"Warszawa - Radom", NA, "Warszawa - Radom", "Warszawa -Skierniewice",
"Mińsk Mazowiecki - Radom", "Tłuszcz - Małkinia", "Warszawa - Działdowo",
"Warszawa Rembertów - Radom", "Kutno - Sierpc", "Pilawa - Warszawa Zachodnia",
"Warszawa - Dęblin", NA, "Działdowo - Warszawa Zachodnia",
"Siedlce Baza - Czeremcha", NA, NA, "Siedlce - Czeremcha",
NA, NA, NA, "Warszawa - Dęblin", "Radom - Dęblin", NA,
"Warszawa - Radom", "Warszawa - Łowicz", "Warszawa - Radom",
"Warszawa - Łowicz", "Kutno - Płock", NA, "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Mińsk Mazowiecki - Błonie",
"Warszawa -Radom", "Warszawa Wileńska - Tłuszcz", "Warszawa - Łowicz",
"Warszawa - Łuków", "Warszawa Wileńska - Tłuszcz", "Warszawa - Łowicz",
NA, NA, "Warszawa Wschodnia - Łowicz", NA, "Warszawa - Działdowo",
"Warszawa - Dęblin", "Dęblin - Warszawa", "Warszawa - Skierniewice",
"Tłuszcz - Ostrołęka", "Działdowo - Warszawa", "Tłuszcz - Legionowo",
NA, NA, "Sierpc - Kutno", "Warszawa - Skierniewice", "Warszawa Wileńska - Małkinia",
"Siedlce - Warszawa", NA, "Siedlce - Czeremcha", "Warszawa - Skierniewice",
NA, NA, NA, "Siedlce - Warszawa", "Radom - Warszawa", "Skierniewice - Warszawa",
"Wschodnia - Warszawa Zachodnia", "Warszawa - Małkinia",
"Radom - Warszawa", "Warszawa - Skierniewice", NA, NA, NA,
"Warszawa - Skierniewice", "Warszawa - Małkinia", "Warszawa - Dęblin",
"Warszawa - Łuków", "Sierpc - Płock", "Warszawa - Radom",
NA, NA, "Warszawa - Nasielsk", "Warszawa - Dęblin", "Kutno - Sierpc",
"Góra Kalwaria - Warszawa", "Warszawa - Skierniewice", NA,
NA, "Warszawa - Skierniewice", "Dęblin - Warszawa", "Warszawa - Małkinia",
"Warszawa - Skierniewice", "Łuków - Siedlce", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa - Skierniewice", "Warszawa - Skierniewice",
NA, "Warszawa - Dęblin", "Warszawa - Małkinia", "Warszawa - Skarżysko Kamienna",
NA, NA, NA, "Warszawa - Siedlce", "Radom - Dęblin", "Radom - Drzewica",
"Warszawa - Łowicz", "Warszawa - Kutno", "Warszawa - Skierniewice",
"Tłuszcz - Ostrołęka", "Warszawa Wschodnia - Sochaczew",
"Warszawa - Skierniewice", "Warszawa Wschodnia - Warszawa Toruńska",
"Warszawa - Łowicz", "Warszawa Wileńska - Tłuszcz", "Tłuszcz - Ostrołęka",
"Warszawa - Skierniewice", "Warszawa - Skierniewice", NA,
"Warszawa - Skierniewice", "Warszawa - Skarżysko Kamienna",
"Warszawa - Skierniewice", "Warszawa - Kutno", "Warszawa - Kutno",
"Warszawa - Skierniewice", "Warszawa Rembertów - Warszawa Wschodnia",
"Tłuszcz - Ostrołęka", "Warszawa Zachodnia - Warszawa Wschodnia",
"Warszawa - Siedlce", "Warszawa - Łowicz Główny", "Warszawa - Siedlce",
"Warszawa - Działdowo", "Radom - Dęblin", "Mrozy - Sochaczew",
"Warszawa Zachodnia - Pilawa", "Warszawa Zachodnia - Nasielsk",
NA, "Warszawa - Skarżysko Kamienna", "Warszawa - Skierniewice",
"Warszawa - Dęblin", "Łowicz - Dęblin", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Sochaczew - Warszawa", "Warszawa - Skarżysko Kamienna",
"Warszawa - Małkinia", "Warszawa - Skierniewice", "Warszawa - Skierniewice",
NA, "Warszawa - Działdowo", "Warszawa - Skierniewice", "Warszawa - Działdowo",
"Warszawa - Skierniewice", "Warszawa - Działdowo", "Warszawa - Dęblin",
"Warszawa - Radom", "Warszawa - Działdowo", "Warszawa - Kutno",
"Warszawa - Małkinia", NA, NA, "Warszawa - Dęblin", "Nasielsk - Sieprc",
"Warszawa - Dęblin", "Warszawa Wileńska - Tłuszcz", "Warszawa - Dęblin",
NA, "Warszawa Gdańska - Warszawa Centrum", NA, NA, NA, NA,
"Warszawa - Radom", NA, "Działdowo - Warszawa", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa - Dęblin", "Warszawa - Skierniewice",
"Warszawa - Łuków", NA, NA, "Warszawa - Skierniewice",
NA, "Warszawa - Skarżysko Kamienna", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa - Skierniewice", "Warszawa - Dęblin",
"Tłuszcz - Łochów", "Warszawa-Skierniewice", NA, NA, "Warszawa - Skierniewice",
"Warszawa Zachodnia - Warszawa Centralna", "Warszawa - Skierniewice",
NA, NA, "Warszawa - Działdowo", NA, "Łochów - Tłuszcz",
"Warszawa - Skienriewice", NA, NA, "Warszawa - Radom", "Warszawa - Łuków",
NA, NA, "Łochów - Tłuszcz", "Warszawa - Łuków", "Warszawa - Dęblin",
"Warszawa - Skierniewice", "Warszawa - Dęblin", "Warszawa - Działdowo",
"Nasielsk - Sierpc", "Warszawa - Działdowo", "Warszawa - Działdowo",
"Warszawa - Skierniewice", "Warszawa - Skierniewice", "Radom - Dęblin",
NA, "Warszawa - Łowicz Główny", "Warszawa - Skierniewice",
"Warszawa- Skarżysko Kamienna", "Warszawa- Skierniewice",
"Warszawa - Skierniewice", "Warszawa - Łuków", "Siedlce - Czeremcha",
"Warszawa - Skierniewice", "Warszawa - Działdowo", "Warszawa - Łuków",
"Warszawa - Skierniewice", "Warszawa - Działdowo", "Warszawa - Łowicz Główny",
"Warszawa - Skierniewice", "Warszawa - Skierniewice", "Warszawa - Łowicz",
"Warszawa - Skierniewice", "Warszawa - Łowicz Główny",
"Warszawa - Łowicz Główny,", "Warszawa - Małkinia", "Warszawa - Skierniewice",
"Działdowo - Warszawa", "Warszawa - Siedlce", "Warszawa - Siedlce",
"Warszawa - Skierniewice,", "Warszawa - Łuków", "Nasielsk-Sierpc",
"Warszawa - Skierniewice", "Legionowo - Tłuszcz", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa-Łuków", "Warszawa - Skierniewice",
"Dęblin - Warszawa", "Warszawa - Skierniewice", NA, "Warszawa - Skierniewice",
"Warszawa - Radom", "Warszawa- Małkinia", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa - Dęblin", NA, NA,
"Radom - Warszawa Rembertów", "Warszawa - Skierniewice",
"Warszawa - Siedlce", NA, "Warszawa Zachodnia- Warszawa Wschodnia",
"Warszawa - Siedlce", "Warszawa - Skierniewice", "Warszawa - Łowicz",
"Warszawa - Małkinia", "Warszawa Zachodnia - Warszawa Wschodnia",
"Warszawa - Dęblin", "Warszawa - Skierniewice", NA, "Warszawa - Skierniewice",
"Warszawa - Działdowo", "Warszawa - Małkinia", "Radom - Dęblin",
"Sierpc - Kutno", "Warszawa - Skierniewice", "Warszawa - Działdowo",
"Warszawa - Siedlce", "Warszawa - Dęblin", "Warszawa - Skarżysko-Kamienna",
"Otwock - Błonie", "Warszawa - Siedlce", "Warszawa - Skierniewice",
"Tłuszcz - Ostrołęka", "Siedlce - Czeremcha", "Warszawa - Skierniewice",
"Warszawa - Siedlce Wzajemne", "Warszawa - Działdowo", "Warszawa - Skierniewice",
"Warszawa - Łuków", "Warszawa - Skierniewice", "Warszawa - Dzialdowo",
"Warszawa - Skierniewice", NA, "Warszawa Zachodnia - Warszawa Wschodnia",
"Warszawa - Dęblin", "Warszawa - Skierniewice", "Warszawa - Łuków",
"Warszawa - Skierniewice", "Warszawa - Radom", NA, "Warszawa - Łowicz",
"Warszawa - Łowicz", "Warszawa - Dęblin", "Warszawa- Skierniewice",
"Warszawa - Skierniewice", "Warszawa Wileńska - Małkinia",
NA, "Warszawa Zachodnia - Mrozy", "Warszawa - Skierniewice",
NA, "Warszawa - Łowicz", "Warszawa - Łowicz", NA, "Warszawa - Skarżysko Kamienna",
"Warszawa - Skierniewice", "Warszawa - Łuków", "Warszawa - Dęblin",
"Warszawa - Dęblin", "Warszawa - Skierniewice", "Warszawa - Dęblin",
NA, "Dęblin - Radom", "Warszawa - Małkinia", NA, "Warszawa - Działdowo",
"Warszawa - Małkinia", "Warszawa - Skierniewice", "Warszawa - Małkinia",
"Warszawa - Małkinia", "Warszawa - Skierniewice", "Warszawa Lotnisko Chopina - Modlin",
"Warszawa - Siedlce", "Warszawa - Skierniewice", "Działdowo - Warszawa",
"Warszawa - Małkinia", NA, "Dęblin - Warszawa", "Siedlce - Łuków",
"Warszawa - Skarżysko-Kamienna", NA, "Mińsk Mazowiecki - Warszawa Zachodnia",
NA, "Dęblin - Warszawa", "Warszawa-Łowicz", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa- Łuków", "Warszawa - Siedlce",
"Warszawa - Skierniewice", "Warszawa - Skierniewice[", "Warszawa Lotnisko Chopina-Modlin",
NA, "Warszawa - Skierniewice", "Warszawa - Skierniewice",
"Warszawa - Łowicz", "Warszawa - Skierniewice", "Warszawa - Skierniewice",
"Warszawa - Łuków", "Warszawa - Skierniewice", "Warszawa - Łuków",
"Warszawa - Małkinia", "Dęblin - Góra Kalwaria", "Warszawa - Małkinia",
"Warszawa - Skierniewice", "Warszawa - Małkinia", "Warszawa - Małkinia",
"Radom - Dęblin", "Warszawa - Skierniewice", "Warszawa - Siedlce",
"Warszawa - Skierniewice", "Warszawa - Dęblin", "Warszawa -Łuków",
"Warszawa Wileńska - Małkinia", "Warszawa - Działdowo",
"Góra Kalwaria - Warszawa", "Drzewica - Radom", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Nasielsk - Sierpc", "Radom - Dęblin",
"Warszawa Zachodnia - Działdowo;", NA, "Warszawa Wawer - Warszawa Falenica",
"Warszawa Wileńska - Małkinia", "Warszawa - Łuków", "Skarżysko-Kamienna",
"Tłuszcz - Ostrołęka", "Warszawa - Skierniewice", "Warszawa - Skierniewice",
"Warszawa - Łuków", "Warszawa - Skierniewice", "Warszawa - Łuków",
"Warszawa - Łowicz", "Radom - Warszawa", NA, "Warszawa - Małkinia",
"Warszawa - Dęblin", "Warszawa - Skierniewice", "Warszawa - Działdowo",
"Warszawa - Działdowo", "Warszawa - Skarżysko Kamienna",
"Dęblin - Góra Kalwaria", NA, "Warszawa - Łowicz", NA,
"Warszawa - Skierniewice", "Warszawa - Skierniewice", "Warszawa - Łowicz",
"Warszawa - Radom", "Warszawa - Skierniewice", "Warszawa - Skierniewice",
"Warszawa - Siedlce", "Kutno - Sierpc", "Warszawa - Dęblin",
"Warszawa - Dęblin", "Warszawa - Działdowo", "Warszawa - Sochaczew",
"Warszawa - Łuków", "Warszawa - Gdynia", "Błonie - Warszawa",
NA, "Radom - Drzewica", "Łowicz - Warszawa", "Warszawa - Skierniewice",
"Warszawa - Łuków", "Gdynia - Warszawa", "Radom - Drzewica",
"Warszawa - Działdowo", "Warszawa-Dzialdowo[", "Warszawa Okęcie - Piaseczno",
"Warszawa - Łuków", "Warszawa - Łowicz", "Warszawa Wschodnia - Warszawa Zachodnia",
"Warszawa - Radom", "Warszawa Zachodnia - Warszawa Wschodnia",
"Warszawa - Łuków", "Warszawa - Małkinia", "Warszawa - Skierniewice",
"Warszawa - Siedlce", NA, "Warszawa- Małkinia", "Warszawa - Łuków",
"Warszawa - Skierniewice", "Warszawa - Skierniewice", "Warszawa - Łowicz Główny",
"Warszawa - Góra Kalwaria", "Warszawa - Skierniewice", "Warszawa - Radom",
"Warszawa - Radom", "Warszawa - Dęblin", "Nasielsk - Warszawa",
"Warszawa - Łowicz", "Warszawa - Radom", "Warszawa - Radom",
"Warszawa - Skierniewice", "Warszawa - Skierniewice", NA,
"Warszawa - Skierniewice", "Warszawa - Siedlce", "Radom - Warszawa",
"Warszawa - Skierniewice", "Warszawa - Skierniewice", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa - Skierniewice", "Warszawa - Skierniewice",
"Warszawa - Działdowo", "Ostrołęka - Tłuszcz", "Warszawa - Dęblin",
"Warszawa - Skierniewice", "Warszawa Zachodnia - Działdowo",
"Radom - Skarżysko Kamienna", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa - Sochaczew", "Warszawa -Skierniewice",
"Warszawa - Skierniewice", "Mrozy - Mińsk Mazowiecki", NA,
"Warszawa - Skierniewice", "Warszawa - Skierniewice", "Radom - Skarżysko Kamienna",
"Warszawa - Działdowo", NA, "Małkinia - Warszawa", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", NA, "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa - Skierniewice", "Warszawa -Legionowo",
"Góra Kalwaria - Warszawa Wschodnia", NA, "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Siedlce - Warszawa", "Warszawa - Skierniewice",
"Siedlce - Warszawa", "Warszawa Wsch. - Modlin", "Piaseczno - Czachówek Południowy",
NA, "Warszawa - Działdowo", "Warszawa - Skierniewice", NA,
"Warszawa - Skierniewice", "Radom - Drzewica", "Warszawa - Dęblin",
NA, "Warszawa-Skierniewice", "Warszawa - Dęblin", "Warszawa - Dęblin",
"Warszawa - Łowicz Główny", "Warszawa - Skierniewice",
"Skierniewice - Warszawa", "Warszawa -Skierniewice[", "Warszawa Zachodnia-Pilawa",
NA, "Warszawa - Łowicz", "Warka-Radom", "Warszawa - Pilawa",
"Otwock-Warszawa[", "Warszawa - Skierniewice", NA, "Warszawa - Skierniewice",
"Warszawa - Małkinia", "Warszawa - Skierniewice", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa-Działdowo", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Siedlce - Warszawa", "Warszawa - Małkinia",
"Warszawa - Skierniewice,", "Warszawa - Skierniewice", "Warszawa - Siedlce",
"Warszawa - Skierniewice", "Warszawa - Skierniewice[", "Warszawa - Działdowo",
"Warszawa - Dęblin[", "Warszawa - Skierniewice", "Warszawa - Skierniewice",
"Warszawa - Radom", "Warszawa - Skierniewice", "Warszawa - Skierniewice",
"Warszawa - Małkinia", "Warszawa - Skierniewice", "Działdowo - Warszawa",
NA, "Warszawa - Działdowo", "Warszawa - Skierniewice", "Warszawa - Łowicz",
"Warszawa - Skierniewice", "Warszawa - Radm", "Warszawa - Działdowo",
"Warszawa - Skierniewice", "Warszawa - Działdowo", "Warszawa -Skierniewice",
NA, "Kutno - Sierpc", "Warszawa - Skierniewice", "Radom - Warszawa",
"Warszawa Zachodnia - Działdowo", NA, "Mińsk Mazowiecki - Warszawa Zachodnia",
"Warszawa - Skierniewice", "Warszawa - Dęblin", "Skierniewice - Warszawa",
"Warszawa - Skierniewice", "Warszawa - Skierniewice", "Małkinia - Warszawa Wileńska",
NA, NA, NA, "Warszawa - Skierniewice", "Warszaw - Dęblin",
"Warszawa - Skarżysko Kamienna", "Warszawa - Siedlce", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa Zachodnia - Warszawa Wschodnia",
"Otwock - Warszawa", "Skierniewice - Warszawa", "Mińsk Mazowiecki - Siedlce",
"Warszawa - Dęblin", "Warszawa-Skierniewice", "Tłuszcz-Ostrołęka",
"Grodzisk Mazowiecki - Warszawa Rembertów", NA, NA, NA,
"Warszawa - Skierniewice", "Warszawa - Skierniewice", "Warszawa - Skierniewice",
"Warszawa - Łowicz", "Warszawa - Łowicz", "Pilawa - Góra Kalwaria",
"Warszawa Wileńska - Małkinia", "Tłuszcz - Ostrołęka",
"Warszawa - Skierniewice", "Warszawa Zachodnia - Warszawa Włochy",
"Ostrołęka - Tłuszcz", "Warszawa - Łuków", NA, "Warszawa - Skierniewice",
"Warszawa - Działdowo", "Błonie - Warszawa Gołąbki",
"Warszawa - Skierniewice", "Siedlce - Warszawa", NA, NA,
"Warszawa - Dęblin", "Warszawa - Skierniewice", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa Zachodnia - Działdowo",
"Warszawa - Skierniewice", "Warszawa - Radom", "Warszawa - Skierniewice",
"Warszawa Zachodnia - Warszawa Wschodnia[", "Warszawa Wschodnia - Warszawa Zachodnia",
NA, "Warszawa - Siedlce", "Warszawa - Modlin", "Warszawa - Skierniewice",
"Warszawa - Dęblin", "Góra Kalwaria - Warszawa Wsch.",
"Warszawa - Łowicz", "Sierpc - Kutno", NA, "Warszawa - Skierniewice",
"Dęblin - Radom", "Mińsk Mazowiecki - Grodzisk Mazowiecki",
NA, "Warszawa - Skierniewice", "Warszawa - Małkinia", "Warszawa - Dęblin",
"Warszawa - Siedlce", NA, "Warszawa - Skierniewice", NA,
"Warszawa - Siedlce", "Warszawa - Małkinia", "Warszawa Rembertów - Grodzisk Mazowiecki",
"Warszawa - Skierniewice", NA, "Warszawa - Małkinia", NA,
NA, "Warszawa - Łowicz", NA, NA, "Warszawa - Działdowo",
"Kutno - Sierpc", "Warszawa - Skierniewice", NA, "Warszawa Wschodnia - Warszawa Rembertów",
"Warszawa - Skierniewice", "Warszawa - Małkinia", "Warszawa - Skierniewice",
"Sierpc - Kutno", "Warszawa - Sierpc", "Warszawa Włochy - Pruszków",
NA, "Warszawa - Dęblin", "Warszawa - Skierniewice", "Warszawa - Siedlce",
NA, "Warszawa - Łowicz", "Tłuszcz - Ostrołęka", "Warszawa - Skierniewice",
NA, "Warszawa - Radom", "Warszawa - Skierniewice", "Warszawa - Skierniewice",
"Warszawa - Siedlce", "Warszawa - Siedlce", NA, "Warszawa - Małkinia",
"Grodzisk Mazowiecki - Warszawa", "Warszawa - Siedlce", "Warszawa - Skierniewice",
NA, "Warszawa - Skierniewice", "Warszawa - Skierniewice",
NA, "Warszawa - Małkinia", "Warszawa - Skierniewice", "Radom - Warszawa",
NA, "Warszawa - Skierniewice", "Warszawa - Radom", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa - Działdowo", "Warszawa - Skierniewice",
"Warszawa - Siedlce", "Dęblin - Radom", "Sierpc - Kutno",
"Tłuszcz - Legionowo", NA, "Legionowo - Tłuszcz", "Warszawa - Skierniewice",
NA, "Sierpc - Nasielsk", "Sierpc - Nasielsk", "Warszawa - Siedlce",
NA, "Mińsk Mazowiecki - Warszawa", NA, "Sierpc - Kutno",
"Warszawa - Dęblin", NA, "Warszawa - Skierniewice", "Działdowo - Warszawa",
"Warszawa - Działdowo", "Przysucha - Radom", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa - Dęblin", "Warszawa - Siedlce",
NA, NA, "Warszawa - Siedlce", NA, NA, "Warszawa - Błonie,",
"Warszawa - Dęblin", "Warszawa - Łowicz", "Warszawa - Małkinia",
"Warszawa - Dęblin", "Warszawa - Siedlce", NA, "Dęblin - Radom",
"Warszawa - Siedlce", "Warszawa - Łowicz", "Warszawa - Skierniewice",
NA, "Warszawa - Dęblin", NA, NA, "Warszawa - Dęblin", "Kutno - Sierpc",
NA, "Warszawa - Skierniewice", "Kutno - Sierpc", NA, NA,
"Kutno - Sierpc", "Warszawa - Skierniewice", "Płock - Sierpc",
NA, "Radom - Warszawa", NA, NA, NA, NA, "Warszawa - Siedlce",
NA, NA, NA, NA, "Radom - Dęblin", NA, NA, NA, NA, NA, "Warszawa - Grodzisk Mazowiecki",
NA, "Kutno - Sierpc", NA, "Łowicz - Warszawa", NA, "Radom - Dęblin",
"Warszawa - Skierniewice", "Warszawa Gdańska - Nasielsk",
"Warszawa - Skierniewice", "Radom - Przysucha,", NA, "Warszawa Rembertów - Warszawa Zachodnia",
"Warszawa Zachodnia - Warszawa Rembertów", "Warszawa - Skierniewice",
"Warszawa - Małkinia", NA, "Skierniewice - Warszawa", "Warszawa - Sochaczew",
"Sierpc - Nasielsk", "Warszawa - Siedlce", NA, NA, "Warszawa - Radom",
"Modlin - Warszawa Lotnisko Chopina,", "Warszawa - Góra Kalwaria",
"Warszawa - Skierniewice", NA, "Warszawa Zachodnia - Siedlce",
"Siedlce - Warszawa", "Warszawa - Sochaczew", "Warszawa - Grodzisk Mazowiecki",
"Radom - Skarżysko Kamienna", "Kutno - Sierpc", NA, "Warszawa - Skierniewice",
"Radom - Skarżysko Kamienna", "Warszawa - Radom", "Przysucha - Radom",
"Radom - Drzewica", "Warszawa - Skierniewice", "Dęblin - Warszawa",
"Tłuszcz - Ostrołęka", "Sierpc - Kutno", "Warszawa Zachodnia - Warszawa Gdańska",
"Warszawa - Dęblin", NA, "Warszawa - Skierniewice", NA,
NA, "Siedlce - Łuków", "Radom - Skarżysko Kamienna", "Warszawa - Radom",
NA, "Radom - Warszawa", "Warszawa Zachodnia - Warszawa Gdańska",
NA, "Wołomin - Warszawa", NA, "Warszawa - Dęblin", NA,
"Warszawa - Skierniewce", NA, "Warszawa - Skierniewice",
"Warszawa - Małkinia", NA, "Radom - Dęblin", NA, "Warszawa - Dęblin",
"Warszawa - Skierniewice", "Góra Kalwaria - Warszawa", NA,
"Pilawa - Warszawa", "Warszawa - Skierniewice", "Warszawa - Działdowo",
"Warszawa - Małkinia", "Warszawa - Działdowo", NA, "Warszawa - Skierniewice",
"Sochaczew - Warszawa", NA, NA, "Warszawa - Małkinia", "Warszawa - Skierniewice",
"Warszawa - Radom", NA, "Warszawa - Dęblin", "Warszawa - Skierniewice",
"Warszawa - Łowicz", "Warszawa - Skierniewice", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa - Łowicz", "Warszawa - Dęblin",
"Warszawa Zachodnia - Warszawa Gdańska", "Warszawa - Małkinia",
"Warszawa - Skierniewice", NA, "Warszawa - Skierniewice",
NA, NA, "Warszawa - Łowicz", "Warszawa - Radom", "Łowicz - Warszawa",
"Warszawa - Skierniewice", "Siedlce - Warszawa", "Kutno - Sierpc",
NA, "Góra Kalwaria - Dęblin", "Skierniewice - Warszawa",
"Radom - Dęblin", "Warszawa - Otwock", "Ostrołęka - Tłuszcz",
"Warszawa - Skierniewice", "Warszawa - Siedlce", NA, "Warszawa - Siedlce",
"Warszawa - Skierniewice", "Warszawa - Małkinia", "Warszawa - Wołomin",
"Warszawa - Siedlce", "Kutno - Sierpc", "Warszawa Wschodnia - Warszawa Zachodnia)",
"Tłuszcz - Ostrołęka", NA, "Grodzisk Mazowiecki - Warszawa Wschodnia",
"Warszawa Gdańska - Warszawa", "Siedlce - Warszawa", NA,
"Warszawa - Skierniewice", NA, "Warszawa - Skierniewice",
NA, NA, "Warszawa - Skierniewice", "Warszawa - Łowicz",
"Warszawa - Łowicz", "Warszawa - Otwock", "Warszawa Wschodnia - Łowicz Główny",
"Warszawa - Skierniewice", "Warszawa - Skierniewice", "Warszawa - Małkinia",
"Warszawa - Skierniewice", NA, "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa Wschodnia - Radom",
"Warszawa Zachodnia - Warszawa Wschodnia", "Warszawa - Łowicz",
"Warszawa - Działdowo", "Modlin - Lotnisko Chopina", "Warszawa - Łowicz",
NA, "Dęblin - Warszawa", "Warszawa - Łowicz", "Warszawa - Małkinia",
"Warszawa - Małkinia", "Siedlce - Łuków", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa - Łowicz", "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Skierniewice - Warszawa", NA,
"Tłuszcz - Czeremcha", "Warszawa - Nasielsk", "Tłuszcz - Czeremcha",
"Ostrołęka - Tłuszcz", NA, "Warszawa - Siedlce", NA, "Warszawa - Skierniewice",
"Warszawa - Siedlce", "Warszawa - Skierniewice", NA, "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Łowicz - Otwock", NA, "Warszawa - Skierniewice",
"Warszawa - Małkinia", "Warszawa - Skierniewice", "Warszawa - Skierniewice",
"Warszawa - Skierniewce", "Warszawa - Łowicz", "Sochaczew - Bednary",
"Kutno - Sierpc", NA, "Warszawa Wschodnia - Skierniewice",
"Warszawa - Skierniewice", "Warszawa Wschodnia - Warszawa Zachodnia.",
NA, "Radom - Pionki", "Warszawa - Skierniewice", NA, "Warszawa - Skierniewice",
"Warszawa - Skierniewice", "Warszawa - Radom", NA, "Warszawa - Dęblin)",
"Łuków - Warszawa", NA, "Warszawa - Siedlce", "Warszawa - Skierniewice",
"Mińsk Mazowiecki - Radom", "Warszawa - Radom", "Warszawa - Radom",
"Zielonka - Warszawa Rembertów", "Dęblin - Radom", "Warszawa - Skierniewice",
"Warszawa - Małkinia", "Warszawa - Małkinia", NA, "Małkinia - Warszawa",
"Mińsk Mazowiecki - Błonie", "Warszawa - Mińsk Mazowiecki",
"Warszawa - Dęblin", "Warszawa - Skierniewice", NA, "Warszawa - Siedlce",
"Warszawa - Skierniewice", NA, "Warszawa - Skierniewice",
"Warszawa - Grodzisk Mazowiecki", "Warszawa - Skierniewice",
"Warszawa - Siedlce", NA, "Radom - Tłuszcz", "Warszawa - Błonie",
NA, NA, NA, "Warszawa - Skierniewice", "Warszawa - Skierniewce",
"Warszawa - Łowicz", NA, "Otwock - Warszawa Zachodnia",
"Warszawa - Skierniewice", "Czeremcha - Siedlce", "Warszawa - Siedlce",
"Warszawa - Małkinia", "Grodzisk Mazowiecki - Warszawa",
"Siedlce - Grodzisk Mazowiecki", "Siedlce - Grodzisk Mazowiecki",
"Skierniewice - Otwock", NA, "Warszawa - Skierniewice", NA,
NA, "Warszawa - Skierniewice", "Sierpc - Kutno", "Sierpc - Nasielsk",
NA, "Warszawa - Siedlce", NA, NA, NA, "Otwock - Warszawa",
"Warszawa - Skierniewice", "Warszawa - Siedlce", "Siedlce - Warszawa",
"Warszawa - Siedlce", "Radom - Dęblin", "Tłuszcz - Nasielsk",
NA, "Warszawa - Radom", "Warszawa - Działdowo", "Warszawa - Grodzisk Mazowiecki"
), from = c("Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", NA, "Warszawa", "Warszawa Lotnisko Chopina",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, NA, "Warszawa", "Warszawa",
"Dęblin", NA, "Skierniewice", "Warszawa", "Warszawa", NA,
NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa Zachodnia", "Warszawa", "Skarżysko Kamienna",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa Wschodnia", "Łuków", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", NA, "Warszawa", "Pruszków", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", NA, NA, "Warszawa", NA, "Warszawa",
"Warszawa", "Warszawa", NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Sierpc", NA, "Łuków", NA, "Warszawa", "Warszawa Wschodnia",
"Warszawa Zachodnia", "Sochaczew", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, "Warszawa", NA, "Warszawa",
NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa Zachodnia", "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa Zachodnia",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", NA, "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa", "Warszawa", NA, "Warszawa", NA, NA, "Warszawa",
"Tłuszcz", "Warszawa", "Warszawa", NA, NA, "Warszawa", NA,
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa Zachodnia",
"Warszawa Zachodnia", "Warszawa Gdańska", "Sierpc", "Kutno",
"Warszawa", "Warszawa", NA, "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa", NA, "Warszawa", "Warszawa", "Mińsk Mazowiecki",
"Tłuszcz", "Warszawa", "Warszawa Rembertów", "Kutno", "Pilawa",
"Warszawa", NA, "Działdowo", "Siedlce Baza", NA, NA, "Siedlce",
NA, NA, NA, "Warszawa", "Radom", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Kutno", NA, "Warszawa", "Warszawa",
"Mińsk Mazowiecki", "Warszawa", "Warszawa Wileńska", "Warszawa",
"Warszawa", "Warszawa Wileńska", "Warszawa", NA, NA, "Warszawa Wschodnia",
NA, "Warszawa", "Warszawa", "Dęblin", "Warszawa", "Tłuszcz",
"Działdowo", "Tłuszcz", NA, NA, "Sierpc", "Warszawa", "Warszawa Wileńska",
"Siedlce", NA, "Siedlce", "Warszawa", NA, NA, NA, "Siedlce",
"Radom", "Skierniewice", "Wschodnia", "Warszawa", "Radom",
"Warszawa", NA, NA, NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Sierpc", "Warszawa", NA, NA, "Warszawa", "Warszawa",
"Kutno", "Góra Kalwaria", "Warszawa", NA, NA, "Warszawa",
"Dęblin", "Warszawa", "Warszawa", "Łuków", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa", NA, NA, NA, "Warszawa", "Radom", "Radom", "Warszawa",
"Warszawa", "Warszawa", "Tłuszcz", "Warszawa Wschodnia",
"Warszawa", "Warszawa Wschodnia", "Warszawa", "Warszawa Wileńska",
"Tłuszcz", "Warszawa", "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa Rembertów",
"Tłuszcz", "Warszawa Zachodnia", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Radom", "Mrozy", "Warszawa Zachodnia",
"Warszawa Zachodnia", NA, "Warszawa", "Warszawa", "Warszawa",
"Łowicz", "Warszawa", "Warszawa", "Sochaczew", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, NA, "Warszawa", "Nasielsk",
"Warszawa", "Warszawa Wileńska", "Warszawa", NA, "Warszawa Gdańska",
NA, NA, NA, NA, "Warszawa", NA, "Działdowo", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", NA, NA, "Warszawa",
NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Tłuszcz", "Warszawa", NA, NA, "Warszawa", "Warszawa Zachodnia",
"Warszawa", NA, NA, "Warszawa", NA, "Łochów", "Warszawa",
NA, NA, "Warszawa", "Warszawa", NA, NA, "Łochów", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Nasielsk",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Radom",
NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Siedlce", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Działdowo", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Nasielsk", "Warszawa", "Legionowo", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Dęblin", "Warszawa",
NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", NA, NA, "Radom", "Warszawa", "Warszawa", NA,
"Warszawa Zachodnia", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa Zachodnia", "Warszawa", "Warszawa",
NA, "Warszawa", "Warszawa", "Warszawa", "Radom", "Sierpc",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Otwock", "Warszawa", "Warszawa", "Tłuszcz", "Siedlce",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, "Warszawa Zachodnia",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa Wileńska", NA, "Warszawa Zachodnia", "Warszawa",
NA, "Warszawa", "Warszawa", NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", NA, "Dęblin",
"Warszawa", NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa Lotnisko Chopina", "Warszawa",
"Warszawa", "Działdowo", "Warszawa", NA, "Dęblin", "Siedlce",
"Warszawa", NA, "Mińsk Mazowiecki", NA, "Dęblin", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa Lotnisko Chopina", NA, "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Dęblin", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Radom", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa Wileńska",
"Warszawa", "Góra Kalwaria", "Drzewica", "Warszawa", "Warszawa",
"Nasielsk", "Radom", "Warszawa Zachodnia", NA, "Warszawa Wawer",
"Warszawa Wileńska", "Warszawa", "Skarżysko", "Tłuszcz",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Radom", NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Dęblin", NA, "Warszawa",
NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Kutno", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Błonie",
NA, "Radom", "Łowicz", "Warszawa", "Warszawa", "Gdynia",
"Radom", "Warszawa", "Warszawa", "Warszawa Okęcie", "Warszawa",
"Warszawa", "Warszawa Wschodnia", "Warszawa", "Warszawa Zachodnia",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", NA, "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Nasielsk",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa", "Warszawa", "Radom", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Ostrołęka", "Warszawa", "Warszawa", "Warszawa Zachodnia",
"Radom", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Mrozy", NA, "Warszawa", "Warszawa", "Radom",
"Warszawa", NA, "Małkinia", "Warszawa", "Warszawa", NA,
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Góra Kalwaria",
NA, "Warszawa", "Warszawa", "Siedlce", "Warszawa", "Siedlce",
"Warszawa Wsch", "Piaseczno", NA, "Warszawa", "Warszawa",
NA, "Warszawa", "Radom", "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Skierniewice", "Warszawa",
"Warszawa Zachodnia", NA, "Warszawa", "Warka", "Warszawa",
"Otwock", "Warszawa", NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Siedlce", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Działdowo", NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", NA, "Kutno", "Warszawa", "Radom", "Warszawa Zachodnia",
NA, "Mińsk Mazowiecki", "Warszawa", "Warszawa", "Skierniewice",
"Warszawa", "Warszawa", "Małkinia", NA, NA, NA, "Warszawa",
"Warszaw", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa Zachodnia", "Otwock", "Skierniewice", "Mińsk Mazowiecki",
"Warszawa", "Warszawa", "Tłuszcz", "Grodzisk Mazowiecki",
NA, NA, NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Pilawa", "Warszawa Wileńska", "Tłuszcz", "Warszawa",
"Warszawa Zachodnia", "Ostrołęka", "Warszawa", NA, "Warszawa",
"Warszawa", "Błonie", "Warszawa", "Siedlce", NA, NA, "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa Zachodnia",
"Warszawa", "Warszawa", "Warszawa", "Warszawa Zachodnia",
"Warszawa Wschodnia", NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Góra Kalwaria", "Warszawa", "Sierpc", NA, "Warszawa",
"Dęblin", "Mińsk Mazowiecki", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", NA, "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa Rembertów", "Warszawa", NA, "Warszawa", NA, NA,
"Warszawa", NA, NA, "Warszawa", "Kutno", "Warszawa", NA,
"Warszawa Wschodnia", "Warszawa", "Warszawa", "Warszawa",
"Sierpc", "Warszawa", "Warszawa Włochy", NA, "Warszawa",
"Warszawa", "Warszawa", NA, "Warszawa", "Tłuszcz", "Warszawa",
NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa", "Grodzisk Mazowiecki", "Warszawa", "Warszawa",
NA, "Warszawa", "Warszawa", NA, "Warszawa", "Warszawa", "Radom",
NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Dęblin", "Sierpc", "Tłuszcz",
NA, "Legionowo", "Warszawa", NA, "Sierpc", "Sierpc", "Warszawa",
NA, "Mińsk Mazowiecki", NA, "Sierpc", "Warszawa", NA, "Warszawa",
"Działdowo", "Warszawa", "Przysucha", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", NA, NA, "Warszawa", NA, NA, "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
NA, "Dęblin", "Warszawa", "Warszawa", "Warszawa", NA, "Warszawa",
NA, NA, "Warszawa", "Kutno", NA, "Warszawa", "Kutno", NA,
NA, "Kutno", "Warszawa", "Płock", NA, "Radom", NA, NA, NA,
NA, "Warszawa", NA, NA, NA, NA, "Radom", NA, NA, NA, NA,
NA, "Warszawa", NA, "Kutno", NA, "Łowicz", NA, "Radom",
"Warszawa", "Warszawa Gdańska", "Warszawa", "Radom", NA,
"Warszawa Rembertów", "Warszawa Zachodnia", "Warszawa",
"Warszawa", NA, "Skierniewice", "Warszawa", "Sierpc", "Warszawa",
NA, NA, "Warszawa", "Modlin", "Warszawa", "Warszawa", NA,
"Warszawa Zachodnia", "Siedlce", "Warszawa", "Warszawa",
"Radom", "Kutno", NA, "Warszawa", "Radom", "Warszawa", "Przysucha",
"Radom", "Warszawa", "Dęblin", "Tłuszcz", "Sierpc", "Warszawa Zachodnia",
"Warszawa", NA, "Warszawa", NA, NA, "Siedlce", "Radom", "Warszawa",
NA, "Radom", "Warszawa Zachodnia", NA, "Wołomin", NA, "Warszawa",
NA, "Warszawa", NA, "Warszawa", "Warszawa", NA, "Radom",
NA, "Warszawa", "Warszawa", "Góra Kalwaria", NA, "Pilawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", NA, "Warszawa",
"Sochaczew", NA, NA, "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa Zachodnia",
"Warszawa", "Warszawa", NA, "Warszawa", NA, NA, "Warszawa",
"Warszawa", "Łowicz", "Warszawa", "Siedlce", "Kutno", NA,
"Góra Kalwaria", "Skierniewice", "Radom", "Warszawa", "Ostrołęka",
"Warszawa", "Warszawa", NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Kutno", "Warszawa Wschodnia", "Tłuszcz",
NA, "Grodzisk Mazowiecki", "Warszawa Gdańska", "Siedlce",
NA, "Warszawa", NA, "Warszawa", NA, NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa Wschodnia", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa Wschodnia", "Warszawa Zachodnia", "Warszawa", "Warszawa",
"Modlin", "Warszawa", NA, "Dęblin", "Warszawa", "Warszawa",
"Warszawa", "Siedlce", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Skierniewice", NA, "Tłuszcz", "Warszawa",
"Tłuszcz", "Ostrołęka", NA, "Warszawa", NA, "Warszawa",
"Warszawa", "Warszawa", NA, "Warszawa", "Warszawa", "Łowicz",
NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Sochaczew", "Kutno", NA, "Warszawa Wschodnia",
"Warszawa", "Warszawa Wschodnia", NA, "Radom", "Warszawa",
NA, "Warszawa", "Warszawa", "Warszawa", NA, "Warszawa", "Łuków",
NA, "Warszawa", "Warszawa", "Mińsk Mazowiecki", "Warszawa",
"Warszawa", "Zielonka", "Dęblin", "Warszawa", "Warszawa",
"Warszawa", NA, "Małkinia", "Mińsk Mazowiecki", "Warszawa",
"Warszawa", "Warszawa", NA, "Warszawa", "Warszawa", NA, "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, "Radom", "Warszawa",
NA, NA, NA, "Warszawa", "Warszawa", "Warszawa", NA, "Otwock",
"Warszawa", "Czeremcha", "Warszawa", "Warszawa", "Grodzisk Mazowiecki",
"Siedlce", "Siedlce", "Skierniewice", NA, "Warszawa", NA,
NA, "Warszawa", "Sierpc", "Sierpc", NA, "Warszawa", NA, NA,
NA, "Otwock", "Warszawa", "Warszawa", "Siedlce", "Warszawa",
"Radom", "Tłuszcz", NA, "Warszawa", "Warszawa", "Warszawa"
), to = c("Działdowo", "Skierniewice", "Działdowo", "Skierniewice",
"Działdowo", NA, "Skierniewice", "Modlin", "Łuków", "Skierniewice",
"Deblin", "Skierniewice", "Łowcz Główny", "Skierniewice",
"Małkinia", "Skierniewice", NA, NA, "Skierniewice", "Dęblin",
"Radom", NA, "Warszawa", "Skierniewice", "Skierniewice",
NA, NA, "Śiedlce", "Małkinia", "Skierniewice", "Łuków",
"Działdowo", NA, "Warszawa Lotnisko Chopina", "Skierniewice",
"Warszawa", "Małkinia", "Skierniewice", "Skierniewice",
"Łowicz", "Skierniewice", NA, "Warszawa Stadion", "Warszawa",
NA, "Skierniewice", "Skierniewice", "Skierniewice", "Dęblin",
NA, "Siedlce", "Otwock", "Skierniewice", "Skarżysko Kamienna",
"Skarżysko Kamienna", "Skarżysko Kamienna", "Dęblin",
"Skierniewice", "Dęblin", "Skierniewice", NA, NA, "Łowicz",
NA, "Skierniewice", "Małkinia", "Skierniewice", NA, "Łowiz Główny",
"Działdowo", "Skierniewice", "Kutno", "Skierniewice", "Łowicz Główny",
"Skierniewice", "Łowicz", "Skierniewice", "Kutno", NA, "Warszawa",
NA, "Działdowo", "Płock", "Warszawa Wschodnia", "Warszawa Rembertów",
"Dęblin", "Łuków", "Małkinia", "Skierniewice", NA, "Dęblin",
"Łowicz Główny", "Otwock", "Skierniewice", "Działdowo",
"Skierniewice", NA, "Dęblin", NA, "Działdowo", NA, "Małkinia",
"Małkinia", "Dęblin", "Łuków", "Łuków", "Małkinia",
"Skierniewice", "Skarżysko", NA, "Łuków", "Łowicz", "Małkinia",
"Skierniewice", "Skierniewice", NA, "Łowicz Główny", "Skierniewice",
"Warszawa Rembertów", "Łowicz Główny", NA, "Skierniewice",
"Łowicz", "Łowicz", "Działdowo", "Małkina", "Dęblin",
"Góra Kalwaria", "Dęblin", "Skierniewice", "Skierniewice",
"Dęblin", NA, "Dęblin", "Łowicz Główny", "Łowicz Główny",
"Łuków", "Łuków", "Dęblin", NA, "Działdowo", "Radom",
NA, "Dęblin", NA, NA, "Zachodnia", "Ostrołęka", "Skierniewice",
"Siedlce", NA, NA, "Skierniewice", NA, "Łowicz", "Dęblin",
"Kutno", "Łuków", "Mińsk Mazowiecki", "Siedlce", "Nasielsk",
"Kutno", "Sierpc", "Łowicz Główny", "Dęblin", NA, "Dęblin",
NA, "Dęblin", "Skieniewice", "Radom", NA, "Radom", "Skierniewice",
"Radom", "Małkinia", "Działdowo", "Radom", "Sierpc", "Warszawa Zachodnia",
"Dęblin", NA, "Warszawa Zachodnia", "Czeremcha", NA, NA,
"Czeremcha", NA, NA, NA, "Dęblin", "Dęblin", NA, "Radom",
"Łowicz", "Radom", "Łowicz", "Płock", NA, "Skierniewice",
"Skierniewice", "Błonie", "Radom", "Tłuszcz", "Łowicz",
"Łuków", "Tłuszcz", "Łowicz", NA, NA, "Łowicz", NA,
"Działdowo", "Dęblin", "Warszawa", "Skierniewice", "Ostrołęka",
"Warszawa", "Legionowo", NA, NA, "Kutno", "Skierniewice",
"Małkinia", "Warszawa", NA, "Czeremcha", "Skierniewice",
NA, NA, NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa Zachodnia",
"Małkinia", "Warszawa", "Skierniewice", NA, NA, NA, "Skierniewice",
"Małkinia", "Dęblin", "Łuków", "Płock", "Radom", NA,
NA, "Nasielsk", "Dęblin", "Sierpc", "Warszawa", "Skierniewice",
NA, NA, "Skierniewice", "Warszawa", "Małkinia", "Skierniewice",
"Siedlce", "Skierniewice", "Skierniewice", "Skierniewice",
"Skierniewice", NA, "Dęblin", "Małkinia", "Skarżysko Kamienna",
NA, NA, NA, "Siedlce", "Dęblin", "Drzewica", "Łowicz",
"Kutno", "Skierniewice", "Ostrołęka", "Sochaczew", "Skierniewice",
"Warszawa Toruńska", "Łowicz", "Tłuszcz", "Ostrołęka",
"Skierniewice", "Skierniewice", NA, "Skierniewice", "Skarżysko Kamienna",
"Skierniewice", "Kutno", "Kutno", "Skierniewice", "Warszawa Wschodnia",
"Ostrołęka", "Warszawa Wschodnia", "Siedlce", "Łowicz Główny",
"Siedlce", "Działdowo", "Dęblin", "Sochaczew", "Pilawa",
"Nasielsk", NA, "Skarżysko Kamienna", "Skierniewice", "Dęblin",
"Dęblin", "Skierniewice", "Skierniewice", "Warszawa", "Skarżysko Kamienna",
"Małkinia", "Skierniewice", "Skierniewice", NA, "Działdowo",
"Skierniewice", "Działdowo", "Skierniewice", "Działdowo",
"Dęblin", "Radom", "Działdowo", "Kutno", "Małkinia", NA,
NA, "Dęblin", "Sieprc", "Dęblin", "Tłuszcz", "Dęblin",
NA, "Warszawa Centrum", NA, NA, NA, NA, "Radom", NA, "Warszawa",
"Skierniewice", "Skierniewice", "Dęblin", "Skierniewice",
"Łuków", NA, NA, "Skierniewice", NA, "Skarżysko Kamienna",
"Skierniewice", "Skierniewice", "Skierniewice", "Dęblin",
"Łochów", "Skierniewice", NA, NA, "Skierniewice", "Warszawa Centralna",
"Skierniewice", NA, NA, "Działdowo", NA, "Tłuszcz", "Skienriewice",
NA, NA, "Radom", "Łuków", NA, NA, "Tłuszcz", "Łuków",
"Dęblin", "Skierniewice", "Dęblin", "Działdowo", "Sierpc",
"Działdowo", "Działdowo", "Skierniewice", "Skierniewice",
"Dęblin", NA, "Łowicz Główny", "Skierniewice", "Skarżysko Kamienna",
"Skierniewice", "Skierniewice", "Łuków", "Czeremcha", "Skierniewice",
"Działdowo", "Łuków", "Skierniewice", "Działdowo", "Łowicz Główny",
"Skierniewice", "Skierniewice", "Łowicz", "Skierniewice",
"Łowicz Główny", "Łowicz Główny", "Małkinia", "Skierniewice",
"Warszawa", "Siedlce", "Siedlce", "Skierniewice", "Łuków",
"Sierpc", "Skierniewice", "Tłuszcz", "Skierniewice", "Skierniewice",
"Łuków", "Skierniewice", "Warszawa", "Skierniewice", NA,
"Skierniewice", "Radom", "Małkinia", "Skierniewice", "Skierniewice",
"Dęblin", NA, NA, "Warszawa Rembertów", "Skierniewice",
"Siedlce", NA, "Warszawa Wschodnia", "Siedlce", "Skierniewice",
"Łowicz", "Małkinia", "Warszawa Wschodnia", "Dęblin",
"Skierniewice", NA, "Skierniewice", "Działdowo", "Małkinia",
"Dęblin", "Kutno", "Skierniewice", "Działdowo", "Siedlce",
"Dęblin", "Skarżysko", "Błonie", "Siedlce", "Skierniewice",
"Ostrołęka", "Czeremcha", "Skierniewice", "Siedlce Wzajemne",
"Działdowo", "Skierniewice", "Łuków", "Skierniewice",
"Dzialdowo", "Skierniewice", NA, "Warszawa Wschodnia", "Dęblin",
"Skierniewice", "Łuków", "Skierniewice", "Radom", NA, "Łowicz",
"Łowicz", "Dęblin", "Skierniewice", "Skierniewice", "Małkinia",
NA, "Mrozy", "Skierniewice", NA, "Łowicz", "Łowicz", NA,
"Skarżysko Kamienna", "Skierniewice", "Łuków", "Dęblin",
"Dęblin", "Skierniewice", "Dęblin", NA, "Radom", "Małkinia",
NA, "Działdowo", "Małkinia", "Skierniewice", "Małkinia",
"Małkinia", "Skierniewice", "Modlin", "Siedlce", "Skierniewice",
"Warszawa", "Małkinia", NA, "Warszawa", "Łuków", "Skarżysko",
NA, "Warszawa Zachodnia", NA, "Warszawa", "Łowicz", "Skierniewice",
"Skierniewice", "Łuków", "Siedlce", "Skierniewice", "Skierniewice",
"Modlin", NA, "Skierniewice", "Skierniewice", "Łowicz",
"Skierniewice", "Skierniewice", "Łuków", "Skierniewice",
"Łuków", "Małkinia", "Góra Kalwaria", "Małkinia", "Skierniewice",
"Małkinia", "Małkinia", "Dęblin", "Skierniewice", "Siedlce",
"Skierniewice", "Dęblin", "Łuków", "Małkinia", "Działdowo",
"Warszawa", "Radom", "Skierniewice", "Skierniewice", "Sierpc",
"Dęblin", "Działdowo", NA, "Warszawa Falenica", "Małkinia",
"Łuków", "Kamienna", "Ostrołęka", "Skierniewice", "Skierniewice",
"Łuków", "Skierniewice", "Łuków", "Łowicz", "Warszawa",
NA, "Małkinia", "Dęblin", "Skierniewice", "Działdowo",
"Działdowo", "Skarżysko Kamienna", "Góra Kalwaria", NA,
"Łowicz", NA, "Skierniewice", "Skierniewice", "Łowicz",
"Radom", "Skierniewice", "Skierniewice", "Siedlce", "Sierpc",
"Dęblin", "Dęblin", "Działdowo", "Sochaczew", "Łuków",
"Gdynia", "Warszawa", NA, "Drzewica", "Warszawa", "Skierniewice",
"Łuków", "Warszawa", "Drzewica", "Działdowo", "Dzialdowo",
"Piaseczno", "Łuków", "Łowicz", "Warszawa Zachodnia",
"Radom", "Warszawa Wschodnia", "Łuków", "Małkinia", "Skierniewice",
"Siedlce", NA, "Małkinia", "Łuków", "Skierniewice", "Skierniewice",
"Łowicz Główny", "Góra Kalwaria", "Skierniewice", "Radom",
"Radom", "Dęblin", "Warszawa", "Łowicz", "Radom", "Radom",
"Skierniewice", "Skierniewice", NA, "Skierniewice", "Siedlce",
"Warszawa", "Skierniewice", "Skierniewice", "Skierniewice",
"Skierniewice", "Skierniewice", "Skierniewice", "Działdowo",
"Tłuszcz", "Dęblin", "Skierniewice", "Działdowo", "Skarżysko Kamienna",
"Skierniewice", "Skierniewice", "Sochaczew", "Skierniewice",
"Skierniewice", "Mińsk Mazowiecki", NA, "Skierniewice",
"Skierniewice", "Skarżysko Kamienna", "Działdowo", NA,
"Warszawa", "Skierniewice", "Skierniewice", NA, "Skierniewice",
"Skierniewice", "Skierniewice", "Legionowo", "Warszawa Wschodnia",
NA, "Skierniewice", "Skierniewice", "Warszawa", "Skierniewice",
"Warszawa", "Modlin", "Czachówek Południowy", NA, "Działdowo",
"Skierniewice", NA, "Skierniewice", "Drzewica", "Dęblin",
NA, "Skierniewice", "Dęblin", "Dęblin", "Łowicz Główny",
"Skierniewice", "Warszawa", "Skierniewice", "Pilawa", NA,
"Łowicz", "Radom", "Pilawa", "Warszawa", "Skierniewice",
NA, "Skierniewice", "Małkinia", "Skierniewice", "Skierniewice",
"Skierniewice", "Działdowo", "Skierniewice", "Skierniewice",
"Warszawa", "Małkinia", "Skierniewice", "Skierniewice",
"Siedlce", "Skierniewice", "Skierniewice", "Działdowo",
"Dęblin", "Skierniewice", "Skierniewice", "Radom", "Skierniewice",
"Skierniewice", "Małkinia", "Skierniewice", "Warszawa",
NA, "Działdowo", "Skierniewice", "Łowicz", "Skierniewice",
"Radm", "Działdowo", "Skierniewice", "Działdowo", "Skierniewice",
NA, "Sierpc", "Skierniewice", "Warszawa", "Działdowo", NA,
"Warszawa Zachodnia", "Skierniewice", "Dęblin", "Warszawa",
"Skierniewice", "Skierniewice", "Warszawa Wileńska", NA,
NA, NA, "Skierniewice", "Dęblin", "Skarżysko Kamienna",
"Siedlce", "Skierniewice", "Skierniewice", "Warszawa Wschodnia",
"Warszawa", "Warszawa", "Siedlce", "Dęblin", "Skierniewice",
"Ostrołęka", "Warszawa Rembertów", NA, NA, NA, "Skierniewice",
"Skierniewice", "Skierniewice", "Łowicz", "Łowicz", "Góra Kalwaria",
"Małkinia", "Ostrołęka", "Skierniewice", "Warszawa Włochy",
"Tłuszcz", "Łuków", NA, "Skierniewice", "Działdowo",
"Warszawa Gołąbki", "Skierniewice", "Warszawa", NA, NA,
"Dęblin", "Skierniewice", "Skierniewice", "Skierniewice",
"Działdowo", "Skierniewice", "Radom", "Skierniewice", "Warszawa Wschodnia",
"Warszawa Zachodnia", NA, "Siedlce", "Modlin", "Skierniewice",
"Dęblin", "Warszawa Wsch", "Łowicz", "Kutno", NA, "Skierniewice",
"Radom", "Grodzisk Mazowiecki", NA, "Skierniewice", "Małkinia",
"Dęblin", "Siedlce", NA, "Skierniewice", NA, "Siedlce",
"Małkinia", "Grodzisk Mazowiecki", "Skierniewice", NA, "Małkinia",
NA, NA, "Łowicz", NA, NA, "Działdowo", "Sierpc", "Skierniewice",
NA, "Warszawa Rembertów", "Skierniewice", "Małkinia", "Skierniewice",
"Kutno", "Sierpc", "Pruszków", NA, "Dęblin", "Skierniewice",
"Siedlce", NA, "Łowicz", "Ostrołęka", "Skierniewice",
NA, "Radom", "Skierniewice", "Skierniewice", "Siedlce", "Siedlce",
NA, "Małkinia", "Warszawa", "Siedlce", "Skierniewice", NA,
"Skierniewice", "Skierniewice", NA, "Małkinia", "Skierniewice",
"Warszawa", NA, "Skierniewice", "Radom", "Skierniewice",
"Skierniewice", "Działdowo", "Skierniewice", "Siedlce",
"Radom", "Kutno", "Legionowo", NA, "Tłuszcz", "Skierniewice",
NA, "Nasielsk", "Nasielsk", "Siedlce", NA, "Warszawa", NA,
"Kutno", "Dęblin", NA, "Skierniewice", "Warszawa", "Działdowo",
"Radom", "Skierniewice", "Skierniewice", "Dęblin", "Siedlce",
NA, NA, "Siedlce", NA, NA, "Błonie", "Dęblin", "Łowicz",
"Małkinia", "Dęblin", "Siedlce", NA, "Radom", "Siedlce",
"Łowicz", "Skierniewice", NA, "Dęblin", NA, NA, "Dęblin",
"Sierpc", NA, "Skierniewice", "Sierpc", NA, NA, "Sierpc",
"Skierniewice", "Sierpc", NA, "Warszawa", NA, NA, NA, NA,
"Siedlce", NA, NA, NA, NA, "Dęblin", NA, NA, NA, NA, NA,
"Grodzisk Mazowiecki", NA, "Sierpc", NA, "Warszawa", NA,
"Dęblin", "Skierniewice", "Nasielsk", "Skierniewice", "Przysucha",
NA, "Warszawa Zachodnia", "Warszawa Rembertów", "Skierniewice",
"Małkinia", NA, "Warszawa", "Sochaczew", "Nasielsk", "Siedlce",
NA, NA, "Radom", "Warszawa Lotnisko Chopina", "Góra Kalwaria",
"Skierniewice", NA, "Siedlce", "Warszawa", "Sochaczew", "Grodzisk Mazowiecki",
"Skarżysko Kamienna", "Sierpc", NA, "Skierniewice", "Skarżysko Kamienna",
"Radom", "Radom", "Drzewica", "Skierniewice", "Warszawa",
"Ostrołęka", "Kutno", "Warszawa Gdańska", "Dęblin", NA,
"Skierniewice", NA, NA, "Łuków", "Skarżysko Kamienna",
"Radom", NA, "Warszawa", "Warszawa Gdańska", NA, "Warszawa",
NA, "Dęblin", NA, "Skierniewce", NA, "Skierniewice", "Małkinia",
NA, "Dęblin", NA, "Dęblin", "Skierniewice", "Warszawa",
NA, "Warszawa", "Skierniewice", "Działdowo", "Małkinia",
"Działdowo", NA, "Skierniewice", "Warszawa", NA, NA, "Małkinia",
"Skierniewice", "Radom", NA, "Dęblin", "Skierniewice", "Łowicz",
"Skierniewice", "Skierniewice", "Skierniewice", "Łowicz",
"Dęblin", "Warszawa Gdańska", "Małkinia", "Skierniewice",
NA, "Skierniewice", NA, NA, "Łowicz", "Radom", "Warszawa",
"Skierniewice", "Warszawa", "Sierpc", NA, "Dęblin", "Warszawa",
"Dęblin", "Otwock", "Tłuszcz", "Skierniewice", "Siedlce",
NA, "Siedlce", "Skierniewice", "Małkinia", "Wołomin", "Siedlce",
"Sierpc", "Warszawa Zachodnia", "Ostrołęka", NA, "Warszawa Wschodnia",
"Warszawa", "Warszawa", NA, "Skierniewice", NA, "Skierniewice",
NA, NA, "Skierniewice", "Łowicz", "Łowicz", "Otwock", "Łowicz Główny",
"Skierniewice", "Skierniewice", "Małkinia", "Skierniewice",
NA, "Skierniewice", "Skierniewice", "Radom", "Warszawa Wschodnia",
"Łowicz", "Działdowo", "Lotnisko Chopina", "Łowicz", NA,
"Warszawa", "Łowicz", "Małkinia", "Małkinia", "Łuków",
"Skierniewice", "Skierniewice", "Łowicz", "Skierniewice",
"Skierniewice", "Warszawa", NA, "Czeremcha", "Nasielsk",
"Czeremcha", "Tłuszcz", NA, "Siedlce", NA, "Skierniewice",
"Siedlce", "Skierniewice", NA, "Skierniewice", "Skierniewice",
"Otwock", NA, "Skierniewice", "Małkinia", "Skierniewice",
"Skierniewice", "Skierniewce", "Łowicz", "Bednary", "Sierpc",
NA, "Skierniewice", "Skierniewice", "Warszawa Zachodnia",
NA, "Pionki", "Skierniewice", NA, "Skierniewice", "Skierniewice",
"Radom", NA, "Dęblin", "Warszawa", NA, "Siedlce", "Skierniewice",
"Radom", "Radom", "Radom", "Warszawa Rembertów", "Radom",
"Skierniewice", "Małkinia", "Małkinia", NA, "Warszawa",
"Błonie", "Mińsk Mazowiecki", "Dęblin", "Skierniewice",
NA, "Siedlce", "Skierniewice", NA, "Skierniewice", "Grodzisk Mazowiecki",
"Skierniewice", "Siedlce", NA, "Tłuszcz", "Błonie", NA,
NA, NA, "Skierniewice", "Skierniewce", "Łowicz", NA, "Warszawa Zachodnia",
"Skierniewice", "Siedlce", "Siedlce", "Małkinia", "Warszawa",
"Grodzisk Mazowiecki", "Grodzisk Mazowiecki", "Otwock", NA,
"Skierniewice", NA, NA, "Skierniewice", "Kutno", "Nasielsk",
NA, "Siedlce", NA, NA, NA, "Warszawa", "Skierniewice", "Siedlce",
"Warszawa", "Siedlce", "Dęblin", "Nasielsk", NA, "Radom",
"Działdowo", "Grodzisk Mazowiecki"), pozostale = c(FALSE,
FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, TRUE, FALSE,
FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, TRUE,
FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE,
TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, TRUE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, TRUE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, TRUE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE,
FALSE, FALSE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE, TRUE,
TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, FALSE,
FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE,
TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE,
TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE,
TRUE, FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE,
FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE,
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE,
TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE,
FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, FALSE, FALSE,
TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE,
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE,
TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE,
FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE,
FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, FALSE, TRUE,
TRUE, FALSE, TRUE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE,
FALSE, FALSE, FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, FALSE,
TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE,
FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE), miasto.from = c("Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", NA, "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", NA, NA, "Warszawa",
"Warszawa", "Dęblin", NA, "Skierniewice", "Warszawa", "Warszawa",
NA, NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa", "Warszawa", "Skarżysko Kamienna", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", NA, "Warszawa",
"Łuków", NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa", "Pruszków", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
NA, NA, "Warszawa", NA, "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Sierpc",
NA, "Łuków", NA, "Warszawa", "Warszawa", "Warszawa", "Sochaczew",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", NA, "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa", NA, "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", NA, "Warszawa", "Warszawa", NA, "Warszawa",
NA, NA, "Warszawa", "Tłuszcz", "Warszawa", "Warszawa", NA,
NA, "Warszawa", NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Sierpc", "Kutno", "Warszawa",
"Warszawa", NA, "Warszawa", NA, "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa", "Warszawa", "Mińsk Mazowiecki", "Tłuszcz",
"Warszawa", "Warszawa", "Kutno", "Pilawa", "Warszawa", NA,
"Działdowo", "Siedlce", NA, NA, "Siedlce", NA, NA, NA, "Warszawa",
"Radom", NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Kutno", NA, "Warszawa", "Warszawa", "Mińsk Mazowiecki",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", NA, NA, "Warszawa", NA, "Warszawa", "Warszawa",
"Dęblin", "Warszawa", "Tłuszcz", "Działdowo", "Tłuszcz",
NA, NA, "Sierpc", "Warszawa", "Warszawa", "Siedlce", NA,
"Siedlce", "Warszawa", NA, NA, NA, "Siedlce", "Radom", "Skierniewice",
"Wschodnia", "Warszawa", "Radom", "Warszawa", NA, NA, NA,
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Sierpc",
"Warszawa", NA, NA, "Warszawa", "Warszawa", "Kutno", "Góra Kalwaria",
"Warszawa", NA, NA, "Warszawa", "Dęblin", "Warszawa", "Warszawa",
"Łuków", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa", "Warszawa", "Warszawa", NA, NA, NA, "Warszawa",
"Radom", "Radom", "Warszawa", "Warszawa", "Warszawa", "Tłuszcz",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Tłuszcz", "Warszawa", "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Tłuszcz", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Radom", "Mrozy", "Warszawa", "Warszawa", NA,
"Warszawa", "Warszawa", "Warszawa", "Łowicz", "Warszawa",
"Warszawa", "Sochaczew", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", NA, NA, "Warszawa", "Nasielsk", "Warszawa", "Warszawa",
"Warszawa", NA, "Warszawa", NA, NA, NA, NA, "Warszawa", NA,
"Działdowo", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", NA, NA, "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Tłuszcz", "Warszawa",
NA, NA, "Warszawa", "Warszawa", "Warszawa", NA, NA, "Warszawa",
NA, "Łochów", "Warszawa", NA, NA, "Warszawa", "Warszawa",
NA, NA, "Łochów", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Nasielsk", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Radom", NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Siedlce", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Działdowo", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Nasielsk", "Warszawa",
"Legionowo", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Dęblin", "Warszawa", NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, NA, "Radom", "Warszawa",
"Warszawa", NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", NA, "Warszawa",
"Warszawa", "Warszawa", "Radom", "Sierpc", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Otwock", "Warszawa",
"Warszawa", "Tłuszcz", "Siedlce", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, "Warszawa", "Warszawa",
NA, "Warszawa", "Warszawa", NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", NA, "Dęblin",
"Warszawa", NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Działdowo", "Warszawa", NA, "Dęblin", "Siedlce", "Warszawa",
NA, "Mińsk Mazowiecki", NA, "Dęblin", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Dęblin", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Radom", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Góra Kalwaria", "Drzewica",
"Warszawa", "Warszawa", "Nasielsk", "Radom", "Warszawa",
NA, "Warszawa", "Warszawa", "Warszawa", "Skarżysko Kamienna",
"Tłuszcz", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Radom", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Dęblin",
NA, "Warszawa", NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Kutno", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Błonie", NA, "Radom", "Łowicz", "Warszawa", "Warszawa",
"Gdynia", "Radom", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Nasielsk", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", NA, "Warszawa",
"Warszawa", "Radom", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Ostrołęka",
"Warszawa", "Warszawa", "Warszawa", "Radom", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Mrozy",
NA, "Warszawa", "Warszawa", "Radom", "Warszawa", NA, "Małkinia",
"Warszawa", "Warszawa", NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Góra Kalwaria", NA, "Warszawa", "Warszawa",
"Siedlce", "Warszawa", "Siedlce", "Warszawa", "Piaseczno",
NA, "Warszawa", "Warszawa", NA, "Warszawa", "Radom", "Warszawa",
NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Skierniewice", "Warszawa", "Warszawa", NA, "Warszawa", "Warka",
"Warszawa", "Otwock", "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Siedlce", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Działdowo", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", NA, "Kutno", "Warszawa", "Radom",
"Warszawa", NA, "Mińsk Mazowiecki", "Warszawa", "Warszawa",
"Skierniewice", "Warszawa", "Warszawa", "Małkinia", NA,
NA, NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Otwock", "Skierniewice", "Mińsk Mazowiecki",
"Warszawa", "Warszawa", "Tłuszcz", "Grodzisk Mazowiecki",
NA, NA, NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Pilawa", "Warszawa", "Tłuszcz", "Warszawa",
"Warszawa", "Ostrołęka", "Warszawa", NA, "Warszawa", "Warszawa",
"Błonie", "Warszawa", "Siedlce", NA, NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Góra Kalwaria", "Warszawa", "Sierpc",
NA, "Warszawa", "Dęblin", "Mińsk Mazowiecki", NA, "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, "Warszawa", NA, "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, "Warszawa", NA, NA,
"Warszawa", NA, NA, "Warszawa", "Kutno", "Warszawa", NA,
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Sierpc",
"Warszawa", "Warszawa", NA, "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa", "Tłuszcz", "Warszawa", NA, "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, "Warszawa", "Grodzisk Mazowiecki",
"Warszawa", "Warszawa", NA, "Warszawa", "Warszawa", NA, "Warszawa",
"Warszawa", "Radom", NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Dęblin",
"Sierpc", "Tłuszcz", NA, "Legionowo", "Warszawa", NA, "Sierpc",
"Sierpc", "Warszawa", NA, "Mińsk Mazowiecki", NA, "Sierpc",
"Warszawa", NA, "Warszawa", "Działdowo", "Warszawa", "Przysucha",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", NA, NA, "Warszawa",
NA, NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", NA, "Dęblin", "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa", NA, NA, "Warszawa", "Kutno", NA, "Warszawa",
"Kutno", NA, NA, "Kutno", "Warszawa", "Płock", NA, "Radom",
NA, NA, NA, NA, "Warszawa", NA, NA, NA, NA, "Radom", NA,
NA, NA, NA, NA, "Warszawa", NA, "Kutno", NA, "Łowicz", NA,
"Radom", "Warszawa", "Warszawa", "Warszawa", "Radom", NA,
"Warszawa", "Warszawa", "Warszawa", "Warszawa", NA, "Skierniewice",
"Warszawa", "Sierpc", "Warszawa", NA, NA, "Warszawa", "Modlin",
"Warszawa", "Warszawa", NA, "Warszawa", "Siedlce", "Warszawa",
"Warszawa", "Radom", "Kutno", NA, "Warszawa", "Radom", "Warszawa",
"Przysucha", "Radom", "Warszawa", "Dęblin", "Tłuszcz",
"Sierpc", "Warszawa", "Warszawa", NA, "Warszawa", NA, NA,
"Siedlce", "Radom", "Warszawa", NA, "Radom", "Warszawa",
NA, "Wołomin", NA, "Warszawa", NA, "Warszawa", NA, "Warszawa",
"Warszawa", NA, "Radom", NA, "Warszawa", "Warszawa", "Góra Kalwaria",
NA, "Pilawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
NA, "Warszawa", "Sochaczew", NA, NA, "Warszawa", "Warszawa",
"Warszawa", NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", NA, "Warszawa", NA, NA, "Warszawa",
"Warszawa", "Łowicz", "Warszawa", "Siedlce", "Kutno", NA,
"Góra Kalwaria", "Skierniewice", "Radom", "Warszawa", "Ostrołęka",
"Warszawa", "Warszawa", NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Kutno", "Warszawa", "Tłuszcz",
NA, "Grodzisk Mazowiecki", "Warszawa", "Siedlce", NA, "Warszawa",
NA, "Warszawa", NA, NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", NA, "Warszawa", "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Modlin", "Warszawa", NA, "Dęblin",
"Warszawa", "Warszawa", "Warszawa", "Siedlce", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Warszawa", "Skierniewice",
NA, "Tłuszcz", "Warszawa", "Tłuszcz", "Ostrołęka", NA,
"Warszawa", NA, "Warszawa", "Warszawa", "Warszawa", NA, "Warszawa",
"Warszawa", "Łowicz", NA, "Warszawa", "Warszawa", "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Sochaczew", "Kutno",
NA, "Warszawa", "Warszawa", "Warszawa", NA, "Radom", "Warszawa",
NA, "Warszawa", "Warszawa", "Warszawa", NA, "Warszawa", "Łuków",
NA, "Warszawa", "Warszawa", "Mińsk Mazowiecki", "Warszawa",
"Warszawa", "Zielonka", "Dęblin", "Warszawa", "Warszawa",
"Warszawa", NA, "Małkinia", "Mińsk Mazowiecki", "Warszawa",
"Warszawa", "Warszawa", NA, "Warszawa", "Warszawa", NA, "Warszawa",
"Warszawa", "Warszawa", "Warszawa", NA, "Radom", "Warszawa",
NA, NA, NA, "Warszawa", "Warszawa", "Warszawa", NA, "Otwock",
"Warszawa", "Czeremcha", "Warszawa", "Warszawa", "Grodzisk Mazowiecki",
"Siedlce", "Siedlce", "Skierniewice", NA, "Warszawa", NA,
NA, "Warszawa", "Sierpc", "Sierpc", NA, "Warszawa", NA, NA,
NA, "Otwock", "Warszawa", "Warszawa", "Siedlce", "Warszawa",
"Radom", "Tłuszcz", NA, "Warszawa", "Warszawa", "Warszawa"
), miasto.to = c("Działdowo", "Skierniewice", "Działdowo",
"Skierniewice", "Działdowo", NA, "Skierniewice", "Modlin",
"Łuków", "Skierniewice", "Deblin", "Skierniewice", "Łowcz Główny",
"Skierniewice", "Małkinia", "Skierniewice", NA, NA, "Skierniewice",
"Dęblin", "Radom", NA, "Warszawa", "Skierniewice", "Skierniewice",
NA, NA, "Śiedlce", "Małkinia", "Skierniewice", "Łuków",
"Działdowo", NA, "Warszawa", "Skierniewice", "Warszawa",
"Małkinia", "Skierniewice", "Skierniewice", "Łowicz", "Skierniewice",
NA, "Warszawa", "Warszawa", NA, "Skierniewice", "Skierniewice",
"Skierniewice", "Dęblin", NA, "Siedlce", "Otwock", "Skierniewice",
"Skarżysko Kamienna", "Skarżysko Kamienna", "Skarżysko Kamienna",
"Dęblin", "Skierniewice", "Dęblin", "Skierniewice", NA,
NA, "Łowicz", NA, "Skierniewice", "Małkinia", "Skierniewice",
NA, "Łowiz Główny", "Działdowo", "Skierniewice", "Kutno",
"Skierniewice", "Łowicz", "Skierniewice", "Łowicz", "Skierniewice",
"Kutno", NA, "Warszawa", NA, "Działdowo", "Płock", "Warszawa",
"Warszawa", "Dęblin", "Łuków", "Małkinia", "Skierniewice",
NA, "Dęblin", "Łowicz", "Otwock", "Skierniewice", "Działdowo",
"Skierniewice", NA, "Dęblin", NA, "Działdowo", NA, "Małkinia",
"Małkinia", "Dęblin", "Łuków", "Łuków", "Małkinia",
"Skierniewice", "Skarżysko Kamienna", NA, "Łuków", "Łowicz",
"Małkinia", "Skierniewice", "Skierniewice", NA, "Łowicz",
"Skierniewice", "Warszawa", "Łowicz", NA, "Skierniewice",
"Łowicz", "Łowicz", "Działdowo", "Małkina", "Dęblin",
"Góra Kalwaria", "Dęblin", "Skierniewice", "Skierniewice",
"Dęblin", NA, "Dęblin", "Łowicz", "Łowicz", "Łuków",
"Łuków", "Dęblin", NA, "Działdowo", "Radom", NA, "Dęblin",
NA, NA, "Zachodnia", "Ostrołęka", "Skierniewice", "Siedlce",
NA, NA, "Skierniewice", NA, "Łowicz", "Dęblin", "Kutno",
"Łuków", "Mińsk Mazowiecki", "Siedlce", "Nasielsk", "Kutno",
"Sierpc", "Łowicz", "Dęblin", NA, "Dęblin", NA, "Dęblin",
"Skieniewice", "Radom", NA, "Radom", "Skierniewice", "Radom",
"Małkinia", "Działdowo", "Radom", "Sierpc", "Warszawa",
"Dęblin", NA, "Warszawa", "Czeremcha", NA, NA, "Czeremcha",
NA, NA, NA, "Dęblin", "Dęblin", NA, "Radom", "Łowicz",
"Radom", "Łowicz", "Płock", NA, "Skierniewice", "Skierniewice",
"Błonie", "Radom", "Tłuszcz", "Łowicz", "Łuków", "Tłuszcz",
"Łowicz", NA, NA, "Łowicz", NA, "Działdowo", "Dęblin",
"Warszawa", "Skierniewice", "Ostrołęka", "Warszawa", "Legionowo",
NA, NA, "Kutno", "Skierniewice", "Małkinia", "Warszawa",
NA, "Czeremcha", "Skierniewice", NA, NA, NA, "Warszawa",
"Warszawa", "Warszawa", "Warszawa", "Małkinia", "Warszawa",
"Skierniewice", NA, NA, NA, "Skierniewice", "Małkinia",
"Dęblin", "Łuków", "Płock", "Radom", NA, NA, "Nasielsk",
"Dęblin", "Sierpc", "Warszawa", "Skierniewice", NA, NA,
"Skierniewice", "Warszawa", "Małkinia", "Skierniewice",
"Siedlce", "Skierniewice", "Skierniewice", "Skierniewice",
"Skierniewice", NA, "Dęblin", "Małkinia", "Skarżysko Kamienna",
NA, NA, NA, "Siedlce", "Dęblin", "Drzewica", "Łowicz",
"Kutno", "Skierniewice", "Ostrołęka", "Sochaczew", "Skierniewice",
"Warszawa", "Łowicz", "Tłuszcz", "Ostrołęka", "Skierniewice",
"Skierniewice", NA, "Skierniewice", "Skarżysko Kamienna",
"Skierniewice", "Kutno", "Kutno", "Skierniewice", "Warszawa",
"Ostrołęka", "Warszawa", "Siedlce", "Łowicz", "Siedlce",
"Działdowo", "Dęblin", "Sochaczew", "Pilawa", "Nasielsk",
NA, "Skarżysko Kamienna", "Skierniewice", "Dęblin", "Dęblin",
"Skierniewice", "Skierniewice", "Warszawa", "Skarżysko Kamienna",
"Małkinia", "Skierniewice", "Skierniewice", NA, "Działdowo",
"Skierniewice", "Działdowo", "Skierniewice", "Działdowo",
"Dęblin", "Radom", "Działdowo", "Kutno", "Małkinia", NA,
NA, "Dęblin", "Sierpc", "Dęblin", "Tłuszcz", "Dęblin",
NA, "Warszawa", NA, NA, NA, NA, "Radom", NA, "Warszawa",
"Skierniewice", "Skierniewice", "Dęblin", "Skierniewice",
"Łuków", NA, NA, "Skierniewice", NA, "Skarżysko Kamienna",
"Skierniewice", "Skierniewice", "Skierniewice", "Dęblin",
"Łochów", "Skierniewice", NA, NA, "Skierniewice", "Warszawa",
"Skierniewice", NA, NA, "Działdowo", NA, "Tłuszcz", "Skierniewice",
NA, NA, "Radom", "Łuków", NA, NA, "Tłuszcz", "Łuków",
"Dęblin", "Skierniewice", "Dęblin", "Działdowo", "Sierpc",
"Działdowo", "Działdowo", "Skierniewice", "Skierniewice",
"Dęblin", NA, "Łowicz", "Skierniewice", "Skarżysko Kamienna",
"Skierniewice", "Skierniewice", "Łuków", "Czeremcha", "Skierniewice",
"Działdowo", "Łuków", "Skierniewice", "Działdowo", "Łowicz",
"Skierniewice", "Skierniewice", "Łowicz", "Skierniewice",
"Łowicz", "Łowicz", "Małkinia", "Skierniewice", "Warszawa",
"Siedlce", "Siedlce", "Skierniewice", "Łuków", "Sierpc",
"Skierniewice", "Tłuszcz", "Skierniewice", "Skierniewice",
"Łuków", "Skierniewice", "Warszawa", "Skierniewice", NA,
"Skierniewice", "Radom", "Małkinia", "Skierniewice", "Skierniewice",
"Dęblin", NA, NA, "Warszawa", "Skierniewice", "Siedlce",
NA, "Warszawa", "Siedlce", "Skierniewice", "Łowicz", "Małkinia",
"Warszawa", "Dęblin", "Skierniewice", NA, "Skierniewice",
"Działdowo", "Małkinia", "Dęblin", "Kutno", "Skierniewice",
"Działdowo", "Siedlce", "Dęblin", "Skarżysko Kamienna",
"Błonie", "Siedlce", "Skierniewice", "Ostrołęka", "Czeremcha",
"Skierniewice", "Siedlce", "Działdowo", "Skierniewice",
"Łuków", "Skierniewice", "Działdowo", "Skierniewice",
NA, "Warszawa", "Dęblin", "Skierniewice", "Łuków", "Skierniewice",
"Radom", NA, "Łowicz", "Łowicz", "Dęblin", "Skierniewice",
"Skierniewice", "Małkinia", NA, "Mrozy", "Skierniewice",
NA, "Łowicz", "Łowicz", NA, "Skarżysko Kamienna", "Skierniewice",
"Łuków", "Dęblin", "Dęblin", "Skierniewice", "Dęblin",
NA, "Radom", "Małkinia", NA, "Działdowo", "Małkinia",
"Skierniewice", "Małkinia", "Małkinia", "Skierniewice",
"Modlin", "Siedlce", "Skierniewice", "Warszawa", "Małkinia",
NA, "Warszawa", "Łuków", "Skarżysko Kamienna", NA, "Warszawa",
NA, "Warszawa", "Łowicz", "Skierniewice", "Skierniewice",
"Łuków", "Siedlce", "Skierniewice", "Skierniewice", "Modlin",
NA, "Skierniewice", "Skierniewice", "Łowicz", "Skierniewice",
"Skierniewice", "Łuków", "Skierniewice", "Łuków", "Małkinia",
"Góra Kalwaria", "Małkinia", "Skierniewice", "Małkinia",
"Małkinia", "Dęblin", "Skierniewice", "Siedlce", "Skierniewice",
"Dęblin", "Łuków", "Małkinia", "Działdowo", "Warszawa",
"Radom", "Skierniewice", "Skierniewice", "Sierpc", "Dęblin",
"Działdowo", NA, "Warszawa", "Małkinia", "Łuków", "Skarżysko Kamienna",
"Ostrołęka", "Skierniewice", "Skierniewice", "Łuków",
"Skierniewice", "Łuków", "Łowicz", "Warszawa", NA, "Małkinia",
"Dęblin", "Skierniewice", "Działdowo", "Działdowo", "Skarżysko Kamienna",
"Góra Kalwaria", NA, "Łowicz", NA, "Skierniewice", "Skierniewice",
"Łowicz", "Radom", "Skierniewice", "Skierniewice", "Siedlce",
"Sierpc", "Dęblin", "Dęblin", "Działdowo", "Sochaczew",
"Łuków", "Gdynia", "Warszawa", NA, "Drzewica", "Warszawa",
"Skierniewice", "Łuków", "Warszawa", "Drzewica", "Działdowo",
"Działdowo", "Piaseczno", "Łuków", "Łowicz", "Warszawa",
"Radom", "Warszawa", "Łuków", "Małkinia", "Skierniewice",
"Siedlce", NA, "Małkinia", "Łuków", "Skierniewice", "Skierniewice",
"Łowicz", "Góra Kalwaria", "Skierniewice", "Radom", "Radom",
"Dęblin", "Warszawa", "Łowicz", "Radom", "Radom", "Skierniewice",
"Skierniewice", NA, "Skierniewice", "Siedlce", "Warszawa",
"Skierniewice", "Skierniewice", "Skierniewice", "Skierniewice",
"Skierniewice", "Skierniewice", "Działdowo", "Tłuszcz",
"Dęblin", "Skierniewice", "Działdowo", "Skarżysko Kamienna",
"Skierniewice", "Skierniewice", "Sochaczew", "Skierniewice",
"Skierniewice", "Mińsk Mazowiecki", NA, "Skierniewice",
"Skierniewice", "Skarżysko Kamienna", "Działdowo", NA,
"Warszawa", "Skierniewice", "Skierniewice", NA, "Skierniewice",
"Skierniewice", "Skierniewice", "Legionowo", "Warszawa",
NA, "Skierniewice", "Skierniewice", "Warszawa", "Skierniewice",
"Warszawa", "Modlin", "Czachówek Południowy", NA, "Działdowo",
"Skierniewice", NA, "Skierniewice", "Drzewica", "Dęblin",
NA, "Skierniewice", "Dęblin", "Dęblin", "Łowicz", "Skierniewice",
"Warszawa", "Skierniewice", "Pilawa", NA, "Łowicz", "Radom",
"Pilawa", "Warszawa", "Skierniewice", NA, "Skierniewice",
"Małkinia", "Skierniewice", "Skierniewice", "Skierniewice",
"Działdowo", "Skierniewice", "Skierniewice", "Warszawa",
"Małkinia", "Skierniewice", "Skierniewice", "Siedlce", "Skierniewice",
"Skierniewice", "Działdowo", "Dęblin", "Skierniewice",
"Skierniewice", "Radom", "Skierniewice", "Skierniewice",
"Małkinia", "Skierniewice", "Warszawa", NA, "Działdowo",
"Skierniewice", "Łowicz", "Skierniewice", "Radom", "Działdowo",
"Skierniewice", "Działdowo", "Skierniewice", NA, "Sierpc",
"Skierniewice", "Warszawa", "Działdowo", NA, "Warszawa",
"Skierniewice", "Dęblin", "Warszawa", "Skierniewice", "Skierniewice",
"Warszawa", NA, NA, NA, "Skierniewice", "Dęblin", "Skarżysko Kamienna",
"Siedlce", "Skierniewice", "Skierniewice", "Warszawa", "Warszawa",
"Warszawa", "Siedlce", "Dęblin", "Skierniewice", "Ostrołęka",
"Warszawa", NA, NA, NA, "Skierniewice", "Skierniewice", "Skierniewice",
"Łowicz", "Łowicz", "Góra Kalwaria", "Małkinia", "Ostrołęka",
"Skierniewice", "Warszawa", "Tłuszcz", "Łuków", NA, "Skierniewice",
"Działdowo", "Warszawa", "Skierniewice", "Warszawa", NA,
NA, "Dęblin", "Skierniewice", "Skierniewice", "Skierniewice",
"Działdowo", "Skierniewice", "Radom", "Skierniewice", "Warszawa",
"Warszawa", NA, "Siedlce", "Modlin", "Skierniewice", "Dęblin",
"Warszawa", "Łowicz", "Kutno", NA, "Skierniewice", "Radom",
"Grodzisk Mazowiecki", NA, "Skierniewice", "Małkinia", "Dęblin",
"Siedlce", NA, "Skierniewice", NA, "Siedlce", "Małkinia",
"Grodzisk Mazowiecki", "Skierniewice", NA, "Małkinia", NA,
NA, "Łowicz", NA, NA, "Działdowo", "Sierpc", "Skierniewice",
NA, "Warszawa", "Skierniewice", "Małkinia", "Skierniewice",
"Kutno", "Sierpc", "Pruszków", NA, "Dęblin", "Skierniewice",
"Siedlce", NA, "Łowicz", "Ostrołęka", "Skierniewice",
NA, "Radom", "Skierniewice", "Skierniewice", "Siedlce", "Siedlce",
NA, "Małkinia", "Warszawa", "Siedlce", "Skierniewice", NA,
"Skierniewice", "Skierniewice", NA, "Małkinia", "Skierniewice",
"Warszawa", NA, "Skierniewice", "Radom", "Skierniewice",
"Skierniewice", "Działdowo", "Skierniewice", "Siedlce",
"Radom", "Kutno", "Legionowo", NA, "Tłuszcz", "Skierniewice",
NA, "Nasielsk", "Nasielsk", "Siedlce", NA, "Warszawa", NA,
"Kutno", "Dęblin", NA, "Skierniewice", "Warszawa", "Działdowo",
"Radom", "Skierniewice", "Skierniewice", "Dęblin", "Siedlce",
NA, NA, "Siedlce", NA, NA, "Błonie", "Dęblin", "Łowicz",
"Małkinia", "Dęblin", "Siedlce", NA, "Radom", "Siedlce",
"Łowicz", "Skierniewice", NA, "Dęblin", NA, NA, "Dęblin",
"Sierpc", NA, "Skierniewice", "Sierpc", NA, NA, "Sierpc",
"Skierniewice", "Sierpc", NA, "Warszawa", NA, NA, NA, NA,
"Siedlce", NA, NA, NA, NA, "Dęblin", NA, NA, NA, NA, NA,
"Grodzisk Mazowiecki", NA, "Sierpc", NA, "Warszawa", NA,
"Dęblin", "Skierniewice", "Nasielsk", "Skierniewice", "Przysucha",
NA, "Warszawa", "Warszawa", "Skierniewice", "Małkinia",
NA, "Warszawa", "Sochaczew", "Nasielsk", "Siedlce", NA, NA,
"Radom", "Warszawa", "Góra Kalwaria", "Skierniewice", NA,
"Siedlce", "Warszawa", "Sochaczew", "Grodzisk Mazowiecki",
"Skarżysko Kamienna", "Sierpc", NA, "Skierniewice", "Skarżysko Kamienna",
"Radom", "Radom", "Drzewica", "Skierniewice", "Warszawa",
"Ostrołęka", "Kutno", "Warszawa", "Dęblin", NA, "Skierniewice",
NA, NA, "Łuków", "Skarżysko Kamienna", "Radom", NA, "Warszawa",
"Warszawa", NA, "Warszawa", NA, "Dęblin", NA, "Skierniewice",
NA, "Skierniewice", "Małkinia", NA, "Dęblin", NA, "Dęblin",
"Skierniewice", "Warszawa", NA, "Warszawa", "Skierniewice",
"Działdowo", "Małkinia", "Działdowo", NA, "Skierniewice",
"Warszawa", NA, NA, "Małkinia", "Skierniewice", "Radom",
NA, "Dęblin", "Skierniewice", "Łowicz", "Skierniewice",
"Skierniewice", "Skierniewice", "Łowicz", "Dęblin", "Warszawa",
"Małkinia", "Skierniewice", NA, "Skierniewice", NA, NA,
"Łowicz", "Radom", "Warszawa", "Skierniewice", "Warszawa",
"Sierpc", NA, "Dęblin", "Warszawa", "Dęblin", "Otwock",
"Tłuszcz", "Skierniewice", "Siedlce", NA, "Siedlce", "Skierniewice",
"Małkinia", "Wołomin", "Siedlce", "Sierpc", "Warszawa",
"Ostrołęka", NA, "Warszawa", "Warszawa", "Warszawa", NA,
"Skierniewice", NA, "Skierniewice", NA, NA, "Skierniewice",
"Łowicz", "Łowicz", "Otwock", "Łowicz", "Skierniewice",
"Skierniewice", "Małkinia", "Skierniewice", NA, "Skierniewice",
"Skierniewice", "Radom", "Warszawa", "Łowicz", "Działdowo",
"Lotnisko Chopina", "Łowicz", NA, "Warszawa", "Łowicz",
"Małkinia", "Małkinia", "Łuków", "Skierniewice", "Skierniewice",
"Łowicz", "Skierniewice", "Skierniewice", "Warszawa", NA,
"Czeremcha", "Nasielsk", "Czeremcha", "Tłuszcz", NA, "Siedlce",
NA, "Skierniewice", "Siedlce", "Skierniewice", NA, "Skierniewice",
"Skierniewice", "Otwock", NA, "Skierniewice", "Małkinia",
"Skierniewice", "Skierniewice", "Skierniewice", "Łowicz",
"Bednary", "Sierpc", NA, "Skierniewice", "Skierniewice",
"Warszawa", NA, "Pionki", "Skierniewice", NA, "Skierniewice",
"Skierniewice", "Radom", NA, "Dęblin", "Warszawa", NA, "Siedlce",
"Skierniewice", "Radom", "Radom", "Radom", "Warszawa", "Radom",
"Skierniewice", "Małkinia", "Małkinia", NA, "Warszawa",
"Błonie", "Mińsk Mazowiecki", "Dęblin", "Skierniewice",
NA, "Siedlce", "Skierniewice", NA, "Skierniewice", "Grodzisk Mazowiecki",
"Skierniewice", "Siedlce", NA, "Tłuszcz", "Błonie", NA,
NA, NA, "Skierniewice", "Skierniewice", "Łowicz", NA, "Warszawa",
"Skierniewice", "Siedlce", "Siedlce", "Małkinia", "Warszawa",
"Grodzisk Mazowiecki", "Grodzisk Mazowiecki", "Otwock", NA,
"Skierniewice", NA, NA, "Skierniewice", "Kutno", "Nasielsk",
NA, "Siedlce", NA, NA, NA, "Warszawa", "Skierniewice", "Siedlce",
"Warszawa", "Siedlce", "Dęblin", "Nasielsk", NA, "Radom",
"Działdowo", "Grodzisk Mazowiecki")), .Names = c("dates",
"awaria", "utrudnienia", "odwolane", "relacja", "from", "to",
"pozostale", "miasto.from", "miasto.to"), row.names = c("1",
"2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13",
"14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24",
"25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35",
"36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46",
"47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57",
"58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68",
"69", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79",
"80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "90",
"91", "92", "93", "94", "95", "96", "97", "98", "99", "100",
"101", "102", "103", "104", "105", "106", "107", "108", "109",
"110", "111", "112", "113", "114", "115", "116", "117", "118",
"119", "120", "121", "122", "123", "124", "125", "126", "127",
"128", "129", "130", "131", "132", "133", "134", "135", "136",
"137", "138", "139", "140", "141", "142", "143", "144", "145",
"146", "147", "148", "149", "150", "151", "152", "153", "154",
"155", "156", "157", "158", "159", "160", "161", "162", "163",
"164", "165", "166", "167", "168", "169", "170", "171", "172",
"173", "174", "175", "176", "177", "178", "179", "180", "181",
"182", "183", "184", "185", "186", "187", "188", "189", "190",
"191", "192", "193", "194", "195", "196", "197", "198", "199",
"200", "201", "202", "203", "204", "205", "206", "207", "208",
"209", "210", "211", "212", "213", "214", "215", "216", "217",
"218", "219", "220", "221", "222", "223", "224", "225", "226",
"227", "228", "229", "230", "231", "232", "233", "234", "235",
"236", "237", "238", "239", "240", "241", "242", "243", "244",
"245", "246", "247", "248", "249", "250", "251", "252", "253",
"254", "255", "256", "257", "258", "259", "260", "261", "262",
"263", "264", "265", "266", "267", "268", "269", "270", "271",
"272", "273", "274", "275", "276", "277", "278", "279", "280",
"281", "282", "283", "284", "285", "286", "287", "288", "289",
"290", "291", "292", "293", "294", "295", "296", "297", "298",
"299", "300", "301", "302", "303", "304", "305", "306", "307",
"308", "309", "310", "311", "312", "313", "314", "315", "316",
"href", "href1", "href2", "href3", "href4", "href5", "href6",
"href7", "href8", "href9", "href10", "href11", "href12", "href13",
"href14", "href15", "href16", "href17", "href18", "href19", "href20",
"href21", "href22", "href23", "href24", "href25", "href26", "href27",
"href28", "href29", "href30", "href31", "href32", "href33", "href34",
"href35", "href36", "href37", "href38", "href39", "href40", "href41",
"href42", "href43", "href44", "href45", "href46", "href47", "href48",
"href49", "href50", "href51", "href52", "href53", "href54", "href55",
"href56", "href57", "href58", "href59", "href60", "href61", "href62",
"href63", "href64", "href65", "href66", "href67", "href68", "href69",
"href70", "href71", "href72", "href73", "href74", "href75", "href76",
"href77", "href78", "href79", "href80", "href81", "href82", "href83",
"href84", "href85", "href86", "href87", "href88", "href89", "href90",
"href91", "href92", "href93", "href94", "href95", "href96", "href97",
"href98", "href99", "href100", "href101", "href102", "href103",
"href104", "href105", "href106", "href107", "href108", "href109",
"href110", "href111", "href112", "href113", "href114", "href115",
"href116", "href117", "href118", "href119", "href120", "href121",
"href122", "href123", "href124", "href125", "href126", "href127",
"href128", "href129", "href130", "href131", "href132", "href133",
"href134", "href135", "href136", "href137", "href138", "href139",
"href140", "href141", "href142", "href143", "href144", "href145",
"href146", "href147", "href148", "href149", "href150", "href151",
"href152", "href153", "href154", "href155", "href156", "href157",
"href158", "href159", "href160", "href161", "href162", "href163",
"href164", "href165", "href166", "href167", "href168", "href169",
"href170", "href171", "href172", "href173", "href174", "href175",
"href176", "href177", "href178", "href179", "href180", "href181",
"href182", "href183", "href184", "href185", "href186", "href187",
"href188", "href189", "href190", "href191", "href192", "href193",
"href194", "href195", "href196", "href197", "href198", "href199",
"href200", "href201", "href202", "href203", "href204", "href205",
"href206", "href207", "href208", "href209", "href210", "href211",
"href212", "href213", "href214", "href215", "href216", "href217",
"href218", "href219", "href220", "href221", "href222", "href223",
"href224", "href225", "href226", "href227", "href228", "href229",
"href230", "href231", "href232", "href233", "href234", "href235",
"href236", "href237", "href238", "href239", "href240", "href241",
"href242", "href243", "href244", "href245", "href246", "href247",
"href248", "href249", "href250", "href251", "href252", "href253",
"href254", "href255", "href256", "href257", "href258", "href259",
"href260", "href261", "href262", "href263", "href264", "href265",
"href266", "href267", "href268", "href269", "href270", "href271",
"href272", "href273", "href274", "href275", "href276", "href277",
"href278", "href279", "href280", "href281", "href282", "href283",
"href284", "href285", "href286", "href287", "href288", "href289",
"href290", "href291", "href292", "href293", "href294", "href295",
"href296", "href297", "href298", "href299", "href300", "href301",
"href302", "href303", "href304", "href305", "href306", "href307",
"href308", "href309", "href310", "href311", "href312", "href313",
"href314", "href315", "href316", "href317", "href318", "href319",
"href320", "href321", "href322", "href323", "href324", "href325",
"href326", "href327", "href328", "href329", "href330", "href331",
"href332", "href333", "href334", "href335", "href336", "href337",
"href338", "href339", "href340", "href341", "href342", "href343",
"href344", "href345", "href346", "href347", "href348", "href349",
"href350", "href351", "href352", "href353", "href354", "href355",
"href356", "href357", "href358", "href359", "href360", "href361",
"href362", "href363", "href364", "href365", "href366", "href367",
"href368", "href369", "href370", "href371", "href372", "href373",
"href374", "href375", "href376", "href377", "href378", "href379",
"href380", "href381", "href382", "href383", "href384", "href385",
"href386", "href387", "href388", "href389", "href390", "href391",
"href392", "href393", "href394", "href395", "href396", "href397",
"href398", "href399", "href400", "href401", "href402", "href403",
"href404", "href405", "href406", "href407", "href408", "href409",
"href410", "href411", "href412", "href413", "href414", "href415",
"href416", "href417", "href418", "href419", "href420", "href421",
"href422", "href423", "href424", "href425", "href426", "href427",
"href428", "href429", "href430", "href431", "href432", "href433",
"href434", "href435", "href436", "href437", "href438", "href439",
"href440", "href441", "href442", "href443", "href444", "href445",
"href446", "href447", "href448", "href449", "href450", "href451",
"href452", "href453", "href454", "href455", "href456", "href457",
"href458", "href459", "href460", "href461", "href462", "href463",
"href464", "href465", "href466", "href467", "href468", "href469",
"href470", "href471", "href472", "href473", "href474", "href475",
"href476", "href477", "href478", "href479", "href480", "href481",
"href482", "href483", "href484", "href485", "href486", "href487",
"href488", "href489", "href490", "href491", "href492", "href493",
"href494", "href495", "href496", "href497", "href498", "href499",
"href500", "href501", "href502", "href503", "href504", "href505",
"href506", "href507", "href508", "href509", "href510", "href511",
"href512", "href513", "href514", "href515", "href516", "href517",
"href518", "href519", "href520", "href521", "href522", "href523",
"href524", "href525", "href526", "href527", "href528", "href529",
"href530", "href531", "href532", "href533", "href534", "href535",
"href536", "href537", "href538", "href539", "href540", "href541",
"href542", "href543", "href544", "href545", "href546", "href547",
"href548", "href549", "href550", "href551", "href552", "href553",
"href554", "href555", "href556", "href557", "href558", "href559",
"href560", "href561", "href562", "href563", "href564", "href565",
"href566", "href567", "href568", "href569", "href570", "href571",
"href572", "href573", "href574", "href575", "href576", "href577",
"href578", "href579", "href580", "href581", "href582", "href583",
"href584", "href585", "href586", "href587", "href588", "href589",
"href590", "href591", "href592", "href593", "href594", "href595",
"href596", "href597", "href598", "href599", "href600", "href601",
"href602", "href603", "href604", "href605", "href606", "href607",
"href608", "href609", "href610", "href611", "href612", "href613",
"href614", "href615", "href616", "href617", "href618", "href619",
"href620", "href621", "href622", "href623", "href624", "href625",
"href626", "href627", "href628", "href629", "href630", "href631",
"href632", "href633", "href634", "href635", "href636", "href637",
"href638", "href639", "href640", "href641", "href642", "href643",
"href644", "href645", "href646", "href647", "href648", "href649",
"href650", "href651", "href652", "href653", "href654", "href655",
"href656", "href657", "href658", "href659", "href660", "href661",
"href662", "href663", "href664", "href665", "href666", "href667",
"href668", "href669", "href670", "href671", "href672", "href673",
"href674", "href675", "href676", "href677", "href678", "href679",
"href680", "href681", "href682", "href683", "href684", "href685",
"href686", "href687", "href688", "href689", "href690", "href691",
"href692", "href693", "href694", "href695", "href696", "href697",
"href698", "href699", "href700", "href701", "href702", "href703",
"href704", "href705", "href706", "href707", "href708", "href709",
"href710", "href711", "href712", "href713", "href714", "href715",
"href716", "href717", "href718", "href719", "href720", "href721",
"href722", "href723", "href724", "href725", "href726", "href727",
"href728", "href729", "href730", "href731", "href732", "href733",
"href734", "href735", "href736", "href737", "href738", "href739",
"href740", "href741", "href742", "href743", "href744", "href745",
"href746", "href747", "href748", "href749", "href750", "href751",
"href752", "href753", "href754", "href755", "href756", "href757",
"href758", "href759", "href760", "href761", "href762", "href763",
"href764", "href765", "href766", "href767", "href768", "href769",
"href770", "href771", "href772", "href773", "href774", "href775",
"href776", "href777", "href778", "href779", "href780", "href781",
"href782", "href783", "href784", "href785", "href786", "href787",
"href788", "href789", "href790", "href791", "href792", "href793",
"href794", "href795", "href796", "href797", "href798", "href799",
"href800", "href801", "href802", "href803", "href804", "href805",
"href806", "href807", "href808", "href809", "href810", "href811",
"href812", "href813", "href814", "href815", "href816", "href817",
"href818", "href819", "href820", "href821", "href822", "href823",
"href824", "href825", "href826", "href827", "href828", "href829",
"href830", "href831", "href832", "href833", "href834", "href835",
"href836", "href837", "href838", "href839", "href840", "href841",
"href842", "href843", "href844", "href845", "href846", "href847",
"href848", "href849", "href850", "href851", "href852", "href853",
"href854", "href855", "href856", "href857", "href858", "href859",
"href860", "href861", "href862", "href863", "href864", "href865",
"href866", "href867", "href868", "href869", "href870", "href871",
"href872", "href873", "href874", "href875", "href876", "href877",
"href878", "href879", "href880", "href881", "href882", "href883",
"href884", "href885", "href886", "href887", "href888", "href889",
"href890", "href891", "href892", "href893", "href894", "href895",
"href896", "href897", "href898", "href899"), class = "data.frame")
|
8494589469d104ef34520dedbb6efc8cad8e9865
|
a6c12efd32ec6a240b259b2bca8f697c06aa258b
|
/rl/multi-armed-bandit/click_through_rate.R
|
793f6a45ece372fd89400aa54a9df49952939a8c
|
[] |
no_license
|
krzjoa/learning-R
|
545334791f7e0a1264ff9d2c4895adcd74d44183
|
ec1c1a1ce00c0fcf35d34498cbfd31e56b9c8923
|
refs/heads/master
| 2021-06-27T22:13:39.486064
| 2020-10-12T22:09:58
| 2020-10-12T22:09:58
| 172,254,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 875
|
r
|
click_through_rate.R
|
library(ggplot2)
library(magrittr)
# Plot the density of a Beta(a, b) distribution over [0, 1].
#
# a, b: shape parameters of the beta distribution.
# Returns a ggplot object containing a single density curve evaluated
# on a 200-point grid.
plot_beta_distribution <- function(a, b) {
  grid_x <- seq(0, 1, length.out = 200)
  density_df <- data.frame(y = dbeta(grid_x, a, b),
                           x = grid_x)
  ggplot(density_df) +
    geom_line(aes(x = x, y = y))
}
# Simulate sequential Bayesian updating of a Beta(1, 1) prior on a
# Bernoulli success rate.
#
# n.tosses:  number of Bernoulli trials to simulate.
# true.rate: true success probability used to draw each trial.
# show:      trial indices at which the current posterior is plotted
#            via plot_beta_distribution().
#
# After each trial, `a` accumulates successes and `b` failures, so
# Beta(a, b) is the running posterior.
run_experiment <- function(n.tosses = 20, true.rate = 0.3, show = c(1, 10, 100)){
  # Beta(1, 1) = uniform prior.
  a <- 1
  b <- 1
  for (trial in seq_len(n.tosses)) {
    success <- runif(1) < true.rate
    if (success) {
      a <- a + 1
    } else {
      b <- b + 1
    }
    if (trial %in% show) {
      # print() is needed so the ggplot renders inside the loop.
      print(plot_beta_distribution(a, b))
    }
  }
}
# Run a long experiment; checkpoints are spaced densely at the start and
# sparsely later so the progressive narrowing of the posterior is visible.
run_experiment(n.tosses = 1000, show = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                                         20, 50, 100, 200, 300, 400, 500,
                                         600, 700, 800, 900, 1000))
|
0000009f89d5612c870a4cfb8a4462f3373768d3
|
693d88d479f96e91be7607de520875861f3f6e4d
|
/man/infsearch.Rd
|
ee67df9353588528b02546d0064066a05b3b7c50
|
[] |
no_license
|
vijaydairyf/DMMongoDB
|
c4239c144f3357856177855e2fa82baf72bd34bf
|
920bbbbaed086df6d271be6c47dd2f4bcbe4341a
|
refs/heads/master
| 2020-12-05T19:20:18.297176
| 2019-12-18T21:44:41
| 2019-12-18T21:44:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 857
|
rd
|
infsearch.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/infsearch.R
\name{infsearch}
\alias{infsearch}
\title{Retrieves infrastructure information from the DataMuster database}
\usage{
infsearch(property = NULL, active = NULL, infstype = NULL,
username = NULL, password = NULL)
}
\arguments{
\item{property}{the name of the property to search the database}
\item{username}{a username to access the DataMuster database, contact Lauren O'Connor for database access}
\item{password}{a password to access the DataMuster database}
}
\value{
a dataframe of infrastructure and associated information
}
\description{
This function allows infrastructure information to be retrieved from the DataMuster database via the DataMuster website
}
\author{
Dave Swain \email{d.swain@cqu.edu.au} and Lauren O'Connor \email{l.r.oconnor@cqu.edu.au}
}
|
6e1399cd1fc1b28ff855dbb4ef104b78d9fb4fea
|
2c1f0d3bf36a1d4ea7ce73f8eab4c301b35fa281
|
/man/summarize_weighted_corr.Rd
|
7701b2e3343447d2d492739699981a96a1597a6f
|
[] |
no_license
|
yitao-li/sparklyr.flint
|
c72b00b9cb85ea2c915d3746f6c8428b1465bea5
|
59a8a14ba9bf049e0de1cec3b6581f55f25c0018
|
refs/heads/main
| 2023-03-03T06:36:30.429384
| 2020-12-14T15:51:35
| 2020-12-14T15:51:35
| 338,419,405
| 1
| 0
| null | 2021-02-12T19:56:38
| 2021-02-12T19:56:37
| null |
UTF-8
|
R
| false
| true
| 3,482
|
rd
|
summarize_weighted_corr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarizers.R
\name{summarize_weighted_corr}
\alias{summarize_weighted_corr}
\title{Pearson weighted correlation summarizer}
\usage{
summarize_weighted_corr(
ts_rdd,
xcolumn,
ycolumn,
weight_column,
key_columns = list(),
incremental = FALSE
)
}
\arguments{
\item{ts_rdd}{Timeseries RDD being summarized}
\item{xcolumn}{Column representing the first random variable}
\item{ycolumn}{Column representing the second random variable}
\item{weight_column}{Column specifying relative weight of each data point}
\item{key_columns}{Optional list of columns that will form an equivalence
relation associating each record with the time series it belongs to (i.e.,
any 2 records having equal values in those columns will be associated with
the same time series, and any 2 records having differing values in those
columns are considered to be from 2 separate time series and will therefore
be summarized separately)
By default, `key_columns` is empty and all records are considered to be part
of a single time series.}
\item{incremental}{If FALSE and `key_columns` is empty, then apply the
summarizer to all records of `ts_rdd`.
If FALSE and `key_columns` is non-empty, then apply the summarizer to all
records within each group determined by `key_columns`.
If TRUE and `key_columns` is empty, then for each record in `ts_rdd`,
the summarizer is applied to that record and all records preceding it, and
the summarized result is associated with the timestamp of that record.
If TRUE and `key_columns` is non-empty, then for each record within a group
of records determined by 1 or more key columns, the summarizer is applied
to that record and all records preceding it within its group, and the
summarized result is associated with the timestamp of that record.}
}
\value{
A TimeSeriesRDD containing the summarized result
}
\description{
Compute Pearson weighted correlation between `xcolumn` and `ycolumn` weighted
by `weight_column` and store result in a new columns named
`<xcolumn>_<ycolumn>_<weight_column>_weightedCorrelation`
}
\examples{
library(sparklyr)
library(sparklyr.flint)
sc <- try_spark_connect(master = "local")
if (!is.null(sc)) {
sdf <- copy_to(sc, tibble::tibble(t = seq(10), x = rnorm(10), y = rnorm(10), w = 1.1^seq(10)))
ts <- fromSDF(sdf, is_sorted = TRUE, time_unit = "SECONDS", time_column = "t")
ts_weighted_corr <- summarize_weighted_corr(ts, xcolumn = "x", ycolumn = "y", weight_column = "w")
} else {
message("Unable to establish a Spark connection!")
}
}
\seealso{
Other summarizers:
\code{\link{ols_regression}()},
\code{\link{summarize_avg}()},
\code{\link{summarize_corr2}()},
\code{\link{summarize_corr}()},
\code{\link{summarize_count}()},
\code{\link{summarize_covar}()},
\code{\link{summarize_dot_product}()},
\code{\link{summarize_ema_half_life}()},
\code{\link{summarize_ewma}()},
\code{\link{summarize_geometric_mean}()},
\code{\link{summarize_kurtosis}()},
\code{\link{summarize_max}()},
\code{\link{summarize_min}()},
\code{\link{summarize_nth_central_moment}()},
\code{\link{summarize_nth_moment}()},
\code{\link{summarize_product}()},
\code{\link{summarize_quantile}()},
\code{\link{summarize_skewness}()},
\code{\link{summarize_stddev}()},
\code{\link{summarize_sum}()},
\code{\link{summarize_var}()},
\code{\link{summarize_weighted_avg}()},
\code{\link{summarize_weighted_covar}()},
\code{\link{summarize_z_score}()}
}
\concept{summarizers}
|
fcdfbd386375795a17ff0801a2d3ead4265e47b7
|
4ee9d7179b4af02d1b2efcb0f0f43f03cabc1164
|
/man/gsea.t2cov.Rd
|
6711a307e23326e7f9e0275e90273319ec3cbbee
|
[] |
no_license
|
FrankD/NetHet_old
|
78795a58d8a0484f4773230d391c0b99b0a4e0a8
|
38a55860acd636410c98ef30b51756776455be08
|
refs/heads/master
| 2020-04-16T07:10:05.290938
| 2015-08-18T12:07:48
| 2015-08-18T12:07:48
| 21,828,742
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 723
|
rd
|
gsea.t2cov.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ggmgsa.R
\name{gsea.t2cov}
\alias{gsea.t2cov}
\title{GSA using T2cov-test}
\usage{
gsea.t2cov(x1, x2, gene.sets, gene.names, gs.names = NULL,
method = "t2cov.lr", method.p.adjust = "fdr")
}
\arguments{
\item{x1}{expression matrix (condition 1)}
\item{x2}{expression matrix (condition 2)}
\item{gene.sets}{list of gene-sets}
\item{gene.names}{gene names}
\item{gs.names}{gene-set names}
\item{method}{method for testing equality of covariance matrices}
\item{method.p.adjust}{method for p-value adjustment (default: 'fdr')}
}
\value{
list of results
}
\description{
GSA using T2cov-test
}
\author{
n.stadler
}
\keyword{internal}
|
3798314238bd9ceb3ddd72a7f19e5d317940f339
|
f993e437735ff0520099598bbdcf40f270d9c471
|
/Clase04.R
|
a2c615e71ecefea25d370878b736e1ade4711258
|
[] |
no_license
|
PatyTuga/Analisis-y-Tratamiento-de-Datos-con-R
|
95af27e0250f8d75f2358df58255cd62cf8409dc
|
583164749de282491cebbf08dd608eb0f0005ee0
|
refs/heads/master
| 2021-01-10T15:57:10.638532
| 2015-10-20T01:52:46
| 2015-10-20T01:52:46
| 44,573,790
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,105
|
r
|
Clase04.R
|
##### Clase 04 #####

# Validate the check digit of an Ecuadorian cédula (national ID).
#
# cedula: the ID as a 10-character string of digits.
# Returns a message stating whether the cédula is valid. For inputs that
# are not exactly 10 characters long, prints (and invisibly returns) an
# error message instead.
verificador <- function(cedula){
  if (nchar(cedula) != 10) {
    # Wrong length: print and return the message, as in the happy path's
    # counterpart behaviour.
    return(print("La identificación no es correcta"))
  }
  digits <- as.numeric(strsplit(cedula, "")[[1]])
  # Weights 2,1,2,1,... applied to the first nine digits; any product
  # >= 10 is reduced by 9 (equivalent to summing its decimal digits).
  weighted <- digits[1:9] * rep(c(2, 1), length.out = 9)
  weighted <- ifelse(weighted >= 10, weighted - 9, weighted)
  # Valid when the weighted sum plus the check digit is a multiple of 10.
  if ((sum(weighted) + digits[10]) %% 10 == 0) {
    paste("La cédula ", cedula, "SI es correcta")
  } else {
    paste("La cédula ", cedula, " NO es correcta")
  }
}
# Demo call: only 9 characters long, so verificador() reports an invalid ID.
verificador("060401439")
# Load the example data set (provides the `data` data frame).
load("DataCruce.RData")
head(data)
library(dplyr)
data <- tbl_df(data)
# Normalise IDs to 10 characters: 9-digit IDs lost their leading zero when
# stored numerically, so it is restored here. paste0() is required —
# paste() would insert a space ("0 12345678"), producing an 11-character
# id that could never match in registro() below.
data <- data %>% mutate(id=ifelse(nchar(identificacion)==9,
                                  paste0("0", identificacion),
                                  as.character(identificacion)))
# Look up the record(s) whose normalised id equals `cedula`.
registro <- function(cedula){
  reg <- data %>% filter(id==cedula)
  return(reg)
}
|
a79a98fdc698424c48668301cebbeaac7e9a1ed9
|
38d52a7e16b96555f277cb879a69d3f1ba086dad
|
/man/apply_decimal.Rd
|
21a882d4b88590775946d339f9ecb2ae078b46c6
|
[
"MIT"
] |
permissive
|
next-game-solutions/tronr
|
c7ec41a0785536670942c653f0f1500f09e7e692
|
e7eb8b1d07e1c0415881ca3259358f707d78b181
|
refs/heads/main
| 2023-06-19T03:06:34.302241
| 2021-07-12T22:01:05
| 2021-07-12T22:01:05
| 305,829,963
| 7
| 0
|
NOASSERTION
| 2021-07-12T22:01:06
| 2020-10-20T20:48:08
|
JavaScript
|
UTF-8
|
R
| false
| true
| 1,182
|
rd
|
apply_decimal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apply_decimal.R
\name{apply_decimal}
\alias{apply_decimal}
\title{Change representation of token amounts}
\usage{
apply_decimal(amount, decimal)
}
\arguments{
\item{amount}{(double): token amount expressed using the "machine-level"
precision (see Details). Can be a vector of values.}
\item{decimal}{(double): number of digits after the decimal point for the
token of interest. Can be a vector of values.}
}
\value{
A numeric vector with token amounts.
}
\description{
Represents token amounts using the whole number and the decimal parts
}
\details{
All token transfers on the TRON blockchain are performed using the
"machine-level" precision of token amounts. This function helps users to
obtain the whole number and the decimal parts of the corresponding
amounts. Here is an example for Tronix (TRX), whose decimal precision
is 6:
\itemize{
\item machine-level representation: \code{30000555}
\item representation using the whole number and the decimal parts: \code{30.000555}
}
}
\examples{
amounts <- c(30000555, 110500655)
decimals <- c(6, 8)
apply_decimal(amount = amounts, decimal = decimals)
}
|
f5d9161280422bb8d480d0a562f067d546718289
|
a7adce03ceaf94e5b93b43a5d337bb49750196c6
|
/scripts/11_assemble_final_human_tx.R
|
929bc685c2f3ce79c6f4b980b499a87bf4eac0ae
|
[] |
no_license
|
czhu/FulQuant
|
cb2a5ada7904252e15c66a14051ad2740950237c
|
76139924631626e55143753a6fd58f283814a501
|
refs/heads/master
| 2023-04-07T14:48:23.139435
| 2022-07-20T07:40:51
| 2022-07-20T07:40:51
| 342,687,654
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,878
|
r
|
11_assemble_final_human_tx.R
|
## Assemble the final set of human transcript models: filter transcript
## clusters, colour-code them by novelty class, name novel ncRNA loci and
## transcripts, flag read-through transcripts, and export BED/annotation
## files.
library(tidyverse)
library(rtracklayer)
projectFolder = "."
SCRIPTDIR = file.path(projectFolder, "sw")
GENOMEDIR = file.path(projectFolder, "genome")
source(file.path(SCRIPTDIR, "clustering_functions.R"))
## change here
labPrefix = "My"
labSuffix = "Lab"
infolder = file.path(projectFolder, "combined/tx_annot")
## NOTE(review): these .rda files are assumed to provide `clusterClass`,
## `myclusters`, `tx` and `mygenes`, based on the usage below — confirm.
load( file.path(infolder, "novel_cluster_class.rda") )
load( file.path(infolder, "txCluster.rda") )
load( file.path(GENOMEDIR, "tx.rda"))
## Keep only clusters flagged as final human transcripts, then attach the
## novelty class; clusters without a class default to "UNKNOWN", and
## reference matches (pass or fail) are labelled "REF".
myclusters = subset(myclusters, is_human_final)
myclusters$type = clusterClass$type[match(myclusters$clname, clusterClass$clname)]
myclusters$type[is.na(myclusters$type)] = "UNKNOWN"
myclusters$type[myclusters$match_tx_pass | myclusters$match_tx_fail] = "REF"
## Flag clusters with end variation or alignment issues, per clusterClass.
novelClNameWithEndVariation = clusterClass$clname[clusterClass$is_end_variation]
novelClNameWithAlignIssues = clusterClass$clname[ clusterClass$align_issue!="none" ]
myclusters$is_end_variation = FALSE
myclusters$is_end_variation[myclusters$clname %in% novelClNameWithEndVariation] = TRUE
myclusters$has_align_issue = FALSE
myclusters$has_align_issue[myclusters$clname %in% novelClNameWithAlignIssues] = TRUE
## BED itemRgb colours: greens for reference matches, red for novel
## classes, brown reserved for alignment issues, black as the default.
COLOR_SCHEME_HUMAN_FINAL_TX = c(
"base" = "black",
"match_ref_filter_pass" = "#4daf4a", ## match_tx_pass
"match_ref_filter_fail" = "#73D171", ## match_tx_fail light green
"novel_align_issue" = "#a65628",
"novel_ir" = "#e41a1c" ,
"novel_es" = "#e41a1c" ,
"novel_nc" = "#e41a1c" ,
"novel_ne" = "#e41a1c" ,
"novel_ev" = "#e41a1c",
"novel_multi" = "#e41a1c"
)
mt = match(myclusters$clname,clusterClass$clname)
myclusters$align_issue = clusterClass$align_issue[mt]
## Benjamin's data has higher count
## Final filter: drop end-variation clusters; clusters with alignment
## issues survive only if they passed the novelty filter with count >= 10.
finalTx = subset(myclusters, !is_end_variation & (!has_align_issue | (novel_pass & count >= 10)))
## new color code
finalTx$itemRgb = COLOR_SCHEME_HUMAN_FINAL_TX["base"]
finalTx$itemRgb[finalTx$match_tx_fail] = COLOR_SCHEME_HUMAN_FINAL_TX["match_ref_filter_fail"]
finalTx$itemRgb[finalTx$clname %in% clusterClass$clname[ clusterClass$type == "IR" ] ] = COLOR_SCHEME_HUMAN_FINAL_TX["novel_ir"]
finalTx$itemRgb[finalTx$clname %in% clusterClass$clname[ clusterClass$type == "ES" ] ] = COLOR_SCHEME_HUMAN_FINAL_TX["novel_es"]
finalTx$itemRgb[finalTx$clname %in% clusterClass$clname[ clusterClass$type == "NC" ] ] = COLOR_SCHEME_HUMAN_FINAL_TX["novel_nc"]
finalTx$itemRgb[finalTx$clname %in% clusterClass$clname[ clusterClass$type == "EV" ] ] = COLOR_SCHEME_HUMAN_FINAL_TX["novel_ev"]
finalTx$itemRgb[finalTx$clname %in% clusterClass$clname[ clusterClass$type %in% c("NE","ASS") ] ] = COLOR_SCHEME_HUMAN_FINAL_TX["novel_ne"]
finalTx$itemRgb[finalTx$clname %in% clusterClass$clname[ clusterClass$type == "MN" ] ] = COLOR_SCHEME_HUMAN_FINAL_TX["novel_multi"]
stopifnot(all(finalTx$itemRgb %in% COLOR_SCHEME_HUMAN_FINAL_TX))
## NOTE remove novel tx with only 1 intron
## Transcripts with no associated gene are treated as novel ncRNA: merge
## them into loci and require each transcript to fall within exactly one
## locus.
hasNoGeneAssigned = is.na( finalTx$associated_gene )
message( sum( hasNoGeneAssigned ) ," ncRNA to assign gene name")
ncRNATx = finalTx[ hasNoGeneAssigned ]
ncRNAloci = GenomicRanges::reduce( ncRNATx, ignore.strand=FALSE)
## XXX strand must be defined, we have a case with undefined strand
ncRNAloci = subset(ncRNAloci, strand(ncRNAloci) != "*")
tx2locus = findOverlaps(ncRNATx, ncRNAloci, ignore.strand=FALSE, type = "within")
stopifnot( all(lengths(as.list(tx2locus))==1) )
## assign gene tx name etc
finalTx$gene_name = mygenes$gene_name[match(finalTx$associated_gene, mygenes$gene_id)]
## gene_source to source GENCODE
finalTx$gene_source = as.character(mygenes$source[match(finalTx$associated_gene, mygenes$gene_id)])
finalTx$gene_type = mygenes$gene_type[match(finalTx$associated_gene, mygenes$gene_id)]
finalTx$transcript_name = tx$transcript_name[match(finalTx$associated_tx, tx$transcript_id)]
## transcript_source to source GENCODE
finalTx$transcript_source = as.character(tx$source[match(finalTx$associated_tx, tx$transcript_id)])
## Sanity check: ncRNA loci must not overlap protein-coding/lincRNA genes
## on the same strand.
allowedGeneBiotype = c("protein_coding","lincRNA")
ovlps = findOverlaps( ncRNAloci, mygenes[mygenes$gene_type %in% allowedGeneBiotype] )
stopifnot( length(ovlps) ==0 )
## define antisense vs intergenic loci: a locus overlapping an allowed
## gene on the opposite strand is antisense, otherwise intergenic.
ovlps2 = findOverlaps(invertStrand(ncRNAloci), mygenes[mygenes$gene_type %in% allowedGeneBiotype])
locusType = ifelse(lengths( as.list(ovlps2) ) > 0, "antisense" , "intergenic" )
locusName = sprintf("NCRNA%05d", 1:length(ncRNAloci))
ncRNAType = as_tibble(tx2locus) %>% mutate(
clname = ncRNATx$clname[queryHits],
locus_name = locusName[subjectHits],
locus_type = locusType[subjectHits]
)
## Rank transcripts within each locus by descending count; the rank forms
## the transcript name suffix (e.g. NCRNA00001-1 is the most abundant).
## NOTE(review): `tie="first"` partially matches rank()'s `ties.method`
## argument — works, but spelling it out would be safer.
ncRNAType = ncRNAType %>% mutate(count = ncRNATx$count[queryHits]) %>%
group_by(subjectHits) %>% mutate(rank=rank( -count, tie="first" ),
tx_name = paste(locus_name, rank, sep="-"),
locus_range = granges_to_igvCoord(ncRNAloci)[subjectHits])
stopifnot( identical(finalTx$clname[hasNoGeneAssigned], ncRNAType$clname) )
finalTx$gene_name[hasNoGeneAssigned] = ncRNAType$locus_name
finalTx$gene_source[hasNoGeneAssigned] = labPrefix
finalTx$gene_type[hasNoGeneAssigned] = paste(labPrefix,ncRNAType$locus_type, sep="_")
finalTx$transcript_name[hasNoGeneAssigned] = ncRNAType$tx_name
finalTx$transcript_source[hasNoGeneAssigned] = labPrefix
## assign tx name
## Novel transcripts of known genes: name them <gene>-<labSuffix><rank>,
## ranked by descending count within each gene.
hasNoTxName = is.na(finalTx$transcript_name)
stopifnot( !anyNA(finalTx$associated_gene[hasNoTxName]))
txCountOrderInKnownGenes = as_tibble(finalTx[hasNoTxName,c("count","associated_gene", "clname")]) %>%
group_by(associated_gene) %>% mutate(rank=rank(-count,tie="first"))
stopifnot( identical(txCountOrderInKnownGenes$clname, finalTx$clname[hasNoTxName]) )
finalTx$transcript_name[hasNoTxName] = paste0( finalTx$gene_name[hasNoTxName],
"-",labSuffix,txCountOrderInKnownGenes$rank )
finalTx$transcript_source[hasNoTxName] = labPrefix
stopifnot( !anyNA(finalTx$transcript_name) )
stopifnot( !anyNA(finalTx$transcript_source) )
stopifnot( !anyNA(finalTx$gene_name) )
stopifnot( !anyNA(finalTx$gene_source) )
stopifnot( !anyNA(finalTx$gene_type))
## define read through
## A transcript spanning more than one (merged) allowed gene is a
## read-through; record the semicolon-joined gene names it crosses.
mygenesAllowed = mygenes[mygenes$gene_type %in% allowedGeneBiotype]
mygenesReduced = GenomicRanges::reduce(mygenesAllowed)
ovlps3 = findOverlaps(finalTx, mygenesReduced)
hasReadThrough = lengths( as.list(ovlps3) ) > 1
## NOTE(review): ifelse() recycles `yes` to the full vector length and
## indexes it at the TRUE positions of `test`, but this sapply() result
## has only one entry per read-through transcript — the lengths look
## misaligned unless the TRUEs happen to be the leading positions.
## Verify the read_through labels against known read-through loci.
readThrough = ifelse(hasReadThrough,
sapply( as.list(findOverlaps(finalTx[which(hasReadThrough)], mygenesAllowed)),
function(x) paste(mygenesAllowed$gene_name[x], collapse=";")),
as.character(NA))
finalTx$read_through = readThrough
finalTx$name = finalTx$transcript_name
## Export: full cluster set, final transcript BED, a flat annotation
## table, and an .rda snapshot of both objects.
export(myclusters, file.path(infolder, "tx_human_full.bed.gz") )
export(finalTx, file.path(infolder, "tx_human_final.bed.gz") )
txAnnot = mcols(finalTx)[, c( "name", "is_human_final", "gene_name", "transcript_name", "count", "type", "displayName","score", "igv_coord")]
write.table(txAnnot, file.path(infolder, "tx_human_final_annot.txt"), quote=FALSE, sep="\t",row.names=FALSE)
save(myclusters,finalTx, file=file.path(infolder, "tx_human.rda") )
|
bac30879cba803430eafe9f57798fb93783fd015
|
5d4cba65032b333387991db78f1e7d76779773e2
|
/gbifapp/gbifapp.R
|
d052d70800c9f52c9625589f8572574f7b46edfc
|
[] |
no_license
|
AlexxNica/abs
|
889981b672181ad4675cc40fb5137fd3358ed2b2
|
cf25666fae30ad0de1a15c21d50cfbc57e8137fa
|
refs/heads/master
| 2021-01-23T02:48:41.870318
| 2017-02-27T21:13:19
| 2017-02-27T21:13:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,776
|
r
|
gbifapp.R
|
## Shiny app mapping a sample of GBIF species-occurrence records for Kenya.
library(shiny)
library(leaflet)
library(dplyr)
library(RColorBrewer)
library(ggplot2)
library(data.table)
# use datatable to speed things up here
data <- fread("kenya_slim.csv", na.strings = c("", NA))
# Random 30k subsample; no set.seed(), so the sample differs per launch.
data <- sample_n(data, 30000)
# establish bounds for map view
# NOTE(review): `bounds` is not referenced in the code visible below —
# confirm whether it is still needed.
bounds <- read.csv("bounds.csv", stringsAsFactors = FALSE)
# Build an HTML hyperlink (<a href=...>label</a>) that looks up `query`
# on an external service.
#
# query: search term (e.g. a species name); spaces are URL-encoded
#        per-service below.
# label: visible link text.
# type:  one of "google", "crossref", "gbif" or "lens", selecting the
#        target service.
# Returns a single character string containing the HTML anchor.
map_url <- function(query, label = "NULL", type = "NULL"){
  href <- "<a href="
  close_href <- ">" #included for flexibility in labelling
  close_a <- "</a>"
  out <- NULL
  if(type == "google"){
    query <- stringr::str_replace_all(query, " ", "+")
    google_base <- "https://www.google.co.uk/#q="
    url <- paste0(google_base, query)
    out <- paste0(href, shQuote(url), close_href, label, close_a)
  }
  if(type == "crossref"){
    query <- stringr::str_replace_all(query, " ", "+%2B")
    crossref_base <- "http://search.crossref.org/?q=%2B"
    url <- paste0(crossref_base, query)
    out <- paste0(href, shQuote(url), close_href, label, close_a)
  }
  if(type == "gbif"){
    query <- stringr::str_replace_all(query, " ", "+")
    gbif_base <- "http://www.gbif.org/species/search?q="
    url <- paste0(gbif_base, query)
    out <- paste0(href, shQuote(url), close_href, label, close_a)
  }
  if(type == "lens"){
    # note restriction to main jurisdictions and no stemming to reduce duplication and false positives
    query <- stringr::str_replace_all(query, " ", "+")
    lens_base <- "https://www.lens.org/lens/search?q="
    url <- paste0(lens_base, "%22", query, "%22", "&jo=true&j=EP&j=JP&j=US&j=WO&st=false&n=50")
    out <- paste0(href, shQuote(url), close_href, label, close_a)
  }
  # Previously an unrecognised `type` (including the default "NULL") fell
  # through to a cryptic "object 'out' not found" error; fail clearly.
  if (is.null(out)) {
    stop("map_url: unknown link type: ", type, call. = FALSE)
  }
  out
}
# create columns with formatted links
data$google <- map_url(data$species, label = "Lookup Google", type = "google")
data$crossref <- map_url(data$species, label = "Lookup Crossref", type = "crossref")
data$lens <- map_url(data$species, label = "Lookup Patents", type = "lens")
data$gbif <- map_url(data$species, label = "Lookup GBIF", type = "gbif")
# combine links for use as popup in leafletproxy
data$combined_label <- paste0("<br>", "<strong>", data$species, "</strong>", "</br>", "<br>", data$google, "</br>", "<br>", data$gbif, "</br>", "<br>", data$crossref, "</br>", "<br>", data$lens, "</br>")
# create stable color palette
# A fixed kingdom ordering keeps colours stable across filter changes.
kingdom <- c("incertae sedis", "Archaea", "Fungi", "Plantae", "Chromista", "Animalia", "Bacteria", "Protozoa", "Viruses")
# my_palette <- brewer.pal(length(kingdom), "Paired")
# Loads the precomputed `my_palette` vector used by colorFactor() below.
load("my_palette.rda")
factpal <- colorFactor(my_palette, domain = kingdom, ordered = TRUE)
# user interface
# Sidebar: kingdom selector, csv download, and an About section.
# Main panel: map / summary chart / data table tabs.
ui <- fluidPage(
titlePanel("Map a sample of GBIF Data"),
sidebarLayout(
sidebarPanel(
uiOutput("kenya_output"),
br(),
downloadButton("downloadData", "csv Download"),
br(),
br(),
h3("About"),
p("This app provides an example of the visualisation of 30,000 species occurrence records for Kenya from the", a("Global Biodiversity Information Facility.", href="http://www.gbif.org/", target = "_blank"), "To learn how the data was created see", a("this article.", href="https://poldham.github.io/abs/mapgbif.html", target = "_blank")),
p("The raw data can be accessed from the following DOI and please cite as: GBIF.org (24th January 2017) GBIF Occurrence Download", a("http://doi.org/10.15468/dl.b04fyt", href="http://doi.org/10.15468/dl.b04fyt", target = "_blank"),". The data was imported using", a("rgbif from ROpensci", href="https://github.com/ropensci/rgbif"), "and the site was built in", a("RStudio", href="https://www.rstudio.com/", target = "_blank"), "with", a("Shiny.", href="https://www.rstudio.com/products/shiny/", target = "_blank")),
p("The app does not use Cookies.")
),
mainPanel(width = 8,
tabsetPanel(type = "tabs",
tabPanel("Map", leafletOutput("mymap")),
tabPanel("Summary", plotOutput("summary")),
tabPanel("Table", dataTableOutput("table"))
)
)
)
)
# Shiny server: renders the kingdom selector, the leaflet map, a data
# table, a summary bar chart, and a csv download of the raw data file.
server <- function(input, output){
  # Kingdom drop-down, built from the kingdoms present in the data.
  output$kenya_output <- renderUI({
    selectInput(inputId = "kingdom_input", "kingdom",
                sort(unique(data$kingdom)),
                selected = "Animalia")
  })
  # use renderLeaflet for elements of the map that don't change, note setting default sizes
  output$mymap <- renderLeaflet({
    leaflet(data) %>%
      addTiles() %>%
      addCircleMarkers(~longitude, ~latitude, popup = data$species, radius = 2, weight = 5, opacity = 0.5, fill= TRUE, fillOpacity = 0.2)
  })
  # Use leafletProxy for elements that change
  observe({
    set <- data %>%
      filter(data$kingdom %in% input$kingdom_input)
    # FIX: popups must come from the filtered subset (`set`), not the full
    # `data`, otherwise marker coordinates and popup labels are
    # misaligned after filtering.
    leafletProxy("mymap") %>% clearMarkers() %>%
      addCircleMarkers(lng = set$longitude,
                       lat = set$latitude, popup = set$combined_label, radius = 1, weight = 2, opacity = 0.5, fill= TRUE, fillOpacity = 0.2, color = factpal(input$kingdom_input))
  })
  # Table tab: records for the selected kingdom.
  output$table <- renderDataTable({
    table <- data %>%
      filter(data$kingdom %in% input$kingdom_input)
  })
  # Add summary plot counting species by occurrences. filter(n > 10) to limit swamping the chart. Note that the n value filter will often need adjustment for the dataset
  output$summary <- renderPlot({
    data %>%
      filter(data$kingdom %in% input$kingdom_input) %>%
      count(species) %>%
      dplyr::filter(n > 100) %>%
      ggplot(aes(x = reorder(species, n), y = n, fill = species)) +
      geom_bar(stat="identity", show.legend = FALSE) +
      coord_flip() + labs(x = "species", y = "occurrences")
  })
  # Serve the raw csv file for download.
  output$downloadData <- downloadHandler(
    filename = function() {
      paste("kenya_slim", ".csv", sep="")
    },
    content = function(file) {
      file.copy("kenya_slim.csv", file)
    }, contentType = "text/csv"
  )
}
shinyApp(ui = ui, server = server)
|
3d9ccbb35eb33ab255e8e1b5c1b49ccda64060d6
|
c1034eb8f34b18105acf3244bf9a0b0339d6ca8d
|
/man/plotExtreme.Rd
|
b80ebabac757a0a5d3c9d6111ec21dec85fb7b20
|
[
"MIT"
] |
permissive
|
svkucheryavski/mdatools
|
f8d4eafbb34d57283ee753eceea1584aed6da3b9
|
2e3d262e8ac272c254325a0a56e067ebf02beb59
|
refs/heads/master
| 2023-08-17T16:11:14.122769
| 2023-08-12T16:58:49
| 2023-08-12T16:58:49
| 11,718,739
| 31
| 11
|
NOASSERTION
| 2020-07-23T18:50:22
| 2013-07-28T11:10:36
|
R
|
UTF-8
|
R
| false
| true
| 348
|
rd
|
plotExtreme.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defaults.R
\name{plotExtreme}
\alias{plotExtreme}
\title{Shows extreme plot for SIMCA model}
\usage{
plotExtreme(obj, ...)
}
\arguments{
\item{obj}{a SIMCA model}
\item{...}{other parameters}
}
\description{
Generic function for creating extreme plot for SIMCA model
}
|
c03d254c5b1593f120ae2699aa4563983bb75e63
|
7f9026c8be2400a6ca51291c6d97f737ee0fa51b
|
/Analysis_maincode.R
|
4f898abce7bdebe5d8256e6cff5b1baaea87c5d0
|
[] |
no_license
|
jwisch/Falls
|
dc76b3a98c2c737e08ef231c3d18747d6f9fddb0
|
ccd1e7492b2c0501af550851cdf14fee8871f919
|
refs/heads/master
| 2020-12-13T09:04:51.852196
| 2020-01-16T17:10:25
| 2020-01-16T17:10:25
| 234,370,155
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,154
|
r
|
Analysis_maincode.R
|
FILEPATH_DATA<-"C:/Users/julie.wisch/Documents/Transition/DraftedMS_Falls/Data/"
FILEPATH_CODE<-"C:/Users/julie.wisch/Documents/Transition/DraftedMS_Falls/Code/"
library(ggplot2)
library(ggpubr)
library(sjPlot)
library(gridExtra)
library(psych)
library(ppcor)
source(paste(FILEPATH_CODE, "CommonFuncs.R", sep = ""))
source(paste(FILEPATH_CODE, "Falls_Funcs.R", sep = ""))
################################################################################################################################
#Data Cleaning
################################################################################################################################
df_PIB <- read.csv(paste(FILEPATH_DATA, "PIB.csv", sep = ""))
dF_fall <- read.csv(paste(FILEPATH_DATA,"adrcpib_fallfinal_deidentified.csv", sep = ""))
df_FC <- read.csv(paste(FILEPATH_DATA,"C:/Users/julie.wisch/Documents/ADRC/Data/Ances_compositeBB298.csv", sep = ""))
df.MRI<-read.csv(paste(FILEPATH_DATA,"HASD_ACS_DR14_3TMR.csv", sep = ""))
demog<-read.csv(paste(FILEPATH_DATA,"DR_demographics_20190122.csv", sep = ""))
# Standardize key/date columns so falls visits can be matched to PET scans.
#Converting the ID's to factors
df_PIB$MapID <- as.factor(df_PIB$MapID)
#Converting the dates to dates
df_PIB$PET_Date <-as.Date(df_PIB$PET_Date, format = "%m/%d/%Y") #4 digit years get capital Y, 2 digit years get lowercase y
dF_fall$MapID <- as.factor(dF_fall$MapID)
dF_fall$start <- as.Date(dF_fall$start, format = "%d-%b-%y") #use b if it's the month spelled out
#Dropping all the PIB columns we don't need.
df_PIB<- df_PIB[,c("MapID", "PET_Date", "PUP_fSUVR_rsf_TOT_CORTMEAN" )]
#To shring a dataframe down to only the columns you need, list them - like above - inside c().
#Make sure to put your column names in quotes. Spell them EXACTLY the same. Seperate with commas.
# MatchbyNearestDate() is defined elsewhere in the project; it pairs each
# falls record with the closest-in-time PET scan for the same MapID.
df_matched<-MatchbyNearestDate(dF_fall, df_PIB, "MapID", "start", "PET_Date")
# NOTE(review): positional column selection is fragile — confirm indices
# 1:4, 29, 30:32, 74:75 still pick the intended fields if inputs change.
df_matched<-df_matched[,c(1:4, 29, 30:32, 74:75)]
rm(dF_fall, df_PIB)
df_matched$timefall1<-as.numeric(as.character(df_matched$timefall1))
# Amyloid positivity: cortical SUVR above the 1.42 threshold.
df_matched$PIBpos<-as.factor(ifelse(df_matched$PUP_fSUVR_rsf_TOT_CORTMEAN > 1.42, 1, 0))
# Two-digit birth years are forced into the 1900s via the "19%y" format.
df_matched$BIRTH<-format(as.Date(df_matched$BIRTH, "%d-%b-%y"), "19%y-%m-%d")
# Age in years at PET scan (difftime in weeks / 52, sign flipped).
df_matched$age<-as.numeric(difftime(df_matched$BIRTH, df_matched$PET_Date, units = "weeks")/(-52))
colnames(df_matched)[4]<-"falls"
# Years between falls enrollment and PET scan.
df_matched$timegap<-as.numeric(difftime(df_matched$start, df_matched$PET_Date, units = "weeks")/(-52))
df_matched<-df_matched[df_matched$timegap < 2 & df_matched$timegap > -2,] #keeping only people with PIB scans within 2 years of falls enrollment date
#Now getting resting state
df_FC$DATE_SCANNED<-as.Date(df_FC$DATE_SCANNED, format = "%m/%d/%Y")
colnames(df_FC)[1]<-"MapID"
df_FC<-MatchbyNearestDate(df_matched, df_FC, "MapID", "start", "DATE_SCANNED")
df_FC$timegap<-as.numeric(difftime(df_FC$start, df_FC$DATE_SCANNED, units = "weeks")/(-52))
df_FC<-df_FC[df_FC$timegap < 2 & df_FC$timegap > -2,] #keeping only people with PIB scans within 2 years of falls enrollment date
# Fisher z-transform the functional-connectivity correlations (cols 15:105).
# NOTE(review): fisherz() is not defined in this file — presumably
# psych::fisherz; confirm the package is attached before this line runs.
df_FC[,15:105] <- apply(df_FC[,15:105], 2, fisherz)
#Adding in race
colnames(demog)[1]<-"MapID"
df_FC<-merge(df_FC, demog[,c("MapID", "race2", "apoe")], by = "MapID", all.x = TRUE, all.y = FALSE)
# APOE e4 carrier flag: genotypes 33 and 23 are coded as non-carriers.
df_FC$apoe4<-ifelse(df_FC$apoe == 33 | df_FC$apoe == 23, 0 , 1)
library(tableone)
# Demographics table stratified by fall status and amyloid status.
listVars<-c("PUP_fSUVR_rsf_TOT_CORTMEAN", "timefall1", "EDUC", "age", "GENDER", "race2", "apoe4")
catVars<-c("GENDER", "race2", "apoe4")
table1 <- CreateTableOne(vars = listVars, data = df_FC, factorVars = catVars, strata = c("falls", "PIBpos"))
table1
rm(demog)
#df_FC<-df_FC[ , !(names(df_FC) %in% c("race2", "apoe", "apoe4"))] #dropping some demographics back out so it doesn't jack anything up later....
################################################################################################################################
################################################################################################################################
#Testing for relationship between amyloid level and time to fall
################################################################################################################################
#There are 71 individuals who fall, 50 who do not.
#Linking amyloid with falls
# Comparison pair for stat_compare_means(): non-fallers ("0") vs fallers ("1").
my_comparisons<-list(c("0", "1"))
# Boxplot of cortical amyloid by fall status (jittered points, outliers hidden).
p1<-ggboxplot(df_FC, y = "PUP_fSUVR_rsf_TOT_CORTMEAN", x = "falls", color = "falls", palette = "jco",
add = "jitter", outlier.shape = NA) + stat_compare_means(comparisons = my_comparisons) + xlab("Did Participant Fall?") +
ylab("Cortical Amyloid Accumulation")+scale_color_manual(values=c("#9ecae1","#3182bd"))+
scale_x_discrete(labels =c("0" = "No", "1" = "Yes")) + theme(legend.position = "none")
# Plotting copy with human-readable amyloid-status labels.
df_FC_plot<-df_FC[,1:14]
df_FC_plot$PIBpos<-revalue(df_FC_plot$PIBpos, c("0"="Amyloid Negative", "1"="Amyloid Positive"))
# Same boxplot, additionally filled by amyloid status.
AmyloidPlot<-ggboxplot(df_FC_plot, y = "PUP_fSUVR_rsf_TOT_CORTMEAN", x = "falls", fill = "PIBpos", palette = "jco",
add = "jitter", outlier.shape = NA) + stat_compare_means(comparisons = my_comparisons) + xlab("Did Participant Fall?") +
ylab("Cortical Amyloid Accumulation")+scale_fill_manual(values=c("#9ecae1","#3182bd"))+
scale_x_discrete(labels =c("0" = "No", "1" = "Yes")) + labs(fill = "")
# Partial correlation (controlling for education, age, sex) between amyloid
# burden and time to first fall, among fallers only.
pcor.test(df_FC[df_FC$falls == 1, "PUP_fSUVR_rsf_TOT_CORTMEAN"], df_FC[df_FC$falls == 1, "timefall1"],
df_FC[df_FC$falls == 1 ,c("EDUC", "age", "GENDER")], method = "pearson")
#basically the same if you treat amyloid as a continuous variable vs categorical
################################################################################################################################
################################################################################################################################
#Doing PCA to get a single intranetwork signature
################################################################################################################################
# Rename the 13 within-network (intranetwork) connectivity columns.
colnames(df_FC)[c(15, 28, 40, 51, 61, 70, 78, 85, 91, 96, 100, 103, 105)]<-c("Somatomotor", "Lateral Somatomotor",
"Cingulate Operculum", "Auditory",
"Default Mode", "Memory", "Vision",
"Frontoparietal", "Salience",
"Subcortical", "Visual Attention",
"Default Attention", "Cerebellar")
# PCA is fit on fallers only; the resulting loadings are applied to everyone.
df_pca<-prcomp(df_FC[df_FC$falls == 1,c(15, 28, 40, 51, 61, 70, 78, 85, 91, 96, 100, 103, 105)])
df_pca$rotation<-df_pca$rotation * -1 #reversing sign so things are more intuitive
#first component explains 37% of variance, second does 15%
# Project all participants onto PC1 to obtain the global "Signature" score.
df_FC$Signature<-as.numeric(as.matrix(df_FC[,c(15, 28, 40, 51, 61, 70, 78, 85, 91, 96, 100, 103, 105)])%*%df_pca$rotation[,1])
# Scale PCA loadings by the component standard deviations to obtain the
# variable coordinates used in the cos2 / contribution calculations below.
var_coord_func <- function(loadings, comp.sdev) {
  comp.sdev * loadings
}
# Variable coordinates: each row of loadings scaled by the component sdevs.
loadings <- df_pca$rotation
sdev <- df_pca$sdev
var.coord <- t(apply(loadings, 1, var_coord_func, sdev))
# Squared coordinates (cos2) measure how well each region is represented.
var.cos2 <- var.coord^2
# Compute contributions of each brain region to the component
#result is between 0 and 1...should sum to 1
comp.cos2 <- apply(var.cos2, 2, sum)
# Fraction of a component's total cos2 that is contributed by each region.
contrib <- function(var.cos2, comp.cos2) {
  var.cos2 / comp.cos2
}
# Per-region contribution to each principal component (columns sum to 1).
var.contrib <- t(apply(var.cos2,1, contrib, comp.cos2))
# Horizontal bar chart of each region's contribution to a PCA component.
#   Component      : named numeric vector of contributions (names = regions)
#   ComponentTitle : plot title
# Returns a ggplot object.
# NOTE(review): the subtitle hard-codes "37% of variance" (PC1) — update it
# if this is ever called for a different component.
MakeBarPlots<-function(Component, ComponentTitle){
theme_set(theme_bw())
pcaPlot<-as.data.frame(Component)
pcaPlot<-cbind(rownames(pcaPlot), pcaPlot[,1])
colnames(pcaPlot)<-c("region", "contribution")
pcaPlot<-as.data.frame(pcaPlot)
# cbind() coerced everything to character; restore numeric contributions.
pcaPlot$contribution<-as.numeric(as.character(pcaPlot$contribution))
pcaPlot <- pcaPlot[order(-pcaPlot$contribution), ] # sort
# Diverging Barcharts
p<-ggplot(pcaPlot, aes(x=reorder(region, contribution), y = contribution, label=contribution)) +
geom_bar(stat='identity', width=.5) +
#coord_cartesian(ylim = c(-0.4, 0.4)) +
scale_y_continuous()+ ylab("Contribution")+xlab("Intranetwork Connection")+
labs(subtitle="37% of variance",
title= ComponentTitle) +
coord_flip() +
theme(legend.position = "none")
return(p)}
# Plot only the positive PC1 contributions; copy the signature to the plot df.
MakeBarPlots(subset(var.contrib[,1], var.contrib[,1] > 0), "Global Rs-Fc Signature")
df_FC_plot$Signature<-df_FC$Signature
################################################################################################################################
################################################################################################################################
#Testing for relationship between networks and time to fall
################################################################################################################################
# Four-level grouping label: "<amyloid status>-<fall status>" (e.g. "1-0").
df_FC$cluster<-as.factor(paste(df_FC$PIBpos, df_FC$falls, sep = "-"))
my_comparisons<-list(c("0", "1"))
# Boxplot: global intranetwork signature by fall status.
p2<-ggboxplot(df_FC, x = "falls", y = "Signature", color = "falls", palette = "jco",
add = "jitter", outlier.shape = NA) + stat_compare_means(comparisons = my_comparisons) +
theme(legend.position = "none", axis.title.y = element_text(size = 11))+
ylab("Global Intranetwork Connection") +
xlab("Did participant fall?") +
scale_color_manual(values=c("#9ecae1","#3182bd"))+
scale_x_discrete(labels =c("0" = "No", "1" = "Yes")) + theme(legend.position = "none")
# Same comparison, additionally filled by amyloid status.
SignaturePlot<-ggboxplot(df_FC_plot, y = "Signature", x = "falls", fill = "PIBpos", palette = "jco",
add = "jitter", outlier.shape = NA) + stat_compare_means(comparisons = my_comparisons) + xlab("Did Participant Fall?") +
ylab("Global Rs-Fc Signature")+scale_fill_manual(values=c("#9ecae1","#3182bd"))+
scale_x_discrete(labels =c("0" = "No", "1" = "Yes")) + labs(fill = "")
grid.arrange(p1, p2, nrow = 1)
df_FC$falls<-as.factor(df_FC$falls)
################################################################################################################################
################################################################################################################################
#Testing for relationship between amyloid level and networks
################################################################################################################################
#Prior lit has found differences in DAN and DMN dependent on amyloid
#https://academic.oup.com/cercor/article/26/2/695/2366897
#also the raichle paper about dmn
# Whole-sample partial correlation (controlling education, age, sex).
pcor.test(df_FC[, "Signature"], df_FC[, "PUP_fSUVR_rsf_TOT_CORTMEAN"],
df_FC[,c("EDUC", "age", "GENDER")], method = "pearson")
#p = 0.21
# Amyloid-positive fallers (apoe4 added to the controls for subgroup tests).
posfall<-pcor.test(df_FC[df_FC$PIBpos == 1 & df_FC$falls == 1, "Signature"], df_FC[df_FC$PIBpos == 1 & df_FC$falls == 1, "PUP_fSUVR_rsf_TOT_CORTMEAN"],
df_FC[df_FC$PIBpos == 1 & df_FC$falls == 1,c("EDUC", "age", "GENDER", "apoe4")], method = "pearson")
#p = 0.012, r = -.75
# Amyloid-negative fallers.
pcor.test(df_FC[df_FC$PIBpos == 0 & df_FC$falls == 1, "Signature"], df_FC[df_FC$PIBpos == 0 & df_FC$falls == 1, "PUP_fSUVR_rsf_TOT_CORTMEAN"],
df_FC[df_FC$PIBpos == 0 & df_FC$falls == 1,c("EDUC", "age", "GENDER", "apoe4")], method = "pearson")
#p = 0.34
# Amyloid-positive non-fallers.
posno<-pcor.test(df_FC[df_FC$PIBpos == 1 & df_FC$falls == 0, "Signature"], df_FC[df_FC$PIBpos == 1 & df_FC$falls == 0, "PUP_fSUVR_rsf_TOT_CORTMEAN"],
df_FC[df_FC$PIBpos == 1 & df_FC$falls == 0,c("EDUC", "age", "GENDER", "apoe4")], method = "pearson")
#p = 0.08, r = 0.70
# Amyloid-negative non-fallers.
pcor.test(df_FC[df_FC$PIBpos == 0 & df_FC$falls == 0, "Signature"], df_FC[df_FC$PIBpos == 0 & df_FC$falls == 0, "PUP_fSUVR_rsf_TOT_CORTMEAN"],
df_FC[df_FC$PIBpos == 0 & df_FC$falls == 0,c("EDUC", "age", "GENDER", "apoe4")], method = "pearson")
#p = 0.9
# Scatter of amyloid vs signature among amyloid-positive participants,
# colored by the amyloid-fall cluster, annotated with subgroup pcor results.
ggplot(df_FC[df_FC$PIBpos == 1,], aes(x = Signature, y = PUP_fSUVR_rsf_TOT_CORTMEAN, color = cluster, shape = cluster))+
geom_point(show.legend = FALSE)+geom_smooth(method = "lm", show.legend = FALSE)+ylab("Cortical Amyloid Accumulation")+
xlab("Global Rs-Fc Signature")+
scale_color_manual(values=c("#9ecae1","#3182bd"))+
scale_shape_manual(values = c(1, 2))+
annotate("label", x = 0.65, y = 4, size = 3,
label = paste("Fallers \n R=", round(posfall$estimate, 3),
"\n p=", round(posfall$p.value, 3))) +
annotate("label", x = 0.75, y = 1.3, size = 3,
label = paste("Non-Fallers \n R=", round(posno$estimate, 3),
"\n p=", round(posno$p.value, 3))) + theme_classic()
# Same scatter for the whole sample, split by fall status.
ggplot(df_FC, aes(x = Signature, y = PUP_fSUVR_rsf_TOT_CORTMEAN, color = falls, shape = falls))+
geom_point(show.legend = FALSE)+geom_smooth(method = "lm", show.legend = FALSE)+ylab("Cortical Amyloid Accumulation")+
xlab("Intranetwork Connectivity Signature")+
scale_color_manual(values=c("#9ecae1","#3182bd"))+
scale_shape_manual(values = c(1, 2))
#Can't pick anything up when we lump them all together, but for amyloid positive people...
#Amyloid positive individuals who fall tend to have high amyloid and low signatures OR low amyloid and high signatures
#Apos people who do not fall have either low amyloid-low signature or high amyloid-high signature
#so it seems like you can compensate...if you have a lot of amyloid but your brain is still connected, you're not going to tip over
#if you have crappy brain connections, but you don't have too much amyloid, you're still not going to fall over
#Checking to see if there's a relationship between network strength and number of falls
# Partial correlations between the signature and total number of falls
# (fallers only), overall and within amyloid subgroups.
pcor.test(df_FC[ df_FC$falls == 1, "Signature"], df_FC[df_FC$falls == 1, "totfall"],
df_FC[df_FC$falls == 1,c("EDUC", "age", "GENDER", "apoe4")], method = "pearson")
#amyloid positives
pcor.test(df_FC[df_FC$PIBpos == 1 & df_FC$falls == 1, "Signature"], df_FC[df_FC$PIBpos == 1 & df_FC$falls == 1, "totfall"],
df_FC[df_FC$PIBpos == 1 & df_FC$falls == 1,c("EDUC", "age", "GENDER", "apoe4")], method = "pearson")
#amyloid negatives
pcor.test(df_FC[df_FC$PIBpos == 0 & df_FC$falls == 1, "Signature"], df_FC[df_FC$PIBpos == 0 & df_FC$falls == 1, "totfall"],
df_FC[df_FC$PIBpos == 0 & df_FC$falls == 1,c("EDUC", "age", "GENDER", "apoe4")], method = "pearson")
# Scatter: total falls vs signature among fallers, by amyloid status.
ggplot(df_FC[df_FC$falls == 1,], aes(x = Signature, y = totfall, color = PIBpos, shape = PIBpos))+
geom_point(show.legend = FALSE)+geom_smooth(method = "lm", show.legend = FALSE)+ylab("Total Number of Falls")+
xlab("Intranetwork Connectivity Signature")+
scale_color_manual(values=c("#9ecae1","#3182bd"))+
scale_shape_manual(values = c(1, 2))
#################################################################################################################
#################################################################################################################
#################################################################################################################
#Now adding in volumes
#getting z scores on a region by region basis
# Z-score a numeric vector: center on its mean and scale by its sd.
normalize <- function(PARAM) {
  centered <- PARAM - mean(PARAM)
  centered / sd(PARAM)
}
#Now looking at Liang Wang's region
# "AD signature" structural composite: mean z-score across 14 bilateral
# temporal, parietal, entorhinal, and precuneus regional volumes.
df.MRI$ADsig<-(normalize(df.MRI$MR_LV_INFRTMP)+normalize(df.MRI$MR_LV_MIDTMP)+normalize(df.MRI$MR_LV_SUPERTMP)+
normalize(df.MRI$MR_RV_INFRTMP)+normalize(df.MRI$MR_RV_MIDTMP)+normalize(df.MRI$MR_RV_SUPERTMP)+
normalize(df.MRI$MR_LV_INFRPRTL)+normalize(df.MRI$MR_LV_SUPERPRTL)+
normalize(df.MRI$MR_RV_INFRPRTL)+normalize(df.MRI$MR_RV_SUPERPRTL)+
normalize(df.MRI$MR_LV_ENTORHINAL)+normalize(df.MRI$MR_RV_ENTORHINAL)+
normalize(df.MRI$MR_LV_PRECUNEUS)+normalize(df.MRI$MR_RV_PRECUNEUS))/14
# Mean z-scored bilateral hippocampal volume.
df.MRI$HippoVol<-(normalize(df.MRI$MR_LV_HIPPOCAMPUS)+normalize(df.MRI$MR_RV_HIPPOCAMPUS))/2
df.MRI$MR_Date<-as.Date(df.MRI$MR_Date, format = "%m/%d/%Y")
colnames(df.MRI)[4]<-"MapID"
# Match each falls record to the nearest MRI scan; keep scans within 2 years.
df_matched<-MatchbyNearestDate(df_matched, df.MRI[,c("MapID", "MR_Date", "ADsig", "HippoVol")], "MapID", "start", "MR_Date")
df_matched$timegap2<-as.numeric(difftime(df_matched$start, df_matched$MR_Date, units = "weeks")/(-52))
df_matched<-df_matched[df_matched$timegap2 < 2 & df_matched$timegap2 > -2,] #keeping only people with mri scans within 2 years of falls enrollment date
# From here on df_matched$PIBpos carries text labels, not 0/1.
df_matched$PIBpos<-revalue(df_matched$PIBpos, c("0"="Amyloid Negative", "1"="Amyloid Positive"))
# Boxplots of the two structural measures by fall status.
p1<-ggboxplot(df_matched, x = "falls", y = "ADsig", color = "falls", palette = "jco",
add = "jitter", outlier.shape = NA) + stat_compare_means(comparisons = my_comparisons) +
theme(legend.position = "none", axis.title.y = element_text(size = 11))+
ylab("AD Signature Regions") +
xlab("Did participant fall?") +
scale_color_manual(values=c("#9ecae1","#3182bd"))+
scale_x_discrete(labels =c("0" = "No", "1" = "Yes")) + theme(legend.position = "none")
p2<-ggboxplot(df_matched, x = "falls", y = "HippoVol", color = "falls", palette = "jco",
add = "jitter", outlier.shape = NA) + stat_compare_means(comparisons = my_comparisons) +
theme(legend.position = "none", axis.title.y = element_text(size = 11))+
ylab("Hippocampal Volume") +
xlab("Did participant fall?") +
scale_color_manual(values=c("#9ecae1","#3182bd"))+
scale_x_discrete(labels =c("0" = "No", "1" = "Yes")) + theme(legend.position = "none")
# Hippocampal volume boxplot filled by amyloid status (used in final figure).
HippoVol<-ggboxplot(df_matched, y = "HippoVol", x = "falls", fill = "PIBpos", palette = "jco",
add = "jitter", outlier.shape = NA) + stat_compare_means(comparisons = my_comparisons) + xlab("Did Participant Fall?") +
ylab("Hippocampus Volume")+scale_fill_manual(values=c("#9ecae1","#3182bd"))+
scale_x_discrete(labels =c("0" = "No", "1" = "Yes")) + labs(fill = "")
grid.arrange(p1, p2, nrow = 1)
# Join the structural rows to the functional-connectivity signature rows.
df_matched_FC<-MatchbyNearestDate(df_matched, df_FC[,c("MapID", "start", "Signature", "apoe4")], "MapID", "MR_Date", "start")
df_matched_FC$falls <- as.factor(df_matched_FC$falls)
# AD-signature volume vs global signature, by fall status.
p1<-ggplot(df_matched_FC, aes(x = Signature, y = ADsig, color = falls, shape = falls))+
geom_point(show.legend = FALSE)+geom_smooth(method = "lm", show.legend = FALSE)+ylab("AD Signature Volume")+
xlab("Intranetwork Connectivity Signature")+xlim(c(0.4, 1.4))+
scale_color_manual(values=c("#9ecae1","#3182bd"))+
scale_shape_manual(values = c(1, 2))
# Hippocampal volume vs global signature, annotated with overall correlation.
ggplot(df_matched_FC, aes(x = Signature, y = HippoVol, color = falls, shape = falls))+
geom_point(show.legend = FALSE)+geom_smooth(method = "lm", show.legend = TRUE)+ylab("Hippocampal Volume")+
xlab("Global Rs-Fc Signature")+
scale_color_manual(values=c("#9ecae1","#3182bd"))+xlim(c(0.4, 1.4))+
scale_shape_manual(values = c(1, 2)) +
annotate("label", x = 1.25, y = -1.42, size = 3,
label = "Overall Correlation \n R = 0.226; p = 0.046\nParticipants do not differ by Fall Status\n p = 0.31") +
theme_classic()
# NOTE(review): p2 here is still the hippocampal-volume BOXPLOT from the
# previous section — the scatter plot above was never assigned. Confirm
# which panel was intended in this arrangement.
grid.arrange(p1, p2, nrow = 2)
# Amyloid vs the two structural measures, by fall status.
p1<-ggplot(df_matched_FC, aes(y = PUP_fSUVR_rsf_TOT_CORTMEAN, x = ADsig, color = falls, shape = falls))+
geom_point(show.legend = FALSE)+geom_smooth(method = "lm", show.legend = FALSE)+xlab("AD Signature Volume")+
ylab("Cortical Amyloid Accumulation")+
scale_color_manual(values=c("#9ecae1","#3182bd"))+xlim(c(-1, 1))+
scale_shape_manual(values = c(1, 2))
p2<-ggplot(df_matched_FC, aes(y = PUP_fSUVR_rsf_TOT_CORTMEAN, x = HippoVol, color = falls, shape = falls))+
geom_point(show.legend = FALSE)+geom_smooth(method = "lm", show.legend = TRUE)+xlab("Hippocampal Volume")+
ylab("Cortical Amyloid Accumulation")+
scale_color_manual(values=c("#9ecae1","#3182bd"))+xlim(c(-1, 1))+
scale_shape_manual(values = c(1, 2)) + theme(legend.position = "bottom")
grid.arrange(p1, p2, nrow = 2)
# Combined figure: amyloid, signature, and hippocampal-volume boxplots.
grid.arrange(AmyloidPlot, SignaturePlot, HippoVol, nrow = 1)
# Nested-model comparisons (anova on lm fits): for each outcome, test whether
# dropping the falls x amyloid interaction or either main effect worsens fit.
model.null<-lm(PUP_fSUVR_rsf_TOT_CORTMEAN ~ falls + PIBpos + falls:PIBpos + GENDER + apoe4 + EDUC + age, data = df_matched_FC)
model.new<-lm(PUP_fSUVR_rsf_TOT_CORTMEAN ~ PIBpos + GENDER + apoe4 + EDUC + age, data = df_matched_FC)
anova(model.null, model.new)
model.new<-lm(PUP_fSUVR_rsf_TOT_CORTMEAN ~ falls + GENDER + apoe4 + EDUC + age, data = df_matched_FC)
anova(model.null, model.new)
model.new<-lm(PUP_fSUVR_rsf_TOT_CORTMEAN ~ falls + PIBpos + GENDER + apoe4 + EDUC + age, data = df_matched_FC)
anova(model.null, model.new)
model.null<-lm(Signature ~ falls + PIBpos + falls:PIBpos + GENDER + apoe4 + EDUC + age, data = df_matched_FC)
model.new<-lm(Signature ~ PIBpos + GENDER + apoe4 + EDUC + age, data = df_matched_FC)
anova(model.null, model.new)
model.new<-lm(Signature ~ falls + GENDER + apoe4 + EDUC + age, data = df_matched_FC)
anova(model.null, model.new)
model.new<-lm(Signature ~ falls + PIBpos + GENDER + apoe4 + EDUC + age, data = df_matched_FC)
anova(model.null, model.new)
model.null<-lm(HippoVol ~ falls + PIBpos + falls:PIBpos + GENDER + apoe4 + EDUC + age, data = df_matched_FC)
model.new<-lm(HippoVol ~ PIBpos + GENDER + apoe4 + EDUC + age, data = df_matched_FC)
anova(model.null, model.new)
model.new<-lm(HippoVol ~ falls + GENDER + apoe4 + EDUC + age, data = df_matched_FC)
anova(model.null, model.new)
model.new<-lm(HippoVol ~ falls + PIBpos + GENDER + apoe4 + EDUC + age, data = df_matched_FC)
anova(model.null, model.new)
#Are people who are PIBpos and have reduced AD sig more likely to fall?
# Cell counts under a median split on the global signature.
# NOTE(review): df_matched_FC$PIBpos was revalued earlier to
# "Amyloid Negative"/"Amyloid Positive"; the == 1 / == 0 comparisons below
# may never match under that coding — confirm which coding this column carries.
length(df_matched_FC[df_matched_FC$Signature < median(df_matched_FC$Signature), "MapID"])
length(df_matched_FC[df_matched_FC$Signature < median(df_matched_FC$Signature) & df_matched_FC$falls == 1, "MapID"])
length(df_matched_FC[df_matched_FC$Signature < median(df_matched_FC$Signature) & df_matched_FC$falls == 0, "MapID"])
length(df_matched_FC[df_matched_FC$Signature < median(df_matched_FC$Signature) & df_matched_FC$PIBpos == 1, "MapID"])
length(df_matched_FC[df_matched_FC$Signature < median(df_matched_FC$Signature) & df_matched_FC$PIBpos == 0, "MapID"])
length(df_matched_FC[df_matched_FC$Signature > median(df_matched_FC$Signature) & df_matched_FC$PIBpos == 1, "MapID"])
length(df_matched_FC[df_matched_FC$Signature > median(df_matched_FC$Signature) & df_matched_FC$falls == 1 & df_matched_FC$PIBpos == 1, "MapID"])
length(df_matched_FC[df_matched_FC$Signature < median(df_matched_FC$Signature) & df_matched_FC$falls == 0 & df_matched_FC$PIBpos == 1, "MapID"])
df_matched_FC$LowSig<-ifelse(df_matched_FC$Signature < median(df_matched_FC$Signature), 1, 0)
#There are 22 PIB positive people
#out of the PIB positive participants, 14 have low signature. 9 of them fell, 5 did not.
#out of the PIB positive participants, 8 have high signature. 3 fell. 5 did not.
# NOTE(review): this first call compares 9/14 vs 5/14, which are complementary
# proportions of the same 14 people — confirm the intended contrast
# (perhaps 9/14 vs 3/8, low- vs high-signature fall rates).
prop.test(x = c(9, 5), n = c(14, 14))
prop.test(x = c(9, 5, 3, 5), n = c(22, 22, 22, 22))
# Chi-square tests on the LowSig x falls / LowSig x PIBpos tables.
prop.test(table(df_matched_FC[df_matched_FC$PIBpos == 1, "LowSig"], df_matched_FC[df_matched_FC$PIBpos == 1, "falls"]), correct = FALSE)
prop.test(table(df_matched_FC[df_matched_FC$falls == 1, "LowSig"], df_matched_FC[df_matched_FC$falls == 1, "PIBpos"]), correct = FALSE)
#82 people. 22 are PIB positive.
|
e6bc3c9b1731cbb9a6e430d8b50b7f547d267313
|
c76122b42df71a68def61684d92b54ada23338cd
|
/nfl_playcalls.R
|
6e995dce70b5587349bbed5a5be32c322243150a
|
[] |
no_license
|
marius-oetting/NFL-Play-Call-Predictions
|
546f5f5f5753829c0b8dd915c209d24024b2b6bb
|
97813ecf8afd04ec72a929e69eba465bce5ff487
|
refs/heads/master
| 2021-06-17T10:40:09.826089
| 2021-02-09T08:20:25
| 2021-02-09T08:20:25
| 161,164,452
| 0
| 0
| null | null | null | null |
ISO-8859-2
|
R
| false
| false
| 17,741
|
r
|
nfl_playcalls.R
|
### R code for the paper
### "Predicting play-calls in the National Football League using hidden Markov models"
### author: Marius Ötting
## load packages
library(dplyr)
library(ggplot2)
library(lemon)
# import data
# Play-by-play data; columns used below include PlayType.new, down, ydstogo,
# shotgun, nohuddle, ScoreDiff, posteam/defteam, game_id, season, play_type.
all.teams.df <- read.csv("nfl_data.csv")
# figures -----------------------------------------------------------------
# Colorblind-friendly palette used throughout the figures.
cbbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
## Figure 1
plot.shotgun <- all.teams.df %>% ggplot(aes(x = down, y = PlayType.new, fill = factor(shotgun))) +
geom_bar(position = "dodge", stat = "summary", fun.y = "mean") + ylab("pass proportion") +
scale_fill_manual(name = "Shotgun formation",
labels = c("no",
"yes"),
values = cbbPalette[6:7]) + theme_minimal() +
theme(text = element_text(size = 22))
plot.shotgun
## Figure 2
# Categorize the score difference into four bins and build one dummy per bin.
# BUG FIX: cut()'s default labels contain no spaces (e.g. "(8,Inf]"), but the
# original compared ScoreDiff4 against "(8, Inf]" (with a space), which can
# never match — so ScoreDiff4 was always 0. The label strings below now match
# cut()'s actual output exactly.
all.teams.df <- all.teams.df %>% mutate(ScoreDiffCat = cut(ScoreDiff, breaks = c(-Inf, -8, 0, 8, Inf))) %>%
  mutate(ScoreDiff1 = if_else(ScoreDiffCat == "(-Inf,-8]", 1, 0)) %>%
  mutate(ScoreDiff2 = if_else(ScoreDiffCat == "(-8,0]", 1, 0)) %>%
  mutate(ScoreDiff3 = if_else(ScoreDiffCat == "(0,8]", 1, 0)) %>%
  mutate(ScoreDiff4 = if_else(ScoreDiffCat == "(8,Inf]", 1, 0))
# Pass proportion by yards-to-go (capped at 25), one line per score-difference
# category; geom_pointline() comes from the lemon package.
plot.scorediff <- all.teams.df %>% group_by(ydstogo, ScoreDiffCat) %>% summarize(meanpass = mean(PlayType.new), count = n()) %>%
filter(ydstogo <= 25) %>%
ggplot(aes(x = ydstogo, y = meanpass, color = ScoreDiffCat)) +
geom_pointline() + ylab("pass proportion") + xlab("yards to go") +
scale_color_manual(name = "Categorized\nscore difference",
labels = c(expression(phantom(x) < "-7"),
expression(paste(" ", phantom(x) >= "-7", " &", phantom(x) <= "0")),
expression(paste(" ", phantom(x) > "0", " &", phantom(x) <= "7")),
expression(phantom(x) > "7")),
values = cbbPalette) + theme_minimal() +
theme(text = element_text(size = 22))
plot.scorediff
# model fitting -----------------------------------------------------------
# First, several functions are defined:
# 1) the HMM likelihood
# 2) a function to fit the model using nlm()
# 3) two functions for forecasting
# 4) a function which performs data preprocessing, model fitting, AIC forward
#    selection, and forecasting for a selected team
# TODO!!
# Using these functions, models are fitted to each team separately (see below),
# and summary statistics on the forecasts are computed.
# Negative log-likelihood of an N-state hidden Markov model for binary play
# calls (PlayType.new: 1 = pass, 0 = run), with transition probabilities that
# depend on covariates through multinomial logit links.
#
#   theta.star : unconstrained working parameters, packed as
#                [transition betas | state logit pass probs | initial-dist logits]
#   x          : list of data frames (one per match) containing PlayType.new
#                and the covariate columns
#   N          : number of hidden states
#   covariates : character vector of covariate names; an intercept column is
#                prepended internally
#
# Returns the NEGATIVE total log-likelihood over all matches (for nlm()).
L.choosecov <- function(theta.star, x, N, covariates){
# +1 for the intercept column prepended to the covariate matrix.
nr.covariates <- length(covariates) + 1 # 10 + 1
# Transition coefficients: one row per off-diagonal entry of Gamma.
beta <- matrix(theta.star[1:((N - 1) * N * nr.covariates)], nrow = N * (N - 1), ncol = nr.covariates, byrow = FALSE)
# State-dependent Bernoulli (pass) probabilities on the logit scale.
pis.beta <- theta.star[((N - 1) * N * nr.covariates + 1):(((N - 1) * N * nr.covariates) + N)]
# Initial state distribution via softmax, last state fixed as reference.
delta <- exp(c(theta.star[(((N - 1) * N * nr.covariates) + N + 1):(((N - 1) * N * nr.covariates) + N + N - 1)], 0)) /
sum(exp(c(theta.star[(((N - 1) * N * nr.covariates) + N + 1):(((N - 1) * N * nr.covariates) + N + N - 1)], 0)))
l.all <- 0
# Accumulate the log-likelihood over matches via the forward algorithm.
for(k in 1:length(x)){
idx.covariates <- which(colnames(x[[k]]) %in% covariates)
idx.response <- which(colnames(x[[k]]) == "PlayType.new")
# NOTE(review): zwerg is rebuilt from the whole list x on every iteration,
# but only element k is used — redundant (though harmless) work.
zwerg <- lapply(x, "[", c(idx.covariates, idx.response)) # response is included
covariate.mat <- zwerg[[k]][!names(zwerg[[k]]) %in% c("PlayType.new")]
covariate.mat <- covariate.mat[, order(match(colnames(covariate.mat), covariates))] # sort columns
covariate.mat <- cbind(c(rep(1, nrow(zwerg[[k]]))), covariate.mat) # response is not included
covariate.mat <- as.matrix(covariate.mat)
ind <- which(!is.na(zwerg[[k]]$PlayType.new))
# State-dependent observation probabilities; rows with NA response keep 1
# so that missing observations do not contribute to the likelihood.
allprobs <- matrix(1, nrow(zwerg[[k]]), N)
for(j in 1:N){
allprobs[ind, j] <- dbinom(zwerg[[k]]$PlayType.new[ind], size = 1, plogis(pis.beta[j]))
}
# Scaled forward recursion (rescaling avoids numerical underflow).
foo <- delta %*% diag(allprobs[1,])
l <- log(sum(foo))
phi <- foo/sum(foo)
for(t in 2:nrow(zwerg[[k]])){
# Covariate-dependent transition matrix for play t.
eta <- as.vector(beta %*% covariate.mat[t,])
Gamma <- diag(N)
Gamma[!Gamma] <- exp(eta)
Gamma <- Gamma/rowSums(Gamma)
foo <- phi %*% Gamma %*% diag(allprobs[t,])
l <- l + log(sum(foo))
phi <- foo/sum(foo)
}
l.all <- l.all + l
}
return(-l.all)
}
# Maximum-likelihood fitting of the covariate-dependent HMM by numerically
# minimising the negative log-likelihood L.choosecov() with nlm().
#
#   theta      : starting values for the working parameter vector
#   N          : number of hidden states
#   data       : list of per-match data frames
#   covariates : character vector of covariate column names
#
# Returns a list of back-transformed estimates:
#   beta    - transition-probability coefficients
#   pis     - state-dependent pass probabilities (inverse-logit scale)
#   delta   - initial state distribution (softmax, last state as reference)
#   wp      - raw working-parameter estimates
#   AIC     - 2 * (negative log-likelihood + number of parameters)
#   llk     - the minimised NEGATIVE log-likelihood
#   hessian - Hessian of the negative log-likelihood at the optimum
mle <- function(theta, N, data, covariates){
  theta.star <- theta
  # BUG FIX: the original passed hessian = FALSE to nlm() but then read
  # mod$hessian, which is NULL in that case. Request the Hessian so the
  # returned 'hessian' element is actually populated (e.g. for standard
  # errors via the inverse Hessian).
  mod <- nlm(L.choosecov, theta.star, x = data, N = N, covariates = covariates,
             print.level = 2, iterlim = 10000, hessian = TRUE)
  hess.mat <- mod$hessian
  result <- mod$estimate
  nr.covariates <- length(covariates) + 1
  list(beta = matrix(result[1:((N - 1) * N * nr.covariates)], nrow = N * (N - 1), ncol = nr.covariates, byrow = FALSE),
       pis = plogis(result[((N - 1) * N * nr.covariates + 1):(((N - 1) * N * nr.covariates) + N)]),
       delta = c(exp(c(result[(((N - 1) * N * nr.covariates) + N + 1):(((N - 1) * N * nr.covariates) + N + N - 1)], 0)) /
                   sum(exp(c(result[(((N - 1) * N * nr.covariates) + N + 1):(((N - 1) * N * nr.covariates) + N + N - 1)], 0)))),
       wp = mod$estimate, AIC = 2 * (mod$minimum + length(mod$estimate)), llk = mod$minimum, hessian = hess.mat)
}
# Forecast distribution of the play call over the support xf (e.g. 0:1) for
# every play of every match in x.new, under the fitted model 'mod'.
#
#   xf         : support of the response (vector of possible outcomes)
#   x.new      : list of per-match data frames (test data)
#   mod        : fitted model list as returned by mle()
#   covariates : covariate names used when fitting
#
# Returns a matrix with one row per play: length(xf) forecast probabilities
# plus a final column identifying the match index k.
#
# NOTE(review): the state distribution is propagated through the transition
# matrices WITHOUT conditioning on the observed responses (allprobs is built
# but never used), i.e. these appear to be unconditional forecasts — confirm
# this is intended. Also relies on the global N (not a parameter).
hmm.forecast <- function(xf, x.new, mod, covariates = selected.covariates){
n <- length(x.new)
nr.plays <- sum(unlist(lapply(x.new, nrow)))
nxf <- length(xf)
# Result accumulator; the initial all-NA row is dropped at the end.
dxf.fin <- matrix(ncol = 3)
for(k in 1:n){
dxf <- matrix(0, nrow = nrow(x.new[[k]]), ncol = nxf)
# NOTE(review): allprobs, ind, and lscale below are computed but unused here.
allprobs <- matrix(1, nrow(x.new[[k]]), N)
ind <- which(!is.na(x.new[[k]]$PlayType.new))
# covariates
idx.covariates <- which(colnames(x.new[[k]]) %in% covariates)
idx.response <- which(colnames(x.new[[k]]) == "PlayType.new")
zwerg <- lapply(x.new, "[", c(idx.covariates, idx.response)) # response is included
covariate.mat <- zwerg[[k]][!names(zwerg[[k]]) %in% c("PlayType.new")]
covariate.mat <- covariate.mat[, order(match(colnames(covariate.mat), covariates))] # sort columns
covariate.mat <- cbind(c(rep(1, nrow(zwerg[[k]]))), covariate.mat) # response is not included
covariate.mat <- as.matrix(covariate.mat)
# covariate effects
beta <- mod$beta
# Start from the fitted initial state distribution.
foo <- mod$delta
sumfoo <- sum(foo)
lscale <- log(sumfoo)
foo <- foo/sumfoo
# Forecast for the first play: mixture of state-dependent Bernoullis.
for(j in 1:N){
dxf[1,] <- dxf[1,] + foo[j] * dbinom(xf, size = 1, prob = mod$pis[j])
}
for(i in 2:nrow(x.new[[k]])){
# Covariate-dependent transition matrix for play i.
eta <- as.vector(beta %*% covariate.mat[i,])
Gamma1 <- diag(N)
Gamma1[!Gamma1] <- exp(eta)
Gamma1 <- Gamma1/rowSums(Gamma1)
foo <- foo %*% Gamma1
for(j in 1:N){
dxf[i,] <- dxf[i,] + foo[j] * dbinom(xf, size = 1, prob = mod$pis[j])
}
}
# Tag rows with the match index and append to the accumulator.
dxf <- cbind(dxf, k)
dxf.fin <- rbind(dxf.fin, dxf)
}
dxf.fin <- dxf.fin[-1, ]
return(dxf.fin)
}
# H-step-ahead forecast of the play-call distribution for ONE match.
# A scaled forward filter is run over the first n observed plays (conditioning
# on the observed responses), then the filtered state distribution is
# propagated H steps through the covariate-dependent transition matrices.
#
#   xf : support of the response; x.new : one match's data frame;
#   mod : fitted model from mle(); H : forecast horizon;
#   n : number of plays conditioned on; N : number of states;
#   covariates : covariate names used when fitting.
# Requires nrow(x.new) >= n + H (covariates must exist for forecast plays).
#
# Returns an H x length(xf) matrix of forecast probabilities.
hmm.forecast.step <- function(xf, x.new, mod, H = 1, n, N, covariates = selected.covariates){
# input: one time series, i.e. one match
nxf <- length(xf)
# NOTE(review): allocated with nxf columns but indexed by state j in 1:N
# below — only safe when length(xf) == N (e.g. xf = 0:1 with N = 2).
allprobs <- matrix(1, n, nxf)
# covariates
idx.covariates <- which(colnames(x.new) %in% covariates)
idx.response <- which(colnames(x.new) == "PlayType.new")
zwerg <- x.new[, c(idx.covariates, idx.response)] # response is included
covariate.mat <- zwerg[!names(zwerg) %in% c("PlayType.new")]
covariate.mat <- covariate.mat[, order(match(colnames(covariate.mat), covariates))] # sort columns
covariate.mat <- cbind(c(rep(1, nrow(zwerg))), covariate.mat) # response is not included
covariate.mat <- as.matrix(covariate.mat)
# covariate effects
beta <- mod$beta
# State-dependent observation probabilities for the conditioning plays.
for(j in 1:N){
allprobs[, j] <- dbinom(x.new$PlayType.new[1:n], size = 1, mod$pis[j])
}
# Scaled forward filter over plays 1..n.
foo <- mod$delta %*% diag(allprobs[1, ])
sumfoo <- sum(foo)
lscale <- log(sumfoo)
foo <- foo / sumfoo
for(i in 2:n){
eta <- as.vector(beta %*% covariate.mat[i,])
Gamma1 <- diag(N)
Gamma1[!Gamma1] <- exp(eta)
Gamma1 <- Gamma1 / rowSums(Gamma1)
foo <- foo %*% Gamma1 * allprobs[i, ]
sumfoo <- sum(foo)
lscale <- lscale + log(sumfoo)
foo <- foo / sumfoo
}
# Propagate the filtered state distribution H steps ahead; mix the
# state-dependent Bernoullis to get the forecast play-call distribution.
xi <- matrix(NA, nrow = N, ncol = H)
allprobs.new <- matrix(0, nrow = H, ncol = nxf)
for(i in (n + 1):(n + H)){
eta <- as.vector(beta %*% covariate.mat[i,])
Gamma1 <- diag(N)
Gamma1[!Gamma1] <- exp(eta)
Gamma1 <- Gamma1 / rowSums(Gamma1)
foo <- foo %*% Gamma1
xi[, (i - n)] <- foo
for(j in 1:N){
allprobs.new[(i - n), ] <- allprobs.new[(i - n),] + foo[j] * dbinom(xf, size = 1, mod$pis[j])
}
}
return(allprobs.new)
}
#' Fit a hidden Markov model with covariate-dependent transition
#' probabilities to one team's offensive play calling, select covariates by
#' forward AIC search on the training seasons, and evaluate one-step-ahead
#' forecasts on the held-out 2018/19 season.
#'
#' Depends on objects defined elsewhere in this script: `all.teams.df`,
#' `mle()`, `hmm.forecast.step()` and `hmm.forecast()`.
#'
#' @param team character team abbreviation, e.g. "SF"
#' @param N integer number of hidden states; defaults to 2, the value that
#'   was previously hard-coded inside the function
#' @return one-row data.frame: team, number of forecasted plays, prediction
#'   accuracy, and precision/recall for pass and run
fit.team <- function(team, N = 2){
  akt.team <- team
  # all games the team took part in, restricted to its own offensive plays
  gameid.49 <- unique(all.teams.df[which(all.teams.df$posteam == akt.team | all.teams.df$defteam == akt.team), ]$game_id)
  data.team <- all.teams.df %>% filter(game_id %in% gameid.49)
  data.team <- data.team %>% filter(posteam == akt.team)
  data.team$GameIDDrive <- paste0(data.team$game_id, data.team$drive)
  # running-mean yards gained per run / per pass within each match
  gameids <- unique(data.team$game_id)
  data.team$prev_run <- NA
  data.team$prev_pass <- NA
  zwerg.list <- list()
  for(i in seq_along(gameids)){
    cur.match <- data.team %>% filter(game_id == gameids[i])
    cur.match$prev_pass[1] <- 0
    cur.match$prev_run[1] <- 0
    for(j in 2:nrow(cur.match)){
      zwerg_prev_run <- cur.match[1:(j - 1), ] %>% filter(play_type == "run") %>% .[["yards_gained"]]
      if(length(zwerg_prev_run) == 0) cur.match$prev_run[j] <- 0
      else cur.match$prev_run[j] <- mean(zwerg_prev_run)
      zwerg_prev_pass <- cur.match[1:(j - 1), ] %>% filter(play_type == "pass") %>% .[["yards_gained"]]
      if(length(zwerg_prev_pass) == 0) cur.match$prev_pass[j] <- 0
      else cur.match$prev_pass[j] <- mean(zwerg_prev_pass)
    }
    zwerg.list[[i]] <- cur.match
  }
  data.team <- bind_rows(zwerg.list) %>% as.data.frame()
  # one time series = one match
  data.list <- split(data.team, f = data.team$game_id)
  # remove time series with fewer than 3 observations
  # (fix: the names of `data.list` are game_ids, so the rows must be
  # filtered on game_id as well -- the previous comparison against
  # GameIDDrive could never match and removed nothing)
  idx.delete <- sapply(data.list, nrow) < 3
  idx.delete <- names(which(idx.delete == TRUE))
  data.team <- data.team[!(data.team$game_id %in% idx.delete), ]
  # play type as numeric response: 1 = pass, 0 = run
  data.team$PlayType.new <- ifelse(data.team$play_type == "pass", 1, 0)
  # generate scaled covariates
  data.team$ScoreDiffscale <- as.vector(scale(data.team$ScoreDiff))
  data.team$ydstogoscale <- as.vector(scale(data.team$ydstogo))
  data.team$downscale <- as.vector(scale(data.team$down))
  data.team$ydstogo_down <- data.team$ydstogo * data.team$down
  data.team$ydstogo_down_scale <- as.vector(scale(data.team$ydstogo_down))
  data.team$prev_pass_scale <- as.vector(scale(data.team$prev_pass))
  data.team$prev_run_scale <- as.vector(scale(data.team$prev_run))
  # dummy variables for downs 1-3
  data.team$down1 <- ifelse(data.team$down == 1, 1, 0)
  data.team$down2 <- ifelse(data.team$down == 2, 1, 0)
  data.team$down3 <- ifelse(data.team$down == 3, 1, 0)
  # generate interaction columns
  data.team$interact_down1_yds <- data.team$down1 * data.team$ydstogoscale
  data.team$interact_down2_yds <- data.team$down2 * data.team$ydstogoscale
  data.team$interact_down3_yds <- data.team$down3 * data.team$ydstogoscale
  data.team$interact_shotgun_ydstogo <- data.team$shotgun * data.team$ydstogoscale
  data.team$interact_nohuddle_ScoreDiff <- data.team$nohuddle * data.team$ScoreDiffscale
  data.team$interact_nohuddle_shotgun <- data.team$nohuddle * data.team$shotgun
  # test data = season 2018/19; training data = all earlier seasons
  data.team.18.short <- data.team[data.team$season == "2018/19", ]
  data.team <- data.team[data.team$season != "2018/19", ]
  data.list <- split(data.team, f = data.team$game_id)
  data.list.18 <- split(data.team.18.short, f = data.team.18.short$game_id)
  ### forward selection
  considered.covariates <- c("ScoreDiffscale", "ydstogoscale", "shotgun", "down1", "down2", "down3", "Home", "yardline90",
                             "interact_down1_yds", "interact_down2_yds", "interact_down3_yds", "interact_shotgun_ydstogo",
                             "nohuddle", "interact_nohuddle_ScoreDiff", "interact_nohuddle_shotgun")
  aic.vec <- rep(NA, length(considered.covariates) + 1)
  # step 0: fit model without any covariates
  selected.covariates <- c()
  nr.covariates <- length(selected.covariates) + 1
  theta.star <- c(runif(N * (N - 1) * nr.covariates, -0.1, 0.1), qlogis(runif(N, 0, 1)), qlogis(runif(N - 1, 0, 1)))
  mod <- mle(theta = theta.star, N = N, data = data.list, covariates = selected.covariates)
  aic.vec[1] <- mod$AIC
  best.aic <- mod$AIC  # AIC of the best model accepted so far
  # step i: add covariate i and keep it only if the AIC improves on the
  # best model found so far
  # (fix: the previous code compared against the AIC of the immediately
  # preceding candidate, which may have belonged to a rejected model)
  for(i in seq_along(considered.covariates)){
    selected.covariates <- c(selected.covariates, considered.covariates[i])
    # choose initial values
    nr.covariates <- length(selected.covariates) + 1
    theta.star <- c(runif(N * (N - 1) * nr.covariates, -0.1, 0.1), qlogis(runif(N, 0, 1)), qlogis(runif(N - 1, 0, 1)))
    # fit model
    mod <- tryCatch(mle(theta = theta.star, N = N, data = data.list, covariates = selected.covariates),
                    error = function(e) NA)
    # if nlm failed, run again with different starting values
    if(is.na(mod[1])){
      for(a in 1:5){
        theta.star <- c(runif(N * (N - 1) * nr.covariates, -0.1, 0.1), qlogis(runif(N, 0, 1)), qlogis(runif(N - 1, 0, 1)))
        mod <- tryCatch(mle(theta = theta.star, N = N, data = data.list, covariates = selected.covariates),
                        error = function(e) NA)
        if(!is.na(mod[1])) break
      }
    }
    # a model that never converged counts as "no improvement"
    cand.aic <- if(is.na(mod[1])) Inf else mod$AIC
    aic.vec[i + 1] <- cand.aic
    if(cand.aic < best.aic) best.aic <- cand.aic
    else selected.covariates <- selected.covariates[selected.covariates != considered.covariates[i]]
  }
  ### fit final model with the selected covariates
  nr.covariates <- length(selected.covariates) + 1
  theta.star <- c(runif(N * (N - 1) * nr.covariates, -0.1, 0.1), qlogis(runif(N, 0, 1)), qlogis(runif(N - 1, 0, 1)))
  mod <- mle(theta = theta.star, N = N, data = data.list, covariates = selected.covariates)
  ### one-step-ahead forecasts for the held-out season
  nr.matches <- length(data.list.18)
  forecast.all <- matrix(NA, ncol = 3)
  for(i in 1:nr.matches){
    cur.match <- data.list.18[[i]]
    forecast.probs.match <- matrix(NA, nrow = nrow(cur.match), ncol = 2)
    for(j in 2:(nrow(cur.match) - 1)){
      forecast.probs.step <- hmm.forecast.step(c(0:1), cur.match, mod = mod, H = 1, n = j, N = N, covariates = selected.covariates)
      forecast.probs.match[j + 1, ] <- forecast.probs.step
    }
    # the first two plays of a match cannot be forecast step-wise
    forecast.probs.firsttwo <- hmm.forecast(c(0:1), data.list.18, mod = mod, covariates = selected.covariates) %>%
      as.data.frame() %>% filter(k == i) %>% head(2) %>% select(V1, V2)
    forecast.probs.match[1:2, ] <- forecast.probs.firsttwo %>% as.matrix()
    forecast.probs.match <- cbind(forecast.probs.match, i)
    forecast.all <- rbind(forecast.all, forecast.probs.match)
    # dynamically update forecasts throughout the season by refitting with
    # the newly observed match added to the training data
    data.list[[length(data.list) + 1]] <- cur.match
    mod <- mle(theta = theta.star, N = N, data = data.list, covariates = selected.covariates)
  }
  forecast.all <- forecast.all[-1, ]  # drop the all-NA initialisation row
  forecast.plays <- apply(forecast.all[, 1:2], 1, which.max)
  forecast.plays[forecast.plays == 1] <- 0  # column 1 = run
  forecast.plays[forecast.plays == 2] <- 1  # column 2 = pass
  true.play <- data.team.18.short$PlayType.new
  # prediction accuracy
  pred.accuracy.step <- length(which(forecast.plays == true.play)) / length(na.omit(true.play))
  # precision pass
  prec.pass <- sum(forecast.plays == 1 & true.play == 1) / (sum(forecast.plays == 1 & true.play == 1) + sum(forecast.plays == 1 & true.play == 0))
  # precision run
  prec.run <- sum(forecast.plays == 0 & true.play == 0) / (sum(forecast.plays == 0 & true.play == 0) + sum(forecast.plays == 0 & true.play == 1))
  # recall pass
  reca.pass <- sum(forecast.plays == 1 & true.play == 1) / (sum(forecast.plays == 1 & true.play == 1) + sum(forecast.plays == 0 & true.play == 1))
  # recall run
  reca.run <- sum(forecast.plays == 0 & true.play == 0) / (sum(forecast.plays == 0 & true.play == 0) + sum(forecast.plays == 1 & true.play == 0))
  # number of forecasted observations
  nr.obs <- length(forecast.plays)
  zwerg.team <- data.frame(akt.team, nr.obs, pred.accuracy.step, prec.pass, prec.run, reca.pass, reca.run)
  return(zwerg.team)
}
### loop over all teams
# Fit the model and collect forecast summary statistics for every team.
all.teams <- all.teams.df$home_team %>% as.character() %>% unique
# result container; the initial all-NA row is a placeholder that remains
# after rbind-ing the per-team rows below
res <- data.frame(team = NA, count = NA, pred.acc = NA, precision.pass = NA, precision.run = NA,
                  recall.pass = NA, recall.run = NA)
set.seed(123)
for(i in 1:length(all.teams)){
  N <- 2
  cur.team <- all.teams[i]
  # NOTE(review): fit.team() reassigns N internally, so this N <- 3
  # appears to have no effect for BUF/DET/CHI -- confirm intent
  if(cur.team == "BUF" | cur.team == "DET" | cur.team == "CHI") N <- 3
  zwerg <- fit.team(cur.team)
  colnames(zwerg) <- c("team", "count", "pred.acc", "precision.pass", "precision.run", "recall.pass", "recall.run")
  res <- rbind(res, zwerg)
  print(paste("Finished iteration", i))
}
### "res" includes summary statistics on the prediction accuracy etc. for all teams
View(res)
|
5c5a3342c9c7d5b9b66513930f3c4b93530489be
|
acb0fffc554ae76533ba600f04e4628315b1cd95
|
/R/MergeSurfaceYSIChemistry.R
|
f5d281f89b33c938065e9f1aae6e117c2e50704c
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
lukeloken/USBRDelta
|
83826e12a5b5a2e81adeb2119e9c2599a5f8b870
|
fd6569385776d4579748b6422b5153e64606e0ba
|
refs/heads/master
| 2021-06-09T19:08:01.976985
| 2020-05-28T21:51:10
| 2020-05-28T21:51:10
| 145,152,807
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,117
|
r
|
MergeSurfaceYSIChemistry.R
|
# Code to extract surface water measurements from profile data at fixed
# sites and merge newly read nutrient chemistry with the compiled YSI
# surface-water table.
library(readxl)
library(plyr)
library(dplyr)
library(viridis)
library(lubridate)
library(ggplot2)
library(gridExtra)
source('R/read_excel_allsheets.R')
source('R/g_legend.R')
# Project folder where outputs are stored
dropbox_dir<-'C:/Dropbox/USBR Delta Project'
# Where data come from
google_dir<-'C:/GoogleDrive/DeltaNutrientExperiment'
# previously compiled YSI surface measurements
bigdf <- read.csv(file=paste0(dropbox_dir, '/Data/SurfaceChemistry/YSISurface.csv'), stringsAsFactors = F)
bigdf$Date<-as.Date(bigdf$Date, format='%Y-%m-%d')
nutrientname<-"SSC Nutrients March 2019.xlsx"
# The data file has a two-row header with mixed units/labels, so it is read
# twice (skipping one header row each time) to rebuild combined column names.
# fix: paste0() has no 'sep' argument, so the former sep='' was silently
# concatenated as an extra empty string; it has been removed.
df1<-read_excel(paste0(google_dir, '/Data/WaterQuality/', nutrientname), skip=1)
df2<-read_excel(paste0(google_dir, '/Data/WaterQuality/', nutrientname), skip=0)
# clean the two header fragments and paste them together
names1A<-gsub("\\_.*","",names(df1))
names1B<-gsub("X", '', names1A)
names1C<-gsub("2012", '', names1B)
names2A<-gsub("\\X.*","",names(df2))
names<-paste0(names2A, names1C)
names(df1)<-names
# drop empty/duplicated columns and standardize the remaining names
df_sub<-df1[,-which(names(df1) %in% c("", "Chlo+Pheoppb", "Pre-HClPost-HCl"))]
names(df_sub)[which(names(df_sub)=="Chlo+Pheo__1ppb")]<-"Chlo+Pheoppb"
names(df_sub)[which(names(df_sub)=="location")]<-'Station'
names(df_sub)[which(names(df_sub)=="pH")]<-'pH_WQ'
# keep only rows with both a date and a lab number
df_sub_noNA<-df_sub[!is.na(df_sub$Date) & !is.na(df_sub$`Lab #`),]
unique(df_sub_noNA$Station)
# stations of interest; station labels are normalized below to match bigdf
goodstations<-c('NL 16', 'NL 34', 'NL 44', 'NL 56', 'NL 62', 'NL 64', 'NL 66', 'NL 70', 'NL 74', 'NL 76', 'NL 84', 'WSP', 'PS', 'RB 34')
df_stations<-df_sub_noNA[df_sub_noNA$Station %in% goodstations,]
df_stations$Station<-gsub('PS', 'Pro', df_stations$Station)
df_stations$Station<-gsub('NL ', '', df_stations$Station)
df_stations$Station<-gsub('RB ', '', df_stations$Station)
df_stations$Date<-as.Date(df_stations$Date, format='%Y-%m-%d')
# merge the new chemistry rows with the compiled YSI table and write out
merge_df<-full_join(df_stations, bigdf)
head(merge_df)
write.csv(merge_df, file=paste0(dropbox_dir, '/Data/SurfaceChemistry/YSIChemSurface.csv'), row.names=F)
|
65699b8388abc14e517c082c730140481633fb4f
|
d460e5e5f143aaaf2c2e783031cb35cf0ed8e865
|
/plot4.R
|
884482ddee1d135a90d0247d99aad8af00cdd754
|
[] |
no_license
|
azambranog/ExData_Plotting1
|
f1fceb136418d41c2abf180cb1b4a14123e0e985
|
acaf1d10f4a9d8fdc07cc34a871376b60271de60
|
refs/heads/master
| 2021-01-09T08:54:36.291976
| 2014-05-11T14:28:51
| 2014-05-11T14:28:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,426
|
r
|
plot4.R
|
# This code generates 4 plots in a figure from household power consumption
# data for 2007-02-01 and 2007-02-02. The result is stored in plot4.png
#get all data
data<-read.table("household_power_consumption.txt",
                 header=T,
                 sep=";",
                 na.strings="?",
                 stringsAsFactors=F)
#get necessary subset: the two days of interest (dates are in d/m/yyyy format)
data<-data[data$Date %in% c("1/2/2007","2/2/2007"),]
#create new column that has date and time format together in adequate date format
data$DateTime<-paste(data$Date,data$Time) #new column
#transform to time
# NOTE(review): strptime() returns POSIXlt; storing it as a data.frame
# column works here but as.POSIXct() would be the more robust choice -- confirm
data$DateTime<-strptime(data$DateTime,format="%d/%m/%Y %H:%M:%S")
#save and do plot
png("plot4.png") #open device png default size is what we need
par(mfrow = c(2, 2)) # 2 x 2 grid of panels, filled row-wise
with(data, {
  # top-left: global active power over time
  plot(DateTime, Global_active_power,
       main = "",
       xlab="",
       ylab="Global Active Power",
       type="l")
  # top-right: voltage over time
  plot(DateTime, Voltage,
       main = "",
       xlab="datetime",
       type="l")
  # bottom-left: the three sub-metering series (columns 7-9) overlaid
  plot(data$DateTime,data[,7],
       main="",
       xlab="",
       ylab="Energy sub metering",
       type="l",
       col="black")
  lines(data$DateTime,data[,8],col="red")
  lines(data$DateTime,data[,9],col="blue")
  legend("topright",
         col = c("black","red", "blue"),
         legend = names(data[7:9]),
         lty=1,
         border="white")
  # bottom-right: global reactive power over time
  plot(DateTime,Global_reactive_power,
       main = "",
       xlab="datetime",
       type="l")
})
dev.off() #close device png
|
655607d2a90e836bb7ea8fa35338664f548a0621
|
670b321ea9891c1e6259732b9445dfe573dc25a9
|
/EsercitazioneGIT.R
|
d4c8a938355e0b0b2a6a328cba6d670486b3d2fe
|
[] |
no_license
|
PROVAGITMR/MIA
|
3f34c57b705eec14af017fb8700432250c37befb
|
cfb7ece515bd963f07bdebd883460123c449910b
|
refs/heads/main
| 2023-05-03T05:04:27.401448
| 2021-05-23T09:12:36
| 2021-05-23T09:12:36
| 369,314,091
| 0
| 0
| null | 2021-05-22T14:37:35
| 2021-05-20T19:16:13
|
R
|
UTF-8
|
R
| false
| false
| 94
|
r
|
EsercitazioneGIT.R
|
#' somma: return the sum of two numbers.
#'
#' Fix: the original body reassigned `somma` to a numeric value and then
#' called `cat(somma(1, 2))`; because the local `somma` is not a function,
#' that call resolved to the global `somma` function and recursed
#' indefinitely. The function now simply returns a + b.
#'
#' @param a numeric
#' @param b numeric
#' @return numeric, the sum a + b
somma <- function(a, b) {
  a + b
}
# comment
|
f455155ba1099a6b6e521b84e522a78cd44f826c
|
a53773793496b1034a3194a0c174c9c6a7212c18
|
/waterquality_entryform_draft.R
|
5deed5d7b924c00c7dcf5ebaac0d6443bf24de79
|
[] |
no_license
|
mkleinha/sqlite_interface
|
b5e694b2c3c9601be9d9a68e6956fe8f8e4db4e6
|
57fa35b0fa70bf09302bb429b2478dfb1db2cfec
|
refs/heads/master
| 2023-03-14T06:12:50.777357
| 2021-02-28T15:45:07
| 2021-02-28T15:45:07
| 266,857,199
| 1
| 0
| null | 2020-06-25T13:34:58
| 2020-05-25T18:54:23
|
R
|
UTF-8
|
R
| false
| false
| 43,302
|
r
|
waterquality_entryform_draft.R
|
#' This is script for the Water Quality Date Entry Interface tab of the Acanthonus database
#' This form is for entering data primarily associated with Section 6 and NFWF projects in the Upper Coosa Basin
# author: Maxwell Kleinhans (maxwell.kleinhans@gmail.com) and Phillip Bumpers (bumpersp@uga.edu)
# this line shouldn't be necessary if you're using r 4.0
options(stringsAsFactors = FALSE)
# read in packages
library(shiny)
library(shinyjs)
library(RSQLite)
library(DBI)
current_time <- as.POSIXlt(Sys.time()) # get current local time and date
current_date <- as.Date(current_time)  # date component of current time and date
first_date <- "1995-01-01"             # earliest date for data entry
# path to .sqlite database file
db_path <- "./wq_test.sqlite"
default_style <- "color:black" # CSS for good input values
error_style <- "color:red"     # CSS for bad input values
# maximum and minimum values defining the ranges of inputs allowed by QAQC rules
max_temp <- 40
min_temp <- 0
max_ph <- 14
min_ph <- 0
max_do <- 20
min_do <- 0
max_spc <- 10000
min_spc <- 0
max_turb <- 10000
min_turb <- 0
# fix: the dissolved-nutrient upper bound was defined only under the
# misspelled name 'max_dissovled' while other code in this file references
# 'max_dissolved'; both spellings are defined so every reference resolves
max_dissolved <- 40
max_dissovled <- max_dissolved
min_dissolved <- 0
max_totals <- 40
min_totals <- 0
# lists of input options for the entry form
substrates <- c("not determined", "bedrock", "boulder", "cobble", "gravel", "sand", "silt", "mud", "concrete", "wood", "leaves")
labs <- c("CAIS-UGA", "Analytical Chemistry Lab, Ecology, UGA", "Dalton Utilities", "Laboratory for Environmental Analysis, Hassan Lab, UGA", "NA", "Other")
collect_types <- c("wading", "bucket")
instream_locations <- c("left bank", "right bank", "thalweg", "open channel", "bridge")
flow_types <- c("riffle", "run", "pool", "backwater")
flow_conditions <- c("not determined", "stable-low", "stable-high", "stable-normal", "rising", "falling", "peak")
weather_conditons <- c("heavy rain", "hot", "cold", "sunny", "cloudy", "partly cloudy", "light rain", "snow")
# fix: the UI references the misspelled name 'buffer_conditons', which was
# previously undefined; both spellings are provided
buffer_conditions <- c("cleared", "fringe", "canopy")
buffer_conditons <- buffer_conditions
#usgs_gages<c()  # placeholder: USGS gage list not yet implemented
#' update_hab
#'
#' Validate a set of water-quality field measurements and, if all checks
#' pass, insert them as a new record into the `habitat` table of the SQLite
#' database at `db_path`. Range limits (`max_temp`, `min_temp`, ...) are
#' taken from the globals defined at the top of this script.
#'
#' @param Date character string date in the form yyyy-mm-dd
#' @param Og_Site integer site number (required)
#' @param Observers character string listing collectors of the data
#' @param Temperature_c numeric temperature in degrees Celsius
#' @param pH numeric pH
#' @param Dissolved_Oxyen_mgl numeric dissolved oxygen concentration in mg/L
#'   (the existing spelling is kept for caller compatibility)
#' @param Specific_Conductivity_uscm numeric specific conductivity in uS/cm
#' @param Turbidity_ntu numeric turbidity in NTU
#' @param Dissolved_Nitrate_mgl numeric nitrate in mg/L
#' @param Dissolved_Ammonium_mgl numeric ammonium in mg/L
#' @param Dissolved_Phosphorus_mgl numeric phosphorus in mg/L
#' @param Total_Nitrogen_mgl numeric total nitrogen in mg/L
#' @param Total_Phosphorus_mgL numeric total phosphorus in mg/L
#' @param Calcium_mgL numeric calcium in mg/L
#' @param Magnesium_mgl numeric magnesium in mg/L
#' @param Sodium_mgl numeric sodium in mg/L
#' @param Analytical_lab character string stating where chemical analyses were performed
#' @param Instream_Location character string describing location of sample in the channel
#' @param Collection_Type character string: wading or bucket
#' @param Channel_Width_m integer approximate wetted width of stream in meters
#' @param Flow_Type character string describing flow type: riffle, run, pool, backwater
#' @param Substrate character string describing dominant substrate types
#' @param Stage_Condition character string describing flow condition
#' @param Water_Odor character string describing the presence of stream odor
#' @param Water_Color character string describing color of water
#' @param Weather_Conditions character string describing weather conditions during sampling
#' @param RiverRight_Buffer character string describing river-right riparian buffer
#' @param RiverLeft_Buffer character string describing river-left riparian buffer
#' @param Water_Quality_Notes character string of additional notes recorded
#' @param USGS_Gage_cfs integer discharge from the appropriate USGS gage in cfs
#' @param USGS_Gage_ID integer USGS gage ID for the appropriate USGS gage
#' @param db_path character string path to .sqlite database file
#'
#' @return error messages pasted into a single string separated by line
#'   breaks for printing; an empty string means the record was inserted
update_hab <- function(Date, Og_Site, Observers, Temperature_c, pH, Dissolved_Oxyen_mgl, Specific_Conductivity_uscm, Turbidity_ntu, Dissolved_Nitrate_mgl,
                       Dissolved_Ammonium_mgl, Dissolved_Phosphorus_mgl, Total_Nitrogen_mgl, Total_Phosphorus_mgL, Calcium_mgL, Magnesium_mgl, Sodium_mgl, Analytical_lab,
                       Instream_Location, Collection_Type, Channel_Width_m, Flow_Type, Substrate, Stage_Condition, Water_Odor,
                       Water_Color, Weather_Conditions, RiverRight_Buffer, RiverLeft_Buffer, Water_Quality_Notes, USGS_Gage_cfs, USGS_Gage_ID, db_path){
  # fix: strip a trailing time-zone suffix from the date; the previous code
  # operated on an undefined lowercase `date` object
  Date <- gsub(" *UTC$", "", Date)
  # empty string to which error messages will be pasted
  msg <- ""
  # required fields -------------------------------------------------------
  # the measurements are useless if they aren't associated with a site ID
  if(is.na(Og_Site)){
    msg <- paste0(msg, "No site ID entered.<br/>")
  }
  if(is.na(Substrate)){
    msg <- paste0(msg, "No substrate selected.<br/>")
  }
  if(is.na(Collection_Type)){
    msg <- paste0(msg, "No collection type selected.<br/>")
  }
  if(is.na(Instream_Location)){
    msg <- paste0(msg, "No instream location selected.<br/>")
  }
  if(is.na(Flow_Type)){
    msg <- paste0(msg, "No flow type selected.<br/>")
  }
  if(is.na(Stage_Condition)){
    msg <- paste0(msg, "No stage condition selected.<br/>")
  }
  # range checks -----------------------------------------------------------
  # A missing measurement is accepted (the empty 'if' branch skips the bounds
  # check to avoid 'missing value where TRUE/FALSE needed' errors); a value
  # outside the QAQC bounds adds an error message and blocks the insert.
  if(is.na(Temperature_c)){
  }else if(Temperature_c > max_temp | Temperature_c < min_temp){
    msg <- paste0(msg, "Entered temperature outside reasonable range (",min_temp,"-",max_temp,").<br/>")
  }
  # fix: the checks below previously referenced undefined objects
  # (`ph` instead of the `pH` argument, `max_dissolved` instead of the
  # `max_dissovled` global actually defined at the top of the script)
  if(is.na(pH)){
  }else if(pH > max_ph | pH < min_ph){
    msg <- paste0(msg, "Entered pH value outside reasonable range (",min_ph,"-",max_ph,").<br/>")
  }
  if(is.na(Dissolved_Oxyen_mgl)){
  }else if(Dissolved_Oxyen_mgl > max_do | Dissolved_Oxyen_mgl < min_do){
    msg <- paste0(msg, "Entered dissolved oxygen value outside reasonable range (",min_do,"-",max_do,").<br/>")
  }
  if(is.na(Specific_Conductivity_uscm)){
  }else if(Specific_Conductivity_uscm > max_spc | Specific_Conductivity_uscm < min_spc){
    msg <- paste0(msg, "Entered conductivity value outside reasonable range (",min_spc,"-",max_spc,").<br/>")
  }
  if(is.na(Turbidity_ntu)){
  }else if(Turbidity_ntu > max_turb | Turbidity_ntu < min_turb){
    msg <- paste0(msg, "Entered turbidity value outside reasonable range (",min_turb,"-",max_turb,").<br/>")
  }
  if(is.na(Dissolved_Nitrate_mgl)){
  }else if(Dissolved_Nitrate_mgl > max_dissovled | Dissolved_Nitrate_mgl < min_dissolved){
    msg <- paste0(msg, "Entered dissolved nitrate value outside reasonable range (",min_dissolved,"-",max_dissovled,").<br/>")
  }
  if(is.na(Dissolved_Ammonium_mgl)){
  }else if(Dissolved_Ammonium_mgl > max_dissovled | Dissolved_Ammonium_mgl < min_dissolved){
    msg <- paste0(msg, "Entered dissolved ammonium value outside reasonable range (",min_dissolved,"-",max_dissovled,").<br/>")
  }
  if(is.na(Dissolved_Phosphorus_mgl)){
  }else if(Dissolved_Phosphorus_mgl > max_dissovled | Dissolved_Phosphorus_mgl < min_dissolved){
    msg <- paste0(msg, "Entered dissolved phosphorus value outside reasonable range (",min_dissolved,"-",max_dissovled,").<br/>")
  }
  if(is.na(Total_Nitrogen_mgl)){
  }else if(Total_Nitrogen_mgl > max_totals | Total_Nitrogen_mgl < min_totals){
    msg <- paste0(msg, "Entered total nitrogen value outside reasonable range (",min_totals,"-",max_totals,").<br/>")
  }
  if(is.na(Total_Phosphorus_mgL)){
  }else if(Total_Phosphorus_mgL > max_totals | Total_Phosphorus_mgL < min_totals){
    msg <- paste0(msg, "Entered total phosphorus value outside reasonable range (",min_totals,"-",max_totals,").<br/>")
  }
  # if there are no error messages, add data to database as new record
  if(nchar(msg) == 0){
    # fix: the VALUES list was missing '?' placeholders for
    # Specific_Conductivity_uscm, Turbidity_ntu and Dissolved_Nitrate_mgl
    sql <- "INSERT INTO habitat (Date, Og_Site, Observers, Temperature_c, ph, Dissolved_Oxyen_mgl, Specific_Conductivity_uscm, Turbidity_ntu, Dissolved_Nitrate_mgl,
    Dissolved_Ammonium_mgl, Dissolved_Phosphorus_mgl, Total_Nitrogen_mgl, Total_Phosphorus_mgL, Calcium_mgL, Magnesium_mgl, Sodium_mgl, Analytical_lab,
    Instream_Location, Collection_Type, Channel_Width_m, Flow_Type, Substrate, Stage_Condition, Water_Odor,
    Water_Color, Weather_Conditions, RiverRight_Buffer, RiverLeft_Buffer, Water_Quality_Notes, USGS_Gage_cfs, USGS_Gage_ID) VALUES (?Date, ?Og_Site, ?Observers, ?Temperature_c, ?ph, ?Dissolved_Oxyen_mgl, ?Specific_Conductivity_uscm, ?Turbidity_ntu, ?Dissolved_Nitrate_mgl,
    ?Dissolved_Ammonium_mgl, ?Dissolved_Phosphorus_mgl, ?Total_Nitrogen_mgl, ?Total_Phosphorus_mgL, ?Calcium_mgL, ?Magnesium_mgl, ?Sodium_mgl, ?Analytical_lab,
    ?Instream_Location, ?Collection_Type, ?Channel_Width_m, ?Flow_Type, ?Substrate, ?Stage_Condition, ?Water_Odor,
    ?Water_Color, ?Weather_Conditions, ?RiverRight_Buffer, ?RiverLeft_Buffer, ?Water_Quality_Notes, ?USGS_Gage_cfs, ?USGS_Gage_ID);"
    # connect to database; ensure the connection is closed even on failure
    con <- dbConnect(RSQLite::SQLite(), db_path)
    on.exit(dbDisconnect(con), add = TRUE)
    # construct query using sqlInterpolate to prevent SQL injection attacks
    query <- sqlInterpolate(con, sql,
                            Date = Date,
                            Og_Site = Og_Site,
                            Observers = Observers,
                            Temperature_c = Temperature_c,
                            ph = pH,
                            Dissolved_Oxyen_mgl = Dissolved_Oxyen_mgl,
                            Specific_Conductivity_uscm = Specific_Conductivity_uscm,
                            Turbidity_ntu = Turbidity_ntu,
                            Dissolved_Nitrate_mgl = Dissolved_Nitrate_mgl,
                            Dissolved_Ammonium_mgl = Dissolved_Ammonium_mgl,
                            Dissolved_Phosphorus_mgl = Dissolved_Phosphorus_mgl,
                            Total_Nitrogen_mgl = Total_Nitrogen_mgl,
                            Total_Phosphorus_mgL = Total_Phosphorus_mgL,  # fix: was TTotal_Phosphorus_mgL
                            Calcium_mgL = Calcium_mgL,
                            Sodium_mgl = Sodium_mgl,
                            Magnesium_mgl = Magnesium_mgl,
                            Analytical_lab = Analytical_lab,
                            Instream_Location = Instream_Location,
                            Collection_Type = Collection_Type,
                            Channel_Width_m = Channel_Width_m,
                            Flow_Type = Flow_Type,
                            Stage_Condition = Stage_Condition,
                            Substrate = Substrate,
                            Water_Odor = Water_Odor,
                            Water_Color = Water_Color,
                            Weather_Conditions = Weather_Conditions,
                            RiverRight_Buffer = RiverRight_Buffer,
                            RiverLeft_Buffer = RiverLeft_Buffer,
                            Water_Quality_Notes = Water_Quality_Notes,
                            USGS_Gage_cfs = USGS_Gage_cfs,
                            USGS_Gage_ID = USGS_Gage_ID)
    # finally execute query to add record to database
    dbExecute(con, query)
  }
  # return error messages (empty string on success)
  return(msg)
}
#' style_switch
#'
#' Pick a CSS string depending on whether a value lies within an acceptable
#' range. A missing value counts as acceptable.
#'
#' @param value numeric value to test
#' @param min numeric lower bound of acceptable range of values
#' @param max numeric upper bound of acceptable range of values
#' @param style1 character string css for value within acceptable range
#' @param style2 character string css for value outside of acceptable range
#'
#' @return style1 if value is NA or within [min, max], otherwise style2
#'
#' @examples
style_switch <- function(value, min, max, style1, style2){
  out_of_range <- !is.na(value) && (value > max || value < min)
  if (out_of_range) style2 else style1
}
# Define UI ----
# Single-tab page: the field data sheet entry form, laid out as fluidRows of
# inputs separated by hr() rules, followed by submit button, status divs,
# error output and a table of entered records.
ui <- fluidPage(
  useShinyjs(), # this line is necessary if any functions from the shinyjs package are used in the app
  title = "Water Quality Example Interface",
  tabsetPanel(id = "tabs",
              # page 51 of "PN, Mon Guidelines & Perf Stds_11.8.18.pdf"
              tabPanel("Field Data Sheet Entry", id = "single",
                       # first row, location, date, site
                       fluidRow(
                         column(2,
                                dateInput(
                                  inputId = "Date",
                                  label = "Date",
                                  format = "yyyy-mm-dd",
                                  value = current_date,
                                  max = current_date,
                                  min = first_date
                                )
                         ),
                         column(4,
                                numericInput(inputId = "Og_Site",
                                             label = "Site#",
                                             value = NULL,
                                             min = 0,
                                             max = 1000000000 # this seems like a lot of sites, but who knows how people number their sites
                                )
                         ),
                         column(6,
                                textInput(
                                  inputId = "Observers",
                                  label = "Observer(s)",
                                  value = "",
                                  # entry window takes up the entire width of its container / the browser window
                                  # to allow for long lists of data collectors to be visible
                                  width = "100%"
                                )
                         )
                       ),
                       hr(),
                       # second row, water quality
                       fluidRow(
                         column(2,
                                htmlOutput("water_quality")
                         ),
                         # the 'min' and 'max' arguments to the input functions
                         # specified here are only enforced by the function
                         # if the user sets the value using the up and down arrows,
                         # not if the user enters a specific value with the keyboard,
                         # necessitating additional input validation
                         column(2,
                                # this line allows changing of the style of the field label dynamically
                                # based on whether the entered value is within the acceptable range
                                htmlOutput("temp_div"),
                                numericInput(inputId = "Temperature_c",
                                             label = "", # label is replaced by 'htmlOutput()' above
                                             value = NULL,
                                             min = min_temp,
                                             max = max_temp,
                                             step = .01)
                         ),
                         column(2,
                                htmlOutput("ph_div"),
                                numericInput(inputId = "ph",
                                             label = "",
                                             value = NULL,
                                             min = min_ph,
                                             max = max_ph,
                                             step = .01)
                         ),
                         column(2,
                                htmlOutput("do_div"),
                                numericInput(inputId = "Dissolved_Oxygen_mgl",
                                             label = "",
                                             value = NULL,
                                             min = min_do,
                                             max = max_do,
                                             step = .01)
                         ),
                         column(2,
                                htmlOutput("spc_div"),
                                numericInput(inputId = "Specific_Conductivity_uscm",
                                             label = "",
                                             value = NULL,
                                             min = min_spc,
                                             max = max_spc,
                                             step = .01)
                         ),
                         column(2,
                                htmlOutput("turb_div"),
                                numericInput(inputId = "Turbidity_ntu",
                                             label = "",
                                             value = NULL,
                                             min = min_turb,
                                             max = max_turb,
                                             step = .01)
                         )
                       ),
                       hr(),
                       # third row, nutrient chemistry and analytical lab
                       fluidRow(
                         column(2,
                                selectInput(inputId = "Analytical_Lab",
                                            label = "Analytical Lab",
                                            # added empty string to options for streams in order to prevent errors
                                            # that could occur if users submit data without changing the stream name from the default stream,
                                            # leading to data misattributed to the default stream
                                            choices = c("", labs))
                         ),
                         column(2,
                                htmlOutput("nitrate_div"),
                                numericInput(inputId = "Dissolved_Nitrate_mgl",
                                             label = "",
                                             value = NULL,
                                             min = min_dissolved,
                                             max = max_dissovled,
                                             step = .01)
                         ),
                         column(2,
                                htmlOutput("ammonium_div"),
                                numericInput(inputId = "Dissolved_Ammonium_mgl",
                                             label = "",
                                             value = NULL,
                                             min = min_dissolved,
                                             max = max_dissovled,
                                             step = .01)
                         ),
                         column(2,
                                htmlOutput("srp_div"),
                                numericInput(inputId = "Dissolved_Phosphorus_mgl",
                                             label = "",
                                             value = NULL,
                                             min = min_dissolved,
                                             max = max_dissovled,
                                             step = .01)
                         ),
                         column(2,
                                htmlOutput("totn_div"),
                                numericInput(inputId = "Total_Nitrogen_mgl",
                                             label = "",
                                             value = NULL,
                                             min = min_totals,
                                             max = max_totals,
                                             step = .01)
                         ),
                         column(2,
                                htmlOutput("totp_div"),
                                numericInput(inputId = "Total_Phosphorus_mgl",
                                             label = "",
                                             value = NULL,
                                             min = min_totals,
                                             max = max_totals,
                                             step = .01)
                         )
                       ),
                       hr(),
                       # cations (no min/max bounds on these inputs)
                       fluidRow(
                         column(2,
                                htmlOutput("calc_div"),
                                numericInput(inputId = "Calcium_mgl",
                                             label = "",
                                             value = NULL,
                                             step = .01)
                         ),
                         column(2,
                                htmlOutput("sod_div"),
                                numericInput(inputId = "Sodium_mgl",
                                             label = "",
                                             value = NULL,
                                             step = .01)
                         ),
                         column(2,
                                htmlOutput("mag_div"),
                                numericInput(inputId = "Magnesium_mgl",
                                             label = "",
                                             value = NULL,
                                             step = .01)
                         )
                       ),
                       hr(),
                       # sample collection context
                       fluidRow(
                         column(2, selectInput(inputId = "Instream_Location",
                                               label = "Instream Location",
                                               # added empty string to options for streams in order to prevent errors
                                               # that could occur if users submit data without changing the stream name from the default stream,
                                               # leading to data misattributed to the default stream
                                               choices = c("", instream_locations),
                                               multiple = TRUE)
                         ),
                         column(2, selectInput(inputId = "Collection_Type",
                                               label = "Collection Type",
                                               # added empty string to options for streams in order to prevent errors
                                               # that could occur if users submit data without changing the stream name from the default stream,
                                               # leading to data misattributed to the default stream
                                               choices = c("", collect_types),
                                               multiple = TRUE)
                         ),
                         column(2, selectInput(inputId = "Flow_Type",
                                               label = "Flow Type",
                                               # added empty string to options for streams in order to prevent errors
                                               # that could occur if users submit data without changing the stream name from the default stream,
                                               # leading to data misattributed to the default stream
                                               choices = c("", flow_types),
                                               multiple = TRUE)
                         ),
                         # NOTE(review): inputId is "Flow_Condition" but the label says
                         # "Stage Condition" -- confirm which name the server logic reads
                         column(2, selectInput(inputId = "Flow_Condition",
                                               label = "Stage Condition",
                                               # added empty string to options for streams in order to prevent errors
                                               # that could occur if users submit data without changing the stream name from the default stream,
                                               # leading to data misattributed to the default stream
                                               choices = c("", flow_conditions),
                                               multiple = TRUE)
                         )
                       ),
                       hr(),
                       # substrate, channel dimensions and water appearance
                       fluidRow(
                         column(2, selectInput(inputId = "Substrate",
                                               label = "Substrate",
                                               # added empty string to options for streams in order to prevent errors
                                               # that could occur if users submit data without changing the stream name from the default stream,
                                               # leading to data misattributed to the default stream
                                               choices = c("", substrates),
                                               multiple = TRUE)
                         ),
                         column(2,
                                numericInput(inputId = "Channel_Width_m",
                                             label = "Channel Width (m)",
                                             value = NULL,
                                             min = 0,
                                             max = 10000 #
                                )
                         ),
                         column(4,
                                textInput(inputId = "Water_Odor",
                                          label = "Water Odor",
                                          value = "",
                                          # NOTE(review): width is "10%" here, unlike the 100%-wide
                                          # text inputs above -- confirm this narrow width is intended
                                          width = "10%"
                                )
                         ),
                         column(4,
                                textInput(
                                  inputId = "Water_Color",
                                  label = "Water Color",
                                  value = "",
                                  # NOTE(review): width is "10%" here as well -- confirm intended
                                  width = "10%"
                                )
                         )
                       ),
                       hr(),
                       # weather during sampling
                       fluidRow(
                         column(2, selectInput(inputId = "Weather_Conditions",
                                               label = "Weather",
                                               # added empty string to options for streams in order to prevent errors
                                               # that could occur if users submit data without changing the stream name from the default stream,
                                               # leading to data misattributed to the default stream
                                               choices = c("", weather_conditons),
                                               multiple = TRUE)
                         )
                       ),
                       fluidRow(
                         # NOTE(review): the referenced name is spelled 'buffer_conditons';
                         # make sure it matches the option-list definition at the top of the file
                         column(2, selectInput(inputId = "RiverRight_Buffer",
                                               label = "River Right Riparian Buffer Conition",
                                               # added empty string to options for streams in order to prevent errors
                                               # that could occur if users submit data without changing the stream name from the default stream,
                                               # leading to data misattributed to the default stream
                                               choices = c("",buffer_conditons),
                                               multiple = TRUE)
                         )
                       ),
                       fluidRow(
                         # NOTE(review): same 'buffer_conditons' spelling as above -- verify
                         column(2, selectInput(inputId = "RiverLeft_Buffer",
                                               label = "River Left Riparian Buffer Conition",
                                               # added empty string to options for streams in order to prevent errors
                                               # that could occur if users submit data without changing the stream name from the default stream,
                                               # leading to data misattributed to the default stream
                                               choices = c("",buffer_conditons),
                                               multiple = TRUE)
                         ),
                         column(2,
                                numericInput(inputId = "USGS_Gage_cfs",
                                             label = "USGS Gage Discharge (cfs)",
                                             value = NULL)
                         ),
                         column(2,
                                numericInput(inputId = "USGS_Gage_ID",
                                             label = "USGS Gage ID",
                                             value = NULL)
                         )
                       ),
                       hr(),
                       # free-text notes
                       fluidRow(
                         column(6,
                                textInput(
                                  inputId = "Water_Quality_Notes",
                                  label = "Water Quality Notes",
                                  value = "",
                                  # entry window takes up the entire width of its container / the browser window
                                  # to allow for long lists of data collectors to be visible
                                  width = "100%"
                                )
                         )
                       ),
                       hr(),
                       fluidRow(
                         column(12,
                                actionButton(inputId = "submit", label = "Submit")
                         )
                       ),
                       hr(),
                       fluidRow(
                         column(12,
                                # this div is hidden by default and becomes visible
                                # after a record is successfully added to the database table
                                hidden(
                                  div(
                                    id = "success",
                                    h4("Data submitted successfully")
                                  )
                                )
                         )
                       ),
                       fluidRow(
                         column(12,
                                # this div is hidden by default and becomes visible
                                # if errors or unaccepted values are detected in the entered data
                                hidden(
                                  div(
                                    id = "error",
                                    h4("There are problems with the entered data that prevented them from being imported into the database.",
                                       HTML("<div style='text-decoration:underline'>Resolve these errors before attempting to resubmit.</div>"))
                                  )
                                )
                         )
                       ),
                       fluidRow(
                         # display error messages returned from the update_hab function
                         column(12,
                                htmlOutput("entry_errs")
                         )
                       ),
                       hr(),
                       fluidRow(
                         # display records entered into database table
                         DT::dataTableOutput("table_out")
                       )
              )
  )
)
# Define server logic ----
server <- function(input, output, session) {

  # ---- reactive CSS for numeric-entry labels ------------------------------
  # Each numeric field gets:
  #   * an eventReactive that chooses a CSS style (default_style vs
  #     error_style, defined elsewhere in this file) via style_switch(),
  #     based on whether the entered value falls inside the accepted range;
  #   * a renderUI that rebuilds the field's label with that style, so
  #     out-of-range entries are visually flagged.

  temp_style <- eventReactive(input$Temperature_c, {
    style_switch(input$Temperature_c, min_temp, max_temp, default_style, error_style)
  })
  output$temp_div <- renderUI({
    HTML(paste0("<div style='font-weight:bolder;", temp_style(), "'>Temperature (degrees C)</div>"))
  })

  ph_style <- eventReactive(input$ph, {
    style_switch(input$ph, min_ph, max_ph, default_style, error_style)
  })
  output$ph_div <- renderUI({
    HTML(paste0("<div style='font-weight:bolder;", ph_style(), "'>pH</div>"))
  })

  do_style <- eventReactive(input$Dissolved_Oxygen_mgl, {
    style_switch(input$Dissolved_Oxygen_mgl, min_do, max_do, default_style, error_style)
  })
  output$do_div <- renderUI({
    HTML(paste0("<div style='font-weight:bolder;", do_style(), "'>DO (mg/L)</div>"))
  })

  spc_style <- eventReactive(input$Specific_Conductivity_uscm, {
    style_switch(input$Specific_Conductivity_uscm, min_spc, max_spc, default_style, error_style)
  })
  output$spc_div <- renderUI({
    HTML(paste0("<div style='font-weight:bolder;", spc_style(), "'>Specific Conductance (uS/cm)</div>"))
  })

  # static label for the row of water quality values
  output$water_quality <- renderUI({
    HTML("<div style='font-weight:bolder;padding-top:20px;text-align: right;'>Water Quality: </div>")
  })

  # BUG FIX: the labels below previously all reused turb_style(), so the
  # turbidity field's CSS was shown on eight unrelated labels; each label
  # now uses the style reactive of its own field.

  nitrate_style <- eventReactive(input$Dissolved_Nitrate_mgl, {
    # NOTE(review): `max_dissovled` matches the spelling of the constant
    # defined elsewhere in this file -- keep the two in sync if that typo
    # is ever corrected.
    style_switch(input$Dissolved_Nitrate_mgl, min_dissolved, max_dissovled, default_style, error_style)
  })
  output$nitrate_div <- renderUI({
    HTML(paste0("<div style='font-weight:bolder;", nitrate_style(), "'>Dissolved NO3 (mg/L)</div>"))
  })

  turb_style <- eventReactive(input$Turbidity_ntu, {
    style_switch(input$Turbidity_ntu, min_turb, max_turb, default_style, error_style)
  })
  output$turb_div <- renderUI({
    HTML(paste0("<div style='font-weight:bolder;", turb_style(), "'>Turbidity (ntu)</div>"))
  })

  ammonium_style <- eventReactive(input$Dissolved_Ammonium_mgl, {
    style_switch(input$Dissolved_Ammonium_mgl, min_dissolved, max_dissovled, default_style, error_style)
  })
  output$ammonium_div <- renderUI({
    HTML(paste0("<div style='font-weight:bolder;", ammonium_style(), "'>Dissolved NH4 (mg/L)</div>"))
  })

  srp_style <- eventReactive(input$Dissolved_Phosphorus_mgl, {
    style_switch(input$Dissolved_Phosphorus_mgl, min_dissolved, max_dissovled, default_style, error_style)
  })
  output$srp_div <- renderUI({
    HTML(paste0("<div style='font-weight:bolder;", srp_style(), "'>Dissolved PO4 (mg/L)</div>"))
  })

  totn_style <- eventReactive(input$Total_Nitrogen_mgl, {
    style_switch(input$Total_Nitrogen_mgl, min_totals, max_totals, default_style, error_style)
  })
  output$totn_div <- renderUI({
    HTML(paste0("<div style='font-weight:bolder;", totn_style(), "'>Total N (mg/L)</div>"))
  })

  totp_style <- eventReactive(input$Total_Phosphorus_mgl, {
    style_switch(input$Total_Phosphorus_mgl, min_totals, max_totals, default_style, error_style)
  })
  output$totp_div <- renderUI({
    HTML(paste0("<div style='font-weight:bolder;", totp_style(), "'>Total P (mg/L)</div>"))
  })

  # NOTE(review): the three calls below pass no min/max range to
  # style_switch(), unlike every other field -- confirm that style_switch()
  # supports a range-free signature, otherwise these reactives will error.
  calc_style <- eventReactive(input$Calcium_mgl, {
    style_switch(input$Calcium_mgl, default_style, error_style)
  })
  output$calc_div <- renderUI({
    HTML(paste0("<div style='font-weight:bolder;", calc_style(), "'>Calcium (mg/L)</div>"))
  })

  sod_style <- eventReactive(input$Sodium_mgl, {
    style_switch(input$Sodium_mgl, default_style, error_style)
  })
  output$sod_div <- renderUI({
    HTML(paste0("<div style='font-weight:bolder;", sod_style(), "'>Sodium (mg/L)</div>"))
  })

  mag_style <- eventReactive(input$Magnesium_mgl, {
    style_switch(input$Magnesium_mgl, default_style, error_style)
  })
  output$mag_div <- renderUI({
    HTML(paste0("<div style='font-weight:bolder;", mag_style(), "'>Magnesium (mg/L)</div>"))
  })

  # ---- record submission ---------------------------------------------------
  # On Submit: require a site, attempt the insert via update_hab(), and
  # collect any returned error messages for display.
  err_out <- eventReactive(input$submit, {
    # While the water quality measurements are optional, the data are
    # useless without associating them with a site, so a site is required
    # before a record addition is attempted.
    if(input$Og_Site == ""){
      result <- "No Site selected.<br/>"
    }else{
      # attempt to add a record composed of the entered values and
      # retrieve any error messages.
      # NOTE(review): the phosphorus/calcium inputs were previously read
      # here with a `_mgL` suffix while their style reactives use `_mgl`;
      # normalized to `_mgl` -- confirm against the input IDs in the UI.
      result <- update_hab(input$Date,
                           input$Og_Site,
                           input$Observers,
                           input$Temperature_c,
                           input$ph,
                           input$Dissolved_Oxygen_mgl,
                           input$Specific_Conductivity_uscm,
                           input$Turbidity_ntu,
                           input$Dissolved_Nitrate_mgl,
                           input$Dissolved_Ammonium_mgl,
                           input$Dissolved_Phosphorus_mgl,
                           input$Total_Nitrogen_mgl,
                           input$Total_Phosphorus_mgl,
                           input$Calcium_mgl,
                           input$Sodium_mgl,
                           input$Magnesium_mgl,
                           input$Analytical_lab,
                           input$Instream_Location,
                           input$Collection_Type,
                           input$Channel_Width_m,
                           input$Flow_Type,
                           input$Stage_Condition,
                           input$Substrate,
                           input$Water_Odor,
                           input$Water_Color,
                           input$Weather_Conditions,
                           input$RiverRight_Buffer,
                           input$RiverLeft_Buffer,
                           input$Water_Quality_Notes,
                           input$USGS_Gage_cfs,
                           input$USGS_Gage_ID,
                           db_path
                           )
    }
    # if there are no error messages, show success message and hide error
    # message; otherwise the reverse
    if(nchar(result) == 0){
      shinyjs::show("success")
      shinyjs::hide("error")
    }else{
      shinyjs::show("error")
      shinyjs::hide("success")
    }
    # disable the submit button until inputs are altered, to avoid adding
    # duplicate records from repeated clicks
    disable("submit")
    result
  })

  # format error messages for display in the interface
  output$entry_errs <- renderUI({
    HTML(paste0("<div style='color:red;font-size:large;'>", err_out(), "</div>"))
  })

  # combine all inputs into one reactive so any change can be monitored
  check_all_inputs <- reactive({
    list(input$Date,
         input$Og_Site,
         input$Observers,
         input$Temperature_c,
         input$ph,
         input$Dissolved_Oxygen_mgl,
         input$Specific_Conductivity_uscm,
         input$Turbidity_ntu,
         input$Dissolved_Nitrate_mgl,
         input$Dissolved_Ammonium_mgl,
         input$Dissolved_Phosphorus_mgl,
         input$Total_Nitrogen_mgl,
         input$Total_Phosphorus_mgl,
         input$Calcium_mgl,
         input$Sodium_mgl,
         input$Magnesium_mgl,
         input$Analytical_lab,
         input$Instream_Location,
         input$Collection_Type,
         input$Channel_Width_m,
         input$Flow_Type,
         input$Stage_Condition,
         input$Substrate,
         input$Water_Odor,
         input$Water_Color,
         input$Weather_Conditions,
         input$RiverRight_Buffer,
         input$RiverLeft_Buffer,
         input$Water_Quality_Notes,
         input$USGS_Gage_cfs,
         input$USGS_Gage_ID)
  })

  # if any of the inputs change, re-enable the submit button and hide the
  # stale success message
  observeEvent(check_all_inputs(), {
    shinyjs::hide("success")
    enable("submit")
  })

  # query all database records for display in table form, refreshed on submit
  data_out <- eventReactive(input$submit, {
    con <- dbConnect(RSQLite::SQLite(), db_path)
    # guarantee the connection is released even if the query errors
    on.exit(dbDisconnect(con), add = TRUE)
    dbGetQuery(con, "SELECT * FROM habitat")
  })

  # construct table display of the stored records
  output$table_out <- DT::renderDataTable({
    DT::datatable(data_out(), options = list(pageLength = 50))
  })
}
# Run the app ----
# launch the Shiny application using the UI and server defined above
shinyApp(ui = ui, server = server)
|
2982e9b20d7bc9766843f64608fd404e43a90e08
|
72f7878de8b07cfa6945fea9f9d21ce5e52aa238
|
/R/process_thermal_data.R
|
11b2aa34e2c2b472c928d8da87d813a7ab753cb0
|
[
"BSD-2-Clause"
] |
permissive
|
IMMM-SFA/wmpp
|
06d35e6676b09e02e5aad464de7ffc8dfa21a434
|
d1cd7e4401963470eff1970714c0edbda812a294
|
refs/heads/master
| 2020-05-01T19:57:28.675462
| 2020-03-20T17:30:49
| 2020-03-20T17:30:49
| 177,661,130
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,645
|
r
|
process_thermal_data.R
|
# process_thermal_data
#
# Extracts the essential per-plant fields from the MATLAB file
# plantFlow_thermal.mat (shipped in inst/extdata) into a single tibble,
# one row per thermal plant. This is performed offline to avoid creating
# a runtime dependency on R.matlab for the package.
#
# Returns a tibble with one row per plant slice of the 3-d array
# `plantFlow.thermal`.
process_thermal_data <- function(){
  extdata_dir <- system.file("extdata/", package = "wmpp")
  data_ml <- readMat(paste0(extdata_dir, "plantFlow_thermal.mat"))
  # split the 3-d array into a list of per-plant slices;
  # seq_len() (rather than seq()) avoids the 1:0 trap if the third
  # dimension were ever 0
  lapply(seq_len(dim(data_ml$plantFlow.thermal)[3]), function(x){
    data_ml$plantFlow.thermal[ , , x]
  }
  ) %>% purrr::map(function(x){
    # cooling fields are absent for some plants; fall back to NA
    if(is.null(x$coolingTechnology)){
      ct <- NA_character_
    }else{
      ct <- x$coolingTechnology[1,1]
    }
    if(is.null(x$coolingType)){
      ctp <- NA_character_
    }else{
      ctp <- x$coolingType[1,1]
    }
    # fuel extraction intentionally disabled; kept for reference
    # if(is.null(x$fuel)){
    #   fu <- NA_character_
    # }else{
    #   fu <- x$fuel[[1]][[1]][1,1]
    # }
    tibble::tibble(refID = x$refID[1,1],
                   name = x$name[1,1],
                   annualMWh = x$annualMWh[1,1],
                   namePlate = x$namePlate[1,1],
                   TEPPCtype = x$TEPPCtype[1,1],
                   plantCode = x$plantCode[1,1],
                   coolingTechnology = ct,
                   coolingType = ctp,
                   #fuel = x$fuel[[1]][[1]][1,1],
                   latAdj = x$latAdj[1,1],
                   lonAdj = x$lonAdj[1,1],
                   region = x$region[1,1],
                   ifDerating = x$ifDerating[1,1],
                   huc4 = x$huc4[1,1],
                   huc4.name = x$huc4.name[1,1],
                   huc2 = x$huc2[1,1])
  }) %>% dplyr::bind_rows()
}
# example offline usage:
# wmpp:::process_thermal_data() %>% readr::write_csv("inst/extdata/plantFlow_thermal.csv")
|
ae38f8eab38768b9ad87b65938c57afad8675feb
|
53e0aee63e97aae26f4394d875794bb70d734fe7
|
/man/FamilyExperiment-class.Rd
|
2706b22f05227dcf1970322852fe89671a31e66a
|
[] |
no_license
|
syounkin/Trioconductor
|
c7ffb64d050d0963157406ea5d9e93c22dda5a5a
|
b05b5146ae1ae8e238cff6dcb605c6809984dec7
|
refs/heads/master
| 2021-01-23T13:49:30.804282
| 2013-07-22T16:11:06
| 2013-07-22T16:11:06
| 7,007,865
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,159
|
rd
|
FamilyExperiment-class.Rd
|
\name{FamilyExperiment-class}
\docType{class}
% Class:
\alias{class:FamilyExperiment}
\alias{FamilyExperiment-class}
\alias{FamilyExperiment}
% Constructor
\alias{FamilyExperiment,SummarizedExperiment,PedClass-method}
% Accessors:
\alias{pedigree,FamilyExperiment-method}
\alias{pedigree}
\alias{MAF,FamilyExperiment-method}
\alias{MAF}
\alias{completeTrios,FamilyExperiment-method}
\alias{completeTrios}
\alias{parents,FamilyExperiment-method}
%\alias{parents}
\alias{baf}
\alias{baf,FamilyExperiment-method}
\alias{cnv}
\alias{cnv,FamilyExperiment-method}
\alias{geno}
\alias{geno,FamilyExperiment-method}
\alias{logR}
\alias{logR,FamilyExperiment-method}
%\alias{ids}
%\alias{ids,
% Methods
%\alias{[,FamilyExperiment-method]}
\alias{[,FamilyExperiment,ANY,ANY,ANY-method}
%\alias{[,FamilyExperiment,ANY,ANY,ANY]}
%\alias{[,FamilyExperiment,ANY,ANY-method]}
%\alias{[,FamilyExperiment,ANY,ANY]}
%\alias{[,FamilyExperiment-method]}
\title{FamilyExperiment Class}
\description{The FamilyExperiment (FE) class is an extension of the
SummarizedExperiment (SE) class defined in GenomicRanges. An FE object
contains an additional slot for a pedigree object. If the FE object
is designed to contain genotype data, then a SNPMatrix object named
'geno' must appear in the assayData list. Similarly with cnv data the
assayData list must contain a matrix named 'cnv'.}
%% \usage{
%% ## Constructors
%% FamilyExperiment(se, pedigree)
%% \S4method{FamilyExperiment}{SummarizedExperiment}(se, pedigree)
%% %\S4method{SummarizedExperiment}{missing}(assays, ...)
%% %\S4method{SummarizedExperiment}{list}(assays, ...)
%% %\S4method{SummarizedExperiment}{matrix}(assays, ...)
%% }
\section{Accessors}{
In the following code snippets, \code{fe} is a FamilyExperiment
object.
\describe{
\item{}{
\code{pedigree(fe)}: Returns the pedigree object
contained in \code{fe}. The returned object will have class
PedClass.
}
\item{}{
\code{MAF(fe)}: Returns a numeric vector of minor allele
frequencies. The frequencies are computed after subsetting the data
to parents only. This is clearly problematic if your pedigree
contains multiple generations. This method is essentially a wrapper
for the minor allele frequency method of SnpStats.
}
\item{}{
\code{completeTrios(fe)}: Returns an \code{n} by 3 (character or factor)?
matrix with rows that correspond to the parent-offspring trios
contained in \code{fe} such that each of the trio members appears in
the assay data. \code{n} is the number of ``complete trios.''
}
\item{}{
\code{parents(fe)}: Returns the parents IDs. Note that this is
intended to be used with parent-offspring trios. It will return any
subject that has both parents in the pedigree data.
}
\item{}{
\code{baf(fe)}: Retrieve matrix of B allele frequencies. Not yet implemented.
}
\item{}{
\code{logR(fe)}: Retrieve matrix of log R ratio values. Not yet implemented.
}
\item{}{
\code{geno(fe)}: Retrieve a matrix of genotypes.
}
\item{}{
\code{cnv(fe)}:
}
\item{}{
\code{fe[i,j]}: subset operator
}
}
}
\author{Samuel G. Younkin <syounkin@jhsph.edu>}
|
2cdbee1a75c86ea501f1ba7d1de3fae5568ec088
|
ce1794734a1dd59e4ab68a9e414abc9ec8998282
|
/man/nufft_1d1.Rd
|
c64f378434481ea6494df8cc4c7a1261a5e2bb11
|
[
"Apache-2.0"
] |
permissive
|
jkennel/finufft
|
e2b7c22be5b90a156812d09bbad9b7198a782671
|
46d8f8fc6dd986a22acbce4c8044bd455ed3cd94
|
refs/heads/master
| 2022-12-31T08:42:16.879083
| 2020-10-24T13:52:45
| 2020-10-24T13:52:45
| 304,408,540
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 435
|
rd
|
nufft_1d1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{nufft_1d1}
\alias{nufft_1d1}
\title{nufft_1d1}
\usage{
nufft_1d1(xj, cj, n1, tol = 1e-09, iflag = 1L)
}
\arguments{
\item{xj}{locations}
\item{cj}{complex weights}
\item{n1}{number of output modes}
\item{tol}{precision}
\item{iflag}{+ or - i}
}
\value{
complex vector
}
\description{
wrapper for the 1d NUFFT (dimension 1, type 1)
}
|
68e7627bda36e8d3839b83247bb4ae702695a90d
|
affee151ef20940e52eea1473635c8f4e35b65de
|
/man/allen.relations.set.Rd
|
1729e88fbde16276fcbc2a02f98484268a51376a
|
[] |
no_license
|
tsdye/allen.archaeology
|
d433c346b6ae93935cb369a8dd917e267aee2cb0
|
ae1e3806df684ffa27fbf2ec1645f178ba101e18
|
refs/heads/master
| 2023-04-11T14:12:16.751804
| 2023-03-25T12:59:26
| 2023-03-25T12:59:26
| 245,044,592
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 454
|
rd
|
allen.relations.set.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{allen.relations.set}
\alias{allen.relations.set}
\title{Express the Allen relation set in mathematical notation}
\usage{
allen.relations.set(allen.set)
}
\arguments{
\item{allen.set}{an Allen relation set}
}
\description{
Return a string expressing an Allen relation set in mathematical
set notation. Useful for printing to the console.
}
\author{
Thomas S. Dye
}
|
cf898de47f130e59960b3046187c22d6593040a9
|
6d399293ee87676a5855f875cb3edca25c823e11
|
/functions.R
|
0ad6f79056e0190bd3c1d2647ceb1632df4a6a70
|
[] |
no_license
|
gabrielteotonio/shape-analysis
|
d4a35ba4d578608be591ccf75db344c52f265a57
|
c01bc1ed7fbd7cc7f0de1c1a86e43efdb839468a
|
refs/heads/master
| 2020-05-05T01:22:22.536206
| 2019-04-25T19:15:19
| 2019-04-25T19:15:19
| 179,600,996
| 0
| 0
| null | 2019-04-25T19:15:20
| 2019-04-05T01:14:41
|
R
|
UTF-8
|
R
| false
| false
| 395
|
r
|
functions.R
|
# Gamma density ----
# Density of the Gamma(shape = alpha, rate = beta) distribution at x,
# vectorized over x:  f(x) = beta^alpha / Gamma(alpha) * x^(alpha-1) * exp(-beta x)
gamma_density <- function(x, alpha, beta) {
  norm_const <- (beta^alpha) / gamma(alpha)
  kernel <- x^(alpha - 1) * exp(-beta * x)
  norm_const * kernel
}
# Stirling approximation ----
# Stirling's approximation to the gamma function:
#   Gamma-hat(alpha) = sqrt(2*pi) * alpha^(alpha - 1/2) * exp(-alpha)
gamma_hat <- function(alpha) {
  stirling <- alpha^(alpha - 1/2) * exp(-alpha)
  sqrt(2 * pi) * stirling
}
# Saddlepoint density ----
# Saddlepoint approximation to the Gamma(alpha, beta) density: the exact
# density rescaled by the ratio gamma(alpha) / gamma_hat(alpha), i.e. by
# the relative error of Stirling's approximation.
gamma_saddle_density <- function(x, alpha, beta) {
  correction <- gamma(alpha) / gamma_hat(alpha)
  correction * gamma_density(x, alpha, beta)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.